path: root/src/video_core/memory_manager.h
author     bunnei <bunneidev@gmail.com>  2019-03-04 05:54:16 +0100
committer  bunnei <bunneidev@gmail.com>  2019-03-21 03:36:02 +0100
commit     22d3dfbcd4c606d40e5ae36970db4661c302859f (patch)
tree       24bf6fe7420aab7a34be7782bc1830e053b64679 /src/video_core/memory_manager.h
parent     gpu: Move GPUVAddr definition to common_types. (diff)
Diffstat (limited to 'src/video_core/memory_manager.h')
-rw-r--r--  src/video_core/memory_manager.h  162
1 file changed, 115 insertions(+), 47 deletions(-)
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index bb87fa24d..ac1b42936 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -1,79 +1,147 @@
-// Copyright 2018 yuzu emulator team
+// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
-#include <array>
-#include <memory>
+#include <map>
#include <optional>
-#include <vector>
#include "common/common_types.h"
+#include "common/page_table.h"
namespace Tegra {
+/**
+ * Represents a VMA in an address space. A VMA is a contiguous region of virtual addressing space
+ * with homogeneous attributes across its extents. In this particular implementation each VMA is
+ * also backed by a single host memory allocation.
+ */
+struct VirtualMemoryArea {
+ enum class Type : u8 {
+ Unmapped,
+ Allocated,
+ Mapped,
+ };
+
+ /// Virtual base address of the region.
+ GPUVAddr base{};
+ /// Size of the region.
+ u64 size{};
+ /// Memory area mapping type.
+ Type type{Type::Unmapped};
+ /// CPU memory mapped address corresponding to this memory area.
+ VAddr backing_addr{};
+ /// Offset into the backing_memory the mapping starts from.
+ std::size_t offset{};
+ /// Pointer backing this VMA.
+ u8* backing_memory{};
+
+ /// Tests if this area can be merged to the right with `next`.
+ bool CanBeMergedWith(const VirtualMemoryArea& next) const;
+};
+
class MemoryManager final {
public:
MemoryManager();
GPUVAddr AllocateSpace(u64 size, u64 align);
GPUVAddr AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align);
- GPUVAddr MapBufferEx(VAddr cpu_addr, u64 size);
- GPUVAddr MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size);
+ GPUVAddr MapBufferEx(GPUVAddr cpu_addr, u64 size);
+ GPUVAddr MapBufferEx(GPUVAddr cpu_addr, GPUVAddr gpu_addr, u64 size);
GPUVAddr UnmapBuffer(GPUVAddr gpu_addr, u64 size);
- GPUVAddr GetRegionEnd(GPUVAddr region_start) const;
std::optional<VAddr> GpuToCpuAddress(GPUVAddr gpu_addr);
- static constexpr u64 PAGE_BITS = 16;
- static constexpr u64 PAGE_SIZE = 1 << PAGE_BITS;
- static constexpr u64 PAGE_MASK = PAGE_SIZE - 1;
-
- u8 Read8(GPUVAddr addr);
- u16 Read16(GPUVAddr addr);
- u32 Read32(GPUVAddr addr);
- u64 Read64(GPUVAddr addr);
+ template <typename T>
+ T Read(GPUVAddr vaddr);
- void Write8(GPUVAddr addr, u8 data);
- void Write16(GPUVAddr addr, u16 data);
- void Write32(GPUVAddr addr, u32 data);
- void Write64(GPUVAddr addr, u64 data);
+ template <typename T>
+ void Write(GPUVAddr vaddr, T data);
u8* GetPointer(GPUVAddr vaddr);
void ReadBlock(GPUVAddr src_addr, void* dest_buffer, std::size_t size);
void WriteBlock(GPUVAddr dest_addr, const void* src_buffer, std::size_t size);
- void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size);
+ void CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size);
private:
- enum class PageStatus : u64 {
- Unmapped = 0xFFFFFFFFFFFFFFFFULL,
- Allocated = 0xFFFFFFFFFFFFFFFEULL,
- Reserved = 0xFFFFFFFFFFFFFFFDULL,
- };
-
- std::optional<GPUVAddr> FindFreeBlock(GPUVAddr region_start, u64 size, u64 align,
- PageStatus status);
- VAddr& PageSlot(GPUVAddr gpu_addr);
-
- static constexpr u64 MAX_ADDRESS{0x10000000000ULL};
- static constexpr u64 PAGE_TABLE_BITS{10};
- static constexpr u64 PAGE_TABLE_SIZE{1 << PAGE_TABLE_BITS};
- static constexpr u64 PAGE_TABLE_MASK{PAGE_TABLE_SIZE - 1};
- static constexpr u64 PAGE_BLOCK_BITS{14};
- static constexpr u64 PAGE_BLOCK_SIZE{1 << PAGE_BLOCK_BITS};
- static constexpr u64 PAGE_BLOCK_MASK{PAGE_BLOCK_SIZE - 1};
-
- using PageBlock = std::array<VAddr, PAGE_BLOCK_SIZE>;
- std::array<std::unique_ptr<PageBlock>, PAGE_TABLE_SIZE> page_table{};
-
- struct MappedRegion {
- VAddr cpu_addr;
- GPUVAddr gpu_addr;
- u64 size;
- };
+ using VMAMap = std::map<GPUVAddr, VirtualMemoryArea>;
+ using VMAHandle = VMAMap::const_iterator;
+ using VMAIter = VMAMap::iterator;
+
+ void MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type,
+ VAddr backing_addr = 0);
+ void MapMemoryRegion(GPUVAddr base, u64 size, u8* target, VAddr backing_addr);
+ void UnmapRegion(GPUVAddr base, u64 size);
+
+ /// Finds the VMA in which the given address is included in, or `vma_map.end()`.
+ VMAHandle FindVMA(GPUVAddr target) const;
+
+ VMAHandle AllocateMemory(GPUVAddr target, std::size_t offset, u64 size);
+
+ /**
+ * Maps an unmanaged host memory pointer at a given address.
+ *
+ * @param target The guest address to start the mapping at.
+ * @param memory The memory to be mapped.
+ * @param size Size of the mapping.
+ * @param state MemoryState tag to attach to the VMA.
+ */
+ VMAHandle MapBackingMemory(GPUVAddr target, u8* memory, u64 size, VAddr backing_addr);
+
+ /// Unmaps a range of addresses, splitting VMAs as necessary.
+ void UnmapRange(GPUVAddr target, u64 size);
+
+ /// Converts a VMAHandle to a mutable VMAIter.
+ VMAIter StripIterConstness(const VMAHandle& iter);
+
+ /// Unmaps the given VMA.
+ VMAIter Unmap(VMAIter vma);
+
+ /**
+ * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing
+ * the appropriate error checking.
+ */
+ VMAIter CarveVMA(GPUVAddr base, u64 size);
+
+ /**
+ * Splits the edges of the given range of non-Free VMAs so that there is a VMA split at each
+ * end of the range.
+ */
+ VMAIter CarveVMARange(GPUVAddr base, u64 size);
+
+ /**
+ * Splits a VMA in two, at the specified offset.
+ * @returns the right side of the split, with the original iterator becoming the left side.
+ */
+ VMAIter SplitVMA(VMAIter vma, u64 offset_in_vma);
+
+ /**
+ * Checks for and merges the specified VMA with adjacent ones if possible.
+ * @returns the merged VMA or the original if no merging was possible.
+ */
+ VMAIter MergeAdjacent(VMAIter vma);
+
+ /// Updates the pages corresponding to this VMA so they match the VMA's attributes.
+ void UpdatePageTableForVMA(const VirtualMemoryArea& vma);
+
+ GPUVAddr FindFreeRegion(GPUVAddr region_start, u64 size, u64 align,
+ VirtualMemoryArea::Type vma_type);
- std::vector<MappedRegion> mapped_regions;
+private:
+ static constexpr u64 page_bits{16};
+ static constexpr u64 page_size{1 << page_bits};
+ static constexpr u64 page_mask{page_size - 1};
+
+ /// Address space in bits, this is fairly arbitrary but sufficiently large.
+ static constexpr u32 address_space_width = 39;
+ /// Start address for mapping, this is fairly arbitrary but must be non-zero.
+ static constexpr GPUVAddr address_space_base = 0x100000;
+ /// End of address space, based on address space in bits.
+ static constexpr GPUVAddr address_space_end = 1ULL << address_space_width;
+
+ Common::PageTable page_table{page_bits};
+ VMAMap vma_map;
};
} // namespace Tegra
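
For context only (not part of the commit above): the new header replaces the flat page-slot scheme with a std::map of VirtualMemoryArea entries keyed by base address, where FindVMA locates the region containing an address and GpuToCpuAddress derives the backing CPU address from it. The sketch below illustrates that map-based lookup technique under simplified, invented names (GpuVA, CpuVA, Vma, FindVma, GpuToCpu); it is an assumption-laden illustration, not the commit's implementation.

// Illustrative sketch only -- names and types are invented for this example.
#include <cstdint>
#include <iostream>
#include <map>
#include <optional>

using GpuVA = std::uint64_t;
using CpuVA = std::uint64_t;

struct Vma {
    GpuVA base{};         // virtual base address of the region
    std::uint64_t size{}; // size of the region in bytes
    bool mapped{};        // whether the region is backed by CPU memory
    CpuVA backing_addr{}; // CPU address the region maps to (when mapped)
};

using VmaMap = std::map<GpuVA, Vma>; // keyed by each VMA's base address

// Finds the VMA containing `target`: upper_bound returns the first VMA that
// starts strictly after `target`, so its predecessor is the only candidate
// that can contain the address.
const Vma* FindVma(const VmaMap& vmas, GpuVA target) {
    auto it = vmas.upper_bound(target);
    if (it == vmas.begin()) {
        return nullptr;
    }
    --it;
    const Vma& vma = it->second;
    return (target < vma.base + vma.size) ? &vma : nullptr;
}

// Translates a GPU address to its backing CPU address by adding the offset
// within the containing VMA, the kind of query GpuToCpuAddress performs.
std::optional<CpuVA> GpuToCpu(const VmaMap& vmas, GpuVA gpu_addr) {
    const Vma* vma = FindVma(vmas, gpu_addr);
    if (vma == nullptr || !vma->mapped) {
        return std::nullopt;
    }
    return vma->backing_addr + (gpu_addr - vma->base);
}

int main() {
    VmaMap vmas;
    vmas[0x100000] = Vma{0x100000, 0x10000, true, 0xdead0000}; // mapped region
    vmas[0x110000] = Vma{0x110000, 0x10000, false, 0};         // allocated, unmapped

    if (const auto cpu = GpuToCpu(vmas, 0x100010)) {
        std::cout << std::hex << *cpu << '\n'; // prints dead0010
    }
    std::cout << std::boolalpha
              << GpuToCpu(vmas, 0x110010).has_value() << '\n'; // prints false
}

Keying the map by base address keeps lookup at O(log n) per access and makes the split/merge operations declared in the header (SplitVMA, MergeAdjacent) simple iterator manipulations over adjacent entries.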