author     Lioncash <mathew1800@gmail.com>  2019-11-26 21:56:13 +0100
committer  Lioncash <mathew1800@gmail.com>  2019-11-27 03:55:38 +0100
commit     849581075a230ad0f5419bb5d5e1f9e48e6cfd8a (patch)
tree       62985359329da0b65f65429ea5c403f622aea5e3 /src/core/memory.cpp
parent     core/memory: Migrate over ReadCString() to the Memory class (diff)
Diffstat (limited to 'src/core/memory.cpp')
-rw-r--r--  src/core/memory.cpp  130
1 file changed, 67 insertions, 63 deletions
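
For reference, this change moves RasterizerMarkRegionCached() from a free function in the Memory namespace into the Memory class, where it forwards to Memory::Impl. A minimal sketch of how a call site migrates, assuming the Memory instance is reached through the system object as elsewhere in this migration series (that accessor is an assumption and is not part of this diff):

    // Old call site: free function backed by the global current_page_table.
    //   Memory::RasterizerMarkRegionCached(addr, size_in_bytes, true);

    // New call site: member function that forwards to Memory::Impl (the
    // system.Memory() accessor is assumed here; it is not shown in this diff).
    auto& memory = system.Memory();
    memory.RasterizerMarkRegionCached(addr, size_in_bytes, true);   // mark region as cached
    memory.RasterizerMarkRegionCached(addr, size_in_bytes, false);  // restore uncached state
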
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index fb824d710..8c3489ed3 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -225,6 +225,69 @@ struct Memory::Impl {
return string;
}
+ void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
+ if (vaddr == 0) {
+ return;
+ }
+
+ // Iterate over a contiguous CPU address space, which corresponds to the specified GPU
+ // address space, marking the region as un/cached. The region is marked un/cached at a
+ // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
+ // is different). This assumes the specified GPU address region is contiguous as well.
+
+ u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
+ for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
+ Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
+
+ if (cached) {
+ // Switch page type to cached if now cached
+ switch (page_type) {
+ case Common::PageType::Unmapped:
+ // It is not necessary for a process to have this region mapped into its address
+ // space, for example, a system module need not have a VRAM mapping.
+ break;
+ case Common::PageType::Memory:
+ page_type = Common::PageType::RasterizerCachedMemory;
+ current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
+ break;
+ case Common::PageType::RasterizerCachedMemory:
+ // There can be more than one GPU region mapped per CPU region, so it's common
+ // that this area is already marked as cached.
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Switch page type to uncached if now uncached
+ switch (page_type) {
+ case Common::PageType::Unmapped:
+ // It is not necessary for a process to have this region mapped into its address
+ // space, for example, a system module need not have a VRAM mapping.
+ break;
+ case Common::PageType::Memory:
+ // There can be more than one GPU region mapped per CPU region, so it's common
+ // that this area is already unmarked as cached.
+ break;
+ case Common::PageType::RasterizerCachedMemory: {
+ u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK);
+ if (pointer == nullptr) {
+ // It's possible that this function has been called while updating the
+ // pagetable after unmapping a VMA. In that case the underlying VMA will no
+ // longer exist, and we should just leave the pagetable entry blank.
+ page_type = Common::PageType::Unmapped;
+ } else {
+ page_type = Common::PageType::Memory;
+ current_page_table->pointers[vaddr >> PAGE_BITS] = pointer;
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+
/**
* Maps a region of pages as a specific type.
*
@@ -318,6 +381,10 @@ std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) {
return impl->ReadCString(vaddr, max_length);
}
+void Memory::RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
+ impl->RasterizerMarkRegionCached(vaddr, size, cached);
+}
+
void SetCurrentPageTable(Kernel::Process& process) {
current_page_table = &process.VMManager().page_table;
@@ -334,69 +401,6 @@ bool IsKernelVirtualAddress(const VAddr vaddr) {
return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END;
}
-void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
- if (vaddr == 0) {
- return;
- }
-
- // Iterate over a contiguous CPU address space, which corresponds to the specified GPU address
- // space, marking the region as un/cached. The region is marked un/cached at a granularity of
- // CPU pages, hence why we iterate on a CPU page basis (note: GPU page size is different). This
- // assumes the specified GPU address region is contiguous as well.
-
- u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
- for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
- Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
-
- if (cached) {
- // Switch page type to cached if now cached
- switch (page_type) {
- case Common::PageType::Unmapped:
- // It is not necessary for a process to have this region mapped into its address
- // space, for example, a system module need not have a VRAM mapping.
- break;
- case Common::PageType::Memory:
- page_type = Common::PageType::RasterizerCachedMemory;
- current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
- break;
- case Common::PageType::RasterizerCachedMemory:
- // There can be more than one GPU region mapped per CPU region, so it's common that
- // this area is already marked as cached.
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Switch page type to uncached if now uncached
- switch (page_type) {
- case Common::PageType::Unmapped:
- // It is not necessary for a process to have this region mapped into its address
- // space, for example, a system module need not have a VRAM mapping.
- break;
- case Common::PageType::Memory:
- // There can be more than one GPU region mapped per CPU region, so it's common that
- // this area is already unmarked as cached.
- break;
- case Common::PageType::RasterizerCachedMemory: {
- u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK);
- if (pointer == nullptr) {
- // It's possible that this function has been called while updating the pagetable
- // after unmapping a VMA. In that case the underlying VMA will no longer exist,
- // and we should just leave the pagetable entry blank.
- page_type = Common::PageType::Unmapped;
- } else {
- page_type = Common::PageType::Memory;
- current_page_table->pointers[vaddr >> PAGE_BITS] = pointer;
- }
- break;
- }
- default:
- UNREACHABLE();
- }
- }
- }
-}
-
u8 Read8(const VAddr addr) {
return Read<u8>(addr);
}
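
A note on the page-granularity arithmetic in the migrated function: the page count is derived by shifting the first and last byte addresses of the region down to page indices and taking the inclusive difference, so a region that straddles a page boundary is counted correctly. A small standalone sketch of that formula, assuming the usual 4 KiB CPU page size (PAGE_BITS == 12):

    #include <cstdint>
    #include <cstdio>

    int main() {
        constexpr std::uint64_t PAGE_BITS = 12;  // 4 KiB CPU pages (assumed)
        const std::uint64_t vaddr = 0x10000800;  // example start, not page-aligned
        const std::uint64_t size = 0x1800;       // 6 KiB region

        // Same formula as in RasterizerMarkRegionCached(): page index of the
        // last byte minus page index of the first byte, plus one.
        const std::uint64_t num_pages =
            ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;

        std::printf("pages touched: %llu\n",
                    static_cast<unsigned long long>(num_pages));  // prints 2
        return 0;
    }
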