Diffstat (limited to 'src/video_core/memory_manager.cpp')
-rw-r--r--  src/video_core/memory_manager.cpp | 138
1 file changed, 110 insertions(+), 28 deletions(-)
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index e76b59842..0f4e820aa 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -5,16 +5,13 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
-#include "core/core.h"
#include "core/memory.h"
-#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
-#include "video_core/renderer_base.h"
namespace Tegra {
-MemoryManager::MemoryManager() {
+MemoryManager::MemoryManager(VideoCore::RasterizerInterface& rasterizer) : rasterizer{rasterizer} {
std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr);
std::fill(page_table.attributes.begin(), page_table.attributes.end(),
Common::PageType::Unmapped);
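
Side note on the constructor change above: the rasterizer is now injected as a constructor argument and stored as a reference, which is what lets the later hunks drop the Core::System::GetInstance().Renderer().Rasterizer() lookups (and the core/gpu/renderer includes). A minimal sketch of the header-side shape this implies, inferred from this .cpp only; the real declaration lives in video_core/memory_manager.h, which is not part of this diff:

    // Sketch, not the actual header.
    namespace VideoCore { class RasterizerInterface; }

    namespace Tegra {
    class MemoryManager {
    public:
        explicit MemoryManager(VideoCore::RasterizerInterface& rasterizer);
        // ...
    private:
        VideoCore::RasterizerInterface& rasterizer; // non-owning; must outlive the MemoryManager
    };
    } // namespace Tegra

With the dependency passed in explicitly, the class no longer reaches through the Core::System singleton and can be constructed against any RasterizerInterface implementation.
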
@@ -70,23 +67,23 @@ GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) {
const u64 aligned_size{Common::AlignUp(size, page_size)};
const CacheAddr cache_addr{ToCacheAddr(GetPointer(gpu_addr))};
- Core::System::GetInstance().Renderer().Rasterizer().FlushAndInvalidateRegion(cache_addr,
- aligned_size);
+ rasterizer.FlushAndInvalidateRegion(cache_addr, aligned_size);
UnmapRange(gpu_addr, aligned_size);
return gpu_addr;
}
-GPUVAddr MemoryManager::FindFreeRegion(GPUVAddr region_start, u64 size) {
+GPUVAddr MemoryManager::FindFreeRegion(GPUVAddr region_start, u64 size) const {
// Find the first Free VMA.
- const VMAHandle vma_handle{std::find_if(vma_map.begin(), vma_map.end(), [&](const auto& vma) {
- if (vma.second.type != VirtualMemoryArea::Type::Unmapped) {
- return false;
- }
+ const VMAHandle vma_handle{
+ std::find_if(vma_map.begin(), vma_map.end(), [region_start, size](const auto& vma) {
+ if (vma.second.type != VirtualMemoryArea::Type::Unmapped) {
+ return false;
+ }
- const VAddr vma_end{vma.second.base + vma.second.size};
- return vma_end > region_start && vma_end >= region_start + size;
- })};
+ const VAddr vma_end{vma.second.base + vma.second.size};
+ return vma_end > region_start && vma_end >= region_start + size;
+ })};
if (vma_handle == vma_map.end()) {
return {};
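
For reference, the predicate above selects the first unmapped VMA whose end lies past region_start with at least size bytes beyond it. A small worked example with made-up numbers (u64 is the project-wide 64-bit unsigned alias):

    // Hypothetical unmapped VMA covering [0x4000, 0x10000).
    const u64 vma_base = 0x4000;
    const u64 vma_size = 0xC000;
    const u64 vma_end = vma_base + vma_size; // 0x10000
    const u64 region_start = 0x8000;
    const u64 size = 0x2000;
    // vma_end > region_start          : 0x10000 > 0x8000   -> true
    // vma_end >= region_start + size  : 0x10000 >= 0xA000  -> true
    // => std::find_if stops here and this VMA is returned.
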
@@ -99,12 +96,12 @@ bool MemoryManager::IsAddressValid(GPUVAddr addr) const {
return (addr >> page_bits) < page_table.pointers.size();
}
-std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr) {
+std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr) const {
if (!IsAddressValid(addr)) {
return {};
}
- VAddr cpu_addr{page_table.backing_addr[addr >> page_bits]};
+ const VAddr cpu_addr{page_table.backing_addr[addr >> page_bits]};
if (cpu_addr) {
return cpu_addr + (addr & page_mask);
}
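
GpuToCpuAddress (now const) translates by splitting the GPU address into a page index and an in-page offset via page_bits/page_mask. Purely illustrative arithmetic, assuming page_bits = 16 for the example (the real constants are declared in memory_manager.h, not shown here):

    const u64 addr = 0x123456;
    const u64 page = addr >> 16;       // 0x12   -> index into page_table.backing_addr
    const u64 offset = addr & 0xFFFF;  // 0x3456 -> byte offset within that page
    // When the page is mapped: result = page_table.backing_addr[page] + offset.
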
@@ -113,7 +110,7 @@ std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr) {
}
template <typename T>
-T MemoryManager::Read(GPUVAddr addr) {
+T MemoryManager::Read(GPUVAddr addr) const {
if (!IsAddressValid(addr)) {
return {};
}
@@ -165,10 +162,10 @@ void MemoryManager::Write(GPUVAddr addr, T data) {
}
}
-template u8 MemoryManager::Read<u8>(GPUVAddr addr);
-template u16 MemoryManager::Read<u16>(GPUVAddr addr);
-template u32 MemoryManager::Read<u32>(GPUVAddr addr);
-template u64 MemoryManager::Read<u64>(GPUVAddr addr);
+template u8 MemoryManager::Read<u8>(GPUVAddr addr) const;
+template u16 MemoryManager::Read<u16>(GPUVAddr addr) const;
+template u32 MemoryManager::Read<u32>(GPUVAddr addr) const;
+template u64 MemoryManager::Read<u64>(GPUVAddr addr) const;
template void MemoryManager::Write<u8>(GPUVAddr addr, u8 data);
template void MemoryManager::Write<u16>(GPUVAddr addr, u16 data);
template void MemoryManager::Write<u32>(GPUVAddr addr, u32 data);
@@ -179,8 +176,22 @@ u8* MemoryManager::GetPointer(GPUVAddr addr) {
return {};
}
- u8* page_pointer{page_table.pointers[addr >> page_bits]};
- if (page_pointer) {
+ u8* const page_pointer{page_table.pointers[addr >> page_bits]};
+ if (page_pointer != nullptr) {
+ return page_pointer + (addr & page_mask);
+ }
+
+ LOG_ERROR(HW_GPU, "Unknown GetPointer @ 0x{:016X}", addr);
+ return {};
+}
+
+const u8* MemoryManager::GetPointer(GPUVAddr addr) const {
+ if (!IsAddressValid(addr)) {
+ return {};
+ }
+
+ const u8* const page_pointer{page_table.pointers[addr >> page_bits]};
+ if (page_pointer != nullptr) {
return page_pointer + (addr & page_mask);
}
@@ -188,15 +199,86 @@ u8* MemoryManager::GetPointer(GPUVAddr addr) {
return {};
}
-void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, std::size_t size) {
- std::memcpy(dest_buffer, GetPointer(src_addr), size);
+void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, std::size_t size) const {
+ std::size_t remaining_size{size};
+ std::size_t page_index{src_addr >> page_bits};
+ std::size_t page_offset{src_addr & page_mask};
+
+ while (remaining_size > 0) {
+ const std::size_t copy_amount{
+ std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
+
+ switch (page_table.attributes[page_index]) {
+ case Common::PageType::Memory: {
+ const u8* src_ptr{page_table.pointers[page_index] + page_offset};
+ rasterizer.FlushRegion(ToCacheAddr(src_ptr), copy_amount);
+ std::memcpy(dest_buffer, src_ptr, copy_amount);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ page_index++;
+ page_offset = 0;
+ dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
+ remaining_size -= copy_amount;
+ }
}
+
void MemoryManager::WriteBlock(GPUVAddr dest_addr, const void* src_buffer, std::size_t size) {
- std::memcpy(GetPointer(dest_addr), src_buffer, size);
+ std::size_t remaining_size{size};
+ std::size_t page_index{dest_addr >> page_bits};
+ std::size_t page_offset{dest_addr & page_mask};
+
+ while (remaining_size > 0) {
+ const std::size_t copy_amount{
+ std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
+
+ switch (page_table.attributes[page_index]) {
+ case Common::PageType::Memory: {
+ u8* dest_ptr{page_table.pointers[page_index] + page_offset};
+ rasterizer.InvalidateRegion(ToCacheAddr(dest_ptr), copy_amount);
+ std::memcpy(dest_ptr, src_buffer, copy_amount);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ page_index++;
+ page_offset = 0;
+ src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
+ remaining_size -= copy_amount;
+ }
}
void MemoryManager::CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size) {
- std::memcpy(GetPointer(dest_addr), GetPointer(src_addr), size);
+ std::size_t remaining_size{size};
+ std::size_t page_index{src_addr >> page_bits};
+ std::size_t page_offset{src_addr & page_mask};
+
+ while (remaining_size > 0) {
+ const std::size_t copy_amount{
+ std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
+
+ switch (page_table.attributes[page_index]) {
+ case Common::PageType::Memory: {
+ const u8* src_ptr{page_table.pointers[page_index] + page_offset};
+ rasterizer.FlushRegion(ToCacheAddr(src_ptr), copy_amount);
+ WriteBlock(dest_addr, src_ptr, copy_amount);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ page_index++;
+ page_offset = 0;
+ dest_addr += static_cast<VAddr>(copy_amount);
+ src_addr += static_cast<VAddr>(copy_amount);
+ remaining_size -= copy_amount;
+ }
}
void MemoryManager::MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type,
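
The block operations above (ReadBlock, WriteBlock, CopyBlock) now walk the page table one page at a time and synchronize each page with the rasterizer cache (FlushRegion before reading, InvalidateRegion before writing) instead of doing a single memcpy through GetPointer, which assumed the whole range was contiguous in host memory. A hedged usage sketch; only the MemoryManager calls come from this diff, the surrounding function and variable names are placeholders:

    // Assumes <cstddef>, <vector> and "video_core/memory_manager.h".
    void RoundTrip(Tegra::MemoryManager& memory_manager, Tegra::GPUVAddr gpu_addr, std::size_t size) {
        std::vector<u8> staging(size);
        // Every page touched is flushed from the rasterizer cache before the copy,
        // so the staging buffer sees the GPU's latest writes.
        memory_manager.ReadBlock(gpu_addr, staging.data(), staging.size());
        // Every page touched is invalidated in the rasterizer cache, so cached
        // surfaces/buffers over that range are re-fetched on next use.
        memory_manager.WriteBlock(gpu_addr, staging.data(), staging.size());
    }

Note that only Common::PageType::Memory pages are handled in these loops; an unmapped page inside the range hits the UNREACHABLE() default case.
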
@@ -336,7 +418,7 @@ MemoryManager::VMAIter MemoryManager::CarveVMA(GPUVAddr base, u64 size) {
const VirtualMemoryArea& vma{vma_handle->second};
if (vma.type == VirtualMemoryArea::Type::Mapped) {
// Region is already allocated
- return {};
+ return vma_handle;
}
const VAddr start_in_vma{base - vma.base};