Diffstat (limited to 'src/core')
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_gpu.h |  1
-rw-r--r--  src/core/memory.cpp                             | 51
-rw-r--r--  src/core/memory.h                               |  3
3 files changed, 17 insertions, 38 deletions
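
In short, this change makes RasterizerMarkRegionCached and RasterizerFlushVirtualRegion take CPU virtual addresses (VAddr) instead of Tegra::GPUVAddr, so the GPU-to-CPU translation now has to happen at the call site rather than inside core/memory.cpp; that is also why the video_core/memory_manager.h include moves out of memory.h and into nvhost_gpu.h. A minimal sketch of the resulting calling convention, built from the GpuToCpuAddress API visible in the removed lines below (the wrapper function and its name are hypothetical, not part of this patch):

#include "core/core.h"                 // Core::System (assumed header location)
#include "core/memory.h"               // Memory::RasterizerMarkRegionCached
#include "video_core/memory_manager.h" // Tegra::GPUVAddr, GpuToCpuAddress

// Hypothetical helper illustrating the new division of labour: translate the
// GPU address once at the caller, then hand core memory a plain CPU VAddr.
static void MarkGpuRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached) {
    const boost::optional<VAddr> cpu_addr =
        Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
    if (!cpu_addr) {
        // The GPU <-> CPU virtual memory mapping is not 1:1; skip unmapped regions.
        return;
    }
    Memory::RasterizerMarkRegionCached(*cpu_addr, size, cached);
}

Keeping the translation out of core/memory.cpp removes the per-page GpuToCpuAddress lookups the old loop performed and drops the core -> video_core include dependency from memory.h.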
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
index 650ed8fbc..03b7356d0 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
@@ -10,6 +10,7 @@
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
+#include "video_core/memory_manager.h"
namespace Service::Nvidia::Devices {
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index bc34bfd6d..0e4e0157c 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -251,8 +251,8 @@ std::string ReadCString(VAddr vaddr, std::size_t max_length) {
return string;
}
-void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached) {
- if (gpu_addr == 0) {
+void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
+ if (vaddr == 0) {
return;
}
@@ -261,19 +261,8 @@ void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached)
// CPU pages, hence why we iterate on a CPU page basis (note: GPU page size is different). This
// assumes the specified GPU address region is contiguous as well.
- u64 num_pages = ((gpu_addr + size - 1) >> PAGE_BITS) - (gpu_addr >> PAGE_BITS) + 1;
- for (unsigned i = 0; i < num_pages; ++i, gpu_addr += PAGE_SIZE) {
- boost::optional<VAddr> maybe_vaddr =
- Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
- // The GPU <-> CPU virtual memory mapping is not 1:1
- if (!maybe_vaddr) {
- LOG_ERROR(HW_Memory,
- "Trying to flush a cached region to an invalid physical address {:016X}",
- gpu_addr);
- continue;
- }
- VAddr vaddr = *maybe_vaddr;
-
+ u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
+ for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
if (cached) {
@@ -344,29 +333,19 @@ void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) {
const VAddr overlap_start = std::max(start, region_start);
const VAddr overlap_end = std::min(end, region_end);
-
- const std::vector<Tegra::GPUVAddr> gpu_addresses =
- system_instance.GPU().MemoryManager().CpuToGpuAddress(overlap_start);
-
- if (gpu_addresses.empty()) {
- return;
- }
-
const u64 overlap_size = overlap_end - overlap_start;
- for (const auto& gpu_address : gpu_addresses) {
- auto& rasterizer = system_instance.Renderer().Rasterizer();
- switch (mode) {
- case FlushMode::Flush:
- rasterizer.FlushRegion(gpu_address, overlap_size);
- break;
- case FlushMode::Invalidate:
- rasterizer.InvalidateRegion(gpu_address, overlap_size);
- break;
- case FlushMode::FlushAndInvalidate:
- rasterizer.FlushAndInvalidateRegion(gpu_address, overlap_size);
- break;
- }
+ auto& rasterizer = system_instance.Renderer().Rasterizer();
+ switch (mode) {
+ case FlushMode::Flush:
+ rasterizer.FlushRegion(overlap_start, overlap_size);
+ break;
+ case FlushMode::Invalidate:
+ rasterizer.InvalidateRegion(overlap_start, overlap_size);
+ break;
+ case FlushMode::FlushAndInvalidate:
+ rasterizer.FlushAndInvalidateRegion(overlap_start, overlap_size);
+ break;
}
};
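
The rewritten loop above counts how many CPU pages a byte range touches by comparing the page index of the range's last byte with that of its first. A stand-alone sketch of the same arithmetic, assuming the 4 KiB CPU page size (PAGE_BITS = 12) that core/memory defines; the constants and helper name here are illustrative only:

#include "common/common_types.h" // u64, VAddr

// Illustrative constants; core/memory defines the real PAGE_BITS/PAGE_SIZE.
constexpr u64 ILLUSTRATIVE_PAGE_BITS = 12;                      // 4 KiB CPU pages
constexpr u64 ILLUSTRATIVE_PAGE_SIZE = 1ULL << ILLUSTRATIVE_PAGE_BITS;

// Pages touched by [vaddr, vaddr + size): last page index minus first page
// index, plus one. Assumes size != 0.
constexpr u64 NumTouchedPages(VAddr vaddr, u64 size) {
    return ((vaddr + size - 1) >> ILLUSTRATIVE_PAGE_BITS) -
           (vaddr >> ILLUSTRATIVE_PAGE_BITS) + 1;
}

// A 2-byte range straddling a page boundary spans two pages.
static_assert(NumTouchedPages(0xFFF, 2) == 2, "boundary-straddling range spans two pages");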
diff --git a/src/core/memory.h b/src/core/memory.h
index b7fb3b9ed..f06e04a75 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -11,7 +11,6 @@
#include <boost/icl/interval_map.hpp>
#include "common/common_types.h"
#include "core/memory_hook.h"
-#include "video_core/memory_manager.h"
namespace Kernel {
class Process;
@@ -179,7 +178,7 @@ enum class FlushMode {
/**
* Mark each page touching the region as cached.
*/
-void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached);
+void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached);
/**
* Flushes and invalidates any externally cached rasterizer resources touching the given virtual