From c6cac2ffaad4ac27f35cea25022d9c59c7ecfbf4 Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Sun, 30 Apr 2023 17:14:06 +0200
Subject: GPU: Add Reactive flushing

---
 src/video_core/buffer_cache/buffer_base.h         |  9 ++++++
 src/video_core/buffer_cache/buffer_cache.h        | 35 +++++++++++++++++------
 src/video_core/buffer_cache/buffer_cache_base.h   |  5 ++--
 src/video_core/buffer_cache/memory_tracker_base.h | 26 +++++++++++++++++
 src/video_core/buffer_cache/word_manager.h        | 14 +++++++--
 5 files changed, 76 insertions(+), 13 deletions(-)

(limited to 'src/video_core/buffer_cache')

diff --git a/src/video_core/buffer_cache/buffer_base.h b/src/video_core/buffer_cache/buffer_base.h
index 9cbd95c4b..0bb3bf8ae 100644
--- a/src/video_core/buffer_cache/buffer_base.h
+++ b/src/video_core/buffer_cache/buffer_base.h
@@ -18,6 +18,7 @@ namespace VideoCommon {
 enum class BufferFlagBits {
     Picked = 1 << 0,
     CachedWrites = 1 << 1,
+    PreemtiveDownload = 1 << 2,
 };
 DECLARE_ENUM_FLAG_OPERATORS(BufferFlagBits)
@@ -54,6 +55,10 @@ public:
         flags |= BufferFlagBits::Picked;
     }
 
+    void MarkPreemtiveDownload() noexcept {
+        flags |= BufferFlagBits::PreemtiveDownload;
+    }
+
     /// Unmark buffer as picked
     void Unpick() noexcept {
         flags &= ~BufferFlagBits::Picked;
     }
@@ -84,6 +89,10 @@ public:
         return True(flags & BufferFlagBits::CachedWrites);
     }
 
+    bool IsPreemtiveDownload() const noexcept {
+        return True(flags & BufferFlagBits::PreemtiveDownload);
+    }
+
     /// Returns the base CPU address of the buffer
     [[nodiscard]] VAddr CpuAddr() const noexcept {
         return cpu_addr;
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index e534e1e9c..479a1a508 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -111,9 +111,24 @@ void BufferCache<P>::WriteMemory(VAddr cpu_addr, u64 size) {
 template <class P>
 void BufferCache<P>::CachedWriteMemory(VAddr cpu_addr, u64 size) {
     memory_tracker.CachedCpuWrite(cpu_addr, size);
-    const IntervalType add_interval{Common::AlignDown(cpu_addr, YUZU_PAGESIZE),
-                                    Common::AlignUp(cpu_addr + size, YUZU_PAGESIZE)};
-    cached_ranges.add(add_interval);
+}
+
+template <class P>
+std::optional<VideoCore::RasterizerDownloadArea> BufferCache<P>::GetFlushArea(VAddr cpu_addr,
+                                                                              u64 size) {
+    std::optional<VideoCore::RasterizerDownloadArea> area{};
+    area.emplace();
+    VAddr cpu_addr_start_aligned = Common::AlignDown(cpu_addr, Core::Memory::YUZU_PAGESIZE);
+    VAddr cpu_addr_end_aligned = Common::AlignUp(cpu_addr + size, Core::Memory::YUZU_PAGESIZE);
+    area->start_address = cpu_addr_start_aligned;
+    area->end_address = cpu_addr_end_aligned;
+    if (memory_tracker.IsRegionPreflushable(cpu_addr, size)) {
+        area->preemtive = true;
+        return area;
+    };
+    memory_tracker.MarkRegionAsPreflushable(cpu_addr_start_aligned,
+                                            cpu_addr_end_aligned - cpu_addr_start_aligned);
+    area->preemtive = !IsRegionGpuModified(cpu_addr, size);
+    return area;
 }
 
 template <class P>
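
Review note: GetFlushArea() is the core of the reactive scheme. The first CPU access to a range marks it preflushable and reports whether a synchronous flush is still needed (area->preemtive is false when the GPU already modified the range); once the range is marked, later GPU writes are queued for eager download, so subsequent CPU reads take the preemptive path. A minimal sketch of a consumer follows; OnCpuRead and FlushAndWait are hypothetical hooks, not part of this commit.

    // Hypothetical caller sketch -- OnCpuRead/FlushAndWait and the buffer_cache
    // reference are illustrative names, not from this commit.
    void OnCpuRead(VAddr addr, u64 size) {
        const std::optional<VideoCore::RasterizerDownloadArea> area =
            buffer_cache.GetFlushArea(addr, size);
        if (!area || area->preemtive) {
            // Either nothing to flush, or the range was already marked
            // preflushable earlier and its downloads are queued eagerly.
            return;
        }
        // First CPU access to a GPU-modified range: flush it once, reactively.
        FlushAndWait(area->start_address, area->end_address - area->start_address);
    }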

@@ -191,8 +206,10 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
         const VAddr new_base_address = *cpu_dest_address + diff;
         const IntervalType add_interval{new_base_address, new_base_address + size};
         tmp_intervals.push_back(add_interval);
-        uncommitted_ranges.add(add_interval);
-        pending_ranges.add(add_interval);
+        if (memory_tracker.IsRegionPreflushable(new_base_address, new_base_address + size)) {
+            uncommitted_ranges.add(add_interval);
+            pending_ranges.add(add_interval);
+        }
     };
     ForEachInRangeSet(common_ranges, *cpu_src_address, amount, mirror);
     // This subtraction in this order is important for overlapping copies.
@@ -205,7 +222,7 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
     if (has_new_downloads) {
         memory_tracker.MarkRegionAsGpuModified(*cpu_dest_address, amount);
     }
-    std::vector<u8> tmp_buffer(amount);
+    tmp_buffer.resize(amount);
     cpu_memory.ReadBlockUnsafe(*cpu_src_address, tmp_buffer.data(), amount);
     cpu_memory.WriteBlockUnsafe(*cpu_dest_address, tmp_buffer.data(), amount);
     return true;
 }
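
Review note on the two DMACopy hunks: download ranges for the mirrored destination are now queued only when the destination region is already preflushable (the CPU has shown interest in it), and the per-call std::vector<u8> becomes the tmp_buffer member added to buffer_cache_base.h, so the staging allocation's capacity is reused across copies. A self-contained sketch of that reuse pattern, with Memory standing in for the real cpu_memory interface:

    #include <cstdint>
    #include <vector>

    using u8 = std::uint8_t;      // yuzu-style aliases, for a self-contained sketch
    using VAddr = std::uint64_t;
    using u64 = std::uint64_t;

    // Illustrative only; Memory is a stand-in for the real cpu_memory interface.
    struct DmaCopier {
        std::vector<u8> tmp_buffer; // persists across calls, like BufferCache::tmp_buffer

        template <typename Memory>
        void Copy(Memory& mem, VAddr src, VAddr dst, u64 amount) {
            tmp_buffer.resize(amount); // no reallocation once capacity has grown enough
            mem.ReadBlockUnsafe(src, tmp_buffer.data(), amount);
            mem.WriteBlockUnsafe(dst, tmp_buffer.data(), amount);
        }
    };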

@@ -441,9 +458,7 @@ void BufferCache<P>::BindComputeTextureBuffer(size_t tbo_index, GPUVAddr gpu_addr,
 
 template <class P>
 void BufferCache<P>::FlushCachedWrites() {
-    cached_write_buffer_ids.clear();
     memory_tracker.FlushCachedWrites();
-    cached_ranges.clear();
 }
 
 template <class P>
@@ -1221,6 +1236,9 @@ void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 size) {
     const IntervalType base_interval{cpu_addr, cpu_addr + size};
     common_ranges.add(base_interval);
 
+    if (!memory_tracker.IsRegionPreflushable(cpu_addr, cpu_addr + size)) {
+        return;
+    }
     uncommitted_ranges.add(base_interval);
     pending_ranges.add(base_interval);
 }
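
Review note: this hunk is where flushing turns reactive. A GPU-written interval always lands in common_ranges, but it is queued for download (uncommitted_ranges/pending_ranges) only once the region has become preflushable, i.e. after the CPU actually touched it. A minimal single-range model of the protocol, simplified from the per-page tracking the patch really uses:

    // Simplified model of the preflushable gate; the real tracker keeps
    // per-page bits, this collapses the state to a single range.
    struct ReactiveRange {
        bool preflushable = false;
        bool gpu_modified = false;
        bool pending_download = false;

        void MarkWritten() { // ~ BufferCache::MarkWrittenBuffer
            gpu_modified = true;
            if (preflushable) {
                pending_download = true; // eager download once the CPU showed interest
            }
        }

        bool CpuRead() { // ~ GetFlushArea; returns true if the caller must flush now
            if (preflushable) {
                return false; // preemptive path: a download is already queued
            }
            preflushable = true; // future GPU writes become preemptive
            return gpu_modified; // a reactive flush is needed only this first time
        }
    };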

@@ -1629,7 +1647,6 @@ void BufferCache<P>::DeleteBuffer(BufferId buffer_id, bool do_not_mark) {
     replace(transform_feedback_buffers);
     replace(compute_uniform_buffers);
     replace(compute_storage_buffers);
-    std::erase(cached_write_buffer_ids, buffer_id);
 
     // Mark the whole buffer as CPU written to stop tracking CPU writes
     if (!do_not_mark) {
diff --git a/src/video_core/buffer_cache/buffer_cache_base.h b/src/video_core/buffer_cache/buffer_cache_base.h
index 656baa550..e3914a53a 100644
--- a/src/video_core/buffer_cache/buffer_cache_base.h
+++ b/src/video_core/buffer_cache/buffer_cache_base.h
@@ -188,6 +188,8 @@ public:
 
     void DownloadMemory(VAddr cpu_addr, u64 size);
 
+    std::optional<VideoCore::RasterizerDownloadArea> GetFlushArea(VAddr cpu_addr, u64 size);
+
     bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<const u8> inlined_buffer);
 
     void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size);
@@ -541,8 +543,6 @@ private:
                        std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES>, Empty>
         uniform_buffer_binding_sizes{};
 
-    std::vector<BufferId> cached_write_buffer_ids;
-
     MemoryTracker memory_tracker;
     IntervalSet uncommitted_ranges;
     IntervalSet common_ranges;
@@ -575,6 +575,7 @@ private:
     bool active_async_buffers = false;
 
     std::array<BufferId, ((1ULL << 39) >> CACHING_PAGEBITS)> page_table;
+    std::vector<u8> tmp_buffer;
 };
 
 } // namespace VideoCommon
diff --git a/src/video_core/buffer_cache/memory_tracker_base.h b/src/video_core/buffer_cache/memory_tracker_base.h
index dc4ebfcaa..6036b21c9 100644
--- a/src/video_core/buffer_cache/memory_tracker_base.h
+++ b/src/video_core/buffer_cache/memory_tracker_base.h
@@ -66,6 +66,14 @@ public:
         });
     }
 
+    /// Returns true if a region has been marked as Preflushable
+    [[nodiscard]] bool IsRegionPreflushable(VAddr query_cpu_addr, u64 query_size) noexcept {
+        return IteratePages<false>(
+            query_cpu_addr, query_size, [](Manager* manager, u64 offset, size_t size) {
+                return manager->template IsRegionModified<Type::Preflushable>(offset, size);
+            });
+    }
+
     /// Mark region as CPU modified, notifying the rasterizer about this change
     void MarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 query_size) {
         IteratePages<true>(dirty_cpu_addr, query_size,
@@ -93,6 +101,15 @@ public:
         });
     }
 
+    /// Mark region as preflushable
+    void MarkRegionAsPreflushable(VAddr dirty_cpu_addr, u64 query_size) noexcept {
+        IteratePages<true>(dirty_cpu_addr, query_size,
+                           [](Manager* manager, u64 offset, size_t size) {
+                               manager->template ChangeRegionState<Type::Preflushable, true>(
+                                   manager->GetCpuAddr() + offset, size);
+                           });
+    }
+
     /// Unmark region as modified from the host GPU
     void UnmarkRegionAsGpuModified(VAddr dirty_cpu_addr, u64 query_size) noexcept {
         IteratePages<true>(dirty_cpu_addr, query_size,
@@ -102,6 +119,15 @@ public:
         });
     }
 
+    /// Unmark region as preflushable
+    void UnmarkRegionAsPreflushable(VAddr dirty_cpu_addr, u64 query_size) noexcept {
+        IteratePages<true>(dirty_cpu_addr, query_size,
+                           [](Manager* manager, u64 offset, size_t size) {
+                               manager->template ChangeRegionState<Type::Preflushable, false>(
+                                   manager->GetCpuAddr() + offset, size);
+                           });
+    }
+
     /// Mark region as modified from the CPU
     /// but don't mark it as modified until FlushCachedWrites is called.
     void CachedCpuWrite(VAddr dirty_cpu_addr, u64 query_size) {
diff --git a/src/video_core/buffer_cache/word_manager.h b/src/video_core/buffer_cache/word_manager.h
index a42455045..0fb199a54 100644
--- a/src/video_core/buffer_cache/word_manager.h
+++ b/src/video_core/buffer_cache/word_manager.h
@@ -26,6 +26,7 @@ enum class Type {
     GPU,
     CachedCPU,
     Untracked,
+    Preflushable,
 };
 
 /// Vector tracking modified pages tightly packed with small vector optimization
@@ -55,17 +56,20 @@ struct Words {
             gpu.stack.fill(0);
             cached_cpu.stack.fill(0);
             untracked.stack.fill(~u64{0});
+            preflushable.stack.fill(0);
         } else {
             // Share allocation between CPU and GPU pages and set their default values
-            u64* const alloc = new u64[num_words * 4];
+            u64* const alloc = new u64[num_words * 5];
             cpu.heap = alloc;
             gpu.heap = alloc + num_words;
             cached_cpu.heap = alloc + num_words * 2;
             untracked.heap = alloc + num_words * 3;
+            preflushable.heap = alloc + num_words * 4;
             std::fill_n(cpu.heap, num_words, ~u64{0});
             std::fill_n(gpu.heap, num_words, 0);
             std::fill_n(cached_cpu.heap, num_words, 0);
             std::fill_n(untracked.heap, num_words, ~u64{0});
+            std::fill_n(preflushable.heap, num_words, 0);
         }
         // Clean up tailing bits
         const u64 last_word_size = size_bytes % BYTES_PER_WORD;
@@ -88,13 +92,14 @@ struct Words {
         gpu = rhs.gpu;
         cached_cpu = rhs.cached_cpu;
         untracked = rhs.untracked;
+        preflushable = rhs.preflushable;
         rhs.cpu.heap = nullptr;
         return *this;
     }
 
     Words(Words&& rhs) noexcept
         : size_bytes{rhs.size_bytes}, num_words{rhs.num_words}, cpu{rhs.cpu}, gpu{rhs.gpu},
-          cached_cpu{rhs.cached_cpu}, untracked{rhs.untracked} {
+          cached_cpu{rhs.cached_cpu}, untracked{rhs.untracked}, preflushable{rhs.preflushable} {
         rhs.cpu.heap = nullptr;
     }
@@ -129,6 +134,8 @@ struct Words {
             return std::span<u64>(cached_cpu.Pointer(IsShort()), num_words);
         } else if constexpr (type == Type::Untracked) {
             return std::span<u64>(untracked.Pointer(IsShort()), num_words);
+        } else if constexpr (type == Type::Preflushable) {
+            return std::span<u64>(preflushable.Pointer(IsShort()), num_words);
         }
     }
 
@@ -142,6 +149,8 @@ struct Words {
             return std::span<const u64>(cached_cpu.Pointer(IsShort()), num_words);
         } else if constexpr (type == Type::Untracked) {
             return std::span<const u64>(untracked.Pointer(IsShort()), num_words);
+        } else if constexpr (type == Type::Preflushable) {
+            return std::span<const u64>(preflushable.Pointer(IsShort()), num_words);
         }
     }
 
@@ -151,6 +160,7 @@ struct Words {
     WordsArray<stack_words> gpu;
     WordsArray<stack_words> cached_cpu;
    WordsArray<stack_words> untracked;
+    WordsArray<stack_words> preflushable;
 };
 
 template <size_t stack_words = 1>
-- 
cgit v1.2.3
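
Review note on the tracking structure: word_manager.h keeps one bit per page, 64 pages per u64 word, in parallel planes (cpu, gpu, cached_cpu, untracked, and now preflushable); the constructor hunk above simply grows the shared heap allocation from four planes to five. The bit arithmetic behind ChangeRegionState/IsRegionModified reduces to the following self-contained sketch; BYTES_PER_PAGE = 4096 is an assumption matching YUZU_PAGESIZE, and the real code additionally handles the small-buffer stack storage and partial last words.

    #include <cstdint>

    constexpr std::uint64_t BYTES_PER_PAGE = 4096; // assumed page size
    constexpr std::uint64_t PAGES_PER_WORD = 64;   // one u64 word tracks 64 pages

    // Set the preflushable bit for the page containing byte `offset`.
    void MarkPage(std::uint64_t* plane, std::uint64_t offset) {
        const std::uint64_t page = offset / BYTES_PER_PAGE;
        plane[page / PAGES_PER_WORD] |= std::uint64_t{1} << (page % PAGES_PER_WORD);
    }

    // Test the preflushable bit for the page containing byte `offset`.
    bool TestPage(const std::uint64_t* plane, std::uint64_t offset) {
        const std::uint64_t page = offset / BYTES_PER_PAGE;
        return ((plane[page / PAGES_PER_WORD] >> (page % PAGES_PER_WORD)) & 1) != 0;
    }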