Diffstat (limited to 'src/video_core')
-rw-r--r-- | src/video_core/gpu.cpp                               |   2
-rw-r--r-- | src/video_core/memory_manager.cpp                    | 532
-rw-r--r-- | src/video_core/memory_manager.h                      | 172
-rw-r--r-- | src/video_core/renderer_opengl/gl_rasterizer.cpp     |  13
-rw-r--r-- | src/video_core/renderer_vulkan/vk_blit_screen.cpp    |   1
-rw-r--r-- | src/video_core/renderer_vulkan/vk_device.cpp         |   3
-rw-r--r-- | src/video_core/renderer_vulkan/vk_pipeline_cache.cpp |   5
-rw-r--r-- | src/video_core/renderer_vulkan/vk_rasterizer.cpp     |   9
-rw-r--r-- | src/video_core/renderer_vulkan/vk_sampler_cache.cpp  |   1
-rw-r--r-- | src/video_core/renderer_vulkan/vk_texture_cache.cpp  |   2
10 files changed, 227 insertions, 513 deletions
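
The largest change below rewrites Tegra::MemoryManager, dropping the VirtualMemoryArea map in favor of a flat std::vector<PageEntry> page table. Each 32-bit PageEntry holds either a reserved Unmapped/Allocated sentinel or a CPU address stored right-shifted by 12 bits (4 KiB granularity, even though GPU pages are 64 KiB). The following is a minimal standalone sketch of just that encoding; it is not code from the commit, and the Pack/Unpack helpers are invented for illustration.

#include <cstdint>
#include <cstdio>

using VAddr = std::uint64_t;

// Reserved sentinel values, mirroring PageEntry::State in the diff.
enum : std::uint32_t {
    Unmapped = static_cast<std::uint32_t>(-1),
    Allocated = static_cast<std::uint32_t>(-2),
};

constexpr unsigned ShiftBits = 12; // entries store addr >> 12, i.e. 4 KiB granularity

// Pack a 4 KiB-aligned CPU address into a 32-bit entry (illustrative helper).
constexpr std::uint32_t Pack(VAddr addr) {
    return static_cast<std::uint32_t>(addr >> ShiftBits);
}

// Recover the CPU address from a 32-bit entry (illustrative helper).
constexpr VAddr Unpack(std::uint32_t entry) {
    return static_cast<VAddr>(entry) << ShiftBits;
}

int main() {
    constexpr VAddr cpu_addr = 0x12'3456'7000ULL; // 4 KiB aligned
    static_assert(Unpack(Pack(cpu_addr)) == cpu_addr, "round-trip must hold");
    std::printf("entry=0x%08X -> addr=0x%010llX\n", Pack(cpu_addr),
                static_cast<unsigned long long>(Unpack(Pack(cpu_addr))));
}
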
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 8e19c3373..512578c8b 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -81,7 +81,7 @@ void GPU::WaitFence(u32 syncpoint_id, u32 value) {
     }
     MICROPROFILE_SCOPE(GPU_wait);
     std::unique_lock lock{sync_mutex};
-    sync_cv.wait(lock, [=]() { return syncpoints[syncpoint_id].load() >= value; });
+    sync_cv.wait(lock, [=, this] { return syncpoints[syncpoint_id].load() >= value; });
 }
 
 void GPU::IncrementSyncPoint(const u32 syncpoint_id) {
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index ff5505d12..844164645 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -4,7 +4,6 @@
 
 #include "common/alignment.h"
 #include "common/assert.h"
-#include "common/logging/log.h"
 #include "core/core.h"
 #include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/process.h"
@@ -16,121 +15,137 @@ namespace Tegra {
 
 MemoryManager::MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer)
-    : rasterizer{rasterizer}, system{system} {
-    page_table.Resize(address_space_width, page_bits, false);
-
-    // Initialize the map with a single free region covering the entire managed space.
-    VirtualMemoryArea initial_vma;
-    initial_vma.size = address_space_end;
-    vma_map.emplace(initial_vma.base, initial_vma);
-
-    UpdatePageTableForVMA(initial_vma);
-}
+    : system{system}, rasterizer{rasterizer}, page_table(page_table_size) {}
 
 MemoryManager::~MemoryManager() = default;
 
-GPUVAddr MemoryManager::AllocateSpace(u64 size, u64 align) {
-    const u64 aligned_size{Common::AlignUp(size, page_size)};
-    const GPUVAddr gpu_addr{FindFreeRegion(address_space_base, aligned_size)};
-
-    AllocateMemory(gpu_addr, 0, aligned_size);
-
+GPUVAddr MemoryManager::UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) {
+    u64 remaining_size{size};
+    for (u64 offset{}; offset < size; offset += page_size) {
+        if (remaining_size < page_size) {
+            SetPageEntry(gpu_addr + offset, page_entry + offset, remaining_size);
+        } else {
+            SetPageEntry(gpu_addr + offset, page_entry + offset);
+        }
+        remaining_size -= page_size;
+    }
     return gpu_addr;
 }
 
-GPUVAddr MemoryManager::AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align) {
-    const u64 aligned_size{Common::AlignUp(size, page_size)};
-
-    AllocateMemory(gpu_addr, 0, aligned_size);
+GPUVAddr MemoryManager::Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size) {
+    return UpdateRange(gpu_addr, cpu_addr, size);
+}
 
-    return gpu_addr;
+GPUVAddr MemoryManager::MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align) {
+    return Map(cpu_addr, *FindFreeRange(size, align), size);
 }
 
-GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) {
-    const u64 aligned_size{Common::AlignUp(size, page_size)};
-    const GPUVAddr gpu_addr{FindFreeRegion(address_space_base, aligned_size)};
+void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
+    if (!size) {
+        return;
+    }
 
-    MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr);
-    ASSERT(
-        system.CurrentProcess()->PageTable().LockForDeviceAddressSpace(cpu_addr, size).IsSuccess());
+    // Flush and invalidate through the GPU interface, to be asynchronous if possible.
+    system.GPU().FlushAndInvalidateRegion(*GpuToCpuAddress(gpu_addr), size);
 
-    return gpu_addr;
+    UpdateRange(gpu_addr, PageEntry::State::Unmapped, size);
 }
 
-GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size) {
-    ASSERT((gpu_addr & page_mask) == 0);
+std::optional<GPUVAddr> MemoryManager::AllocateFixed(GPUVAddr gpu_addr, std::size_t size) {
+    for (u64 offset{}; offset < size; offset += page_size) {
+        if (!GetPageEntry(gpu_addr + offset).IsUnmapped()) {
+            return {};
+        }
+    }
 
-    const u64 aligned_size{Common::AlignUp(size, page_size)};
+    return UpdateRange(gpu_addr, PageEntry::State::Allocated, size);
+}
 
-    MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr);
-    ASSERT(
-        system.CurrentProcess()->PageTable().LockForDeviceAddressSpace(cpu_addr, size).IsSuccess());
-    return gpu_addr;
+GPUVAddr MemoryManager::Allocate(std::size_t size, std::size_t align) {
+    return *AllocateFixed(*FindFreeRange(size, align), size);
 }
 
-GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) {
-    ASSERT((gpu_addr & page_mask) == 0);
+void MemoryManager::TryLockPage(PageEntry page_entry, std::size_t size) {
+    if (!page_entry.IsValid()) {
+        return;
+    }
 
-    const u64 aligned_size{Common::AlignUp(size, page_size)};
-    const auto cpu_addr = GpuToCpuAddress(gpu_addr);
-    ASSERT(cpu_addr);
+    ASSERT(system.CurrentProcess()
+               ->PageTable()
+               .LockForDeviceAddressSpace(page_entry.ToAddress(), size)
+               .IsSuccess());
+}
 
-    // Flush and invalidate through the GPU interface, to be asynchronous if possible.
-    system.GPU().FlushAndInvalidateRegion(*cpu_addr, aligned_size);
+void MemoryManager::TryUnlockPage(PageEntry page_entry, std::size_t size) {
+    if (!page_entry.IsValid()) {
+        return;
+    }
 
-    UnmapRange(gpu_addr, aligned_size);
     ASSERT(system.CurrentProcess()
                ->PageTable()
-               .UnlockForDeviceAddressSpace(cpu_addr.value(), size)
+               .UnlockForDeviceAddressSpace(page_entry.ToAddress(), size)
               .IsSuccess());
-
-    return gpu_addr;
 }
 
-GPUVAddr MemoryManager::FindFreeRegion(GPUVAddr region_start, u64 size) const {
-    // Find the first Free VMA.
-    const VMAHandle vma_handle{
-        std::find_if(vma_map.begin(), vma_map.end(), [region_start, size](const auto& vma) {
-            if (vma.second.type != VirtualMemoryArea::Type::Unmapped) {
-                return false;
-            }
+PageEntry MemoryManager::GetPageEntry(GPUVAddr gpu_addr) const {
+    return page_table[PageEntryIndex(gpu_addr)];
+}
 
-            const VAddr vma_end{vma.second.base + vma.second.size};
-            return vma_end > region_start && vma_end >= region_start + size;
-        })};
+void MemoryManager::SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) {
+    // TODO(bunnei): We should lock/unlock device regions. This currently causes issues due to
+    // improper tracking, but should be fixed in the future.
 
-    if (vma_handle == vma_map.end()) {
-        return {};
-    }
+    //// Unlock the old page
+    // TryUnlockPage(page_table[PageEntryIndex(gpu_addr)], size);
 
-    return std::max(region_start, vma_handle->second.base);
-}
+    //// Lock the new page
+    // TryLockPage(page_entry, size);
 
-bool MemoryManager::IsAddressValid(GPUVAddr addr) const {
-    return (addr >> page_bits) < page_table.pointers.size();
+    page_table[PageEntryIndex(gpu_addr)] = page_entry;
 }
 
-std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr) const {
-    if (!IsAddressValid(addr)) {
-        return {};
+std::optional<GPUVAddr> MemoryManager::FindFreeRange(std::size_t size, std::size_t align) const {
+    if (!align) {
+        align = page_size;
+    } else {
+        align = Common::AlignUp(align, page_size);
     }
 
-    const VAddr cpu_addr{page_table.backing_addr[addr >> page_bits]};
-    if (cpu_addr) {
-        return cpu_addr + (addr & page_mask);
+    u64 available_size{};
+    GPUVAddr gpu_addr{address_space_start};
+    while (gpu_addr + available_size < address_space_size) {
+        if (GetPageEntry(gpu_addr + available_size).IsUnmapped()) {
+            available_size += page_size;
+
+            if (available_size >= size) {
+                return gpu_addr;
+            }
+        } else {
+            gpu_addr += available_size + page_size;
+            available_size = 0;
+
+            const auto remainder{gpu_addr % align};
+            if (remainder) {
+                gpu_addr = (gpu_addr - remainder) + align;
+            }
+        }
     }
 
     return {};
 }
 
-template <typename T>
-T MemoryManager::Read(GPUVAddr addr) const {
-    if (!IsAddressValid(addr)) {
+std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
+    const auto page_entry{GetPageEntry(gpu_addr)};
+    if (!page_entry.IsValid()) {
         return {};
     }
 
-    const u8* page_pointer{GetPointer(addr)};
-    if (page_pointer) {
+    return page_entry.ToAddress() + (gpu_addr & page_mask);
+}
+
+template <typename T>
+T MemoryManager::Read(GPUVAddr addr) const {
+    if (auto page_pointer{GetPointer(addr)}; page_pointer) {
         // NOTE: Avoid adding any extra logic to this fast-path block
         T value;
         std::memcpy(&value, page_pointer, sizeof(T));
@@ -144,12 +159,7 @@ T MemoryManager::Read(GPUVAddr addr) const {
 
 template <typename T>
 void MemoryManager::Write(GPUVAddr addr, T data) {
-    if (!IsAddressValid(addr)) {
-        return;
-    }
-
-    u8* page_pointer{GetPointer(addr)};
-    if (page_pointer) {
+    if (auto page_pointer{GetPointer(addr)}; page_pointer) {
         // NOTE: Avoid adding any extra logic to this fast-path block
         std::memcpy(page_pointer, &data, sizeof(T));
         return;
@@ -167,66 +177,49 @@ template void MemoryManager::Write<u16>(GPUVAddr addr, u16 data);
 template void MemoryManager::Write<u32>(GPUVAddr addr, u32 data);
 template void MemoryManager::Write<u64>(GPUVAddr addr, u64 data);
 
-u8* MemoryManager::GetPointer(GPUVAddr addr) {
-    if (!IsAddressValid(addr)) {
+u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) {
+    if (!GetPageEntry(gpu_addr).IsValid()) {
         return {};
     }
 
-    auto& memory = system.Memory();
-
-    const VAddr page_addr{page_table.backing_addr[addr >> page_bits]};
-
-    if (page_addr != 0) {
-        return memory.GetPointer(page_addr + (addr & page_mask));
+    const auto address{GpuToCpuAddress(gpu_addr)};
+    if (!address) {
+        return {};
    }
 
-    LOG_ERROR(HW_GPU, "Unknown GetPointer @ 0x{:016X}", addr);
-    return {};
+    return system.Memory().GetPointer(*address);
 }
 
-const u8* MemoryManager::GetPointer(GPUVAddr addr) const {
-    if (!IsAddressValid(addr)) {
+const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
+    if (!GetPageEntry(gpu_addr).IsValid()) {
        return {};
     }
 
-    const auto& memory = system.Memory();
-
-    const VAddr page_addr{page_table.backing_addr[addr >> page_bits]};
-
-    if (page_addr != 0) {
-        return memory.GetPointer(page_addr + (addr & page_mask));
+    const auto address{GpuToCpuAddress(gpu_addr)};
+    if (!address) {
+        return {};
    }
 
-    LOG_ERROR(HW_GPU, "Unknown GetPointer @ 0x{:016X}", addr);
-    return {};
-}
-
-bool MemoryManager::IsBlockContinuous(const GPUVAddr start, const std::size_t size) const {
-    const std::size_t inner_size = size - 1;
-    const GPUVAddr end = start + inner_size;
-    const auto host_ptr_start = reinterpret_cast<std::uintptr_t>(GetPointer(start));
-    const auto host_ptr_end = reinterpret_cast<std::uintptr_t>(GetPointer(end));
-    const auto range = static_cast<std::size_t>(host_ptr_end - host_ptr_start);
-    return range == inner_size;
+    return system.Memory().GetPointer(*address);
 }
 
-void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer,
-                              const std::size_t size) const {
+void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const {
     std::size_t remaining_size{size};
     std::size_t page_index{gpu_src_addr >> page_bits};
     std::size_t page_offset{gpu_src_addr & page_mask};
 
-    auto& memory = system.Memory();
-
     while (remaining_size > 0) {
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
-        const VAddr src_addr{page_table.backing_addr[page_index] + page_offset};
 
-        // Flush must happen on the rasterizer interface, such that memory is always synchronous
-        // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu.
-        rasterizer.FlushRegion(src_addr, copy_amount);
-        memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
+        if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
+            const auto src_addr{*page_addr + page_offset};
+
+            // Flush must happen on the rasterizer interface, such that memory is always synchronous
+            // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu.
+            rasterizer.FlushRegion(src_addr, copy_amount);
+            system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
+        }
 
         page_index++;
         page_offset = 0;
@@ -241,18 +234,17 @@ void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer,
     std::size_t page_index{gpu_src_addr >> page_bits};
     std::size_t page_offset{gpu_src_addr & page_mask};
 
-    auto& memory = system.Memory();
-
     while (remaining_size > 0) {
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
-        const u8* page_pointer = page_table.pointers[page_index];
-        if (page_pointer) {
-            const VAddr src_addr{page_table.backing_addr[page_index] + page_offset};
-            memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
+
+        if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
+            const auto src_addr{*page_addr + page_offset};
+            system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
         } else {
            std::memset(dest_buffer, 0, copy_amount);
         }
+
         page_index++;
         page_offset = 0;
         dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
@@ -260,23 +252,23 @@ void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer,
     }
 }
 
-void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer,
-                               const std::size_t size) {
+void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) {
     std::size_t remaining_size{size};
     std::size_t page_index{gpu_dest_addr >> page_bits};
     std::size_t page_offset{gpu_dest_addr & page_mask};
 
-    auto& memory = system.Memory();
-
     while (remaining_size > 0) {
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
-        const VAddr dest_addr{page_table.backing_addr[page_index] + page_offset};
 
-        // Invalidate must happen on the rasterizer interface, such that memory is always
-        // synchronous when it is written (even when in asynchronous GPU mode).
-        rasterizer.InvalidateRegion(dest_addr, copy_amount);
-        memory.WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
+        if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
+            const auto dest_addr{*page_addr + page_offset};
+
+            // Invalidate must happen on the rasterizer interface, such that memory is always
+            // synchronous when it is written (even when in asynchronous GPU mode).
+            rasterizer.InvalidateRegion(dest_addr, copy_amount);
+            system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
+        }
 
         page_index++;
         page_offset = 0;
@@ -286,21 +278,20 @@ void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer,
 }
 
 void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer,
-                                     const std::size_t size) {
+                                     std::size_t size) {
     std::size_t remaining_size{size};
     std::size_t page_index{gpu_dest_addr >> page_bits};
     std::size_t page_offset{gpu_dest_addr & page_mask};
 
-    auto& memory = system.Memory();
-
     while (remaining_size > 0) {
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
-        u8* page_pointer = page_table.pointers[page_index];
-        if (page_pointer) {
-            const VAddr dest_addr{page_table.backing_addr[page_index] + page_offset};
-            memory.WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
+
+        if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
+            const auto dest_addr{*page_addr + page_offset};
+            system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
         }
+
         page_index++;
         page_offset = 0;
         src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
@@ -308,273 +299,26 @@ void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buf
     }
 }
 
-void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr,
-                              const std::size_t size) {
+void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size) {
     std::vector<u8> tmp_buffer(size);
     ReadBlock(gpu_src_addr, tmp_buffer.data(), size);
     WriteBlock(gpu_dest_addr, tmp_buffer.data(), size);
 }
 
 void MemoryManager::CopyBlockUnsafe(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr,
-                                    const std::size_t size) {
+                                    std::size_t size) {
     std::vector<u8> tmp_buffer(size);
     ReadBlockUnsafe(gpu_src_addr, tmp_buffer.data(), size);
     WriteBlockUnsafe(gpu_dest_addr, tmp_buffer.data(), size);
 }
 
 bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) {
-    const VAddr addr = page_table.backing_addr[gpu_addr >> page_bits];
-    const std::size_t page = (addr & Core::Memory::PAGE_MASK) + size;
-    return page <= Core::Memory::PAGE_SIZE;
-}
-
-void MemoryManager::MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type,
-                             VAddr backing_addr) {
-    LOG_DEBUG(HW_GPU, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * page_size,
-              (base + size) * page_size);
-
-    const VAddr end{base + size};
-    ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
-               base + page_table.pointers.size());
-
-    if (memory == nullptr) {
-        while (base != end) {
-            page_table.pointers[base] = nullptr;
-            page_table.backing_addr[base] = 0;
-
-            base += 1;
-        }
-    } else {
-        while (base != end) {
-            page_table.pointers[base] = memory;
-            page_table.backing_addr[base] = backing_addr;
-
-            base += 1;
-            memory += page_size;
-            backing_addr += page_size;
-        }
-    }
-}
-
-void MemoryManager::MapMemoryRegion(GPUVAddr base, u64 size, u8* target, VAddr backing_addr) {
-    ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: {:016X}", size);
-    ASSERT_MSG((base & page_mask) == 0, "non-page aligned base: {:016X}", base);
-    MapPages(base / page_size, size / page_size, target, Common::PageType::Memory, backing_addr);
-}
-
-void MemoryManager::UnmapRegion(GPUVAddr base, u64 size) {
-    ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: {:016X}", size);
-    ASSERT_MSG((base & page_mask) == 0, "non-page aligned base: {:016X}", base);
-    MapPages(base / page_size, size / page_size, nullptr, Common::PageType::Unmapped);
-}
-
-bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
-    ASSERT(base + size == next.base);
-    if (type != next.type) {
-        return {};
-    }
-    if (type == VirtualMemoryArea::Type::Allocated && (offset + size != next.offset)) {
-        return {};
-    }
-    if (type == VirtualMemoryArea::Type::Mapped && backing_memory + size != next.backing_memory) {
-        return {};
-    }
-    return true;
-}
-
-MemoryManager::VMAHandle MemoryManager::FindVMA(GPUVAddr target) const {
-    if (target >= address_space_end) {
-        return vma_map.end();
-    } else {
-        return std::prev(vma_map.upper_bound(target));
-    }
-}
-
-MemoryManager::VMAIter MemoryManager::Allocate(VMAIter vma_handle) {
-    VirtualMemoryArea& vma{vma_handle->second};
-
-    vma.type = VirtualMemoryArea::Type::Allocated;
-    vma.backing_addr = 0;
-    vma.backing_memory = {};
-    UpdatePageTableForVMA(vma);
-
-    return MergeAdjacent(vma_handle);
-}
-
-MemoryManager::VMAHandle MemoryManager::AllocateMemory(GPUVAddr target, std::size_t offset,
-                                                       u64 size) {
-
-    // This is the appropriately sized VMA that will turn into our allocation.
-    VMAIter vma_handle{CarveVMA(target, size)};
-    VirtualMemoryArea& vma{vma_handle->second};
-
-    ASSERT(vma.size == size);
-
-    vma.offset = offset;
-
-    return Allocate(vma_handle);
-}
-
-MemoryManager::VMAHandle MemoryManager::MapBackingMemory(GPUVAddr target, u8* memory, u64 size,
-                                                         VAddr backing_addr) {
-    // This is the appropriately sized VMA that will turn into our allocation.
-    VMAIter vma_handle{CarveVMA(target, size)};
-    VirtualMemoryArea& vma{vma_handle->second};
-
-    ASSERT(vma.size == size);
-
-    vma.type = VirtualMemoryArea::Type::Mapped;
-    vma.backing_memory = memory;
-    vma.backing_addr = backing_addr;
-    UpdatePageTableForVMA(vma);
-
-    return MergeAdjacent(vma_handle);
-}
-
-void MemoryManager::UnmapRange(GPUVAddr target, u64 size) {
-    VMAIter vma{CarveVMARange(target, size)};
-    const VAddr target_end{target + size};
-    const VMAIter end{vma_map.end()};
-
-    // The comparison against the end of the range must be done using addresses since VMAs can be
-    // merged during this process, causing invalidation of the iterators.
-    while (vma != end && vma->second.base < target_end) {
-        // Unmapped ranges return to allocated state and can be reused
-        // This behavior is used by Super Mario Odyssey, Sonic Forces, and likely other games
-        vma = std::next(Allocate(vma));
-    }
-
-    ASSERT(FindVMA(target)->second.size >= size);
-}
-
-MemoryManager::VMAIter MemoryManager::StripIterConstness(const VMAHandle& iter) {
-    // This uses a neat C++ trick to convert a const_iterator to a regular iterator, given
-    // non-const access to its container.
-    return vma_map.erase(iter, iter); // Erases an empty range of elements
-}
-
-MemoryManager::VMAIter MemoryManager::CarveVMA(GPUVAddr base, u64 size) {
-    ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: 0x{:016X}", size);
-    ASSERT_MSG((base & page_mask) == 0, "non-page aligned base: 0x{:016X}", base);
-
-    VMAIter vma_handle{StripIterConstness(FindVMA(base))};
-    if (vma_handle == vma_map.end()) {
-        // Target address is outside the managed range
-        return {};
-    }
-
-    const VirtualMemoryArea& vma{vma_handle->second};
-    if (vma.type == VirtualMemoryArea::Type::Mapped) {
-        // Region is already allocated
-        return vma_handle;
-    }
-
-    const VAddr start_in_vma{base - vma.base};
-    const VAddr end_in_vma{start_in_vma + size};
-
-    ASSERT_MSG(end_in_vma <= vma.size, "region size 0x{:016X} is less than required size 0x{:016X}",
-               vma.size, end_in_vma);
-
-    if (end_in_vma < vma.size) {
-        // Split VMA at the end of the allocated region
-        SplitVMA(vma_handle, end_in_vma);
-    }
-    if (start_in_vma != 0) {
-        // Split VMA at the start of the allocated region
-        vma_handle = SplitVMA(vma_handle, start_in_vma);
-    }
-
-    return vma_handle;
-}
-
-MemoryManager::VMAIter MemoryManager::CarveVMARange(GPUVAddr target, u64 size) {
-    ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: 0x{:016X}", size);
-    ASSERT_MSG((target & page_mask) == 0, "non-page aligned base: 0x{:016X}", target);
-
-    const VAddr target_end{target + size};
-    ASSERT(target_end >= target);
-    ASSERT(size > 0);
-
-    VMAIter begin_vma{StripIterConstness(FindVMA(target))};
-    const VMAIter i_end{vma_map.lower_bound(target_end)};
-    if (std::any_of(begin_vma, i_end, [](const auto& entry) {
-            return entry.second.type == VirtualMemoryArea::Type::Unmapped;
-        })) {
+    const auto cpu_addr{GpuToCpuAddress(gpu_addr)};
+    if (!cpu_addr) {
         return {};
     }
-
-    if (target != begin_vma->second.base) {
-        begin_vma = SplitVMA(begin_vma, target - begin_vma->second.base);
-    }
-
-    VMAIter end_vma{StripIterConstness(FindVMA(target_end))};
-    if (end_vma != vma_map.end() && target_end != end_vma->second.base) {
-        end_vma = SplitVMA(end_vma, target_end - end_vma->second.base);
-    }
-
-    return begin_vma;
-}
-
-MemoryManager::VMAIter MemoryManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) {
-    VirtualMemoryArea& old_vma{vma_handle->second};
-    VirtualMemoryArea new_vma{old_vma}; // Make a copy of the VMA
-
-    // For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably
-    // a bug. This restriction might be removed later.
-    ASSERT(offset_in_vma < old_vma.size);
-    ASSERT(offset_in_vma > 0);
-
-    old_vma.size = offset_in_vma;
-    new_vma.base += offset_in_vma;
-    new_vma.size -= offset_in_vma;
-
-    switch (new_vma.type) {
-    case VirtualMemoryArea::Type::Unmapped:
-        break;
-    case VirtualMemoryArea::Type::Allocated:
-        new_vma.offset += offset_in_vma;
-        break;
-    case VirtualMemoryArea::Type::Mapped:
-        new_vma.backing_memory += offset_in_vma;
-        break;
-    }
-
-    ASSERT(old_vma.CanBeMergedWith(new_vma));
-
-    return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
-}
-
-MemoryManager::VMAIter MemoryManager::MergeAdjacent(VMAIter iter) {
-    const VMAIter next_vma{std::next(iter)};
-    if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
-        iter->second.size += next_vma->second.size;
-        vma_map.erase(next_vma);
-    }
-
-    if (iter != vma_map.begin()) {
-        VMAIter prev_vma{std::prev(iter)};
-        if (prev_vma->second.CanBeMergedWith(iter->second)) {
-            prev_vma->second.size += iter->second.size;
-            vma_map.erase(iter);
-            iter = prev_vma;
-        }
-    }
-
-    return iter;
-}
-
-void MemoryManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
-    switch (vma.type) {
-    case VirtualMemoryArea::Type::Unmapped:
-        UnmapRegion(vma.base, vma.size);
-        break;
-    case VirtualMemoryArea::Type::Allocated:
-        MapMemoryRegion(vma.base, vma.size, nullptr, vma.backing_addr);
-        break;
-    case VirtualMemoryArea::Type::Mapped:
-        MapMemoryRegion(vma.base, vma.size, vma.backing_memory, vma.backing_addr);
-        break;
-    }
+    const std::size_t page{(*cpu_addr & Core::Memory::PAGE_MASK) + size};
+    return page <= Core::Memory::PAGE_SIZE;
 }
 
 } // namespace Tegra
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 87658e87a..681bd9588 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -6,9 +6,9 @@
 
 #include <map>
 #include <optional>
+#include <vector>
 
 #include "common/common_types.h"
-#include "common/page_table.h"
 
 namespace VideoCore {
 class RasterizerInterface;
@@ -20,45 +20,57 @@ class System;
 
 namespace Tegra {
 
-/**
- * Represents a VMA in an address space. A VMA is a contiguous region of virtual addressing space
- * with homogeneous attributes across its extents. In this particular implementation each VMA is
- * also backed by a single host memory allocation.
- */
-struct VirtualMemoryArea {
-    enum class Type : u8 {
-        Unmapped,
-        Allocated,
-        Mapped,
+class PageEntry final {
+public:
+    enum class State : u32 {
+        Unmapped = static_cast<u32>(-1),
+        Allocated = static_cast<u32>(-2),
     };
 
-    /// Virtual base address of the region.
-    GPUVAddr base{};
-    /// Size of the region.
-    u64 size{};
-    /// Memory area mapping type.
-    Type type{Type::Unmapped};
-    /// CPU memory mapped address corresponding to this memory area.
-    VAddr backing_addr{};
-    /// Offset into the backing_memory the mapping starts from.
-    std::size_t offset{};
-    /// Pointer backing this VMA.
-    u8* backing_memory{};
-
-    /// Tests if this area can be merged to the right with `next`.
-    bool CanBeMergedWith(const VirtualMemoryArea& next) const;
+    constexpr PageEntry() = default;
+    constexpr PageEntry(State state) : state{state} {}
+    constexpr PageEntry(VAddr addr) : state{static_cast<State>(addr >> ShiftBits)} {}
+
+    constexpr bool IsUnmapped() const {
+        return state == State::Unmapped;
+    }
+
+    constexpr bool IsAllocated() const {
+        return state == State::Allocated;
+    }
+
+    constexpr bool IsValid() const {
+        return !IsUnmapped() && !IsAllocated();
+    }
+
+    constexpr VAddr ToAddress() const {
+        if (!IsValid()) {
+            return {};
+        }
+
+        return static_cast<VAddr>(state) << ShiftBits;
+    }
+
+    constexpr PageEntry operator+(u64 offset) {
+        // If this is a reserved value, offsets do not apply
+        if (!IsValid()) {
+            return *this;
+        }
+        return PageEntry{(static_cast<VAddr>(state) << ShiftBits) + offset};
+    }
+
+private:
+    static constexpr std::size_t ShiftBits{12};
+
+    State state{State::Unmapped};
 };
+static_assert(sizeof(PageEntry) == 4, "PageEntry is too large");
 
 class MemoryManager final {
 public:
     explicit MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer);
     ~MemoryManager();
 
-    GPUVAddr AllocateSpace(u64 size, u64 align);
-    GPUVAddr AllocateSpace(GPUVAddr addr, u64 size, u64 align);
-    GPUVAddr MapBufferEx(VAddr cpu_addr, u64 size);
-    GPUVAddr MapBufferEx(VAddr cpu_addr, GPUVAddr addr, u64 size);
-    GPUVAddr UnmapBuffer(GPUVAddr addr, u64 size);
-
     std::optional<VAddr> GpuToCpuAddress(GPUVAddr addr) const;
 
     template <typename T>
@@ -70,9 +82,6 @@ public:
     u8* GetPointer(GPUVAddr addr);
     const u8* GetPointer(GPUVAddr addr) const;
 
-    /// Returns true if the block is continuous in host memory, false otherwise
-    bool IsBlockContinuous(GPUVAddr start, std::size_t size) const;
-
     /**
      * ReadBlock and WriteBlock are full read and write operations over virtual
     * GPU Memory. It's important to use these when GPU memory may not be continuous
@@ -98,92 +107,43 @@ public:
     void CopyBlockUnsafe(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size);
 
     /**
-     * IsGranularRange checks if a gpu region can be simply read with a pointer
+     * IsGranularRange checks if a gpu region can be simply read with a pointer.
      */
     bool IsGranularRange(GPUVAddr gpu_addr, std::size_t size);
 
-private:
-    using VMAMap = std::map<GPUVAddr, VirtualMemoryArea>;
-    using VMAHandle = VMAMap::const_iterator;
-    using VMAIter = VMAMap::iterator;
-
-    bool IsAddressValid(GPUVAddr addr) const;
-    void MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type,
-                  VAddr backing_addr = 0);
-    void MapMemoryRegion(GPUVAddr base, u64 size, u8* target, VAddr backing_addr);
-    void UnmapRegion(GPUVAddr base, u64 size);
-
-    /// Finds the VMA in which the given address is included in, or `vma_map.end()`.
-    VMAHandle FindVMA(GPUVAddr target) const;
-
-    VMAHandle AllocateMemory(GPUVAddr target, std::size_t offset, u64 size);
-
-    /**
-     * Maps an unmanaged host memory pointer at a given address.
-     *
-     * @param target        The guest address to start the mapping at.
-     * @param memory        The memory to be mapped.
-     * @param size          Size of the mapping in bytes.
-     * @param backing_addr  The base address of the range to back this mapping.
-     */
-    VMAHandle MapBackingMemory(GPUVAddr target, u8* memory, u64 size, VAddr backing_addr);
-
-    /// Unmaps a range of addresses, splitting VMAs as necessary.
-    void UnmapRange(GPUVAddr target, u64 size);
-
-    /// Converts a VMAHandle to a mutable VMAIter.
-    VMAIter StripIterConstness(const VMAHandle& iter);
-
-    /// Marks as the specified VMA as allocated.
-    VMAIter Allocate(VMAIter vma);
-
-    /**
-     * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing
-     * the appropriate error checking.
-     */
-    VMAIter CarveVMA(GPUVAddr base, u64 size);
-
-    /**
-     * Splits the edges of the given range of non-Free VMAs so that there is a VMA split at each
-     * end of the range.
-     */
-    VMAIter CarveVMARange(GPUVAddr base, u64 size);
-
-    /**
-     * Splits a VMA in two, at the specified offset.
-     * @returns the right side of the split, with the original iterator becoming the left side.
-     */
-    VMAIter SplitVMA(VMAIter vma, u64 offset_in_vma);
+    GPUVAddr Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size);
+    GPUVAddr MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align);
+    std::optional<GPUVAddr> AllocateFixed(GPUVAddr gpu_addr, std::size_t size);
+    GPUVAddr Allocate(std::size_t size, std::size_t align);
+    void Unmap(GPUVAddr gpu_addr, std::size_t size);
 
-    /**
-     * Checks for and merges the specified VMA with adjacent ones if possible.
-     * @returns the merged VMA or the original if no merging was possible.
-     */
-    VMAIter MergeAdjacent(VMAIter vma);
+private:
+    PageEntry GetPageEntry(GPUVAddr gpu_addr) const;
+    void SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size = page_size);
+    GPUVAddr UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size);
+    std::optional<GPUVAddr> FindFreeRange(std::size_t size, std::size_t align) const;
 
-    /// Updates the pages corresponding to this VMA so they match the VMA's attributes.
-    void UpdatePageTableForVMA(const VirtualMemoryArea& vma);
+    void TryLockPage(PageEntry page_entry, std::size_t size);
+    void TryUnlockPage(PageEntry page_entry, std::size_t size);
 
-    /// Finds a free (unmapped region) of the specified size starting at the specified address.
-    GPUVAddr FindFreeRegion(GPUVAddr region_start, u64 size) const;
+    static constexpr std::size_t PageEntryIndex(GPUVAddr gpu_addr) {
+        return (gpu_addr >> page_bits) & page_table_mask;
+    }
 
-private:
+    static constexpr u64 address_space_size = 1ULL << 40;
+    static constexpr u64 address_space_start = 1ULL << 32;
     static constexpr u64 page_bits{16};
     static constexpr u64 page_size{1 << page_bits};
     static constexpr u64 page_mask{page_size - 1};
+    static constexpr u64 page_table_bits{24};
+    static constexpr u64 page_table_size{1 << page_table_bits};
+    static constexpr u64 page_table_mask{page_table_size - 1};
 
-    /// Address space in bits, according to Tegra X1 TRM
-    static constexpr u32 address_space_width{40};
-    /// Start address for mapping, this is fairly arbitrary but must be non-zero.
-    static constexpr GPUVAddr address_space_base{0x100000};
-    /// End of address space, based on address space in bits.
-    static constexpr GPUVAddr address_space_end{1ULL << address_space_width};
+    Core::System& system;
 
-    Common::PageTable page_table;
-    VMAMap vma_map;
     VideoCore::RasterizerInterface& rasterizer;
 
-    Core::System& system;
+    std::vector<PageEntry> page_table;
 };
 
 } // namespace Tegra
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 03e82c599..cb284db77 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -178,16 +178,11 @@ RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWind
 
     if (device.UseAsynchronousShaders()) {
         // Max worker threads we should allow
-        constexpr auto MAX_THREADS = 2u;
-        // Amount of threads we should reserve for other parts of yuzu
-        constexpr auto RESERVED_THREADS = 6u;
-        // Get the amount of threads we can use(this can return zero)
-        const auto cpu_thread_count =
-            std::max(RESERVED_THREADS, std::thread::hardware_concurrency());
-        // Deduce how many "extra" threads we have to use.
-        const auto max_threads_unused = cpu_thread_count - RESERVED_THREADS;
+        constexpr u32 MAX_THREADS = 4;
+        // Deduce how many threads we can use
+        const u32 threads_used = std::thread::hardware_concurrency() / 4;
         // Always allow at least 1 thread regardless of our settings
-        const auto max_worker_count = std::max(1u, max_threads_unused);
+        const auto max_worker_count = std::max(1U, threads_used);
         // Don't use more than MAX_THREADS
         const auto worker_count = std::min(max_worker_count, MAX_THREADS);
         async_shaders.AllocateWorkers(worker_count);
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
index ce53e5a6b..a551e3de8 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
@@ -696,6 +696,7 @@ void VKBlitScreen::CreateFramebuffers() {
         .flags = 0,
         .renderPass = *renderpass,
         .attachmentCount = 1,
+        .pAttachments = nullptr,
         .width = size.width,
         .height = size.height,
         .layers = 1,
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp
index 6245e0d78..0c03e4d83 100644
--- a/src/video_core/renderer_vulkan/vk_device.cpp
+++ b/src/video_core/renderer_vulkan/vk_device.cpp
@@ -771,8 +771,9 @@ std::vector<VkDeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() const
             .pNext = nullptr,
             .flags = 0,
             .queueFamilyIndex = queue_family,
+            .queueCount = 1,
+            .pQueuePriorities = nullptr,
         });
-        ci.queueCount = 1;
         ci.pQueuePriorities = &QUEUE_PRIORITY;
     }
 
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 42b3a744c..418c62bc4 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -261,8 +261,13 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach
     }
 
     const Specialization specialization{
+        .base_binding = 0,
         .workgroup_size = key.workgroup_size,
         .shared_memory_size = key.shared_memory_size,
+        .point_size = std::nullopt,
+        .enabled_attributes = {},
+        .attribute_types = {},
+        .ndc_minus_one_to_one = false,
     };
     const SPIRVShader spirv_shader{Decompile(device, shader->GetIR(), ShaderType::Compute,
                                              shader->GetRegistry(), specialization),
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 2ed2004f0..7500e8244 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -815,8 +815,13 @@ bool RasterizerVulkan::WalkAttachmentOverlaps(const CachedSurfaceView& attachmen
 
 std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
     VkRenderPass renderpass) {
-    FramebufferCacheKey key{renderpass, std::numeric_limits<u32>::max(),
-                            std::numeric_limits<u32>::max(), std::numeric_limits<u32>::max()};
+    FramebufferCacheKey key{
+        .renderpass = renderpass,
+        .width = std::numeric_limits<u32>::max(),
+        .height = std::numeric_limits<u32>::max(),
+        .layers = std::numeric_limits<u32>::max(),
+        .views = {},
+    };
 
     const auto try_push = [&key](const View& view) {
         if (!view) {
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
index 2d5460776..b068888f9 100644
--- a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
@@ -47,6 +47,7 @@ vk::Sampler VKSamplerCache::CreateSampler(const Tegra::Texture::TSCEntry& tsc) c
     VkSamplerCustomBorderColorCreateInfoEXT border{
         .sType = VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT,
         .pNext = nullptr,
+        .customBorderColor = {},
         .format = VK_FORMAT_UNDEFINED,
     };
     std::memcpy(&border.customBorderColor, color.data(), sizeof(color));
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index efd4bb13b..2c6f54101 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -473,6 +473,8 @@ VkImageView CachedSurfaceView::GetAttachment() {
             .aspectMask = aspect_mask,
             .baseMipLevel = base_level,
             .levelCount = num_levels,
+            .baseArrayLayer = 0,
+            .layerCount = 0,
         },
     };
     if (image_view_type == VK_IMAGE_VIEW_TYPE_3D) {
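
For reference, the first-fit scan performed by the new MemoryManager::FindFreeRange above can be restated in self-contained form. This is a simplified sketch under assumed stand-ins: a std::vector<bool> of free flags plays the role of the real PageEntry table, and the constants mirror the new address-space values from the diff.

#include <cstdint>
#include <optional>
#include <vector>

using u64 = std::uint64_t;

constexpr u64 page_bits = 16;                   // 64 KiB GPU pages, as in the diff
constexpr u64 page_size = 1ULL << page_bits;
constexpr u64 address_space_start = 1ULL << 32;
constexpr u64 address_space_size = 1ULL << 40;

// unmapped[i] says whether GPU page i is free; a stand-in for the PageEntry vector.
std::optional<u64> FindFreeRange(const std::vector<bool>& unmapped, u64 size, u64 align) {
    // An alignment of zero means "page aligned"; otherwise round it up to whole pages.
    align = align ? (align + page_size - 1) & ~(page_size - 1) : page_size;

    u64 available_size = 0;
    u64 gpu_addr = address_space_start;
    while (gpu_addr + available_size < address_space_size) {
        if (unmapped[(gpu_addr + available_size) >> page_bits]) {
            // Grow the current candidate window one page at a time.
            available_size += page_size;
            if (available_size >= size) {
                return gpu_addr;
            }
        } else {
            // Hit a mapped page: restart the window past it, bumped up to the alignment.
            gpu_addr += available_size + page_size;
            available_size = 0;
            if (const u64 remainder = gpu_addr % align; remainder != 0) {
                gpu_addr += align - remainder;
            }
        }
    }
    return std::nullopt; // no gap of `size` bytes exists
}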