 20 files changed, 567 insertions(+), 308 deletions(-)
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 0252c8c31..5afdeb5ff 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -226,6 +226,7 @@ add_library(core STATIC
     hle/kernel/k_page_buffer.h
     hle/kernel/k_page_heap.cpp
     hle/kernel/k_page_heap.h
+    hle/kernel/k_page_group.cpp
     hle/kernel/k_page_group.h
     hle/kernel/k_page_table.cpp
     hle/kernel/k_page_table.h
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index 4b1c134d4..d9da1e600 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -27,13 +27,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
     auto& page_table = m_owner->PageTable();

     // Construct the page group.
-    m_page_group = {};
+    m_page_group.emplace(kernel, page_table.GetBlockInfoManager());

     // Lock the memory.
-    R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size))
+    R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size))

     // Clear the memory.
-    for (const auto& block : m_page_group.Nodes()) {
+    for (const auto& block : *m_page_group) {
         std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
     }

@@ -51,12 +51,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
 void KCodeMemory::Finalize() {
     // Unlock.
     if (!m_is_mapped && !m_is_owner_mapped) {
-        const size_t size = m_page_group.GetNumPages() * PageSize;
-        m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group);
+        const size_t size = m_page_group->GetNumPages() * PageSize;
+        m_owner->PageTable().UnlockForCodeMemory(m_address, size, *m_page_group);
     }

     // Close the page group.
-    m_page_group = {};
+    m_page_group->Close();
+    m_page_group->Finalize();

     // Close our reference to our owner.
     m_owner->Close();
@@ -64,7 +65,7 @@ void KCodeMemory::Finalize() {

 Result KCodeMemory::Map(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -74,7 +75,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {

     // Map the memory.
     R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
-        address, m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
+        address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));

     // Mark ourselves as mapped.
     m_is_mapped = true;
@@ -84,13 +85,13 @@ Result KCodeMemory::Map(VAddr address, size_t size) {

 Result KCodeMemory::Unmap(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

     // Lock ourselves.
     KScopedLightLock lk(m_lock);

     // Unmap the memory.
-    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, m_page_group,
+    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, *m_page_group,
                                                           KMemoryState::CodeOut));

     // Mark ourselves as unmapped.
@@ -101,7 +102,7 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {

 Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -125,7 +126,7 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission

     // Map the memory.
     R_TRY(
-        m_owner->PageTable().MapPages(address, m_page_group, KMemoryState::GeneratedCode, k_perm));
+        m_owner->PageTable().MapPages(address, *m_page_group, KMemoryState::GeneratedCode, k_perm));

     // Mark ourselves as mapped.
     m_is_owner_mapped = true;
@@ -135,13 +136,13 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission

 Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

     // Lock ourselves.
     KScopedLightLock lk(m_lock);

     // Unmap the memory.
-    R_TRY(m_owner->PageTable().UnmapPages(address, m_page_group, KMemoryState::GeneratedCode));
+    R_TRY(m_owner->PageTable().UnmapPages(address, *m_page_group, KMemoryState::GeneratedCode));

     // Mark ourselves as unmapped.
     m_is_owner_mapped = false;
diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h
index 2e7e1436a..5b260b385 100644
--- a/src/core/hle/kernel/k_code_memory.h
+++ b/src/core/hle/kernel/k_code_memory.h
@@ -3,6 +3,8 @@

 #pragma once

+#include <optional>
+
 #include "common/common_types.h"
 #include "core/device_memory.h"
 #include "core/hle/kernel/k_auto_object.h"
@@ -49,11 +51,11 @@ public:
         return m_address;
     }
     size_t GetSize() const {
-        return m_is_initialized ? m_page_group.GetNumPages() * PageSize : 0;
+        return m_is_initialized ? m_page_group->GetNumPages() * PageSize : 0;
     }

 private:
-    KPageGroup m_page_group{};
+    std::optional<KPageGroup> m_page_group{};
     KProcess* m_owner{};
     VAddr m_address{};
     KLightLock m_lock;
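The k_code_memory change above swaps an eagerly constructed KPageGroup member for a std::optional that is emplaced once its constructor arguments exist. A minimal sketch of the pattern, using hypothetical stand-in types (PageGroup, CodeMemory), not the real kernel classes:

// Sketch only: KPageGroup now needs (kernel, block-info manager) at
// construction, so the member becomes an optional that is emplaced late.
#include <cassert>
#include <optional>

struct Manager {};

class PageGroup {
public:
    PageGroup(int& kernel, Manager* manager) : m_kernel{kernel}, m_manager{manager} {}
    int GetNumPages() const { return 0; }
private:
    int& m_kernel;
    Manager* m_manager;
};

class CodeMemory {
public:
    void Initialize(int& kernel, Manager* manager) {
        // Deferred construction: the dependencies exist only now.
        m_page_group.emplace(kernel, manager);
    }
    int GetSize() const {
        // Members of the optional are reached with ->, mirroring the diff.
        return m_page_group ? m_page_group->GetNumPages() : 0;
    }
private:
    std::optional<PageGroup> m_page_group{};
};

int main() {
    int kernel = 0;
    Manager manager;
    CodeMemory mem;
    mem.Initialize(kernel, &manager);
    assert(mem.GetSize() == 0);
}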
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index bd33571da..cd6ea388e 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -223,7 +223,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,

     // Ensure that we don't leave anything un-freed.
     ON_RESULT_FAILURE {
-        for (const auto& it : out->Nodes()) {
+        for (const auto& it : *out) {
             auto& manager = this->GetManager(it.GetAddress());
             const size_t node_num_pages = std::min<u64>(
                 it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
@@ -285,7 +285,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
                                      m_has_optimized_process[static_cast<size_t>(pool)], true));

     // Open the first reference to the pages.
-    for (const auto& block : out->Nodes()) {
+    for (const auto& block : *out) {
         PAddr cur_address = block.GetAddress();
         size_t remaining_pages = block.GetNumPages();
         while (remaining_pages > 0) {
@@ -335,7 +335,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32

     // Perform optimized memory tracking, if we should.
     if (optimized) {
         // Iterate over the allocated blocks.
-        for (const auto& block : out->Nodes()) {
+        for (const auto& block : *out) {
             // Get the block extents.
             const PAddr block_address = block.GetAddress();
             const size_t block_pages = block.GetNumPages();
@@ -391,7 +391,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
         }
     } else {
         // Set all the allocated memory.
-        for (const auto& block : out->Nodes()) {
+        for (const auto& block : *out) {
             std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                         block.GetSize());
         }
diff --git a/src/core/hle/kernel/k_page_group.cpp b/src/core/hle/kernel/k_page_group.cpp
new file mode 100644
index 000000000..d8c644a33
--- /dev/null
+++ b/src/core/hle/kernel/k_page_group.cpp
@@ -0,0 +1,121 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
+#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_page_group.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+void KPageGroup::Finalize() {
+    KBlockInfo* cur = m_first_block;
+    while (cur != nullptr) {
+        KBlockInfo* next = cur->GetNext();
+        m_manager->Free(cur);
+        cur = next;
+    }
+
+    m_first_block = nullptr;
+    m_last_block = nullptr;
+}
+
+void KPageGroup::CloseAndReset() {
+    auto& mm = m_kernel.MemoryManager();
+
+    KBlockInfo* cur = m_first_block;
+    while (cur != nullptr) {
+        KBlockInfo* next = cur->GetNext();
+        mm.Close(cur->GetAddress(), cur->GetNumPages());
+        m_manager->Free(cur);
+        cur = next;
+    }
+
+    m_first_block = nullptr;
+    m_last_block = nullptr;
+}
+
+size_t KPageGroup::GetNumPages() const {
+    size_t num_pages = 0;
+
+    for (const auto& it : *this) {
+        num_pages += it.GetNumPages();
+    }
+
+    return num_pages;
+}
+
+Result KPageGroup::AddBlock(KPhysicalAddress addr, size_t num_pages) {
+    // Succeed immediately if we're adding no pages.
+    R_SUCCEED_IF(num_pages == 0);
+
+    // Check for overflow.
+    ASSERT(addr < addr + num_pages * PageSize);
+
+    // Try to just append to the last block.
+    if (m_last_block != nullptr) {
+        R_SUCCEED_IF(m_last_block->TryConcatenate(addr, num_pages));
+    }
+
+    // Allocate a new block.
+    KBlockInfo* new_block = m_manager->Allocate();
+    R_UNLESS(new_block != nullptr, ResultOutOfResource);
+
+    // Initialize the block.
+    new_block->Initialize(addr, num_pages);
+
+    // Add the block to our list.
+    if (m_last_block != nullptr) {
+        m_last_block->SetNext(new_block);
+    } else {
+        m_first_block = new_block;
+    }
+    m_last_block = new_block;
+
+    R_SUCCEED();
+}
+
+void KPageGroup::Open() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.Open(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+void KPageGroup::OpenFirst() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.OpenFirst(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+void KPageGroup::Close() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.Close(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+bool KPageGroup::IsEquivalentTo(const KPageGroup& rhs) const {
+    auto lit = this->begin();
+    auto rit = rhs.begin();
+    auto lend = this->end();
+    auto rend = rhs.end();
+
+    while (lit != lend && rit != rend) {
+        if (*lit != *rit) {
+            return false;
+        }
+
+        ++lit;
+        ++rit;
+    }
+
+    return lit == lend && rit == rend;
+}
+
+} // namespace Kernel
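AddBlock first tries to extend the last block via TryConcatenate, so physically contiguous allocations collapse into a single KBlockInfo. A minimal sketch of that coalescing behaviour under simplified stand-in types (Block and Group are hypothetical, not the kernel classes):

// Sketch only: a block that ends exactly where the new range starts is
// extended in place instead of appending a fresh block.
#include <cstdint>
#include <iostream>
#include <vector>

constexpr std::uint64_t kPageSize = 0x1000;

struct Block {
    std::uint64_t addr;
    std::uint64_t num_pages;
    // Mirrors KBlockInfo::TryConcatenate: append only if physically contiguous.
    bool TryConcatenate(std::uint64_t a, std::uint64_t np) {
        if (a != 0 && a == addr + num_pages * kPageSize) {
            num_pages += np;
            return true;
        }
        return false;
    }
};

struct Group {
    std::vector<Block> blocks;
    void AddBlock(std::uint64_t addr, std::uint64_t np) {
        if (np == 0) return;
        if (!blocks.empty() && blocks.back().TryConcatenate(addr, np)) return;
        blocks.push_back({addr, np});
    }
};

int main() {
    Group g;
    g.AddBlock(0x10000, 4);
    g.AddBlock(0x14000, 2);  // contiguous: merged into the first block
    g.AddBlock(0x20000, 1);  // gap: becomes a second block
    std::cout << g.blocks.size() << '\n';  // prints 2
}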
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h
index 316f172f2..c07f17663 100644
--- a/src/core/hle/kernel/k_page_group.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

 #pragma once
@@ -13,24 +13,23 @@

 namespace Kernel {

+class KBlockInfoManager;
+class KernelCore;
 class KPageGroup;

 class KBlockInfo {
-private:
-    friend class KPageGroup;
-
 public:
-    constexpr KBlockInfo() = default;
+    constexpr explicit KBlockInfo() : m_next(nullptr) {}

-    constexpr void Initialize(PAddr addr, size_t np) {
+    constexpr void Initialize(KPhysicalAddress addr, size_t np) {
         ASSERT(Common::IsAligned(addr, PageSize));
         ASSERT(static_cast<u32>(np) == np);

-        m_page_index = static_cast<u32>(addr) / PageSize;
+        m_page_index = static_cast<u32>(addr / PageSize);
         m_num_pages = static_cast<u32>(np);
     }

-    constexpr PAddr GetAddress() const {
+    constexpr KPhysicalAddress GetAddress() const {
         return m_page_index * PageSize;
     }
     constexpr size_t GetNumPages() const {
@@ -39,10 +38,10 @@ public:
     constexpr size_t GetSize() const {
         return this->GetNumPages() * PageSize;
     }
-    constexpr PAddr GetEndAddress() const {
+    constexpr KPhysicalAddress GetEndAddress() const {
         return (m_page_index + m_num_pages) * PageSize;
     }
-    constexpr PAddr GetLastAddress() const {
+    constexpr KPhysicalAddress GetLastAddress() const {
         return this->GetEndAddress() - 1;
     }
@@ -62,8 +61,8 @@ public:
         return !(*this == rhs);
     }

-    constexpr bool IsStrictlyBefore(PAddr addr) const {
-        const PAddr end = this->GetEndAddress();
+    constexpr bool IsStrictlyBefore(KPhysicalAddress addr) const {
+        const KPhysicalAddress end = this->GetEndAddress();

         if (m_page_index != 0 && end == 0) {
             return false;
@@ -72,11 +71,11 @@ public:
         return end < addr;
     }

-    constexpr bool operator<(PAddr addr) const {
+    constexpr bool operator<(KPhysicalAddress addr) const {
         return this->IsStrictlyBefore(addr);
     }

-    constexpr bool TryConcatenate(PAddr addr, size_t np) {
+    constexpr bool TryConcatenate(KPhysicalAddress addr, size_t np) {
         if (addr != 0 && addr == this->GetEndAddress()) {
             m_num_pages += static_cast<u32>(np);
             return true;
@@ -90,96 +89,118 @@ private:
     }

 private:
+    friend class KPageGroup;
+
     KBlockInfo* m_next{};
     u32 m_page_index{};
     u32 m_num_pages{};
 };
 static_assert(sizeof(KBlockInfo) <= 0x10);

-class KPageGroup final {
+class KPageGroup {
 public:
-    class Node final {
+    class Iterator {
     public:
-        constexpr Node(u64 addr_, std::size_t num_pages_) : addr{addr_}, num_pages{num_pages_} {}
+        using iterator_category = std::forward_iterator_tag;
+        using value_type = const KBlockInfo;
+        using difference_type = std::ptrdiff_t;
+        using pointer = value_type*;
+        using reference = value_type&;
+
+        constexpr explicit Iterator(pointer n) : m_node(n) {}
+
+        constexpr bool operator==(const Iterator& rhs) const {
+            return m_node == rhs.m_node;
+        }
+        constexpr bool operator!=(const Iterator& rhs) const {
+            return !(*this == rhs);
+        }

-        constexpr u64 GetAddress() const {
-            return addr;
+        constexpr pointer operator->() const {
+            return m_node;
+        }
+        constexpr reference operator*() const {
+            return *m_node;
         }
-        constexpr std::size_t GetNumPages() const {
-            return num_pages;
+        constexpr Iterator& operator++() {
+            m_node = m_node->GetNext();
+            return *this;
         }
-        constexpr std::size_t GetSize() const {
-            return GetNumPages() * PageSize;
+        constexpr Iterator operator++(int) {
+            const Iterator it{*this};
+            ++(*this);
+            return it;
         }

     private:
-        u64 addr{};
-        std::size_t num_pages{};
+        pointer m_node{};
     };

-public:
-    KPageGroup() = default;
-    KPageGroup(u64 address, u64 num_pages) {
-        ASSERT(AddBlock(address, num_pages).IsSuccess());
+    explicit KPageGroup(KernelCore& kernel, KBlockInfoManager* m)
+        : m_kernel{kernel}, m_manager{m} {}
+    ~KPageGroup() {
+        this->Finalize();
     }

-    constexpr std::list<Node>& Nodes() {
-        return nodes;
-    }
+    void CloseAndReset();
+    void Finalize();

-    constexpr const std::list<Node>& Nodes() const {
-        return nodes;
+    Iterator begin() const {
+        return Iterator{m_first_block};
+    }
+    Iterator end() const {
+        return Iterator{nullptr};
+    }
+    bool empty() const {
+        return m_first_block == nullptr;
     }

-    std::size_t GetNumPages() const {
-        std::size_t num_pages = 0;
-        for (const Node& node : nodes) {
-            num_pages += node.GetNumPages();
-        }
-        return num_pages;
-    }
-
-    bool IsEqual(KPageGroup& other) const {
-        auto this_node = nodes.begin();
-        auto other_node = other.nodes.begin();
-        while (this_node != nodes.end() && other_node != other.nodes.end()) {
-            if (this_node->GetAddress() != other_node->GetAddress() ||
-                this_node->GetNumPages() != other_node->GetNumPages()) {
-                return false;
-            }
-            this_node = std::next(this_node);
-            other_node = std::next(other_node);
-        }
+    Result AddBlock(KPhysicalAddress addr, size_t num_pages);
+    void Open() const;
+    void OpenFirst() const;
+    void Close() const;
+
+    size_t GetNumPages() const;
+
+    bool IsEquivalentTo(const KPageGroup& rhs) const;
+
+    bool operator==(const KPageGroup& rhs) const {
+        return this->IsEquivalentTo(rhs);
+    }

-        return this_node == nodes.end() && other_node == other.nodes.end();
+    bool operator!=(const KPageGroup& rhs) const {
+        return !(*this == rhs);
     }

-    Result AddBlock(u64 address, u64 num_pages) {
-        if (!num_pages) {
-            return ResultSuccess;
+private:
+    KernelCore& m_kernel;
+    KBlockInfo* m_first_block{};
+    KBlockInfo* m_last_block{};
+    KBlockInfoManager* m_manager{};
+};
+
+class KScopedPageGroup {
+public:
+    explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) {
+        if (m_pg) {
+            m_pg->Open();
         }
-        if (!nodes.empty()) {
-            const auto node = nodes.back();
-            if (node.GetAddress() + node.GetNumPages() * PageSize == address) {
-                address = node.GetAddress();
-                num_pages += node.GetNumPages();
-                nodes.pop_back();
-            }
+    }
+    explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {}
+    ~KScopedPageGroup() {
+        if (m_pg) {
+            m_pg->Close();
         }
-        nodes.push_back({address, num_pages});
-        return ResultSuccess;
     }

-    bool Empty() const {
-        return nodes.empty();
+    void CancelClose() {
+        m_pg = nullptr;
     }

-    void Finalize() {}
-
 private:
-    std::list<Node> nodes;
+    const KPageGroup* m_pg{};
 };

 } // namespace Kernel
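KScopedPageGroup pairs Open() with a scope-exit Close(), and CancelClose() lets a successful operation keep the references. A minimal sketch of the idiom with hypothetical stand-ins (PageGroup, ScopedPageGroup, MapAllPages), not the kernel types themselves:

// Sketch only: open references on construction, close them on scope exit
// unless the caller commits the operation by cancelling the close.
#include <iostream>

struct PageGroup {
    void Open() const { std::cout << "open refs\n"; }
    void Close() const { std::cout << "close refs\n"; }
};

class ScopedPageGroup {
public:
    explicit ScopedPageGroup(const PageGroup& pg) : m_pg(&pg) { m_pg->Open(); }
    ~ScopedPageGroup() {
        if (m_pg) {
            m_pg->Close();  // only runs if the operation failed part-way
        }
    }
    void CancelClose() { m_pg = nullptr; }  // success: keep the references

private:
    const PageGroup* m_pg;
};

bool MapAllPages() { return true; }  // stand-in for the real mapping loop

int main() {
    PageGroup pg;
    ScopedPageGroup guard(pg);
    if (MapAllPages()) {
        guard.CancelClose();  // persist the references we just opened
    }
}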
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 612fc76fa..9c7ac22dc 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -100,7 +100,7 @@ constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType a

 KPageTable::KPageTable(Core::System& system_)
     : m_general_lock{system_.Kernel()},
-      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {}
+      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {}

 KPageTable::~KPageTable() = default;

@@ -373,7 +373,7 @@ Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState sta
                                              m_memory_block_slab_manager);

     // Allocate and open.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, num_pages,
         KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
@@ -432,7 +432,7 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
         const size_t num_pages = size / PageSize;

         // Create page groups for the memory being mapped.
-        KPageGroup pg;
+        KPageGroup pg{m_kernel, m_block_info_manager};
         AddRegionToPages(src_address, num_pages, pg);

         // Reprotect the source as kernel-read/not mapped.
@@ -593,7 +593,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     const size_t size = num_pages * PageSize;

     // We're making a new group, not adding to an existing one.
-    R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);
+    R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);

     // Begin traversal.
     Common::PageTable::TraversalContext context;
@@ -640,11 +640,10 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     R_SUCCEED();
 }

-bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
+bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages) {
     ASSERT(this->IsLockedByCurrentThread());

     const size_t size = num_pages * PageSize;
-    const auto& pg = pg_ll.Nodes();
     const auto& memory_layout = m_system.Kernel().MemoryLayout();

     // Empty groups are necessarily invalid.
@@ -942,9 +941,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add

     ON_RESULT_FAILURE {
         if (cur_mapped_addr != dst_addr) {
-            // HACK: Manually close the pages.
-            HACK_ClosePages(dst_addr, (cur_mapped_addr - dst_addr) / PageSize);
-
             ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize,
                            KMemoryPermission::None, OperationType::Unmap)
                        .IsSuccess());
@@ -1020,9 +1016,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         // Map the page.
         R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page));

-        // HACK: Manually open the pages.
-        HACK_OpenPages(start_partial_page, 1);
-
         // Update tracking extents.
         cur_mapped_addr += PageSize;
         cur_block_addr += PageSize;
@@ -1051,9 +1044,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
             R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm,
                           OperationType::Map, cur_block_addr));

-            // HACK: Manually open the pages.
-            HACK_OpenPages(cur_block_addr, cur_block_size / PageSize);
-
             // Update tracking extents.
             cur_mapped_addr += cur_block_size;
             cur_block_addr = next_entry.phys_addr;
@@ -1073,9 +1063,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map,
                       cur_block_addr));

-        // HACK: Manually open the pages.
-        HACK_OpenPages(cur_block_addr, last_block_size / PageSize);
-
         // Update tracking extents.
         cur_mapped_addr += last_block_size;
         cur_block_addr += last_block_size;
@@ -1107,9 +1094,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add

         // Map the page.
         R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page));
-
-        // HACK: Manually open the pages.
-        HACK_OpenPages(end_partial_page, 1);
     }

     // Update memory blocks to reflect our changes
@@ -1211,9 +1195,6 @@ Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState
     const size_t aligned_size = aligned_end - aligned_start;
     const size_t aligned_num_pages = aligned_size / PageSize;

-    // HACK: Manually close the pages.
-    HACK_ClosePages(aligned_start, aligned_num_pages);
-
     // Unmap the pages.
     R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None,
                   OperationType::Unmap));
@@ -1501,17 +1482,6 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi
     }
 }

-void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) {
-    m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages);
-}
-
-void KPageTable::HACK_ClosePages(VAddr virt_addr, size_t num_pages) {
-    for (size_t index = 0; index < num_pages; ++index) {
-        const auto paddr = GetPhysicalAddr(virt_addr + (index * PageSize));
-        m_system.Kernel().MemoryManager().Close(paddr, 1);
-    }
-}
-
 Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     // Lock the physical memory lock.
     KScopedLightLock phys_lk(m_map_physical_memory_lock);
@@ -1572,7 +1542,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
             R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

             // Allocate pages for the new memory.
-            KPageGroup pg;
+            KPageGroup pg{m_kernel, m_block_info_manager};
             R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
                 &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));
@@ -1650,7 +1620,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                 KScopedPageTableUpdater updater(this);

                 // Prepare to iterate over the memory.
-                auto pg_it = pg.Nodes().begin();
+                auto pg_it = pg.begin();
                 PAddr pg_phys_addr = pg_it->GetAddress();
                 size_t pg_pages = pg_it->GetNumPages();
@@ -1680,9 +1650,6 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                                                          last_unmap_address + 1 - cur_address) /
                                                      PageSize;

-                        // HACK: Manually close the pages.
-                        HACK_ClosePages(cur_address, cur_pages);
-
                         // Unmap.
                         ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
                                        OperationType::Unmap)
                                    .IsSuccess());
@@ -1703,7 +1670,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                     // Release any remaining unmapped memory.
                     m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
                     m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
-                    for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) {
+                    for (++pg_it; pg_it != pg.end(); ++pg_it) {
                         m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
                                                                     pg_it->GetNumPages());
                         m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
@@ -1731,7 +1698,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                 // Check if we're at the end of the physical block.
                 if (pg_pages == 0) {
                     // Ensure there are more pages to map.
-                    ASSERT(pg_it != pg.Nodes().end());
+                    ASSERT(pg_it != pg.end());

                     // Advance our physical block.
                     ++pg_it;
@@ -1742,10 +1709,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                 // Map whatever we can.
                 const size_t cur_pages = std::min(pg_pages, map_pages);
                 R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
-                              OperationType::Map, pg_phys_addr));
-
-                // HACK: Manually open the pages.
-                HACK_OpenPages(pg_phys_addr, cur_pages);
+                              OperationType::MapFirst, pg_phys_addr));

                 // Advance.
                 cur_address += cur_pages * PageSize;
@@ -1888,9 +1852,6 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
                                                  last_address + 1 - cur_address) /
                                              PageSize;

-            // HACK: Manually close the pages.
-            HACK_ClosePages(cur_address, cur_pages);
-
             // Unmap.
             ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)
                        .IsSuccess());
@@ -1955,7 +1916,7 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size)
     R_TRY(dst_allocator_result);

     // Map the memory.
-    KPageGroup page_linked_list;
+    KPageGroup page_linked_list{m_kernel, m_block_info_manager};
     const size_t num_pages{size / PageSize};
     const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
         KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
@@ -2022,14 +1983,14 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size
                                              num_dst_allocator_blocks);
     R_TRY(dst_allocator_result);

-    KPageGroup src_pages;
-    KPageGroup dst_pages;
+    KPageGroup src_pages{m_kernel, m_block_info_manager};
+    KPageGroup dst_pages{m_kernel, m_block_info_manager};
     const size_t num_pages{size / PageSize};

     AddRegionToPages(src_address, num_pages, src_pages);
     AddRegionToPages(dst_address, num_pages, dst_pages);

-    R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion);
+    R_UNLESS(dst_pages.IsEquivalentTo(src_pages), ResultInvalidMemoryRegion);

     {
         auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
@@ -2060,7 +2021,7 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,

     VAddr cur_addr{addr};

-    for (const auto& node : page_linked_list.Nodes()) {
+    for (const auto& node : page_linked_list) {
         if (const auto result{
                 Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
             result.IsError()) {
@@ -2160,7 +2121,7 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {

     VAddr cur_addr{addr};

-    for (const auto& node : page_linked_list.Nodes()) {
+    for (const auto& node : page_linked_list) {
         if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
                                       OperationType::Unmap)};
             result.IsError()) {
@@ -2527,13 +2488,13 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
         R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

         // Allocate pages for the heap extension.
-        KPageGroup pg;
+        KPageGroup pg{m_kernel, m_block_info_manager};
         R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
             &pg, allocation_size / PageSize,
             KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));

         // Clear all the newly allocated pages.
-        for (const auto& it : pg.Nodes()) {
+        for (const auto& it : pg) {
             std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()),
                         m_heap_fill_value, it.GetSize());
         }
@@ -2610,11 +2571,23 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_
     if (is_map_only) {
         R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
     } else {
-        KPageGroup page_group;
-        R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
-            &page_group, needed_num_pages,
-            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
-        R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
+        // Create a page group to hold the pages we allocate.
+        KPageGroup pg{m_kernel, m_block_info_manager};
+
+        R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
+            &pg, needed_num_pages,
+            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
+
+        // Ensure that the page group is closed when we're done working with it.
+        SCOPE_EXIT({ pg.Close(); });
+
+        // Clear all pages.
+        for (const auto& it : pg) {
+            std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()),
+                        m_heap_fill_value, it.GetSize());
+        }
+
+        R_TRY(Operate(addr, needed_num_pages, pg, OperationType::MapGroup));
     }

     // Update the blocks.
@@ -2795,19 +2768,28 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_
     ASSERT(num_pages > 0);
     ASSERT(num_pages == page_group.GetNumPages());

-    for (const auto& node : page_group.Nodes()) {
-        const size_t size{node.GetNumPages() * PageSize};
+    switch (operation) {
+    case OperationType::MapGroup: {
+        // We want to maintain a new reference to every page in the group.
+        KScopedPageGroup spg(page_group);
+
+        for (const auto& node : page_group) {
+            const size_t size{node.GetNumPages() * PageSize};

-        switch (operation) {
-        case OperationType::MapGroup:
+            // Map the pages.
             m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
-            break;
-        default:
-            ASSERT(false);
-            break;
+
+            addr += size;
         }

-        addr += size;
+        // We succeeded! We want to persist the reference to the pages.
+        spg.CancelClose();
+
+        break;
+    }
+    default:
+        ASSERT(false);
+        break;
     }

     R_SUCCEED();
@@ -2822,13 +2804,29 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
     ASSERT(ContainsPages(addr, num_pages));

     switch (operation) {
-    case OperationType::Unmap:
+    case OperationType::Unmap: {
+        // Ensure that any pages we track close on exit.
+        KPageGroup pages_to_close{m_kernel, this->GetBlockInfoManager()};
+        SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
+
+        this->AddRegionToPages(addr, num_pages, pages_to_close);
         m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
         break;
+    }
+    case OperationType::MapFirst:
     case OperationType::Map: {
         ASSERT(map_addr);
         ASSERT(Common::IsAligned(map_addr, PageSize));
         m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
+
+        // Open references to pages, if we should.
+        if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
+            if (operation == OperationType::MapFirst) {
+                m_kernel.MemoryManager().OpenFirst(map_addr, num_pages);
+            } else {
+                m_kernel.MemoryManager().Open(map_addr, num_pages);
+            }
+        }
         break;
     }
     case OperationType::Separate: {
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index f1ca785d7..0a454b05b 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -107,6 +107,10 @@ public:
         return *m_page_table_impl;
     }

+    KBlockInfoManager* GetBlockInfoManager() {
+        return m_block_info_manager;
+    }
+
     bool CanContain(VAddr addr, size_t size, KMemoryState state) const;

 protected:
@@ -261,10 +265,6 @@ private:
     void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
                                                  size_t size, KMemoryPermission prot_perm);

-    // HACK: These will be removed once we automatically manage page reference counts.
-    void HACK_OpenPages(PAddr phys_addr, size_t num_pages);
-    void HACK_ClosePages(VAddr virt_addr, size_t num_pages);
-
     mutable KLightLock m_general_lock;
     mutable KLightLock m_map_physical_memory_lock;
@@ -488,6 +488,7 @@ private:
     std::unique_ptr<Common::PageTable> m_page_table_impl;

     Core::System& m_system;
+    KernelCore& m_kernel;
 };

 } // namespace Kernel
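The new MapFirst operation takes the initial reference on freshly allocated heap pages (OpenFirst), while plain Map adds a reference (Open) and Unmap drops one (Close). A minimal sketch of that reference-count lifecycle, with a hypothetical RefTracker standing in for KMemoryManager:

// Sketch only: first mapping seeds the refcount, aliases bump it, and the
// last Close returns the physical page to the pool.
#include <cassert>
#include <map>

struct RefTracker {
    std::map<unsigned long long, int> refs;  // physical page -> refcount
    void OpenFirst(unsigned long long page) { assert(refs[page] == 0); refs[page] = 1; }
    void Open(unsigned long long page) { assert(refs[page] > 0); ++refs[page]; }
    void Close(unsigned long long page) {
        if (--refs[page] == 0) {
            refs.erase(page);  // last reference gone: page returns to the pool
        }
    }
};

int main() {
    RefTracker mm;
    mm.OpenFirst(0x100);  // OperationType::MapFirst on a new allocation
    mm.Open(0x100);       // OperationType::Map aliasing the same page
    mm.Close(0x100);      // first unmap
    mm.Close(0x100);      // second unmap frees the page
    assert(mm.refs.empty());
}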
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 0aa68103c..3cf2b5d91 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -13,10 +13,7 @@ namespace Kernel {

 KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
-
-KSharedMemory::~KSharedMemory() {
-    kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemoryMax, size);
-}
+KSharedMemory::~KSharedMemory() = default;

 Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
                                  Svc::MemoryPermission owner_permission_,
@@ -49,7 +46,8 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     R_UNLESS(physical_address != 0, ResultOutOfMemory);

     //! Insert the result into our page group.
-    page_group.emplace(physical_address, num_pages);
+    page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager());
+    page_group->AddBlock(physical_address, num_pages);

     // Commit our reservation.
     memory_reservation.Commit();
@@ -62,7 +60,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     is_initialized = true;

     // Clear all pages in the memory.
-    for (const auto& block : page_group->Nodes()) {
+    for (const auto& block : *page_group) {
         std::memset(device_memory_.GetPointer<void>(block.GetAddress()), 0, block.GetSize());
     }

@@ -71,13 +69,8 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o

 void KSharedMemory::Finalize() {
     // Close and finalize the page group.
-    // page_group->Close();
-    // page_group->Finalize();
-
-    //! HACK: Manually close.
-    for (const auto& block : page_group->Nodes()) {
-        kernel.MemoryManager().Close(block.GetAddress(), block.GetNumPages());
-    }
+    page_group->Close();
+    page_group->Finalize();

     // Release the memory reservation.
     resource_limit->Release(LimitableResource::PhysicalMemoryMax, size);
diff --git a/src/core/hle/kernel/memory_types.h b/src/core/hle/kernel/memory_types.h
index 3975507bd..92b8b37ac 100644
--- a/src/core/hle/kernel/memory_types.h
+++ b/src/core/hle/kernel/memory_types.h
@@ -14,4 +14,7 @@ constexpr std::size_t PageSize{1 << PageBits};

 using Page = std::array<u8, PageSize>;

+using KPhysicalAddress = PAddr;
+using KProcessAddress = VAddr;
+
 } // namespace Kernel
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 788ee2160..aca442196 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1485,7 +1485,7 @@ static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle p
              ResultInvalidMemoryRegion);

     // Create a new page group.
-    KPageGroup pg;
+    KPageGroup pg{system.Kernel(), dst_pt.GetBlockInfoManager()};
     R_TRY(src_pt.MakeAndOpenPageGroup(
         std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
         KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 6c8d98946..f1c60d1f3 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -666,9 +666,10 @@ void BufferCache<P>::BindHostGeometryBuffers(bool is_indexed) {
         BindHostIndexBuffer();
     } else if constexpr (!HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) {
         const auto& draw_state = maxwell3d->draw_manager->GetDrawState();
-        if (draw_state.topology == Maxwell::PrimitiveTopology::Quads) {
-            runtime.BindQuadArrayIndexBuffer(draw_state.vertex_buffer.first,
-                                             draw_state.vertex_buffer.count);
+        if (draw_state.topology == Maxwell::PrimitiveTopology::Quads ||
+            draw_state.topology == Maxwell::PrimitiveTopology::QuadStrip) {
+            runtime.BindQuadIndexBuffer(draw_state.topology, draw_state.vertex_buffer.first,
+                                        draw_state.vertex_buffer.count);
         }
     }
     BindHostVertexBuffers();
diff --git a/src/video_core/host_shaders/vulkan_quad_indexed.comp b/src/video_core/host_shaders/vulkan_quad_indexed.comp
index a412f30ff..066fe4a9c 100644
--- a/src/video_core/host_shaders/vulkan_quad_indexed.comp
+++ b/src/video_core/host_shaders/vulkan_quad_indexed.comp
@@ -16,6 +16,7 @@ layout (std430, set = 0, binding = 1) writeonly buffer OutputBuffer {
 layout (push_constant) uniform PushConstants {
     uint base_vertex;
     int index_shift; // 0: uint8, 1: uint16, 2: uint32
+    int is_strip;    // 0: quads, 1: quad strip
 };

 void main() {
@@ -28,9 +29,10 @@ void main() {
     int flipped_shift = 2 - index_shift;
     int mask = (1 << flipped_shift) - 1;

-    const int quad_swizzle[6] = int[](0, 1, 2, 0, 2, 3);
+    const int quads_swizzle[6] = int[](0, 1, 2, 0, 2, 3);
+    const int quad_strip_swizzle[6] = int[](0, 3, 1, 0, 2, 3);
     for (uint vertex = 0; vertex < 6; ++vertex) {
-        int offset = primitive * 4 + quad_swizzle[vertex];
+        int offset = (is_strip == 0 ? primitive * 4 + quads_swizzle[vertex]
                                     : primitive * 2 + quad_strip_swizzle[vertex]);
         int int_offset = offset >> flipped_shift;
         int bit_offset = (offset & mask) * index_size;
         uint packed_input = input_indexes[int_offset];
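The shader now expands both quad lists and quad strips into triangle lists: quads consume four fresh vertices per primitive, while strips advance by two and reuse the previous quad's trailing edge. A host-side sketch of the same expansion (ExpandToTriangles is hypothetical, mirroring the shader's swizzle tables):

// Sketch only: CPU version of the quad/quad-strip -> triangle-list expansion.
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<std::uint32_t> ExpandToTriangles(std::uint32_t num_vertices, bool is_strip) {
    static constexpr int quads_swizzle[6] = {0, 1, 2, 0, 2, 3};
    static constexpr int quad_strip_swizzle[6] = {0, 3, 1, 0, 2, 3};

    const std::uint32_t num_quads =
        is_strip ? (num_vertices >= 4 ? (num_vertices - 2) / 2 : 0) : num_vertices / 4;

    std::vector<std::uint32_t> indices;
    indices.reserve(num_quads * 6);  // 6 triangle indices per quad
    for (std::uint32_t primitive = 0; primitive < num_quads; ++primitive) {
        for (int vertex = 0; vertex < 6; ++vertex) {
            indices.push_back(is_strip ? primitive * 2 + quad_strip_swizzle[vertex]
                                       : primitive * 4 + quads_swizzle[vertex]);
        }
    }
    return indices;
}

int main() {
    for (std::uint32_t i : ExpandToTriangles(6, true)) {
        std::cout << i << ' ';  // 0 3 1 0 2 3 2 5 3 2 4 5
    }
}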
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index 3e03c5cd6..ca52e2389 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -301,6 +301,8 @@ VkPrimitiveTopology PrimitiveTopology([[maybe_unused]] const Device& device,
         return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
     case Maxwell::PrimitiveTopology::Lines:
         return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
+    case Maxwell::PrimitiveTopology::LineLoop:
+        return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
     case Maxwell::PrimitiveTopology::LineStrip:
         return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
     case Maxwell::PrimitiveTopology::Triangles:
@@ -309,15 +311,28 @@ VkPrimitiveTopology PrimitiveTopology([[maybe_unused]] const Device& device,
         return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
     case Maxwell::PrimitiveTopology::TriangleFan:
         return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN;
+    case Maxwell::PrimitiveTopology::LinesAdjacency:
+        return VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY;
+    case Maxwell::PrimitiveTopology::LineStripAdjacency:
+        return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY;
+    case Maxwell::PrimitiveTopology::TrianglesAdjacency:
+        return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY;
+    case Maxwell::PrimitiveTopology::TriangleStripAdjacency:
+        return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY;
     case Maxwell::PrimitiveTopology::Quads:
-        // TODO(Rodrigo): Use VK_PRIMITIVE_TOPOLOGY_QUAD_LIST_EXT whenever it releases
+    case Maxwell::PrimitiveTopology::QuadStrip:
+        // TODO: Use VK_PRIMITIVE_TOPOLOGY_QUAD_LIST_EXT/VK_PRIMITIVE_TOPOLOGY_QUAD_STRIP_EXT
+        // whenever it releases
         return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
     case Maxwell::PrimitiveTopology::Patches:
         return VK_PRIMITIVE_TOPOLOGY_PATCH_LIST;
-    default:
-        UNIMPLEMENTED_MSG("Unimplemented topology={}", topology);
-        return {};
+    case Maxwell::PrimitiveTopology::Polygon:
+        LOG_WARNING(Render_Vulkan, "Draw mode is Polygon with a polygon mode of lines; this "
+                                   "should be a single body and not a bunch of triangles.");
+        return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN;
     }
+    UNIMPLEMENTED_MSG("Unimplemented topology={}", topology);
+    return {};
 }

 VkFormat VertexFormat(const Device& device, Maxwell::VertexAttribute::Type type,
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 84d36fea6..6b54d7111 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -51,15 +51,6 @@ size_t BytesPerIndex(VkIndexType index_type) {
     }
 }

-template <typename T>
-std::array<T, 6> MakeQuadIndices(u32 quad, u32 first) {
-    std::array<T, 6> indices{0, 1, 2, 0, 2, 3};
-    for (T& index : indices) {
-        index = static_cast<T>(first + index + quad * 4);
-    }
-    return indices;
-}
-
 vk::Buffer CreateBuffer(const Device& device, u64 size) {
     VkBufferUsageFlags flags =
         VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
@@ -123,6 +114,187 @@ VkBufferView Buffer::View(u32 offset, u32 size, VideoCore::Surface::PixelFormat
     return *views.back().handle;
 }

+class QuadIndexBuffer {
+public:
+    QuadIndexBuffer(const Device& device_, MemoryAllocator& memory_allocator_,
+                    Scheduler& scheduler_, StagingBufferPool& staging_pool_)
+        : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_},
+          staging_pool{staging_pool_} {}
+
+    virtual ~QuadIndexBuffer() = default;
+
+    void UpdateBuffer(u32 num_indices_) {
+        if (num_indices_ <= num_indices) {
+            return;
+        }
+
+        scheduler.Finish();
+
+        num_indices = num_indices_;
+        index_type = IndexTypeFromNumElements(device, num_indices);
+
+        const u32 num_quads = GetQuadsNum(num_indices);
+        const u32 num_triangle_indices = num_quads * 6;
+        const u32 num_first_offset_copies = 4;
+        const size_t bytes_per_index = BytesPerIndex(index_type);
+        const size_t size_bytes = num_triangle_indices * bytes_per_index * num_first_offset_copies;
+        buffer = device.GetLogical().CreateBuffer(VkBufferCreateInfo{
+            .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+            .pNext = nullptr,
+            .flags = 0,
+            .size = size_bytes,
+            .usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
+            .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+            .queueFamilyIndexCount = 0,
+            .pQueueFamilyIndices = nullptr,
+        });
+        if (device.HasDebuggingToolAttached()) {
+            buffer.SetObjectNameEXT("Quad LUT");
+        }
+        memory_commit = memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal);
+
+        const StagingBufferRef staging = staging_pool.Request(size_bytes, MemoryUsage::Upload);
+        u8* staging_data = staging.mapped_span.data();
+        const size_t quad_size = bytes_per_index * 6;
+
+        for (u32 first = 0; first < num_first_offset_copies; ++first) {
+            for (u32 quad = 0; quad < num_quads; ++quad) {
+                MakeAndUpdateIndices(staging_data, quad_size, quad, first);
+                staging_data += quad_size;
+            }
+        }
+
+        scheduler.RequestOutsideRenderPassOperationContext();
+        scheduler.Record([src_buffer = staging.buffer, src_offset = staging.offset,
+                          dst_buffer = *buffer, size_bytes](vk::CommandBuffer cmdbuf) {
+            const VkBufferCopy copy{
+                .srcOffset = src_offset,
+                .dstOffset = 0,
+                .size = size_bytes,
+            };
+            const VkBufferMemoryBarrier write_barrier{
+                .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
+                .pNext = nullptr,
+                .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
+                .dstAccessMask = VK_ACCESS_INDEX_READ_BIT,
+                .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .buffer = dst_buffer,
+                .offset = 0,
+                .size = size_bytes,
+            };
+            cmdbuf.CopyBuffer(src_buffer, dst_buffer, copy);
+            cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
+                                   VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, write_barrier);
+        });
+    }
+
+    void BindBuffer(u32 first) {
+        const VkIndexType index_type_ = index_type;
+        const size_t sub_first_offset = static_cast<size_t>(first % 4) * GetQuadsNum(num_indices);
+        const size_t offset =
+            (sub_first_offset + GetQuadsNum(first)) * 6ULL * BytesPerIndex(index_type);
+        scheduler.Record([buffer = *buffer, index_type_, offset](vk::CommandBuffer cmdbuf) {
+            cmdbuf.BindIndexBuffer(buffer, offset, index_type_);
+        });
+    }
+
+protected:
+    virtual u32 GetQuadsNum(u32 num_indices) const = 0;
+
+    virtual void MakeAndUpdateIndices(u8* staging_data, size_t quad_size, u32 quad, u32 first) = 0;
+
+    const Device& device;
+    MemoryAllocator& memory_allocator;
+    Scheduler& scheduler;
+    StagingBufferPool& staging_pool;
+
+    vk::Buffer buffer{};
+    MemoryCommit memory_commit{};
+    VkIndexType index_type{};
+    u32 num_indices = 0;
+};
+
+class QuadArrayIndexBuffer : public QuadIndexBuffer {
+public:
+    QuadArrayIndexBuffer(const Device& device_, MemoryAllocator& memory_allocator_,
+                         Scheduler& scheduler_, StagingBufferPool& staging_pool_)
+        : QuadIndexBuffer(device_, memory_allocator_, scheduler_, staging_pool_) {}
+
+    ~QuadArrayIndexBuffer() = default;
+
+private:
+    u32 GetQuadsNum(u32 num_indices_) const override {
+        return num_indices_ / 4;
+    }
+
+    template <typename T>
+    static std::array<T, 6> MakeIndices(u32 quad, u32 first) {
+        std::array<T, 6> indices{0, 1, 2, 0, 2, 3};
+        for (T& index : indices) {
+            index = static_cast<T>(first + index + quad * 4);
+        }
+        return indices;
+    }
+
+    void MakeAndUpdateIndices(u8* staging_data, size_t quad_size, u32 quad, u32 first) {
+        switch (index_type) {
+        case VK_INDEX_TYPE_UINT8_EXT:
+            std::memcpy(staging_data, MakeIndices<u8>(quad, first).data(), quad_size);
+            break;
+        case VK_INDEX_TYPE_UINT16:
+            std::memcpy(staging_data, MakeIndices<u16>(quad, first).data(), quad_size);
+            break;
+        case VK_INDEX_TYPE_UINT32:
+            std::memcpy(staging_data, MakeIndices<u32>(quad, first).data(), quad_size);
+            break;
+        default:
+            ASSERT(false);
+            break;
+        }
+    }
+};
+
+class QuadStripIndexBuffer : public QuadIndexBuffer {
+public:
+    QuadStripIndexBuffer(const Device& device_, MemoryAllocator& memory_allocator_,
+                         Scheduler& scheduler_, StagingBufferPool& staging_pool_)
+        : QuadIndexBuffer(device_, memory_allocator_, scheduler_, staging_pool_) {}
+
+    ~QuadStripIndexBuffer() = default;
+
+private:
+    u32 GetQuadsNum(u32 num_indices_) const override {
+        return num_indices_ >= 4 ? (num_indices_ - 2) / 2 : 0;
+    }
+
+    template <typename T>
+    static std::array<T, 6> MakeIndices(u32 quad, u32 first) {
+        std::array<T, 6> indices{0, 3, 1, 0, 2, 3};
+        for (T& index : indices) {
+            index = static_cast<T>(first + index + quad * 2);
+        }
+        return indices;
+    }
+
+    void MakeAndUpdateIndices(u8* staging_data, size_t quad_size, u32 quad, u32 first) {
+        switch (index_type) {
+        case VK_INDEX_TYPE_UINT8_EXT:
+            std::memcpy(staging_data, MakeIndices<u8>(quad, first).data(), quad_size);
+            break;
+        case VK_INDEX_TYPE_UINT16:
+            std::memcpy(staging_data, MakeIndices<u16>(quad, first).data(), quad_size);
+            break;
+        case VK_INDEX_TYPE_UINT32:
+            std::memcpy(staging_data, MakeIndices<u32>(quad, first).data(), quad_size);
+            break;
+        default:
+            ASSERT(false);
+            break;
+        }
+    }
+};
+
 BufferCacheRuntime::BufferCacheRuntime(const Device& device_, MemoryAllocator& memory_allocator_,
                                        Scheduler& scheduler_, StagingBufferPool& staging_pool_,
                                        UpdateDescriptorQueue& update_descriptor_queue_,
@@ -130,7 +302,12 @@ BufferCacheRuntime::BufferCacheRuntime(const Device& device_, MemoryAllocator& m
     : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_},
       staging_pool{staging_pool_}, update_descriptor_queue{update_descriptor_queue_},
       uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
-      quad_index_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue) {}
+      quad_index_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue) {
+    quad_array_index_buffer = std::make_shared<QuadArrayIndexBuffer>(device_, memory_allocator_,
+                                                                     scheduler_, staging_pool_);
+    quad_strip_index_buffer = std::make_shared<QuadStripIndexBuffer>(device_, memory_allocator_,
+                                                                     scheduler_, staging_pool_);
+}

 StagingBufferRef BufferCacheRuntime::UploadStagingBuffer(size_t size) {
     return staging_pool.Request(size, MemoryUsage::Upload);
@@ -245,10 +422,11 @@ void BufferCacheRuntime::BindIndexBuffer(PrimitiveTopology topology, IndexFormat
     VkIndexType vk_index_type = MaxwellToVK::IndexFormat(index_format);
     VkDeviceSize vk_offset = offset;
     VkBuffer vk_buffer = buffer;
-    if (topology == PrimitiveTopology::Quads) {
+    if (topology == PrimitiveTopology::Quads || topology == PrimitiveTopology::QuadStrip) {
         vk_index_type = VK_INDEX_TYPE_UINT32;
         std::tie(vk_buffer, vk_offset) =
-            quad_index_pass.Assemble(index_format, num_indices, base_vertex, buffer, offset);
+            quad_index_pass.Assemble(index_format, num_indices, base_vertex, buffer, offset,
+                                     topology == PrimitiveTopology::QuadStrip);
     } else if (vk_index_type == VK_INDEX_TYPE_UINT8_EXT && !device.IsExtIndexTypeUint8Supported()) {
         vk_index_type = VK_INDEX_TYPE_UINT16;
         std::tie(vk_buffer, vk_offset) = uint8_pass.Assemble(num_indices, buffer, offset);
@@ -263,7 +441,7 @@ void BufferCacheRuntime::BindIndexBuffer(PrimitiveTopology topology, IndexFormat
     });
 }

-void BufferCacheRuntime::BindQuadArrayIndexBuffer(u32 first, u32 count) {
+void BufferCacheRuntime::BindQuadIndexBuffer(PrimitiveTopology topology, u32 first, u32 count) {
     if (count == 0) {
         ReserveNullBuffer();
         scheduler.Record([this](vk::CommandBuffer cmdbuf) {
@@ -271,16 +449,14 @@ void BufferCacheRuntime::BindQuadArrayIndexBuffer(u32 first, u32 count) {
         });
         return;
     }
-    ReserveQuadArrayLUT(first + count, true);
-
-    // The LUT has the indices 0, 1, 2, and 3 copied as an array
-    // To apply these 'first' offsets we can apply an offset based on the modulus.
-    const VkIndexType index_type = quad_array_lut_index_type;
-    const size_t sub_first_offset = static_cast<size_t>(first % 4) * (current_num_indices / 4);
-    const size_t offset = (sub_first_offset + first / 4) * 6ULL * BytesPerIndex(index_type);
-    scheduler.Record([buffer = *quad_array_lut, index_type, offset](vk::CommandBuffer cmdbuf) {
-        cmdbuf.BindIndexBuffer(buffer, offset, index_type);
-    });
+
+    if (topology == PrimitiveTopology::Quads) {
+        quad_array_index_buffer->UpdateBuffer(first + count);
+        quad_array_index_buffer->BindBuffer(first);
+    } else if (topology == PrimitiveTopology::QuadStrip) {
+        quad_strip_index_buffer->UpdateBuffer(first + count);
+        quad_strip_index_buffer->BindBuffer(first);
+    }
 }

 void BufferCacheRuntime::BindVertexBuffer(u32 index, VkBuffer buffer, u32 offset, u32 size,
@@ -323,83 +499,6 @@ void BufferCacheRuntime::BindTransformFeedbackBuffer(u32 index, VkBuffer buffer,
     });
 }

-void BufferCacheRuntime::ReserveQuadArrayLUT(u32 num_indices, bool wait_for_idle) {
-    if (num_indices <= current_num_indices) {
-        return;
-    }
-    if (wait_for_idle) {
-        scheduler.Finish();
-    }
-    current_num_indices = num_indices;
-    quad_array_lut_index_type = IndexTypeFromNumElements(device, num_indices);
-
-    const u32 num_quads = num_indices / 4;
-    const u32 num_triangle_indices = num_quads * 6;
-    const u32 num_first_offset_copies = 4;
-    const size_t bytes_per_index = BytesPerIndex(quad_array_lut_index_type);
-    const size_t size_bytes = num_triangle_indices * bytes_per_index * num_first_offset_copies;
-    quad_array_lut = device.GetLogical().CreateBuffer(VkBufferCreateInfo{
-        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
-        .pNext = nullptr,
-        .flags = 0,
-        .size = size_bytes,
-        .usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
-        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
-        .queueFamilyIndexCount = 0,
-        .pQueueFamilyIndices = nullptr,
-    });
-    if (device.HasDebuggingToolAttached()) {
-        quad_array_lut.SetObjectNameEXT("Quad LUT");
-    }
-    quad_array_lut_commit = memory_allocator.Commit(quad_array_lut, MemoryUsage::DeviceLocal);
-
-    const StagingBufferRef staging = staging_pool.Request(size_bytes, MemoryUsage::Upload);
-    u8* staging_data = staging.mapped_span.data();
-    const size_t quad_size = bytes_per_index * 6;
-    for (u32 first = 0; first < num_first_offset_copies; ++first) {
-        for (u32 quad = 0; quad < num_quads; ++quad) {
-            switch (quad_array_lut_index_type) {
-            case VK_INDEX_TYPE_UINT8_EXT:
-                std::memcpy(staging_data, MakeQuadIndices<u8>(quad, first).data(), quad_size);
-                break;
-            case VK_INDEX_TYPE_UINT16:
-                std::memcpy(staging_data, MakeQuadIndices<u16>(quad, first).data(), quad_size);
-                break;
-            case VK_INDEX_TYPE_UINT32:
-                std::memcpy(staging_data, MakeQuadIndices<u32>(quad, first).data(), quad_size);
-                break;
-            default:
-                ASSERT(false);
-                break;
-            }
-            staging_data += quad_size;
-        }
-    }
-    scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([src_buffer = staging.buffer, src_offset = staging.offset,
-                      dst_buffer = *quad_array_lut, size_bytes](vk::CommandBuffer cmdbuf) {
-        const VkBufferCopy copy{
-            .srcOffset = src_offset,
-            .dstOffset = 0,
-            .size = size_bytes,
-        };
-        const VkBufferMemoryBarrier write_barrier{
-            .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
-            .pNext = nullptr,
-            .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
-            .dstAccessMask = VK_ACCESS_INDEX_READ_BIT,
-            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-            .buffer = dst_buffer,
-            .offset = 0,
-            .size = size_bytes,
-        };
-        cmdbuf.CopyBuffer(src_buffer, dst_buffer, copy);
-        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
-                               0, write_barrier);
-    });
-}
-
 void BufferCacheRuntime::ReserveNullBuffer() {
     if (null_buffer) {
         return;
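BindBuffer picks a window into the LUT rather than rebuilding it: the buffer holds four copies of the expanded index list, one per `first % 4` residue, and whole already-drawn quads are skipped by offset. A worked sketch of that arithmetic for the quad-array case (LutOffsetBytes is hypothetical):

// Sketch only: byte offset into the quad LUT for a draw starting at `first`.
#include <cstddef>
#include <iostream>

std::size_t LutOffsetBytes(unsigned first, unsigned lut_num_indices,
                           std::size_t bytes_per_index) {
    const auto quads_in_lut = lut_num_indices / 4;  // quads per copy
    const std::size_t sub_first_offset =
        static_cast<std::size_t>(first % 4) * quads_in_lut;  // select the copy
    const std::size_t first_quad = first / 4;  // skip already-consumed quads
    return (sub_first_offset + first_quad) * 6 * bytes_per_index;
}

int main() {
    // A LUT sized for 1024 indices (256 quads per copy), 4-byte indices:
    std::cout << LutOffsetBytes(0, 1024, 4) << '\n';  // 0: copy 0, quad 0
    std::cout << LutOffsetBytes(6, 1024, 4) << '\n';  // copy 2 (6 % 4), quad 1
}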
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index a15c8b39b..183b33632 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -50,6 +50,9 @@ private:
     std::vector<BufferView> views;
 };

+class QuadArrayIndexBuffer;
+class QuadStripIndexBuffer;
+
 class BufferCacheRuntime {
     friend Buffer;
@@ -86,7 +89,7 @@ public:
     void BindIndexBuffer(PrimitiveTopology topology, IndexFormat index_format, u32 num_indices,
                          u32 base_vertex, VkBuffer buffer, u32 offset, u32 size);

-    void BindQuadArrayIndexBuffer(u32 first, u32 count);
+    void BindQuadIndexBuffer(PrimitiveTopology topology, u32 first, u32 count);

     void BindVertexBuffer(u32 index, VkBuffer buffer, u32 offset, u32 size, u32 stride);
@@ -118,8 +121,6 @@ private:
         update_descriptor_queue.AddBuffer(buffer, offset, size);
     }

-    void ReserveQuadArrayLUT(u32 num_indices, bool wait_for_idle);
-
     void ReserveNullBuffer();

     const Device& device;
@@ -128,10 +129,8 @@ private:
     StagingBufferPool& staging_pool;
     UpdateDescriptorQueue& update_descriptor_queue;

-    vk::Buffer quad_array_lut;
-    MemoryCommit quad_array_lut_commit;
-    VkIndexType quad_array_lut_index_type{};
-    u32 current_num_indices = 0;
+    std::shared_ptr<QuadArrayIndexBuffer> quad_array_index_buffer;
+    std::shared_ptr<QuadStripIndexBuffer> quad_strip_index_buffer;

     vk::Buffer null_buffer;
     MemoryCommit null_buffer_commit;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 2c00979d7..1a316b6eb 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -245,7 +245,7 @@ QuadIndexedPass::QuadIndexedPass(const Device& device_, Scheduler& scheduler_,
                                  UpdateDescriptorQueue& update_descriptor_queue_)
     : ComputePass(device_, descriptor_pool_, INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS,
                   INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE, INPUT_OUTPUT_BANK_INFO,
-                  COMPUTE_PUSH_CONSTANT_RANGE<sizeof(u32) * 2>, VULKAN_QUAD_INDEXED_COMP_SPV),
+                  COMPUTE_PUSH_CONSTANT_RANGE<sizeof(u32) * 3>, VULKAN_QUAD_INDEXED_COMP_SPV),
       scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
       update_descriptor_queue{update_descriptor_queue_} {}

@@ -253,7 +253,7 @@ QuadIndexedPass::~QuadIndexedPass() = default;

 std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
     Tegra::Engines::Maxwell3D::Regs::IndexFormat index_format, u32 num_vertices, u32 base_vertex,
-    VkBuffer src_buffer, u32 src_offset) {
+    VkBuffer src_buffer, u32 src_offset, bool is_strip) {
     const u32 index_shift = [index_format] {
         switch (index_format) {
         case Tegra::Engines::Maxwell3D::Regs::IndexFormat::UnsignedByte:
@@ -267,7 +267,7 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
         return 2;
     }();
     const u32 input_size = num_vertices << index_shift;
-    const u32 num_tri_vertices = (num_vertices / 4) * 6;
+    const u32 num_tri_vertices = (is_strip ? (num_vertices - 2) / 2 : num_vertices / 4) * 6;

     const std::size_t staging_size = num_tri_vertices * sizeof(u32);
     const auto staging = staging_buffer_pool.Request(staging_size, MemoryUsage::DeviceLocal);
@@ -278,8 +278,8 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
     const void* const descriptor_data{update_descriptor_queue.UpdateData()};

     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([this, descriptor_data, num_tri_vertices, base_vertex,
-                      index_shift](vk::CommandBuffer cmdbuf) {
+    scheduler.Record([this, descriptor_data, num_tri_vertices, base_vertex, index_shift,
+                      is_strip](vk::CommandBuffer cmdbuf) {
         static constexpr u32 DISPATCH_SIZE = 1024;
         static constexpr VkMemoryBarrier WRITE_BARRIER{
             .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
             .pNext = nullptr,
             .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
             .dstAccessMask = VK_ACCESS_INDEX_READ_BIT,
         };
-        const std::array<u32, 2> push_constants{base_vertex, index_shift};
+        const std::array<u32, 3> push_constants{base_vertex, index_shift, is_strip ? 1u : 0u};
         const VkDescriptorSet set = descriptor_allocator.Commit();
         device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
         cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index 5d32e3caf..c4c8fa081 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -74,7 +74,7 @@ public:

     std::pair<VkBuffer, VkDeviceSize> Assemble(
         Tegra::Engines::Maxwell3D::Regs::IndexFormat index_format, u32 num_vertices,
-        u32 base_vertex, VkBuffer src_buffer, u32 src_offset);
+        u32 base_vertex, VkBuffer src_buffer, u32 src_offset, bool is_strip);

 private:
     Scheduler& scheduler;
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 4b7126c30..ac1eb9895 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -138,12 +138,16 @@ DrawParams MakeDrawParams(const MaxwellDrawState& draw_state, u32 num_instances,
         .first_index = is_indexed ? draw_state.index_buffer.first : 0,
         .is_indexed = is_indexed,
     };
+    // 6 triangle vertices per quad, base vertex is part of the index
+    // See BindQuadIndexBuffer for more details
     if (draw_state.topology == Maxwell::PrimitiveTopology::Quads) {
-        // 6 triangle vertices per quad, base vertex is part of the index
-        // See BindQuadArrayIndexBuffer for more details
         params.num_vertices = (params.num_vertices / 4) * 6;
         params.base_vertex = 0;
         params.is_indexed = true;
+    } else if (draw_state.topology == Maxwell::PrimitiveTopology::QuadStrip) {
+        params.num_vertices = (params.num_vertices - 2) / 2 * 6;
+        params.base_vertex = 0;
+        params.is_indexed = true;
     }
     return params;
 }
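MakeDrawParams converts a non-indexed quad or quad-strip draw into an indexed triangle draw, so the vertex count is rewritten: n/4*6 for independent quads, (n-2)/2*6 for strips. A small sketch checking that arithmetic (TriangleIndexCount is hypothetical):

// Sketch only: vertex-count conversion used when lowering quads to triangles.
#include <cassert>

unsigned TriangleIndexCount(unsigned num_vertices, bool is_strip) {
    if (is_strip) {
        // Each quad after the first shares an edge with its predecessor.
        return num_vertices >= 4 ? (num_vertices - 2) / 2 * 6 : 0;
    }
    return num_vertices / 4 * 6;  // independent quads
}

int main() {
    assert(TriangleIndexCount(8, false) == 12);  // 2 quads -> 4 triangles
    assert(TriangleIndexCount(8, true) == 18);   // 3 strip quads -> 6 triangles
}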
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp
index 2ea4f367b..3e51426c8 100644
--- a/src/yuzu/configuration/config.cpp
+++ b/src/yuzu/configuration/config.cpp
@@ -941,7 +941,6 @@ void Config::ReadValues() {
     ReadRendererValues();
     ReadAudioValues();
     ReadSystemValues();
-    ReadMultiplayerValues();
 }

 void Config::SavePlayerValue(std::size_t player_index) {
@@ -1099,7 +1098,6 @@ void Config::SaveValues() {
     SaveRendererValues();
     SaveAudioValues();
     SaveSystemValues();
-    SaveMultiplayerValues();
 }

 void Config::SaveAudioValues() {