From 93e20867b0ab2e737e231a9b5bb29d40947fb311 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 12 Feb 2021 17:58:31 -0800 Subject: hle: kernel: Migrate PageHeap/PageTable to KPageHeap/KPageTable. --- src/core/CMakeLists.txt | 8 +- src/core/hle/kernel/k_memory_manager.cpp | 10 +- src/core/hle/kernel/k_memory_manager.h | 8 +- src/core/hle/kernel/k_page_heap.cpp | 116 +++ src/core/hle/kernel/k_page_heap.h | 193 ++++ src/core/hle/kernel/k_page_table.cpp | 1190 +++++++++++++++++++++++ src/core/hle/kernel/k_page_table.h | 279 ++++++ src/core/hle/kernel/k_shared_memory.cpp | 5 +- src/core/hle/kernel/memory/page_heap.cpp | 119 --- src/core/hle/kernel/memory/page_heap.h | 196 ---- src/core/hle/kernel/memory/page_table.cpp | 1189 ---------------------- src/core/hle/kernel/memory/page_table.h | 281 ------ src/core/hle/kernel/process.cpp | 8 +- src/core/hle/kernel/process.h | 11 +- src/core/hle/kernel/process_capability.cpp | 14 +- src/core/hle/kernel/process_capability.h | 16 +- src/core/hle/kernel/svc.cpp | 6 +- src/core/hle/kernel/transfer_memory.cpp | 2 +- src/core/hle/service/ldr/ldr.cpp | 7 +- src/core/loader/deconstructed_rom_directory.cpp | 2 +- src/core/loader/elf.cpp | 2 +- src/core/loader/kip.cpp | 2 +- src/core/loader/nro.cpp | 2 +- src/core/loader/nso.cpp | 2 +- src/core/memory.cpp | 2 +- src/core/memory/cheat_engine.cpp | 2 +- src/core/reporter.cpp | 2 +- src/video_core/memory_manager.cpp | 2 +- 28 files changed, 1830 insertions(+), 1846 deletions(-) create mode 100644 src/core/hle/kernel/k_page_heap.cpp create mode 100644 src/core/hle/kernel/k_page_heap.h create mode 100644 src/core/hle/kernel/k_page_table.cpp create mode 100644 src/core/hle/kernel/k_page_table.h delete mode 100644 src/core/hle/kernel/memory/page_heap.cpp delete mode 100644 src/core/hle/kernel/memory/page_heap.h delete mode 100644 src/core/hle/kernel/memory/page_table.cpp delete mode 100644 src/core/hle/kernel/memory/page_table.h diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 2976187c0..17f251c37 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -173,7 +173,11 @@ add_library(core STATIC hle/kernel/k_memory_manager.cpp hle/kernel/k_memory_manager.h hle/kernel/k_page_bitmap.h + hle/kernel/k_page_heap.cpp + hle/kernel/k_page_heap.h hle/kernel/k_page_linked_list.h + hle/kernel/k_page_table.cpp + hle/kernel/k_page_table.h hle/kernel/k_priority_queue.h hle/kernel/k_readable_event.cpp hle/kernel/k_readable_event.h @@ -202,10 +206,6 @@ add_library(core STATIC hle/kernel/kernel.cpp hle/kernel/kernel.h hle/kernel/memory_types.h - hle/kernel/memory/page_heap.cpp - hle/kernel/memory/page_heap.h - hle/kernel/memory/page_table.cpp - hle/kernel/memory/page_table.h hle/kernel/object.cpp hle/kernel/object.h hle/kernel/physical_core.cpp diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp index 5ea11a918..9027602bf 100644 --- a/src/core/hle/kernel/k_memory_manager.cpp +++ b/src/core/hle/kernel/k_memory_manager.cpp @@ -21,7 +21,7 @@ std::size_t KMemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u const auto ref_count_size{(size / PageSize) * sizeof(u16)}; const auto optimize_map_size{(Common::AlignUp((size / PageSize), 64) / 64) * sizeof(u64)}; const auto manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)}; - const auto page_heap_size{Memory::PageHeap::CalculateManagementOverheadSize(size)}; + const auto page_heap_size{KPageHeap::CalculateManagementOverheadSize(size)}; const auto total_metadata_size{manager_size + 
page_heap_size}; ASSERT(manager_size <= total_metadata_size); ASSERT(Common::IsAligned(total_metadata_size, PageSize)); @@ -59,7 +59,7 @@ VAddr KMemoryManager::AllocateAndOpenContinuous(std::size_t num_pages, std::size std::lock_guard lock{pool_locks[pool_index]}; // Choose a heap based on our page size request - const s32 heap_index{Memory::PageHeap::GetAlignedBlockIndex(num_pages, align_pages)}; + const s32 heap_index{KPageHeap::GetAlignedBlockIndex(num_pages, align_pages)}; // Loop, trying to iterate from each block // TODO (bunnei): Support multiple managers @@ -72,7 +72,7 @@ VAddr KMemoryManager::AllocateAndOpenContinuous(std::size_t num_pages, std::size } // If we allocated more than we need, free some - const auto allocated_pages{Memory::PageHeap::GetBlockNumPages(heap_index)}; + const auto allocated_pages{KPageHeap::GetBlockNumPages(heap_index)}; if (allocated_pages > num_pages) { chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages); } @@ -94,7 +94,7 @@ ResultCode KMemoryManager::Allocate(KPageLinkedList& page_list, std::size_t num_ std::lock_guard lock{pool_locks[pool_index]}; // Choose a heap based on our page size request - const s32 heap_index{Memory::PageHeap::GetBlockIndex(num_pages)}; + const s32 heap_index{KPageHeap::GetBlockIndex(num_pages)}; if (heap_index < 0) { return ResultOutOfMemory; } @@ -113,7 +113,7 @@ ResultCode KMemoryManager::Allocate(KPageLinkedList& page_list, std::size_t num_ // Keep allocating until we've allocated all our pages for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) { - const auto pages_per_alloc{Memory::PageHeap::GetBlockNumPages(index)}; + const auto pages_per_alloc{KPageHeap::GetBlockNumPages(index)}; while (num_pages >= pages_per_alloc) { // Allocate a block diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h index 377f642d8..ae9f683b8 100644 --- a/src/core/hle/kernel/k_memory_manager.h +++ b/src/core/hle/kernel/k_memory_manager.h @@ -10,14 +10,12 @@ #include "common/common_funcs.h" #include "common/common_types.h" -#include "core/hle/kernel/memory/page_heap.h" +#include "core/hle/kernel/k_page_heap.h" #include "core/hle/result.h" namespace Kernel { -class KPageLinkedList; -} -namespace Kernel { +class KPageLinkedList; class KMemoryManager final : NonCopyable { public: @@ -84,7 +82,7 @@ private: using RefCount = u16; private: - Memory::PageHeap heap; + KPageHeap heap; Pool pool{}; public: diff --git a/src/core/hle/kernel/k_page_heap.cpp b/src/core/hle/kernel/k_page_heap.cpp new file mode 100644 index 000000000..07e062922 --- /dev/null +++ b/src/core/hle/kernel/k_page_heap.cpp @@ -0,0 +1,116 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "core/core.h" +#include "core/hle/kernel/k_page_heap.h" +#include "core/memory.h" + +namespace Kernel { + +void KPageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) { + // Check our assumptions + ASSERT(Common::IsAligned((address), PageSize)); + ASSERT(Common::IsAligned(size, PageSize)); + + // Set our members + heap_address = address; + heap_size = size; + + // Setup bitmaps + metadata.resize(metadata_size / sizeof(u64)); + u64* cur_bitmap_storage{metadata.data()}; + for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) { + const std::size_t cur_block_shift{MemoryBlockPageShifts[i]}; + const std::size_t next_block_shift{ + (i != MemoryBlockPageShifts.size() - 1) ? 
MemoryBlockPageShifts[i + 1] : 0}; + cur_bitmap_storage = blocks[i].Initialize(heap_address, heap_size, cur_block_shift, + next_block_shift, cur_bitmap_storage); + } +} + +VAddr KPageHeap::AllocateBlock(s32 index, bool random) { + const std::size_t needed_size{blocks[index].GetSize()}; + + for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) { + if (const VAddr addr{blocks[i].PopBlock(random)}; addr) { + if (const std::size_t allocated_size{blocks[i].GetSize()}; + allocated_size > needed_size) { + Free(addr + needed_size, (allocated_size - needed_size) / PageSize); + } + return addr; + } + } + + return 0; +} + +void KPageHeap::FreeBlock(VAddr block, s32 index) { + do { + block = blocks[index++].PushBlock(block); + } while (block != 0); +} + +void KPageHeap::Free(VAddr addr, std::size_t num_pages) { + // Freeing no pages is a no-op + if (num_pages == 0) { + return; + } + + // Find the largest block size that we can free, and free as many as possible + s32 big_index{static_cast<s32>(MemoryBlockPageShifts.size()) - 1}; + const VAddr start{addr}; + const VAddr end{(num_pages * PageSize) + addr}; + VAddr before_start{start}; + VAddr before_end{start}; + VAddr after_start{end}; + VAddr after_end{end}; + while (big_index >= 0) { + const std::size_t block_size{blocks[big_index].GetSize()}; + const VAddr big_start{Common::AlignUp((start), block_size)}; + const VAddr big_end{Common::AlignDown((end), block_size)}; + if (big_start < big_end) { + // Free as many big blocks as we can + for (auto block{big_start}; block < big_end; block += block_size) { + FreeBlock(block, big_index); + } + before_end = big_start; + after_start = big_end; + break; + } + big_index--; + } + ASSERT(big_index >= 0); + + // Free space before the big blocks + for (s32 i{big_index - 1}; i >= 0; i--) { + const std::size_t block_size{blocks[i].GetSize()}; + while (before_start + block_size <= before_end) { + before_end -= block_size; + FreeBlock(before_end, i); + } + } + + // Free space after the big blocks + for (s32 i{big_index - 1}; i >= 0; i--) { + const std::size_t block_size{blocks[i].GetSize()}; + while (after_start + block_size <= after_end) { + FreeBlock(after_start, i); + after_start += block_size; + } + } +} + +std::size_t KPageHeap::CalculateManagementOverheadSize(std::size_t region_size) { + std::size_t overhead_size = 0; + for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) { + const std::size_t cur_block_shift{MemoryBlockPageShifts[i]}; + const std::size_t next_block_shift{ + (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0}; + overhead_size += KPageHeap::Block::CalculateManagementOverheadSize( + region_size, cur_block_shift, next_block_shift); + } + return Common::AlignUp(overhead_size, PageSize); +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h new file mode 100644 index 000000000..de5d6a189 --- /dev/null +++ b/src/core/hle/kernel/k_page_heap.h @@ -0,0 +1,193 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include <algorithm> +#include <array> +#include <vector> + +#include "common/alignment.h" +#include "common/assert.h" +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "core/hle/kernel/k_page_bitmap.h" +#include "core/hle/kernel/memory_types.h" + +namespace Kernel { + +class KPageHeap final : NonCopyable { +public: + static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) { + const auto target_pages{std::max(num_pages, align_pages)}; + for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) { + if (target_pages <= + (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) { + return static_cast<s32>(i); + } + } + return -1; + } + + static constexpr s32 GetBlockIndex(std::size_t num_pages) { + for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) { + if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) { + return i; + } + } + return -1; + } + + static constexpr std::size_t GetBlockSize(std::size_t index) { + return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index]; + } + + static constexpr std::size_t GetBlockNumPages(std::size_t index) { + return GetBlockSize(index) / PageSize; + } + +private: + static constexpr std::size_t NumMemoryBlockPageShifts{7}; + static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{ + 0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E, + }; + + class Block final : NonCopyable { + private: + KPageBitmap bitmap; + VAddr heap_address{}; + uintptr_t end_offset{}; + std::size_t block_shift{}; + std::size_t next_block_shift{}; + + public: + Block() = default; + + constexpr std::size_t GetShift() const { + return block_shift; + } + constexpr std::size_t GetNextShift() const { + return next_block_shift; + } + constexpr std::size_t GetSize() const { + return static_cast<std::size_t>(1) << GetShift(); + } + constexpr std::size_t GetNumPages() const { + return GetSize() / PageSize; + } + constexpr std::size_t GetNumFreeBlocks() const { + return bitmap.GetNumBits(); + } + constexpr std::size_t GetNumFreePages() const { + return GetNumFreeBlocks() * GetNumPages(); + } + + u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs, + u64* bit_storage) { + // Set shifts + block_shift = bs; + next_block_shift = nbs; + + // Align up the address + VAddr end{addr + size}; + const auto align{(next_block_shift != 0) ? 
(1ULL << next_block_shift) + : (1ULL << block_shift)}; + addr = Common::AlignDown((addr), align); + end = Common::AlignUp((end), align); + + heap_address = addr; + end_offset = (end - addr) / (1ULL << block_shift); + return bitmap.Initialize(bit_storage, end_offset); + } + + VAddr PushBlock(VAddr address) { + // Set the bit for the free block + std::size_t offset{(address - heap_address) >> GetShift()}; + bitmap.SetBit(offset); + + // If we have a next shift, try to clear the blocks below and return the address + if (GetNextShift()) { + const auto diff{1ULL << (GetNextShift() - GetShift())}; + offset = Common::AlignDown(offset, diff); + if (bitmap.ClearRange(offset, diff)) { + return heap_address + (offset << GetShift()); + } + } + + // We couldn't coalesce, or we're already as big as possible + return 0; + } + + VAddr PopBlock(bool random) { + // Find a free block + const s64 soffset{bitmap.FindFreeBlock(random)}; + if (soffset < 0) { + return 0; + } + const auto offset{static_cast<std::size_t>(soffset)}; + + // Update our tracking and return it + bitmap.ClearBit(offset); + return heap_address + (offset << GetShift()); + } + + public: + static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size, + std::size_t cur_block_shift, + std::size_t next_block_shift) { + const auto cur_block_size{(1ULL << cur_block_shift)}; + const auto next_block_size{(1ULL << next_block_shift)}; + const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size}; + return KPageBitmap::CalculateManagementOverheadSize( + (align * 2 + Common::AlignUp(region_size, align)) / cur_block_size); + } + }; + +public: + KPageHeap() = default; + + constexpr VAddr GetAddress() const { + return heap_address; + } + constexpr std::size_t GetSize() const { + return heap_size; + } + constexpr VAddr GetEndAddress() const { + return GetAddress() + GetSize(); + } + constexpr std::size_t GetPageOffset(VAddr block) const { + return (block - GetAddress()) / PageSize; + } + + void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size); + VAddr AllocateBlock(s32 index, bool random); + void Free(VAddr addr, std::size_t num_pages); + + void UpdateUsedSize() { + used_size = heap_size - (GetNumFreePages() * PageSize); + } + + static std::size_t CalculateManagementOverheadSize(std::size_t region_size); + +private: + constexpr std::size_t GetNumFreePages() const { + std::size_t num_free{}; + + for (const auto& block : blocks) { + num_free += block.GetNumFreePages(); + } + + return num_free; + } + + void FreeBlock(VAddr block, s32 index); + + VAddr heap_address{}; + std::size_t heap_size{}; + std::size_t used_size{}; + std::array<Block, NumMemoryBlockPageShifts> blocks{}; + std::vector<u64> metadata; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp new file mode 100644 index 000000000..d09d5ce48 --- /dev/null +++ b/src/core/hle/kernel/k_page_table.cpp @@ -0,0 +1,1190 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include "common/alignment.h" +#include "common/assert.h" +#include "common/scope_exit.h" +#include "core/core.h" +#include "core/hle/kernel/k_address_space_info.h" +#include "core/hle/kernel/k_memory_block.h" +#include "core/hle/kernel/k_memory_block_manager.h" +#include "core/hle/kernel/k_page_linked_list.h" +#include "core/hle/kernel/k_page_table.h" +#include "core/hle/kernel/k_resource_limit.h" +#include "core/hle/kernel/k_scoped_resource_reservation.h" +#include "core/hle/kernel/k_system_control.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/process.h" +#include "core/hle/kernel/svc_results.h" +#include "core/memory.h" + +namespace Kernel { + +namespace { + +constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { + switch (as_type) { + case FileSys::ProgramAddressSpaceType::Is32Bit: + case FileSys::ProgramAddressSpaceType::Is32BitNoMap: + return 32; + case FileSys::ProgramAddressSpaceType::Is36Bit: + return 36; + case FileSys::ProgramAddressSpaceType::Is39Bit: + return 39; + default: + UNREACHABLE(); + return {}; + } +} + +constexpr u64 GetAddressInRange(const KMemoryInfo& info, VAddr addr) { + if (info.GetAddress() < addr) { + return addr; + } + return info.GetAddress(); +} + +constexpr std::size_t GetSizeInRange(const KMemoryInfo& info, VAddr start, VAddr end) { + std::size_t size{info.GetSize()}; + if (info.GetAddress() < start) { + size -= start - info.GetAddress(); + } + if (info.GetEndAddress() > end) { + size -= info.GetEndAddress() - end; + } + return size; +} + +} // namespace + +KPageTable::KPageTable(Core::System& system) : system{system} {} + +ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, + bool enable_aslr, VAddr code_addr, + std::size_t code_size, KMemoryManager::Pool pool) { + + const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) { + return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type); + }; + const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) { + return KAddressSpaceInfo::GetAddressSpaceSize(address_space_width, type); + }; + + // Set our width and heap/alias sizes + address_space_width = GetAddressSpaceWidthFromType(as_type); + const VAddr start = 0; + const VAddr end{1ULL << address_space_width}; + std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)}; + std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)}; + + ASSERT(start <= code_addr); + ASSERT(code_addr < code_addr + code_size); + ASSERT(code_addr + code_size - 1 <= end - 1); + + // Adjust heap/alias size if we don't have an alias region + if (as_type == FileSys::ProgramAddressSpaceType::Is32BitNoMap) { + heap_region_size += alias_region_size; + alias_region_size = 0; + } + + // Set code regions and determine remaining + constexpr std::size_t RegionAlignment{2 * 1024 * 1024}; + VAddr process_code_start{}; + VAddr process_code_end{}; + std::size_t stack_region_size{}; + std::size_t kernel_map_region_size{}; + + if (address_space_width == 39) { + alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias); + heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap); + stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack); + kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); + code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); + code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); + alias_code_region_start = code_region_start; + 
alias_code_region_end = code_region_end; + process_code_start = Common::AlignDown(code_addr, RegionAlignment); + process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment); + } else { + stack_region_size = 0; + kernel_map_region_size = 0; + code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); + code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); + stack_region_start = code_region_start; + alias_code_region_start = code_region_start; + alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + + GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); + stack_region_end = code_region_end; + kernel_map_region_start = code_region_start; + kernel_map_region_end = code_region_end; + process_code_start = code_region_start; + process_code_end = code_region_end; + } + + // Set other basic fields + is_aslr_enabled = enable_aslr; + address_space_start = start; + address_space_end = end; + is_kernel = false; + + // Determine the region we can place our undetermineds in + VAddr alloc_start{}; + std::size_t alloc_size{}; + if ((process_code_start - code_region_start) >= (end - process_code_end)) { + alloc_start = code_region_start; + alloc_size = process_code_start - code_region_start; + } else { + alloc_start = process_code_end; + alloc_size = end - process_code_end; + } + const std::size_t needed_size{ + (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)}; + if (alloc_size < needed_size) { + UNREACHABLE(); + return ResultOutOfMemory; + } + + const std::size_t remaining_size{alloc_size - needed_size}; + + // Determine random placements for each region + std::size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{}; + if (enable_aslr) { + alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * + RegionAlignment; + heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * + RegionAlignment; + stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * + RegionAlignment; + kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * + RegionAlignment; + } + + // Setup heap and alias regions + alias_region_start = alloc_start + alias_rnd; + alias_region_end = alias_region_start + alias_region_size; + heap_region_start = alloc_start + heap_rnd; + heap_region_end = heap_region_start + heap_region_size; + + if (alias_rnd <= heap_rnd) { + heap_region_start += alias_region_size; + heap_region_end += alias_region_size; + } else { + alias_region_start += heap_region_size; + alias_region_end += heap_region_size; + } + + // Setup stack region + if (stack_region_size) { + stack_region_start = alloc_start + stack_rnd; + stack_region_end = stack_region_start + stack_region_size; + + if (alias_rnd < stack_rnd) { + stack_region_start += alias_region_size; + stack_region_end += alias_region_size; + } else { + alias_region_start += stack_region_size; + alias_region_end += stack_region_size; + } + + if (heap_rnd < stack_rnd) { + stack_region_start += heap_region_size; + stack_region_end += heap_region_size; + } else { + heap_region_start += stack_region_size; + heap_region_end += stack_region_size; + } + } + + // Setup kernel map region + if (kernel_map_region_size) { + kernel_map_region_start = alloc_start + kmap_rnd; + kernel_map_region_end = kernel_map_region_start + kernel_map_region_size; + + if (alias_rnd < kmap_rnd) { + kernel_map_region_start += alias_region_size; + kernel_map_region_end += 
alias_region_size; + } else { + alias_region_start += kernel_map_region_size; + alias_region_end += kernel_map_region_size; + } + + if (heap_rnd < kmap_rnd) { + kernel_map_region_start += heap_region_size; + kernel_map_region_end += heap_region_size; + } else { + heap_region_start += kernel_map_region_size; + heap_region_end += kernel_map_region_size; + } + + if (stack_region_size) { + if (stack_rnd < kmap_rnd) { + kernel_map_region_start += stack_region_size; + kernel_map_region_end += stack_region_size; + } else { + stack_region_start += kernel_map_region_size; + stack_region_end += kernel_map_region_size; + } + } + } + + // Set heap members + current_heap_end = heap_region_start; + max_heap_size = 0; + max_physical_memory_size = 0; + + // Ensure that we regions inside our address space + auto IsInAddressSpace = [&](VAddr addr) { + return address_space_start <= addr && addr <= address_space_end; + }; + ASSERT(IsInAddressSpace(alias_region_start)); + ASSERT(IsInAddressSpace(alias_region_end)); + ASSERT(IsInAddressSpace(heap_region_start)); + ASSERT(IsInAddressSpace(heap_region_end)); + ASSERT(IsInAddressSpace(stack_region_start)); + ASSERT(IsInAddressSpace(stack_region_end)); + ASSERT(IsInAddressSpace(kernel_map_region_start)); + ASSERT(IsInAddressSpace(kernel_map_region_end)); + + // Ensure that we selected regions that don't overlap + const VAddr alias_start{alias_region_start}; + const VAddr alias_last{alias_region_end - 1}; + const VAddr heap_start{heap_region_start}; + const VAddr heap_last{heap_region_end - 1}; + const VAddr stack_start{stack_region_start}; + const VAddr stack_last{stack_region_end - 1}; + const VAddr kmap_start{kernel_map_region_start}; + const VAddr kmap_last{kernel_map_region_end - 1}; + ASSERT(alias_last < heap_start || heap_last < alias_start); + ASSERT(alias_last < stack_start || stack_last < alias_start); + ASSERT(alias_last < kmap_start || kmap_last < alias_start); + ASSERT(heap_last < stack_start || stack_last < heap_start); + ASSERT(heap_last < kmap_start || kmap_last < heap_start); + + current_heap_addr = heap_region_start; + heap_capacity = 0; + physical_memory_usage = 0; + memory_pool = pool; + + page_table_impl.Resize(address_space_width, PageBits); + + return InitializeMemoryLayout(start, end); +} + +ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state, + KMemoryPermission perm) { + std::lock_guard lock{page_table_lock}; + + const u64 size{num_pages * PageSize}; + + if (!CanContain(addr, size, state)) { + return ResultInvalidCurrentMemory; + } + + if (IsRegionMapped(addr, size)) { + return ResultInvalidCurrentMemory; + } + + KPageLinkedList page_linked_list; + CASCADE_CODE( + system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool)); + CASCADE_CODE(Operate(addr, num_pages, page_linked_list, OperationType::MapGroup)); + + block_manager->Update(addr, num_pages, state, perm); + + return RESULT_SUCCESS; +} + +ResultCode KPageTable::MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + const std::size_t num_pages{size / PageSize}; + + KMemoryState state{}; + KMemoryPermission perm{}; + CASCADE_CODE(CheckMemoryState(&state, &perm, nullptr, src_addr, size, KMemoryState::All, + KMemoryState::Normal, KMemoryPermission::Mask, + KMemoryPermission::ReadAndWrite, KMemoryAttribute::Mask, + KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)); + + if (IsRegionMapped(dst_addr, size)) { + return ResultInvalidCurrentMemory; + } + + 
KPageLinkedList page_linked_list; + AddRegionToPages(src_addr, num_pages, page_linked_list); + + { + auto block_guard = detail::ScopeExit( + [&] { Operate(src_addr, num_pages, perm, OperationType::ChangePermissions); }); + + CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None, + OperationType::ChangePermissions)); + CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::None)); + + block_guard.Cancel(); + } + + block_manager->Update(src_addr, num_pages, state, KMemoryPermission::None, + KMemoryAttribute::Locked); + block_manager->Update(dst_addr, num_pages, KMemoryState::AliasCode); + + return RESULT_SUCCESS; +} + +ResultCode KPageTable::UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + if (!size) { + return RESULT_SUCCESS; + } + + const std::size_t num_pages{size / PageSize}; + + CASCADE_CODE(CheckMemoryState(nullptr, nullptr, nullptr, src_addr, size, KMemoryState::All, + KMemoryState::Normal, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::Mask, + KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped)); + + KMemoryState state{}; + CASCADE_CODE(CheckMemoryState( + &state, nullptr, nullptr, dst_addr, PageSize, KMemoryState::FlagCanCodeAlias, + KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)); + CASCADE_CODE(CheckMemoryState(dst_addr, size, KMemoryState::All, state, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::Mask, + KMemoryAttribute::None)); + CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); + + block_manager->Update(dst_addr, num_pages, KMemoryState::Free); + block_manager->Update(src_addr, num_pages, KMemoryState::Normal, + KMemoryPermission::ReadAndWrite); + + return RESULT_SUCCESS; +} + +void KPageTable::MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end) { + auto node{page_linked_list.Nodes().begin()}; + PAddr map_addr{node->GetAddress()}; + std::size_t src_num_pages{node->GetNumPages()}; + + block_manager->IterateForRange(start, end, [&](const KMemoryInfo& info) { + if (info.state != KMemoryState::Free) { + return; + } + + std::size_t dst_num_pages{GetSizeInRange(info, start, end) / PageSize}; + VAddr dst_addr{GetAddressInRange(info, start)}; + + while (dst_num_pages) { + if (!src_num_pages) { + node = std::next(node); + map_addr = node->GetAddress(); + src_num_pages = node->GetNumPages(); + } + + const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)}; + Operate(dst_addr, num_pages, KMemoryPermission::ReadAndWrite, OperationType::Map, + map_addr); + + dst_addr += num_pages * PageSize; + map_addr += num_pages * PageSize; + src_num_pages -= num_pages; + dst_num_pages -= num_pages; + } + }); +} + +ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + std::size_t mapped_size{}; + const VAddr end_addr{addr + size}; + + block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) { + if (info.state != KMemoryState::Free) { + mapped_size += GetSizeInRange(info, addr, end_addr); + } + }); + + if (mapped_size == size) { + return RESULT_SUCCESS; + } + + const std::size_t remaining_size{size - mapped_size}; + const std::size_t remaining_pages{remaining_size / PageSize}; + + // Reserve the memory from the process resource limit. 
+ KScopedResourceReservation memory_reservation( + system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, + remaining_size); + if (!memory_reservation.Succeeded()) { + LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size); + return ResultResourceLimitedExceeded; + } + + KPageLinkedList page_linked_list; + + CASCADE_CODE( + system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages, memory_pool)); + + // We succeeded, so commit the memory reservation. + memory_reservation.Commit(); + + MapPhysicalMemory(page_linked_list, addr, end_addr); + + physical_memory_usage += remaining_size; + + const std::size_t num_pages{size / PageSize}; + block_manager->Update(addr, num_pages, KMemoryState::Free, KMemoryPermission::None, + KMemoryAttribute::None, KMemoryState::Normal, + KMemoryPermission::ReadAndWrite, KMemoryAttribute::None); + + return RESULT_SUCCESS; +} + +ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + const VAddr end_addr{addr + size}; + ResultCode result{RESULT_SUCCESS}; + std::size_t mapped_size{}; + + // Verify that the region can be unmapped + block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) { + if (info.state == KMemoryState::Normal) { + if (info.attribute != KMemoryAttribute::None) { + result = ResultInvalidCurrentMemory; + return; + } + mapped_size += GetSizeInRange(info, addr, end_addr); + } else if (info.state != KMemoryState::Free) { + result = ResultInvalidCurrentMemory; + } + }); + + if (result.IsError()) { + return result; + } + + if (!mapped_size) { + return RESULT_SUCCESS; + } + + CASCADE_CODE(UnmapMemory(addr, size)); + + auto process{system.Kernel().CurrentProcess()}; + process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); + physical_memory_usage -= mapped_size; + + return RESULT_SUCCESS; +} + +ResultCode KPageTable::UnmapMemory(VAddr addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + const VAddr end_addr{addr + size}; + ResultCode result{RESULT_SUCCESS}; + KPageLinkedList page_linked_list; + + // Unmap each region within the range + block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) { + if (info.state == KMemoryState::Normal) { + const std::size_t block_size{GetSizeInRange(info, addr, end_addr)}; + const std::size_t block_num_pages{block_size / PageSize}; + const VAddr block_addr{GetAddressInRange(info, addr)}; + + AddRegionToPages(block_addr, block_size / PageSize, page_linked_list); + + if (result = Operate(block_addr, block_num_pages, KMemoryPermission::None, + OperationType::Unmap); + result.IsError()) { + return; + } + } + }); + + if (result.IsError()) { + return result; + } + + const std::size_t num_pages{size / PageSize}; + system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool); + + block_manager->Update(addr, num_pages, KMemoryState::Free); + + return RESULT_SUCCESS; +} + +ResultCode KPageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + KMemoryState src_state{}; + CASCADE_CODE(CheckMemoryState( + &src_state, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias, + KMemoryState::FlagCanAlias, KMemoryPermission::Mask, KMemoryPermission::ReadAndWrite, + KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)); + + if (IsRegionMapped(dst_addr, size)) { + return ResultInvalidCurrentMemory; + } + + 
KPageLinkedList page_linked_list; + const std::size_t num_pages{size / PageSize}; + + AddRegionToPages(src_addr, num_pages, page_linked_list); + + { + auto block_guard = detail::ScopeExit([&] { + Operate(src_addr, num_pages, KMemoryPermission::ReadAndWrite, + OperationType::ChangePermissions); + }); + + CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None, + OperationType::ChangePermissions)); + CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::ReadAndWrite)); + + block_guard.Cancel(); + } + + block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::None, + KMemoryAttribute::Locked); + block_manager->Update(dst_addr, num_pages, KMemoryState::Stack, + KMemoryPermission::ReadAndWrite); + + return RESULT_SUCCESS; +} + +ResultCode KPageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + KMemoryState src_state{}; + CASCADE_CODE(CheckMemoryState( + &src_state, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias, + KMemoryState::FlagCanAlias, KMemoryPermission::Mask, KMemoryPermission::None, + KMemoryAttribute::Mask, KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped)); + + KMemoryPermission dst_perm{}; + CASCADE_CODE(CheckMemoryState(nullptr, &dst_perm, nullptr, dst_addr, size, KMemoryState::All, + KMemoryState::Stack, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::Mask, + KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)); + + KPageLinkedList src_pages; + KPageLinkedList dst_pages; + const std::size_t num_pages{size / PageSize}; + + AddRegionToPages(src_addr, num_pages, src_pages); + AddRegionToPages(dst_addr, num_pages, dst_pages); + + if (!dst_pages.IsEqual(src_pages)) { + return ResultInvalidMemoryRange; + } + + { + auto block_guard = detail::ScopeExit([&] { MapPages(dst_addr, dst_pages, dst_perm); }); + + CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); + CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::ReadAndWrite, + OperationType::ChangePermissions)); + + block_guard.Cancel(); + } + + block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::ReadAndWrite); + block_manager->Update(dst_addr, num_pages, KMemoryState::Free); + + return RESULT_SUCCESS; +} + +ResultCode KPageTable::MapPages(VAddr addr, const KPageLinkedList& page_linked_list, + KMemoryPermission perm) { + VAddr cur_addr{addr}; + + for (const auto& node : page_linked_list.Nodes()) { + if (const auto result{ + Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())}; + result.IsError()) { + const std::size_t num_pages{(addr - cur_addr) / PageSize}; + + ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap) + .IsSuccess()); + + return result; + } + + cur_addr += node.GetNumPages() * PageSize; + } + + return RESULT_SUCCESS; +} + +ResultCode KPageTable::MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state, + KMemoryPermission perm) { + std::lock_guard lock{page_table_lock}; + + const std::size_t num_pages{page_linked_list.GetNumPages()}; + const std::size_t size{num_pages * PageSize}; + + if (!CanContain(addr, size, state)) { + return ResultInvalidCurrentMemory; + } + + if (IsRegionMapped(addr, num_pages * PageSize)) { + return ResultInvalidCurrentMemory; + } + + CASCADE_CODE(MapPages(addr, page_linked_list, perm)); + + block_manager->Update(addr, num_pages, state, perm); + + return RESULT_SUCCESS; +} + +ResultCode 
KPageTable::SetCodeMemoryPermission(VAddr addr, std::size_t size, + KMemoryPermission perm) { + + std::lock_guard lock{page_table_lock}; + + KMemoryState prev_state{}; + KMemoryPermission prev_perm{}; + + CASCADE_CODE(CheckMemoryState( + &prev_state, &prev_perm, nullptr, addr, size, KMemoryState::FlagCode, + KMemoryState::FlagCode, KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)); + + KMemoryState state{prev_state}; + + // Ensure state is mutable if permission allows write + if ((perm & KMemoryPermission::Write) != KMemoryPermission::None) { + if (prev_state == KMemoryState::Code) { + state = KMemoryState::CodeData; + } else if (prev_state == KMemoryState::AliasCode) { + state = KMemoryState::AliasCodeData; + } else { + UNREACHABLE(); + } + } + + // Return early if there is nothing to change + if (state == prev_state && perm == prev_perm) { + return RESULT_SUCCESS; + } + + if ((prev_perm & KMemoryPermission::Execute) != (perm & KMemoryPermission::Execute)) { + // Memory execution state is changing, invalidate CPU cache range + system.InvalidateCpuInstructionCacheRange(addr, size); + } + + const std::size_t num_pages{size / PageSize}; + const OperationType operation{(perm & KMemoryPermission::Execute) != KMemoryPermission::None + ? OperationType::ChangePermissionsAndRefresh + : OperationType::ChangePermissions}; + + CASCADE_CODE(Operate(addr, num_pages, perm, operation)); + + block_manager->Update(addr, num_pages, state, perm); + + return RESULT_SUCCESS; +} + +KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) { + std::lock_guard lock{page_table_lock}; + + return block_manager->FindBlock(addr).GetMemoryInfo(); +} + +KMemoryInfo KPageTable::QueryInfo(VAddr addr) { + if (!Contains(addr, 1)) { + return {address_space_end, 0 - address_space_end, KMemoryState::Inaccessible, + KMemoryPermission::None, KMemoryAttribute::None, KMemoryPermission::None}; + } + + return QueryInfoImpl(addr); +} + +ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) { + std::lock_guard lock{page_table_lock}; + + KMemoryState state{}; + KMemoryAttribute attribute{}; + + CASCADE_CODE(CheckMemoryState( + &state, nullptr, &attribute, addr, size, + KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, + KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, KMemoryPermission::Mask, + KMemoryPermission::ReadAndWrite, KMemoryAttribute::Mask, KMemoryAttribute::None, + KMemoryAttribute::IpcAndDeviceMapped)); + + block_manager->Update(addr, size / PageSize, state, perm, attribute | KMemoryAttribute::Locked); + + return RESULT_SUCCESS; +} + +ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + KMemoryState state{}; + + CASCADE_CODE( + CheckMemoryState(&state, nullptr, nullptr, addr, size, + KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, + KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, + KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::Mask, + KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped)); + + block_manager->Update(addr, size / PageSize, state, KMemoryPermission::ReadAndWrite); + + return RESULT_SUCCESS; +} + +ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, KMemoryAttribute mask, + KMemoryAttribute value) { + std::lock_guard lock{page_table_lock}; + + KMemoryState state{}; + KMemoryPermission perm{}; + 
KMemoryAttribute attribute{}; + + CASCADE_CODE(CheckMemoryState( + &state, &perm, &attribute, addr, size, KMemoryState::FlagCanChangeAttribute, + KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None, + KMemoryAttribute::DeviceSharedAndUncached)); + + attribute = attribute & ~mask; + attribute = attribute | (mask & value); + + block_manager->Update(addr, size / PageSize, state, perm, attribute); + + return RESULT_SUCCESS; +} + +ResultCode KPageTable::SetHeapCapacity(std::size_t new_heap_capacity) { + std::lock_guard lock{page_table_lock}; + heap_capacity = new_heap_capacity; + return RESULT_SUCCESS; +} + +ResultVal<VAddr> KPageTable::SetHeapSize(std::size_t size) { + + if (size > heap_region_end - heap_region_start) { + return ResultOutOfMemory; + } + + const u64 previous_heap_size{GetHeapSize()}; + + UNIMPLEMENTED_IF_MSG(previous_heap_size > size, "Heap shrink is unimplemented"); + + // Increase the heap size + { + std::lock_guard lock{page_table_lock}; + + const u64 delta{size - previous_heap_size}; + + // Reserve memory for the heap extension. + KScopedResourceReservation memory_reservation( + system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, + delta); + + if (!memory_reservation.Succeeded()) { + LOG_ERROR(Kernel, "Could not reserve heap extension of size {:X} bytes", delta); + return ResultResourceLimitedExceeded; + } + + KPageLinkedList page_linked_list; + const std::size_t num_pages{delta / PageSize}; + + CASCADE_CODE( + system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool)); + + if (IsRegionMapped(current_heap_addr, delta)) { + return ResultInvalidCurrentMemory; + } + + CASCADE_CODE( + Operate(current_heap_addr, num_pages, page_linked_list, OperationType::MapGroup)); + + // Succeeded in allocation, commit the resource reservation + memory_reservation.Commit(); + + block_manager->Update(current_heap_addr, num_pages, KMemoryState::Normal, + KMemoryPermission::ReadAndWrite); + + current_heap_addr = heap_region_start + size; + } + + return MakeResult(heap_region_start); +} + +ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, + bool is_map_only, VAddr region_start, + std::size_t region_num_pages, KMemoryState state, + KMemoryPermission perm, PAddr map_addr) { + std::lock_guard lock{page_table_lock}; + + if (!CanContain(region_start, region_num_pages * PageSize, state)) { + return ResultInvalidCurrentMemory; + } + + if (region_num_pages <= needed_num_pages) { + return ResultOutOfMemory; + } + + const VAddr addr{ + AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)}; + if (!addr) { + return ResultOutOfMemory; + } + + if (is_map_only) { + CASCADE_CODE(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); + } else { + KPageLinkedList page_group; + CASCADE_CODE( + system.Kernel().MemoryManager().Allocate(page_group, needed_num_pages, memory_pool)); + CASCADE_CODE(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); + } + + block_manager->Update(addr, needed_num_pages, state, perm); + + return MakeResult(addr); +} + +ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + KMemoryPermission perm{}; + if (const ResultCode result{CheckMemoryState( + nullptr, &perm, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute, + KMemoryState::FlagCanChangeAttribute, 
KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None, + KMemoryAttribute::DeviceSharedAndUncached)}; + result.IsError()) { + return result; + } + + block_manager->UpdateLock( + addr, size / PageSize, + [](KMemoryBlockManager::iterator block, KMemoryPermission perm) { + block->ShareToDevice(perm); + }, + perm); + + return RESULT_SUCCESS; +} + +ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + KMemoryPermission perm{}; + if (const ResultCode result{CheckMemoryState( + nullptr, &perm, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute, + KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None, + KMemoryAttribute::DeviceSharedAndUncached)}; + result.IsError()) { + return result; + } + + block_manager->UpdateLock( + addr, size / PageSize, + [](KMemoryBlockManager::iterator block, KMemoryPermission perm) { + block->UnshareToDevice(perm); + }, + perm); + + return RESULT_SUCCESS; +} + +ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) { + block_manager = std::make_unique<KMemoryBlockManager>(start, end); + + return RESULT_SUCCESS; +} + +bool KPageTable::IsRegionMapped(VAddr address, u64 size) { + return CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free, + KMemoryPermission::Mask, KMemoryPermission::None, + KMemoryAttribute::Mask, KMemoryAttribute::None, + KMemoryAttribute::IpcAndDeviceMapped) + .IsError(); +} + +bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const { + auto start_ptr = system.Memory().GetPointer(addr); + for (u64 offset{}; offset < size; offset += PageSize) { + if (start_ptr != system.Memory().GetPointer(addr + offset)) { + return false; + } + start_ptr += PageSize; + } + return true; +} + +void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages, + KPageLinkedList& page_linked_list) { + VAddr addr{start}; + while (addr < start + (num_pages * PageSize)) { + const PAddr paddr{GetPhysicalAddr(addr)}; + if (!paddr) { + UNREACHABLE(); + } + page_linked_list.AddBlock(paddr, 1); + addr += PageSize; + } +} + +VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, + u64 needed_num_pages, std::size_t align) { + if (is_aslr_enabled) { + UNIMPLEMENTED(); + } + return block_manager->FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, + IsKernel() ? 
1 : 4); +} + +ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLinkedList& page_group, + OperationType operation) { + std::lock_guard lock{page_table_lock}; + + ASSERT(Common::IsAligned(addr, PageSize)); + ASSERT(num_pages > 0); + ASSERT(num_pages == page_group.GetNumPages()); + + for (const auto& node : page_group.Nodes()) { + const std::size_t size{node.GetNumPages() * PageSize}; + + switch (operation) { + case OperationType::MapGroup: + system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress()); + break; + default: + UNREACHABLE(); + } + + addr += size; + } + + return RESULT_SUCCESS; +} + +ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, + OperationType operation, PAddr map_addr) { + std::lock_guard lock{page_table_lock}; + + ASSERT(num_pages > 0); + ASSERT(Common::IsAligned(addr, PageSize)); + ASSERT(ContainsPages(addr, num_pages)); + + switch (operation) { + case OperationType::Unmap: + system.Memory().UnmapRegion(page_table_impl, addr, num_pages * PageSize); + break; + case OperationType::Map: { + ASSERT(map_addr); + ASSERT(Common::IsAligned(map_addr, PageSize)); + system.Memory().MapMemoryRegion(page_table_impl, addr, num_pages * PageSize, map_addr); + break; + } + case OperationType::ChangePermissions: + case OperationType::ChangePermissionsAndRefresh: + break; + default: + UNREACHABLE(); + } + return RESULT_SUCCESS; +} + +constexpr VAddr KPageTable::GetRegionAddress(KMemoryState state) const { + switch (state) { + case KMemoryState::Free: + case KMemoryState::Kernel: + return address_space_start; + case KMemoryState::Normal: + return heap_region_start; + case KMemoryState::Ipc: + case KMemoryState::NonSecureIpc: + case KMemoryState::NonDeviceIpc: + return alias_region_start; + case KMemoryState::Stack: + return stack_region_start; + case KMemoryState::Io: + case KMemoryState::Static: + case KMemoryState::ThreadLocal: + return kernel_map_region_start; + case KMemoryState::Shared: + case KMemoryState::AliasCode: + case KMemoryState::AliasCodeData: + case KMemoryState::Transferred: + case KMemoryState::SharedTransferred: + case KMemoryState::SharedCode: + case KMemoryState::GeneratedCode: + case KMemoryState::CodeOut: + return alias_code_region_start; + case KMemoryState::Code: + case KMemoryState::CodeData: + return code_region_start; + default: + UNREACHABLE(); + return {}; + } +} + +constexpr std::size_t KPageTable::GetRegionSize(KMemoryState state) const { + switch (state) { + case KMemoryState::Free: + case KMemoryState::Kernel: + return address_space_end - address_space_start; + case KMemoryState::Normal: + return heap_region_end - heap_region_start; + case KMemoryState::Ipc: + case KMemoryState::NonSecureIpc: + case KMemoryState::NonDeviceIpc: + return alias_region_end - alias_region_start; + case KMemoryState::Stack: + return stack_region_end - stack_region_start; + case KMemoryState::Io: + case KMemoryState::Static: + case KMemoryState::ThreadLocal: + return kernel_map_region_end - kernel_map_region_start; + case KMemoryState::Shared: + case KMemoryState::AliasCode: + case KMemoryState::AliasCodeData: + case KMemoryState::Transferred: + case KMemoryState::SharedTransferred: + case KMemoryState::SharedCode: + case KMemoryState::GeneratedCode: + case KMemoryState::CodeOut: + return alias_code_region_end - alias_code_region_start; + case KMemoryState::Code: + case KMemoryState::CodeData: + return code_region_end - code_region_start; + default: + UNREACHABLE(); + return {}; + } +} + 
+constexpr bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const { + const VAddr end{addr + size}; + const VAddr last{end - 1}; + const VAddr region_start{GetRegionAddress(state)}; + const std::size_t region_size{GetRegionSize(state)}; + const bool is_in_region{region_start <= addr && addr < end && + last <= region_start + region_size - 1}; + const bool is_in_heap{!(end <= heap_region_start || heap_region_end <= addr)}; + const bool is_in_alias{!(end <= alias_region_start || alias_region_end <= addr)}; + + switch (state) { + case KMemoryState::Free: + case KMemoryState::Kernel: + return is_in_region; + case KMemoryState::Io: + case KMemoryState::Static: + case KMemoryState::Code: + case KMemoryState::CodeData: + case KMemoryState::Shared: + case KMemoryState::AliasCode: + case KMemoryState::AliasCodeData: + case KMemoryState::Stack: + case KMemoryState::ThreadLocal: + case KMemoryState::Transferred: + case KMemoryState::SharedTransferred: + case KMemoryState::SharedCode: + case KMemoryState::GeneratedCode: + case KMemoryState::CodeOut: + return is_in_region && !is_in_heap && !is_in_alias; + case KMemoryState::Normal: + ASSERT(is_in_heap); + return is_in_region && !is_in_alias; + case KMemoryState::Ipc: + case KMemoryState::NonSecureIpc: + case KMemoryState::NonDeviceIpc: + ASSERT(is_in_alias); + return is_in_region && !is_in_heap; + default: + return false; + } +} + +constexpr ResultCode KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, + KMemoryState state, KMemoryPermission perm_mask, + KMemoryPermission perm, + KMemoryAttribute attr_mask, + KMemoryAttribute attr) const { + // Validate the states match expectation + if ((info.state & state_mask) != state) { + return ResultInvalidCurrentMemory; + } + if ((info.perm & perm_mask) != perm) { + return ResultInvalidCurrentMemory; + } + if ((info.attribute & attr_mask) != attr) { + return ResultInvalidCurrentMemory; + } + + return RESULT_SUCCESS; +} + +ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, + KMemoryAttribute* out_attr, VAddr addr, std::size_t size, + KMemoryState state_mask, KMemoryState state, + KMemoryPermission perm_mask, KMemoryPermission perm, + KMemoryAttribute attr_mask, KMemoryAttribute attr, + KMemoryAttribute ignore_attr) { + std::lock_guard lock{page_table_lock}; + + // Get information about the first block + const VAddr last_addr{addr + size - 1}; + KMemoryBlockManager::const_iterator it{block_manager->FindIterator(addr)}; + KMemoryInfo info{it->GetMemoryInfo()}; + + // Validate all blocks in the range have correct state + const KMemoryState first_state{info.state}; + const KMemoryPermission first_perm{info.perm}; + const KMemoryAttribute first_attr{info.attribute}; + + while (true) { + // Validate the current block + if (!(info.state == first_state)) { + return ResultInvalidCurrentMemory; + } + if (!(info.perm == first_perm)) { + return ResultInvalidCurrentMemory; + } + if (!((info.attribute | static_cast<KMemoryAttribute>(ignore_attr)) == + (first_attr | static_cast<KMemoryAttribute>(ignore_attr)))) { + return ResultInvalidCurrentMemory; + } + + // Validate against the provided masks + CASCADE_CODE(CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)); + + // Break once we're done + if (last_addr <= info.GetLastAddress()) { + break; + } + + // Advance our iterator + it++; + ASSERT(it != block_manager->cend()); + info = it->GetMemoryInfo(); + } + + // Write output state + if (out_state) { + *out_state = first_state; + } + if 
(out_perm) { + *out_perm = first_perm; + } + if (out_attr) { + *out_attr = first_attr & static_cast<KMemoryAttribute>(~ignore_attr); + } + + return RESULT_SUCCESS; +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h new file mode 100644 index 000000000..49b824379 --- /dev/null +++ b/src/core/hle/kernel/k_page_table.h @@ -0,0 +1,279 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <memory> +#include <mutex> + +#include "common/common_types.h" +#include "common/page_table.h" +#include "core/file_sys/program_metadata.h" +#include "core/hle/kernel/k_memory_block.h" +#include "core/hle/kernel/k_memory_manager.h" +#include "core/hle/result.h" + +namespace Core { +class System; +} + +namespace Kernel { + +class KMemoryBlockManager; + +class KPageTable final : NonCopyable { +public: + explicit KPageTable(Core::System& system); + + ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, + VAddr code_addr, std::size_t code_size, + KMemoryManager::Pool pool); + ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state, + KMemoryPermission perm); + ResultCode MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); + ResultCode UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); + ResultCode MapPhysicalMemory(VAddr addr, std::size_t size); + ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size); + ResultCode UnmapMemory(VAddr addr, std::size_t size); + ResultCode Map(VAddr dst_addr, VAddr src_addr, std::size_t size); + ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size); + ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state, + KMemoryPermission perm); + ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, KMemoryPermission perm); + KMemoryInfo QueryInfo(VAddr addr); + ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm); + ResultCode ResetTransferMemory(VAddr addr, std::size_t size); + ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, KMemoryAttribute mask, + KMemoryAttribute value); + ResultCode SetHeapCapacity(std::size_t new_heap_capacity); + ResultVal<VAddr> SetHeapSize(std::size_t size); + ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, + bool is_map_only, VAddr region_start, + std::size_t region_num_pages, KMemoryState state, + KMemoryPermission perm, PAddr map_addr = 0); + ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size); + ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size); + + Common::PageTable& PageTableImpl() { + return page_table_impl; + } + + const Common::PageTable& PageTableImpl() const { + return page_table_impl; + } + +private: + enum class OperationType : u32 { + Map, + MapGroup, + Unmap, + ChangePermissions, + ChangePermissionsAndRefresh, + }; + + static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = KMemoryAttribute::DontCareMask | + KMemoryAttribute::IpcLocked | + KMemoryAttribute::DeviceShared; + + ResultCode InitializeMemoryLayout(VAddr start, VAddr end); + ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list, + KMemoryPermission perm); + void MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end); + bool IsRegionMapped(VAddr address, u64 size); + bool IsRegionContiguous(VAddr addr, u64 size) const; + void AddRegionToPages(VAddr 
+    void AddRegionToPages(VAddr start, std::size_t num_pages, KPageLinkedList& page_linked_list);
+    KMemoryInfo QueryInfoImpl(VAddr addr);
+    VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
+                                std::size_t align);
+    ResultCode Operate(VAddr addr, std::size_t num_pages, const KPageLinkedList& page_group,
+                       OperationType operation);
+    ResultCode Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
+                       OperationType operation, PAddr map_addr = 0);
+    constexpr VAddr GetRegionAddress(KMemoryState state) const;
+    constexpr std::size_t GetRegionSize(KMemoryState state) const;
+    constexpr bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const;
+
+    constexpr ResultCode CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
+                                          KMemoryState state, KMemoryPermission perm_mask,
+                                          KMemoryPermission perm, KMemoryAttribute attr_mask,
+                                          KMemoryAttribute attr) const;
+    ResultCode CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+                                KMemoryAttribute* out_attr, VAddr addr, std::size_t size,
+                                KMemoryState state_mask, KMemoryState state,
+                                KMemoryPermission perm_mask, KMemoryPermission perm,
+                                KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                                KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr);
+    ResultCode CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
+                                KMemoryState state, KMemoryPermission perm_mask,
+                                KMemoryPermission perm, KMemoryAttribute attr_mask,
+                                KMemoryAttribute attr,
+                                KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) {
+        return CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask,
+                                perm, attr_mask, attr, ignore_attr);
+    }
+
+    std::recursive_mutex page_table_lock;
+    std::unique_ptr<KMemoryBlockManager> block_manager;
+
+public:
+    constexpr VAddr GetAddressSpaceStart() const {
+        return address_space_start;
+    }
+    constexpr VAddr GetAddressSpaceEnd() const {
+        return address_space_end;
+    }
+    constexpr std::size_t GetAddressSpaceSize() const {
+        return address_space_end - address_space_start;
+    }
+    constexpr VAddr GetHeapRegionStart() const {
+        return heap_region_start;
+    }
+    constexpr VAddr GetHeapRegionEnd() const {
+        return heap_region_end;
+    }
+    constexpr std::size_t GetHeapRegionSize() const {
+        return heap_region_end - heap_region_start;
+    }
+    constexpr VAddr GetAliasRegionStart() const {
+        return alias_region_start;
+    }
+    constexpr VAddr GetAliasRegionEnd() const {
+        return alias_region_end;
+    }
+    constexpr std::size_t GetAliasRegionSize() const {
+        return alias_region_end - alias_region_start;
+    }
+    constexpr VAddr GetStackRegionStart() const {
+        return stack_region_start;
+    }
+    constexpr VAddr GetStackRegionEnd() const {
+        return stack_region_end;
+    }
+    constexpr std::size_t GetStackRegionSize() const {
+        return stack_region_end - stack_region_start;
+    }
+    constexpr VAddr GetKernelMapRegionStart() const {
+        return kernel_map_region_start;
+    }
+    constexpr VAddr GetKernelMapRegionEnd() const {
+        return kernel_map_region_end;
+    }
+    constexpr VAddr GetCodeRegionStart() const {
+        return code_region_start;
+    }
+    constexpr VAddr GetCodeRegionEnd() const {
+        return code_region_end;
+    }
+    constexpr VAddr GetAliasCodeRegionStart() const {
+        return alias_code_region_start;
+    }
+    constexpr VAddr GetAliasCodeRegionSize() const {
+        return alias_code_region_end - alias_code_region_start;
+    }
+    constexpr std::size_t GetAddressSpaceWidth() const {
+        return address_space_width;
+    }
+    constexpr std::size_t GetHeapSize() {
+        return current_heap_addr - heap_region_start;
+    }
+    constexpr std::size_t GetTotalHeapSize() {
+        return GetHeapSize() + physical_memory_usage;
+    }
+    constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const {
+        return address_space_start <= address && address + size - 1 <= address_space_end - 1;
+    }
+    constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const {
+        return alias_region_start > address || address + size - 1 > alias_region_end - 1;
+    }
+    constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const {
+        return stack_region_start > address || address + size - 1 > stack_region_end - 1;
+    }
+    constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const {
+        return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
+    }
+    constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const {
+        return address + size > heap_region_start && heap_region_end > address;
+    }
+    constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const {
+        return address + size > alias_region_start && alias_region_end > address;
+    }
+    constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const {
+        if (IsInvalidRegion(address, size)) {
+            return true;
+        }
+        if (IsInsideHeapRegion(address, size)) {
+            return true;
+        }
+        if (IsInsideAliasRegion(address, size)) {
+            return true;
+        }
+        return {};
+    }
+    constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
+        return !IsOutsideASLRRegion(address, size);
+    }
+    constexpr PAddr GetPhysicalAddr(VAddr addr) {
+        return page_table_impl.backing_addr[addr >> PageBits] + addr;
+    }
+
+private:
+    constexpr bool Contains(VAddr addr) const {
+        return address_space_start <= addr && addr <= address_space_end - 1;
+    }
+    constexpr bool Contains(VAddr addr, std::size_t size) const {
+        return address_space_start <= addr && addr < addr + size &&
+               addr + size - 1 <= address_space_end - 1;
+    }
+    constexpr bool IsKernel() const {
+        return is_kernel;
+    }
+    constexpr bool IsAslrEnabled() const {
+        return is_aslr_enabled;
+    }
+
+    constexpr std::size_t GetNumGuardPages() const {
+        return IsKernel() ? 1 : 4;
+    }
+
+    constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
+        return (address_space_start <= addr) &&
+               (num_pages <= (address_space_end - address_space_start) / PageSize) &&
+               (addr + num_pages * PageSize - 1 <= address_space_end - 1);
+    }
+
+private:
+    VAddr address_space_start{};
+    VAddr address_space_end{};
+    VAddr heap_region_start{};
+    VAddr heap_region_end{};
+    VAddr current_heap_end{};
+    VAddr alias_region_start{};
+    VAddr alias_region_end{};
+    VAddr stack_region_start{};
+    VAddr stack_region_end{};
+    VAddr kernel_map_region_start{};
+    VAddr kernel_map_region_end{};
+    VAddr code_region_start{};
+    VAddr code_region_end{};
+    VAddr alias_code_region_start{};
+    VAddr alias_code_region_end{};
+    VAddr current_heap_addr{};
+
+    std::size_t heap_capacity{};
+    std::size_t physical_memory_usage{};
+    std::size_t max_heap_size{};
+    std::size_t max_physical_memory_size{};
+    std::size_t address_space_width{};
+
+    bool is_kernel{};
+    bool is_aslr_enabled{};
+
+    KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
+
+    Common::PageTable page_table_impl;
+
+    Core::System& system;
+};
+
+} // namespace Kernel
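GetPhysicalAddr() relies on a common encoding in Common::PageTable-style structures: each per-page entry stores the physical base minus the virtual page base, so one addition resolves both the page translation and the in-page offset. A hypothetical miniature of that scheme (not the real Common::PageTable), a sketch only:

#include <cstddef>
#include <cstdint>
#include <vector>

using VAddr = std::uint64_t;
using PAddr = std::uint64_t;
constexpr std::size_t PageBits = 12;

struct TinyPageTable {
    std::vector<PAddr> backing_addr; // per page: phys_base minus virtual page base

    explicit TinyPageTable(std::size_t num_pages) : backing_addr(num_pages) {}

    void Map(VAddr vaddr, PAddr paddr) {
        const VAddr page_base = vaddr & ~((VAddr{1} << PageBits) - 1);
        backing_addr[vaddr >> PageBits] = paddr - page_base; // wraps harmlessly (unsigned)
    }

    PAddr Translate(VAddr addr) const {
        // A single add resolves the page base and the page offset together.
        return backing_addr[addr >> PageBits] + addr;
    }
};

// TinyPageTable table{16};
// table.Map(0x3000, 0x80000);
// table.Translate(0x3abc) == 0x80abc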
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index aade52764..9b14f42b5 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -4,10 +4,10 @@
 
 #include "common/assert.h"
 #include "core/core.h"
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_scoped_resource_reservation.h"
 #include "core/hle/kernel/k_shared_memory.h"
 #include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/memory/page_table.h"
 
 namespace Kernel {
 
@@ -21,8 +21,7 @@ KSharedMemory::~KSharedMemory() {
 std::shared_ptr<KSharedMemory> KSharedMemory::Create(
     KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process,
     KPageLinkedList&& page_list, KMemoryPermission owner_permission,
-    KMemoryPermission user_permission, PAddr physical_address, std::size_t size,
-    std::string name) {
+    KMemoryPermission user_permission, PAddr physical_address, std::size_t size, std::string name) {
     const auto resource_limit = kernel.GetSystemResourceLimit();
     KScopedResourceReservation memory_reservation(resource_limit,
                                                   LimitableResource::PhysicalMemory,
diff --git a/src/core/hle/kernel/memory/page_heap.cpp b/src/core/hle/kernel/memory/page_heap.cpp
deleted file mode 100644
index 8fb53a0e8..000000000
--- a/src/core/hle/kernel/memory/page_heap.cpp
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-// This file references various implementation details from Atmosphere, an open-source firmware for
-// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
-
-#include "core/core.h"
-#include "core/hle/kernel/memory/page_heap.h"
-#include "core/memory.h"
-
-namespace Kernel::Memory {
-
-void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) {
-    // Check our assumptions
-    ASSERT(Common::IsAligned((address), PageSize));
-    ASSERT(Common::IsAligned(size, PageSize));
-
-    // Set our members
-    heap_address = address;
-    heap_size = size;
-
-    // Setup bitmaps
-    metadata.resize(metadata_size / sizeof(u64));
-    u64* cur_bitmap_storage{metadata.data()};
-    for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
-        const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
-        const std::size_t next_block_shift{
-            (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
-        cur_bitmap_storage = blocks[i].Initialize(heap_address, heap_size, cur_block_shift,
-                                                  next_block_shift, cur_bitmap_storage);
-    }
-}
-
-VAddr PageHeap::AllocateBlock(s32 index, bool random) {
-    const std::size_t needed_size{blocks[index].GetSize()};
-
-    for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
-        if (const VAddr addr{blocks[i].PopBlock(random)}; addr) {
-            if (const std::size_t allocated_size{blocks[i].GetSize()};
-                allocated_size > needed_size) {
-                Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
-            }
-            return addr;
-        }
-    }
-
-    return 0;
-}
-
-void PageHeap::FreeBlock(VAddr block, s32 index) {
-    do {
-        block = blocks[index++].PushBlock(block);
-    } while (block != 0);
-}
-
-void PageHeap::Free(VAddr addr, std::size_t num_pages) {
-    // Freeing no pages is a no-op
-    if (num_pages == 0) {
-        return;
-    }
-
-    // Find the largest block size that we can free, and free as many as possible
-    s32 big_index{static_cast<s32>(MemoryBlockPageShifts.size()) - 1};
-    const VAddr start{addr};
-    const VAddr end{(num_pages * PageSize) + addr};
-    VAddr before_start{start};
-    VAddr before_end{start};
-    VAddr after_start{end};
-    VAddr after_end{end};
-    while (big_index >= 0) {
-        const std::size_t block_size{blocks[big_index].GetSize()};
-        const VAddr big_start{Common::AlignUp((start), block_size)};
-        const VAddr big_end{Common::AlignDown((end), block_size)};
-        if (big_start < big_end) {
-            // Free as many big blocks as we can
-            for (auto block{big_start}; block < big_end; block += block_size) {
-                FreeBlock(block, big_index);
-            }
-            before_end = big_start;
-            after_start = big_end;
-            break;
-        }
-        big_index--;
-    }
-    ASSERT(big_index >= 0);
-
-    // Free space before the big blocks
-    for (s32 i{big_index - 1}; i >= 0; i--) {
-        const std::size_t block_size{blocks[i].GetSize()};
-        while (before_start + block_size <= before_end) {
-            before_end -= block_size;
-            FreeBlock(before_end, i);
-        }
-    }
-
-    // Free space after the big blocks
-    for (s32 i{big_index - 1}; i >= 0; i--) {
-        const std::size_t block_size{blocks[i].GetSize()};
-        while (after_start + block_size <= after_end) {
-            FreeBlock(after_start, i);
-            after_start += block_size;
-        }
-    }
-}
-
-std::size_t PageHeap::CalculateManagementOverheadSize(std::size_t region_size) {
-    std::size_t overhead_size = 0;
-    for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
-        const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
-        const std::size_t next_block_shift{
-            (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
-        overhead_size += PageHeap::Block::CalculateManagementOverheadSize(
-            region_size, cur_block_shift, next_block_shift);
-    }
-    return Common::AlignUp(overhead_size, PageSize);
-}
-
-} // namespace Kernel::Memory
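AllocateBlock() above implements the split half of a buddy-style allocator: when only a larger block is free, the tail beyond the requested size is immediately handed back through Free(). A reduced sketch of that split step over plain size-keyed free lists (hypothetical structure, not the PageHeap bitmaps):

#include <cstddef>
#include <cstdint>
#include <map>

// Free blocks keyed by size; a stand-in for PageHeap's per-shift bitmap levels.
std::multimap<std::size_t, std::uintptr_t> free_blocks;

std::uintptr_t AllocateBlock(std::size_t needed_size) {
    // Take the smallest free block that fits, like scanning blocks[i] upward.
    const auto it = free_blocks.lower_bound(needed_size);
    if (it == free_blocks.end()) {
        return 0;
    }
    const auto [size, addr] = *it;
    free_blocks.erase(it);
    if (size > needed_size) {
        // Split: return the unused tail, as AllocateBlock() does via Free().
        free_blocks.emplace(size - needed_size, addr + needed_size);
    }
    return addr;
}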
diff --git a/src/core/hle/kernel/memory/page_heap.h b/src/core/hle/kernel/memory/page_heap.h
deleted file mode 100644
index e21d60a54..000000000
--- a/src/core/hle/kernel/memory/page_heap.h
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-// This file references various implementation details from Atmosphere, an open-source firmware for
-// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
-
-#pragma once
-
-#include <algorithm>
-#include <array>
-#include <vector>
-
-#include "common/alignment.h"
-#include "common/assert.h"
-#include "common/common_funcs.h"
-#include "common/common_types.h"
-#include "core/hle/kernel/k_page_bitmap.h"
-#include "core/hle/kernel/memory_types.h"
-
-namespace Kernel::Memory {
-
-class PageHeap final : NonCopyable {
-public:
-    static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
-        const auto target_pages{std::max(num_pages, align_pages)};
-        for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
-            if (target_pages <=
-                (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
-                return static_cast<s32>(i);
-            }
-        }
-        return -1;
-    }
-
-    static constexpr s32 GetBlockIndex(std::size_t num_pages) {
-        for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
-            if (num_pages >=
-                (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
-                return i;
-            }
-        }
-        return -1;
-    }
-
-    static constexpr std::size_t GetBlockSize(std::size_t index) {
-        return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index];
-    }
-
-    static constexpr std::size_t GetBlockNumPages(std::size_t index) {
-        return GetBlockSize(index) / PageSize;
-    }
-
-private:
-    static constexpr std::size_t NumMemoryBlockPageShifts{7};
-    static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
-        0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
-    };
-
-    class Block final : NonCopyable {
-    private:
-        KPageBitmap bitmap;
-        VAddr heap_address{};
-        uintptr_t end_offset{};
-        std::size_t block_shift{};
-        std::size_t next_block_shift{};
-
-    public:
-        Block() = default;
-
-        constexpr std::size_t GetShift() const {
-            return block_shift;
-        }
-        constexpr std::size_t GetNextShift() const {
-            return next_block_shift;
-        }
-        constexpr std::size_t GetSize() const {
-            return static_cast<std::size_t>(1) << GetShift();
-        }
-        constexpr std::size_t GetNumPages() const {
-            return GetSize() / PageSize;
-        }
-        constexpr std::size_t GetNumFreeBlocks() const {
-            return bitmap.GetNumBits();
-        }
-        constexpr std::size_t GetNumFreePages() const {
-            return GetNumFreeBlocks() * GetNumPages();
-        }
-
-        u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs,
-                        u64* bit_storage) {
-            // Set shifts
-            block_shift = bs;
-            next_block_shift = nbs;
-
-            // Align up the address
-            VAddr end{addr + size};
-            const auto align{(next_block_shift != 0) ? (1ULL << next_block_shift)
-                                                     : (1ULL << block_shift)};
-            addr = Common::AlignDown((addr), align);
-            end = Common::AlignUp((end), align);
-
-            heap_address = addr;
-            end_offset = (end - addr) / (1ULL << block_shift);
-            return bitmap.Initialize(bit_storage, end_offset);
-        }
-
-        VAddr PushBlock(VAddr address) {
-            // Set the bit for the free block
-            std::size_t offset{(address - heap_address) >> GetShift()};
-            bitmap.SetBit(offset);
-
-            // If we have a next shift, try to clear the blocks below and return the address
-            if (GetNextShift()) {
-                const auto diff{1ULL << (GetNextShift() - GetShift())};
-                offset = Common::AlignDown(offset, diff);
-                if (bitmap.ClearRange(offset, diff)) {
-                    return heap_address + (offset << GetShift());
-                }
-            }
-
-            // We couldn't coalesce, or we're already as big as possible
-            return 0;
-        }
-
-        VAddr PopBlock(bool random) {
-            // Find a free block
-            const s64 soffset{bitmap.FindFreeBlock(random)};
-            if (soffset < 0) {
-                return 0;
-            }
-            const auto offset{static_cast<std::size_t>(soffset)};
-
-            // Update our tracking and return it
-            bitmap.ClearBit(offset);
-            return heap_address + (offset << GetShift());
-        }
-
-    public:
-        static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size,
-                                                                     std::size_t cur_block_shift,
-                                                                     std::size_t next_block_shift) {
-            const auto cur_block_size{(1ULL << cur_block_shift)};
-            const auto next_block_size{(1ULL << next_block_shift)};
-            const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size};
-            return KPageBitmap::CalculateManagementOverheadSize(
-                (align * 2 + Common::AlignUp(region_size, align)) / cur_block_size);
-        }
-    };
-
-public:
-    PageHeap() = default;
-
-    constexpr VAddr GetAddress() const {
-        return heap_address;
-    }
-    constexpr std::size_t GetSize() const {
-        return heap_size;
-    }
-    constexpr VAddr GetEndAddress() const {
-        return GetAddress() + GetSize();
-    }
-    constexpr std::size_t GetPageOffset(VAddr block) const {
-        return (block - GetAddress()) / PageSize;
-    }
-
-    void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
-    VAddr AllocateBlock(s32 index, bool random);
-    void Free(VAddr addr, std::size_t num_pages);
-
-    void UpdateUsedSize() {
-        used_size = heap_size - (GetNumFreePages() * PageSize);
-    }
-
-    static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
-
-private:
-    constexpr std::size_t GetNumFreePages() const {
-        std::size_t num_free{};
-
-        for (const auto& block : blocks) {
-            num_free += block.GetNumFreePages();
-        }
-
-        return num_free;
-    }
-
-    void FreeBlock(VAddr block, s32 index);
-
-    VAddr heap_address{};
-    std::size_t heap_size{};
-    std::size_t used_size{};
-    std::array<Block, NumMemoryBlockPageShifts> blocks{};
-    std::vector<u64> metadata;
-};
-
-} // namespace Kernel::Memory
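The MemoryBlockPageShifts table maps size classes {4 KiB, 64 KiB, 2 MiB, 4 MiB, 32 MiB, 512 MiB, 1 GiB} to bitmap levels, and GetAlignedBlockIndex() simply picks the first class large enough for max(num_pages, align_pages). A quick standalone check of that mapping (assuming 4 KiB pages):

#include <array>
#include <cstddef>

constexpr std::size_t PageSize = 0x1000; // assumed 4 KiB pages
constexpr std::array<std::size_t, 7> Shifts{0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E};

constexpr int BlockIndexFor(std::size_t num_pages) {
    for (std::size_t i = 0; i < Shifts.size(); i++) {
        if (num_pages <= (std::size_t{1} << Shifts[i]) / PageSize) {
            return static_cast<int>(i);
        }
    }
    return -1;
}

static_assert(BlockIndexFor(1) == 0);  // one page -> 4 KiB class
static_assert(BlockIndexFor(2) == 1);  // two pages -> 64 KiB class
static_assert(BlockIndexFor(16) == 1); // exactly 64 KiB still fits class 1
static_assert(BlockIndexFor(17) == 2); // just over 64 KiB -> 2 MiB class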
diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
deleted file mode 100644
index ef9d97413..000000000
--- a/src/core/hle/kernel/memory/page_table.cpp
+++ /dev/null
@@ -1,1189 +0,0 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include "common/alignment.h"
-#include "common/assert.h"
-#include "common/scope_exit.h"
-#include "core/core.h"
-#include "core/hle/kernel/k_address_space_info.h"
-#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/k_page_linked_list.h"
-#include "core/hle/kernel/k_resource_limit.h"
-#include "core/hle/kernel/k_scoped_resource_reservation.h"
-#include "core/hle/kernel/k_system_control.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/memory/page_table.h"
-#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/svc_results.h"
-#include "core/memory.h"
-
-namespace Kernel::Memory {
-
-namespace {
-
-constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
-    switch (as_type) {
-    case FileSys::ProgramAddressSpaceType::Is32Bit:
-    case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
-        return 32;
-    case FileSys::ProgramAddressSpaceType::Is36Bit:
-        return 36;
-    case FileSys::ProgramAddressSpaceType::Is39Bit:
-        return 39;
-    default:
-        UNREACHABLE();
-        return {};
-    }
-}
-
-constexpr u64 GetAddressInRange(const KMemoryInfo& info, VAddr addr) {
-    if (info.GetAddress() < addr) {
-        return addr;
-    }
-    return info.GetAddress();
-}
-
-constexpr std::size_t GetSizeInRange(const KMemoryInfo& info, VAddr start, VAddr end) {
-    std::size_t size{info.GetSize()};
-    if (info.GetAddress() < start) {
-        size -= start - info.GetAddress();
-    }
-    if (info.GetEndAddress() > end) {
-        size -= info.GetEndAddress() - end;
-    }
-    return size;
-}
-
-} // namespace
-
-PageTable::PageTable(Core::System& system) : system{system} {}
-
-ResultCode PageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type,
-                                           bool enable_aslr, VAddr code_addr, std::size_t code_size,
-                                           KMemoryManager::Pool pool) {
-
-    const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
-        return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type);
-    };
-    const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) {
-        return KAddressSpaceInfo::GetAddressSpaceSize(address_space_width, type);
-    };
-
-    // Set our width and heap/alias sizes
-    address_space_width = GetAddressSpaceWidthFromType(as_type);
-    const VAddr start = 0;
-    const VAddr end{1ULL << address_space_width};
-    std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
-    std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
-
-    ASSERT(start <= code_addr);
-    ASSERT(code_addr < code_addr + code_size);
-    ASSERT(code_addr + code_size - 1 <= end - 1);
-
-    // Adjust heap/alias size if we don't have an alias region
-    if (as_type == FileSys::ProgramAddressSpaceType::Is32BitNoMap) {
-        heap_region_size += alias_region_size;
-        alias_region_size = 0;
-    }
-
-    // Set code regions and determine remaining
-    constexpr std::size_t RegionAlignment{2 * 1024 * 1024};
-    VAddr process_code_start{};
-    VAddr process_code_end{};
-    std::size_t stack_region_size{};
-    std::size_t kernel_map_region_size{};
-
-    if (address_space_width == 39) {
-        alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
-        heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
-        stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack);
-        kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
-        code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
-        code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
-        alias_code_region_start = code_region_start;
-        alias_code_region_end = code_region_end;
-        process_code_start = Common::AlignDown(code_addr, RegionAlignment);
-        process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment);
-    } else {
-        stack_region_size = 0;
-        kernel_map_region_size = 0;
-        code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
-        code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
-        stack_region_start = code_region_start;
-        alias_code_region_start = code_region_start;
-        alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
-                                GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
-        stack_region_end = code_region_end;
-        kernel_map_region_start = code_region_start;
-        kernel_map_region_end = code_region_end;
-        process_code_start = code_region_start;
-        process_code_end = code_region_end;
-    }
-
-    // Set other basic fields
-    is_aslr_enabled = enable_aslr;
-    address_space_start = start;
-    address_space_end = end;
-    is_kernel = false;
-
-    // Determine the region we can place our undetermineds in
-    VAddr alloc_start{};
-    std::size_t alloc_size{};
-    if ((process_code_start - code_region_start) >= (end - process_code_end)) {
-        alloc_start = code_region_start;
-        alloc_size = process_code_start - code_region_start;
-    } else {
-        alloc_start = process_code_end;
-        alloc_size = end - process_code_end;
-    }
-    const std::size_t needed_size{
-        (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
-    if (alloc_size < needed_size) {
-        UNREACHABLE();
-        return ResultOutOfMemory;
-    }
-
-    const std::size_t remaining_size{alloc_size - needed_size};
-
-    // Determine random placements for each region
-    std::size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
-    if (enable_aslr) {
-        alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
-                    RegionAlignment;
-        heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
-                   RegionAlignment;
-        stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
-                    RegionAlignment;
-        kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
-                   RegionAlignment;
-    }
-
-    // Setup heap and alias regions
-    alias_region_start = alloc_start + alias_rnd;
-    alias_region_end = alias_region_start + alias_region_size;
-    heap_region_start = alloc_start + heap_rnd;
-    heap_region_end = heap_region_start + heap_region_size;
-
-    if (alias_rnd <= heap_rnd) {
-        heap_region_start += alias_region_size;
-        heap_region_end += alias_region_size;
-    } else {
-        alias_region_start += heap_region_size;
-        alias_region_end += heap_region_size;
-    }
-
-    // Setup stack region
-    if (stack_region_size) {
-        stack_region_start = alloc_start + stack_rnd;
-        stack_region_end = stack_region_start + stack_region_size;
-
-        if (alias_rnd < stack_rnd) {
-            stack_region_start += alias_region_size;
-            stack_region_end += alias_region_size;
-        } else {
-            alias_region_start += stack_region_size;
-            alias_region_end += stack_region_size;
-        }
-
-        if (heap_rnd < stack_rnd) {
-            stack_region_start += heap_region_size;
-            stack_region_end += heap_region_size;
-        } else {
-            heap_region_start += stack_region_size;
-            heap_region_end += stack_region_size;
-        }
-    }
-
-    // Setup kernel map region
-    if (kernel_map_region_size) {
-        kernel_map_region_start = alloc_start + kmap_rnd;
-        kernel_map_region_end = kernel_map_region_start + kernel_map_region_size;
-
-        if (alias_rnd < kmap_rnd) {
-            kernel_map_region_start += alias_region_size;
-            kernel_map_region_end += alias_region_size;
-        } else {
-            alias_region_start += kernel_map_region_size;
-            alias_region_end += kernel_map_region_size;
-        }
-
-        if (heap_rnd < kmap_rnd) {
-            kernel_map_region_start += heap_region_size;
-            kernel_map_region_end += heap_region_size;
-        } else {
-            heap_region_start += kernel_map_region_size;
-            heap_region_end += kernel_map_region_size;
-        }
-
-        if (stack_region_size) {
-            if (stack_rnd < kmap_rnd) {
-                kernel_map_region_start += stack_region_size;
-                kernel_map_region_end += stack_region_size;
-            } else {
-                stack_region_start += kernel_map_region_size;
-                stack_region_end += kernel_map_region_size;
-            }
-        }
-    }
-
-    // Set heap members
-    current_heap_end = heap_region_start;
-    max_heap_size = 0;
-    max_physical_memory_size = 0;
-
-    // Ensure that we regions inside our address space
-    auto IsInAddressSpace = [&](VAddr addr) {
-        return address_space_start <= addr && addr <= address_space_end;
-    };
-    ASSERT(IsInAddressSpace(alias_region_start));
-    ASSERT(IsInAddressSpace(alias_region_end));
-    ASSERT(IsInAddressSpace(heap_region_start));
-    ASSERT(IsInAddressSpace(heap_region_end));
-    ASSERT(IsInAddressSpace(stack_region_start));
-    ASSERT(IsInAddressSpace(stack_region_end));
-    ASSERT(IsInAddressSpace(kernel_map_region_start));
-    ASSERT(IsInAddressSpace(kernel_map_region_end));
-
-    // Ensure that we selected regions that don't overlap
-    const VAddr alias_start{alias_region_start};
-    const VAddr alias_last{alias_region_end - 1};
-    const VAddr heap_start{heap_region_start};
-    const VAddr heap_last{heap_region_end - 1};
-    const VAddr stack_start{stack_region_start};
-    const VAddr stack_last{stack_region_end - 1};
-    const VAddr kmap_start{kernel_map_region_start};
-    const VAddr kmap_last{kernel_map_region_end - 1};
-    ASSERT(alias_last < heap_start || heap_last < alias_start);
-    ASSERT(alias_last < stack_start || stack_last < alias_start);
-    ASSERT(alias_last < kmap_start || kmap_last < alias_start);
-    ASSERT(heap_last < stack_start || stack_last < heap_start);
-    ASSERT(heap_last < kmap_start || kmap_last < heap_start);
-
-    current_heap_addr = heap_region_start;
-    heap_capacity = 0;
-    physical_memory_usage = 0;
-    memory_pool = pool;
-
-    page_table_impl.Resize(address_space_width, PageBits);
-
-    return InitializeMemoryLayout(start, end);
-}
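The ASLR placement above draws one independent 2 MiB-aligned random offset per region and then shifts whichever region drew the later offset past the earlier one, so the regions can never overlap. A reduced two-region sketch of the same idea (hypothetical RNG in place of KSystemControl::GenerateRandomRange):

#include <cstdint>
#include <random>

using VAddr = std::uint64_t;
constexpr std::uint64_t RegionAlignment = 2 * 1024 * 1024;

// Pick an aligned offset in [0, remaining], as the patch does per region.
VAddr AlignedRandom(std::mt19937_64& rng, std::uint64_t remaining) {
    std::uniform_int_distribution<std::uint64_t> dist(0, remaining / RegionAlignment);
    return dist(rng) * RegionAlignment;
}

// Two regions sharing one allocation window: whichever drew the lower offset
// keeps it, and the other is pushed past that region's extent.
void PlaceTwo(VAddr alloc_start, std::uint64_t remaining, std::uint64_t a_size,
              std::uint64_t b_size, VAddr& a_start, VAddr& b_start, std::mt19937_64& rng) {
    const VAddr a_rnd = AlignedRandom(rng, remaining);
    const VAddr b_rnd = AlignedRandom(rng, remaining);
    a_start = alloc_start + a_rnd;
    b_start = alloc_start + b_rnd;
    if (a_rnd <= b_rnd) {
        b_start += a_size; // b lands after a's extent
    } else {
        a_start += b_size;
    }
}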
-
-ResultCode PageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state,
-                                     KMemoryPermission perm) {
-    std::lock_guard lock{page_table_lock};
-
-    const u64 size{num_pages * PageSize};
-
-    if (!CanContain(addr, size, state)) {
-        return ResultInvalidCurrentMemory;
-    }
-
-    if (IsRegionMapped(addr, size)) {
-        return ResultInvalidCurrentMemory;
-    }
-
-    KPageLinkedList page_linked_list;
-    CASCADE_CODE(
-        system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool));
-    CASCADE_CODE(Operate(addr, num_pages, page_linked_list, OperationType::MapGroup));
-
-    block_manager->Update(addr, num_pages, state, perm);
-
-    return RESULT_SUCCESS;
-}
-
-ResultCode PageTable::MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
-
-    const std::size_t num_pages{size / PageSize};
-
-    KMemoryState state{};
-    KMemoryPermission perm{};
-    CASCADE_CODE(CheckMemoryState(&state, &perm, nullptr, src_addr, size, KMemoryState::All,
-                                  KMemoryState::Normal, KMemoryPermission::Mask,
-                                  KMemoryPermission::ReadAndWrite, KMemoryAttribute::Mask,
-                                  KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
-
-    if (IsRegionMapped(dst_addr, size)) {
-        return ResultInvalidCurrentMemory;
-    }
-
-    KPageLinkedList page_linked_list;
-    AddRegionToPages(src_addr, num_pages, page_linked_list);
-
-    {
-        auto block_guard = detail::ScopeExit(
-            [&] { Operate(src_addr, num_pages, perm, OperationType::ChangePermissions); });
-
-        CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None,
-                             OperationType::ChangePermissions));
-        CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::None));
-
-        block_guard.Cancel();
-    }
-
-    block_manager->Update(src_addr, num_pages, state, KMemoryPermission::None,
-                          KMemoryAttribute::Locked);
-    block_manager->Update(dst_addr, num_pages, KMemoryState::AliasCode);
-
-    return RESULT_SUCCESS;
-}
-
-ResultCode PageTable::UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
-
-    if (!size) {
-        return RESULT_SUCCESS;
-    }
-
-    const std::size_t num_pages{size / PageSize};
-
-    CASCADE_CODE(CheckMemoryState(nullptr, nullptr, nullptr, src_addr, size, KMemoryState::All,
-                                  KMemoryState::Normal, KMemoryPermission::None,
-                                  KMemoryPermission::None, KMemoryAttribute::Mask,
-                                  KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
-
-    KMemoryState state{};
-    CASCADE_CODE(CheckMemoryState(
-        &state, nullptr, nullptr, dst_addr, PageSize, KMemoryState::FlagCanCodeAlias,
-        KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
-        KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
-    CASCADE_CODE(CheckMemoryState(dst_addr, size, KMemoryState::All, state, KMemoryPermission::None,
-                                  KMemoryPermission::None, KMemoryAttribute::Mask,
-                                  KMemoryAttribute::None));
-    CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
-
-    block_manager->Update(dst_addr, num_pages, KMemoryState::Free);
-    block_manager->Update(src_addr, num_pages, KMemoryState::Normal,
-                          KMemoryPermission::ReadAndWrite);
-
-    return RESULT_SUCCESS;
-}
-
-void PageTable::MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end) {
-    auto node{page_linked_list.Nodes().begin()};
-    PAddr map_addr{node->GetAddress()};
-    std::size_t src_num_pages{node->GetNumPages()};
-
-    block_manager->IterateForRange(start, end, [&](const KMemoryInfo& info) {
-        if (info.state != KMemoryState::Free) {
-            return;
-        }
-
-        std::size_t dst_num_pages{GetSizeInRange(info, start, end) / PageSize};
-        VAddr dst_addr{GetAddressInRange(info, start)};
-
-        while (dst_num_pages) {
-            if (!src_num_pages) {
-                node = std::next(node);
-                map_addr = node->GetAddress();
-                src_num_pages = node->GetNumPages();
-            }
-
-            const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)};
-            Operate(dst_addr, num_pages, KMemoryPermission::ReadAndWrite, OperationType::Map,
-                    map_addr);
-
-            dst_addr += num_pages * PageSize;
-            map_addr += num_pages * PageSize;
-            src_num_pages -= num_pages;
-            dst_num_pages -= num_pages;
-        }
-    });
-}
-
-ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
-
-    std::size_t mapped_size{};
-    const VAddr end_addr{addr + size};
-
-    block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
-        if (info.state != KMemoryState::Free) {
-            mapped_size += GetSizeInRange(info, addr, end_addr);
-        }
-    });
-
-    if (mapped_size == size) {
-        return RESULT_SUCCESS;
-    }
-
-    const std::size_t remaining_size{size - mapped_size};
-    const std::size_t remaining_pages{remaining_size / PageSize};
-
-    // Reserve the memory from the process resource limit.
-    KScopedResourceReservation memory_reservation(
-        system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
-        remaining_size);
-    if (!memory_reservation.Succeeded()) {
-        LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size);
-        return ResultResourceLimitedExceeded;
-    }
-
-    KPageLinkedList page_linked_list;
-
-    CASCADE_CODE(
-        system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages, memory_pool));
-
-    // We succeeded, so commit the memory reservation.
-    memory_reservation.Commit();
-
-    MapPhysicalMemory(page_linked_list, addr, end_addr);
-
-    physical_memory_usage += remaining_size;
-
-    const std::size_t num_pages{size / PageSize};
-    block_manager->Update(addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
-                          KMemoryAttribute::None, KMemoryState::Normal,
-                          KMemoryPermission::ReadAndWrite, KMemoryAttribute::None);
-
-    return RESULT_SUCCESS;
-}
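MapPhysicalMemory() follows a reserve/commit protocol: the resource-limit charge is taken out up front, and the scoped reservation releases it automatically unless Commit() is reached after every fallible step succeeds. A generic sketch of such a guard (hypothetical limit type, simpler than the kernel's KScopedResourceReservation):

#include <cstddef>

struct ResourceLimit {
    std::size_t used{}, cap{};
    bool Reserve(std::size_t n) { return used + n <= cap ? (used += n, true) : false; }
    void Release(std::size_t n) { used -= n; }
};

class ScopedReservation {
public:
    ScopedReservation(ResourceLimit& limit, std::size_t n)
        : limit_{limit}, n_{n}, ok_{limit.Reserve(n)} {}
    ~ScopedReservation() {
        if (ok_ && !committed_) {
            limit_.Release(n_); // rolls back on every early-return path
        }
    }
    bool Succeeded() const { return ok_; }
    void Commit() { committed_ = true; } // keep the charge on success

private:
    ResourceLimit& limit_;
    std::size_t n_;
    bool ok_;
    bool committed_{};
};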
-
-ResultCode PageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
-
-    const VAddr end_addr{addr + size};
-    ResultCode result{RESULT_SUCCESS};
-    std::size_t mapped_size{};
-
-    // Verify that the region can be unmapped
-    block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
-        if (info.state == KMemoryState::Normal) {
-            if (info.attribute != KMemoryAttribute::None) {
-                result = ResultInvalidCurrentMemory;
-                return;
-            }
-            mapped_size += GetSizeInRange(info, addr, end_addr);
-        } else if (info.state != KMemoryState::Free) {
-            result = ResultInvalidCurrentMemory;
-        }
-    });
-
-    if (result.IsError()) {
-        return result;
-    }
-
-    if (!mapped_size) {
-        return RESULT_SUCCESS;
-    }
-
-    CASCADE_CODE(UnmapMemory(addr, size));
-
-    auto process{system.Kernel().CurrentProcess()};
-    process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
-    physical_memory_usage -= mapped_size;
-
-    return RESULT_SUCCESS;
-}
-
-ResultCode PageTable::UnmapMemory(VAddr addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
-
-    const VAddr end_addr{addr + size};
-    ResultCode result{RESULT_SUCCESS};
-    KPageLinkedList page_linked_list;
-
-    // Unmap each region within the range
-    block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
-        if (info.state == KMemoryState::Normal) {
-            const std::size_t block_size{GetSizeInRange(info, addr, end_addr)};
-            const std::size_t block_num_pages{block_size / PageSize};
-            const VAddr block_addr{GetAddressInRange(info, addr)};
-
-            AddRegionToPages(block_addr, block_size / PageSize, page_linked_list);
-
-            if (result = Operate(block_addr, block_num_pages, KMemoryPermission::None,
-                                 OperationType::Unmap);
-                result.IsError()) {
-                return;
-            }
-        }
-    });
-
-    if (result.IsError()) {
-        return result;
-    }
-
-    const std::size_t num_pages{size / PageSize};
-    system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool);
-
-    block_manager->Update(addr, num_pages, KMemoryState::Free);
-
-    return RESULT_SUCCESS;
-}
-
-ResultCode PageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
-
-    KMemoryState src_state{};
-    CASCADE_CODE(CheckMemoryState(
-        &src_state, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias,
-        KMemoryState::FlagCanAlias, KMemoryPermission::Mask, KMemoryPermission::ReadAndWrite,
-        KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
-
-    if (IsRegionMapped(dst_addr, size)) {
-        return ResultInvalidCurrentMemory;
-    }
-
-    KPageLinkedList page_linked_list;
-    const std::size_t num_pages{size / PageSize};
-
-    AddRegionToPages(src_addr, num_pages, page_linked_list);
-
-    {
-        auto block_guard = detail::ScopeExit([&] {
-            Operate(src_addr, num_pages, KMemoryPermission::ReadAndWrite,
-                    OperationType::ChangePermissions);
-        });
-
-        CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None,
-                             OperationType::ChangePermissions));
-        CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::ReadAndWrite));
-
-        block_guard.Cancel();
-    }
-
-    block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::None,
-                          KMemoryAttribute::Locked);
-    block_manager->Update(dst_addr, num_pages, KMemoryState::Stack,
-                          KMemoryPermission::ReadAndWrite);
-
-    return RESULT_SUCCESS;
-}
-
-ResultCode PageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
-
-    KMemoryState src_state{};
-    CASCADE_CODE(CheckMemoryState(
-        &src_state, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias,
-        KMemoryState::FlagCanAlias, KMemoryPermission::Mask, KMemoryPermission::None,
-        KMemoryAttribute::Mask, KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
-
-    KMemoryPermission dst_perm{};
-    CASCADE_CODE(CheckMemoryState(nullptr, &dst_perm, nullptr, dst_addr, size, KMemoryState::All,
-                                  KMemoryState::Stack, KMemoryPermission::None,
-                                  KMemoryPermission::None, KMemoryAttribute::Mask,
-                                  KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
-
-    KPageLinkedList src_pages;
-    KPageLinkedList dst_pages;
-    const std::size_t num_pages{size / PageSize};
-
-    AddRegionToPages(src_addr, num_pages, src_pages);
-    AddRegionToPages(dst_addr, num_pages, dst_pages);
-
-    if (!dst_pages.IsEqual(src_pages)) {
-        return ResultInvalidMemoryRange;
-    }
-
-    {
-        auto block_guard = detail::ScopeExit([&] { MapPages(dst_addr, dst_pages, dst_perm); });
-
-        CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
-        CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::ReadAndWrite,
-                             OperationType::ChangePermissions));
-
-        block_guard.Cancel();
-    }
-
-    block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::ReadAndWrite);
-    block_manager->Update(dst_addr, num_pages, KMemoryState::Free);
-
-    return RESULT_SUCCESS;
-}
-
-ResultCode PageTable::MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
-                               KMemoryPermission perm) {
-    VAddr cur_addr{addr};
-
-    for (const auto& node : page_linked_list.Nodes()) {
-        if (const auto result{
-                Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
-            result.IsError()) {
-            const std::size_t num_pages{(addr - cur_addr) / PageSize};
-
-            ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)
-                       .IsSuccess());
-
-            return result;
-        }
-
-        cur_addr += node.GetNumPages() * PageSize;
-    }
-
-    return RESULT_SUCCESS;
-}
-
-ResultCode PageTable::MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
-                               KMemoryPermission perm) {
-    std::lock_guard lock{page_table_lock};
-
-    const std::size_t num_pages{page_linked_list.GetNumPages()};
-    const std::size_t size{num_pages * PageSize};
-
-    if (!CanContain(addr, size, state)) {
-        return ResultInvalidCurrentMemory;
-    }
-
-    if (IsRegionMapped(addr, num_pages * PageSize)) {
-        return ResultInvalidCurrentMemory;
-    }
-
-    CASCADE_CODE(MapPages(addr, page_linked_list, perm));
-
-    block_manager->Update(addr, num_pages, state, perm);
-
-    return RESULT_SUCCESS;
-}
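Map() and Unmap() above lean on a small scope-guard idiom: the rollback lambda runs automatically if any CASCADE_CODE path returns early, and Cancel() disarms it once every step has succeeded. A minimal stand-alone version of such a guard (a sketch, simpler than common/scope_exit.h):

#include <utility>

template <typename F>
class ScopeExit {
public:
    explicit ScopeExit(F&& f) : f_{std::forward<F>(f)} {}
    ~ScopeExit() {
        if (armed_) {
            f_(); // rollback path: runs on every early return
        }
    }
    void Cancel() {
        armed_ = false; // success path: keep the changes
    }

private:
    F f_;
    bool armed_{true};
};

// Usage mirroring Map(): revert the permission change unless mapping succeeds.
// auto guard = ScopeExit{[&] { RestorePermissions(); }};
// if (!DoMap()) return failure; // guard reverts here
// guard.Cancel();               // commit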
-
-ResultCode PageTable::SetCodeMemoryPermission(VAddr addr, std::size_t size,
-                                              KMemoryPermission perm) {
-
-    std::lock_guard lock{page_table_lock};
-
-    KMemoryState prev_state{};
-    KMemoryPermission prev_perm{};
-
-    CASCADE_CODE(CheckMemoryState(
-        &prev_state, &prev_perm, nullptr, addr, size, KMemoryState::FlagCode,
-        KMemoryState::FlagCode, KMemoryPermission::None, KMemoryPermission::None,
-        KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
-
-    KMemoryState state{prev_state};
-
-    // Ensure state is mutable if permission allows write
-    if ((perm & KMemoryPermission::Write) != KMemoryPermission::None) {
-        if (prev_state == KMemoryState::Code) {
-            state = KMemoryState::CodeData;
-        } else if (prev_state == KMemoryState::AliasCode) {
-            state = KMemoryState::AliasCodeData;
-        } else {
-            UNREACHABLE();
-        }
-    }
-
-    // Return early if there is nothing to change
-    if (state == prev_state && perm == prev_perm) {
-        return RESULT_SUCCESS;
-    }
-
-    if ((prev_perm & KMemoryPermission::Execute) != (perm & KMemoryPermission::Execute)) {
-        // Memory execution state is changing, invalidate CPU cache range
-        system.InvalidateCpuInstructionCacheRange(addr, size);
-    }
-
-    const std::size_t num_pages{size / PageSize};
-    const OperationType operation{(perm & KMemoryPermission::Execute) != KMemoryPermission::None
-                                      ? OperationType::ChangePermissionsAndRefresh
-                                      : OperationType::ChangePermissions};
-
-    CASCADE_CODE(Operate(addr, num_pages, perm, operation));
-
-    block_manager->Update(addr, num_pages, state, perm);
-
-    return RESULT_SUCCESS;
-}
-
-KMemoryInfo PageTable::QueryInfoImpl(VAddr addr) {
-    std::lock_guard lock{page_table_lock};
-
-    return block_manager->FindBlock(addr).GetMemoryInfo();
-}
-
-KMemoryInfo PageTable::QueryInfo(VAddr addr) {
-    if (!Contains(addr, 1)) {
-        return {address_space_end, 0 - address_space_end, KMemoryState::Inaccessible,
-                KMemoryPermission::None, KMemoryAttribute::None, KMemoryPermission::None};
-    }
-
-    return QueryInfoImpl(addr);
-}
-
-ResultCode PageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
-    std::lock_guard lock{page_table_lock};
-
-    KMemoryState state{};
-    KMemoryAttribute attribute{};
-
-    CASCADE_CODE(CheckMemoryState(
-        &state, nullptr, &attribute, addr, size,
-        KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
-        KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, KMemoryPermission::Mask,
-        KMemoryPermission::ReadAndWrite, KMemoryAttribute::Mask, KMemoryAttribute::None,
-        KMemoryAttribute::IpcAndDeviceMapped));
-
-    block_manager->Update(addr, size / PageSize, state, perm, attribute | KMemoryAttribute::Locked);
-
-    return RESULT_SUCCESS;
-}
-
-ResultCode PageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
-
-    KMemoryState state{};
-
-    CASCADE_CODE(
-        CheckMemoryState(&state, nullptr, nullptr, addr, size,
-                         KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
-                         KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
-                         KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::Mask,
-                         KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
-
-    block_manager->Update(addr, size / PageSize, state, KMemoryPermission::ReadAndWrite);
-
-    return RESULT_SUCCESS;
-}
-
-ResultCode PageTable::SetMemoryAttribute(VAddr addr, std::size_t size, KMemoryAttribute mask,
-                                         KMemoryAttribute value) {
-    std::lock_guard lock{page_table_lock};
-
-    KMemoryState state{};
-    KMemoryPermission perm{};
-    KMemoryAttribute attribute{};
-
-    CASCADE_CODE(CheckMemoryState(
-        &state, &perm, &attribute, addr, size, KMemoryState::FlagCanChangeAttribute,
-        KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
-        KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
-        KMemoryAttribute::DeviceSharedAndUncached));
-
-    attribute = attribute & ~mask;
-    attribute = attribute | (mask & value);
-
-    block_manager->Update(addr, size / PageSize, state, perm, attribute);
-
-    return RESULT_SUCCESS;
-}
-
-ResultCode PageTable::SetHeapCapacity(std::size_t new_heap_capacity) {
-    std::lock_guard lock{page_table_lock};
-    heap_capacity = new_heap_capacity;
-    return RESULT_SUCCESS;
-}
-
-ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {
-
-    if (size > heap_region_end - heap_region_start) {
-        return ResultOutOfMemory;
-    }
-
-    const u64 previous_heap_size{GetHeapSize()};
-
-    UNIMPLEMENTED_IF_MSG(previous_heap_size > size, "Heap shrink is unimplemented");
-
-    // Increase the heap size
-    {
-        std::lock_guard lock{page_table_lock};
-
-        const u64 delta{size - previous_heap_size};
-
-        // Reserve memory for the heap extension.
-        KScopedResourceReservation memory_reservation(
-            system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
-            delta);
-
-        if (!memory_reservation.Succeeded()) {
-            LOG_ERROR(Kernel, "Could not reserve heap extension of size {:X} bytes", delta);
-            return ResultResourceLimitedExceeded;
-        }
-
-        KPageLinkedList page_linked_list;
-        const std::size_t num_pages{delta / PageSize};
-
-        CASCADE_CODE(
-            system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool));
-
-        if (IsRegionMapped(current_heap_addr, delta)) {
-            return ResultInvalidCurrentMemory;
-        }
-
-        CASCADE_CODE(
-            Operate(current_heap_addr, num_pages, page_linked_list, OperationType::MapGroup));
-
-        // Succeeded in allocation, commit the resource reservation
-        memory_reservation.Commit();
-
-        block_manager->Update(current_heap_addr, num_pages, KMemoryState::Normal,
-                              KMemoryPermission::ReadAndWrite);
-
-        current_heap_addr = heap_region_start + size;
-    }
-
-    return MakeResult(heap_region_start);
-}
-
-ResultVal<VAddr> PageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
-                                                 bool is_map_only, VAddr region_start,
-                                                 std::size_t region_num_pages, KMemoryState state,
-                                                 KMemoryPermission perm, PAddr map_addr) {
-    std::lock_guard lock{page_table_lock};
-
-    if (!CanContain(region_start, region_num_pages * PageSize, state)) {
-        return ResultInvalidCurrentMemory;
-    }
-
-    if (region_num_pages <= needed_num_pages) {
-        return ResultOutOfMemory;
-    }
-
-    const VAddr addr{
-        AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)};
-    if (!addr) {
-        return ResultOutOfMemory;
-    }
-
-    if (is_map_only) {
-        CASCADE_CODE(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
-    } else {
-        KPageLinkedList page_group;
-        CASCADE_CODE(
-            system.Kernel().MemoryManager().Allocate(page_group, needed_num_pages, memory_pool));
-        CASCADE_CODE(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
-    }
-
-    block_manager->Update(addr, needed_num_pages, state, perm);
-
-    return MakeResult(addr);
-}
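SetMemoryAttribute() updates only the attribute bits selected by the mask and leaves the rest untouched: attribute = (attribute & ~mask) | (mask & value). A small worked illustration with plain flag bits (hypothetical values):

#include <cstdint>

constexpr std::uint32_t UpdateMasked(std::uint32_t attr, std::uint32_t mask,
                                     std::uint32_t value) {
    // Clear the masked bits, then splice in the masked part of the new value.
    return (attr & ~mask) | (mask & value);
}

// attr 0b1010 with mask 0b0110 and value 0b0100: bit 1 is cleared, bit 2 is
// set, and the unmasked bit 3 survives untouched.
static_assert(UpdateMasked(0b1010, 0b0110, 0b0100) == 0b1100);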
-
-ResultCode PageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
-
-    KMemoryPermission perm{};
-    if (const ResultCode result{CheckMemoryState(
-            nullptr, &perm, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
-            KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
-            KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
-            KMemoryAttribute::DeviceSharedAndUncached)};
-        result.IsError()) {
-        return result;
-    }
-
-    block_manager->UpdateLock(
-        addr, size / PageSize,
-        [](KMemoryBlockManager::iterator block, KMemoryPermission perm) {
-            block->ShareToDevice(perm);
-        },
-        perm);
-
-    return RESULT_SUCCESS;
-}
-
-ResultCode PageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
-
-    KMemoryPermission perm{};
-    if (const ResultCode result{CheckMemoryState(
-            nullptr, &perm, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
-            KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
-            KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
-            KMemoryAttribute::DeviceSharedAndUncached)};
-        result.IsError()) {
-        return result;
-    }
-
-    block_manager->UpdateLock(
-        addr, size / PageSize,
-        [](KMemoryBlockManager::iterator block, KMemoryPermission perm) {
-            block->UnshareToDevice(perm);
-        },
-        perm);
-
-    return RESULT_SUCCESS;
-}
-
-ResultCode PageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
-    block_manager = std::make_unique<KMemoryBlockManager>(start, end);
-
-    return RESULT_SUCCESS;
-}
-
-bool PageTable::IsRegionMapped(VAddr address, u64 size) {
-    return CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
-                            KMemoryPermission::Mask, KMemoryPermission::None,
-                            KMemoryAttribute::Mask, KMemoryAttribute::None,
-                            KMemoryAttribute::IpcAndDeviceMapped)
-        .IsError();
-}
-
-bool PageTable::IsRegionContiguous(VAddr addr, u64 size) const {
-    auto start_ptr = system.Memory().GetPointer(addr);
-    for (u64 offset{}; offset < size; offset += PageSize) {
-        if (start_ptr != system.Memory().GetPointer(addr + offset)) {
-            return false;
-        }
-        start_ptr += PageSize;
-    }
-    return true;
-}
-
-void PageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
-                                 KPageLinkedList& page_linked_list) {
-    VAddr addr{start};
-    while (addr < start + (num_pages * PageSize)) {
-        const PAddr paddr{GetPhysicalAddr(addr)};
-        if (!paddr) {
-            UNREACHABLE();
-        }
-        page_linked_list.AddBlock(paddr, 1);
-        addr += PageSize;
-    }
-}
-
-VAddr PageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages,
-                                       u64 needed_num_pages, std::size_t align) {
-    if (is_aslr_enabled) {
-        UNIMPLEMENTED();
-    }
-    return block_manager->FindFreeArea(start, region_num_pages, needed_num_pages, align, 0,
-                                       IsKernel() ? 1 : 4);
-}
-
-ResultCode PageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLinkedList& page_group,
-                              OperationType operation) {
-    std::lock_guard lock{page_table_lock};
-
-    ASSERT(Common::IsAligned(addr, PageSize));
-    ASSERT(num_pages > 0);
-    ASSERT(num_pages == page_group.GetNumPages());
-
-    for (const auto& node : page_group.Nodes()) {
-        const std::size_t size{node.GetNumPages() * PageSize};
-
-        switch (operation) {
-        case OperationType::MapGroup:
-            system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress());
-            break;
-        default:
-            UNREACHABLE();
-        }
-
-        addr += size;
-    }
-
-    return RESULT_SUCCESS;
-}
-
-ResultCode PageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
-                              OperationType operation, PAddr map_addr) {
-    std::lock_guard lock{page_table_lock};
-
-    ASSERT(num_pages > 0);
-    ASSERT(Common::IsAligned(addr, PageSize));
-    ASSERT(ContainsPages(addr, num_pages));
-
-    switch (operation) {
-    case OperationType::Unmap:
-        system.Memory().UnmapRegion(page_table_impl, addr, num_pages * PageSize);
-        break;
-    case OperationType::Map: {
-        ASSERT(map_addr);
-        ASSERT(Common::IsAligned(map_addr, PageSize));
-        system.Memory().MapMemoryRegion(page_table_impl, addr, num_pages * PageSize, map_addr);
-        break;
-    }
-    case OperationType::ChangePermissions:
-    case OperationType::ChangePermissionsAndRefresh:
-        break;
-    default:
-        UNREACHABLE();
-    }
-    return RESULT_SUCCESS;
-}
-
-constexpr VAddr PageTable::GetRegionAddress(KMemoryState state) const {
-    switch (state) {
-    case KMemoryState::Free:
-    case KMemoryState::Kernel:
-        return address_space_start;
-    case KMemoryState::Normal:
-        return heap_region_start;
-    case KMemoryState::Ipc:
-    case KMemoryState::NonSecureIpc:
-    case KMemoryState::NonDeviceIpc:
-        return alias_region_start;
-    case KMemoryState::Stack:
-        return stack_region_start;
-    case KMemoryState::Io:
-    case KMemoryState::Static:
-    case KMemoryState::ThreadLocal:
-        return kernel_map_region_start;
-    case KMemoryState::Shared:
-    case KMemoryState::AliasCode:
-    case KMemoryState::AliasCodeData:
-    case KMemoryState::Transferred:
-    case KMemoryState::SharedTransferred:
-    case KMemoryState::SharedCode:
-    case KMemoryState::GeneratedCode:
-    case KMemoryState::CodeOut:
-        return alias_code_region_start;
-    case KMemoryState::Code:
-    case KMemoryState::CodeData:
-        return code_region_start;
-    default:
-        UNREACHABLE();
-        return {};
-    }
-}
-
-constexpr std::size_t PageTable::GetRegionSize(KMemoryState state) const {
-    switch (state) {
-    case KMemoryState::Free:
-    case KMemoryState::Kernel:
-        return address_space_end - address_space_start;
-    case KMemoryState::Normal:
-        return heap_region_end - heap_region_start;
-    case KMemoryState::Ipc:
-    case KMemoryState::NonSecureIpc:
-    case KMemoryState::NonDeviceIpc:
-        return alias_region_end - alias_region_start;
-    case KMemoryState::Stack:
-        return stack_region_end - stack_region_start;
-    case KMemoryState::Io:
-    case KMemoryState::Static:
-    case KMemoryState::ThreadLocal:
-        return kernel_map_region_end - kernel_map_region_start;
-    case KMemoryState::Shared:
-    case KMemoryState::AliasCode:
-    case KMemoryState::AliasCodeData:
-    case KMemoryState::Transferred:
-    case KMemoryState::SharedTransferred:
-    case KMemoryState::SharedCode:
-    case KMemoryState::GeneratedCode:
-    case KMemoryState::CodeOut:
-        return alias_code_region_end - alias_code_region_start;
-    case KMemoryState::Code:
-    case KMemoryState::CodeData:
-        return code_region_end - code_region_start;
-    default:
-        UNREACHABLE();
-        return {};
-    }
-}
-
-constexpr bool PageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const {
-    const VAddr end{addr + size};
-    const VAddr last{end - 1};
-    const VAddr region_start{GetRegionAddress(state)};
-    const std::size_t region_size{GetRegionSize(state)};
-    const bool is_in_region{region_start <= addr && addr < end &&
-                            last <= region_start + region_size - 1};
-    const bool is_in_heap{!(end <= heap_region_start || heap_region_end <= addr)};
-    const bool is_in_alias{!(end <= alias_region_start || alias_region_end <= addr)};
-
-    switch (state) {
-    case KMemoryState::Free:
-    case KMemoryState::Kernel:
-        return is_in_region;
-    case KMemoryState::Io:
-    case KMemoryState::Static:
-    case KMemoryState::Code:
-    case KMemoryState::CodeData:
-    case KMemoryState::Shared:
-    case KMemoryState::AliasCode:
-    case KMemoryState::AliasCodeData:
-    case KMemoryState::Stack:
-    case KMemoryState::ThreadLocal:
-    case KMemoryState::Transferred:
-    case KMemoryState::SharedTransferred:
-    case KMemoryState::SharedCode:
-    case KMemoryState::GeneratedCode:
-    case KMemoryState::CodeOut:
-        return is_in_region && !is_in_heap && !is_in_alias;
-    case KMemoryState::Normal:
-        ASSERT(is_in_heap);
-        return is_in_region && !is_in_alias;
-    case KMemoryState::Ipc:
-    case KMemoryState::NonSecureIpc:
-    case KMemoryState::NonDeviceIpc:
-        ASSERT(is_in_alias);
-        return is_in_region && !is_in_heap;
-    default:
-        return false;
-    }
-}
-
-constexpr ResultCode PageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
-                                                 KMemoryState state, KMemoryPermission perm_mask,
-                                                 KMemoryPermission perm, KMemoryAttribute attr_mask,
-                                                 KMemoryAttribute attr) const {
-    // Validate the states match expectation
-    if ((info.state & state_mask) != state) {
-        return ResultInvalidCurrentMemory;
-    }
-    if ((info.perm & perm_mask) != perm) {
-        return ResultInvalidCurrentMemory;
-    }
-    if ((info.attribute & attr_mask) != attr) {
-        return ResultInvalidCurrentMemory;
-    }
-
-    return RESULT_SUCCESS;
-}
-
-ResultCode PageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
-                                       KMemoryAttribute* out_attr, VAddr addr, std::size_t size,
-                                       KMemoryState state_mask, KMemoryState state,
-                                       KMemoryPermission perm_mask, KMemoryPermission perm,
-                                       KMemoryAttribute attr_mask, KMemoryAttribute attr,
-                                       KMemoryAttribute ignore_attr) {
-    std::lock_guard lock{page_table_lock};
-
-    // Get information about the first block
-    const VAddr last_addr{addr + size - 1};
-    KMemoryBlockManager::const_iterator it{block_manager->FindIterator(addr)};
-    KMemoryInfo info{it->GetMemoryInfo()};
-
-    // Validate all blocks in the range have correct state
-    const KMemoryState first_state{info.state};
-    const KMemoryPermission first_perm{info.perm};
-    const KMemoryAttribute first_attr{info.attribute};
-
-    while (true) {
-        // Validate the current block
-        if (!(info.state == first_state)) {
-            return ResultInvalidCurrentMemory;
-        }
-        if (!(info.perm == first_perm)) {
-            return ResultInvalidCurrentMemory;
-        }
-        if (!((info.attribute | static_cast<KMemoryAttribute>(ignore_attr)) ==
-              (first_attr | static_cast<KMemoryAttribute>(ignore_attr)))) {
-            return ResultInvalidCurrentMemory;
-        }
-
-        // Validate against the provided masks
-        CASCADE_CODE(CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
-
-        // Break once we're done
-        if (last_addr <= info.GetLastAddress()) {
-            break;
-        }
-
-        // Advance our iterator
-        it++;
-        ASSERT(it != block_manager->cend());
-        info = it->GetMemoryInfo();
-    }
-
-    // Write output state
-    if (out_state) {
-        *out_state = first_state;
-    }
-    if (out_perm) {
-        *out_perm = first_perm;
-    }
-    if (out_attr) {
-        *out_attr = first_attr & static_cast<KMemoryAttribute>(~ignore_attr);
-    }
-
-    return RESULT_SUCCESS;
-}
-
-} // namespace Kernel::Memory
diff --git a/src/core/hle/kernel/memory/page_table.h b/src/core/hle/kernel/memory/page_table.h
deleted file mode 100644
index a9e850e01..000000000
--- a/src/core/hle/kernel/memory/page_table.h
+++ /dev/null
@@ -1,281 +0,0 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <memory>
-#include <mutex>
-
-#include "common/common_types.h"
-#include "common/page_table.h"
-#include "core/file_sys/program_metadata.h"
-#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_memory_manager.h"
-#include "core/hle/result.h"
-
-namespace Core {
-class System;
-}
-
-namespace Kernel {
-class KMemoryBlockManager;
-}
-
-namespace Kernel::Memory {
-
-class PageTable final : NonCopyable {
-public:
-    explicit PageTable(Core::System& system);
-
-    ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
-                                    VAddr code_addr, std::size_t code_size,
-                                    KMemoryManager::Pool pool);
-    ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
-                              KMemoryPermission perm);
-    ResultCode MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
-    ResultCode UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
-    ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
-    ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size);
-    ResultCode UnmapMemory(VAddr addr, std::size_t size);
-    ResultCode Map(VAddr dst_addr, VAddr src_addr, std::size_t size);
-    ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size);
-    ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
-                        KMemoryPermission perm);
-    ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, KMemoryPermission perm);
-    KMemoryInfo QueryInfo(VAddr addr);
-    ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
-    ResultCode ResetTransferMemory(VAddr addr, std::size_t size);
-    ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, KMemoryAttribute mask,
-                                  KMemoryAttribute value);
-    ResultCode SetHeapCapacity(std::size_t new_heap_capacity);
-    ResultVal<VAddr> SetHeapSize(std::size_t size);
-    ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
-                                          bool is_map_only, VAddr region_start,
-                                          std::size_t region_num_pages, KMemoryState state,
-                                          KMemoryPermission perm, PAddr map_addr = 0);
-    ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
-    ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
-
-    Common::PageTable& PageTableImpl() {
-        return page_table_impl;
-    }
-
-    const Common::PageTable& PageTableImpl() const {
-        return page_table_impl;
-    }
-
-private:
-    enum class OperationType : u32 {
-        Map,
-        MapGroup,
-        Unmap,
-        ChangePermissions,
-        ChangePermissionsAndRefresh,
-    };
-
-    static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = KMemoryAttribute::DontCareMask |
-                                                                KMemoryAttribute::IpcLocked |
-                                                                KMemoryAttribute::DeviceShared;
-
-    ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
-    ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
-                        KMemoryPermission perm);
-    void MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end);
-    bool IsRegionMapped(VAddr address, u64 size);
-    bool IsRegionContiguous(VAddr addr, u64 size) const;
size) const; - void AddRegionToPages(VAddr start, std::size_t num_pages, KPageLinkedList& page_linked_list); - KMemoryInfo QueryInfoImpl(VAddr addr); - VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages, - std::size_t align); - ResultCode Operate(VAddr addr, std::size_t num_pages, const KPageLinkedList& page_group, - OperationType operation); - ResultCode Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, - OperationType operation, PAddr map_addr = 0); - constexpr VAddr GetRegionAddress(KMemoryState state) const; - constexpr std::size_t GetRegionSize(KMemoryState state) const; - constexpr bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const; - - constexpr ResultCode CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, - KMemoryState state, KMemoryPermission perm_mask, - KMemoryPermission perm, KMemoryAttribute attr_mask, - KMemoryAttribute attr) const; - ResultCode CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, - KMemoryAttribute* out_attr, VAddr addr, std::size_t size, - KMemoryState state_mask, KMemoryState state, - KMemoryPermission perm_mask, KMemoryPermission perm, - KMemoryAttribute attr_mask, KMemoryAttribute attr, - KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr); - ResultCode CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask, - KMemoryState state, KMemoryPermission perm_mask, - KMemoryPermission perm, KMemoryAttribute attr_mask, - KMemoryAttribute attr, - KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) { - return CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask, - perm, attr_mask, attr, ignore_attr); - } - - std::recursive_mutex page_table_lock; - std::unique_ptr block_manager; - -public: - constexpr VAddr GetAddressSpaceStart() const { - return address_space_start; - } - constexpr VAddr GetAddressSpaceEnd() const { - return address_space_end; - } - constexpr std::size_t GetAddressSpaceSize() const { - return address_space_end - address_space_start; - } - constexpr VAddr GetHeapRegionStart() const { - return heap_region_start; - } - constexpr VAddr GetHeapRegionEnd() const { - return heap_region_end; - } - constexpr std::size_t GetHeapRegionSize() const { - return heap_region_end - heap_region_start; - } - constexpr VAddr GetAliasRegionStart() const { - return alias_region_start; - } - constexpr VAddr GetAliasRegionEnd() const { - return alias_region_end; - } - constexpr std::size_t GetAliasRegionSize() const { - return alias_region_end - alias_region_start; - } - constexpr VAddr GetStackRegionStart() const { - return stack_region_start; - } - constexpr VAddr GetStackRegionEnd() const { - return stack_region_end; - } - constexpr std::size_t GetStackRegionSize() const { - return stack_region_end - stack_region_start; - } - constexpr VAddr GetKernelMapRegionStart() const { - return kernel_map_region_start; - } - constexpr VAddr GetKernelMapRegionEnd() const { - return kernel_map_region_end; - } - constexpr VAddr GetCodeRegionStart() const { - return code_region_start; - } - constexpr VAddr GetCodeRegionEnd() const { - return code_region_end; - } - constexpr VAddr GetAliasCodeRegionStart() const { - return alias_code_region_start; - } - constexpr VAddr GetAliasCodeRegionSize() const { - return alias_code_region_end - alias_code_region_start; - } - constexpr std::size_t GetAddressSpaceWidth() const { - return address_space_width; - } - constexpr std::size_t GetHeapSize() { - return current_heap_addr - 
heap_region_start; - } - constexpr std::size_t GetTotalHeapSize() { - return GetHeapSize() + physical_memory_usage; - } - constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const { - return address_space_start <= address && address + size - 1 <= address_space_end - 1; - } - constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const { - return alias_region_start > address || address + size - 1 > alias_region_end - 1; - } - constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const { - return stack_region_start > address || address + size - 1 > stack_region_end - 1; - } - constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const { - return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1; - } - constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const { - return address + size > heap_region_start && heap_region_end > address; - } - constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const { - return address + size > alias_region_start && alias_region_end > address; - } - constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const { - if (IsInvalidRegion(address, size)) { - return true; - } - if (IsInsideHeapRegion(address, size)) { - return true; - } - if (IsInsideAliasRegion(address, size)) { - return true; - } - return {}; - } - constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const { - return !IsOutsideASLRRegion(address, size); - } - constexpr PAddr GetPhysicalAddr(VAddr addr) { - return page_table_impl.backing_addr[addr >> PageBits] + addr; - } - -private: - constexpr bool Contains(VAddr addr) const { - return address_space_start <= addr && addr <= address_space_end - 1; - } - constexpr bool Contains(VAddr addr, std::size_t size) const { - return address_space_start <= addr && addr < addr + size && - addr + size - 1 <= address_space_end - 1; - } - constexpr bool IsKernel() const { - return is_kernel; - } - constexpr bool IsAslrEnabled() const { - return is_aslr_enabled; - } - - constexpr std::size_t GetNumGuardPages() const { - return IsKernel() ? 
1 : 4; - } - - constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const { - return (address_space_start <= addr) && - (num_pages <= (address_space_end - address_space_start) / PageSize) && - (addr + num_pages * PageSize - 1 <= address_space_end - 1); - } - -private: - VAddr address_space_start{}; - VAddr address_space_end{}; - VAddr heap_region_start{}; - VAddr heap_region_end{}; - VAddr current_heap_end{}; - VAddr alias_region_start{}; - VAddr alias_region_end{}; - VAddr stack_region_start{}; - VAddr stack_region_end{}; - VAddr kernel_map_region_start{}; - VAddr kernel_map_region_end{}; - VAddr code_region_start{}; - VAddr code_region_end{}; - VAddr alias_code_region_start{}; - VAddr alias_code_region_end{}; - VAddr current_heap_addr{}; - - std::size_t heap_capacity{}; - std::size_t physical_memory_usage{}; - std::size_t max_heap_size{}; - std::size_t max_physical_memory_size{}; - std::size_t address_space_width{}; - - bool is_kernel{}; - bool is_aslr_enabled{}; - - KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application}; - - Common::PageTable page_table_impl; - - Core::System& system; -}; - -} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index e0359eb3c..73b85d6f9 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -15,13 +15,13 @@ #include "core/file_sys/program_metadata.h" #include "core/hle/kernel/code_set.h" #include "core/hle/kernel/k_memory_block_manager.h" +#include "core/hle/kernel/k_page_table.h" #include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scoped_resource_reservation.h" #include "core/hle/kernel/k_slab_heap.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/svc_results.h" #include "core/hle/lock.h" @@ -464,9 +464,9 @@ bool Process::IsSignaled() const { } Process::Process(Core::System& system) - : KSynchronizationObject{system.Kernel()}, - page_table{std::make_unique(system)}, handle_table{system.Kernel()}, - address_arbiter{system}, condition_var{system}, state_lock{system.Kernel()}, system{system} {} + : KSynchronizationObject{system.Kernel()}, page_table{std::make_unique(system)}, + handle_table{system.Kernel()}, address_arbiter{system}, condition_var{system}, + state_lock{system.Kernel()}, system{system} {} Process::~Process() = default; diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h index 320b0f347..45eefb90e 100644 --- a/src/core/hle/kernel/process.h +++ b/src/core/hle/kernel/process.h @@ -29,16 +29,13 @@ class ProgramMetadata; namespace Kernel { class KernelCore; +class KPageTable; class KResourceLimit; class KThread; class TLSPage; struct CodeSet; -namespace Memory { -class PageTable; -} - enum class MemoryRegion : u16 { APPLICATION = 1, SYSTEM = 2, @@ -104,12 +101,12 @@ public: } /// Gets a reference to the process' page table. - Memory::PageTable& PageTable() { + KPageTable& PageTable() { return *page_table; } /// Gets const a reference to the process' page table. 
-    const Memory::PageTable& PageTable() const {
+    const KPageTable& PageTable() const {
         return *page_table;
     }
 
@@ -385,7 +382,7 @@ private:
     ResultCode AllocateMainThreadStack(std::size_t stack_size);
 
     /// Memory manager for this process
-    std::unique_ptr<Memory::PageTable> page_table;
+    std::unique_ptr<KPageTable> page_table;
 
     /// Current status of the process
     ProcessStatus status{};
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
index 7c567049e..3fc326eab 100644
--- a/src/core/hle/kernel/process_capability.cpp
+++ b/src/core/hle/kernel/process_capability.cpp
@@ -7,7 +7,7 @@
 #include "common/bit_util.h"
 #include "common/logging/log.h"
 #include "core/hle/kernel/handle_table.h"
-#include "core/hle/kernel/memory/page_table.h"
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/process_capability.h"
 #include "core/hle/kernel/svc_results.h"
 
@@ -69,7 +69,7 @@ u32 GetFlagBitOffset(CapabilityType type) {
 
 ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
                                                            std::size_t num_capabilities,
-                                                           Memory::PageTable& page_table) {
+                                                           KPageTable& page_table) {
     Clear();
 
     // Allow all cores and priorities.
@@ -82,7 +82,7 @@ ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabiliti
 
 ResultCode ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
                                                          std::size_t num_capabilities,
-                                                         Memory::PageTable& page_table) {
+                                                         KPageTable& page_table) {
     Clear();
 
     return ParseCapabilities(capabilities, num_capabilities, page_table);
@@ -108,7 +108,7 @@ void ProcessCapabilities::InitializeForMetadatalessProcess() {
 
 ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
                                                   std::size_t num_capabilities,
-                                                  Memory::PageTable& page_table) {
+                                                  KPageTable& page_table) {
     u32 set_flags = 0;
     u32 set_svc_bits = 0;
 
@@ -155,7 +155,7 @@ ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
 }
 
 ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits,
-                                                          u32 flag, Memory::PageTable& page_table) {
+                                                          u32 flag, KPageTable& page_table) {
     const auto type = GetCapabilityType(flag);
 
     if (type == CapabilityType::Unset) {
@@ -293,12 +293,12 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags)
 }
 
 ResultCode ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
-                                                       Memory::PageTable& page_table) {
+                                                       KPageTable& page_table) {
     // TODO(Lioncache): Implement once the memory manager can handle this.
     return RESULT_SUCCESS;
 }
 
-ResultCode ProcessCapabilities::HandleMapIOFlags(u32 flags, Memory::PageTable& page_table) {
+ResultCode ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) {
     // TODO(Lioncache): Implement once the memory manager can handle this.
     return RESULT_SUCCESS;
 }
diff --git a/src/core/hle/kernel/process_capability.h b/src/core/hle/kernel/process_capability.h
index ea9d12c16..73ad197fa 100644
--- a/src/core/hle/kernel/process_capability.h
+++ b/src/core/hle/kernel/process_capability.h
@@ -12,9 +12,7 @@ union ResultCode;
 
 namespace Kernel {
 
-namespace Memory {
-class PageTable;
-}
+class KPageTable;
 
 /// The possible types of programs that may be indicated
 /// by the program type capability descriptor.
@@ -90,7 +88,7 @@ public:
     /// otherwise, an error code upon failure.
     ///
     ResultCode InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
-                                          Memory::PageTable& page_table);
+                                          KPageTable& page_table);
 
     /// Initializes this process capabilities instance for a userland process.
     ///
@@ -103,7 +101,7 @@ public:
     /// otherwise, an error code upon failure.
     ///
     ResultCode InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
-                                        Memory::PageTable& page_table);
+                                        KPageTable& page_table);
 
     /// Initializes this process capabilities instance for a process that does not
     /// have any metadata to parse.
@@ -189,7 +187,7 @@ private:
     /// @return RESULT_SUCCESS if no errors occur, otherwise an error code.
     ///
     ResultCode ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
-                                 Memory::PageTable& page_table);
+                                 KPageTable& page_table);
 
     /// Attempts to parse a capability descriptor that is only represented by a
     /// single flag set.
@@ -204,7 +202,7 @@ private:
     /// @return RESULT_SUCCESS if no errors occurred, otherwise an error code.
     ///
     ResultCode ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
-                                         Memory::PageTable& page_table);
+                                         KPageTable& page_table);
 
     /// Clears the internal state of this process capability instance. Necessary,
     /// to have a sane starting point due to us allowing running executables without
@@ -228,10 +226,10 @@ private:
     ResultCode HandleSyscallFlags(u32& set_svc_bits, u32 flags);
 
     /// Handles flags related to mapping physical memory pages.
-    ResultCode HandleMapPhysicalFlags(u32 flags, u32 size_flags, Memory::PageTable& page_table);
+    ResultCode HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table);
 
     /// Handles flags related to mapping IO pages.
-    ResultCode HandleMapIOFlags(u32 flags, Memory::PageTable& page_table);
+    ResultCode HandleMapIOFlags(u32 flags, KPageTable& page_table);
 
     /// Handles flags related to the interrupt capability flags.
     ResultCode HandleInterruptFlags(u32 flags);
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 4ffd65d33..cc8fa6576 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -29,6 +29,7 @@
 #include "core/hle/kernel/k_event.h"
 #include "core/hle/kernel/k_memory_block.h"
 #include "core/hle/kernel/k_memory_layout.h"
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_readable_event.h"
 #include "core/hle/kernel/k_resource_limit.h"
 #include "core/hle/kernel/k_scheduler.h"
@@ -39,7 +40,6 @@
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/k_writable_event.h"
 #include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/physical_core.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/svc.h"
@@ -67,8 +67,8 @@ constexpr bool IsValidAddressRange(VAddr address, u64 size) {
 // Helper function that performs the common sanity checks for svcMapMemory
 // and svcUnmapMemory. This is doable, as both functions perform their sanitizing
 // in the same order.
-ResultCode MapUnmapMemorySanityChecks(const Memory::PageTable& manager, VAddr dst_addr,
-                                      VAddr src_addr, u64 size) {
+ResultCode MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAddr src_addr,
+                                      u64 size) {
     if (!Common::Is4KBAligned(dst_addr)) {
         LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
         return ResultInvalidAddress;
diff --git a/src/core/hle/kernel/transfer_memory.cpp b/src/core/hle/kernel/transfer_memory.cpp
index b92559616..cad063e4d 100644
--- a/src/core/hle/kernel/transfer_memory.cpp
+++ b/src/core/hle/kernel/transfer_memory.cpp
@@ -2,9 +2,9 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_resource_limit.h"
 #include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/transfer_memory.h"
 #include "core/hle/result.h"
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index b42184a3b..d111c1357 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -11,8 +11,8 @@
 #include "common/scope_exit.h"
 #include "core/core.h"
 #include "core/hle/ipc_helpers.h"
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_system_control.h"
-#include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/svc_results.h"
 #include "core/hle/service/ldr/ldr.h"
@@ -287,8 +287,7 @@ public:
         rb.Push(RESULT_SUCCESS);
     }
 
-    bool ValidateRegionForMap(Kernel::Memory::PageTable& page_table, VAddr start,
-                              std::size_t size) const {
+    bool ValidateRegionForMap(Kernel::KPageTable& page_table, VAddr start, std::size_t size) const {
         constexpr std::size_t padding_size{4 * Kernel::PageSize};
         const auto start_info{page_table.QueryInfo(start - 1)};
 
@@ -309,7 +308,7 @@ public:
         return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize());
     }
 
-    VAddr GetRandomMapRegion(const Kernel::Memory::PageTable& page_table, std::size_t size) const {
+    VAddr GetRandomMapRegion(const Kernel::KPageTable& page_table, std::size_t size) const {
         VAddr addr{};
         const std::size_t end_pages{(page_table.GetAliasCodeRegionSize() - size) >>
                                     Kernel::PageBits};
diff --git a/src/core/loader/deconstructed_rom_directory.cpp b/src/core/loader/deconstructed_rom_directory.cpp
index 79ebf11de..4a10211f6 100644
--- a/src/core/loader/deconstructed_rom_directory.cpp
+++ b/src/core/loader/deconstructed_rom_directory.cpp
@@ -12,8 +12,8 @@
 #include "core/file_sys/control_metadata.h"
 #include "core/file_sys/patch_manager.h"
 #include "core/file_sys/romfs_factory.h"
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/service/filesystem/filesystem.h"
 #include "core/loader/deconstructed_rom_directory.h"
diff --git a/src/core/loader/elf.cpp b/src/core/loader/elf.cpp
index dca1fcb18..f4a339390 100644
--- a/src/core/loader/elf.cpp
+++ b/src/core/loader/elf.cpp
@@ -10,7 +10,7 @@
 #include "common/file_util.h"
 #include "common/logging/log.h"
 #include "core/hle/kernel/code_set.h"
-#include "core/hle/kernel/memory/page_table.h"
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/process.h"
 #include "core/loader/elf.h"
 #include "core/memory.h"
diff --git a/src/core/loader/kip.cpp b/src/core/loader/kip.cpp
index e162c4ff0..3f4ba233d 100644
--- a/src/core/loader/kip.cpp
+++ b/src/core/loader/kip.cpp
@@ -6,7 +6,7 @@
 #include "core/file_sys/kernel_executable.h"
 #include "core/file_sys/program_metadata.h"
 #include "core/hle/kernel/code_set.h"
-#include "core/hle/kernel/memory/page_table.h"
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/process.h"
 #include "core/loader/kip.h"
 #include "core/memory.h"
diff --git a/src/core/loader/nro.cpp b/src/core/loader/nro.cpp
index f976d0a9c..14618cb40 100644
--- a/src/core/loader/nro.cpp
+++ b/src/core/loader/nro.cpp
@@ -15,8 +15,8 @@
 #include "core/file_sys/romfs_factory.h"
 #include "core/file_sys/vfs_offset.h"
 #include "core/hle/kernel/code_set.h"
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_thread.h"
-#include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/service/filesystem/filesystem.h"
 #include "core/loader/nro.h"
diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp
index ea347ea83..cbd048695 100644
--- a/src/core/loader/nso.cpp
+++ b/src/core/loader/nso.cpp
@@ -15,8 +15,8 @@
 #include "core/core.h"
 #include "core/file_sys/patch_manager.h"
 #include "core/hle/kernel/code_set.h"
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_thread.h"
-#include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/process.h"
 #include "core/loader/nso.h"
 #include "core/memory.h"
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 11609682a..b9dd3e275 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -16,7 +16,7 @@
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
 #include "core/device_memory.h"
-#include "core/hle/kernel/memory/page_table.h"
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/physical_memory.h"
 #include "core/hle/kernel/process.h"
 #include "core/memory.h"
diff --git a/src/core/memory/cheat_engine.cpp b/src/core/memory/cheat_engine.cpp
index 2dd0eb0f8..8eec567ab 100644
--- a/src/core/memory/cheat_engine.cpp
+++ b/src/core/memory/cheat_engine.cpp
@@ -10,7 +10,7 @@
 #include "core/core_timing.h"
 #include "core/core_timing_util.h"
 #include "core/hardware_properties.h"
-#include "core/hle/kernel/memory/page_table.h"
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/service/hid/controllers/npad.h"
 #include "core/hle/service/hid/hid.h"
diff --git a/src/core/reporter.cpp b/src/core/reporter.cpp
index f199c3362..74fb32814 100644
--- a/src/core/reporter.cpp
+++ b/src/core/reporter.cpp
@@ -17,7 +17,7 @@
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
 #include "core/hle/kernel/hle_ipc.h"
-#include "core/hle/kernel/memory/page_table.h"
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/result.h"
 #include "core/memory.h"
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index 44240a9c4..4eb71efbd 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -6,7 +6,7 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/core.h"
-#include "core/hle/kernel/memory/page_table.h"
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/process.h"
 #include "core/memory.h"
 #include "video_core/gpu.h"
-- 
cgit v1.2.3