path: root/src/core/hle
author     bunnei <bunneidev@gmail.com>    2022-02-22 02:52:36 +0100
committer  bunnei <bunneidev@gmail.com>    2022-02-27 19:34:02 +0100
commit     06e2b76c759af22be60c077489271b42ad49c732 (patch)
tree       8dc02f9c3395ea5a169ad04c4af4eededf814a7f /src/core/hle
parent     hle: kernel: Add initial_process.h header. (diff)
Diffstat (limited to 'src/core/hle')
-rw-r--r--  src/core/hle/kernel/k_page_heap.cpp  126
-rw-r--r--  src/core/hle/kernel/k_page_heap.h    221
2 files changed, 192 insertions(+), 155 deletions(-)
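
For orientation before the diff: KPageHeap is a buddy-style page allocator with seven power-of-two block classes, given by the MemoryBlockPageShifts table in the header (shifts 0xC through 0x1E, i.e. 4 KiB up to 1 GiB). This commit switches the heap from virtual to physical addressing (VAddr to PAddr), adopts m_-prefixed member names, passes the block-shift table into Initialize and CalculateManagementOverheadSize as parameters, and replaces UpdateUsedSize with SetInitialUsedSize. The standalone sketch below (not the committed code; PageSize = 0x1000 is an assumption consistent with the shift table) shows how GetAlignedBlockIndex maps a request onto a block class:

// Standalone sketch of block-class selection, mirroring GetAlignedBlockIndex
// in the header diff below. Illustration only; PageSize = 0x1000 is assumed.
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdio>

constexpr std::size_t PageSize = 0x1000;
constexpr std::array<std::size_t, 7> MemoryBlockPageShifts{0xC,  0x10, 0x15, 0x16,
                                                           0x19, 0x1D, 0x1E};

// Smallest class that satisfies both the size and the alignment request.
constexpr int GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
    const std::size_t target_pages = std::max(num_pages, align_pages);
    for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
        if (target_pages <= (std::size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
            return static_cast<int>(i);
        }
    }
    return -1;
}

int main() {
    std::printf("%d\n", GetAlignedBlockIndex(16, 1)); // 16 pages fit a 64 KiB block: 1
    std::printf("%d\n", GetAlignedBlockIndex(17, 1)); // 17 pages need a 2 MiB block: 2
}
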
diff --git a/src/core/hle/kernel/k_page_heap.cpp b/src/core/hle/kernel/k_page_heap.cpp
index 29d996d62..97a5890a0 100644
--- a/src/core/hle/kernel/k_page_heap.cpp
+++ b/src/core/hle/kernel/k_page_heap.cpp
@@ -7,35 +7,51 @@
namespace Kernel {
-void KPageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) {
- // Check our assumptions
- ASSERT(Common::IsAligned((address), PageSize));
+void KPageHeap::Initialize(PAddr address, size_t size, VAddr management_address,
+ size_t management_size, const size_t* block_shifts,
+ size_t num_block_shifts) {
+ // Check our assumptions.
+ ASSERT(Common::IsAligned(address, PageSize));
ASSERT(Common::IsAligned(size, PageSize));
+ ASSERT(0 < num_block_shifts && num_block_shifts <= NumMemoryBlockPageShifts);
+ const VAddr management_end = management_address + management_size;
- // Set our members
- heap_address = address;
- heap_size = size;
-
- // Setup bitmaps
- metadata.resize(metadata_size / sizeof(u64));
- u64* cur_bitmap_storage{metadata.data()};
- for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
- const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
- const std::size_t next_block_shift{
- (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
- cur_bitmap_storage = blocks[i].Initialize(heap_address, heap_size, cur_block_shift,
- next_block_shift, cur_bitmap_storage);
+ // Set our members.
+ m_heap_address = address;
+ m_heap_size = size;
+ m_num_blocks = num_block_shifts;
+
+ // Setup bitmaps.
+ m_management_data.resize(management_size / sizeof(u64));
+ u64* cur_bitmap_storage{m_management_data.data()};
+ for (size_t i = 0; i < num_block_shifts; i++) {
+ const size_t cur_block_shift = block_shifts[i];
+ const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
+ cur_bitmap_storage = m_blocks[i].Initialize(m_heap_address, m_heap_size, cur_block_shift,
+ next_block_shift, cur_bitmap_storage);
}
+
+ // Ensure we didn't overextend our bounds.
+ ASSERT(VAddr(cur_bitmap_storage) <= management_end);
+}
+
+size_t KPageHeap::GetNumFreePages() const {
+ size_t num_free = 0;
+
+ for (size_t i = 0; i < m_num_blocks; i++) {
+ num_free += m_blocks[i].GetNumFreePages();
+ }
+
+ return num_free;
}
-VAddr KPageHeap::AllocateBlock(s32 index, bool random) {
- const std::size_t needed_size{blocks[index].GetSize()};
+PAddr KPageHeap::AllocateBlock(s32 index, bool random) {
+ const size_t needed_size = m_blocks[index].GetSize();
- for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
- if (const VAddr addr{blocks[i].PopBlock(random)}; addr) {
- if (const std::size_t allocated_size{blocks[i].GetSize()};
- allocated_size > needed_size) {
- Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
+ for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
+ if (const PAddr addr = m_blocks[i].PopBlock(random); addr != 0) {
+ if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
+ this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
}
return addr;
}
@@ -44,34 +60,34 @@ VAddr KPageHeap::AllocateBlock(s32 index, bool random) {
return 0;
}
-void KPageHeap::FreeBlock(VAddr block, s32 index) {
+void KPageHeap::FreeBlock(PAddr block, s32 index) {
do {
- block = blocks[index++].PushBlock(block);
+ block = m_blocks[index++].PushBlock(block);
} while (block != 0);
}
-void KPageHeap::Free(VAddr addr, std::size_t num_pages) {
- // Freeing no pages is a no-op
+void KPageHeap::Free(PAddr addr, size_t num_pages) {
+ // Freeing no pages is a no-op.
if (num_pages == 0) {
return;
}
- // Find the largest block size that we can free, and free as many as possible
- s32 big_index{static_cast<s32>(MemoryBlockPageShifts.size()) - 1};
- const VAddr start{addr};
- const VAddr end{(num_pages * PageSize) + addr};
- VAddr before_start{start};
- VAddr before_end{start};
- VAddr after_start{end};
- VAddr after_end{end};
+ // Find the largest block size that we can free, and free as many as possible.
+ s32 big_index = static_cast<s32>(m_num_blocks) - 1;
+ const PAddr start = addr;
+ const PAddr end = addr + num_pages * PageSize;
+ PAddr before_start = start;
+ PAddr before_end = start;
+ PAddr after_start = end;
+ PAddr after_end = end;
while (big_index >= 0) {
- const std::size_t block_size{blocks[big_index].GetSize()};
- const VAddr big_start{Common::AlignUp((start), block_size)};
- const VAddr big_end{Common::AlignDown((end), block_size)};
+ const size_t block_size = m_blocks[big_index].GetSize();
+ const PAddr big_start = Common::AlignUp(start, block_size);
+ const PAddr big_end = Common::AlignDown(end, block_size);
if (big_start < big_end) {
- // Free as many big blocks as we can
- for (auto block{big_start}; block < big_end; block += block_size) {
- FreeBlock(block, big_index);
+ // Free as many big blocks as we can.
+ for (auto block = big_start; block < big_end; block += block_size) {
+ this->FreeBlock(block, big_index);
}
before_end = big_start;
after_start = big_end;
@@ -81,31 +97,31 @@ void KPageHeap::Free(VAddr addr, std::size_t num_pages) {
}
ASSERT(big_index >= 0);
- // Free space before the big blocks
- for (s32 i{big_index - 1}; i >= 0; i--) {
- const std::size_t block_size{blocks[i].GetSize()};
+ // Free space before the big blocks.
+ for (s32 i = big_index - 1; i >= 0; i--) {
+ const size_t block_size = m_blocks[i].GetSize();
while (before_start + block_size <= before_end) {
before_end -= block_size;
- FreeBlock(before_end, i);
+ this->FreeBlock(before_end, i);
}
}
- // Free space after the big blocks
- for (s32 i{big_index - 1}; i >= 0; i--) {
- const std::size_t block_size{blocks[i].GetSize()};
+ // Free space after the big blocks.
+ for (s32 i = big_index - 1; i >= 0; i--) {
+ const size_t block_size = m_blocks[i].GetSize();
while (after_start + block_size <= after_end) {
- FreeBlock(after_start, i);
+ this->FreeBlock(after_start, i);
after_start += block_size;
}
}
}
-std::size_t KPageHeap::CalculateManagementOverheadSize(std::size_t region_size) {
- std::size_t overhead_size = 0;
- for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
- const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
- const std::size_t next_block_shift{
- (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
+size_t KPageHeap::CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
+ size_t num_block_shifts) {
+ size_t overhead_size = 0;
+ for (size_t i = 0; i < num_block_shifts; i++) {
+ const size_t cur_block_shift = block_shifts[i];
+ const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
overhead_size += KPageHeap::Block::CalculateManagementOverheadSize(
region_size, cur_block_shift, next_block_shift);
}
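
The rewritten Free above decomposes an arbitrary page range into power-of-two blocks: it finds the largest class that has at least one aligned block inside the range, frees those big blocks in the middle, then mops up the unaligned remainders on either side with progressively smaller classes. A minimal sketch of that decomposition, assuming only the two smallest block classes and a hypothetical 32-page range starting at 0x3000:

// Illustrative reduction of KPageHeap::Free's range decomposition; the block
// classes and example range are assumptions, not the committed code.
#include <cstdint>
#include <cstdio>

constexpr uint64_t AlignUp(uint64_t v, uint64_t a) {
    return (v + a - 1) & ~(a - 1);
}
constexpr uint64_t AlignDown(uint64_t v, uint64_t a) {
    return v & ~(a - 1);
}

int main() {
    constexpr uint64_t PageSize = 0x1000;
    // The two smallest classes from MemoryBlockPageShifts: 4 KiB and 64 KiB.
    constexpr uint64_t block_sizes[]{uint64_t(1) << 0xC, uint64_t(1) << 0x10};
    constexpr int num_blocks = 2;

    const uint64_t start = 0x3000;              // unaligned start
    const uint64_t end = start + 32 * PageSize; // 32 pages -> 0x23000

    // Find the largest class with at least one aligned block inside the range.
    int big_index = num_blocks - 1;
    uint64_t before_start = start, before_end = start;
    uint64_t after_start = end, after_end = end;
    while (big_index >= 0) {
        const uint64_t bs = block_sizes[big_index];
        const uint64_t big_start = AlignUp(start, bs);
        const uint64_t big_end = AlignDown(end, bs);
        if (big_start < big_end) {
            for (uint64_t b = big_start; b < big_end; b += bs) {
                std::printf("free 0x%llx index %d\n", (unsigned long long)b, big_index);
            }
            before_end = big_start;
            after_start = big_end;
            break;
        }
        big_index--;
    }

    // Mop up the unaligned remainders on both sides with smaller classes.
    for (int i = big_index - 1; i >= 0; i--) {
        const uint64_t bs = block_sizes[i];
        while (before_start + bs <= before_end) {
            before_end -= bs;
            std::printf("free 0x%llx index %d (before)\n", (unsigned long long)before_end, i);
        }
        while (after_start + bs <= after_end) {
            std::printf("free 0x%llx index %d (after)\n", (unsigned long long)after_start, i);
            after_start += bs;
        }
    }
    // Expected: one 64 KiB block at 0x10000, thirteen 4 KiB blocks covering
    // 0x3000..0xFFFF, and three 4 KiB blocks covering 0x20000..0x22FFF.
}
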
diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h
index a65aa28a0..60fff766b 100644
--- a/src/core/hle/kernel/k_page_heap.h
+++ b/src/core/hle/kernel/k_page_heap.h
@@ -23,54 +23,73 @@ public:
KPageHeap() = default;
~KPageHeap() = default;
- constexpr VAddr GetAddress() const {
- return heap_address;
+ constexpr PAddr GetAddress() const {
+ return m_heap_address;
}
- constexpr std::size_t GetSize() const {
- return heap_size;
+ constexpr size_t GetSize() const {
+ return m_heap_size;
}
- constexpr VAddr GetEndAddress() const {
- return GetAddress() + GetSize();
+ constexpr PAddr GetEndAddress() const {
+ return this->GetAddress() + this->GetSize();
}
- constexpr std::size_t GetPageOffset(VAddr block) const {
- return (block - GetAddress()) / PageSize;
+ constexpr size_t GetPageOffset(PAddr block) const {
+ return (block - this->GetAddress()) / PageSize;
+ }
+ constexpr size_t GetPageOffsetToEnd(PAddr block) const {
+ return (this->GetEndAddress() - block) / PageSize;
+ }
+
+ void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address,
+ size_t management_size) {
+ return this->Initialize(heap_address, heap_size, management_address, management_size,
+ MemoryBlockPageShifts.data(), NumMemoryBlockPageShifts);
+ }
+
+ size_t GetFreeSize() const {
+ return this->GetNumFreePages() * PageSize;
}
- void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
- VAddr AllocateBlock(s32 index, bool random);
- void Free(VAddr addr, std::size_t num_pages);
+ void SetInitialUsedSize(size_t reserved_size) {
+ // Check that the reserved size is valid.
+ const size_t free_size = this->GetNumFreePages() * PageSize;
+ ASSERT(m_heap_size >= free_size + reserved_size);
- void UpdateUsedSize() {
- used_size = heap_size - (GetNumFreePages() * PageSize);
+ // Set the initial used size.
+ m_initial_used_size = m_heap_size - free_size - reserved_size;
}
- static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
+ PAddr AllocateBlock(s32 index, bool random);
+ void Free(PAddr addr, size_t num_pages);
+
+ static size_t CalculateManagementOverheadSize(size_t region_size) {
+ return CalculateManagementOverheadSize(region_size, MemoryBlockPageShifts.data(),
+ NumMemoryBlockPageShifts);
+ }
- static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
- const auto target_pages{std::max(num_pages, align_pages)};
- for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
- if (target_pages <=
- (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
+ static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
+ const size_t target_pages = std::max(num_pages, align_pages);
+ for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
+ if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
return static_cast<s32>(i);
}
}
return -1;
}
- static constexpr s32 GetBlockIndex(std::size_t num_pages) {
- for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
- if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
+ static constexpr s32 GetBlockIndex(size_t num_pages) {
+ for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) {
+ if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
return i;
}
}
return -1;
}
- static constexpr std::size_t GetBlockSize(std::size_t index) {
- return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index];
+ static constexpr size_t GetBlockSize(size_t index) {
+ return size_t(1) << MemoryBlockPageShifts[index];
}
- static constexpr std::size_t GetBlockNumPages(std::size_t index) {
+ static constexpr size_t GetBlockNumPages(size_t index) {
return GetBlockSize(index) / PageSize;
}
@@ -83,114 +102,116 @@ private:
Block() = default;
~Block() = default;
- constexpr std::size_t GetShift() const {
- return block_shift;
+ constexpr size_t GetShift() const {
+ return m_block_shift;
}
- constexpr std::size_t GetNextShift() const {
- return next_block_shift;
+ constexpr size_t GetNextShift() const {
+ return m_next_block_shift;
}
- constexpr std::size_t GetSize() const {
- return static_cast<std::size_t>(1) << GetShift();
+ constexpr size_t GetSize() const {
+ return u64(1) << this->GetShift();
}
- constexpr std::size_t GetNumPages() const {
- return GetSize() / PageSize;
+ constexpr size_t GetNumPages() const {
+ return this->GetSize() / PageSize;
}
- constexpr std::size_t GetNumFreeBlocks() const {
- return bitmap.GetNumBits();
+ constexpr size_t GetNumFreeBlocks() const {
+ return m_bitmap.GetNumBits();
}
- constexpr std::size_t GetNumFreePages() const {
- return GetNumFreeBlocks() * GetNumPages();
+ constexpr size_t GetNumFreePages() const {
+ return this->GetNumFreeBlocks() * this->GetNumPages();
}
- u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs,
- u64* bit_storage) {
- // Set shifts
- block_shift = bs;
- next_block_shift = nbs;
-
- // Align up the address
- VAddr end{addr + size};
- const auto align{(next_block_shift != 0) ? (1ULL << next_block_shift)
- : (1ULL << block_shift)};
- addr = Common::AlignDown((addr), align);
- end = Common::AlignUp((end), align);
-
- heap_address = addr;
- end_offset = (end - addr) / (1ULL << block_shift);
- return bitmap.Initialize(bit_storage, end_offset);
+ u64* Initialize(PAddr addr, size_t size, size_t bs, size_t nbs, u64* bit_storage) {
+ // Set shifts.
+ m_block_shift = bs;
+ m_next_block_shift = nbs;
+
+ // Align up the address.
+ PAddr end = addr + size;
+ const size_t align = (m_next_block_shift != 0) ? (u64(1) << m_next_block_shift)
+ : (u64(1) << m_block_shift);
+ addr = Common::AlignDown(addr, align);
+ end = Common::AlignUp(end, align);
+
+ m_heap_address = addr;
+ m_end_offset = (end - addr) / (u64(1) << m_block_shift);
+ return m_bitmap.Initialize(bit_storage, m_end_offset);
}
- VAddr PushBlock(VAddr address) {
- // Set the bit for the free block
- std::size_t offset{(address - heap_address) >> GetShift()};
- bitmap.SetBit(offset);
+ PAddr PushBlock(PAddr address) {
+ // Set the bit for the free block.
+ size_t offset = (address - m_heap_address) >> this->GetShift();
+ m_bitmap.SetBit(offset);
- // If we have a next shift, try to clear the blocks below and return the address
- if (GetNextShift()) {
- const auto diff{1ULL << (GetNextShift() - GetShift())};
+ // If we have a next shift, try to clear the blocks below this one and return the new
+ // address.
+ if (this->GetNextShift()) {
+ const size_t diff = u64(1) << (this->GetNextShift() - this->GetShift());
offset = Common::AlignDown(offset, diff);
- if (bitmap.ClearRange(offset, diff)) {
- return heap_address + (offset << GetShift());
+ if (m_bitmap.ClearRange(offset, diff)) {
+ return m_heap_address + (offset << this->GetShift());
}
}
- // We couldn't coalesce, or we're already as big as possible
- return 0;
+ // We couldn't coalesce, or we're already as big as possible.
+ return {};
}
- VAddr PopBlock(bool random) {
- // Find a free block
- const s64 soffset{bitmap.FindFreeBlock(random)};
+ PAddr PopBlock(bool random) {
+ // Find a free block.
+ s64 soffset = m_bitmap.FindFreeBlock(random);
if (soffset < 0) {
- return 0;
+ return {};
}
- const auto offset{static_cast<std::size_t>(soffset)};
+ const size_t offset = static_cast<size_t>(soffset);
- // Update our tracking and return it
- bitmap.ClearBit(offset);
- return heap_address + (offset << GetShift());
+ // Update our tracking and return it.
+ m_bitmap.ClearBit(offset);
+ return m_heap_address + (offset << this->GetShift());
}
- static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size,
- std::size_t cur_block_shift,
- std::size_t next_block_shift) {
- const auto cur_block_size{(1ULL << cur_block_shift)};
- const auto next_block_size{(1ULL << next_block_shift)};
- const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size};
+ public:
+ static constexpr size_t CalculateManagementOverheadSize(size_t region_size,
+ size_t cur_block_shift,
+ size_t next_block_shift) {
+ const size_t cur_block_size = (u64(1) << cur_block_shift);
+ const size_t next_block_size = (u64(1) << next_block_shift);
+ const size_t align = (next_block_shift != 0) ? next_block_size : cur_block_size;
return KPageBitmap::CalculateManagementOverheadSize(
(align * 2 + Common::AlignUp(region_size, align)) / cur_block_size);
}
private:
- KPageBitmap bitmap;
- VAddr heap_address{};
- uintptr_t end_offset{};
- std::size_t block_shift{};
- std::size_t next_block_shift{};
+ KPageBitmap m_bitmap;
+ PAddr m_heap_address{};
+ uintptr_t m_end_offset{};
+ size_t m_block_shift{};
+ size_t m_next_block_shift{};
};
- constexpr std::size_t GetNumFreePages() const {
- std::size_t num_free{};
-
- for (const auto& block : blocks) {
- num_free += block.GetNumFreePages();
- }
-
- return num_free;
- }
+private:
+ void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address,
+ size_t management_size, const size_t* block_shifts, size_t num_block_shifts);
+ size_t GetNumFreePages() const;
- void FreeBlock(VAddr block, s32 index);
+ void FreeBlock(PAddr block, s32 index);
- static constexpr std::size_t NumMemoryBlockPageShifts{7};
- static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
+ static constexpr size_t NumMemoryBlockPageShifts{7};
+ static constexpr std::array<size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
};
- VAddr heap_address{};
- std::size_t heap_size{};
- std::size_t used_size{};
- std::array<Block, NumMemoryBlockPageShifts> blocks{};
- std::vector<u64> metadata;
+private:
+ static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
+ size_t num_block_shifts);
+
+private:
+ PAddr m_heap_address{};
+ size_t m_heap_size{};
+ size_t m_initial_used_size{};
+ size_t m_num_blocks{};
+ std::array<Block, NumMemoryBlockPageShifts> m_blocks{};
+ std::vector<u64> m_management_data;
};
} // namespace Kernel
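
A closing note on the coalescing path in PushBlock above: freeing a block sets its bit, the offset is aligned down to the next class's granularity, and if ClearRange succeeds the whole group is promoted, so FreeBlock's do/while keeps pushing the block up one class at a time. The toy model below assumes (it is not shown in this diff) that KPageBitmap::ClearRange clears the bits and returns true only when every bit in the range was set:

// Toy model of PushBlock's buddy promotion. Illustration only; ToyBitmap and
// its ClearRange semantics are assumptions standing in for KPageBitmap.
#include <cstddef>
#include <cstdio>
#include <vector>

struct ToyBitmap {
    std::vector<bool> bits;
    // Clear [offset, offset + count) iff every bit in the range is set.
    bool ClearRange(std::size_t offset, std::size_t count) {
        for (std::size_t i = 0; i < count; i++) {
            if (!bits[offset + i]) {
                return false; // a sibling is still allocated; no promotion
            }
        }
        for (std::size_t i = 0; i < count; i++) {
            bits[offset + i] = false;
        }
        return true;
    }
};

int main() {
    // Four blocks at this level; groups of two promote to the next class,
    // matching the 0x15 -> 0x16 shift pair (2 MiB blocks inside 4 MiB blocks).
    ToyBitmap level{{false, true, false, false}};
    const std::size_t diff = 2; // 1ULL << (next_shift - shift)

    std::size_t offset = 0;
    level.bits[offset] = true; // m_bitmap.SetBit(offset)
    offset &= ~(diff - 1);     // Common::AlignDown(offset, diff)
    if (level.ClearRange(offset, diff)) {
        // PushBlock returns the promoted address; FreeBlock re-pushes it at
        // index + 1 until a promotion fails.
        std::printf("promoted group at offset %zu\n", offset);
    } else {
        std::printf("buddy still in use; block stays at this level\n");
    }
}

With sixteen 4 KiB buddies per 64 KiB group in the real shift table, the same mechanism lets a fully freed region bubble all the way up to the 1 GiB class.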