From a02566136c225e0824a3f217766efcb3b3f55ec2 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Thu, 11 Feb 2021 18:39:06 -0800
Subject: hle: kernel: Add KPageBitmap class.

---
 src/core/CMakeLists.txt             |   1 +
 src/core/hle/kernel/k_page_bitmap.h | 279 ++++++++++++++++++++++++++++++++++++
 2 files changed, 280 insertions(+)
 create mode 100644 src/core/hle/kernel/k_page_bitmap.h

diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index a6b5fef56..6604bc2c5 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -164,6 +164,7 @@ add_library(core STATIC
     hle/kernel/k_light_condition_variable.h
     hle/kernel/k_light_lock.cpp
     hle/kernel/k_light_lock.h
+    hle/kernel/k_page_bitmap.h
     hle/kernel/k_priority_queue.h
     hle/kernel/k_readable_event.cpp
     hle/kernel/k_readable_event.h
diff --git a/src/core/hle/kernel/k_page_bitmap.h b/src/core/hle/kernel/k_page_bitmap.h
new file mode 100644
index 000000000..da2d20032
--- /dev/null
+++ b/src/core/hle/kernel/k_page_bitmap.h
@@ -0,0 +1,279 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <bit>
+
+#include "common/alignment.h"
+#include "common/assert.h"
+#include "common/bit_util.h"
+#include "common/common_types.h"
+#include "common/tiny_mt.h"
+#include "core/hle/kernel/memory/system_control.h"
+
+namespace Kernel {
+
+class KPageBitmap {
+private:
+    class RandomBitGenerator {
+    private:
+        Common::TinyMT rng{};
+        u32 entropy{};
+        u32 bits_available{};
+
+    private:
+        void RefreshEntropy() {
+            entropy = rng.GenerateRandomU32();
+            bits_available = static_cast<u32>(Common::BitSize<decltype(entropy)>());
+        }
+
+        bool GenerateRandomBit() {
+            if (bits_available == 0) {
+                this->RefreshEntropy();
+            }
+
+            const bool rnd_bit = (entropy & 1) != 0;
+            entropy >>= 1;
+            --bits_available;
+            return rnd_bit;
+        }
+
+    public:
+        RandomBitGenerator() {
+            rng.Initialize(static_cast<u32>(Memory::SystemControl::GenerateRandomU64()));
+        }
+
+        std::size_t SelectRandomBit(u64 bitmap) {
+            u64 selected = 0;
+
+            u64 cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2;
+            u64 cur_mask = (1ULL << cur_num_bits) - 1;
+
+            while (cur_num_bits) {
+                const u64 low = (bitmap >> 0) & cur_mask;
+                const u64 high = (bitmap >> cur_num_bits) & cur_mask;
+
+                bool choose_low;
+                if (high == 0) {
+                    // If only low val is set, choose low.
+                    choose_low = true;
+                } else if (low == 0) {
+                    // If only high val is set, choose high.
+                    choose_low = false;
+                } else {
+                    // If both are set, choose random.
+                    choose_low = this->GenerateRandomBit();
+                }
+
+                // If we chose low, proceed with low.
+                if (choose_low) {
+                    bitmap = low;
+                    selected += 0;
+                } else {
+                    bitmap = high;
+                    selected += cur_num_bits;
+                }
+
+                // Proceed.
+                cur_num_bits /= 2;
+                cur_mask >>= cur_num_bits;
+            }
+
+            return selected;
+        }
+    };
+
+public:
+    static constexpr std::size_t MaxDepth = 4;
+
+private:
+    std::array<u64*, MaxDepth> bit_storages{};
+    RandomBitGenerator rng{};
+    std::size_t num_bits{};
+    std::size_t used_depths{};
+
+public:
+    KPageBitmap() = default;
+
+    constexpr std::size_t GetNumBits() const {
+        return num_bits;
+    }
+    constexpr s32 GetHighestDepthIndex() const {
+        return static_cast<s32>(used_depths) - 1;
+    }
+
+    u64* Initialize(u64* storage, std::size_t size) {
+        // Initially, everything is un-set.
+        num_bits = 0;
+
+        // Calculate the needed bitmap depth.
+        used_depths = static_cast<std::size_t>(GetRequiredDepth(size));
+        ASSERT(used_depths <= MaxDepth);
+
+        // Set the bitmap pointers.
+        for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) {
+            bit_storages[depth] = storage;
+            size = Common::AlignUp(size, Common::BitSize<u64>()) / Common::BitSize<u64>();
+            storage += size;
+        }
+
+        return storage;
+    }
+
+    s64 FindFreeBlock(bool random) {
+        uintptr_t offset = 0;
+        s32 depth = 0;
+
+        if (random) {
+            do {
+                const u64 v = bit_storages[depth][offset];
+                if (v == 0) {
+                    // If depth is bigger than zero, then a previous level indicated a block was
+                    // free.
+                    ASSERT(depth == 0);
+                    return -1;
+                }
+                offset = offset * Common::BitSize<u64>() + rng.SelectRandomBit(v);
+                ++depth;
+            } while (depth < static_cast<s32>(used_depths));
+        } else {
+            do {
+                const u64 v = bit_storages[depth][offset];
+                if (v == 0) {
+                    // If depth is bigger than zero, then a previous level indicated a block was
+                    // free.
+                    ASSERT(depth == 0);
+                    return -1;
+                }
+                offset = offset * Common::BitSize<u64>() + std::countr_zero(v);
+                ++depth;
+            } while (depth < static_cast<s32>(used_depths));
+        }
+
+        return static_cast<s64>(offset);
+    }
+
+    void SetBit(std::size_t offset) {
+        this->SetBit(this->GetHighestDepthIndex(), offset);
+        num_bits++;
+    }
+
+    void ClearBit(std::size_t offset) {
+        this->ClearBit(this->GetHighestDepthIndex(), offset);
+        num_bits--;
+    }
+
+    bool ClearRange(std::size_t offset, std::size_t count) {
+        s32 depth = this->GetHighestDepthIndex();
+        u64* bits = bit_storages[depth];
+        std::size_t bit_ind = offset / Common::BitSize<u64>();
+        if (count < Common::BitSize<u64>()) {
+            const std::size_t shift = offset % Common::BitSize<u64>();
+            ASSERT(shift + count <= Common::BitSize<u64>());
+            // Check that all the bits are set.
+            const u64 mask = ((u64(1) << count) - 1) << shift;
+            u64 v = bits[bit_ind];
+            if ((v & mask) != mask) {
+                return false;
+            }
+
+            // Clear the bits.
+            v &= ~mask;
+            bits[bit_ind] = v;
+            if (v == 0) {
+                this->ClearBit(depth - 1, bit_ind);
+            }
+        } else {
+            ASSERT(offset % Common::BitSize<u64>() == 0);
+            ASSERT(count % Common::BitSize<u64>() == 0);
+            // Check that all the bits are set.
+            std::size_t remaining = count;
+            std::size_t i = 0;
+            do {
+                if (bits[bit_ind + i++] != ~u64(0)) {
+                    return false;
+                }
+                remaining -= Common::BitSize<u64>();
+            } while (remaining > 0);
+
+            // Clear the bits.
+            remaining = count;
+            i = 0;
+            do {
+                bits[bit_ind + i] = 0;
+                this->ClearBit(depth - 1, bit_ind + i);
+                i++;
+                remaining -= Common::BitSize<u64>();
+            } while (remaining > 0);
+        }
+
+        num_bits -= count;
+        return true;
+    }
+
+private:
+    void SetBit(s32 depth, std::size_t offset) {
+        while (depth >= 0) {
+            std::size_t ind = offset / Common::BitSize<u64>();
+            std::size_t which = offset % Common::BitSize<u64>();
+            const u64 mask = u64(1) << which;
+
+            u64* bit = std::addressof(bit_storages[depth][ind]);
+            u64 v = *bit;
+            ASSERT((v & mask) == 0);
+            *bit = v | mask;
+            if (v) {
+                break;
+            }
+            offset = ind;
+            depth--;
+        }
+    }
+
+    void ClearBit(s32 depth, std::size_t offset) {
+        while (depth >= 0) {
+            std::size_t ind = offset / Common::BitSize<u64>();
+            std::size_t which = offset % Common::BitSize<u64>();
+            const u64 mask = u64(1) << which;
+
+            u64* bit = std::addressof(bit_storages[depth][ind]);
+            u64 v = *bit;
+            ASSERT((v & mask) != 0);
+            v &= ~mask;
+            *bit = v;
+            if (v) {
+                break;
+            }
+            offset = ind;
+            depth--;
+        }
+    }
+
+private:
+    static constexpr s32 GetRequiredDepth(std::size_t region_size) {
+        s32 depth = 0;
+        while (true) {
+            region_size /= Common::BitSize<u64>();
+            depth++;
+            if (region_size == 0) {
+                return depth;
+            }
+        }
+    }
+
+public:
+    static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size) {
+        std::size_t overhead_bits = 0;
+        for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) {
+            region_size =
+                Common::AlignUp(region_size, Common::BitSize<u64>()) / Common::BitSize<u64>();
+            overhead_bits += region_size;
+        }
+        return overhead_bits * sizeof(u64);
+    }
+};
+
+} // namespace Kernel
-- 
cgit v1.2.3
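
Usage sketch (not part of the patch above): the snippet below shows one way a caller might exercise the KPageBitmap API added by this commit, treating a set bit as a free block and sizing the backing buffer with CalculateManagementOverheadSize(). The names ExampleUsage, num_pages, and storage are illustrative assumptions, not symbols from the yuzu codebase.

// Hypothetical usage sketch; not part of the commit above.
#include <vector>

#include "core/hle/kernel/k_page_bitmap.h"

void ExampleUsage() {
    constexpr std::size_t num_pages = 0x10000; // assumed number of pages tracked

    // Back every depth of the bitmap tree with one contiguous u64 buffer.
    std::vector<u64> storage(
        Kernel::KPageBitmap::CalculateManagementOverheadSize(num_pages) / sizeof(u64));

    Kernel::KPageBitmap bitmap;
    bitmap.Initialize(storage.data(), num_pages);

    // A set bit marks a free block; mark two pages as free.
    bitmap.SetBit(42);
    bitmap.SetBit(43);

    // Pick a free block, optionally at random (true), and claim it.
    const s64 block = bitmap.FindFreeBlock(true);
    if (block >= 0) {
        bitmap.ClearBit(static_cast<std::size_t>(block));
        // Re-mark it free so the range clear below sees both bits set.
        bitmap.SetBit(static_cast<std::size_t>(block));
    }

    // ClearRange clears a whole run at once, but only if every bit in it is set.
    bitmap.ClearRange(42, 2);
}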