From 778e0f8ec1c1be555217be91a32ac01d32675e23 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 13 Feb 2021 00:42:30 -0800 Subject: hle: kernel: Move KMemoryRegion to its own module and update. --- src/core/hle/kernel/k_memory_layout.h | 22 +-- src/core/hle/kernel/k_memory_region.h | 310 ++++++++++++++++++++++++++++++++++ src/core/hle/kernel/kernel.cpp | 20 +-- 3 files changed, 321 insertions(+), 31 deletions(-) create mode 100644 src/core/hle/kernel/k_memory_region.h (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h index 0821d2d8c..a76ffa02e 100644 --- a/src/core/hle/kernel/k_memory_layout.h +++ b/src/core/hle/kernel/k_memory_layout.h @@ -6,6 +6,7 @@ #include "common/common_types.h" #include "core/device_memory.h" +#include "core/hle/kernel/k_memory_region.h" namespace Kernel { @@ -27,27 +28,6 @@ constexpr bool IsKernelAddress(VAddr address) { return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd; } -class KMemoryRegion final { - friend class KMemoryLayout; - -public: - constexpr PAddr StartAddress() const { - return start_address; - } - - constexpr PAddr EndAddress() const { - return end_address; - } - -private: - constexpr KMemoryRegion() = default; - constexpr KMemoryRegion(PAddr start_address, PAddr end_address) - : start_address{start_address}, end_address{end_address} {} - - const PAddr start_address{}; - const PAddr end_address{}; -}; - class KMemoryLayout final { public: constexpr const KMemoryRegion& Application() const { diff --git a/src/core/hle/kernel/k_memory_region.h b/src/core/hle/kernel/k_memory_region.h new file mode 100644 index 000000000..de236df6a --- /dev/null +++ b/src/core/hle/kernel/k_memory_region.h @@ -0,0 +1,310 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include "common/assert.h" +#include "common/common_types.h" +#include "common/intrusive_red_black_tree.h" + +namespace Kernel { + +class KMemoryRegion final : public Common::IntrusiveRedBlackTreeBaseNode, + NonCopyable { + friend class KMemoryLayout; + friend class KMemoryRegionTree; + +public: + static constexpr int Compare(const KMemoryRegion& lhs, const KMemoryRegion& rhs) { + if (lhs.GetAddress() < rhs.GetAddress()) { + return -1; + } else if (lhs.GetAddress() <= rhs.GetLastAddress()) { + return 0; + } else { + return 1; + } + } + + constexpr u64 GetAddress() const { + return address; + } + + constexpr u64 GetPairAddress() const { + return pair_address; + } + + constexpr u64 GetLastAddress() const { + return last_address; + } + + constexpr u64 GetEndAddress() const { + return GetLastAddress() + 1; + } + + constexpr std::size_t GetSize() const { + return GetEndAddress() - GetAddress(); + } + + constexpr u32 GetAttributes() const { + return attributes; + } + + constexpr u32 GetType() const { + return type_id; + } + + constexpr void SetType(u32 type) { + ASSERT(this->CanDerive(type)); + type_id = type; + } + + constexpr bool Contains(uintptr_t address) const { + ASSERT(this->GetEndAddress() != 0); + return this->GetAddress() <= address && address <= this->GetLastAddress(); + } + + constexpr bool IsDerivedFrom(u32 type) const { + return (this->GetType() | type) == this->GetType(); + } + + // constexpr bool HasTypeAttribute(KMemoryRegionAttr attr) const { + // return (this->GetType() | attr) == this->GetType(); + //} + + constexpr bool CanDerive(u32 type) const { + return (this->GetType() | type) == type; + } + + constexpr void SetPairAddress(u64 a) { + pair_address = a; + } + + // constexpr void SetTypeAttribute(KMemoryRegionAttr attr) { + // type_id |= attr; + //} + +private: + constexpr KMemoryRegion() = default; + constexpr KMemoryRegion(u64 address_, u64 last_address_) + : address{address_}, last_address{last_address_} {} + constexpr KMemoryRegion(u64 address_, u64 last_address_, u64 pair_address_, u32 attributes_, + u32 type_id_) + : address(address_), last_address(last_address_), pair_address(pair_address_), + attributes(attributes_), type_id(type_id_) {} + constexpr KMemoryRegion(u64 address_, u64 last_address_, u32 attributes_, u32 type_id_) + : KMemoryRegion(address_, last_address_, std::numeric_limits::max(), attributes_, + type_id_) {} + + const u64 address{}; + const u64 last_address{}; + u64 pair_address{}; + u32 attributes{}; + u32 type_id{}; +}; + +class KMemoryRegionTree final : NonCopyable { +public: + struct DerivedRegionExtents { + const KMemoryRegion* first_region{}; + const KMemoryRegion* last_region{}; + + constexpr DerivedRegionExtents() = default; + + constexpr u64 GetAddress() const { + return this->first_region->GetAddress(); + } + + constexpr u64 GetLastAddress() const { + return this->last_region->GetLastAddress(); + } + + constexpr u64 GetEndAddress() const { + return this->GetLastAddress() + 1; + } + + constexpr size_t GetSize() const { + return this->GetEndAddress() - this->GetAddress(); + } + }; + +private: + using TreeType = + Common::IntrusiveRedBlackTreeBaseTraits::TreeType; + +public: + using value_type = TreeType::value_type; + using size_type = TreeType::size_type; + using difference_type = TreeType::difference_type; + using pointer = TreeType::pointer; + using const_pointer = TreeType::const_pointer; + using reference = TreeType::reference; + using const_reference = TreeType::const_reference; + using iterator = TreeType::iterator; 
+ using const_iterator = TreeType::const_iterator; + +private: + TreeType m_tree{}; + +public: + constexpr KMemoryRegionTree() = default; + +public: + KMemoryRegion* FindModifiable(u64 address) { + if (auto it = this->find(KMemoryRegion(address, address, 0, 0)); it != this->end()) { + return std::addressof(*it); + } else { + return nullptr; + } + } + + const KMemoryRegion* Find(u64 address) const { + if (auto it = this->find(KMemoryRegion(address, address, 0, 0)); it != this->cend()) { + return std::addressof(*it); + } else { + return nullptr; + } + } + + const KMemoryRegion* FindByType(u32 type_id) const { + for (auto it = this->cbegin(); it != this->cend(); ++it) { + if (it->GetType() == type_id) { + return std::addressof(*it); + } + } + return nullptr; + } + + const KMemoryRegion* FindByTypeAndAttribute(u32 type_id, u32 attr) const { + for (auto it = this->cbegin(); it != this->cend(); ++it) { + if (it->GetType() == type_id && it->GetAttributes() == attr) { + return std::addressof(*it); + } + } + return nullptr; + } + + const KMemoryRegion* FindFirstDerived(u32 type_id) const { + for (auto it = this->cbegin(); it != this->cend(); it++) { + if (it->IsDerivedFrom(type_id)) { + return std::addressof(*it); + } + } + return nullptr; + } + + const KMemoryRegion* FindLastDerived(u32 type_id) const { + const KMemoryRegion* region = nullptr; + for (auto it = this->begin(); it != this->end(); it++) { + if (it->IsDerivedFrom(type_id)) { + region = std::addressof(*it); + } + } + return region; + } + + DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) const { + DerivedRegionExtents extents; + + ASSERT(extents.first_region == nullptr); + ASSERT(extents.last_region == nullptr); + + for (auto it = this->cbegin(); it != this->cend(); it++) { + if (it->IsDerivedFrom(type_id)) { + if (extents.first_region == nullptr) { + extents.first_region = std::addressof(*it); + } + extents.last_region = std::addressof(*it); + } + } + + ASSERT(extents.first_region != nullptr); + ASSERT(extents.last_region != nullptr); + + return extents; + } + +public: + void InsertDirectly(u64 address, u64 last_address, u32 attr = 0, u32 type_id = 0); + bool Insert(u64 address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0); + + VAddr GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id); + + VAddr GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, u32 type_id, + size_t guard_size) { + return this->GetRandomAlignedRegion(size + 2 * guard_size, alignment, type_id) + guard_size; + } + +public: + // Iterator accessors. + iterator begin() { + return m_tree.begin(); + } + + const_iterator begin() const { + return m_tree.begin(); + } + + iterator end() { + return m_tree.end(); + } + + const_iterator end() const { + return m_tree.end(); + } + + const_iterator cbegin() const { + return this->begin(); + } + + const_iterator cend() const { + return this->end(); + } + + iterator iterator_to(reference ref) { + return m_tree.iterator_to(ref); + } + + const_iterator iterator_to(const_reference ref) const { + return m_tree.iterator_to(ref); + } + + // Content management. 
+ bool empty() const { + return m_tree.empty(); + } + + reference back() { + return m_tree.back(); + } + + const_reference back() const { + return m_tree.back(); + } + + reference front() { + return m_tree.front(); + } + + const_reference front() const { + return m_tree.front(); + } + + iterator insert(reference ref) { + return m_tree.insert(ref); + } + + iterator erase(iterator it) { + return m_tree.erase(it); + } + + iterator find(const_reference ref) const { + return m_tree.find(ref); + } + + iterator nfind(const_reference ref) const { + return m_tree.nfind(ref); + } +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 780008b08..48916df17 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -275,22 +275,22 @@ struct KernelCore::Impl { constexpr std::size_t font_size{0x1100000}; constexpr std::size_t irs_size{0x8000}; constexpr std::size_t time_size{0x1000}; - constexpr PAddr hid_addr{layout.System().StartAddress()}; - constexpr PAddr font_pa{layout.System().StartAddress() + hid_size}; - constexpr PAddr irs_addr{layout.System().StartAddress() + hid_size + font_size}; - constexpr PAddr time_addr{layout.System().StartAddress() + hid_size + font_size + irs_size}; + constexpr PAddr hid_addr{layout.System().GetAddress()}; + constexpr PAddr font_pa{layout.System().GetAddress() + hid_size}; + constexpr PAddr irs_addr{layout.System().GetAddress() + hid_size + font_size}; + constexpr PAddr time_addr{layout.System().GetAddress() + hid_size + font_size + irs_size}; // Initialize memory manager memory_manager = std::make_unique(); memory_manager->InitializeManager(KMemoryManager::Pool::Application, - layout.Application().StartAddress(), - layout.Application().EndAddress()); + layout.Application().GetAddress(), + layout.Application().GetLastAddress()); memory_manager->InitializeManager(KMemoryManager::Pool::Applet, - layout.Applet().StartAddress(), - layout.Applet().EndAddress()); + layout.Applet().GetAddress(), + layout.Applet().GetLastAddress()); memory_manager->InitializeManager(KMemoryManager::Pool::System, - layout.System().StartAddress(), - layout.System().EndAddress()); + layout.System().GetAddress(), + layout.System().GetLastAddress()); hid_shared_mem = Kernel::KSharedMemory::Create( system.Kernel(), system.DeviceMemory(), nullptr, {hid_addr, hid_size / PageSize}, -- cgit v1.2.3 From 541b4353e4a85a1f10c53d643ea48b8e31363a62 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 13 Feb 2021 01:07:23 -0800 Subject: hle: kernel: Add initial KMemoryRegionType module. 
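Region types keep their hierarchy in the low bits of type_id and reserve the high bits for the KMemoryRegionAttr flags introduced here, so IsDerivedFrom/CanDerive/HasTypeAttribute all reduce to bit-superset tests. A minimal standalone sketch of those semantics (the example values 0x2 and 0x26 are taken from the static_asserts added later in this series; the helper names are local to the sketch, not part of the change):

    #include <cstdint>

    using u32 = std::uint32_t;

    // Mirrors KMemoryRegionAttr::LinearMapped from this header.
    constexpr u32 LinearMapped = 0x80000000;

    // Same superset tests as KMemoryRegion::IsDerivedFrom / CanDerive.
    constexpr bool IsDerivedFrom(u32 region_type, u32 base) {
        return (region_type | base) == region_type; // region carries every bit of base
    }
    constexpr bool CanDerive(u32 region_type, u32 derived) {
        return (region_type | derived) == derived;  // derived carries every bit of region
    }

    // Values checked later in the series: Dram = 0x2, DramHeapBase = 0x26 | LinearMapped.
    constexpr u32 Dram = 0x2;
    constexpr u32 DramHeapBase = 0x26 | LinearMapped;

    static_assert(IsDerivedFrom(DramHeapBase, Dram));  // a heap region is a Dram region
    static_assert(!IsDerivedFrom(Dram, DramHeapBase)); // but not the other way around
    static_assert(CanDerive(Dram, DramHeapBase));      // Dram may derive the heap type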
--- src/core/hle/kernel/k_memory_region.h | 36 +++++++++++++++--------------- src/core/hle/kernel/k_memory_region_type.h | 22 ++++++++++++++++++ 2 files changed, 40 insertions(+), 18 deletions(-) create mode 100644 src/core/hle/kernel/k_memory_region_type.h (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_memory_region.h b/src/core/hle/kernel/k_memory_region.h index de236df6a..afa89011c 100644 --- a/src/core/hle/kernel/k_memory_region.h +++ b/src/core/hle/kernel/k_memory_region.h @@ -7,15 +7,26 @@ #include "common/assert.h" #include "common/common_types.h" #include "common/intrusive_red_black_tree.h" +#include "core/hle/kernel/k_memory_region_type.h" namespace Kernel { class KMemoryRegion final : public Common::IntrusiveRedBlackTreeBaseNode, NonCopyable { - friend class KMemoryLayout; friend class KMemoryRegionTree; public: + constexpr KMemoryRegion() = default; + constexpr KMemoryRegion(u64 address_, u64 last_address_) + : address{address_}, last_address{last_address_} {} + constexpr KMemoryRegion(u64 address_, u64 last_address_, u64 pair_address_, u32 attributes_, + u32 type_id_) + : address(address_), last_address(last_address_), pair_address(pair_address_), + attributes(attributes_), type_id(type_id_) {} + constexpr KMemoryRegion(u64 address_, u64 last_address_, u32 attributes_, u32 type_id_) + : KMemoryRegion(address_, last_address_, std::numeric_limits::max(), attributes_, + type_id_) {} + static constexpr int Compare(const KMemoryRegion& lhs, const KMemoryRegion& rhs) { if (lhs.GetAddress() < rhs.GetAddress()) { return -1; @@ -68,9 +79,9 @@ public: return (this->GetType() | type) == this->GetType(); } - // constexpr bool HasTypeAttribute(KMemoryRegionAttr attr) const { - // return (this->GetType() | attr) == this->GetType(); - //} + constexpr bool HasTypeAttribute(KMemoryRegionAttr attr) const { + return (this->GetType() | static_cast(attr)) == this->GetType(); + } constexpr bool CanDerive(u32 type) const { return (this->GetType() | type) == type; @@ -80,22 +91,11 @@ public: pair_address = a; } - // constexpr void SetTypeAttribute(KMemoryRegionAttr attr) { - // type_id |= attr; - //} + constexpr void SetTypeAttribute(KMemoryRegionAttr attr) { + type_id |= static_cast(attr); + } private: - constexpr KMemoryRegion() = default; - constexpr KMemoryRegion(u64 address_, u64 last_address_) - : address{address_}, last_address{last_address_} {} - constexpr KMemoryRegion(u64 address_, u64 last_address_, u64 pair_address_, u32 attributes_, - u32 type_id_) - : address(address_), last_address(last_address_), pair_address(pair_address_), - attributes(attributes_), type_id(type_id_) {} - constexpr KMemoryRegion(u64 address_, u64 last_address_, u32 attributes_, u32 type_id_) - : KMemoryRegion(address_, last_address_, std::numeric_limits::max(), attributes_, - type_id_) {} - const u64 address{}; const u64 last_address{}; u64 pair_address{}; diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h new file mode 100644 index 000000000..6fb5abde9 --- /dev/null +++ b/src/core/hle/kernel/k_memory_region_type.h @@ -0,0 +1,22 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include "common/common_types.h" + +namespace Kernel { + +enum class KMemoryRegionType : u32 {}; + +enum class KMemoryRegionAttr : typename std::underlying_type::type { + CarveoutProtected = 0x04000000, + DidKernelMap = 0x08000000, + ShouldKernelMap = 0x10000000, + UserReadOnly = 0x20000000, + NoUserMap = 0x40000000, + LinearMapped = 0x80000000, +}; + +} // namespace Kernel -- cgit v1.2.3 From 5872561077e8b671ee9a80ecd5d116100458a28f Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 13 Feb 2021 01:29:32 -0800 Subject: hle: kernel: Migrate some code from Common::SpinLock to KSpinLock. --- src/core/hle/kernel/k_scheduler.cpp | 22 +++++++++++----------- src/core/hle/kernel/k_scheduler.h | 7 ++----- src/core/hle/kernel/k_scheduler_lock.h | 11 ++++------- src/core/hle/kernel/k_spin_lock.h | 6 ++++++ src/core/hle/kernel/k_thread.h | 4 ++-- 5 files changed, 25 insertions(+), 25 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index e7de48476..d1df97305 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -62,7 +62,7 @@ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedul } u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) { - std::scoped_lock lock{guard}; + KScopedSpinLock lk{guard}; if (KThread* prev_highest_thread = state.highest_priority_thread; prev_highest_thread != highest_thread) { if (prev_highest_thread != nullptr) { @@ -637,11 +637,11 @@ void KScheduler::RescheduleCurrentCore() { if (phys_core.IsInterrupted()) { phys_core.ClearInterrupt(); } - guard.lock(); + guard.Lock(); if (state.needs_scheduling.load()) { Schedule(); } else { - guard.unlock(); + guard.Unlock(); } } @@ -669,7 +669,7 @@ void KScheduler::Unload(KThread* thread) { } else { prev_thread = nullptr; } - thread->context_guard.unlock(); + thread->context_guard.Unlock(); } } @@ -713,7 +713,7 @@ void KScheduler::ScheduleImpl() { // If we're not actually switching thread, there's nothing to do. if (next_thread == current_thread.load()) { - guard.unlock(); + guard.Unlock(); return; } @@ -732,7 +732,7 @@ void KScheduler::ScheduleImpl() { } else { old_context = &idle_thread->GetHostContext(); } - guard.unlock(); + guard.Unlock(); Common::Fiber::YieldTo(*old_context, *switch_fiber); /// When a thread wakes up, the scheduler may have changed to other in another core. 
@@ -748,24 +748,24 @@ void KScheduler::OnSwitch(void* this_scheduler) { void KScheduler::SwitchToCurrent() { while (true) { { - std::scoped_lock lock{guard}; + KScopedSpinLock lk{guard}; current_thread.store(state.highest_priority_thread); state.needs_scheduling.store(false); } const auto is_switch_pending = [this] { - std::scoped_lock lock{guard}; + KScopedSpinLock lk{guard}; return state.needs_scheduling.load(); }; do { auto next_thread = current_thread.load(); if (next_thread != nullptr) { - next_thread->context_guard.lock(); + next_thread->context_guard.Lock(); if (next_thread->GetRawState() != ThreadState::Runnable) { - next_thread->context_guard.unlock(); + next_thread->context_guard.Unlock(); break; } if (next_thread->GetActiveCore() != core_id) { - next_thread->context_guard.unlock(); + next_thread->context_guard.Unlock(); break; } } diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index f595b9a5c..f1cca51dc 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h @@ -2,19 +2,16 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. -// This file references various implementation details from Atmosphere, an open-source firmware for -// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. - #pragma once #include #include "common/common_types.h" -#include "common/spin_lock.h" #include "core/hle/kernel/global_scheduler_context.h" #include "core/hle/kernel/k_priority_queue.h" #include "core/hle/kernel/k_scheduler_lock.h" #include "core/hle/kernel/k_scoped_lock.h" +#include "core/hle/kernel/k_spin_lock.h" namespace Common { class Fiber; @@ -195,7 +192,7 @@ private: u64 last_context_switch_time{}; const s32 core_id; - Common::SpinLock guard{}; + KSpinLock guard{}; }; class KScopedSchedulerLock : KScopedLock { diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h index 169455d18..47e315555 100644 --- a/src/core/hle/kernel/k_scheduler_lock.h +++ b/src/core/hle/kernel/k_scheduler_lock.h @@ -2,14 +2,11 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. -// This file references various implementation details from Atmosphere, an open-source firmware for -// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. - #pragma once #include "common/assert.h" -#include "common/spin_lock.h" #include "core/hardware_properties.h" +#include "core/hle/kernel/k_spin_lock.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/kernel.h" @@ -34,7 +31,7 @@ public: } else { // Otherwise, we want to disable scheduling and acquire the spinlock. SchedulerType::DisableScheduling(kernel); - spin_lock.lock(); + spin_lock.Lock(); // For debug, ensure that our state is valid. ASSERT(lock_count == 0); @@ -58,7 +55,7 @@ public: // Note that we no longer hold the lock, and unlock the spinlock. owner_thread = nullptr; - spin_lock.unlock(); + spin_lock.Unlock(); // Enable scheduling, and perform a rescheduling operation. 
SchedulerType::EnableScheduling(kernel, cores_needing_scheduling); @@ -67,7 +64,7 @@ public: private: KernelCore& kernel; - Common::SpinLock spin_lock{}; + KAlignedSpinLock spin_lock{}; s32 lock_count{}; KThread* owner_thread{}; }; diff --git a/src/core/hle/kernel/k_spin_lock.h b/src/core/hle/kernel/k_spin_lock.h index 12c4b2e88..4d87d006a 100644 --- a/src/core/hle/kernel/k_spin_lock.h +++ b/src/core/hle/kernel/k_spin_lock.h @@ -28,6 +28,12 @@ private: std::atomic_flag lck = ATOMIC_FLAG_INIT; }; +// TODO(bunnei): Alias for now, in case we want to implement these accurately in the future. +using KAlignedSpinLock = KSpinLock; +using KNotAlignedSpinLock = KSpinLock; + using KScopedSpinLock = KScopedLock; +using KScopedAlignedSpinLock = KScopedLock; +using KScopedNotAlignedSpinLock = KScopedLock; } // namespace Kernel diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index 1c19b23dc..1c86fdd20 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h @@ -14,10 +14,10 @@ #include "common/common_types.h" #include "common/intrusive_red_black_tree.h" -#include "common/spin_lock.h" #include "core/arm/arm_interface.h" #include "core/hle/kernel/k_affinity_mask.h" #include "core/hle/kernel/k_light_lock.h" +#include "core/hle/kernel/k_spin_lock.h" #include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/svc_common.h" @@ -732,7 +732,7 @@ private: s8 priority_inheritance_count{}; bool resource_limit_release_hint{}; StackParameters stack_parameters{}; - Common::SpinLock context_guard{}; + KSpinLock context_guard{}; // For emulation std::shared_ptr host_context{}; -- cgit v1.2.3 From 3fb64da452c4c7cbead05cc5489598b30f4c13f4 Mon Sep 17 00:00:00 2001 From: bunnei Date: Mon, 15 Feb 2021 21:30:46 -0800 Subject: hle: kernel: KMemoryRegion: Derive region values. 
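Each derived type keeps every bit of its parent and sets new bits above the parent's next-bit cursor, which is what the static_asserts below verify. A self-contained sketch of the DeriveInitial/DeriveSparse arithmetic (simplified free functions standing in for KMemoryRegionTypeValue; the values are checked against the asserts in this change):

    #include <cstddef>
    #include <cstdint>

    using u32 = std::uint32_t;

    struct TypeValue {
        u32 value{};
        std::size_t next_bit{};
    };

    // DeriveInitial(i, next): start a fresh hierarchy with bit i set.
    constexpr TypeValue DeriveInitial(std::size_t i, std::size_t next) {
        return {u32{1} << i, next};
    }

    // DeriveSparse(ofs, n, i): keep the parent's bits, set one transition bit and
    // the i-th of n sibling bits above it, then advance the cursor past all of them.
    constexpr TypeValue DeriveSparse(TypeValue parent, std::size_t ofs, std::size_t n,
                                     std::size_t i) {
        TypeValue out = parent;
        out.value |= u32{1} << (parent.next_bit + ofs);
        out.value |= u32{1} << (parent.next_bit + ofs + 1 + i);
        out.next_bit += ofs + n + 1;
        return out;
    }

    constexpr auto Dram = DeriveInitial(1, 2);                   // bit 1      -> 0x2
    constexpr auto DramKernelBase = DeriveSparse(Dram, 0, 3, 0); // bits 1,2,3 -> 0xE
    constexpr auto DramHeapBase = DeriveSparse(Dram, 0, 3, 2);   // bits 1,2,5 -> 0x26

    static_assert(Dram.value == 0x2);
    static_assert(DramKernelBase.value == 0xE);
    static_assert(DramHeapBase.value == 0x26);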
--- src/core/hle/kernel/k_memory_region_type.h | 327 +++++++++++++++++++++++++++++ 1 file changed, 327 insertions(+) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h index 6fb5abde9..243e2fd3d 100644 --- a/src/core/hle/kernel/k_memory_region_type.h +++ b/src/core/hle/kernel/k_memory_region_type.h @@ -4,6 +4,8 @@ #pragma once +#include "common/bit_util.h" +#include "common/common_funcs.h" #include "common/common_types.h" namespace Kernel { @@ -18,5 +20,330 @@ enum class KMemoryRegionAttr : typename std::underlying_type: NoUserMap = 0x40000000, LinearMapped = 0x80000000, }; +DECLARE_ENUM_FLAG_OPERATORS(KMemoryRegionAttr); + +namespace impl { + +constexpr size_t BitsForDeriveSparse(size_t n) { + return n + 1; +} + +constexpr size_t BitsForDeriveDense(size_t n) { + size_t low = 0, high = 1; + for (size_t i = 0; i < n - 1; ++i) { + if ((++low) == high) { + ++high; + low = 0; + } + } + return high + 1; +} + +class KMemoryRegionTypeValue { +private: + using ValueType = typename std::underlying_type::type; + +private: + ValueType m_value{}; + size_t m_next_bit{}; + bool m_finalized{}; + bool m_sparse_only{}; + bool m_dense_only{}; + +private: + constexpr KMemoryRegionTypeValue(ValueType v) : m_value(v) {} + +public: + constexpr KMemoryRegionTypeValue() = default; + + constexpr operator KMemoryRegionType() const { + return static_cast(m_value); + } + + constexpr ValueType GetValue() const { + return m_value; + } + + constexpr const KMemoryRegionTypeValue& Finalize() { + m_finalized = true; + return *this; + } + + constexpr const KMemoryRegionTypeValue& SetSparseOnly() { + m_sparse_only = true; + return *this; + } + + constexpr const KMemoryRegionTypeValue& SetDenseOnly() { + m_dense_only = true; + return *this; + } + + constexpr KMemoryRegionTypeValue& SetAttribute(KMemoryRegionAttr attr) { + m_value |= static_cast(attr); + return *this; + } + + constexpr KMemoryRegionTypeValue DeriveInitial( + size_t i, size_t next = Common::BitSize()) const { + KMemoryRegionTypeValue new_type = *this; + new_type.m_value = (ValueType{1} << i); + new_type.m_next_bit = next; + return new_type; + } + + constexpr KMemoryRegionTypeValue DeriveAttribute(KMemoryRegionAttr attr) const { + KMemoryRegionTypeValue new_type = *this; + new_type.m_value |= static_cast(attr); + return new_type; + } + + constexpr KMemoryRegionTypeValue DeriveTransition(size_t ofs = 0, size_t adv = 1) const { + KMemoryRegionTypeValue new_type = *this; + new_type.m_value |= (ValueType{1} << (m_next_bit + ofs)); + new_type.m_next_bit += adv; + return new_type; + } + + constexpr KMemoryRegionTypeValue DeriveSparse(size_t ofs, size_t n, size_t i) const { + KMemoryRegionTypeValue new_type = *this; + new_type.m_value |= (ValueType{1} << (m_next_bit + ofs)); + new_type.m_value |= (ValueType{1} << (m_next_bit + ofs + 1 + i)); + new_type.m_next_bit += ofs + n + 1; + return new_type; + } + + constexpr KMemoryRegionTypeValue Derive(size_t n, size_t i) const { + size_t low = 0, high = 1; + for (size_t j = 0; j < i; ++j) { + if ((++low) == high) { + ++high; + low = 0; + } + } + + KMemoryRegionTypeValue new_type = *this; + new_type.m_value |= (ValueType{1} << (m_next_bit + low)); + new_type.m_value |= (ValueType{1} << (m_next_bit + high)); + new_type.m_next_bit += BitsForDeriveDense(n); + return new_type; + } + + constexpr KMemoryRegionTypeValue Advance(size_t n) const { + KMemoryRegionTypeValue new_type = *this; + new_type.m_next_bit += n; + return new_type; + } + + 
constexpr bool IsAncestorOf(ValueType v) const { + return (m_value | v) == v; + } +}; + +} // namespace impl + +constexpr auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue(); +constexpr auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2); +constexpr auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2); +static_assert(KMemoryRegionType_Kernel.GetValue() == 0x1); +static_assert(KMemoryRegionType_Dram.GetValue() == 0x2); + +constexpr auto KMemoryRegionType_DramKernelBase = + KMemoryRegionType_Dram.DeriveSparse(0, 3, 0) + .SetAttribute(KMemoryRegionAttr::NoUserMap) + .SetAttribute(KMemoryRegionAttr::CarveoutProtected); +constexpr auto KMemoryRegionType_DramReservedBase = KMemoryRegionType_Dram.DeriveSparse(0, 3, 1); +constexpr auto KMemoryRegionType_DramHeapBase = + KMemoryRegionType_Dram.DeriveSparse(0, 3, 2).SetAttribute(KMemoryRegionAttr::LinearMapped); +static_assert(static_cast(KMemoryRegionType_DramKernelBase.GetValue()) == + (static_cast(0xE) | KMemoryRegionAttr::CarveoutProtected | + KMemoryRegionAttr::NoUserMap)); +static_assert(KMemoryRegionType_DramReservedBase.GetValue() == (0x16)); +static_assert(static_cast(KMemoryRegionType_DramHeapBase.GetValue()) == + (static_cast(0x26) | KMemoryRegionAttr::LinearMapped)); + +constexpr auto KMemoryRegionType_DramKernelCode = + KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 0); +constexpr auto KMemoryRegionType_DramKernelSlab = + KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 1); +constexpr auto KMemoryRegionType_DramKernelPtHeap = + KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 2).SetAttribute( + KMemoryRegionAttr::LinearMapped); +constexpr auto KMemoryRegionType_DramKernelInitPt = + KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 3).SetAttribute( + KMemoryRegionAttr::LinearMapped); +static_assert(static_cast(KMemoryRegionType_DramKernelCode.GetValue()) == + (static_cast(0xCE) | KMemoryRegionAttr::CarveoutProtected | + KMemoryRegionAttr::NoUserMap)); +static_assert(static_cast(KMemoryRegionType_DramKernelSlab.GetValue()) == + (static_cast(0x14E) | KMemoryRegionAttr::CarveoutProtected | + KMemoryRegionAttr::NoUserMap)); +static_assert(static_cast(KMemoryRegionType_DramKernelPtHeap.GetValue()) == + (static_cast(0x24E) | KMemoryRegionAttr::CarveoutProtected | + KMemoryRegionAttr::NoUserMap | KMemoryRegionAttr::LinearMapped)); +static_assert(static_cast(KMemoryRegionType_DramKernelInitPt.GetValue()) == + (static_cast(0x44E) | KMemoryRegionAttr::CarveoutProtected | + KMemoryRegionAttr::NoUserMap | KMemoryRegionAttr::LinearMapped)); + +constexpr auto KMemoryRegionType_DramReservedEarly = + KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr::NoUserMap); +static_assert(static_cast(KMemoryRegionType_DramReservedEarly.GetValue()) == + (static_cast(0x16) | KMemoryRegionAttr::NoUserMap)); + +constexpr auto KMemoryRegionType_KernelTraceBuffer = + KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 0) + .SetAttribute(KMemoryRegionAttr::LinearMapped) + .SetAttribute(KMemoryRegionAttr::UserReadOnly); +constexpr auto KMemoryRegionType_OnMemoryBootImage = + KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 1); +constexpr auto KMemoryRegionType_DTB = KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2); +static_assert(static_cast(KMemoryRegionType_KernelTraceBuffer.GetValue()) == + (static_cast(0xD6) | KMemoryRegionAttr::LinearMapped | + KMemoryRegionAttr::UserReadOnly)); +static_assert(KMemoryRegionType_OnMemoryBootImage.GetValue() == 0x156); 
+static_assert(KMemoryRegionType_DTB.GetValue() == 0x256); + +constexpr auto KMemoryRegionType_DramPoolPartition = + KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr::NoUserMap); +static_assert(static_cast(KMemoryRegionType_DramPoolPartition.GetValue()) == + (static_cast(0x26) | KMemoryRegionAttr::LinearMapped | + KMemoryRegionAttr::NoUserMap)); + +constexpr auto KMemoryRegionType_DramPoolManagement = + KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute( + KMemoryRegionAttr::CarveoutProtected); +constexpr auto KMemoryRegionType_DramUserPool = + KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition(); +static_assert(static_cast(KMemoryRegionType_DramPoolManagement.GetValue()) == + (static_cast(0x166) | KMemoryRegionAttr::LinearMapped | + KMemoryRegionAttr::NoUserMap | KMemoryRegionAttr::CarveoutProtected)); +static_assert(static_cast(KMemoryRegionType_DramUserPool.GetValue()) == + (static_cast(0x1A6) | KMemoryRegionAttr::LinearMapped | + KMemoryRegionAttr::NoUserMap)); + +constexpr auto KMemoryRegionType_DramApplicationPool = KMemoryRegionType_DramUserPool.Derive(4, 0); +constexpr auto KMemoryRegionType_DramAppletPool = KMemoryRegionType_DramUserPool.Derive(4, 1); +constexpr auto KMemoryRegionType_DramSystemNonSecurePool = + KMemoryRegionType_DramUserPool.Derive(4, 2); +constexpr auto KMemoryRegionType_DramSystemPool = + KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr::CarveoutProtected); +static_assert(static_cast(KMemoryRegionType_DramApplicationPool.GetValue()) == + (static_cast(0x7A6) | KMemoryRegionAttr::LinearMapped | + KMemoryRegionAttr::NoUserMap)); +static_assert(static_cast(KMemoryRegionType_DramAppletPool.GetValue()) == + (static_cast(0xBA6) | KMemoryRegionAttr::LinearMapped | + KMemoryRegionAttr::NoUserMap)); +static_assert( + static_cast(KMemoryRegionType_DramSystemNonSecurePool.GetValue()) == + (static_cast(0xDA6) | KMemoryRegionAttr::LinearMapped | + KMemoryRegionAttr::NoUserMap)); +static_assert(static_cast(KMemoryRegionType_DramSystemPool.GetValue()) == + (static_cast(0x13A6) | KMemoryRegionAttr::LinearMapped | + KMemoryRegionAttr::NoUserMap | KMemoryRegionAttr::CarveoutProtected)); + +constexpr auto KMemoryRegionType_VirtualDramHeapBase = KMemoryRegionType_Dram.DeriveSparse(1, 3, 0); +constexpr auto KMemoryRegionType_VirtualDramKernelPtHeap = + KMemoryRegionType_Dram.DeriveSparse(1, 3, 1); +constexpr auto KMemoryRegionType_VirtualDramKernelTraceBuffer = + KMemoryRegionType_Dram.DeriveSparse(1, 3, 2); +static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A); +static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A); +static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A); + +constexpr auto KMemoryRegionType_VirtualDramKernelInitPt = + KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0); +constexpr auto KMemoryRegionType_VirtualDramPoolManagement = + KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1); +constexpr auto KMemoryRegionType_VirtualDramUserPool = + KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2); +static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A); +static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A); +static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A); + +// NOTE: For unknown reason, the pools are derived out-of-order here. It's worth eventually trying +// to understand why Nintendo made this choice. 
+// UNUSED: .Derive(6, 0); +// UNUSED: .Derive(6, 1); +constexpr auto KMemoryRegionType_VirtualDramAppletPool = + KMemoryRegionType_VirtualDramUserPool.Derive(6, 2); +constexpr auto KMemoryRegionType_VirtualDramApplicationPool = + KMemoryRegionType_VirtualDramUserPool.Derive(6, 3); +constexpr auto KMemoryRegionType_VirtualDramSystemNonSecurePool = + KMemoryRegionType_VirtualDramUserPool.Derive(6, 4); +constexpr auto KMemoryRegionType_VirtualDramSystemPool = + KMemoryRegionType_VirtualDramUserPool.Derive(6, 5); +static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A); +static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A); +static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A); +static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A); + +constexpr auto KMemoryRegionType_ArchDeviceBase = + KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly(); +constexpr auto KMemoryRegionType_BoardDeviceBase = + KMemoryRegionType_Kernel.DeriveTransition(0, 2).SetDenseOnly(); +static_assert(KMemoryRegionType_ArchDeviceBase.GetValue() == 0x5); +static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5); + +#if defined(ATMOSPHERE_ARCH_ARM64) +#include +#elif defined(ATMOSPHERE_ARCH_ARM) +#include +#else +// Default to no architecture devices. +constexpr auto NumArchitectureDeviceRegions = 0; +#endif +static_assert(NumArchitectureDeviceRegions >= 0); + +#if defined(ATMOSPHERE_BOARD_NINTENDO_NX) +#include +#else +// Default to no board devices. +constexpr auto NumBoardDeviceRegions = 0; +#endif +static_assert(NumBoardDeviceRegions >= 0); + +constexpr auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0); +constexpr auto KMemoryRegionType_KernelStack = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1); +constexpr auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2); +constexpr auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3); +static_assert(KMemoryRegionType_KernelCode.GetValue() == 0x19); +static_assert(KMemoryRegionType_KernelStack.GetValue() == 0x29); +static_assert(KMemoryRegionType_KernelMisc.GetValue() == 0x49); +static_assert(KMemoryRegionType_KernelSlab.GetValue() == 0x89); + +constexpr auto KMemoryRegionType_KernelMiscDerivedBase = + KMemoryRegionType_KernelMisc.DeriveTransition(); +static_assert(KMemoryRegionType_KernelMiscDerivedBase.GetValue() == 0x149); + +// UNUSED: .Derive(7, 0); +constexpr auto KMemoryRegionType_KernelMiscMainStack = + KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 1); +constexpr auto KMemoryRegionType_KernelMiscMappedDevice = + KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 2); +constexpr auto KMemoryRegionType_KernelMiscExceptionStack = + KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 3); +constexpr auto KMemoryRegionType_KernelMiscUnknownDebug = + KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 4); +// UNUSED: .Derive(7, 5); +constexpr auto KMemoryRegionType_KernelMiscIdleStack = + KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 6); +static_assert(KMemoryRegionType_KernelMiscMainStack.GetValue() == 0xB49); +static_assert(KMemoryRegionType_KernelMiscMappedDevice.GetValue() == 0xD49); +static_assert(KMemoryRegionType_KernelMiscExceptionStack.GetValue() == 0x1349); +static_assert(KMemoryRegionType_KernelMiscUnknownDebug.GetValue() == 0x1549); +static_assert(KMemoryRegionType_KernelMiscIdleStack.GetValue() == 0x2349); + +constexpr auto KMemoryRegionType_KernelTemp = 
KMemoryRegionType_Kernel.Advance(2).Derive(2, 0); +static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31); + +constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) { + if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) { + return KMemoryRegionType_VirtualDramKernelTraceBuffer; + } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) { + return KMemoryRegionType_VirtualDramKernelPtHeap; + } else { + return KMemoryRegionType_Dram; + } +} } // namespace Kernel -- cgit v1.2.3 From c17beefe3d1e4850b3518a45892c0c5c6e11c95a Mon Sep 17 00:00:00 2001 From: bunnei Date: Thu, 18 Feb 2021 16:38:30 -0800 Subject: hle: kernel: Add architecture and board specific memory regions. --- .../arch/arm64/k_memory_region_device_types.inc | 20 +++++++++ .../nintendo/nx/k_memory_region_device_types.inc | 52 ++++++++++++++++++++++ 2 files changed, 72 insertions(+) create mode 100644 src/core/hle/kernel/arch/arm64/k_memory_region_device_types.inc create mode 100644 src/core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/arch/arm64/k_memory_region_device_types.inc b/src/core/hle/kernel/arch/arm64/k_memory_region_device_types.inc new file mode 100644 index 000000000..857b512ba --- /dev/null +++ b/src/core/hle/kernel/arch/arm64/k_memory_region_device_types.inc @@ -0,0 +1,20 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// All architectures must define NumArchitectureDeviceRegions. +constexpr inline const auto NumArchitectureDeviceRegions = 3; + +constexpr inline const auto KMemoryRegionType_Uart = + KMemoryRegionType_ArchDeviceBase.DeriveSparse(0, NumArchitectureDeviceRegions, 0); +constexpr inline const auto KMemoryRegionType_InterruptCpuInterface = + KMemoryRegionType_ArchDeviceBase.DeriveSparse(0, NumArchitectureDeviceRegions, 1) + .SetAttribute(KMemoryRegionAttr_NoUserMap); +constexpr inline const auto KMemoryRegionType_InterruptDistributor = + KMemoryRegionType_ArchDeviceBase.DeriveSparse(0, NumArchitectureDeviceRegions, 2) + .SetAttribute(KMemoryRegionAttr_NoUserMap); +static_assert(KMemoryRegionType_Uart.GetValue() == (0x1D)); +static_assert(KMemoryRegionType_InterruptCpuInterface.GetValue() == + (0x2D | KMemoryRegionAttr_NoUserMap)); +static_assert(KMemoryRegionType_InterruptDistributor.GetValue() == + (0x4D | KMemoryRegionAttr_NoUserMap)); diff --git a/src/core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc b/src/core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc new file mode 100644 index 000000000..58d6c0b16 --- /dev/null +++ b/src/core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc @@ -0,0 +1,52 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// All architectures must define NumBoardDeviceRegions. 
+constexpr inline const auto NumBoardDeviceRegions = 6; +// UNUSED: .Derive(NumBoardDeviceRegions, 0); +constexpr inline const auto KMemoryRegionType_MemoryController = + KMemoryRegionType_BoardDeviceBase.Derive(NumBoardDeviceRegions, 1) + .SetAttribute(KMemoryRegionAttr_NoUserMap); +constexpr inline const auto KMemoryRegionType_MemoryController1 = + KMemoryRegionType_BoardDeviceBase.Derive(NumBoardDeviceRegions, 2) + .SetAttribute(KMemoryRegionAttr_NoUserMap); +constexpr inline const auto KMemoryRegionType_MemoryController0 = + KMemoryRegionType_BoardDeviceBase.Derive(NumBoardDeviceRegions, 3) + .SetAttribute(KMemoryRegionAttr_NoUserMap); +constexpr inline const auto KMemoryRegionType_PowerManagementController = + KMemoryRegionType_BoardDeviceBase.Derive(NumBoardDeviceRegions, 4).DeriveTransition(); +constexpr inline const auto KMemoryRegionType_LegacyLpsDevices = + KMemoryRegionType_BoardDeviceBase.Derive(NumBoardDeviceRegions, 5); +static_assert(KMemoryRegionType_MemoryController.GetValue() == + (0x55 | KMemoryRegionAttr_NoUserMap)); +static_assert(KMemoryRegionType_MemoryController1.GetValue() == + (0x65 | KMemoryRegionAttr_NoUserMap)); +static_assert(KMemoryRegionType_MemoryController0.GetValue() == + (0x95 | KMemoryRegionAttr_NoUserMap)); +static_assert(KMemoryRegionType_PowerManagementController.GetValue() == (0x1A5)); + +static_assert(KMemoryRegionType_LegacyLpsDevices.GetValue() == 0xC5); + +constexpr inline const auto NumLegacyLpsDevices = 7; +constexpr inline const auto KMemoryRegionType_LegacyLpsExceptionVectors = + KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 0); +constexpr inline const auto KMemoryRegionType_LegacyLpsIram = + KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 1); +constexpr inline const auto KMemoryRegionType_LegacyLpsFlowController = + KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 2); +constexpr inline const auto KMemoryRegionType_LegacyLpsPrimaryICtlr = + KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 3); +constexpr inline const auto KMemoryRegionType_LegacyLpsSemaphore = + KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 4); +constexpr inline const auto KMemoryRegionType_LegacyLpsAtomics = + KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 5); +constexpr inline const auto KMemoryRegionType_LegacyLpsClkRst = + KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 6); +static_assert(KMemoryRegionType_LegacyLpsExceptionVectors.GetValue() == 0x3C5); +static_assert(KMemoryRegionType_LegacyLpsIram.GetValue() == 0x5C5); +static_assert(KMemoryRegionType_LegacyLpsFlowController.GetValue() == 0x6C5); +static_assert(KMemoryRegionType_LegacyLpsPrimaryICtlr.GetValue() == 0x9C5); +static_assert(KMemoryRegionType_LegacyLpsSemaphore.GetValue() == 0xAC5); +static_assert(KMemoryRegionType_LegacyLpsAtomics.GetValue() == 0xCC5); +static_assert(KMemoryRegionType_LegacyLpsClkRst.GetValue() == 0x11C5); -- cgit v1.2.3 From 43a29b58033794dce24b0b19cda32b87963d924f Mon Sep 17 00:00:00 2001 From: bunnei Date: Thu, 18 Feb 2021 16:39:41 -0800 Subject: hle: kernel: KMemoryManager: Add aliases. 
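Unsafe and Secure are straight aliases for the existing Application and System pools, so code written against the Atmosphere-style names resolves to the pools the managers already initialize. A small usage sketch (assumes the k_memory_manager.h header; the PickPool helper and its flag are illustrative only, not part of the change):

    #include "core/hle/kernel/k_memory_manager.h"

    using Pool = Kernel::KMemoryManager::Pool;

    // Hypothetical helper: pick a pool by the new alias names.
    constexpr Pool PickPool(bool secure_memory) {
        return secure_memory ? Pool::Secure : Pool::Unsafe;
    }

    // The aliases are the same enumerators as before.
    static_assert(PickPool(true) == Pool::System);
    static_assert(PickPool(false) == Pool::Application);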
--- src/core/hle/kernel/k_memory_manager.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h index ae9f683b8..587dad178 100644 --- a/src/core/hle/kernel/k_memory_manager.h +++ b/src/core/hle/kernel/k_memory_manager.h @@ -29,6 +29,10 @@ public: Shift = 4, Mask = (0xF << Shift), + + // Aliases. + Unsafe = Application, + Secure = System, }; enum class Direction : u32 { -- cgit v1.2.3 From a439867f2c09f536e01ffdd4da9c01604cb9df45 Mon Sep 17 00:00:00 2001 From: bunnei Date: Thu, 18 Feb 2021 17:55:46 -0800 Subject: hle: kernel: KMemoryManager: Add CalculateManagementOverheadSize. --- src/core/hle/kernel/k_memory_manager.cpp | 12 ++++++++++++ src/core/hle/kernel/k_memory_manager.h | 14 ++++++++++++++ 2 files changed, 26 insertions(+) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp index 9027602bf..aa71697b2 100644 --- a/src/core/hle/kernel/k_memory_manager.cpp +++ b/src/core/hle/kernel/k_memory_manager.cpp @@ -173,4 +173,16 @@ ResultCode KMemoryManager::Free(KPageLinkedList& page_list, std::size_t num_page return RESULT_SUCCESS; } +std::size_t KMemoryManager::Impl::CalculateManagementOverheadSize(std::size_t region_size) { + const std::size_t ref_count_size = (region_size / PageSize) * sizeof(u16); + const std::size_t optimize_map_size = + (Common::AlignUp((region_size / PageSize), Common::BitSize()) / + Common::BitSize()) * + sizeof(u64); + const std::size_t manager_meta_size = + Common::AlignUp(optimize_map_size + ref_count_size, PageSize); + const std::size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region_size); + return manager_meta_size + page_heap_size; +} + } // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h index 587dad178..ac840b3d0 100644 --- a/src/core/hle/kernel/k_memory_manager.h +++ b/src/core/hle/kernel/k_memory_manager.h @@ -60,6 +60,10 @@ public: static constexpr std::size_t MaxManagerCount = 10; public: + static std::size_t CalculateManagementOverheadSize(std::size_t region_size) { + return Impl::CalculateManagementOverheadSize(region_size); + } + static constexpr u32 EncodeOption(Pool pool, Direction dir) { return (static_cast(pool) << static_cast(Pool::Shift)) | (static_cast(dir) << static_cast(Direction::Shift)); @@ -89,6 +93,16 @@ private: KPageHeap heap; Pool pool{}; + public: + static std::size_t CalculateManagementOverheadSize(std::size_t region_size); + + static constexpr std::size_t CalculateOptimizedProcessOverheadSize( + std::size_t region_size) { + return (Common::AlignUp((region_size / PageSize), Common::BitSize()) / + Common::BitSize()) * + sizeof(u64); + } + public: Impl() = default; -- cgit v1.2.3 From 57625177289624051e107578ccfce749a083f51a Mon Sep 17 00:00:00 2001 From: bunnei Date: Thu, 18 Feb 2021 18:38:23 -0800 Subject: hle: kernel: KSystemControl: Update to reflect board-specific behavior. 
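The GenerateUniformRange helper that moves into the board-specific KSystemControl draws raw 64-bit values and rejects the ones that would bias the final modulo, instead of mapping every draw. A standalone sketch of the same rejection-sampling idea (deterministic std::mt19937_64 stand-in for the real generator; the range values are arbitrary examples):

    #include <cstdint>
    #include <limits>
    #include <random>

    using u64 = std::uint64_t;

    // Same structure as GenerateUniformRange(): only accept draws below the largest
    // multiple of range_size, so "rnd % range_size" stays uniform.
    template <typename F>
    u64 UniformRange(u64 min, u64 max, F f) {
        if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
            return f(); // the full range cannot be represented as a size
        }
        const u64 range_size = (max + 1) - min;
        const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
        while (true) {
            if (const u64 rnd = f(); rnd < effective_max) {
                return min + (rnd % range_size);
            }
        }
    }

    int main() {
        std::mt19937_64 gen{0}; // fixed-seed stand-in, not the kernel's RNG
        const u64 value = UniformRange(0x1000, 0x1FFF, [&] { return gen(); });
        return (value >= 0x1000 && value <= 0x1FFF) ? 0 : 1;
    }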
--- .../kernel/board/nintendo/nx/k_system_control.cpp | 46 ++++++++++++++++++++++ .../kernel/board/nintendo/nx/k_system_control.h | 22 +++++++++++ src/core/hle/kernel/k_system_control.cpp | 42 -------------------- src/core/hle/kernel/k_system_control.h | 18 +++++---- 4 files changed, 79 insertions(+), 49 deletions(-) create mode 100644 src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp create mode 100644 src/core/hle/kernel/board/nintendo/nx/k_system_control.h delete mode 100644 src/core/hle/kernel/k_system_control.cpp (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp new file mode 100644 index 000000000..0f41c4110 --- /dev/null +++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp @@ -0,0 +1,46 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include + +#include "core/hle/kernel/k_system_control.h" + +namespace Kernel::Board::Nintendo::Nx { + +bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() { + return true; +} + +namespace { +template +u64 GenerateUniformRange(u64 min, u64 max, F f) { + // Handle the case where the difference is too large to represent. + if (max == std::numeric_limits::max() && min == std::numeric_limits::min()) { + return f(); + } + + // Iterate until we get a value in range. + const u64 range_size = ((max + 1) - min); + const u64 effective_max = (std::numeric_limits::max() / range_size) * range_size; + while (true) { + if (const u64 rnd = f(); rnd < effective_max) { + return min + (rnd % range_size); + } + } +} + +} // Anonymous namespace + +u64 KSystemControl::GenerateRandomU64() { + static std::random_device device; + static std::mt19937 gen(device()); + static std::uniform_int_distribution distribution(1, std::numeric_limits::max()); + return distribution(gen); +} + +u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) { + return GenerateUniformRange(min, max, GenerateRandomU64); +} + +} // namespace Kernel::Board::Nintendo::Nx diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h new file mode 100644 index 000000000..3fde61971 --- /dev/null +++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h @@ -0,0 +1,22 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/common_types.h" + +namespace Kernel::Board::Nintendo::Nx { + +class KSystemControl { +public: + class Init { + public: + static bool ShouldIncreaseThreadResourceLimit(); + }; + + static u64 GenerateRandomRange(u64 min, u64 max); + static u64 GenerateRandomU64(); +}; + +} // namespace Kernel::Board::Nintendo::Nx diff --git a/src/core/hle/kernel/k_system_control.cpp b/src/core/hle/kernel/k_system_control.cpp deleted file mode 100644 index aa1682f69..000000000 --- a/src/core/hle/kernel/k_system_control.cpp +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2021 yuzu Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#include - -#include "core/hle/kernel/k_system_control.h" - -namespace Kernel { - -namespace { -template -u64 GenerateUniformRange(u64 min, u64 max, F f) { - // Handle the case where the difference is too large to represent. 
- if (max == std::numeric_limits::max() && min == std::numeric_limits::min()) { - return f(); - } - - // Iterate until we get a value in range. - const u64 range_size = ((max + 1) - min); - const u64 effective_max = (std::numeric_limits::max() / range_size) * range_size; - while (true) { - if (const u64 rnd = f(); rnd < effective_max) { - return min + (rnd % range_size); - } - } -} - -} // Anonymous namespace - -u64 KSystemControl::GenerateRandomU64() { - static std::random_device device; - static std::mt19937 gen(device()); - static std::uniform_int_distribution distribution(1, std::numeric_limits::max()); - return distribution(gen); -} - -u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) { - return GenerateUniformRange(min, max, GenerateRandomU64); -} - -} // namespace Kernel diff --git a/src/core/hle/kernel/k_system_control.h b/src/core/hle/kernel/k_system_control.h index 1d5b64ffa..d755082c2 100644 --- a/src/core/hle/kernel/k_system_control.h +++ b/src/core/hle/kernel/k_system_control.h @@ -6,14 +6,18 @@ #include "common/common_types.h" -namespace Kernel { +#define BOARD_NINTENDO_NX + +#ifdef BOARD_NINTENDO_NX -class KSystemControl { -public: - KSystemControl() = default; +#include "core/hle/kernel/board/nintendo/nx/k_system_control.h" - static u64 GenerateRandomRange(u64 min, u64 max); - static u64 GenerateRandomU64(); -}; +namespace Kernel { + +using Kernel::Board::Nintendo::Nx::KSystemControl; } // namespace Kernel + +#else +#error "Unknown board for KSystemControl" +#endif -- cgit v1.2.3 From f2e14415674f8a8fb95975cef66201ee32b89c08 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 19 Mar 2021 23:03:57 -0700 Subject: hle: kernel: Add k_trace module. --- src/core/hle/kernel/k_trace.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 src/core/hle/kernel/k_trace.h (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_trace.h b/src/core/hle/kernel/k_trace.h new file mode 100644 index 000000000..91ebf9ab2 --- /dev/null +++ b/src/core/hle/kernel/k_trace.h @@ -0,0 +1,12 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +namespace Kernel { + +constexpr bool IsKTraceEnabled = false; +constexpr std::size_t KTraceBufferSize = IsKTraceEnabled ? 16 * 1024 * 1024 : 0; + +} // namespace Kernel -- cgit v1.2.3 From 01f04fee322ae2750171bb0b526054170f61ad2d Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 19 Mar 2021 23:29:29 -0700 Subject: hle: kernel: k_address_space_info: Cleanup. 
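The ULL suffixes make every constant and every product 64-bit from the first operand; the larger address-space sizes do not fit in 32 bits at all. A quick standalone check of the affected values:

    #include <cstdint>

    // Same constants as the cleaned-up enum, spelled as u64 from the start.
    constexpr std::uint64_t Size_1_MB = 0x100000ULL;
    constexpr std::uint64_t Size_1_GB = 0x40000000ULL;
    constexpr std::uint64_t Size_64_GB = 64ULL * Size_1_GB;
    constexpr std::uint64_t Size_512_GB = 512ULL * Size_1_GB;

    static_assert(Size_1_GB == 1024 * Size_1_MB);
    static_assert(Size_64_GB == 0x10'0000'0000);
    static_assert(Size_512_GB == 0x80'0000'0000);
    // Both of the large sizes are well past the 32-bit limit.
    static_assert(Size_512_GB > std::uint64_t{0xFFFFFFFF});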
--- src/core/hle/kernel/k_address_space_info.cpp | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_address_space_info.cpp b/src/core/hle/kernel/k_address_space_info.cpp index 24944d15b..e9f56fbed 100644 --- a/src/core/hle/kernel/k_address_space_info.cpp +++ b/src/core/hle/kernel/k_address_space_info.cpp @@ -12,15 +12,15 @@ namespace Kernel { namespace { enum : u64 { - Size_1_MB = 0x100000, - Size_2_MB = 2 * Size_1_MB, - Size_128_MB = 128 * Size_1_MB, - Size_1_GB = 0x40000000, - Size_2_GB = 2 * Size_1_GB, - Size_4_GB = 4 * Size_1_GB, - Size_6_GB = 6 * Size_1_GB, - Size_64_GB = 64 * Size_1_GB, - Size_512_GB = 512 * Size_1_GB, + Size_1_MB = 0x100000ULL, + Size_2_MB = 2ULL * Size_1_MB, + Size_128_MB = 128ULL * Size_1_MB, + Size_1_GB = 0x40000000ULL, + Size_2_GB = 2ULL * Size_1_GB, + Size_4_GB = 4ULL * Size_1_GB, + Size_6_GB = 6ULL * Size_1_GB, + Size_64_GB = 64ULL * Size_1_GB, + Size_512_GB = 512ULL * Size_1_GB, Invalid = std::numeric_limits::max(), }; -- cgit v1.2.3 From 28be8aec9a4499206662a7bd74091706000e3d6d Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 19 Mar 2021 23:52:47 -0700 Subject: common: Move common sizes to their own header for code reuse. --- src/core/hle/kernel/k_address_space_info.cpp | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_address_space_info.cpp b/src/core/hle/kernel/k_address_space_info.cpp index e9f56fbed..eb8af660c 100644 --- a/src/core/hle/kernel/k_address_space_info.cpp +++ b/src/core/hle/kernel/k_address_space_info.cpp @@ -5,25 +5,13 @@ #include #include "common/assert.h" +#include "common/common_sizes.h" #include "core/hle/kernel/k_address_space_info.h" namespace Kernel { namespace { -enum : u64 { - Size_1_MB = 0x100000ULL, - Size_2_MB = 2ULL * Size_1_MB, - Size_128_MB = 128ULL * Size_1_MB, - Size_1_GB = 0x40000000ULL, - Size_2_GB = 2ULL * Size_1_GB, - Size_4_GB = 4ULL * Size_1_GB, - Size_6_GB = 6ULL * Size_1_GB, - Size_64_GB = 64ULL * Size_1_GB, - Size_512_GB = 512ULL * Size_1_GB, - Invalid = std::numeric_limits::max(), -}; - // clang-format off constexpr std::array AddressSpaceInfos{{ { .bit_width = 32, .address = Size_2_MB , .size = Size_1_GB - Size_2_MB , .type = KAddressSpaceInfo::Type::MapSmall, }, -- cgit v1.2.3 From 10265ad0e4fbc5e54c1607d18eded80d8340606c Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 20 Mar 2021 00:00:49 -0700 Subject: hle: kernel: board: Add secure_monitor module. --- .../hle/kernel/board/nintendo/nx/secure_monitor.h | 26 ++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 src/core/hle/kernel/board/nintendo/nx/secure_monitor.h (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/board/nintendo/nx/secure_monitor.h b/src/core/hle/kernel/board/nintendo/nx/secure_monitor.h new file mode 100644 index 000000000..0c366b252 --- /dev/null +++ b/src/core/hle/kernel/board/nintendo/nx/secure_monitor.h @@ -0,0 +1,26 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include "common/common_types.h" + +namespace Kernel::Board::Nintendo::Nx::Smc { + +enum MemorySize { + MemorySize_4GB = 0, + MemorySize_6GB = 1, + MemorySize_8GB = 2, +}; + +enum MemoryArrangement { + MemoryArrangement_4GB = 0, + MemoryArrangement_4GBForAppletDev = 1, + MemoryArrangement_4GBForSystemDev = 2, + MemoryArrangement_6GB = 3, + MemoryArrangement_6GBForAppletDev = 4, + MemoryArrangement_8GB = 5, +}; + +} // namespace Kernel::Board::Nintendo::Nx::Smc -- cgit v1.2.3 From edbc505e52c8dacce91c7f89c798ae1b6da801e7 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 20 Mar 2021 00:23:06 -0700 Subject: hle: kernel: board: k_system_control: Extend to include memory region sizes. --- .../kernel/board/nintendo/nx/k_system_control.cpp | 120 ++++++++++++++++++++- .../kernel/board/nintendo/nx/k_system_control.h | 6 ++ 2 files changed, 125 insertions(+), 1 deletion(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp index 0f41c4110..a48d0c11e 100644 --- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp +++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp @@ -4,14 +4,132 @@ #include -#include "core/hle/kernel/k_system_control.h" +#include "common/common_sizes.h" +#include "core/hle/kernel/board/nintendo/nx/k_system_control.h" +#include "core/hle/kernel/board/nintendo/nx/secure_monitor.h" +#include "core/hle/kernel/k_trace.h" namespace Kernel::Board::Nintendo::Nx { +namespace impl { + +constexpr inline const std::size_t RequiredNonSecureSystemMemorySizeVi = 0x2238 * 4 * 1024; +constexpr inline const std::size_t RequiredNonSecureSystemMemorySizeNvservices = 0x710 * 4 * 1024; +constexpr inline const std::size_t RequiredNonSecureSystemMemorySizeMisc = 0x80 * 4 * 1024; + +} // namespace impl + +constexpr inline const std::size_t RequiredNonSecureSystemMemorySize = + impl::RequiredNonSecureSystemMemorySizeVi + impl::RequiredNonSecureSystemMemorySizeNvservices + + impl::RequiredNonSecureSystemMemorySizeMisc; + +namespace { + +u32 GetMemoryModeForInit() { + return 0x01; +} + +u32 GetMemorySizeForInit() { + return 0; +} + +Smc::MemoryArrangement GetMemoryArrangeForInit() { + switch (GetMemoryModeForInit() & 0x3F) { + case 0x01: + default: + return Smc::MemoryArrangement_4GB; + case 0x02: + return Smc::MemoryArrangement_4GBForAppletDev; + case 0x03: + return Smc::MemoryArrangement_4GBForSystemDev; + case 0x11: + return Smc::MemoryArrangement_6GB; + case 0x12: + return Smc::MemoryArrangement_6GBForAppletDev; + case 0x21: + return Smc::MemoryArrangement_8GB; + } +} +} // namespace + +// Initialization. +size_t KSystemControl::Init::GetIntendedMemorySize() { + switch (GetMemorySizeForInit()) { + case Smc::MemorySize_4GB: + default: // All invalid modes should go to 4GB. + return Size_4_GB; + case Smc::MemorySize_6GB: + return Size_6_GB; + case Smc::MemorySize_8GB: + return Size_8_GB; + } +} + +PAddr KSystemControl::Init::GetKernelPhysicalBaseAddress(u64 base_address) { + return base_address; +} + bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() { return true; } +std::size_t KSystemControl::Init::GetApplicationPoolSize() { + // Get the base pool size. 
+ const size_t base_pool_size = []() -> size_t { + switch (GetMemoryArrangeForInit()) { + case Smc::MemoryArrangement_4GB: + default: + return Size_3285_MB; + case Smc::MemoryArrangement_4GBForAppletDev: + return Size_2048_MB; + case Smc::MemoryArrangement_4GBForSystemDev: + return Size_3285_MB; + case Smc::MemoryArrangement_6GB: + return Size_4916_MB; + case Smc::MemoryArrangement_6GBForAppletDev: + return Size_3285_MB; + case Smc::MemoryArrangement_8GB: + return Size_4916_MB; + } + }(); + + // Return (possibly) adjusted size. + return base_pool_size; +} + +size_t KSystemControl::Init::GetAppletPoolSize() { + // Get the base pool size. + const size_t base_pool_size = []() -> size_t { + switch (GetMemoryArrangeForInit()) { + case Smc::MemoryArrangement_4GB: + default: + return Size_507_MB; + case Smc::MemoryArrangement_4GBForAppletDev: + return Size_1554_MB; + case Smc::MemoryArrangement_4GBForSystemDev: + return Size_448_MB; + case Smc::MemoryArrangement_6GB: + return Size_562_MB; + case Smc::MemoryArrangement_6GBForAppletDev: + return Size_2193_MB; + case Smc::MemoryArrangement_8GB: + return Size_2193_MB; + } + }(); + + // Return (possibly) adjusted size. + constexpr size_t ExtraSystemMemoryForAtmosphere = Size_33_MB; + return base_pool_size - ExtraSystemMemoryForAtmosphere - KTraceBufferSize; +} + +size_t KSystemControl::Init::GetMinimumNonSecureSystemPoolSize() { + // Verify that our minimum is at least as large as Nintendo's. + constexpr size_t MinimumSize = RequiredNonSecureSystemMemorySize; + static_assert(MinimumSize >= 0x29C8000); + + return MinimumSize; +} + namespace { template u64 GenerateUniformRange(u64 min, u64 max, F f) { diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h index 3fde61971..52f230ced 100644 --- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h +++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h @@ -12,7 +12,13 @@ class KSystemControl { public: class Init { public: + // Initialization. + static std::size_t GetIntendedMemorySize(); + static PAddr GetKernelPhysicalBaseAddress(u64 base_address); static bool ShouldIncreaseThreadResourceLimit(); + static std::size_t GetApplicationPoolSize(); + static std::size_t GetAppletPoolSize(); + static std::size_t GetMinimumNonSecureSystemPoolSize(); }; static u64 GenerateRandomRange(u64 min, u64 max); -- cgit v1.2.3 From 80688362cf90055cfdee8bcd5a5c0cfd2c57b67f Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 20 Mar 2021 00:36:19 -0700 Subject: hle: kernel: k_memory_region: Refactor to simplify code. 
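The refactor below keeps the region type/attribute encoding itself unchanged: region types are bit patterns, attributes occupy the high bits, and "derivation" is a superset test on those bits, as implemented by KMemoryRegion::IsDerivedFrom and CanDerive. A minimal compile-time sketch of those two checks, using values taken from the static_asserts in k_memory_region_type.h below (the standalone function names and constants here are illustrative, not part of the patch):

// Sketch only: standalone versions of the derivation checks on KMemoryRegion.
#include <cstdint>

namespace {
constexpr bool IsDerivedFrom(std::uint32_t region_type, std::uint32_t base_type) {
    // A region is derived from a base when it carries every bit of the base.
    return (region_type | base_type) == region_type;
}
constexpr bool CanDerive(std::uint32_t region_type, std::uint32_t new_type) {
    // A region may be re-typed only if the new type keeps all of its bits.
    return (region_type | new_type) == new_type;
}

constexpr std::uint32_t Dram = 0x2;                          // KMemoryRegionType_Dram
constexpr std::uint32_t LinearMapped = 0x80000000;           // KMemoryRegionAttr_LinearMapped
constexpr std::uint32_t DramHeapBase = 0x26 | LinearMapped;  // KMemoryRegionType_DramHeapBase

static_assert(IsDerivedFrom(DramHeapBase, Dram));   // heap regions are DRAM
static_assert(!IsDerivedFrom(Dram, DramHeapBase));  // but not the other way around
static_assert(CanDerive(Dram, DramHeapBase));       // DRAM may be refined into heap
} // namespace

int main() {
    return 0;
}
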
--- src/core/hle/kernel/k_memory_region.h | 42 ++++++---- src/core/hle/kernel/k_memory_region_type.h | 130 ++++++++++++++--------------- 2 files changed, 89 insertions(+), 83 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_memory_region.h b/src/core/hle/kernel/k_memory_region.h index afa89011c..1d4fcde6f 100644 --- a/src/core/hle/kernel/k_memory_region.h +++ b/src/core/hle/kernel/k_memory_region.h @@ -24,7 +24,7 @@ public: : address(address_), last_address(last_address_), pair_address(pair_address_), attributes(attributes_), type_id(type_id_) {} constexpr KMemoryRegion(u64 address_, u64 last_address_, u32 attributes_, u32 type_id_) - : KMemoryRegion(address_, last_address_, std::numeric_limits::max(), attributes_, + : KMemoryRegion(address_, last_address_, std::numeric_limits::max(), attributes_, type_id_) {} static constexpr int Compare(const KMemoryRegion& lhs, const KMemoryRegion& rhs) { @@ -37,6 +37,16 @@ public: } } +private: + constexpr void Reset(u64 a, u64 la, u64 p, u32 r, u32 t) { + address = a; + pair_address = p; + last_address = la; + attributes = r; + type_id = t; + } + +public: constexpr u64 GetAddress() const { return address; } @@ -50,11 +60,11 @@ public: } constexpr u64 GetEndAddress() const { - return GetLastAddress() + 1; + return this->GetLastAddress() + 1; } - constexpr std::size_t GetSize() const { - return GetEndAddress() - GetAddress(); + constexpr size_t GetSize() const { + return this->GetEndAddress() - this->GetAddress(); } constexpr u32 GetAttributes() const { @@ -70,7 +80,7 @@ public: type_id = type; } - constexpr bool Contains(uintptr_t address) const { + constexpr bool Contains(u64 address) const { ASSERT(this->GetEndAddress() != 0); return this->GetAddress() <= address && address <= this->GetLastAddress(); } @@ -80,7 +90,7 @@ public: } constexpr bool HasTypeAttribute(KMemoryRegionAttr attr) const { - return (this->GetType() | static_cast(attr)) == this->GetType(); + return (this->GetType() | attr) == this->GetType(); } constexpr bool CanDerive(u32 type) const { @@ -92,12 +102,12 @@ public: } constexpr void SetTypeAttribute(KMemoryRegionAttr attr) { - type_id |= static_cast(attr); + type_id |= attr; } private: - const u64 address{}; - const u64 last_address{}; + u64 address{}; + u64 last_address{}; u64 pair_address{}; u32 attributes{}; u32 type_id{}; @@ -166,9 +176,9 @@ public: } } - const KMemoryRegion* FindByType(u32 type_id) const { + const KMemoryRegion* FindByType(KMemoryRegionType type_id) const { for (auto it = this->cbegin(); it != this->cend(); ++it) { - if (it->GetType() == type_id) { + if (it->GetType() == static_cast(type_id)) { return std::addressof(*it); } } @@ -184,7 +194,7 @@ public: return nullptr; } - const KMemoryRegion* FindFirstDerived(u32 type_id) const { + const KMemoryRegion* FindFirstDerived(KMemoryRegionType type_id) const { for (auto it = this->cbegin(); it != this->cend(); it++) { if (it->IsDerivedFrom(type_id)) { return std::addressof(*it); @@ -193,7 +203,7 @@ public: return nullptr; } - const KMemoryRegion* FindLastDerived(u32 type_id) const { + const KMemoryRegion* FindLastDerived(KMemoryRegionType type_id) const { const KMemoryRegion* region = nullptr; for (auto it = this->begin(); it != this->end(); it++) { if (it->IsDerivedFrom(type_id)) { @@ -203,7 +213,7 @@ public: return region; } - DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) const { + DerivedRegionExtents GetDerivedRegionExtents(KMemoryRegionType type_id) const { DerivedRegionExtents extents; ASSERT(extents.first_region == 
nullptr); @@ -224,6 +234,10 @@ public: return extents; } + DerivedRegionExtents GetDerivedRegionExtents(KMemoryRegionAttr type_id) const { + return GetDerivedRegionExtents(static_cast(type_id)); + } + public: void InsertDirectly(u64 address, u64 last_address, u32 attr = 0, u32 type_id = 0); bool Insert(u64 address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0); diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h index 243e2fd3d..d83e80793 100644 --- a/src/core/hle/kernel/k_memory_region_type.h +++ b/src/core/hle/kernel/k_memory_region_type.h @@ -8,17 +8,20 @@ #include "common/common_funcs.h" #include "common/common_types.h" +#define ARCH_ARM64 +#define BOARD_NINTENDO_NX + namespace Kernel { -enum class KMemoryRegionType : u32 {}; +enum KMemoryRegionType : u32 {}; -enum class KMemoryRegionAttr : typename std::underlying_type::type { - CarveoutProtected = 0x04000000, - DidKernelMap = 0x08000000, - ShouldKernelMap = 0x10000000, - UserReadOnly = 0x20000000, - NoUserMap = 0x40000000, - LinearMapped = 0x80000000, +enum KMemoryRegionAttr : typename std::underlying_type::type { + KMemoryRegionAttr_CarveoutProtected = 0x04000000, + KMemoryRegionAttr_DidKernelMap = 0x08000000, + KMemoryRegionAttr_ShouldKernelMap = 0x10000000, + KMemoryRegionAttr_UserReadOnly = 0x20000000, + KMemoryRegionAttr_NoUserMap = 0x40000000, + KMemoryRegionAttr_LinearMapped = 0x80000000, }; DECLARE_ENUM_FLAG_OPERATORS(KMemoryRegionAttr); @@ -150,17 +153,15 @@ static_assert(KMemoryRegionType_Dram.GetValue() == 0x2); constexpr auto KMemoryRegionType_DramKernelBase = KMemoryRegionType_Dram.DeriveSparse(0, 3, 0) - .SetAttribute(KMemoryRegionAttr::NoUserMap) - .SetAttribute(KMemoryRegionAttr::CarveoutProtected); + .SetAttribute(KMemoryRegionAttr_NoUserMap) + .SetAttribute(KMemoryRegionAttr_CarveoutProtected); constexpr auto KMemoryRegionType_DramReservedBase = KMemoryRegionType_Dram.DeriveSparse(0, 3, 1); constexpr auto KMemoryRegionType_DramHeapBase = - KMemoryRegionType_Dram.DeriveSparse(0, 3, 2).SetAttribute(KMemoryRegionAttr::LinearMapped); -static_assert(static_cast(KMemoryRegionType_DramKernelBase.GetValue()) == - (static_cast(0xE) | KMemoryRegionAttr::CarveoutProtected | - KMemoryRegionAttr::NoUserMap)); + KMemoryRegionType_Dram.DeriveSparse(0, 3, 2).SetAttribute(KMemoryRegionAttr_LinearMapped); +static_assert(KMemoryRegionType_DramKernelBase.GetValue() == + (0xE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap)); static_assert(KMemoryRegionType_DramReservedBase.GetValue() == (0x16)); -static_assert(static_cast(KMemoryRegionType_DramHeapBase.GetValue()) == - (static_cast(0x26) | KMemoryRegionAttr::LinearMapped)); +static_assert(KMemoryRegionType_DramHeapBase.GetValue() == (0x26 | KMemoryRegionAttr_LinearMapped)); constexpr auto KMemoryRegionType_DramKernelCode = KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 0); @@ -168,78 +169,69 @@ constexpr auto KMemoryRegionType_DramKernelSlab = KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 1); constexpr auto KMemoryRegionType_DramKernelPtHeap = KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 2).SetAttribute( - KMemoryRegionAttr::LinearMapped); + KMemoryRegionAttr_LinearMapped); constexpr auto KMemoryRegionType_DramKernelInitPt = KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 3).SetAttribute( - KMemoryRegionAttr::LinearMapped); -static_assert(static_cast(KMemoryRegionType_DramKernelCode.GetValue()) == - (static_cast(0xCE) | KMemoryRegionAttr::CarveoutProtected | - 
KMemoryRegionAttr::NoUserMap)); -static_assert(static_cast(KMemoryRegionType_DramKernelSlab.GetValue()) == - (static_cast(0x14E) | KMemoryRegionAttr::CarveoutProtected | - KMemoryRegionAttr::NoUserMap)); -static_assert(static_cast(KMemoryRegionType_DramKernelPtHeap.GetValue()) == - (static_cast(0x24E) | KMemoryRegionAttr::CarveoutProtected | - KMemoryRegionAttr::NoUserMap | KMemoryRegionAttr::LinearMapped)); -static_assert(static_cast(KMemoryRegionType_DramKernelInitPt.GetValue()) == - (static_cast(0x44E) | KMemoryRegionAttr::CarveoutProtected | - KMemoryRegionAttr::NoUserMap | KMemoryRegionAttr::LinearMapped)); + KMemoryRegionAttr_LinearMapped); +static_assert(KMemoryRegionType_DramKernelCode.GetValue() == + (0xCE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap)); +static_assert(KMemoryRegionType_DramKernelSlab.GetValue() == + (0x14E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap)); +static_assert(KMemoryRegionType_DramKernelPtHeap.GetValue() == + (0x24E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | + KMemoryRegionAttr_LinearMapped)); +static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() == + (0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | + KMemoryRegionAttr_LinearMapped)); constexpr auto KMemoryRegionType_DramReservedEarly = - KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr::NoUserMap); -static_assert(static_cast(KMemoryRegionType_DramReservedEarly.GetValue()) == - (static_cast(0x16) | KMemoryRegionAttr::NoUserMap)); + KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap); +static_assert(KMemoryRegionType_DramReservedEarly.GetValue() == + (0x16 | KMemoryRegionAttr_NoUserMap)); constexpr auto KMemoryRegionType_KernelTraceBuffer = KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 0) - .SetAttribute(KMemoryRegionAttr::LinearMapped) - .SetAttribute(KMemoryRegionAttr::UserReadOnly); + .SetAttribute(KMemoryRegionAttr_LinearMapped) + .SetAttribute(KMemoryRegionAttr_UserReadOnly); constexpr auto KMemoryRegionType_OnMemoryBootImage = KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 1); constexpr auto KMemoryRegionType_DTB = KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2); -static_assert(static_cast(KMemoryRegionType_KernelTraceBuffer.GetValue()) == - (static_cast(0xD6) | KMemoryRegionAttr::LinearMapped | - KMemoryRegionAttr::UserReadOnly)); +static_assert(KMemoryRegionType_KernelTraceBuffer.GetValue() == + (0xD6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_UserReadOnly)); static_assert(KMemoryRegionType_OnMemoryBootImage.GetValue() == 0x156); static_assert(KMemoryRegionType_DTB.GetValue() == 0x256); constexpr auto KMemoryRegionType_DramPoolPartition = - KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr::NoUserMap); -static_assert(static_cast(KMemoryRegionType_DramPoolPartition.GetValue()) == - (static_cast(0x26) | KMemoryRegionAttr::LinearMapped | - KMemoryRegionAttr::NoUserMap)); + KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap); +static_assert(KMemoryRegionType_DramPoolPartition.GetValue() == + (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); constexpr auto KMemoryRegionType_DramPoolManagement = KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute( - KMemoryRegionAttr::CarveoutProtected); + KMemoryRegionAttr_CarveoutProtected); constexpr auto KMemoryRegionType_DramUserPool = 
KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition(); -static_assert(static_cast(KMemoryRegionType_DramPoolManagement.GetValue()) == - (static_cast(0x166) | KMemoryRegionAttr::LinearMapped | - KMemoryRegionAttr::NoUserMap | KMemoryRegionAttr::CarveoutProtected)); -static_assert(static_cast(KMemoryRegionType_DramUserPool.GetValue()) == - (static_cast(0x1A6) | KMemoryRegionAttr::LinearMapped | - KMemoryRegionAttr::NoUserMap)); +static_assert(KMemoryRegionType_DramPoolManagement.GetValue() == + (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | + KMemoryRegionAttr_CarveoutProtected)); +static_assert(KMemoryRegionType_DramUserPool.GetValue() == + (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); constexpr auto KMemoryRegionType_DramApplicationPool = KMemoryRegionType_DramUserPool.Derive(4, 0); constexpr auto KMemoryRegionType_DramAppletPool = KMemoryRegionType_DramUserPool.Derive(4, 1); constexpr auto KMemoryRegionType_DramSystemNonSecurePool = KMemoryRegionType_DramUserPool.Derive(4, 2); constexpr auto KMemoryRegionType_DramSystemPool = - KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr::CarveoutProtected); -static_assert(static_cast(KMemoryRegionType_DramApplicationPool.GetValue()) == - (static_cast(0x7A6) | KMemoryRegionAttr::LinearMapped | - KMemoryRegionAttr::NoUserMap)); -static_assert(static_cast(KMemoryRegionType_DramAppletPool.GetValue()) == - (static_cast(0xBA6) | KMemoryRegionAttr::LinearMapped | - KMemoryRegionAttr::NoUserMap)); -static_assert( - static_cast(KMemoryRegionType_DramSystemNonSecurePool.GetValue()) == - (static_cast(0xDA6) | KMemoryRegionAttr::LinearMapped | - KMemoryRegionAttr::NoUserMap)); -static_assert(static_cast(KMemoryRegionType_DramSystemPool.GetValue()) == - (static_cast(0x13A6) | KMemoryRegionAttr::LinearMapped | - KMemoryRegionAttr::NoUserMap | KMemoryRegionAttr::CarveoutProtected)); + KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected); +static_assert(KMemoryRegionType_DramApplicationPool.GetValue() == + (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); +static_assert(KMemoryRegionType_DramAppletPool.GetValue() == + (0xBA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); +static_assert(KMemoryRegionType_DramSystemNonSecurePool.GetValue() == + (0xDA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); +static_assert(KMemoryRegionType_DramSystemPool.GetValue() == + (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | + KMemoryRegionAttr_CarveoutProtected)); constexpr auto KMemoryRegionType_VirtualDramHeapBase = KMemoryRegionType_Dram.DeriveSparse(1, 3, 0); constexpr auto KMemoryRegionType_VirtualDramKernelPtHeap = @@ -284,18 +276,18 @@ constexpr auto KMemoryRegionType_BoardDeviceBase = static_assert(KMemoryRegionType_ArchDeviceBase.GetValue() == 0x5); static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5); -#if defined(ATMOSPHERE_ARCH_ARM64) -#include -#elif defined(ATMOSPHERE_ARCH_ARM) -#include +#if defined(ARCH_ARM64) +#include "core/hle/kernel/arch/arm64/k_memory_region_device_types.inc" +#elif defined(ARCH_ARM) +#error "Unimplemented"" #else // Default to no architecture devices. 
constexpr auto NumArchitectureDeviceRegions = 0; #endif static_assert(NumArchitectureDeviceRegions >= 0); -#if defined(ATMOSPHERE_BOARD_NINTENDO_NX) -#include +#if defined(BOARD_NINTENDO_NX) +#include "core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc" #else // Default to no board devices. constexpr auto NumBoardDeviceRegions = 0; -- cgit v1.2.3 From 8d0ba7ee494af17c85ef2d617db5f2a7acaa37cf Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 20 Mar 2021 00:43:00 -0700 Subject: common: common_sizes: Move Invalid to Size_* prefix and add missing values. --- src/core/hle/kernel/k_address_space_info.cpp | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_address_space_info.cpp b/src/core/hle/kernel/k_address_space_info.cpp index eb8af660c..89906954d 100644 --- a/src/core/hle/kernel/k_address_space_info.cpp +++ b/src/core/hle/kernel/k_address_space_info.cpp @@ -14,24 +14,24 @@ namespace { // clang-format off constexpr std::array AddressSpaceInfos{{ - { .bit_width = 32, .address = Size_2_MB , .size = Size_1_GB - Size_2_MB , .type = KAddressSpaceInfo::Type::MapSmall, }, - { .bit_width = 32, .address = Size_1_GB , .size = Size_4_GB - Size_1_GB , .type = KAddressSpaceInfo::Type::MapLarge, }, - { .bit_width = 32, .address = Invalid , .size = Size_1_GB , .type = KAddressSpaceInfo::Type::Heap, }, - { .bit_width = 32, .address = Invalid , .size = Size_1_GB , .type = KAddressSpaceInfo::Type::Alias, }, - { .bit_width = 36, .address = Size_128_MB, .size = Size_2_GB - Size_128_MB, .type = KAddressSpaceInfo::Type::MapSmall, }, - { .bit_width = 36, .address = Size_2_GB , .size = Size_64_GB - Size_2_GB , .type = KAddressSpaceInfo::Type::MapLarge, }, - { .bit_width = 36, .address = Invalid , .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, }, - { .bit_width = 36, .address = Invalid , .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Alias, }, - { .bit_width = 39, .address = Size_128_MB, .size = Size_512_GB - Size_128_MB, .type = KAddressSpaceInfo::Type::Map39Bit, }, - { .bit_width = 39, .address = Invalid , .size = Size_64_GB , .type = KAddressSpaceInfo::Type::MapSmall }, - { .bit_width = 39, .address = Invalid , .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, }, - { .bit_width = 39, .address = Invalid , .size = Size_64_GB , .type = KAddressSpaceInfo::Type::Alias, }, - { .bit_width = 39, .address = Invalid , .size = Size_2_GB , .type = KAddressSpaceInfo::Type::Stack, }, + { .bit_width = 32, .address = Size_2_MB , .size = Size_1_GB - Size_2_MB , .type = KAddressSpaceInfo::Type::MapSmall, }, + { .bit_width = 32, .address = Size_1_GB , .size = Size_4_GB - Size_1_GB , .type = KAddressSpaceInfo::Type::MapLarge, }, + { .bit_width = 32, .address = Size_Invalid, .size = Size_1_GB , .type = KAddressSpaceInfo::Type::Heap, }, + { .bit_width = 32, .address = Size_Invalid, .size = Size_1_GB , .type = KAddressSpaceInfo::Type::Alias, }, + { .bit_width = 36, .address = Size_128_MB , .size = Size_2_GB - Size_128_MB, .type = KAddressSpaceInfo::Type::MapSmall, }, + { .bit_width = 36, .address = Size_2_GB , .size = Size_64_GB - Size_2_GB , .type = KAddressSpaceInfo::Type::MapLarge, }, + { .bit_width = 36, .address = Size_Invalid, .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, }, + { .bit_width = 36, .address = Size_Invalid, .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Alias, }, + { .bit_width = 39, .address = Size_128_MB , .size = Size_512_GB - Size_128_MB, .type = 
KAddressSpaceInfo::Type::Map39Bit, }, + { .bit_width = 39, .address = Size_Invalid, .size = Size_64_GB , .type = KAddressSpaceInfo::Type::MapSmall }, + { .bit_width = 39, .address = Size_Invalid, .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, }, + { .bit_width = 39, .address = Size_Invalid, .size = Size_64_GB , .type = KAddressSpaceInfo::Type::Alias, }, + { .bit_width = 39, .address = Size_Invalid, .size = Size_2_GB , .type = KAddressSpaceInfo::Type::Stack, }, }}; // clang-format on constexpr bool IsAllowedIndexForAddress(std::size_t index) { - return index < AddressSpaceInfos.size() && AddressSpaceInfos[index].address != Invalid; + return index < AddressSpaceInfos.size() && AddressSpaceInfos[index].address != Size_Invalid; } using IndexArray = -- cgit v1.2.3 From 343eaecd388dc688cea18d754ad2e10f741d0a2e Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 20 Mar 2021 00:53:00 -0700 Subject: hle: kernel: k_memory_layout: Derive memory regions based on board layout. --- .../kernel/k_memory_layout.board.nintendo_nx.cpp | 199 +++++++++++ src/core/hle/kernel/k_memory_layout.cpp | 183 ++++++++++ src/core/hle/kernel/k_memory_layout.h | 384 +++++++++++++++++++-- src/core/hle/kernel/kernel.cpp | 319 +++++++++++++++-- src/core/hle/kernel/kernel.h | 2 +- 5 files changed, 1031 insertions(+), 56 deletions(-) create mode 100644 src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp create mode 100644 src/core/hle/kernel/k_memory_layout.cpp (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp b/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp new file mode 100644 index 000000000..a78551291 --- /dev/null +++ b/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp @@ -0,0 +1,199 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/alignment.h" +#include "core/hle/kernel/k_memory_layout.h" +#include "core/hle/kernel/k_memory_manager.h" +#include "core/hle/kernel/k_system_control.h" +#include "core/hle/kernel/k_trace.h" + +namespace Kernel { + +namespace { + +constexpr size_t CarveoutAlignment = 0x20000; +constexpr size_t CarveoutSizeMax = (512ULL * 1024 * 1024) - CarveoutAlignment; + +bool SetupPowerManagementControllerMemoryRegion(KMemoryLayout& memory_layout) { + // Above firmware 2.0.0, the PMC is not mappable. 
+ return memory_layout.GetPhysicalMemoryRegionTree().Insert( + 0x7000E000, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap) && + memory_layout.GetPhysicalMemoryRegionTree().Insert( + 0x7000E400, 0xC00, + KMemoryRegionType_PowerManagementController | KMemoryRegionAttr_NoUserMap); +} + +void InsertPoolPartitionRegionIntoBothTrees(KMemoryLayout& memory_layout, size_t start, size_t size, + KMemoryRegionType phys_type, + KMemoryRegionType virt_type, u32& cur_attr) { + const u32 attr = cur_attr++; + ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(start, size, + static_cast(phys_type), attr)); + const KMemoryRegion* phys = memory_layout.GetPhysicalMemoryRegionTree().FindByTypeAndAttribute( + static_cast(phys_type), attr); + ASSERT(phys != nullptr); + ASSERT(phys->GetEndAddress() != 0); + ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(phys->GetPairAddress(), size, + static_cast(virt_type), attr)); +} + +} // namespace + +namespace Init { + +void SetupDevicePhysicalMemoryRegions(KMemoryLayout& memory_layout) { + ASSERT(SetupPowerManagementControllerMemoryRegion(memory_layout)); + ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + 0x70019000, 0x1000, KMemoryRegionType_MemoryController | KMemoryRegionAttr_NoUserMap)); + ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + 0x7001C000, 0x1000, KMemoryRegionType_MemoryController0 | KMemoryRegionAttr_NoUserMap)); + ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + 0x7001D000, 0x1000, KMemoryRegionType_MemoryController1 | KMemoryRegionAttr_NoUserMap)); + ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + 0x50040000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap)); + ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + 0x50041000, 0x1000, + KMemoryRegionType_InterruptDistributor | KMemoryRegionAttr_ShouldKernelMap)); + ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + 0x50042000, 0x1000, + KMemoryRegionType_InterruptCpuInterface | KMemoryRegionAttr_ShouldKernelMap)); + ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + 0x50043000, 0x1D000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap)); + + // Map IRAM unconditionally, to support debug-logging-to-iram build config. + ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + 0x40000000, 0x40000, KMemoryRegionType_LegacyLpsIram | KMemoryRegionAttr_ShouldKernelMap)); + + // Above firmware 2.0.0, prevent mapping the bpmp exception vectors or the ipatch region. + ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + 0x6000F000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap)); + ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + 0x6001DC00, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap)); +} + +void SetupDramPhysicalMemoryRegions(KMemoryLayout& memory_layout) { + const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize(); + const PAddr physical_memory_base_address = + KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress); + + // Insert blocks into the tree. + ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + physical_memory_base_address, intended_memory_size, KMemoryRegionType_Dram)); + ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + physical_memory_base_address, ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly)); + + // Insert the KTrace block at the end of Dram, if KTrace is enabled. 
+ static_assert(!IsKTraceEnabled || KTraceBufferSize > 0); + if constexpr (IsKTraceEnabled) { + const PAddr ktrace_buffer_phys_addr = + physical_memory_base_address + intended_memory_size - KTraceBufferSize; + ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + ktrace_buffer_phys_addr, KTraceBufferSize, KMemoryRegionType_KernelTraceBuffer)); + } +} + +void SetupPoolPartitionMemoryRegions(KMemoryLayout& memory_layout) { + // Start by identifying the extents of the DRAM memory region. + const auto dram_extents = memory_layout.GetMainMemoryPhysicalExtents(); + ASSERT(dram_extents.GetEndAddress() != 0); + + // Determine the end of the pool region. + const u64 pool_end = dram_extents.GetEndAddress() - KTraceBufferSize; + + // Find the start of the kernel DRAM region. + const KMemoryRegion* kernel_dram_region = + memory_layout.GetPhysicalMemoryRegionTree().FindFirstDerived( + KMemoryRegionType_DramKernelBase); + ASSERT(kernel_dram_region != nullptr); + + const u64 kernel_dram_start = kernel_dram_region->GetAddress(); + ASSERT(Common::IsAligned(kernel_dram_start, CarveoutAlignment)); + + // Find the start of the pool partitions region. + const KMemoryRegion* pool_partitions_region = + memory_layout.GetPhysicalMemoryRegionTree().FindByTypeAndAttribute( + KMemoryRegionType_DramPoolPartition, 0); + ASSERT(pool_partitions_region != nullptr); + const u64 pool_partitions_start = pool_partitions_region->GetAddress(); + + // Setup the pool partition layouts. + // On 5.0.0+, setup modern 4-pool-partition layout. + + // Get Application and Applet pool sizes. + const size_t application_pool_size = KSystemControl::Init::GetApplicationPoolSize(); + const size_t applet_pool_size = KSystemControl::Init::GetAppletPoolSize(); + const size_t unsafe_system_pool_min_size = + KSystemControl::Init::GetMinimumNonSecureSystemPoolSize(); + + // Decide on starting addresses for our pools. + const u64 application_pool_start = pool_end - application_pool_size; + const u64 applet_pool_start = application_pool_start - applet_pool_size; + const u64 unsafe_system_pool_start = std::min( + kernel_dram_start + CarveoutSizeMax, + Common::AlignDown(applet_pool_start - unsafe_system_pool_min_size, CarveoutAlignment)); + const size_t unsafe_system_pool_size = applet_pool_start - unsafe_system_pool_start; + + // We want to arrange application pool depending on where the middle of dram is. 
+ const u64 dram_midpoint = (dram_extents.GetAddress() + dram_extents.GetEndAddress()) / 2; + u32 cur_pool_attr = 0; + size_t total_overhead_size = 0; + if (dram_extents.GetEndAddress() <= dram_midpoint || dram_midpoint <= application_pool_start) { + InsertPoolPartitionRegionIntoBothTrees( + memory_layout, application_pool_start, application_pool_size, + KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, + cur_pool_attr); + total_overhead_size += + KMemoryManager::CalculateManagementOverheadSize(application_pool_size); + } else { + const size_t first_application_pool_size = dram_midpoint - application_pool_start; + const size_t second_application_pool_size = + application_pool_start + application_pool_size - dram_midpoint; + InsertPoolPartitionRegionIntoBothTrees( + memory_layout, application_pool_start, first_application_pool_size, + KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, + cur_pool_attr); + InsertPoolPartitionRegionIntoBothTrees( + memory_layout, dram_midpoint, second_application_pool_size, + KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, + cur_pool_attr); + total_overhead_size += + KMemoryManager::CalculateManagementOverheadSize(first_application_pool_size); + total_overhead_size += + KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size); + } + + // Insert the applet pool. + InsertPoolPartitionRegionIntoBothTrees(memory_layout, applet_pool_start, applet_pool_size, + KMemoryRegionType_DramAppletPool, + KMemoryRegionType_VirtualDramAppletPool, cur_pool_attr); + total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(applet_pool_size); + + // Insert the nonsecure system pool. + InsertPoolPartitionRegionIntoBothTrees( + memory_layout, unsafe_system_pool_start, unsafe_system_pool_size, + KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool, + cur_pool_attr); + total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(unsafe_system_pool_size); + + // Insert the pool management region. + total_overhead_size += KMemoryManager::CalculateManagementOverheadSize( + (unsafe_system_pool_start - pool_partitions_start) - total_overhead_size); + const u64 pool_management_start = unsafe_system_pool_start - total_overhead_size; + const size_t pool_management_size = total_overhead_size; + u32 pool_management_attr = 0; + InsertPoolPartitionRegionIntoBothTrees( + memory_layout, pool_management_start, pool_management_size, + KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement, + pool_management_attr); + + // Insert the system pool. + const u64 system_pool_size = pool_management_start - pool_partitions_start; + InsertPoolPartitionRegionIntoBothTrees(memory_layout, pool_partitions_start, system_pool_size, + KMemoryRegionType_DramSystemPool, + KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr); +} + +} // namespace Init + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_layout.cpp b/src/core/hle/kernel/k_memory_layout.cpp new file mode 100644 index 000000000..58fe4a133 --- /dev/null +++ b/src/core/hle/kernel/k_memory_layout.cpp @@ -0,0 +1,183 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include "common/alignment.h" +#include "core/hle/kernel/k_memory_layout.h" +#include "core/hle/kernel/k_system_control.h" + +namespace Kernel { + +namespace { + +class KMemoryRegionAllocator final : NonCopyable { +public: + static constexpr size_t MaxMemoryRegions = 200; + +private: + KMemoryRegion region_heap[MaxMemoryRegions]{}; + size_t num_regions{}; + +public: + constexpr KMemoryRegionAllocator() = default; + +public: + template + KMemoryRegion* Allocate(Args&&... args) { + // Ensure we stay within the bounds of our heap. + ASSERT(this->num_regions < MaxMemoryRegions); + + // Create the new region. + KMemoryRegion* region = std::addressof(this->region_heap[this->num_regions++]); + new (region) KMemoryRegion(std::forward(args)...); + + return region; + } +}; + +KMemoryRegionAllocator g_memory_region_allocator; + +template +KMemoryRegion* AllocateRegion(Args&&... args) { + return g_memory_region_allocator.Allocate(std::forward(args)...); +} + +} // namespace + +void KMemoryRegionTree::InsertDirectly(u64 address, u64 last_address, u32 attr, u32 type_id) { + this->insert(*AllocateRegion(address, last_address, attr, type_id)); +} + +bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) { + // Locate the memory region that contains the address. + KMemoryRegion* found = this->FindModifiable(address); + + // We require that the old attr is correct. + if (found->GetAttributes() != old_attr) { + return false; + } + + // We further require that the region can be split from the old region. + const u64 inserted_region_end = address + size; + const u64 inserted_region_last = inserted_region_end - 1; + if (found->GetLastAddress() < inserted_region_last) { + return false; + } + + // Further, we require that the type id is a valid transformation. + if (!found->CanDerive(type_id)) { + return false; + } + + // Cache information from the region before we remove it. + const u64 old_address = found->GetAddress(); + const u64 old_last = found->GetLastAddress(); + const u64 old_pair = found->GetPairAddress(); + const u32 old_type = found->GetType(); + + // Erase the existing region from the tree. + this->erase(this->iterator_to(*found)); + + // Insert the new region into the tree. + if (old_address == address) { + // Reuse the old object for the new region, if we can. + found->Reset(address, inserted_region_last, old_pair, new_attr, type_id); + this->insert(*found); + } else { + // If we can't re-use, adjust the old region. + found->Reset(old_address, address - 1, old_pair, old_attr, old_type); + this->insert(*found); + + // Insert a new region for the split. + const u64 new_pair = (old_pair != std::numeric_limits::max()) + ? old_pair + (address - old_address) + : old_pair; + this->insert(*AllocateRegion(address, inserted_region_last, new_pair, new_attr, type_id)); + } + + // If we need to insert a region after the region, do so. + if (old_last != inserted_region_last) { + const u64 after_pair = (old_pair != std::numeric_limits::max()) + ? old_pair + (inserted_region_end - old_address) + : old_pair; + this->insert( + *AllocateRegion(inserted_region_end, old_last, after_pair, old_attr, old_type)); + } + + return true; +} + +VAddr KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id) { + // We want to find the total extents of the type id. + const auto extents = this->GetDerivedRegionExtents(static_cast(type_id)); + + // Ensure that our alignment is correct. 
+ ASSERT(Common::IsAligned(extents.GetAddress(), alignment)); + + const u64 first_address = extents.GetAddress(); + const u64 last_address = extents.GetLastAddress(); + + const u64 first_index = first_address / alignment; + const u64 last_index = last_address / alignment; + + while (true) { + const u64 candidate = + KSystemControl::GenerateRandomRange(first_index, last_index) * alignment; + + // Ensure that the candidate doesn't overflow with the size. + if (!(candidate < candidate + size)) { + continue; + } + + const u64 candidate_last = candidate + size - 1; + + // Ensure that the candidate fits within the region. + if (candidate_last > last_address) { + continue; + } + + // Locate the candidate region, and ensure it fits and has the correct type id. + if (const auto& candidate_region = *this->Find(candidate); + !(candidate_last <= candidate_region.GetLastAddress() && + candidate_region.GetType() == type_id)) { + continue; + } + + return candidate; + } +} + +void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start, + VAddr linear_virtual_start) { + // Set static differences. + linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start; + linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start; + + // Initialize linear trees. + for (auto& region : GetPhysicalMemoryRegionTree()) { + if (region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) { + GetPhysicalLinearMemoryRegionTree().InsertDirectly( + region.GetAddress(), region.GetLastAddress(), region.GetAttributes(), + region.GetType()); + } + } + + for (auto& region : GetVirtualMemoryRegionTree()) { + if (region.IsDerivedFrom(KMemoryRegionType_Dram)) { + GetVirtualLinearMemoryRegionTree().InsertDirectly( + region.GetAddress(), region.GetLastAddress(), region.GetAttributes(), + region.GetType()); + } + } +} + +size_t KMemoryLayout::GetResourceRegionSizeForInit() { + // Calculate resource region size based on whether we allow extra threads. + const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit(); + size_t resource_region_size = + KernelResourceSize + (use_extra_resources ? KernelSlabHeapAdditionalSize : 0); + + return resource_region_size; +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h index a76ffa02e..f2b46c932 100644 --- a/src/core/hle/kernel/k_memory_layout.h +++ b/src/core/hle/kernel/k_memory_layout.h @@ -1,24 +1,67 @@ -// Copyright 2020 yuzu Emulator Project +// Copyright 2021 yuzu Emulator Project // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
#pragma once +#include "common/alignment.h" +#include "common/common_sizes.h" #include "common/common_types.h" #include "core/device_memory.h" #include "core/hle/kernel/k_memory_region.h" +#include "core/hle/kernel/k_memory_region_type.h" +#include "core/hle/kernel/memory_types.h" namespace Kernel { -constexpr std::size_t KernelAslrAlignment = 2 * 1024 * 1024; +constexpr std::size_t L1BlockSize = Size_1_GB; +constexpr std::size_t L2BlockSize = Size_2_MB; + +constexpr std::size_t GetMaximumOverheadSize(std::size_t size) { + return (Common::DivideUp(size, L1BlockSize) + Common::DivideUp(size, L2BlockSize)) * PageSize; +} + +constexpr std::size_t MainMemorySize = Size_4_GB; +constexpr std::size_t MainMemorySizeMax = Size_8_GB; + +constexpr std::size_t ReservedEarlyDramSize = 0x60000; +constexpr std::size_t DramPhysicalAddress = 0x80000000; + +constexpr std::size_t KernelAslrAlignment = Size_2_MB; constexpr std::size_t KernelVirtualAddressSpaceWidth = 1ULL << 39; constexpr std::size_t KernelPhysicalAddressSpaceWidth = 1ULL << 48; + constexpr std::size_t KernelVirtualAddressSpaceBase = 0ULL - KernelVirtualAddressSpaceWidth; constexpr std::size_t KernelVirtualAddressSpaceEnd = KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment); -constexpr std::size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1; +constexpr std::size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1ULL; constexpr std::size_t KernelVirtualAddressSpaceSize = KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase; +constexpr std::size_t KernelVirtualAddressCodeBase = KernelVirtualAddressSpaceBase; +constexpr std::size_t KernelVirtualAddressCodeSize = 0x62000; +constexpr std::size_t KernelVirtualAddressCodeEnd = + KernelVirtualAddressCodeBase + KernelVirtualAddressCodeSize; + +constexpr std::size_t KernelPhysicalAddressSpaceBase = 0ULL; +constexpr std::size_t KernelPhysicalAddressSpaceEnd = + KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceWidth; +constexpr std::size_t KernelPhysicalAddressSpaceLast = KernelPhysicalAddressSpaceEnd - 1ULL; +constexpr std::size_t KernelPhysicalAddressSpaceSize = + KernelPhysicalAddressSpaceEnd - KernelPhysicalAddressSpaceBase; +constexpr std::size_t KernelPhysicalAddressCodeBase = DramPhysicalAddress + ReservedEarlyDramSize; + +constexpr std::size_t KernelPageTableHeapSize = GetMaximumOverheadSize(MainMemorySizeMax); +constexpr std::size_t KernelInitialPageHeapSize = Size_128_KB; + +constexpr std::size_t KernelSlabHeapDataSize = Size_5_MB; +constexpr std::size_t KernelSlabHeapGapsSize = Size_2_MB - Size_64_KB; +constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSize; + +// NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860. 
+constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000ULL; + +constexpr std::size_t KernelResourceSize = + KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize; constexpr bool IsKernelAddressKey(VAddr key) { return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast; @@ -30,41 +73,324 @@ constexpr bool IsKernelAddress(VAddr address) { class KMemoryLayout final { public: - constexpr const KMemoryRegion& Application() const { - return application; + KMemoryLayout() = default; + + KMemoryRegionTree& GetVirtualMemoryRegionTree() { + return virtual_tree; + } + const KMemoryRegionTree& GetVirtualMemoryRegionTree() const { + return virtual_tree; + } + KMemoryRegionTree& GetPhysicalMemoryRegionTree() { + return physical_tree; + } + const KMemoryRegionTree& GetPhysicalMemoryRegionTree() const { + return physical_tree; + } + KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() { + return virtual_linear_tree; + } + const KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() const { + return virtual_linear_tree; + } + KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() { + return physical_linear_tree; + } + const KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() const { + return physical_linear_tree; } - constexpr const KMemoryRegion& Applet() const { - return applet; + VAddr GetLinearVirtualAddress(PAddr address) const { + return address + linear_phys_to_virt_diff; + } + PAddr GetLinearPhysicalAddress(VAddr address) const { + return address + linear_virt_to_phys_diff; } - constexpr const KMemoryRegion& System() const { - return system; + const KMemoryRegion* FindVirtual(VAddr address) const { + return Find(address, GetVirtualMemoryRegionTree()); + } + const KMemoryRegion* FindPhysical(PAddr address) const { + return Find(address, GetPhysicalMemoryRegionTree()); } - static constexpr KMemoryLayout GetDefaultLayout() { - constexpr std::size_t application_size{0xcd500000}; - constexpr std::size_t applet_size{0x1fb00000}; - constexpr PAddr application_start_address{Core::DramMemoryMap::End - application_size}; - constexpr PAddr application_end_address{Core::DramMemoryMap::End}; - constexpr PAddr applet_start_address{application_start_address - applet_size}; - constexpr PAddr applet_end_address{applet_start_address + applet_size}; - constexpr PAddr system_start_address{Core::DramMemoryMap::SlabHeapEnd}; - constexpr PAddr system_end_address{applet_start_address}; - return {application_start_address, application_end_address, applet_start_address, - applet_end_address, system_start_address, system_end_address}; + const KMemoryRegion* FindVirtualLinear(VAddr address) const { + return Find(address, GetVirtualLinearMemoryRegionTree()); + } + const KMemoryRegion* FindPhysicalLinear(PAddr address) const { + return Find(address, GetPhysicalLinearMemoryRegionTree()); + } + + VAddr GetMainStackTopAddress(s32 core_id) const { + return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscMainStack); + } + VAddr GetIdleStackTopAddress(s32 core_id) const { + return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscIdleStack); + } + VAddr GetExceptionStackTopAddress(s32 core_id) const { + return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack); + } + + VAddr GetSlabRegionAddress() const { + return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab)) + .GetAddress(); + } + + const KMemoryRegion& GetDeviceRegion(KMemoryRegionType type) const { + return 
Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type)); + } + PAddr GetDevicePhysicalAddress(KMemoryRegionType type) const { + return GetDeviceRegion(type).GetAddress(); + } + VAddr GetDeviceVirtualAddress(KMemoryRegionType type) const { + return GetDeviceRegion(type).GetPairAddress(); + } + + const KMemoryRegion& GetPoolManagementRegion() const { + return Dereference( + GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramPoolManagement)); + } + const KMemoryRegion& GetPageTableHeapRegion() const { + return Dereference( + GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelPtHeap)); + } + const KMemoryRegion& GetKernelStackRegion() const { + return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelStack)); + } + const KMemoryRegion& GetTempRegion() const { + return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelTemp)); + } + + const KMemoryRegion& GetKernelTraceBufferRegion() const { + return Dereference(GetVirtualLinearMemoryRegionTree().FindByType( + KMemoryRegionType_VirtualDramKernelTraceBuffer)); + } + + const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const { + return Dereference(FindVirtualLinear(address)); + } + + const KMemoryRegion* GetPhysicalKernelTraceBufferRegion() const { + return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_KernelTraceBuffer); + } + const KMemoryRegion* GetPhysicalOnMemoryBootImageRegion() const { + return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_OnMemoryBootImage); + } + const KMemoryRegion* GetPhysicalDTBRegion() const { + return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DTB); + } + + bool IsHeapPhysicalAddress(const KMemoryRegion*& region, PAddr address) const { + return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(), + KMemoryRegionType_DramUserPool); + } + bool IsHeapVirtualAddress(const KMemoryRegion*& region, VAddr address) const { + return IsTypedAddress(region, address, GetVirtualLinearMemoryRegionTree(), + KMemoryRegionType_VirtualDramUserPool); + } + + bool IsHeapPhysicalAddress(const KMemoryRegion*& region, PAddr address, size_t size) const { + return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(), + KMemoryRegionType_DramUserPool); + } + bool IsHeapVirtualAddress(const KMemoryRegion*& region, VAddr address, size_t size) const { + return IsTypedAddress(region, address, size, GetVirtualLinearMemoryRegionTree(), + KMemoryRegionType_VirtualDramUserPool); + } + + bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, PAddr address) const { + return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(), + static_cast(KMemoryRegionAttr_LinearMapped)); + } + bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, PAddr address, + size_t size) const { + return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(), + static_cast(KMemoryRegionAttr_LinearMapped)); + } + + std::tuple GetTotalAndKernelMemorySizes() const { + size_t total_size = 0, kernel_size = 0; + for (const auto& region : GetPhysicalMemoryRegionTree()) { + if (region.IsDerivedFrom(KMemoryRegionType_Dram)) { + total_size += region.GetSize(); + if (!region.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { + kernel_size += region.GetSize(); + } + } + } + return std::make_tuple(total_size, kernel_size); + } + + void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start, + VAddr linear_virtual_start); + 
static size_t GetResourceRegionSizeForInit(); + + auto GetKernelRegionExtents() const { + return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel); + } + auto GetKernelCodeRegionExtents() const { + return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelCode); + } + auto GetKernelStackRegionExtents() const { + return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelStack); + } + auto GetKernelMiscRegionExtents() const { + return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelMisc); + } + auto GetKernelSlabRegionExtents() const { + return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelSlab); + } + + auto GetLinearRegionPhysicalExtents() const { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( + KMemoryRegionAttr_LinearMapped); + } + + auto GetLinearRegionVirtualExtents() const { + const auto physical = GetLinearRegionPhysicalExtents(); + return KMemoryRegion(GetLinearVirtualAddress(physical.GetAddress()), + GetLinearVirtualAddress(physical.GetLastAddress()), 0, + KMemoryRegionType_None); + } + + auto GetMainMemoryPhysicalExtents() const { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Dram); + } + auto GetCarveoutRegionExtents() const { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( + KMemoryRegionAttr_CarveoutProtected); + } + + auto GetKernelRegionPhysicalExtents() const { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( + KMemoryRegionType_DramKernelBase); + } + auto GetKernelCodeRegionPhysicalExtents() const { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( + KMemoryRegionType_DramKernelCode); + } + auto GetKernelSlabRegionPhysicalExtents() const { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( + KMemoryRegionType_DramKernelSlab); + } + auto GetKernelPageTableHeapRegionPhysicalExtents() const { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( + KMemoryRegionType_DramKernelPtHeap); + } + auto GetKernelInitPageTableRegionPhysicalExtents() const { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( + KMemoryRegionType_DramKernelInitPt); + } + + auto GetKernelPoolManagementRegionPhysicalExtents() const { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( + KMemoryRegionType_DramPoolManagement); + } + auto GetKernelPoolPartitionRegionPhysicalExtents() const { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( + KMemoryRegionType_DramPoolPartition); + } + auto GetKernelSystemPoolRegionPhysicalExtents() const { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( + KMemoryRegionType_DramSystemPool); + } + auto GetKernelSystemNonSecurePoolRegionPhysicalExtents() const { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( + KMemoryRegionType_DramSystemNonSecurePool); + } + auto GetKernelAppletPoolRegionPhysicalExtents() const { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( + KMemoryRegionType_DramAppletPool); + } + auto GetKernelApplicationPoolRegionPhysicalExtents() const { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( + KMemoryRegionType_DramApplicationPool); + } + + auto GetKernelTraceBufferRegionPhysicalExtents() const { + return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( + KMemoryRegionType_KernelTraceBuffer); + } + +private: + template + static bool IsTypedAddress(const KMemoryRegion*& region, AddressType address, 
+ const KMemoryRegionTree& tree, KMemoryRegionType type) { + // Check if the cached region already contains the address. + if (region != nullptr && region->Contains(address)) { + return true; + } + + // Find the containing region, and update the cache. + if (const KMemoryRegion* found = tree.Find(address); + found != nullptr && found->IsDerivedFrom(type)) { + region = found; + return true; + } else { + return false; + } + } + + template + static bool IsTypedAddress(const KMemoryRegion*& region, AddressType address, size_t size, + const KMemoryRegionTree& tree, KMemoryRegionType type) { + // Get the end of the checked region. + const u64 last_address = address + size - 1; + + // Walk the tree to verify the region is correct. + const KMemoryRegion* cur = + (region != nullptr && region->Contains(address)) ? region : tree.Find(address); + while (cur != nullptr && cur->IsDerivedFrom(type)) { + if (last_address <= cur->GetLastAddress()) { + region = cur; + return true; + } + + cur = cur->GetNext(); + } + return false; + } + + template + static const KMemoryRegion* Find(AddressType address, const KMemoryRegionTree& tree) { + return tree.Find(address); + } + + static KMemoryRegion& Dereference(KMemoryRegion* region) { + ASSERT(region != nullptr); + return *region; + } + + static const KMemoryRegion& Dereference(const KMemoryRegion* region) { + ASSERT(region != nullptr); + return *region; + } + + VAddr GetStackTopAddress(s32 core_id, KMemoryRegionType type) const { + const auto& region = Dereference( + GetVirtualMemoryRegionTree().FindByTypeAndAttribute(type, static_cast(core_id))); + ASSERT(region.GetEndAddress() != 0); + return region.GetEndAddress(); } private: - constexpr KMemoryLayout(PAddr application_start_address, std::size_t application_size, - PAddr applet_start_address, std::size_t applet_size, - PAddr system_start_address, std::size_t system_size) - : application{application_start_address, application_size}, - applet{applet_start_address, applet_size}, system{system_start_address, system_size} {} - - const KMemoryRegion application; - const KMemoryRegion applet; - const KMemoryRegion system; + u64 linear_phys_to_virt_diff{}; + u64 linear_virt_to_phys_diff{}; + KMemoryRegionTree virtual_tree; + KMemoryRegionTree physical_tree; + KMemoryRegionTree virtual_linear_tree; + KMemoryRegionTree physical_linear_tree; }; +namespace Init { + +// These should be generic, regardless of board. +void SetupPoolPartitionMemoryRegions(KMemoryLayout& memory_layout); + +// These may be implemented in a board-specific manner. +void SetupDevicePhysicalMemoryRegions(KMemoryLayout& memory_layout); +void SetupDramPhysicalMemoryRegions(KMemoryLayout& memory_layout); + +} // namespace Init + } // namespace Kernel diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 48916df17..257d4bb83 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -1,4 +1,4 @@ -// Copyright 2014 Citra Emulator Project +// Copyright 2021 yuzu Emulator Project // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
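Before the InitializeMemoryLayout rewrite below, a condensed usage sketch of the tree interface introduced in k_memory_layout.h may help. It assumes only the headers added by this patch; the 4 GiB DRAM size and the Example namespace are illustrative, not what the kernel derives at boot:

// Sketch only: builds a toy physical tree with the API shown above and reads
// back the derived DRAM extents.
#include "core/hle/kernel/k_memory_layout.h"

namespace Kernel::Example {

void BuildTinyLayout() {
    KMemoryLayout layout;

    // Root region for the physical tree, from which all other regions derive.
    layout.GetPhysicalMemoryRegionTree().InsertDirectly(
        KernelPhysicalAddressSpaceBase,
        KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1);

    // Carve a 4 GiB DRAM region out of the root.
    ASSERT(layout.GetPhysicalMemoryRegionTree().Insert(DramPhysicalAddress, Size_4_GB,
                                                       KMemoryRegionType_Dram));

    // The derived extents now cover exactly that carve-out.
    const auto dram_extents = layout.GetMainMemoryPhysicalExtents();
    ASSERT(dram_extents.GetAddress() == DramPhysicalAddress);
    ASSERT(dram_extents.GetEndAddress() == DramPhysicalAddress + Size_4_GB);
}

} // namespace Kernel::Example

Each successful Insert splits the containing region and draws new nodes from the fixed 200-entry allocator in k_memory_layout.cpp, which suggests the layout is meant to be built once during initialization rather than mutated at runtime.
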
@@ -12,6 +12,7 @@ #include #include "common/assert.h" +#include "common/common_sizes.h" #include "common/logging/log.h" #include "common/microprofile.h" #include "common/thread.h" @@ -269,44 +270,310 @@ struct KernelCore::Impl { } void InitializeMemoryLayout() { - // Initialize memory layout - constexpr KMemoryLayout layout{KMemoryLayout::GetDefaultLayout()}; + KMemoryLayout memory_layout; + + // Insert the root region for the virtual memory tree, from which all other regions will + // derive. + memory_layout.GetVirtualMemoryRegionTree().InsertDirectly( + KernelVirtualAddressSpaceBase, + KernelVirtualAddressSpaceBase + KernelVirtualAddressSpaceSize - 1); + + // Insert the root region for the physical memory tree, from which all other regions will + // derive. + memory_layout.GetPhysicalMemoryRegionTree().InsertDirectly( + KernelPhysicalAddressSpaceBase, + KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1); + + // Save start and end for ease of use. + const VAddr code_start_virt_addr = KernelVirtualAddressCodeBase; + const VAddr code_end_virt_addr = KernelVirtualAddressCodeEnd; + + // Setup the containing kernel region. + constexpr size_t KernelRegionSize = Size_1_GB; + constexpr size_t KernelRegionAlign = Size_1_GB; + constexpr VAddr kernel_region_start = + Common::AlignDown(code_start_virt_addr, KernelRegionAlign); + size_t kernel_region_size = KernelRegionSize; + if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) { + kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start; + } + ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( + kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel)); + + // Setup the code region. + constexpr size_t CodeRegionAlign = PageSize; + constexpr VAddr code_region_start = + Common::AlignDown(code_start_virt_addr, CodeRegionAlign); + constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign); + constexpr size_t code_region_size = code_region_end - code_region_start; + ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( + code_region_start, code_region_size, KMemoryRegionType_KernelCode)); + + // Setup board-specific device physical regions. + Init::SetupDevicePhysicalMemoryRegions(memory_layout); + + // Determine the amount of space needed for the misc region. + size_t misc_region_needed_size; + { + // Each core has a one page stack for all three stack types (Main, Idle, Exception). + misc_region_needed_size = Core::Hardware::NUM_CPU_CORES * (3 * (PageSize + PageSize)); + + // Account for each auto-map device. + for (const auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { + if (region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) { + // Check that the region is valid. + ASSERT(region.GetEndAddress() != 0); + + // Account for the region. + misc_region_needed_size += + PageSize + (Common::AlignUp(region.GetLastAddress(), PageSize) - + Common::AlignDown(region.GetAddress(), PageSize)); + } + } + + // Multiply the needed size by three, to account for the need for guard space. + misc_region_needed_size *= 3; + } + + // Decide on the actual size for the misc region. + constexpr size_t MiscRegionAlign = KernelAslrAlignment; + constexpr size_t MiscRegionMinimumSize = Size_32_MB; + const size_t misc_region_size = Common::AlignUp( + std::max(misc_region_needed_size, MiscRegionMinimumSize), MiscRegionAlign); + ASSERT(misc_region_size > 0); + + // Setup the misc region. 
+ const VAddr misc_region_start = + memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( + misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel); + ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( + misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc)); + + // Setup the stack region. + constexpr size_t StackRegionSize = Size_14_MB; + constexpr size_t StackRegionAlign = KernelAslrAlignment; + const VAddr stack_region_start = + memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( + StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel); + ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( + stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack)); + + // Determine the size of the resource region. + const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit(); + + // Determine the size of the slab region. + const size_t slab_region_size = Common::AlignUp(KernelSlabHeapSize, PageSize); + ASSERT(slab_region_size <= resource_region_size); + + // Setup the slab region. + const PAddr code_start_phys_addr = KernelPhysicalAddressCodeBase; + const PAddr code_end_phys_addr = code_start_phys_addr + code_region_size; + const PAddr slab_start_phys_addr = code_end_phys_addr; + const PAddr slab_end_phys_addr = slab_start_phys_addr + slab_region_size; + constexpr size_t SlabRegionAlign = KernelAslrAlignment; + const size_t slab_region_needed_size = + Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) - + Common::AlignDown(code_end_phys_addr, SlabRegionAlign); + const VAddr slab_region_start = + memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( + slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) + + (code_end_phys_addr % SlabRegionAlign); + ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( + slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab)); + + // Setup the temp region. + constexpr size_t TempRegionSize = Size_128_MB; + constexpr size_t TempRegionAlign = KernelAslrAlignment; + const VAddr temp_region_start = + memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( + TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel); + ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize, + KMemoryRegionType_KernelTemp)); + + // Automatically map in devices that have auto-map attributes. + for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { + // We only care about kernel regions. + if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) { + continue; + } + + // Check whether we should map the region. + if (!region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) { + continue; + } + + // If this region has already been mapped, no need to consider it. + if (region.HasTypeAttribute(KMemoryRegionAttr_DidKernelMap)) { + continue; + } + + // Check that the region is valid. + ASSERT(region.GetEndAddress() != 0); + + // Set the attribute to note we've mapped this region. + region.SetTypeAttribute(KMemoryRegionAttr_DidKernelMap); + + // Create a virtual pair region and insert it into the tree. 
+ const PAddr map_phys_addr = Common::AlignDown(region.GetAddress(), PageSize); + const size_t map_size = + Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr; + const VAddr map_virt_addr = + memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard( + map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize); + ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( + map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice)); + region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr); + } + + Init::SetupDramPhysicalMemoryRegions(memory_layout); + + // Insert a physical region for the kernel code region. + ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode)); + + // Insert a physical region for the kernel slab region. + ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab)); + + // Determine size available for kernel page table heaps, requiring > 8 MB. + const PAddr resource_end_phys_addr = slab_start_phys_addr + resource_region_size; + const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr; + ASSERT(page_table_heap_size / Size_4_MB > 2); + + // Insert a physical region for the kernel page table heap region + ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap)); + + // All DRAM regions that we haven't tagged by this point will be mapped under the linear + // mapping. Tag them. + for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { + if (region.GetType() == KMemoryRegionType_Dram) { + // Check that the region is valid. + ASSERT(region.GetEndAddress() != 0); + + // Set the linear map attribute. + region.SetTypeAttribute(KMemoryRegionAttr_LinearMapped); + } + } + + // Get the linear region extents. + const auto linear_extents = + memory_layout.GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( + KMemoryRegionAttr_LinearMapped); + ASSERT(linear_extents.GetEndAddress() != 0); + + // Setup the linear mapping region. + constexpr size_t LinearRegionAlign = Size_1_GB; + const PAddr aligned_linear_phys_start = + Common::AlignDown(linear_extents.GetAddress(), LinearRegionAlign); + const size_t linear_region_size = + Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) - + aligned_linear_phys_start; + const VAddr linear_region_start = + memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard( + linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign); + + const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start; + + // Map and create regions for all the linearly-mapped data. 
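// (Annotation, illustrative rather than part of the diff: the loop below gives each
// linearly-mapped DRAM region a virtual twin at a single fixed offset, that is
//     virt_addr = phys_addr + linear_region_phys_to_virt_diff,
// and records the relationship in both directions via SetPairAddress(), so a physical
// region can later be translated to its virtual mapping and vice versa without
// re-deriving the offset.)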
+ { + PAddr cur_phys_addr = 0; + u64 cur_size = 0; + for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { + if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) { + continue; + } + + ASSERT(region.GetEndAddress() != 0); + + if (cur_size == 0) { + cur_phys_addr = region.GetAddress(); + cur_size = region.GetSize(); + } else if (cur_phys_addr + cur_size == region.GetAddress()) { + cur_size += region.GetSize(); + } else { + const VAddr cur_virt_addr = cur_phys_addr + linear_region_phys_to_virt_diff; + cur_phys_addr = region.GetAddress(); + cur_size = region.GetSize(); + } + + const VAddr region_virt_addr = + region.GetAddress() + linear_region_phys_to_virt_diff; + ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( + region_virt_addr, region.GetSize(), + GetTypeForVirtualLinearMapping(region.GetType()))); + region.SetPairAddress(region_virt_addr); + + KMemoryRegion* virt_region = + memory_layout.GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr); + ASSERT(virt_region != nullptr); + virt_region->SetPairAddress(region.GetAddress()); + } + } + + // Insert regions for the initial page table region. + ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt)); + ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( + resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize, + KMemoryRegionType_VirtualDramKernelInitPt)); + + // All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to + // some pool partition. Tag them. + for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { + if (region.GetType() == (KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped)) { + region.SetType(KMemoryRegionType_DramPoolPartition); + } + } + + // Setup all other memory regions needed to arrange the pool partitions. + Init::SetupPoolPartitionMemoryRegions(memory_layout); + + // Cache all linear regions in their own trees for faster access, later. 
+ memory_layout.InitializeLinearMemoryRegionTrees(aligned_linear_phys_start, + linear_region_start); + + const auto system_pool = memory_layout.GetKernelSystemPoolRegionPhysicalExtents(); + const auto applet_pool = memory_layout.GetKernelAppletPoolRegionPhysicalExtents(); + const auto application_pool = memory_layout.GetKernelApplicationPoolRegionPhysicalExtents(); + + // Initialize memory managers + memory_manager = std::make_unique(); + memory_manager->InitializeManager(KMemoryManager::Pool::Application, + application_pool.GetAddress(), + application_pool.GetEndAddress()); + memory_manager->InitializeManager(KMemoryManager::Pool::Applet, applet_pool.GetAddress(), + applet_pool.GetEndAddress()); + memory_manager->InitializeManager(KMemoryManager::Pool::System, system_pool.GetAddress(), + system_pool.GetEndAddress()); + + // Setup memory regions for emulated processes + // TODO(bunnei): These should not be hardcoded regions initialized within the kernel constexpr std::size_t hid_size{0x40000}; constexpr std::size_t font_size{0x1100000}; constexpr std::size_t irs_size{0x8000}; constexpr std::size_t time_size{0x1000}; - constexpr PAddr hid_addr{layout.System().GetAddress()}; - constexpr PAddr font_pa{layout.System().GetAddress() + hid_size}; - constexpr PAddr irs_addr{layout.System().GetAddress() + hid_size + font_size}; - constexpr PAddr time_addr{layout.System().GetAddress() + hid_size + font_size + irs_size}; - // Initialize memory manager - memory_manager = std::make_unique(); - memory_manager->InitializeManager(KMemoryManager::Pool::Application, - layout.Application().GetAddress(), - layout.Application().GetLastAddress()); - memory_manager->InitializeManager(KMemoryManager::Pool::Applet, - layout.Applet().GetAddress(), - layout.Applet().GetLastAddress()); - memory_manager->InitializeManager(KMemoryManager::Pool::System, - layout.System().GetAddress(), - layout.System().GetLastAddress()); + const PAddr hid_phys_addr{system_pool.GetAddress()}; + const PAddr font_phys_addr{system_pool.GetAddress() + hid_size}; + const PAddr irs_phys_addr{system_pool.GetAddress() + hid_size + font_size}; + const PAddr time_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size}; hid_shared_mem = Kernel::KSharedMemory::Create( - system.Kernel(), system.DeviceMemory(), nullptr, {hid_addr, hid_size / PageSize}, - KMemoryPermission::None, KMemoryPermission::Read, hid_addr, hid_size, + system.Kernel(), system.DeviceMemory(), nullptr, {hid_phys_addr, hid_size / PageSize}, + KMemoryPermission::None, KMemoryPermission::Read, hid_phys_addr, hid_size, "HID:SharedMemory"); font_shared_mem = Kernel::KSharedMemory::Create( - system.Kernel(), system.DeviceMemory(), nullptr, {font_pa, font_size / PageSize}, - KMemoryPermission::None, KMemoryPermission::Read, font_pa, font_size, + system.Kernel(), system.DeviceMemory(), nullptr, {font_phys_addr, font_size / PageSize}, + KMemoryPermission::None, KMemoryPermission::Read, font_phys_addr, font_size, "Font:SharedMemory"); irs_shared_mem = Kernel::KSharedMemory::Create( - system.Kernel(), system.DeviceMemory(), nullptr, {irs_addr, irs_size / PageSize}, - KMemoryPermission::None, KMemoryPermission::Read, irs_addr, irs_size, + system.Kernel(), system.DeviceMemory(), nullptr, {irs_phys_addr, irs_size / PageSize}, + KMemoryPermission::None, KMemoryPermission::Read, irs_phys_addr, irs_size, "IRS:SharedMemory"); time_shared_mem = Kernel::KSharedMemory::Create( - system.Kernel(), system.DeviceMemory(), nullptr, {time_addr, time_size / PageSize}, - KMemoryPermission::None, 
KMemoryPermission::Read, time_addr, time_size, + system.Kernel(), system.DeviceMemory(), nullptr, {time_phys_addr, time_size / PageSize}, + KMemoryPermission::None, KMemoryPermission::Read, time_phys_addr, time_size, "Time:SharedMemory"); // Allocate slab heaps diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 56906f2da..a500e63bc 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -1,4 +1,4 @@ -// Copyright 2014 Citra Emulator Project / PPSSPP Project +// Copyright 2021 yuzu Emulator Project // Licensed under GPLv2 or any later version // Refer to the license.txt file included. -- cgit v1.2.3 From 1996cae9cbcddadeb42db571e9c491efd91374b9 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sun, 21 Mar 2021 14:36:26 -0700 Subject: hle: kernel: k_memory_layout: Move KMemoryRegionAllocator out of global. --- src/core/hle/kernel/k_memory_layout.cpp | 49 +++++++++++---------------------- src/core/hle/kernel/k_memory_layout.h | 3 +- src/core/hle/kernel/k_memory_region.h | 30 +++++++++++++++++++- 3 files changed, 47 insertions(+), 35 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_memory_layout.cpp b/src/core/hle/kernel/k_memory_layout.cpp index 58fe4a133..fb1e2435f 100644 --- a/src/core/hle/kernel/k_memory_layout.cpp +++ b/src/core/hle/kernel/k_memory_layout.cpp @@ -2,6 +2,8 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. +#include + #include "common/alignment.h" #include "core/hle/kernel/k_memory_layout.h" #include "core/hle/kernel/k_system_control.h" @@ -10,42 +12,18 @@ namespace Kernel { namespace { -class KMemoryRegionAllocator final : NonCopyable { -public: - static constexpr size_t MaxMemoryRegions = 200; - -private: - KMemoryRegion region_heap[MaxMemoryRegions]{}; - size_t num_regions{}; - -public: - constexpr KMemoryRegionAllocator() = default; - -public: - template - KMemoryRegion* Allocate(Args&&... args) { - // Ensure we stay within the bounds of our heap. - ASSERT(this->num_regions < MaxMemoryRegions); - - // Create the new region. - KMemoryRegion* region = std::addressof(this->region_heap[this->num_regions++]); - new (region) KMemoryRegion(std::forward(args)...); - - return region; - } -}; - -KMemoryRegionAllocator g_memory_region_allocator; - template -KMemoryRegion* AllocateRegion(Args&&... args) { - return g_memory_region_allocator.Allocate(std::forward(args)...); +KMemoryRegion* AllocateRegion(KMemoryRegionAllocator& memory_region_allocator, Args&&... args) { + return memory_region_allocator.Allocate(std::forward(args)...); } } // namespace +KMemoryRegionTree::KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator_) + : memory_region_allocator{memory_region_allocator_} {} + void KMemoryRegionTree::InsertDirectly(u64 address, u64 last_address, u32 attr, u32 type_id) { - this->insert(*AllocateRegion(address, last_address, attr, type_id)); + this->insert(*AllocateRegion(memory_region_allocator, address, last_address, attr, type_id)); } bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) { @@ -92,7 +70,8 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at const u64 new_pair = (old_pair != std::numeric_limits::max()) ? 
old_pair + (address - old_address) : old_pair; - this->insert(*AllocateRegion(address, inserted_region_last, new_pair, new_attr, type_id)); + this->insert(*AllocateRegion(memory_region_allocator, address, inserted_region_last, + new_pair, new_attr, type_id)); } // If we need to insert a region after the region, do so. @@ -100,8 +79,8 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at const u64 after_pair = (old_pair != std::numeric_limits::max()) ? old_pair + (inserted_region_end - old_address) : old_pair; - this->insert( - *AllocateRegion(inserted_region_end, old_last, after_pair, old_attr, old_type)); + this->insert(*AllocateRegion(memory_region_allocator, inserted_region_end, old_last, + after_pair, old_attr, old_type)); } return true; @@ -147,6 +126,10 @@ VAddr KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u } } +KMemoryLayout::KMemoryLayout() + : virtual_tree{memory_region_allocator}, physical_tree{memory_region_allocator}, + virtual_linear_tree{memory_region_allocator}, physical_linear_tree{memory_region_allocator} {} + void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start, VAddr linear_virtual_start) { // Set static differences. diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h index f2b46c932..b3e057326 100644 --- a/src/core/hle/kernel/k_memory_layout.h +++ b/src/core/hle/kernel/k_memory_layout.h @@ -73,7 +73,7 @@ constexpr bool IsKernelAddress(VAddr address) { class KMemoryLayout final { public: - KMemoryLayout() = default; + KMemoryLayout(); KMemoryRegionTree& GetVirtualMemoryRegionTree() { return virtual_tree; @@ -376,6 +376,7 @@ private: private: u64 linear_phys_to_virt_diff{}; u64 linear_virt_to_phys_diff{}; + KMemoryRegionAllocator memory_region_allocator; KMemoryRegionTree virtual_tree; KMemoryRegionTree physical_tree; KMemoryRegionTree virtual_linear_tree; diff --git a/src/core/hle/kernel/k_memory_region.h b/src/core/hle/kernel/k_memory_region.h index 1d4fcde6f..374b24bd3 100644 --- a/src/core/hle/kernel/k_memory_region.h +++ b/src/core/hle/kernel/k_memory_region.h @@ -11,6 +11,8 @@ namespace Kernel { +class KMemoryRegionAllocator; + class KMemoryRegion final : public Common::IntrusiveRedBlackTreeBaseNode, NonCopyable { friend class KMemoryRegionTree; @@ -155,9 +157,10 @@ public: private: TreeType m_tree{}; + KMemoryRegionAllocator& memory_region_allocator; public: - constexpr KMemoryRegionTree() = default; + KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator_); public: KMemoryRegion* FindModifiable(u64 address) { @@ -321,4 +324,29 @@ public: } }; +class KMemoryRegionAllocator final : NonCopyable { +public: + static constexpr size_t MaxMemoryRegions = 200; + +private: + std::array region_heap{}; + size_t num_regions{}; + +public: + constexpr KMemoryRegionAllocator() = default; + +public: + template + KMemoryRegion* Allocate(Args&&... args) { + // Ensure we stay within the bounds of our heap. + ASSERT(this->num_regions < MaxMemoryRegions); + + // Create the new region. + KMemoryRegion* region = std::addressof(this->region_heap[this->num_regions++]); + new (region) KMemoryRegion(std::forward(args)...); + + return region; + } +}; + } // namespace Kernel -- cgit v1.2.3 From fc5205fc84bf989b0d4b87f06d084942b7e04cea Mon Sep 17 00:00:00 2001 From: bunnei Date: Sun, 21 Mar 2021 15:47:05 -0700 Subject: hle: kernel: k_memory_region_type: Remove extra ". 
--- src/core/hle/kernel/k_memory_region_type.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h index d83e80793..cf2653d28 100644 --- a/src/core/hle/kernel/k_memory_region_type.h +++ b/src/core/hle/kernel/k_memory_region_type.h @@ -279,7 +279,7 @@ static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5); #if defined(ARCH_ARM64) #include "core/hle/kernel/arch/arm64/k_memory_region_device_types.inc" #elif defined(ARCH_ARM) -#error "Unimplemented"" +#error "Unimplemented" #else // Default to no architecture devices. constexpr auto NumArchitectureDeviceRegions = 0; -- cgit v1.2.3 From 3ffbe50e7d925d309632302a710fbc2161165269 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sun, 21 Mar 2021 15:47:24 -0700 Subject: hle: kernel: Remove unused variable. --- src/core/hle/kernel/kernel.cpp | 1 - 1 file changed, 1 deletion(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 257d4bb83..9360710de 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -492,7 +492,6 @@ struct KernelCore::Impl { } else if (cur_phys_addr + cur_size == region.GetAddress()) { cur_size += region.GetSize(); } else { - const VAddr cur_virt_addr = cur_phys_addr + linear_region_phys_to_virt_diff; cur_phys_addr = region.GetAddress(); cur_size = region.GetSize(); } -- cgit v1.2.3 From 1d78190843e64577810bfd8a33b80256de28f2bc Mon Sep 17 00:00:00 2001 From: bunnei Date: Sun, 21 Mar 2021 15:53:21 -0700 Subject: hle: kernel: Merge KMemoryRegionAttr and KMemoryRegionType. - Fixes clang errors with mixed enum arithmetic. --- src/core/hle/kernel/k_memory_region.h | 6 +++--- src/core/hle/kernel/k_memory_region_type.h | 14 ++++++-------- 2 files changed, 9 insertions(+), 11 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_memory_region.h b/src/core/hle/kernel/k_memory_region.h index 374b24bd3..782c21fbf 100644 --- a/src/core/hle/kernel/k_memory_region.h +++ b/src/core/hle/kernel/k_memory_region.h @@ -91,7 +91,7 @@ public: return (this->GetType() | type) == this->GetType(); } - constexpr bool HasTypeAttribute(KMemoryRegionAttr attr) const { + constexpr bool HasTypeAttribute(u32 attr) const { return (this->GetType() | attr) == this->GetType(); } @@ -103,7 +103,7 @@ public: pair_address = a; } - constexpr void SetTypeAttribute(KMemoryRegionAttr attr) { + constexpr void SetTypeAttribute(u32 attr) { type_id |= attr; } @@ -237,7 +237,7 @@ public: return extents; } - DerivedRegionExtents GetDerivedRegionExtents(KMemoryRegionAttr type_id) const { + DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) const { return GetDerivedRegionExtents(static_cast(type_id)); } diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h index cf2653d28..5ea5d2ceb 100644 --- a/src/core/hle/kernel/k_memory_region_type.h +++ b/src/core/hle/kernel/k_memory_region_type.h @@ -13,9 +13,7 @@ namespace Kernel { -enum KMemoryRegionType : u32 {}; - -enum KMemoryRegionAttr : typename std::underlying_type::type { +enum KMemoryRegionType : u32 { KMemoryRegionAttr_CarveoutProtected = 0x04000000, KMemoryRegionAttr_DidKernelMap = 0x08000000, KMemoryRegionAttr_ShouldKernelMap = 0x10000000, @@ -23,7 +21,7 @@ enum KMemoryRegionAttr : typename std::underlying_type::type KMemoryRegionAttr_NoUserMap = 0x40000000, KMemoryRegionAttr_LinearMapped = 0x80000000, }; 
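// (Annotation, not part of the diff: expressions in kernel.cpp such as
//     region.GetType() == (KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped)
// previously mixed two distinct enumeration types in one bitwise operation, which newer
// clang versions flag as deprecated enum arithmetic and turn into hard errors when
// warnings are treated as errors. With the attribute values folded into
// KMemoryRegionType, the same expressions operate on a single enum, which is the fix
// the commit message above describes.)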
-DECLARE_ENUM_FLAG_OPERATORS(KMemoryRegionAttr); +DECLARE_ENUM_FLAG_OPERATORS(KMemoryRegionType); namespace impl { @@ -82,8 +80,8 @@ public: return *this; } - constexpr KMemoryRegionTypeValue& SetAttribute(KMemoryRegionAttr attr) { - m_value |= static_cast(attr); + constexpr KMemoryRegionTypeValue& SetAttribute(u32 attr) { + m_value |= attr; return *this; } @@ -95,9 +93,9 @@ public: return new_type; } - constexpr KMemoryRegionTypeValue DeriveAttribute(KMemoryRegionAttr attr) const { + constexpr KMemoryRegionTypeValue DeriveAttribute(u32 attr) const { KMemoryRegionTypeValue new_type = *this; - new_type.m_value |= static_cast(attr); + new_type.m_value |= attr; return new_type; } -- cgit v1.2.3 From ab5995c7ae878fcf3b9a0c537f0e37377f206d76 Mon Sep 17 00:00:00 2001 From: bunnei Date: Tue, 23 Mar 2021 18:31:46 -0700 Subject: common: common_sizes: Move sizes to the Common namespace. --- .../kernel/board/nintendo/nx/k_system_control.cpp | 32 +++++++++++----------- src/core/hle/kernel/k_address_space_info.cpp | 29 ++++++++++---------- src/core/hle/kernel/k_memory_layout.h | 16 +++++------ src/core/hle/kernel/kernel.cpp | 14 +++++----- 4 files changed, 46 insertions(+), 45 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp index a48d0c11e..919a6b943 100644 --- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp +++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp @@ -57,11 +57,11 @@ size_t KSystemControl::Init::GetIntendedMemorySize() { switch (GetMemorySizeForInit()) { case Smc::MemorySize_4GB: default: // All invalid modes should go to 4GB. - return Size_4_GB; + return Common::Size_4_GB; case Smc::MemorySize_6GB: - return Size_6_GB; + return Common::Size_6_GB; case Smc::MemorySize_8GB: - return Size_8_GB; + return Common::Size_8_GB; } } @@ -79,17 +79,17 @@ std::size_t KSystemControl::Init::GetApplicationPoolSize() { switch (GetMemoryArrangeForInit()) { case Smc::MemoryArrangement_4GB: default: - return Size_3285_MB; + return Common::Size_3285_MB; case Smc::MemoryArrangement_4GBForAppletDev: - return Size_2048_MB; + return Common::Size_2048_MB; case Smc::MemoryArrangement_4GBForSystemDev: - return Size_3285_MB; + return Common::Size_3285_MB; case Smc::MemoryArrangement_6GB: - return Size_4916_MB; + return Common::Size_4916_MB; case Smc::MemoryArrangement_6GBForAppletDev: - return Size_3285_MB; + return Common::Size_3285_MB; case Smc::MemoryArrangement_8GB: - return Size_4916_MB; + return Common::Size_4916_MB; } }(); @@ -103,22 +103,22 @@ size_t KSystemControl::Init::GetAppletPoolSize() { switch (GetMemoryArrangeForInit()) { case Smc::MemoryArrangement_4GB: default: - return Size_507_MB; + return Common::Size_507_MB; case Smc::MemoryArrangement_4GBForAppletDev: - return Size_1554_MB; + return Common::Size_1554_MB; case Smc::MemoryArrangement_4GBForSystemDev: - return Size_448_MB; + return Common::Size_448_MB; case Smc::MemoryArrangement_6GB: - return Size_562_MB; + return Common::Size_562_MB; case Smc::MemoryArrangement_6GBForAppletDev: - return Size_2193_MB; + return Common::Size_2193_MB; case Smc::MemoryArrangement_8GB: - return Size_2193_MB; + return Common::Size_2193_MB; } }(); // Return (possibly) adjusted size. 
- constexpr size_t ExtraSystemMemoryForAtmosphere = Size_33_MB; + constexpr size_t ExtraSystemMemoryForAtmosphere = Common::Size_33_MB; return base_pool_size - ExtraSystemMemoryForAtmosphere - KTraceBufferSize; } diff --git a/src/core/hle/kernel/k_address_space_info.cpp b/src/core/hle/kernel/k_address_space_info.cpp index 89906954d..c7549f7a2 100644 --- a/src/core/hle/kernel/k_address_space_info.cpp +++ b/src/core/hle/kernel/k_address_space_info.cpp @@ -14,24 +14,25 @@ namespace { // clang-format off constexpr std::array AddressSpaceInfos{{ - { .bit_width = 32, .address = Size_2_MB , .size = Size_1_GB - Size_2_MB , .type = KAddressSpaceInfo::Type::MapSmall, }, - { .bit_width = 32, .address = Size_1_GB , .size = Size_4_GB - Size_1_GB , .type = KAddressSpaceInfo::Type::MapLarge, }, - { .bit_width = 32, .address = Size_Invalid, .size = Size_1_GB , .type = KAddressSpaceInfo::Type::Heap, }, - { .bit_width = 32, .address = Size_Invalid, .size = Size_1_GB , .type = KAddressSpaceInfo::Type::Alias, }, - { .bit_width = 36, .address = Size_128_MB , .size = Size_2_GB - Size_128_MB, .type = KAddressSpaceInfo::Type::MapSmall, }, - { .bit_width = 36, .address = Size_2_GB , .size = Size_64_GB - Size_2_GB , .type = KAddressSpaceInfo::Type::MapLarge, }, - { .bit_width = 36, .address = Size_Invalid, .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, }, - { .bit_width = 36, .address = Size_Invalid, .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Alias, }, - { .bit_width = 39, .address = Size_128_MB , .size = Size_512_GB - Size_128_MB, .type = KAddressSpaceInfo::Type::Map39Bit, }, - { .bit_width = 39, .address = Size_Invalid, .size = Size_64_GB , .type = KAddressSpaceInfo::Type::MapSmall }, - { .bit_width = 39, .address = Size_Invalid, .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, }, - { .bit_width = 39, .address = Size_Invalid, .size = Size_64_GB , .type = KAddressSpaceInfo::Type::Alias, }, - { .bit_width = 39, .address = Size_Invalid, .size = Size_2_GB , .type = KAddressSpaceInfo::Type::Stack, }, + { .bit_width = 32, .address = Common::Size_2_MB , .size = Common::Size_1_GB - Common::Size_2_MB , .type = KAddressSpaceInfo::Type::MapSmall, }, + { .bit_width = 32, .address = Common::Size_1_GB , .size = Common::Size_4_GB - Common::Size_1_GB , .type = KAddressSpaceInfo::Type::MapLarge, }, + { .bit_width = 32, .address = Common::Size_Invalid, .size = Common::Size_1_GB , .type = KAddressSpaceInfo::Type::Alias, }, + { .bit_width = 32, .address = Common::Size_Invalid, .size = Common::Size_1_GB , .type = KAddressSpaceInfo::Type::Heap, }, + { .bit_width = 36, .address = Common::Size_128_MB , .size = Common::Size_2_GB - Common::Size_128_MB, .type = KAddressSpaceInfo::Type::MapSmall, }, + { .bit_width = 36, .address = Common::Size_2_GB , .size = Common::Size_64_GB - Common::Size_2_GB , .type = KAddressSpaceInfo::Type::MapLarge, }, + { .bit_width = 36, .address = Common::Size_Invalid, .size = Common::Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, }, + { .bit_width = 36, .address = Common::Size_Invalid, .size = Common::Size_6_GB , .type = KAddressSpaceInfo::Type::Alias, }, + { .bit_width = 39, .address = Common::Size_128_MB , .size = Common::Size_512_GB - Common::Size_128_MB, .type = KAddressSpaceInfo::Type::Map39Bit, }, + { .bit_width = 39, .address = Common::Size_Invalid, .size = Common::Size_64_GB , .type = KAddressSpaceInfo::Type::MapSmall }, + { .bit_width = 39, .address = Common::Size_Invalid, .size = Common::Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, }, + { .bit_width = 39, 
.address = Common::Size_Invalid, .size = Common::Size_64_GB , .type = KAddressSpaceInfo::Type::Alias, }, + { .bit_width = 39, .address = Common::Size_Invalid, .size = Common::Size_2_GB , .type = KAddressSpaceInfo::Type::Stack, }, }}; // clang-format on constexpr bool IsAllowedIndexForAddress(std::size_t index) { - return index < AddressSpaceInfos.size() && AddressSpaceInfos[index].address != Size_Invalid; + return index < AddressSpaceInfos.size() && + AddressSpaceInfos[index].address != Common::Size_Invalid; } using IndexArray = diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h index b3e057326..590dc449a 100644 --- a/src/core/hle/kernel/k_memory_layout.h +++ b/src/core/hle/kernel/k_memory_layout.h @@ -14,20 +14,20 @@ namespace Kernel { -constexpr std::size_t L1BlockSize = Size_1_GB; -constexpr std::size_t L2BlockSize = Size_2_MB; +constexpr std::size_t L1BlockSize = Common::Size_1_GB; +constexpr std::size_t L2BlockSize = Common::Size_2_MB; constexpr std::size_t GetMaximumOverheadSize(std::size_t size) { return (Common::DivideUp(size, L1BlockSize) + Common::DivideUp(size, L2BlockSize)) * PageSize; } -constexpr std::size_t MainMemorySize = Size_4_GB; -constexpr std::size_t MainMemorySizeMax = Size_8_GB; +constexpr std::size_t MainMemorySize = Common::Size_4_GB; +constexpr std::size_t MainMemorySizeMax = Common::Size_8_GB; constexpr std::size_t ReservedEarlyDramSize = 0x60000; constexpr std::size_t DramPhysicalAddress = 0x80000000; -constexpr std::size_t KernelAslrAlignment = Size_2_MB; +constexpr std::size_t KernelAslrAlignment = Common::Size_2_MB; constexpr std::size_t KernelVirtualAddressSpaceWidth = 1ULL << 39; constexpr std::size_t KernelPhysicalAddressSpaceWidth = 1ULL << 48; @@ -51,10 +51,10 @@ constexpr std::size_t KernelPhysicalAddressSpaceSize = constexpr std::size_t KernelPhysicalAddressCodeBase = DramPhysicalAddress + ReservedEarlyDramSize; constexpr std::size_t KernelPageTableHeapSize = GetMaximumOverheadSize(MainMemorySizeMax); -constexpr std::size_t KernelInitialPageHeapSize = Size_128_KB; +constexpr std::size_t KernelInitialPageHeapSize = Common::Size_128_KB; -constexpr std::size_t KernelSlabHeapDataSize = Size_5_MB; -constexpr std::size_t KernelSlabHeapGapsSize = Size_2_MB - Size_64_KB; +constexpr std::size_t KernelSlabHeapDataSize = Common::Size_5_MB; +constexpr std::size_t KernelSlabHeapGapsSize = Common::Size_2_MB - Common::Size_64_KB; constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSize; // NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860. diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 9360710de..e994e8bed 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -289,8 +289,8 @@ struct KernelCore::Impl { const VAddr code_end_virt_addr = KernelVirtualAddressCodeEnd; // Setup the containing kernel region. - constexpr size_t KernelRegionSize = Size_1_GB; - constexpr size_t KernelRegionAlign = Size_1_GB; + constexpr size_t KernelRegionSize = Common::Size_1_GB; + constexpr size_t KernelRegionAlign = Common::Size_1_GB; constexpr VAddr kernel_region_start = Common::AlignDown(code_start_virt_addr, KernelRegionAlign); size_t kernel_region_size = KernelRegionSize; @@ -337,7 +337,7 @@ struct KernelCore::Impl { // Decide on the actual size for the misc region. 
constexpr size_t MiscRegionAlign = KernelAslrAlignment; - constexpr size_t MiscRegionMinimumSize = Size_32_MB; + constexpr size_t MiscRegionMinimumSize = Common::Size_32_MB; const size_t misc_region_size = Common::AlignUp( std::max(misc_region_needed_size, MiscRegionMinimumSize), MiscRegionAlign); ASSERT(misc_region_size > 0); @@ -350,7 +350,7 @@ struct KernelCore::Impl { misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc)); // Setup the stack region. - constexpr size_t StackRegionSize = Size_14_MB; + constexpr size_t StackRegionSize = Common::Size_14_MB; constexpr size_t StackRegionAlign = KernelAslrAlignment; const VAddr stack_region_start = memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( @@ -382,7 +382,7 @@ struct KernelCore::Impl { slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab)); // Setup the temp region. - constexpr size_t TempRegionSize = Size_128_MB; + constexpr size_t TempRegionSize = Common::Size_128_MB; constexpr size_t TempRegionAlign = KernelAslrAlignment; const VAddr temp_region_start = memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( @@ -438,7 +438,7 @@ struct KernelCore::Impl { // Determine size available for kernel page table heaps, requiring > 8 MB. const PAddr resource_end_phys_addr = slab_start_phys_addr + resource_region_size; const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr; - ASSERT(page_table_heap_size / Size_4_MB > 2); + ASSERT(page_table_heap_size / Common::Size_4_MB > 2); // Insert a physical region for the kernel page table heap region ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( @@ -463,7 +463,7 @@ struct KernelCore::Impl { ASSERT(linear_extents.GetEndAddress() != 0); // Setup the linear mapping region. - constexpr size_t LinearRegionAlign = Size_1_GB; + constexpr size_t LinearRegionAlign = Common::Size_1_GB; const PAddr aligned_linear_phys_start = Common::AlignDown(linear_extents.GetAddress(), LinearRegionAlign); const size_t linear_region_size = -- cgit v1.2.3 From a32190d0c2198d66657db33409348f067ada27d3 Mon Sep 17 00:00:00 2001 From: bunnei Date: Tue, 23 Mar 2021 18:33:29 -0700 Subject: hle: kernel: k_system_control: Remove unnecessary inline. 
--- src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp index 919a6b943..86472b5ce 100644 --- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp +++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp @@ -13,13 +13,13 @@ namespace Kernel::Board::Nintendo::Nx { namespace impl { -constexpr inline const std::size_t RequiredNonSecureSystemMemorySizeVi = 0x2238 * 4 * 1024; -constexpr inline const std::size_t RequiredNonSecureSystemMemorySizeNvservices = 0x710 * 4 * 1024; -constexpr inline const std::size_t RequiredNonSecureSystemMemorySizeMisc = 0x80 * 4 * 1024; +constexpr const std::size_t RequiredNonSecureSystemMemorySizeVi = 0x2238 * 4 * 1024; +constexpr const std::size_t RequiredNonSecureSystemMemorySizeNvservices = 0x710 * 4 * 1024; +constexpr const std::size_t RequiredNonSecureSystemMemorySizeMisc = 0x80 * 4 * 1024; } // namespace impl -constexpr inline const std::size_t RequiredNonSecureSystemMemorySize = +constexpr const std::size_t RequiredNonSecureSystemMemorySize = impl::RequiredNonSecureSystemMemorySizeVi + impl::RequiredNonSecureSystemMemorySizeNvservices + impl::RequiredNonSecureSystemMemorySizeMisc; -- cgit v1.2.3 From 9032d21365638aa6aaf4a2833b1149df6c838820 Mon Sep 17 00:00:00 2001 From: bunnei Date: Tue, 23 Mar 2021 18:35:01 -0700 Subject: hle: kernel: k_memory_layout: Use pair instead of tuple. --- src/core/hle/kernel/k_memory_layout.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h index 590dc449a..288642d9a 100644 --- a/src/core/hle/kernel/k_memory_layout.h +++ b/src/core/hle/kernel/k_memory_layout.h @@ -4,6 +4,8 @@ #pragma once +#include + #include "common/alignment.h" #include "common/common_sizes.h" #include "common/common_types.h" @@ -208,7 +210,7 @@ public: static_cast(KMemoryRegionAttr_LinearMapped)); } - std::tuple GetTotalAndKernelMemorySizes() const { + std::pair GetTotalAndKernelMemorySizes() const { size_t total_size = 0, kernel_size = 0; for (const auto& region : GetPhysicalMemoryRegionTree()) { if (region.IsDerivedFrom(KMemoryRegionType_Dram)) { @@ -218,7 +220,7 @@ public: } } } - return std::make_tuple(total_size, kernel_size); + return std::make_pair(total_size, kernel_size); } void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start, -- cgit v1.2.3 From 4eac8703d23be2b3ddfab36d93608c2398bbaa0a Mon Sep 17 00:00:00 2001 From: bunnei Date: Tue, 23 Mar 2021 18:37:39 -0700 Subject: hle: kernel: k_memory_region: Minor code cleanup. 
--- src/core/hle/kernel/k_memory_region.h | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_memory_region.h b/src/core/hle/kernel/k_memory_region.h index 782c21fbf..a861c04ab 100644 --- a/src/core/hle/kernel/k_memory_region.h +++ b/src/core/hle/kernel/k_memory_region.h @@ -160,7 +160,7 @@ private: KMemoryRegionAllocator& memory_region_allocator; public: - KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator_); + explicit KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator_); public: KMemoryRegion* FindModifiable(u64 address) { @@ -328,14 +328,8 @@ class KMemoryRegionAllocator final : NonCopyable { public: static constexpr size_t MaxMemoryRegions = 200; -private: - std::array region_heap{}; - size_t num_regions{}; - -public: constexpr KMemoryRegionAllocator() = default; -public: template KMemoryRegion* Allocate(Args&&... args) { // Ensure we stay within the bounds of our heap. @@ -347,6 +341,10 @@ public: return region; } + +private: + std::array region_heap{}; + size_t num_regions{}; }; } // namespace Kernel -- cgit v1.2.3 From fb91647bcad788898d90c2eb95a3e1af4304d6d9 Mon Sep 17 00:00:00 2001 From: bunnei Date: Tue, 23 Mar 2021 18:42:04 -0700 Subject: hle: kernel: k_memory_region_type: Minor code cleanup. --- src/core/hle/kernel/k_memory_region_type.h | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h index 5ea5d2ceb..a05e66677 100644 --- a/src/core/hle/kernel/k_memory_region_type.h +++ b/src/core/hle/kernel/k_memory_region_type.h @@ -41,20 +41,9 @@ constexpr size_t BitsForDeriveDense(size_t n) { } class KMemoryRegionTypeValue { -private: - using ValueType = typename std::underlying_type::type; - -private: - ValueType m_value{}; - size_t m_next_bit{}; - bool m_finalized{}; - bool m_sparse_only{}; - bool m_dense_only{}; - -private: - constexpr KMemoryRegionTypeValue(ValueType v) : m_value(v) {} - public: + using ValueType = std::underlying_type_t; + constexpr KMemoryRegionTypeValue() = default; constexpr operator KMemoryRegionType() const { @@ -139,6 +128,16 @@ public: constexpr bool IsAncestorOf(ValueType v) const { return (m_value | v) == v; } + +private: + constexpr KMemoryRegionTypeValue(ValueType v) : m_value(v) {} + +private: + ValueType m_value{}; + size_t m_next_bit{}; + bool m_finalized{}; + bool m_sparse_only{}; + bool m_dense_only{}; }; } // namespace impl -- cgit v1.2.3 From 10d6e9f32bcc102d2fb12ba3e489c9a03d7f220f Mon Sep 17 00:00:00 2001 From: bunnei Date: Tue, 23 Mar 2021 18:47:16 -0700 Subject: hle: kernel: Breakup InitializeMemoryLayout. --- src/core/hle/kernel/kernel.cpp | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index e994e8bed..557e63ea0 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -269,9 +269,7 @@ struct KernelCore::Impl { return schedulers[thread_id]->GetCurrentThread(); } - void InitializeMemoryLayout() { - KMemoryLayout memory_layout; - + void DeriveInitialMemoryLayout(KMemoryLayout& memory_layout) { // Insert the root region for the virtual memory tree, from which all other regions will // derive. 
memory_layout.GetVirtualMemoryRegionTree().InsertDirectly( @@ -531,6 +529,12 @@ struct KernelCore::Impl { // Cache all linear regions in their own trees for faster access, later. memory_layout.InitializeLinearMemoryRegionTrees(aligned_linear_phys_start, linear_region_start); + } + + void InitializeMemoryLayout() { + // Derive the initial memory layout from the emulated board + KMemoryLayout memory_layout; + DeriveInitialMemoryLayout(memory_layout); const auto system_pool = memory_layout.GetKernelSystemPoolRegionPhysicalExtents(); const auto applet_pool = memory_layout.GetKernelAppletPoolRegionPhysicalExtents(); -- cgit v1.2.3
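(Annotation, not part of the series.) The Init::Setup*MemoryRegions() hooks declared alongside KMemoryLayout earlier in the series appear here only as forward declarations; their bodies live in board-specific code that these patches do not show. As a rough sketch of how such a hook could slot into the trees built by DeriveInitialMemoryLayout(), with the KMemoryRegionType_DramReservedEarly name and the overall structure assumed rather than taken from the series:

    namespace Kernel::Init {

    // Hypothetical board hook: tag all of DRAM, then carve out the reserved early
    // region, mirroring how the virtual tree is seeded above with Insert().
    void SetupDramPhysicalMemoryRegions(KMemoryLayout& memory_layout) {
        const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
            DramPhysicalAddress, intended_memory_size, KMemoryRegionType_Dram));
        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
            DramPhysicalAddress, ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly));
    }

    } // namespace Kernel::Init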