Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/code_set.cpp     12
-rw-r--r--  src/core/hle/kernel/code_set.h       90
-rw-r--r--  src/core/hle/kernel/process.cpp      14
-rw-r--r--  src/core/hle/kernel/process.h        43
-rw-r--r--  src/core/hle/kernel/scheduler.cpp     8
-rw-r--r--  src/core/hle/kernel/svc.cpp          32
-rw-r--r--  src/core/hle/kernel/thread.cpp       72
-rw-r--r--  src/core/hle/kernel/thread.h         23
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp   26
-rw-r--r--  src/core/hle/kernel/vm_manager.h     20
10 files changed, 209 insertions(+), 131 deletions(-)
diff --git a/src/core/hle/kernel/code_set.cpp b/src/core/hle/kernel/code_set.cpp
new file mode 100644
index 000000000..1f434e9af
--- /dev/null
+++ b/src/core/hle/kernel/code_set.cpp
@@ -0,0 +1,12 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/code_set.h"
+
+namespace Kernel {
+
+CodeSet::CodeSet() = default;
+CodeSet::~CodeSet() = default;
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/code_set.h b/src/core/hle/kernel/code_set.h
new file mode 100644
index 000000000..834fd23d2
--- /dev/null
+++ b/src/core/hle/kernel/code_set.h
@@ -0,0 +1,90 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <cstddef>
+#include <memory>
+#include <vector>
+
+#include "common/common_types.h"
+
+namespace Kernel {
+
+/**
+ * Represents executable data that may be loaded into a kernel process.
+ *
+ * A code set consists of three basic segments:
+ * - A code (a.k.a. text) segment
+ * - A read-only data segment (rodata)
+ * - A data segment
+ *
+ * The code segment is the portion of the object file that contains
+ * executable instructions.
+ *
+ * The read-only data segment is the portion of the object file that
+ * contains (as one would expect) read-only data, such as fixed constant
+ * values and data structures.
+ *
+ * The data segment is similar to the read-only data segment -- it contains
+ * variables and data structures that have predefined values; however,
+ * entities within this segment can be modified.
+ */
+struct CodeSet final {
+ /// A single segment within a code set.
+ struct Segment final {
+ /// The byte offset that this segment is located at.
+ std::size_t offset = 0;
+
+ /// The address to map this segment to.
+ VAddr addr = 0;
+
+ /// The size of this segment in bytes.
+ u32 size = 0;
+ };
+
+ explicit CodeSet();
+ ~CodeSet();
+
+ CodeSet(const CodeSet&) = delete;
+ CodeSet& operator=(const CodeSet&) = delete;
+
+ CodeSet(CodeSet&&) = default;
+ CodeSet& operator=(CodeSet&&) = default;
+
+ Segment& CodeSegment() {
+ return segments[0];
+ }
+
+ const Segment& CodeSegment() const {
+ return segments[0];
+ }
+
+ Segment& RODataSegment() {
+ return segments[1];
+ }
+
+ const Segment& RODataSegment() const {
+ return segments[1];
+ }
+
+ Segment& DataSegment() {
+ return segments[2];
+ }
+
+ const Segment& DataSegment() const {
+ return segments[2];
+ }
+
+ /// The overall data that backs this code set.
+ std::shared_ptr<std::vector<u8>> memory;
+
+ /// The segments that comprise this code set.
+ std::array<Segment, 3> segments;
+
+ /// The entry point address for this code set.
+ VAddr entrypoint = 0;
+};
+
+} // namespace Kernel
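
Editor's note: a CodeSet like the one added above is filled in by the executable loaders before being handed to Process::LoadModule(). The following is a minimal usage sketch, not part of this change; the BuildExampleCodeSet() helper, the segment sizes, and the layout are hypothetical (real loaders derive them from the image headers).

    #include <memory>
    #include <utility>
    #include <vector>

    #include "core/hle/kernel/code_set.h"

    // Sketch: build a CodeSet from a raw program image.
    Kernel::CodeSet BuildExampleCodeSet(std::vector<u8> program_image, VAddr base) {
        Kernel::CodeSet code_set;

        // All three segments share one backing buffer; each segment records its
        // byte offset into that buffer, its target address, and its size.
        code_set.memory = std::make_shared<std::vector<u8>>(std::move(program_image));

        code_set.CodeSegment().offset = 0;        // .text at the start of the image
        code_set.CodeSegment().addr = base;
        code_set.CodeSegment().size = 0x1000;

        code_set.RODataSegment().offset = 0x1000; // .rodata directly after .text
        code_set.RODataSegment().addr = base + 0x1000;
        code_set.RODataSegment().size = 0x800;

        code_set.DataSegment().offset = 0x1800;   // .data last; the only writable mapping
        code_set.DataSegment().addr = base + 0x1800;
        code_set.DataSegment().size = 0x800;

        code_set.entrypoint = base;
        return code_set;
    }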
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 406808ce9..0d782e4ba 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -9,6 +9,7 @@
#include "common/logging/log.h"
#include "core/core.h"
#include "core/file_sys/program_metadata.h"
+#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
@@ -31,7 +32,7 @@ namespace {
*/
void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_point, u32 priority) {
// Setup page table so we can write to memory
- SetCurrentPageTable(&owner_process.VMManager().page_table);
+ Memory::SetCurrentPageTable(&owner_process.VMManager().page_table);
// Initialize new "main" thread
const VAddr stack_top = owner_process.VMManager().GetTLSIORegionEndAddress();
@@ -50,9 +51,6 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_poi
}
} // Anonymous namespace
-CodeSet::CodeSet() = default;
-CodeSet::~CodeSet() = default;
-
SharedPtr<Process> Process::Create(Core::System& system, std::string&& name) {
auto& kernel = system.Kernel();
@@ -212,7 +210,7 @@ void Process::FreeTLSSlot(VAddr tls_address) {
}
void Process::LoadModule(CodeSet module_, VAddr base_addr) {
- const auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions,
+ const auto MapSegment = [&](const CodeSet::Segment& segment, VMAPermission permissions,
MemoryState memory_state) {
const auto vma = vm_manager
.MapMemoryBlock(segment.addr + base_addr, module_.memory,
@@ -222,9 +220,9 @@ void Process::LoadModule(CodeSet module_, VAddr base_addr) {
};
// Map CodeSet segments
- MapSegment(module_.CodeSegment(), VMAPermission::ReadExecute, MemoryState::CodeStatic);
- MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeMutable);
- MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeMutable);
+ MapSegment(module_.CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code);
+ MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData);
+ MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData);
// Clear instruction cache in CPU JIT
system.InvalidateCpuInstructionCaches();
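
Editor's note: the segment-to-permission pairing above (ReadExecute for .text, Read and ReadWrite for the renamed Code/CodeData states) is what a loader relies on when it submits a CodeSet. A hedged sketch of that call site follows; LoadExampleModule() is a made-up helper, and it assumes VMManager exposes a GetCodeRegionBaseAddress() accessor.

    #include <utility>

    #include "core/hle/kernel/code_set.h"
    #include "core/hle/kernel/process.h"

    // Illustrative only: hand a freshly-built CodeSet to a process. LoadModule()
    // then performs the three MapSegment() calls shown in the hunk above.
    void LoadExampleModule(Kernel::Process& process, Kernel::CodeSet code_set) {
        const VAddr base_addr = process.VMManager().GetCodeRegionBaseAddress();
        process.LoadModule(std::move(code_set), base_addr);
    }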
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 5e72300b6..1bd7bf5c1 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -7,7 +7,6 @@
#include <array>
#include <bitset>
#include <cstddef>
-#include <memory>
#include <string>
#include <vector>
#include <boost/container/static_vector.hpp>
@@ -34,6 +33,8 @@ class KernelCore;
class ResourceLimit;
class Thread;
+struct CodeSet;
+
struct AddressMapping {
// Address and size must be page-aligned
VAddr address;
@@ -66,46 +67,6 @@ enum class ProcessStatus {
DebugBreak,
};
-struct CodeSet final {
- struct Segment {
- std::size_t offset = 0;
- VAddr addr = 0;
- u32 size = 0;
- };
-
- explicit CodeSet();
- ~CodeSet();
-
- Segment& CodeSegment() {
- return segments[0];
- }
-
- const Segment& CodeSegment() const {
- return segments[0];
- }
-
- Segment& RODataSegment() {
- return segments[1];
- }
-
- const Segment& RODataSegment() const {
- return segments[1];
- }
-
- Segment& DataSegment() {
- return segments[2];
- }
-
- const Segment& DataSegment() const {
- return segments[2];
- }
-
- std::shared_ptr<std::vector<u8>> memory;
-
- std::array<Segment, 3> segments;
- VAddr entrypoint = 0;
-};
-
class Process final : public WaitObject {
public:
enum : u64 {
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 5fccfd9f4..cc189cc64 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -96,7 +96,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
auto* const thread_owner_process = current_thread->GetOwnerProcess();
if (previous_process != thread_owner_process) {
system.Kernel().MakeCurrentProcess(thread_owner_process);
- SetCurrentPageTable(&thread_owner_process->VMManager().page_table);
+ Memory::SetCurrentPageTable(&thread_owner_process->VMManager().page_table);
}
cpu_core.LoadContext(new_thread->GetContext());
@@ -199,8 +199,7 @@ void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
ASSERT(thread->GetPriority() < THREADPRIO_COUNT);
// Yield this thread -- sleep for zero time and force reschedule to different thread
- WaitCurrentThread_Sleep();
- GetCurrentThread()->WakeAfterDelay(0);
+ GetCurrentThread()->Sleep(0);
}
void Scheduler::YieldWithLoadBalancing(Thread* thread) {
@@ -215,8 +214,7 @@ void Scheduler::YieldWithLoadBalancing(Thread* thread) {
ASSERT(priority < THREADPRIO_COUNT);
// Sleep for zero time to be able to force reschedule to different thread
- WaitCurrentThread_Sleep();
- GetCurrentThread()->WakeAfterDelay(0);
+ GetCurrentThread()->Sleep(0);
Thread* suggested_thread = nullptr;
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 599609944..a6a17efe7 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1285,10 +1285,14 @@ static ResultCode StartThread(Handle thread_handle) {
/// Called when a thread exits
static void ExitThread() {
- LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", Core::CurrentArmInterface().GetPC());
+ auto& system = Core::System::GetInstance();
- ExitCurrentThread();
- Core::System::GetInstance().PrepareReschedule();
+ LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
+
+ auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
+ current_thread->Stop();
+ system.CurrentScheduler().RemoveThread(current_thread);
+ system.PrepareReschedule();
}
/// Sleep the current thread
@@ -1301,32 +1305,32 @@ static void SleepThread(s64 nanoseconds) {
YieldAndWaitForLoadBalancing = -2,
};
+ auto& system = Core::System::GetInstance();
+ auto& scheduler = system.CurrentScheduler();
+ auto* const current_thread = scheduler.GetCurrentThread();
+
if (nanoseconds <= 0) {
- auto& scheduler{Core::System::GetInstance().CurrentScheduler()};
switch (static_cast<SleepType>(nanoseconds)) {
case SleepType::YieldWithoutLoadBalancing:
- scheduler.YieldWithoutLoadBalancing(GetCurrentThread());
+ scheduler.YieldWithoutLoadBalancing(current_thread);
break;
case SleepType::YieldWithLoadBalancing:
- scheduler.YieldWithLoadBalancing(GetCurrentThread());
+ scheduler.YieldWithLoadBalancing(current_thread);
break;
case SleepType::YieldAndWaitForLoadBalancing:
- scheduler.YieldAndWaitForLoadBalancing(GetCurrentThread());
+ scheduler.YieldAndWaitForLoadBalancing(current_thread);
break;
default:
UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
}
} else {
- // Sleep current thread and check for next thread to schedule
- WaitCurrentThread_Sleep();
-
- // Create an event to wake the thread up after the specified nanosecond delay has passed
- GetCurrentThread()->WakeAfterDelay(nanoseconds);
+ current_thread->Sleep(nanoseconds);
}
// Reschedule all CPU cores
- for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i)
- Core::System::GetInstance().CpuCore(i).PrepareReschedule();
+ for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i) {
+ system.CpuCore(i).PrepareReschedule();
+ }
}
/// Wait process wide key atomic
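
Editor's note: SleepThread() above treats non-positive durations as yield requests rather than actual sleeps. Only the -2 sentinel is visible in this hunk; the 0 and -1 values below are assumptions based on the usual svcSleepThread yield convention, and DescribeSleep() is a standalone stand-in rather than emulator code.

    #include <cstdint>
    #include <cstdio>

    enum class SleepType : std::int64_t {
        YieldWithoutLoadBalancing = 0,     // assumption: not shown in the hunk
        YieldWithLoadBalancing = -1,       // assumption: not shown in the hunk
        YieldAndWaitForLoadBalancing = -2, // visible in the hunk above
    };

    // Mirrors the dispatch in SleepThread(): positive values sleep, the three
    // sentinels select a scheduler yield variant.
    void DescribeSleep(std::int64_t nanoseconds) {
        if (nanoseconds > 0) {
            std::printf("sleep %lld ns, then reschedule\n",
                        static_cast<long long>(nanoseconds));
            return;
        }
        switch (static_cast<SleepType>(nanoseconds)) {
        case SleepType::YieldWithoutLoadBalancing:
            std::printf("yield to threads on the same core\n");
            break;
        case SleepType::YieldWithLoadBalancing:
            std::printf("yield and allow threads to migrate cores\n");
            break;
        case SleepType::YieldAndWaitForLoadBalancing:
            std::printf("yield and wait for another core to offload a thread\n");
            break;
        default:
            std::printf("unexpected yield value\n");
            break;
        }
    }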
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index eb54d6651..3b22e8e0d 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -7,8 +7,6 @@
#include <optional>
#include <vector>
-#include <boost/range/algorithm_ext/erase.hpp>
-
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
@@ -68,17 +66,6 @@ void Thread::Stop() {
owner_process->FreeTLSSlot(tls_address);
}
-void WaitCurrentThread_Sleep() {
- Thread* thread = GetCurrentThread();
- thread->SetStatus(ThreadStatus::WaitSleep);
-}
-
-void ExitCurrentThread() {
- Thread* thread = GetCurrentThread();
- thread->Stop();
- Core::System::GetInstance().CurrentScheduler().RemoveThread(thread);
-}
-
void Thread::WakeAfterDelay(s64 nanoseconds) {
// Don't schedule a wakeup if the thread wants to wait forever
if (nanoseconds == -1)
@@ -269,8 +256,8 @@ void Thread::AddMutexWaiter(SharedPtr<Thread> thread) {
if (thread->lock_owner == this) {
// If the thread is already waiting for this thread to release the mutex, ensure that the
// waiters list is consistent and return without doing anything.
- auto itr = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
- ASSERT(itr != wait_mutex_threads.end());
+ const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
+ ASSERT(iter != wait_mutex_threads.end());
return;
}
@@ -278,11 +265,16 @@ void Thread::AddMutexWaiter(SharedPtr<Thread> thread) {
ASSERT(thread->lock_owner == nullptr);
// Ensure that the thread is not already in the list of mutex waiters
- auto itr = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
- ASSERT(itr == wait_mutex_threads.end());
-
+ const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
+ ASSERT(iter == wait_mutex_threads.end());
+
+ // Keep the list in an ordered fashion
+ const auto insertion_point = std::find_if(
+ wait_mutex_threads.begin(), wait_mutex_threads.end(),
+ [&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); });
+ wait_mutex_threads.insert(insertion_point, thread);
thread->lock_owner = this;
- wait_mutex_threads.emplace_back(std::move(thread));
+
UpdatePriority();
}
@@ -290,32 +282,44 @@ void Thread::RemoveMutexWaiter(SharedPtr<Thread> thread) {
ASSERT(thread->lock_owner == this);
// Ensure that the thread is in the list of mutex waiters
- auto itr = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
- ASSERT(itr != wait_mutex_threads.end());
+ const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
+ ASSERT(iter != wait_mutex_threads.end());
+
+ wait_mutex_threads.erase(iter);
- boost::remove_erase(wait_mutex_threads, thread);
thread->lock_owner = nullptr;
UpdatePriority();
}
void Thread::UpdatePriority() {
- // Find the highest priority among all the threads that are waiting for this thread's lock
+ // If any of the threads waiting on the mutex have a higher priority
+ // (taking into account priority inheritance), then this thread inherits
+ // that thread's priority.
u32 new_priority = nominal_priority;
- for (const auto& thread : wait_mutex_threads) {
- if (thread->nominal_priority < new_priority)
- new_priority = thread->nominal_priority;
+ if (!wait_mutex_threads.empty()) {
+ if (wait_mutex_threads.front()->current_priority < new_priority) {
+ new_priority = wait_mutex_threads.front()->current_priority;
+ }
}
- if (new_priority == current_priority)
+ if (new_priority == current_priority) {
return;
+ }
scheduler->SetThreadPriority(this, new_priority);
-
current_priority = new_priority;
+ if (!lock_owner) {
+ return;
+ }
+
+ // Ensure that the thread is within the correct location in the waiting list.
+ auto old_owner = lock_owner;
+ lock_owner->RemoveMutexWaiter(this);
+ old_owner->AddMutexWaiter(this);
+
// Recursively update the priority of the thread that depends on the priority of this one.
- if (lock_owner)
- lock_owner->UpdatePriority();
+ lock_owner->UpdatePriority();
}
void Thread::ChangeCore(u32 core, u64 mask) {
@@ -391,6 +395,14 @@ void Thread::SetActivity(ThreadActivity value) {
}
}
+void Thread::Sleep(s64 nanoseconds) {
+ // Sleep current thread and check for next thread to schedule
+ SetStatus(ThreadStatus::WaitSleep);
+
+ // Create an event to wake the thread up after the specified nanosecond delay has passed
+ WakeAfterDelay(nanoseconds);
+}
+
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
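
Editor's note: the AddMutexWaiter()/UpdatePriority() changes above replace the scan over all waiters with an ordered list. Waiters are inserted sorted by priority (lower value means higher priority), so the lock owner only has to inspect front() to inherit the highest waiter priority. A standalone sketch of that invariant, using a Waiter stand-in rather than Kernel::Thread:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Waiter {
        unsigned priority; // lower value = higher priority, as in the kernel
    };

    // Same insertion rule as the new AddMutexWaiter(): insert before the first
    // entry with a strictly larger priority value, keeping the vector sorted.
    void AddWaiterSorted(std::vector<Waiter>& waiters, Waiter thread) {
        const auto insertion_point = std::find_if(
            waiters.begin(), waiters.end(),
            [&thread](const Waiter& entry) { return entry.priority > thread.priority; });
        waiters.insert(insertion_point, thread);
    }

    int main() {
        std::vector<Waiter> waiters;
        AddWaiterSorted(waiters, {44});
        AddWaiterSorted(waiters, {28});
        AddWaiterSorted(waiters, {59});
        // The highest-priority waiter ends up at the front, which is the value
        // UpdatePriority() compares against the owner's nominal priority.
        assert(waiters.front().priority == 28);
    }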
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index c48b21aba..faad5f391 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -383,6 +383,9 @@ public:
void SetActivity(ThreadActivity value);
+ /// Sleeps this thread for the given number of nanoseconds.
+ void Sleep(s64 nanoseconds);
+
private:
explicit Thread(KernelCore& kernel);
~Thread() override;
@@ -398,8 +401,14 @@ private:
VAddr entry_point = 0;
VAddr stack_top = 0;
- u32 nominal_priority = 0; ///< Nominal thread priority, as set by the emulated application
- u32 current_priority = 0; ///< Current thread priority, can be temporarily changed
+ /// Nominal thread priority, as set by the emulated application.
+ /// The nominal priority is the thread priority without priority
+ /// inheritance taken into account.
+ u32 nominal_priority = 0;
+
+ /// Current thread priority. This may change over the course of the
+ /// thread's lifetime in order to facilitate priority inheritance.
+ u32 current_priority = 0;
u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
u64 last_running_ticks = 0; ///< CPU tick when thread was last running
@@ -460,14 +469,4 @@ private:
*/
Thread* GetCurrentThread();
-/**
- * Waits the current thread on a sleep
- */
-void WaitCurrentThread_Sleep();
-
-/**
- * Stops the current thread and removes it from the thread_list
- */
-void ExitCurrentThread();
-
} // namespace Kernel
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 05c59af34..22bf55ce7 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -7,29 +7,29 @@
#include <utility>
#include "common/assert.h"
#include "common/logging/log.h"
+#include "common/memory_hook.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/memory.h"
-#include "core/memory_hook.h"
#include "core/memory_setup.h"
namespace Kernel {
namespace {
const char* GetMemoryStateName(MemoryState state) {
static constexpr const char* names[] = {
- "Unmapped", "Io",
- "Normal", "CodeStatic",
- "CodeMutable", "Heap",
- "Shared", "Unknown1",
- "ModuleCodeStatic", "ModuleCodeMutable",
- "IpcBuffer0", "Stack",
- "ThreadLocal", "TransferMemoryIsolated",
- "TransferMemory", "ProcessMemory",
- "Inaccessible", "IpcBuffer1",
- "IpcBuffer3", "KernelStack",
+ "Unmapped", "Io",
+ "Normal", "Code",
+ "CodeData", "Heap",
+ "Shared", "Unknown1",
+ "ModuleCode", "ModuleCodeData",
+ "IpcBuffer0", "Stack",
+ "ThreadLocal", "TransferMemoryIsolated",
+ "TransferMemory", "ProcessMemory",
+ "Inaccessible", "IpcBuffer1",
+ "IpcBuffer3", "KernelStack",
};
return names[ToSvcMemoryState(state)];
@@ -177,7 +177,7 @@ ResultVal<VAddr> VMManager::FindFreeRegion(u64 size) const {
ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size,
MemoryState state,
- Memory::MemoryHookPointer mmio_handler) {
+ Common::MemoryHookPointer mmio_handler) {
// This is the appropriately sized VMA that will turn into our allocation.
CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
VirtualMemoryArea& final_vma = vma_handle->second;
@@ -624,7 +624,7 @@ void VMManager::ClearPageTable() {
std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr);
page_table.special_regions.clear();
std::fill(page_table.attributes.begin(), page_table.attributes.end(),
- Memory::PageType::Unmapped);
+ Common::PageType::Unmapped);
}
VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, MemoryState state_mask,
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 88e0b3c02..7cdff6094 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -9,9 +9,10 @@
#include <tuple>
#include <vector>
#include "common/common_types.h"
+#include "common/memory_hook.h"
+#include "common/page_table.h"
#include "core/hle/result.h"
#include "core/memory.h"
-#include "core/memory_hook.h"
namespace FileSys {
enum class ProgramAddressSpaceType : u8;
@@ -164,12 +165,12 @@ enum class MemoryState : u32 {
Unmapped = 0x00,
Io = 0x01 | FlagMapped,
Normal = 0x02 | FlagMapped | FlagQueryPhysicalAddressAllowed,
- CodeStatic = 0x03 | CodeFlags | FlagMapProcess,
- CodeMutable = 0x04 | CodeFlags | FlagMapProcess | FlagCodeMemory,
+ Code = 0x03 | CodeFlags | FlagMapProcess,
+ CodeData = 0x04 | DataFlags | FlagMapProcess | FlagCodeMemory,
Heap = 0x05 | DataFlags | FlagCodeMemory,
Shared = 0x06 | FlagMapped | FlagMemoryPoolAllocated,
- ModuleCodeStatic = 0x08 | CodeFlags | FlagModule | FlagMapProcess,
- ModuleCodeMutable = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory,
+ ModuleCode = 0x08 | CodeFlags | FlagModule | FlagMapProcess,
+ ModuleCodeData = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory,
IpcBuffer0 = 0x0A | FlagMapped | FlagQueryPhysicalAddressAllowed | FlagMemoryPoolAllocated |
IPCFlags | FlagSharedDevice | FlagSharedDeviceAligned,
@@ -290,7 +291,7 @@ struct VirtualMemoryArea {
// Settings for type = MMIO
/// Physical address of the register area this VMA maps to.
PAddr paddr = 0;
- Memory::MemoryHookPointer mmio_handler = nullptr;
+ Common::MemoryHookPointer mmio_handler = nullptr;
/// Tests if this area can be merged to the right with `next`.
bool CanBeMergedWith(const VirtualMemoryArea& next) const;
@@ -368,7 +369,7 @@ public:
* @param mmio_handler The handler that will implement read and write for this MMIO region.
*/
ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state,
- Memory::MemoryHookPointer mmio_handler);
+ Common::MemoryHookPointer mmio_handler);
/// Unmaps a range of addresses, splitting VMAs as necessary.
ResultCode UnmapRange(VAddr target, u64 size);
@@ -509,7 +510,7 @@ public:
/// Each VMManager has its own page table, which is set as the main one when the owning process
/// is scheduled.
- Memory::PageTable page_table;
+ Common::PageTable page_table{Memory::PAGE_BITS};
private:
using VMAIter = VMAMap::iterator;
@@ -616,6 +617,9 @@ private:
VAddr new_map_region_base = 0;
VAddr new_map_region_end = 0;
+ VAddr main_code_region_base = 0;
+ VAddr main_code_region_end = 0;
+
VAddr tls_io_region_base = 0;
VAddr tls_io_region_end = 0;
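
Editor's note: tying the two vm_manager changes together, GetMemoryStateName() in vm_manager.cpp indexes its table with ToSvcMemoryState(state), and the enum above stores the SVC-visible type in the low bits (0x03 for Code, 0x04 for CodeData, and so on). The sketch below assumes ToSvcMemoryState() simply strips the flag bits, which the enum layout suggests but which is not shown in this diff; kStateNames is a local stand-in for the table in GetMemoryStateName().

    #include <array>
    #include <string_view>

    constexpr std::array<std::string_view, 20> kStateNames{
        "Unmapped",       "Io",             "Normal",       "Code",
        "CodeData",       "Heap",           "Shared",       "Unknown1",
        "ModuleCode",     "ModuleCodeData", "IpcBuffer0",   "Stack",
        "ThreadLocal",    "TransferMemoryIsolated",         "TransferMemory",
        "ProcessMemory",  "Inaccessible",   "IpcBuffer1",   "IpcBuffer3",
        "KernelStack",
    };

    // The renamed states keep their old numeric values, so they land on the
    // renamed table entries.
    static_assert(kStateNames[0x03] == "Code");           // previously "CodeStatic"
    static_assert(kStateNames[0x04] == "CodeData");       // previously "CodeMutable"
    static_assert(kStateNames[0x08] == "ModuleCode");     // previously "ModuleCodeStatic"
    static_assert(kStateNames[0x09] == "ModuleCodeData"); // previously "ModuleCodeMutable"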