Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/code_set.cpp          12
-rw-r--r--  src/core/hle/kernel/code_set.h            90
-rw-r--r--  src/core/hle/kernel/kernel.cpp             2
-rw-r--r--  src/core/hle/kernel/kernel.h               3
-rw-r--r--  src/core/hle/kernel/mutex.cpp             35
-rw-r--r--  src/core/hle/kernel/mutex.h               20
-rw-r--r--  src/core/hle/kernel/object.cpp             1
-rw-r--r--  src/core/hle/kernel/object.h               1
-rw-r--r--  src/core/hle/kernel/process.cpp           15
-rw-r--r--  src/core/hle/kernel/process.h             65
-rw-r--r--  src/core/hle/kernel/scheduler.cpp         42
-rw-r--r--  src/core/hle/kernel/scheduler.h            4
-rw-r--r--  src/core/hle/kernel/svc.cpp              146
-rw-r--r--  src/core/hle/kernel/thread.cpp             3
-rw-r--r--  src/core/hle/kernel/transfer_memory.cpp   73
-rw-r--r--  src/core/hle/kernel/transfer_memory.h     91
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp        96
-rw-r--r--  src/core/hle/kernel/vm_manager.h          60
18 files changed, 575 insertions(+), 184 deletions(-)
diff --git a/src/core/hle/kernel/code_set.cpp b/src/core/hle/kernel/code_set.cpp
new file mode 100644
index 000000000..1f434e9af
--- /dev/null
+++ b/src/core/hle/kernel/code_set.cpp
@@ -0,0 +1,12 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/code_set.h"
+
+namespace Kernel {
+
+CodeSet::CodeSet() = default;
+CodeSet::~CodeSet() = default;
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/code_set.h b/src/core/hle/kernel/code_set.h
new file mode 100644
index 000000000..834fd23d2
--- /dev/null
+++ b/src/core/hle/kernel/code_set.h
@@ -0,0 +1,90 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <cstddef>
+#include <memory>
+#include <vector>
+
+#include "common/common_types.h"
+
+namespace Kernel {
+
+/**
+ * Represents executable data that may be loaded into a kernel process.
+ *
+ * A code set consists of three basic segments:
+ * - A code (AKA text) segment,
+ * - A read-only data segment (rodata)
+ * - A data segment
+ *
+ * The code segment is the portion of the object file that contains
+ * executable instructions.
+ *
+ * The read-only data segment is the portion of the object file that
+ * contains (as one would expect) read-only data, such as fixed constant
+ * values and data structures.
+ *
+ * The data segment is similar to the read-only data segment -- it contains
+ * variables and data structures that have predefined values; however,
+ * entities within this segment can be modified.
+ */
+struct CodeSet final {
+ /// A single segment within a code set.
+ struct Segment final {
+ /// The byte offset that this segment is located at.
+ std::size_t offset = 0;
+
+ /// The address to map this segment to.
+ VAddr addr = 0;
+
+ /// The size of this segment in bytes.
+ u32 size = 0;
+ };
+
+ explicit CodeSet();
+ ~CodeSet();
+
+ CodeSet(const CodeSet&) = delete;
+ CodeSet& operator=(const CodeSet&) = delete;
+
+ CodeSet(CodeSet&&) = default;
+ CodeSet& operator=(CodeSet&&) = default;
+
+ Segment& CodeSegment() {
+ return segments[0];
+ }
+
+ const Segment& CodeSegment() const {
+ return segments[0];
+ }
+
+ Segment& RODataSegment() {
+ return segments[1];
+ }
+
+ const Segment& RODataSegment() const {
+ return segments[1];
+ }
+
+ Segment& DataSegment() {
+ return segments[2];
+ }
+
+ const Segment& DataSegment() const {
+ return segments[2];
+ }
+
+ /// The overall data that backs this code set.
+ std::shared_ptr<std::vector<u8>> memory;
+
+ /// The segments that comprise this code set.
+ std::array<Segment, 3> segments;
+
+ /// The entry point address for this code set.
+ VAddr entrypoint = 0;
+};
+
+} // namespace Kernel
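For reference, a minimal sketch of how a loader might populate the relocated CodeSet before handing it to Process::LoadModule; the BuildCodeSet helper and the page-sized segment layout are hypothetical, not part of this change:

    #include "core/hle/kernel/code_set.h"

    // Hypothetical helper: carve a flat program image into the three
    // segments CodeSet expects. Offsets/sizes are illustrative only.
    Kernel::CodeSet BuildCodeSet(std::vector<u8> program_image) {
        Kernel::CodeSet code_set;

        code_set.CodeSegment().offset = 0x0000;   // .text at image start
        code_set.CodeSegment().addr = 0x0000;
        code_set.CodeSegment().size = 0x1000;

        code_set.RODataSegment().offset = 0x1000; // .rodata follows .text
        code_set.RODataSegment().addr = 0x1000;
        code_set.RODataSegment().size = 0x1000;

        code_set.DataSegment().offset = 0x2000;   // .data follows .rodata
        code_set.DataSegment().addr = 0x2000;
        code_set.DataSegment().size = 0x1000;

        // All three segments reference one shared backing buffer.
        code_set.memory = std::make_shared<std::vector<u8>>(std::move(program_image));
        code_set.entrypoint = code_set.CodeSegment().addr;
        return code_set;
    }

Process::LoadModule then maps each segment with the permissions shown in the process.cpp hunk below.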
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 8de62d073..3b73be67b 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -29,7 +29,7 @@ namespace Kernel {
* @param thread_handle The handle of the thread that's been awoken
* @param cycles_late The number of CPU cycles that have passed since the desired wakeup time
*/
-static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_late) {
+static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
const auto proper_handle = static_cast<Handle>(thread_handle);
const auto& system = Core::System::GetInstance();
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index ff17ff865..03ea5b659 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -8,9 +8,6 @@
#include <unordered_map>
#include "core/hle/kernel/object.h"
-template <typename T>
-class ResultVal;
-
namespace Core {
class System;
}
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index 0743670ad..98e87313b 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -2,7 +2,6 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include <map>
#include <utility>
#include <vector>
@@ -10,8 +9,11 @@
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/object.h"
+#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"
#include "core/memory.h"
@@ -57,41 +59,47 @@ static void TransferMutexOwnership(VAddr mutex_addr, SharedPtr<Thread> current_t
}
}
-ResultCode Mutex::TryAcquire(HandleTable& handle_table, VAddr address, Handle holding_thread_handle,
+Mutex::Mutex(Core::System& system) : system{system} {}
+Mutex::~Mutex() = default;
+
+ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
Handle requesting_thread_handle) {
// The mutex address must be 4-byte aligned
if ((address % sizeof(u32)) != 0) {
return ERR_INVALID_ADDRESS;
}
+ const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ Thread* const current_thread = system.CurrentScheduler().GetCurrentThread();
SharedPtr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
SharedPtr<Thread> requesting_thread = handle_table.Get<Thread>(requesting_thread_handle);
// TODO(Subv): It is currently unknown if it is possible to lock a mutex on behalf of another
// thread.
- ASSERT(requesting_thread == GetCurrentThread());
+ ASSERT(requesting_thread == current_thread);
- u32 addr_value = Memory::Read32(address);
+ const u32 addr_value = Memory::Read32(address);
// If the mutex isn't being held, just return success.
if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
return RESULT_SUCCESS;
}
- if (holding_thread == nullptr)
+ if (holding_thread == nullptr) {
return ERR_INVALID_HANDLE;
+ }
// Wait until the mutex is released
- GetCurrentThread()->SetMutexWaitAddress(address);
- GetCurrentThread()->SetWaitHandle(requesting_thread_handle);
+ current_thread->SetMutexWaitAddress(address);
+ current_thread->SetWaitHandle(requesting_thread_handle);
- GetCurrentThread()->SetStatus(ThreadStatus::WaitMutex);
- GetCurrentThread()->InvalidateWakeupCallback();
+ current_thread->SetStatus(ThreadStatus::WaitMutex);
+ current_thread->InvalidateWakeupCallback();
// Update the lock holder thread's priority to prevent priority inversion.
- holding_thread->AddMutexWaiter(GetCurrentThread());
+ holding_thread->AddMutexWaiter(current_thread);
- Core::System::GetInstance().PrepareReschedule();
+ system.PrepareReschedule();
return RESULT_SUCCESS;
}
@@ -102,7 +110,8 @@ ResultCode Mutex::Release(VAddr address) {
return ERR_INVALID_ADDRESS;
}
- auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(GetCurrentThread(), address);
+ auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
+ auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(current_thread, address);
// There are no more threads waiting for the mutex, release it completely.
if (thread == nullptr) {
@@ -111,7 +120,7 @@ ResultCode Mutex::Release(VAddr address) {
}
// Transfer the ownership of the mutex from the previous owner to the new one.
- TransferMutexOwnership(address, GetCurrentThread(), thread);
+ TransferMutexOwnership(address, current_thread, thread);
u32 mutex_value = thread->GetWaitHandle();
diff --git a/src/core/hle/kernel/mutex.h b/src/core/hle/kernel/mutex.h
index 81e62d497..b904de2e8 100644
--- a/src/core/hle/kernel/mutex.h
+++ b/src/core/hle/kernel/mutex.h
@@ -5,32 +5,34 @@
#pragma once
#include "common/common_types.h"
-#include "core/hle/kernel/object.h"
union ResultCode;
-namespace Kernel {
+namespace Core {
+class System;
+}
-class HandleTable;
-class Thread;
+namespace Kernel {
class Mutex final {
public:
+ explicit Mutex(Core::System& system);
+ ~Mutex();
+
/// Flag that indicates that a mutex still has threads waiting for it.
static constexpr u32 MutexHasWaitersFlag = 0x40000000;
/// Mask of the bits in a mutex address value that contain the mutex owner.
static constexpr u32 MutexOwnerMask = 0xBFFFFFFF;
/// Attempts to acquire a mutex at the specified address.
- static ResultCode TryAcquire(HandleTable& handle_table, VAddr address,
- Handle holding_thread_handle, Handle requesting_thread_handle);
+ ResultCode TryAcquire(VAddr address, Handle holding_thread_handle,
+ Handle requesting_thread_handle);
/// Releases the mutex at the specified address.
- static ResultCode Release(VAddr address);
+ ResultCode Release(VAddr address);
private:
- Mutex() = default;
- ~Mutex() = default;
+ Core::System& system;
};
} // namespace Kernel
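With Mutex now being per-process state constructed against a Core::System instance instead of a set of static functions, call sites change roughly as follows (a sketch mirroring the svc.cpp hunks below):

    // Before: static interface, handle table supplied by the caller.
    //   Mutex::TryAcquire(handle_table, mutex_addr, holding_thread_handle,
    //                     requesting_thread_handle);

    // After: the mutex instance is owned by (and fetched from) the
    // current process.
    auto* const current_process = system.Kernel().CurrentProcess();
    Kernel::Mutex& mutex = current_process->GetMutex();

    const ResultCode acquire_result =
        mutex.TryAcquire(mutex_addr, holding_thread_handle, requesting_thread_handle);
    const ResultCode release_result = mutex.Release(mutex_addr);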
diff --git a/src/core/hle/kernel/object.cpp b/src/core/hle/kernel/object.cpp
index 8870463d0..217144efc 100644
--- a/src/core/hle/kernel/object.cpp
+++ b/src/core/hle/kernel/object.cpp
@@ -23,6 +23,7 @@ bool Object::IsWaitable() const {
case HandleType::Unknown:
case HandleType::WritableEvent:
case HandleType::SharedMemory:
+ case HandleType::TransferMemory:
case HandleType::AddressArbiter:
case HandleType::ResourceLimit:
case HandleType::ClientPort:
diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h
index 4c2505908..3f6baa094 100644
--- a/src/core/hle/kernel/object.h
+++ b/src/core/hle/kernel/object.h
@@ -22,6 +22,7 @@ enum class HandleType : u32 {
WritableEvent,
ReadableEvent,
SharedMemory,
+ TransferMemory,
Thread,
Process,
AddressArbiter,
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 65c51003d..0d782e4ba 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -9,6 +9,7 @@
#include "common/logging/log.h"
#include "core/core.h"
#include "core/file_sys/program_metadata.h"
+#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
@@ -50,9 +51,6 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_poi
}
} // Anonymous namespace
-CodeSet::CodeSet() = default;
-CodeSet::~CodeSet() = default;
-
SharedPtr<Process> Process::Create(Core::System& system, std::string&& name) {
auto& kernel = system.Kernel();
@@ -212,7 +210,7 @@ void Process::FreeTLSSlot(VAddr tls_address) {
}
void Process::LoadModule(CodeSet module_, VAddr base_addr) {
- const auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions,
+ const auto MapSegment = [&](const CodeSet::Segment& segment, VMAPermission permissions,
MemoryState memory_state) {
const auto vma = vm_manager
.MapMemoryBlock(segment.addr + base_addr, module_.memory,
@@ -222,16 +220,17 @@ void Process::LoadModule(CodeSet module_, VAddr base_addr) {
};
// Map CodeSet segments
- MapSegment(module_.CodeSegment(), VMAPermission::ReadExecute, MemoryState::CodeStatic);
- MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeMutable);
- MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeMutable);
+ MapSegment(module_.CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code);
+ MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData);
+ MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData);
// Clear instruction cache in CPU JIT
system.InvalidateCpuInstructionCaches();
}
Process::Process(Core::System& system)
- : WaitObject{system.Kernel()}, address_arbiter{system}, system{system} {}
+ : WaitObject{system.Kernel()}, address_arbiter{system}, mutex{system}, system{system} {}
+
Process::~Process() = default;
void Process::Acquire(Thread* thread) {
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 47ffd4ad3..a0217d3d8 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -7,13 +7,13 @@
#include <array>
#include <bitset>
#include <cstddef>
-#include <memory>
#include <string>
#include <vector>
#include <boost/container/static_vector.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/process_capability.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/hle/kernel/wait_object.h"
@@ -33,13 +33,7 @@ class KernelCore;
class ResourceLimit;
class Thread;
-struct AddressMapping {
- // Address and size must be page-aligned
- VAddr address;
- u64 size;
- bool read_only;
- bool unk_flag;
-};
+struct CodeSet;
enum class MemoryRegion : u16 {
APPLICATION = 1,
@@ -65,46 +59,6 @@ enum class ProcessStatus {
DebugBreak,
};
-struct CodeSet final {
- struct Segment {
- std::size_t offset = 0;
- VAddr addr = 0;
- u32 size = 0;
- };
-
- explicit CodeSet();
- ~CodeSet();
-
- Segment& CodeSegment() {
- return segments[0];
- }
-
- const Segment& CodeSegment() const {
- return segments[0];
- }
-
- Segment& RODataSegment() {
- return segments[1];
- }
-
- const Segment& RODataSegment() const {
- return segments[1];
- }
-
- Segment& DataSegment() {
- return segments[2];
- }
-
- const Segment& DataSegment() const {
- return segments[2];
- }
-
- std::shared_ptr<std::vector<u8>> memory;
-
- std::array<Segment, 3> segments;
- VAddr entrypoint = 0;
-};
-
class Process final : public WaitObject {
public:
enum : u64 {
@@ -165,6 +119,16 @@ public:
return address_arbiter;
}
+ /// Gets a reference to the process' mutex lock.
+ Mutex& GetMutex() {
+ return mutex;
+ }
+
+ /// Gets a const reference to the process' mutex lock.
+ const Mutex& GetMutex() const {
+ return mutex;
+ }
+
/// Gets the current status of the process
ProcessStatus GetStatus() const {
return status;
@@ -327,6 +291,11 @@ private:
/// Per-process address arbiter.
AddressArbiter address_arbiter;
+ /// The per-process mutex lock instance used to service various
+ /// forms of synchronization, such as lock arbitration and
+ /// condition variable related facilities.
+ Mutex mutex;
+
/// Random values for svcGetInfo RandomEntropy
std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy;
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index cc189cc64..6d0f13ecf 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -30,7 +30,7 @@ Scheduler::~Scheduler() {
bool Scheduler::HaveReadyThreads() const {
std::lock_guard<std::mutex> lock(scheduler_mutex);
- return ready_queue.get_first() != nullptr;
+ return !ready_queue.empty();
}
Thread* Scheduler::GetCurrentThread() const {
@@ -46,22 +46,27 @@ Thread* Scheduler::PopNextReadyThread() {
Thread* thread = GetCurrentThread();
if (thread && thread->GetStatus() == ThreadStatus::Running) {
+ if (ready_queue.empty()) {
+ return thread;
+ }
- // We have to do better than the current thread.
- // This call returns null when that's not possible.
+ // We have to do better than the current thread; only switch if the
+ // front of the ready queue has a strictly lower priority value.
- next = ready_queue.pop_first_better(thread->GetPriority());
- if (!next) {
- // Otherwise just keep going with the current thread
+ next = ready_queue.front();
+ if (next == nullptr || next->GetPriority() >= thread->GetPriority()) {
next = thread;
}
} else {
- next = ready_queue.pop_first();
+ if (ready_queue.empty()) {
+ return nullptr;
+ }
+ next = ready_queue.front();
}
return next;
}
void Scheduler::SwitchContext(Thread* new_thread) {
- Thread* const previous_thread = GetCurrentThread();
+ Thread* previous_thread = GetCurrentThread();
Process* const previous_process = system.Kernel().CurrentProcess();
UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -75,7 +80,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
if (previous_thread->GetStatus() == ThreadStatus::Running) {
// This is only the case when a reschedule is triggered without the current thread
// yielding execution (i.e. an event triggered, system core time-sliced, etc)
- ready_queue.push_front(previous_thread->GetPriority(), previous_thread);
+ ready_queue.add(previous_thread, previous_thread->GetPriority(), false);
previous_thread->SetStatus(ThreadStatus::Ready);
}
}
@@ -90,7 +95,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
current_thread = new_thread;
- ready_queue.remove(new_thread->GetPriority(), new_thread);
+ ready_queue.remove(new_thread, new_thread->GetPriority());
new_thread->SetStatus(ThreadStatus::Running);
auto* const thread_owner_process = current_thread->GetOwnerProcess();
@@ -147,7 +152,6 @@ void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) {
std::lock_guard<std::mutex> lock(scheduler_mutex);
thread_list.push_back(std::move(thread));
- ready_queue.prepare(priority);
}
void Scheduler::RemoveThread(Thread* thread) {
@@ -161,33 +165,37 @@ void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
std::lock_guard<std::mutex> lock(scheduler_mutex);
ASSERT(thread->GetStatus() == ThreadStatus::Ready);
- ready_queue.push_back(priority, thread);
+ ready_queue.add(thread, priority);
}
void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
std::lock_guard<std::mutex> lock(scheduler_mutex);
ASSERT(thread->GetStatus() == ThreadStatus::Ready);
- ready_queue.remove(priority, thread);
+ ready_queue.remove(thread, priority);
}
void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
std::lock_guard<std::mutex> lock(scheduler_mutex);
+ if (thread->GetPriority() == priority) {
+ return;
+ }
// If thread was ready, adjust queues
if (thread->GetStatus() == ThreadStatus::Ready)
- ready_queue.move(thread, thread->GetPriority(), priority);
- else
- ready_queue.prepare(priority);
+ ready_queue.adjust(thread, thread->GetPriority(), priority);
}
Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
std::lock_guard<std::mutex> lock(scheduler_mutex);
const u32 mask = 1U << core;
- return ready_queue.get_first_filter([mask, maximum_priority](Thread const* thread) {
- return (thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority;
- });
+ for (auto* thread : ready_queue) {
+ if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) {
+ return thread;
+ }
+ }
+ return nullptr;
}
void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 1c5bf57d9..44baeb713 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -7,7 +7,7 @@
#include <mutex>
#include <vector>
#include "common/common_types.h"
-#include "common/thread_queue_list.h"
+#include "common/multi_level_queue.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/thread.h"
@@ -156,7 +156,7 @@ private:
std::vector<SharedPtr<Thread>> thread_list;
/// Lists only ready thread ids.
- Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
+ Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
SharedPtr<Thread> current_thread = nullptr;
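For context, the shape of the Common::MultiLevelQueue interface as inferred purely from the call sites in this diff; parameter names, defaults, and return types are assumptions, and the real declaration lives in common/multi_level_queue.h:

    namespace Common {

    template <typename T, std::size_t Depth>
    class MultiLevelQueue {
    public:
        // Enqueue an element at a priority level; the bool selects
        // front/back insertion (exact name and default assumed).
        void add(T element, u32 priority, bool at_front = true);

        // Dequeue a specific element from its priority level.
        void remove(T element, u32 priority);

        // Move an element between priority levels in one step,
        // replacing the old move()/prepare() pair.
        void adjust(T element, u32 old_priority, u32 new_priority);

        // Highest-priority (lowest value) element, and emptiness check.
        T front() const;
        bool empty() const;

        // Iterable from highest to lowest priority, as used by
        // Scheduler::GetNextSuggestedThread above.
    };

    } // namespace Common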
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index d40a2226b..11796e5e5 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -32,6 +32,7 @@
#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/svc_wrap.h"
#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/transfer_memory.h"
#include "core/hle/kernel/writable_event.h"
#include "core/hle/lock.h"
#include "core/hle/result.h"
@@ -174,11 +175,8 @@ static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
return ERR_INVALID_SIZE;
}
- auto& vm_manager = Core::CurrentProcess()->VMManager();
- const VAddr heap_base = vm_manager.GetHeapRegionBaseAddress();
- const auto alloc_result =
- vm_manager.HeapAllocate(heap_base, heap_size, VMAPermission::ReadWrite);
-
+ auto& vm_manager = Core::System::GetInstance().Kernel().CurrentProcess()->VMManager();
+ const auto alloc_result = vm_manager.SetHeapSize(heap_size);
if (alloc_result.Failed()) {
return alloc_result.Code();
}
@@ -551,9 +549,9 @@ static ResultCode ArbitrateLock(Handle holding_thread_handle, VAddr mutex_addr,
return ERR_INVALID_ADDRESS;
}
- auto& handle_table = Core::CurrentProcess()->GetHandleTable();
- return Mutex::TryAcquire(handle_table, mutex_addr, holding_thread_handle,
- requesting_thread_handle);
+ auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess();
+ return current_process->GetMutex().TryAcquire(mutex_addr, holding_thread_handle,
+ requesting_thread_handle);
}
/// Unlock a mutex
@@ -571,7 +569,8 @@ static ResultCode ArbitrateUnlock(VAddr mutex_addr) {
return ERR_INVALID_ADDRESS;
}
- return Mutex::Release(mutex_addr);
+ auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess();
+ return current_process->GetMutex().Release(mutex_addr);
}
enum class BreakType : u32 {
@@ -807,7 +806,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
return RESULT_SUCCESS;
case GetInfoType::TotalHeapUsage:
- *result = process->VMManager().GetTotalHeapUsage();
+ *result = process->VMManager().GetCurrentHeapSize();
return RESULT_SUCCESS;
case GetInfoType::IsVirtualAddressMemoryEnabled:
@@ -1340,11 +1339,15 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var
"called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}",
mutex_addr, condition_variable_addr, thread_handle, nano_seconds);
- const auto& handle_table = Core::CurrentProcess()->GetHandleTable();
+ auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess();
+ const auto& handle_table = current_process->GetHandleTable();
SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
ASSERT(thread);
- CASCADE_CODE(Mutex::Release(mutex_addr));
+ const auto release_result = current_process->GetMutex().Release(mutex_addr);
+ if (release_result.IsError()) {
+ return release_result;
+ }
SharedPtr<Thread> current_thread = GetCurrentThread();
current_thread->SetCondVarWaitAddress(condition_variable_addr);
@@ -1582,14 +1585,121 @@ static ResultCode CreateTransferMemory(Handle* handle, VAddr addr, u64 size, u32
}
auto& kernel = Core::System::GetInstance().Kernel();
- auto process = kernel.CurrentProcess();
- auto& handle_table = process->GetHandleTable();
- const auto shared_mem_handle = SharedMemory::Create(kernel, process, size, perms, perms, addr);
+ auto transfer_mem_handle = TransferMemory::Create(kernel, addr, size, perms);
- CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle));
+ auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
+ const auto result = handle_table.Create(std::move(transfer_mem_handle));
+ if (result.Failed()) {
+ return result.Code();
+ }
+
+ *handle = *result;
return RESULT_SUCCESS;
}
+static ResultCode MapTransferMemory(Handle handle, VAddr address, u64 size, u32 permission_raw) {
+ LOG_DEBUG(Kernel_SVC,
+ "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}, permissions=0x{:08X}",
+ handle, address, size, permission_raw);
+
+ if (!Common::Is4KBAligned(address)) {
+ LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (size=0x{:016X}).",
+ address);
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).",
+ size);
+ return ERR_INVALID_SIZE;
+ }
+
+ if (!IsValidAddressRange(address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Given address and size overflows the 64-bit range (address=0x{:016X}, "
+ "size=0x{:016X}).",
+ address, size);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ const auto permissions = static_cast<MemoryPermission>(permission_raw);
+ if (permissions != MemoryPermission::None && permissions != MemoryPermission::Read &&
+ permissions != MemoryPermission::ReadWrite) {
+ LOG_ERROR(Kernel_SVC, "Invalid transfer memory permissions given (permissions=0x{:08X}).",
+ permission_raw);
+ return ERR_INVALID_STATE;
+ }
+
+ const auto& kernel = Core::System::GetInstance().Kernel();
+ const auto* const current_process = kernel.CurrentProcess();
+ const auto& handle_table = current_process->GetHandleTable();
+
+ auto transfer_memory = handle_table.Get<TransferMemory>(handle);
+ if (!transfer_memory) {
+ LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).",
+ handle);
+ return ERR_INVALID_HANDLE;
+ }
+
+ if (!current_process->VMManager().IsWithinASLRRegion(address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Given address and size don't fully fit within the ASLR region "
+ "(address=0x{:016X}, size=0x{:016X}).",
+ address, size);
+ return ERR_INVALID_MEMORY_RANGE;
+ }
+
+ return transfer_memory->MapMemory(address, size, permissions);
+}
+
+static ResultCode UnmapTransferMemory(Handle handle, VAddr address, u64 size) {
+ LOG_DEBUG(Kernel_SVC, "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}", handle,
+ address, size);
+
+ if (!Common::Is4KBAligned(address)) {
+ LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (size=0x{:016X}).",
+ address);
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).",
+ size);
+ return ERR_INVALID_SIZE;
+ }
+
+ if (!IsValidAddressRange(address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Given address and size overflows the 64-bit range (address=0x{:016X}, "
+ "size=0x{:016X}).",
+ address, size);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ const auto& kernel = Core::System::GetInstance().Kernel();
+ const auto* const current_process = kernel.CurrentProcess();
+ const auto& handle_table = current_process->GetHandleTable();
+
+ auto transfer_memory = handle_table.Get<TransferMemory>(handle);
+ if (!transfer_memory) {
+ LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).",
+ handle);
+ return ERR_INVALID_HANDLE;
+ }
+
+ if (!current_process->VMManager().IsWithinASLRRegion(address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Given address and size don't fully fit within the ASLR region "
+ "(address=0x{:016X}, size=0x{:016X}).",
+ address, size);
+ return ERR_INVALID_MEMORY_RANGE;
+ }
+
+ return transfer_memory->UnmapMemory(address, size);
+}
+
static ResultCode GetThreadCoreMask(Handle thread_handle, u32* core, u64* mask) {
LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
@@ -1965,8 +2075,8 @@ static const FunctionDef SVC_Table[] = {
{0x4E, nullptr, "ReadWriteRegister"},
{0x4F, nullptr, "SetProcessActivity"},
{0x50, SvcWrap<CreateSharedMemory>, "CreateSharedMemory"},
- {0x51, nullptr, "MapTransferMemory"},
- {0x52, nullptr, "UnmapTransferMemory"},
+ {0x51, SvcWrap<MapTransferMemory>, "MapTransferMemory"},
+ {0x52, SvcWrap<UnmapTransferMemory>, "UnmapTransferMemory"},
{0x53, nullptr, "CreateInterruptEvent"},
{0x54, nullptr, "QueryPhysicalAddress"},
{0x55, nullptr, "QueryIoMapping"},
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 79c80bb01..e5853c46f 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -315,8 +315,9 @@ void Thread::UpdatePriority() {
}
// Ensure that the thread is within the correct location in the waiting list.
+ auto old_owner = lock_owner;
lock_owner->RemoveMutexWaiter(this);
- lock_owner->AddMutexWaiter(this);
+ old_owner->AddMutexWaiter(this);
// Recursively update the priority of the thread that depends on the priority of this one.
lock_owner->UpdatePriority();
diff --git a/src/core/hle/kernel/transfer_memory.cpp b/src/core/hle/kernel/transfer_memory.cpp
new file mode 100644
index 000000000..23228e1b5
--- /dev/null
+++ b/src/core/hle/kernel/transfer_memory.cpp
@@ -0,0 +1,73 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/shared_memory.h"
+#include "core/hle/kernel/transfer_memory.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+TransferMemory::TransferMemory(KernelCore& kernel) : Object{kernel} {}
+TransferMemory::~TransferMemory() = default;
+
+SharedPtr<TransferMemory> TransferMemory::Create(KernelCore& kernel, VAddr base_address,
+ size_t size, MemoryPermission permissions) {
+ SharedPtr<TransferMemory> transfer_memory{new TransferMemory(kernel)};
+
+ transfer_memory->base_address = base_address;
+ transfer_memory->memory_size = size;
+ transfer_memory->owner_permissions = permissions;
+ transfer_memory->owner_process = kernel.CurrentProcess();
+
+ return transfer_memory;
+}
+
+ResultCode TransferMemory::MapMemory(VAddr address, size_t size, MemoryPermission permissions) {
+ if (memory_size != size) {
+ return ERR_INVALID_SIZE;
+ }
+
+ if (owner_permissions != permissions) {
+ return ERR_INVALID_STATE;
+ }
+
+ if (is_mapped) {
+ return ERR_INVALID_STATE;
+ }
+
+ const auto map_state = owner_permissions == MemoryPermission::None
+ ? MemoryState::TransferMemoryIsolated
+ : MemoryState::TransferMemory;
+ auto& vm_manager = owner_process->VMManager();
+ const auto map_result = vm_manager.MapMemoryBlock(
+ address, std::make_shared<std::vector<u8>>(size), 0, size, map_state);
+
+ if (map_result.Failed()) {
+ return map_result.Code();
+ }
+
+ is_mapped = true;
+ return RESULT_SUCCESS;
+}
+
+ResultCode TransferMemory::UnmapMemory(VAddr address, size_t size) {
+ if (memory_size != size) {
+ return ERR_INVALID_SIZE;
+ }
+
+ auto& vm_manager = owner_process->VMManager();
+ const auto result = vm_manager.UnmapRange(address, size);
+
+ if (result.IsError()) {
+ return result;
+ }
+
+ is_mapped = false;
+ return RESULT_SUCCESS;
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/transfer_memory.h b/src/core/hle/kernel/transfer_memory.h
new file mode 100644
index 000000000..ec294951e
--- /dev/null
+++ b/src/core/hle/kernel/transfer_memory.h
@@ -0,0 +1,91 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "core/hle/kernel/object.h"
+
+union ResultCode;
+
+namespace Kernel {
+
+class KernelCore;
+class Process;
+
+enum class MemoryPermission : u32;
+
+/// Defines the interface for transfer memory objects.
+///
+/// Transfer memory is typically used to transfer
+/// memory between separate process instances,
+/// thus the name.
+///
+class TransferMemory final : public Object {
+public:
+ static constexpr HandleType HANDLE_TYPE = HandleType::TransferMemory;
+
+ static SharedPtr<TransferMemory> Create(KernelCore& kernel, VAddr base_address, size_t size,
+ MemoryPermission permissions);
+
+ TransferMemory(const TransferMemory&) = delete;
+ TransferMemory& operator=(const TransferMemory&) = delete;
+
+ TransferMemory(TransferMemory&&) = delete;
+ TransferMemory& operator=(TransferMemory&&) = delete;
+
+ std::string GetTypeName() const override {
+ return "TransferMemory";
+ }
+
+ std::string GetName() const override {
+ return GetTypeName();
+ }
+
+ HandleType GetHandleType() const override {
+ return HANDLE_TYPE;
+ }
+
+ /// Attempts to map transfer memory with the given range and memory permissions.
+ ///
+ /// @param address The base address to begin mapping memory at.
+ /// @param size The size of the memory to map, in bytes.
+ /// @param permissions The memory permissions to check against when mapping memory.
+ ///
+ /// @pre The given address, size, and memory permissions must all match
+ /// the same values that were given when creating the transfer memory
+ /// instance.
+ ///
+ ResultCode MapMemory(VAddr address, size_t size, MemoryPermission permissions);
+
+ /// Unmaps the transfer memory with the given range
+ ///
+ /// @param address The base address to begin unmapping memory at.
+ /// @param size The size of the memory to unmap, in bytes.
+ ///
+ /// @pre The given address and size must be the same as the ones used
+ /// to create the transfer memory instance.
+ ///
+ ResultCode UnmapMemory(VAddr address, size_t size);
+
+private:
+ explicit TransferMemory(KernelCore& kernel);
+ ~TransferMemory() override;
+
+ /// The base address for the memory managed by this instance.
+ VAddr base_address = 0;
+
+ /// Size of the memory, in bytes, that this instance manages.
+ size_t memory_size = 0;
+
+ /// The memory permissions that are applied to this instance.
+ MemoryPermission owner_permissions{};
+
+ /// The process that this transfer memory instance was created under.
+ Process* owner_process = nullptr;
+
+ /// Whether or not this transfer memory instance has mapped memory.
+ bool is_mapped = false;
+};
+
+} // namespace Kernel
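A sketch of the intended object lifecycle, following the Create/Map/Unmap flow that the svc.cpp handlers above drive; the addresses and size here are hypothetical:

    // Created by svcCreateTransferMemory from the caller's address range.
    Kernel::SharedPtr<Kernel::TransferMemory> transfer_mem =
        Kernel::TransferMemory::Create(kernel, /*base_address=*/0x10000000,
                                       /*size=*/0x4000,
                                       Kernel::MemoryPermission::ReadWrite);

    // Mapping must reuse the creation-time size and permissions;
    // mapping while already mapped yields ERR_INVALID_STATE.
    const ResultCode map_result = transfer_mem->MapMemory(
        /*address=*/0x20000000, /*size=*/0x4000, Kernel::MemoryPermission::ReadWrite);

    // Unmapping must reuse the creation-time size.
    const ResultCode unmap_result =
        transfer_mem->UnmapMemory(/*address=*/0x20000000, /*size=*/0x4000);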
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 3def3e52c..ec0a480ce 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -20,16 +20,16 @@ namespace Kernel {
namespace {
const char* GetMemoryStateName(MemoryState state) {
static constexpr const char* names[] = {
- "Unmapped", "Io",
- "Normal", "CodeStatic",
- "CodeMutable", "Heap",
- "Shared", "Unknown1",
- "ModuleCodeStatic", "ModuleCodeMutable",
- "IpcBuffer0", "Stack",
- "ThreadLocal", "TransferMemoryIsolated",
- "TransferMemory", "ProcessMemory",
- "Inaccessible", "IpcBuffer1",
- "IpcBuffer3", "KernelStack",
+ "Unmapped", "Io",
+ "Normal", "Code",
+ "CodeData", "Heap",
+ "Shared", "Unknown1",
+ "ModuleCode", "ModuleCodeData",
+ "IpcBuffer0", "Stack",
+ "ThreadLocal", "TransferMemoryIsolated",
+ "TransferMemory", "ProcessMemory",
+ "Inaccessible", "IpcBuffer1",
+ "IpcBuffer3", "KernelStack",
};
return names[ToSvcMemoryState(state)];
@@ -256,57 +256,50 @@ ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_p
return RESULT_SUCCESS;
}
-ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission perms) {
- if (!IsWithinHeapRegion(target, size)) {
- return ERR_INVALID_ADDRESS;
+ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
+ if (size > GetHeapRegionSize()) {
+ return ERR_OUT_OF_MEMORY;
+ }
+
+ // No need to do any additional work if the heap is already the given size.
+ if (size == GetCurrentHeapSize()) {
+ return MakeResult(heap_region_base);
}
if (heap_memory == nullptr) {
// Initialize heap
- heap_memory = std::make_shared<std::vector<u8>>();
- heap_start = heap_end = target;
+ heap_memory = std::make_shared<std::vector<u8>>(size);
+ heap_end = heap_region_base + size;
} else {
- UnmapRange(heap_start, heap_end - heap_start);
- }
-
- // If necessary, expand backing vector to cover new heap extents.
- if (target < heap_start) {
- heap_memory->insert(begin(*heap_memory), heap_start - target, 0);
- heap_start = target;
- RefreshMemoryBlockMappings(heap_memory.get());
- }
- if (target + size > heap_end) {
- heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0);
- heap_end = target + size;
- RefreshMemoryBlockMappings(heap_memory.get());
+ UnmapRange(heap_region_base, GetCurrentHeapSize());
}
- ASSERT(heap_end - heap_start == heap_memory->size());
- CASCADE_RESULT(auto vma, MapMemoryBlock(target, heap_memory, target - heap_start, size,
- MemoryState::Heap));
- Reprotect(vma, perms);
+ // If necessary, expand backing vector to cover new heap extents in
+ // the case of allocating. Otherwise, shrink the backing memory,
+ // if a smaller heap has been requested.
+ const u64 old_heap_size = GetCurrentHeapSize();
+ if (size > old_heap_size) {
+ const u64 alloc_size = size - old_heap_size;
- heap_used = size;
-
- return MakeResult<VAddr>(heap_end - size);
-}
+ heap_memory->insert(heap_memory->end(), alloc_size, 0);
+ RefreshMemoryBlockMappings(heap_memory.get());
+ } else if (size < old_heap_size) {
+ heap_memory->resize(size);
+ heap_memory->shrink_to_fit();
-ResultCode VMManager::HeapFree(VAddr target, u64 size) {
- if (!IsWithinHeapRegion(target, size)) {
- return ERR_INVALID_ADDRESS;
+ RefreshMemoryBlockMappings(heap_memory.get());
}
- if (size == 0) {
- return RESULT_SUCCESS;
- }
+ heap_end = heap_region_base + size;
+ ASSERT(GetCurrentHeapSize() == heap_memory->size());
- const ResultCode result = UnmapRange(target, size);
- if (result.IsError()) {
- return result;
+ const auto mapping_result =
+ MapMemoryBlock(heap_region_base, heap_memory, 0, size, MemoryState::Heap);
+ if (mapping_result.Failed()) {
+ return mapping_result.Code();
}
- heap_used -= size;
- return RESULT_SUCCESS;
+ return MakeResult<VAddr>(heap_region_base);
}
MemoryInfo VMManager::QueryMemory(VAddr address) const {
@@ -598,6 +591,7 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty
heap_region_base = map_region_end;
heap_region_end = heap_region_base + heap_region_size;
+ heap_end = heap_region_base;
new_map_region_base = heap_region_end;
new_map_region_end = new_map_region_base + new_map_region_size;
@@ -692,10 +686,6 @@ u64 VMManager::GetTotalMemoryUsage() const {
return 0xF8000000;
}
-u64 VMManager::GetTotalHeapUsage() const {
- return heap_used;
-}
-
VAddr VMManager::GetAddressSpaceBaseAddress() const {
return address_space_base;
}
@@ -778,6 +768,10 @@ u64 VMManager::GetHeapRegionSize() const {
return heap_region_end - heap_region_base;
}
+u64 VMManager::GetCurrentHeapSize() const {
+ return heap_end - heap_region_base;
+}
+
bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const {
return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(),
GetHeapRegionEndAddress());
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index b96980f8f..6f484b7bf 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -165,12 +165,12 @@ enum class MemoryState : u32 {
Unmapped = 0x00,
Io = 0x01 | FlagMapped,
Normal = 0x02 | FlagMapped | FlagQueryPhysicalAddressAllowed,
- CodeStatic = 0x03 | CodeFlags | FlagMapProcess,
- CodeMutable = 0x04 | CodeFlags | FlagMapProcess | FlagCodeMemory,
+ Code = 0x03 | CodeFlags | FlagMapProcess,
+ CodeData = 0x04 | DataFlags | FlagMapProcess | FlagCodeMemory,
Heap = 0x05 | DataFlags | FlagCodeMemory,
Shared = 0x06 | FlagMapped | FlagMemoryPoolAllocated,
- ModuleCodeStatic = 0x08 | CodeFlags | FlagModule | FlagMapProcess,
- ModuleCodeMutable = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory,
+ ModuleCode = 0x08 | CodeFlags | FlagModule | FlagMapProcess,
+ ModuleCodeData = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory,
IpcBuffer0 = 0x0A | FlagMapped | FlagQueryPhysicalAddressAllowed | FlagMemoryPoolAllocated |
IPCFlags | FlagSharedDevice | FlagSharedDeviceAligned,
@@ -380,11 +380,41 @@ public:
/// Changes the permissions of a range of addresses, splitting VMAs as necessary.
ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms);
- ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
- ResultCode HeapFree(VAddr target, u64 size);
-
ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state);
+ /// Attempts to allocate a heap with the given size.
+ ///
+ /// @param size The size of the heap to allocate in bytes.
+ ///
+ /// @note If a heap is currently allocated, and this is called
+ /// with a size that is equal to the size of the current heap,
+ /// then this function will do nothing and return the current
+ /// heap's starting address, as there's no need to perform
+ /// any additional heap allocation work.
+ ///
+ /// @note If a heap is currently allocated, and this is called
+ /// with a size less than the current heap's size, then
+ /// this function will attempt to shrink the heap.
+ ///
+ /// @note If a heap is currently allocated, and this is called
+ /// with a size larger than the current heap's size, then
+ /// this function will attempt to extend the size of the heap.
+ ///
+ /// @returns A result indicating either success or failure.
+ /// <p>
+ /// If successful, this function will return a result
+ /// containing the starting address to the allocated heap.
+ /// <p>
+ /// If unsuccessful, this function will return a result
+ /// containing an error code.
+ ///
+ /// @pre The given size must lie within the allowable heap
+ /// memory region managed by this VMManager instance.
+ /// Failure to abide by this will result in ERR_OUT_OF_MEMORY
+ /// being returned as the result.
+ ///
+ ResultVal<VAddr> SetHeapSize(u64 size);
+
/// Queries the memory manager for information about the given address.
///
/// @param address The address to query the memory manager about for information.
@@ -418,9 +448,6 @@ public:
/// Gets the total memory usage, used by svcGetInfo
u64 GetTotalMemoryUsage() const;
- /// Gets the total heap usage, used by svcGetInfo
- u64 GetTotalHeapUsage() const;
-
/// Gets the address space base address
VAddr GetAddressSpaceBaseAddress() const;
@@ -469,6 +496,13 @@ public:
/// Gets the total size of the heap region in bytes.
u64 GetHeapRegionSize() const;
+ /// Gets the total size of the current heap in bytes.
+ ///
+ /// @note This is the current allocated heap size, not the size
+ /// of the region it's allowed to exist within.
+ ///
+ u64 GetCurrentHeapSize() const;
+
/// Determines whether or not the specified range is within the heap region.
bool IsWithinHeapRegion(VAddr address, u64 size) const;
@@ -625,9 +659,9 @@ private:
// This makes deallocation and reallocation of holes fast and keeps process memory contiguous
// in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
std::shared_ptr<std::vector<u8>> heap_memory;
- // The left/right bounds of the address space covered by heap_memory.
- VAddr heap_start = 0;
+
+ // The end of the currently allocated heap. This is not an inclusive
+ // end of the range. This is essentially 'base_address + current_size'.
VAddr heap_end = 0;
- u64 heap_used = 0;
};
} // namespace Kernel
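Taken together, the old HeapAllocate/HeapFree pair collapses into the single SetHeapSize entry point; a sketch of its grow/no-op/shrink semantics as documented above (the sizes are hypothetical):

    auto& vm_manager = process->VMManager();

    // First call allocates; the heap is always anchored at the region base.
    const auto grown = vm_manager.SetHeapSize(0x200000);    // 2 MiB
    ASSERT(*grown == vm_manager.GetHeapRegionBaseAddress());

    // Same size again: no remapping work, the base address is returned as-is.
    const auto same = vm_manager.SetHeapSize(0x200000);

    // Smaller size: the backing vector is shrunk and remapped.
    const auto shrunk = vm_manager.SetHeapSize(0x100000);   // 1 MiB
    ASSERT(vm_manager.GetCurrentHeapSize() == 0x100000);

    // Requests beyond the heap region fail up front.
    const auto too_big = vm_manager.SetHeapSize(vm_manager.GetHeapRegionSize() + 1);
    ASSERT(too_big.Failed() && too_big.Code() == ERR_OUT_OF_MEMORY);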