Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp | 322
-rw-r--r--  src/core/hle/kernel/address_arbiter.h | 94
-rw-r--r--  src/core/hle/kernel/client_port.cpp | 7
-rw-r--r--  src/core/hle/kernel/client_port.h | 2
-rw-r--r--  src/core/hle/kernel/client_session.cpp | 24
-rw-r--r--  src/core/hle/kernel/client_session.h | 19
-rw-r--r--  src/core/hle/kernel/errors.h | 40
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.cpp | 52
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.h | 86
-rw-r--r--  src/core/hle/kernel/handle_table.cpp | 22
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp | 75
-rw-r--r--  src/core/hle/kernel/hle_ipc.h | 48
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.cpp | 341
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.h | 70
-rw-r--r--  src/core/hle/kernel/k_affinity_mask.h | 58
-rw-r--r--  src/core/hle/kernel/k_condition_variable.cpp | 345
-rw-r--r--  src/core/hle/kernel/k_condition_variable.h | 59
-rw-r--r--  src/core/hle/kernel/k_event.cpp | 32
-rw-r--r--  src/core/hle/kernel/k_event.h | 57
-rw-r--r--  src/core/hle/kernel/k_light_condition_variable.h | 57
-rw-r--r--  src/core/hle/kernel/k_light_lock.cpp | 130
-rw-r--r--  src/core/hle/kernel/k_light_lock.h | 41
-rw-r--r--  src/core/hle/kernel/k_priority_queue.h | 451
-rw-r--r--  src/core/hle/kernel/k_readable_event.cpp | 56
-rw-r--r--  src/core/hle/kernel/k_readable_event.h | 51
-rw-r--r--  src/core/hle/kernel/k_resource_limit.cpp | 152
-rw-r--r--  src/core/hle/kernel/k_resource_limit.h | 81
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp | 814
-rw-r--r--  src/core/hle/kernel/k_scheduler.h | 207
-rw-r--r--  src/core/hle/kernel/k_scheduler_lock.h | 75
-rw-r--r--  src/core/hle/kernel/k_scoped_lock.h | 41
-rw-r--r--  src/core/hle/kernel/k_scoped_resource_reservation.h | 67
-rw-r--r--  src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h | 46
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.cpp | 171
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.h | 59
-rw-r--r--  src/core/hle/kernel/k_thread.cpp | 1048
-rw-r--r--  src/core/hle/kernel/k_thread.h | 768
-rw-r--r--  src/core/hle/kernel/k_thread_queue.h | 81
-rw-r--r--  src/core/hle/kernel/k_writable_event.cpp | 27
-rw-r--r--  src/core/hle/kernel/k_writable_event.h | 44
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 324
-rw-r--r--  src/core/hle/kernel/kernel.h | 68
-rw-r--r--  src/core/hle/kernel/memory/address_space_info.cpp | 2
-rw-r--r--  src/core/hle/kernel/memory/memory_block.h | 20
-rw-r--r--  src/core/hle/kernel/memory/memory_block_manager.h | 4
-rw-r--r--  src/core/hle/kernel/memory/memory_layout.h | 19
-rw-r--r--  src/core/hle/kernel/memory/memory_manager.cpp | 6
-rw-r--r--  src/core/hle/kernel/memory/page_heap.h | 4
-rw-r--r--  src/core/hle/kernel/memory/page_table.cpp | 106
-rw-r--r--  src/core/hle/kernel/mutex.cpp | 170
-rw-r--r--  src/core/hle/kernel/mutex.h | 42
-rw-r--r--  src/core/hle/kernel/object.cpp | 6
-rw-r--r--  src/core/hle/kernel/object.h | 14
-rw-r--r--  src/core/hle/kernel/physical_core.cpp | 52
-rw-r--r--  src/core/hle/kernel/physical_core.h | 44
-rw-r--r--  src/core/hle/kernel/process.cpp | 225
-rw-r--r--  src/core/hle/kernel/process.h | 169
-rw-r--r--  src/core/hle/kernel/process_capability.cpp | 40
-rw-r--r--  src/core/hle/kernel/readable_event.cpp | 54
-rw-r--r--  src/core/hle/kernel/readable_event.h | 57
-rw-r--r--  src/core/hle/kernel/resource_limit.cpp | 73
-rw-r--r--  src/core/hle/kernel/resource_limit.h | 104
-rw-r--r--  src/core/hle/kernel/scheduler.cpp | 845
-rw-r--r--  src/core/hle/kernel/scheduler.h | 318
-rw-r--r--  src/core/hle/kernel/server_port.cpp | 20
-rw-r--r--  src/core/hle/kernel/server_port.h | 9
-rw-r--r--  src/core/hle/kernel/server_session.cpp | 71
-rw-r--r--  src/core/hle/kernel/server_session.h | 45
-rw-r--r--  src/core/hle/kernel/service_thread.cpp | 110
-rw-r--r--  src/core/hle/kernel/service_thread.h | 28
-rw-r--r--  src/core/hle/kernel/session.cpp | 22
-rw-r--r--  src/core/hle/kernel/session.h | 8
-rw-r--r--  src/core/hle/kernel/shared_memory.cpp | 11
-rw-r--r--  src/core/hle/kernel/shared_memory.h | 2
-rw-r--r--  src/core/hle/kernel/svc.cpp | 1357
-rw-r--r--  src/core/hle/kernel/svc_common.h | 14
-rw-r--r--  src/core/hle/kernel/svc_results.h | 41
-rw-r--r--  src/core/hle/kernel/svc_types.h | 34
-rw-r--r--  src/core/hle/kernel/svc_wrap.h | 103
-rw-r--r--  src/core/hle/kernel/synchronization.cpp | 115
-rw-r--r--  src/core/hle/kernel/synchronization.h | 44
-rw-r--r--  src/core/hle/kernel/synchronization_object.cpp | 49
-rw-r--r--  src/core/hle/kernel/synchronization_object.h | 76
-rw-r--r--  src/core/hle/kernel/thread.cpp | 540
-rw-r--r--  src/core/hle/kernel/thread.h | 681
-rw-r--r--  src/core/hle/kernel/time_manager.cpp | 45
-rw-r--r--  src/core/hle/kernel/time_manager.h | 12
-rw-r--r--  src/core/hle/kernel/transfer_memory.cpp | 2
-rw-r--r--  src/core/hle/kernel/transfer_memory.h | 2
-rw-r--r--  src/core/hle/kernel/writable_event.cpp | 45
-rw-r--r--  src/core/hle/kernel/writable_event.h | 59
91 files changed, 7422 insertions, 5109 deletions
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
deleted file mode 100644
index b882eaa0f..000000000
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <algorithm>
-#include <vector>
-
-#include "common/assert.h"
-#include "common/common_types.h"
-#include "core/arm/exclusive_monitor.h"
-#include "core/core.h"
-#include "core/hle/kernel/address_arbiter.h"
-#include "core/hle/kernel/errors.h"
-#include "core/hle/kernel/handle_table.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/scheduler.h"
-#include "core/hle/kernel/thread.h"
-#include "core/hle/kernel/time_manager.h"
-#include "core/hle/result.h"
-#include "core/memory.h"
-
-namespace Kernel {
-
-// Wake up num_to_wake (or all) threads in a vector.
-void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads,
- s32 num_to_wake) {
- // Only process up to 'target' threads, unless 'target' is <= 0, in which case process
- // them all.
- std::size_t last = waiting_threads.size();
- if (num_to_wake > 0) {
- last = std::min(last, static_cast<std::size_t>(num_to_wake));
- }
-
- // Signal the waiting threads.
- for (std::size_t i = 0; i < last; i++) {
- waiting_threads[i]->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
- RemoveThread(waiting_threads[i]);
- waiting_threads[i]->WaitForArbitration(false);
- waiting_threads[i]->ResumeFromWait();
- }
-}
-
-AddressArbiter::AddressArbiter(Core::System& system) : system{system} {}
-AddressArbiter::~AddressArbiter() = default;
-
-ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 value,
- s32 num_to_wake) {
- switch (type) {
- case SignalType::Signal:
- return SignalToAddressOnly(address, num_to_wake);
- case SignalType::IncrementAndSignalIfEqual:
- return IncrementAndSignalToAddressIfEqual(address, value, num_to_wake);
- case SignalType::ModifyByWaitingCountAndSignalIfEqual:
- return ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, num_to_wake);
- default:
- return ERR_INVALID_ENUM_VALUE;
- }
-}
-
-ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
- SchedulerLock lock(system.Kernel());
- const std::vector<std::shared_ptr<Thread>> waiting_threads =
- GetThreadsWaitingOnAddress(address);
- WakeThreads(waiting_threads, num_to_wake);
- return RESULT_SUCCESS;
-}
-
-ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
- s32 num_to_wake) {
- SchedulerLock lock(system.Kernel());
- auto& memory = system.Memory();
-
- // Ensure that we can write to the address.
- if (!memory.IsValidVirtualAddress(address)) {
- return ERR_INVALID_ADDRESS_STATE;
- }
-
- const std::size_t current_core = system.CurrentCoreIndex();
- auto& monitor = system.Monitor();
- u32 current_value;
- do {
- current_value = monitor.ExclusiveRead32(current_core, address);
-
- if (current_value != static_cast<u32>(value)) {
- return ERR_INVALID_STATE;
- }
- current_value++;
- } while (!monitor.ExclusiveWrite32(current_core, address, current_value));
-
- return SignalToAddressOnly(address, num_to_wake);
-}
-
-ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
- s32 num_to_wake) {
- SchedulerLock lock(system.Kernel());
- auto& memory = system.Memory();
-
- // Ensure that we can write to the address.
- if (!memory.IsValidVirtualAddress(address)) {
- return ERR_INVALID_ADDRESS_STATE;
- }
-
- // Get threads waiting on the address.
- const std::vector<std::shared_ptr<Thread>> waiting_threads =
- GetThreadsWaitingOnAddress(address);
-
- const std::size_t current_core = system.CurrentCoreIndex();
- auto& monitor = system.Monitor();
- s32 updated_value;
- do {
- updated_value = monitor.ExclusiveRead32(current_core, address);
-
- if (updated_value != value) {
- return ERR_INVALID_STATE;
- }
- // Determine the modified value depending on the waiting count.
- if (num_to_wake <= 0) {
- if (waiting_threads.empty()) {
- updated_value = value + 1;
- } else {
- updated_value = value - 1;
- }
- } else {
- if (waiting_threads.empty()) {
- updated_value = value + 1;
- } else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
- updated_value = value - 1;
- } else {
- updated_value = value;
- }
- }
- } while (!monitor.ExclusiveWrite32(current_core, address, updated_value));
-
- WakeThreads(waiting_threads, num_to_wake);
- return RESULT_SUCCESS;
-}
-
-ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s32 value,
- s64 timeout_ns) {
- switch (type) {
- case ArbitrationType::WaitIfLessThan:
- return WaitForAddressIfLessThan(address, value, timeout_ns, false);
- case ArbitrationType::DecrementAndWaitIfLessThan:
- return WaitForAddressIfLessThan(address, value, timeout_ns, true);
- case ArbitrationType::WaitIfEqual:
- return WaitForAddressIfEqual(address, value, timeout_ns);
- default:
- return ERR_INVALID_ENUM_VALUE;
- }
-}
-
-ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
- bool should_decrement) {
- auto& memory = system.Memory();
- auto& kernel = system.Kernel();
- Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
-
- Handle event_handle = InvalidHandle;
- {
- SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
-
- if (current_thread->IsPendingTermination()) {
- lock.CancelSleep();
- return ERR_THREAD_TERMINATING;
- }
-
- // Ensure that we can read the address.
- if (!memory.IsValidVirtualAddress(address)) {
- lock.CancelSleep();
- return ERR_INVALID_ADDRESS_STATE;
- }
-
- s32 current_value = static_cast<s32>(memory.Read32(address));
- if (current_value >= value) {
- lock.CancelSleep();
- return ERR_INVALID_STATE;
- }
-
- current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
-
- s32 decrement_value;
-
- const std::size_t current_core = system.CurrentCoreIndex();
- auto& monitor = system.Monitor();
- do {
- current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
- if (should_decrement) {
- decrement_value = current_value - 1;
- } else {
- decrement_value = current_value;
- }
- } while (
- !monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value)));
-
- // Short-circuit without rescheduling, if timeout is zero.
- if (timeout == 0) {
- lock.CancelSleep();
- return RESULT_TIMEOUT;
- }
-
- current_thread->SetArbiterWaitAddress(address);
- InsertThread(SharedFrom(current_thread));
- current_thread->SetStatus(ThreadStatus::WaitArb);
- current_thread->WaitForArbitration(true);
- }
-
- if (event_handle != InvalidHandle) {
- auto& time_manager = kernel.TimeManager();
- time_manager.UnscheduleTimeEvent(event_handle);
- }
-
- {
- SchedulerLock lock(kernel);
- if (current_thread->IsWaitingForArbitration()) {
- RemoveThread(SharedFrom(current_thread));
- current_thread->WaitForArbitration(false);
- }
- }
-
- return current_thread->GetSignalingResult();
-}
-
-ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
- auto& memory = system.Memory();
- auto& kernel = system.Kernel();
- Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
-
- Handle event_handle = InvalidHandle;
- {
- SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
-
- if (current_thread->IsPendingTermination()) {
- lock.CancelSleep();
- return ERR_THREAD_TERMINATING;
- }
-
- // Ensure that we can read the address.
- if (!memory.IsValidVirtualAddress(address)) {
- lock.CancelSleep();
- return ERR_INVALID_ADDRESS_STATE;
- }
-
- s32 current_value = static_cast<s32>(memory.Read32(address));
- if (current_value != value) {
- lock.CancelSleep();
- return ERR_INVALID_STATE;
- }
-
- // Short-circuit without rescheduling, if timeout is zero.
- if (timeout == 0) {
- lock.CancelSleep();
- return RESULT_TIMEOUT;
- }
-
- current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
- current_thread->SetArbiterWaitAddress(address);
- InsertThread(SharedFrom(current_thread));
- current_thread->SetStatus(ThreadStatus::WaitArb);
- current_thread->WaitForArbitration(true);
- }
-
- if (event_handle != InvalidHandle) {
- auto& time_manager = kernel.TimeManager();
- time_manager.UnscheduleTimeEvent(event_handle);
- }
-
- {
- SchedulerLock lock(kernel);
- if (current_thread->IsWaitingForArbitration()) {
- RemoveThread(SharedFrom(current_thread));
- current_thread->WaitForArbitration(false);
- }
- }
-
- return current_thread->GetSignalingResult();
-}
-
-void AddressArbiter::HandleWakeupThread(std::shared_ptr<Thread> thread) {
- ASSERT(thread->GetStatus() == ThreadStatus::WaitArb);
- RemoveThread(thread);
- thread->SetArbiterWaitAddress(0);
-}
-
-void AddressArbiter::InsertThread(std::shared_ptr<Thread> thread) {
- const VAddr arb_addr = thread->GetArbiterWaitAddress();
- std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
-
- const auto iter =
- std::find_if(thread_list.cbegin(), thread_list.cend(), [&thread](const auto& entry) {
- return entry->GetPriority() >= thread->GetPriority();
- });
-
- if (iter == thread_list.cend()) {
- thread_list.push_back(std::move(thread));
- } else {
- thread_list.insert(iter, std::move(thread));
- }
-}
-
-void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) {
- const VAddr arb_addr = thread->GetArbiterWaitAddress();
- std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
-
- const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(),
- [&thread](const auto& entry) { return thread == entry; });
-
- if (iter != thread_list.cend()) {
- thread_list.erase(iter);
- }
-}
-
-std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(
- VAddr address) const {
- const auto iter = arb_threads.find(address);
- if (iter == arb_threads.cend()) {
- return {};
- }
-
- const std::list<std::shared_ptr<Thread>>& thread_list = iter->second;
- return {thread_list.cbegin(), thread_list.cend()};
-}
-} // namespace Kernel
diff --git a/src/core/hle/kernel/address_arbiter.h b/src/core/hle/kernel/address_arbiter.h
deleted file mode 100644
index 0b05d533c..000000000
--- a/src/core/hle/kernel/address_arbiter.h
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <list>
-#include <memory>
-#include <unordered_map>
-#include <vector>
-
-#include "common/common_types.h"
-
-union ResultCode;
-
-namespace Core {
-class System;
-}
-
-namespace Kernel {
-
-class Thread;
-
-class AddressArbiter {
-public:
- enum class ArbitrationType {
- WaitIfLessThan = 0,
- DecrementAndWaitIfLessThan = 1,
- WaitIfEqual = 2,
- };
-
- enum class SignalType {
- Signal = 0,
- IncrementAndSignalIfEqual = 1,
- ModifyByWaitingCountAndSignalIfEqual = 2,
- };
-
- explicit AddressArbiter(Core::System& system);
- ~AddressArbiter();
-
- AddressArbiter(const AddressArbiter&) = delete;
- AddressArbiter& operator=(const AddressArbiter&) = delete;
-
- AddressArbiter(AddressArbiter&&) = default;
- AddressArbiter& operator=(AddressArbiter&&) = delete;
-
- /// Signals an address being waited on with a particular signaling type.
- ResultCode SignalToAddress(VAddr address, SignalType type, s32 value, s32 num_to_wake);
-
- /// Waits on an address with a particular arbitration type.
- ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns);
-
-/// Removes a thread from the container and resets its address arbiter address to 0
- void HandleWakeupThread(std::shared_ptr<Thread> thread);
-
-private:
- /// Signals an address being waited on.
- ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake);
-
- /// Signals an address being waited on and increments its value if equal to the value argument.
- ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake);
-
- /// Signals an address being waited on and modifies its value based on waiting thread count if
- /// equal to the value argument.
- ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
- s32 num_to_wake);
-
- /// Waits on an address if the value passed is less than the argument value,
- /// optionally decrementing.
- ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
- bool should_decrement);
-
- /// Waits on an address if the value passed is equal to the argument value.
- ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);
-
- /// Wake up num_to_wake (or all) threads in a vector.
- void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake);
-
- /// Insert a thread into the address arbiter container
- void InsertThread(std::shared_ptr<Thread> thread);
-
- /// Removes a thread from the address arbiter container
- void RemoveThread(std::shared_ptr<Thread> thread);
-
- // Gets the threads waiting on an address.
- std::vector<std::shared_ptr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const;
-
- /// List of threads waiting for an address arbiter
- std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> arb_threads;
-
- Core::System& system;
-};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/client_port.cpp b/src/core/hle/kernel/client_port.cpp
index 8aff2227a..0b6957e31 100644
--- a/src/core/hle/kernel/client_port.cpp
+++ b/src/core/hle/kernel/client_port.cpp
@@ -4,11 +4,11 @@
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
-#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/session.h"
+#include "core/hle/kernel/svc_results.h"
namespace Kernel {
@@ -21,7 +21,7 @@ std::shared_ptr<ServerPort> ClientPort::GetServerPort() const {
ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
if (active_sessions >= max_sessions) {
- return ERR_MAX_CONNECTIONS_REACHED;
+ return ResultMaxConnectionsReached;
}
active_sessions++;
@@ -33,9 +33,6 @@ ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
server_port->AppendPendingSession(std::move(server));
}
- // Wake the threads waiting on the ServerPort
- server_port->Signal();
-
return MakeResult(std::move(client));
}
diff --git a/src/core/hle/kernel/client_port.h b/src/core/hle/kernel/client_port.h
index 9762bbf0d..77559ebf9 100644
--- a/src/core/hle/kernel/client_port.h
+++ b/src/core/hle/kernel/client_port.h
@@ -51,6 +51,8 @@ public:
*/
void ConnectionClosed();
+ void Finalize() override {}
+
private:
std::shared_ptr<ServerPort> server_port; ///< ServerPort associated with this client port.
u32 max_sessions = 0; ///< Maximum number of simultaneous sessions the port can have
diff --git a/src/core/hle/kernel/client_session.cpp b/src/core/hle/kernel/client_session.cpp
index 5ab204b9b..e230f365a 100644
--- a/src/core/hle/kernel/client_session.cpp
+++ b/src/core/hle/kernel/client_session.cpp
@@ -3,16 +3,16 @@
// Refer to the license.txt file included.
#include "core/hle/kernel/client_session.h"
-#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
-#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/svc_results.h"
#include "core/hle/result.h"
namespace Kernel {
-ClientSession::ClientSession(KernelCore& kernel) : SynchronizationObject{kernel} {}
+ClientSession::ClientSession(KernelCore& kernel) : KSynchronizationObject{kernel} {}
ClientSession::~ClientSession() {
// This destructor will be called automatically when the last ClientSession handle is closed by
@@ -22,15 +22,6 @@ ClientSession::~ClientSession() {
}
}
-bool ClientSession::ShouldWait(const Thread* thread) const {
- UNIMPLEMENTED();
- return {};
-}
-
-void ClientSession::Acquire(Thread* thread) {
- UNIMPLEMENTED();
-}
-
bool ClientSession::IsSignaled() const {
UNIMPLEMENTED();
return true;
@@ -47,15 +38,16 @@ ResultVal<std::shared_ptr<ClientSession>> ClientSession::Create(KernelCore& kern
return MakeResult(std::move(client_session));
}
-ResultCode ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread,
- Core::Memory::Memory& memory) {
+ResultCode ClientSession::SendSyncRequest(std::shared_ptr<KThread> thread,
+ Core::Memory::Memory& memory,
+ Core::Timing::CoreTiming& core_timing) {
// Keep ServerSession alive until we're done working with it.
if (!parent->Server()) {
- return ERR_SESSION_CLOSED_BY_REMOTE;
+ return ResultSessionClosedByRemote;
}
// Signal the server session that new data is available
- return parent->Server()->HandleSyncRequest(std::move(thread), memory);
+ return parent->Server()->HandleSyncRequest(std::move(thread), memory, core_timing);
}
} // namespace Kernel
diff --git a/src/core/hle/kernel/client_session.h b/src/core/hle/kernel/client_session.h
index c5f760d7d..85aafeaf4 100644
--- a/src/core/hle/kernel/client_session.h
+++ b/src/core/hle/kernel/client_session.h
@@ -7,7 +7,7 @@
#include <memory>
#include <string>
-#include "core/hle/kernel/synchronization_object.h"
+#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/result.h"
union ResultCode;
@@ -16,13 +16,17 @@ namespace Core::Memory {
class Memory;
}
+namespace Core::Timing {
+class CoreTiming;
+}
+
namespace Kernel {
class KernelCore;
class Session;
-class Thread;
+class KThread;
-class ClientSession final : public SynchronizationObject {
+class ClientSession final : public KSynchronizationObject {
public:
explicit ClientSession(KernelCore& kernel);
~ClientSession() override;
@@ -42,14 +46,13 @@ public:
return HANDLE_TYPE;
}
- ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory);
-
- bool ShouldWait(const Thread* thread) const override;
-
- void Acquire(Thread* thread) override;
+ ResultCode SendSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory,
+ Core::Timing::CoreTiming& core_timing);
bool IsSignaled() const override;
+ void Finalize() override {}
+
private:
static ResultVal<std::shared_ptr<ClientSession>> Create(KernelCore& kernel,
std::shared_ptr<Session> parent,
diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h
deleted file mode 100644
index d4e5d88cf..000000000
--- a/src/core/hle/kernel/errors.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include "core/hle/result.h"
-
-namespace Kernel {
-
-// Confirmed Switch kernel error codes
-
-constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7};
-constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14};
-constexpr ResultCode ERR_THREAD_TERMINATING{ErrorModule::Kernel, 59};
-constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101};
-constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102};
-constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103};
-constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104};
-constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105};
-constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106};
-constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS{ErrorModule::Kernel, 108};
-constexpr ResultCode ERR_INVALID_MEMORY_RANGE{ErrorModule::Kernel, 110};
-constexpr ResultCode ERR_INVALID_PROCESSOR_ID{ErrorModule::Kernel, 113};
-constexpr ResultCode ERR_INVALID_THREAD_PRIORITY{ErrorModule::Kernel, 112};
-constexpr ResultCode ERR_INVALID_HANDLE{ErrorModule::Kernel, 114};
-constexpr ResultCode ERR_INVALID_POINTER{ErrorModule::Kernel, 115};
-constexpr ResultCode ERR_INVALID_COMBINATION{ErrorModule::Kernel, 116};
-constexpr ResultCode RESULT_TIMEOUT{ErrorModule::Kernel, 117};
-constexpr ResultCode ERR_SYNCHRONIZATION_CANCELED{ErrorModule::Kernel, 118};
-constexpr ResultCode ERR_OUT_OF_RANGE{ErrorModule::Kernel, 119};
-constexpr ResultCode ERR_INVALID_ENUM_VALUE{ErrorModule::Kernel, 120};
-constexpr ResultCode ERR_NOT_FOUND{ErrorModule::Kernel, 121};
-constexpr ResultCode ERR_BUSY{ErrorModule::Kernel, 122};
-constexpr ResultCode ERR_SESSION_CLOSED_BY_REMOTE{ErrorModule::Kernel, 123};
-constexpr ResultCode ERR_INVALID_STATE{ErrorModule::Kernel, 125};
-constexpr ResultCode ERR_RESERVED_VALUE{ErrorModule::Kernel, 126};
-constexpr ResultCode ERR_RESOURCE_LIMIT_EXCEEDED{ErrorModule::Kernel, 132};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
new file mode 100644
index 000000000..c6838649f
--- /dev/null
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -0,0 +1,52 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <mutex>
+
+#include "common/assert.h"
+#include "core/core.h"
+#include "core/hle/kernel/global_scheduler_context.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/kernel.h"
+
+namespace Kernel {
+
+GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
+ : kernel{kernel}, scheduler_lock{kernel} {}
+
+GlobalSchedulerContext::~GlobalSchedulerContext() = default;
+
+void GlobalSchedulerContext::AddThread(std::shared_ptr<KThread> thread) {
+ std::scoped_lock lock{global_list_guard};
+ thread_list.push_back(std::move(thread));
+}
+
+void GlobalSchedulerContext::RemoveThread(std::shared_ptr<KThread> thread) {
+ std::scoped_lock lock{global_list_guard};
+ thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
+ thread_list.end());
+}
+
+void GlobalSchedulerContext::PreemptThreads() {
+ // The priority levels at which the global scheduler preempts threads every 10 ms. They are
+ // ordered from Core 0 to Core 3.
+ static constexpr std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities{
+ 59,
+ 59,
+ 59,
+ 63,
+ };
+
+ ASSERT(IsLocked());
+ for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+ const u32 priority = preemption_priorities[core_id];
+ kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
+ }
+}
+
+bool GlobalSchedulerContext::IsLocked() const {
+ return scheduler_lock.IsLockedByCurrentThread();
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
new file mode 100644
index 000000000..11592843e
--- /dev/null
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -0,0 +1,86 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <atomic>
+#include <vector>
+
+#include "common/common_types.h"
+#include "common/spin_lock.h"
+#include "core/hardware_properties.h"
+#include "core/hle/kernel/k_priority_queue.h"
+#include "core/hle/kernel/k_scheduler_lock.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/svc_types.h"
+
+namespace Kernel {
+
+class KernelCore;
+class SchedulerLock;
+
+using KSchedulerPriorityQueue =
+ KPriorityQueue<KThread, Core::Hardware::NUM_CPU_CORES, Svc::LowestThreadPriority,
+ Svc::HighestThreadPriority>;
+
+static constexpr s32 HighestCoreMigrationAllowedPriority = 2;
+static_assert(Svc::LowestThreadPriority >= HighestCoreMigrationAllowedPriority);
+static_assert(Svc::HighestThreadPriority <= HighestCoreMigrationAllowedPriority);
+
+class GlobalSchedulerContext final {
+ friend class KScheduler;
+
+public:
+ using LockType = KAbstractSchedulerLock<KScheduler>;
+
+ explicit GlobalSchedulerContext(KernelCore& kernel);
+ ~GlobalSchedulerContext();
+
+ /// Adds a new thread to the scheduler
+ void AddThread(std::shared_ptr<KThread> thread);
+
+ /// Removes a thread from the scheduler
+ void RemoveThread(std::shared_ptr<KThread> thread);
+
+ /// Returns a list of all threads managed by the scheduler
+ [[nodiscard]] const std::vector<std::shared_ptr<KThread>>& GetThreadList() const {
+ return thread_list;
+ }
+
+ /**
+ * Rotates the scheduling queues of threads at a preemption priority and then does
+ * some core rebalancing. Preemption priorities can be found in the array
+ * 'preemption_priorities'.
+ *
+ * @note This operation happens every 10ms.
+ */
+ void PreemptThreads();
+
+ /// Returns true if the global scheduler lock is acquired
+ bool IsLocked() const;
+
+ [[nodiscard]] LockType& SchedulerLock() {
+ return scheduler_lock;
+ }
+
+ [[nodiscard]] const LockType& SchedulerLock() const {
+ return scheduler_lock;
+ }
+
+private:
+ friend class KScopedSchedulerLock;
+ friend class KScopedSchedulerLockAndSleep;
+
+ KernelCore& kernel;
+
+ std::atomic_bool scheduler_update_needed{};
+ KSchedulerPriorityQueue priority_queue;
+ LockType scheduler_lock;
+
+ /// Lists all thread ids that aren't deleted/etc.
+ std::vector<std::shared_ptr<KThread>> thread_list;
+ Common::SpinLock global_list_guard{};
+};
+
+} // namespace Kernel
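
Everything that mutates scheduling state in this change runs under the lock declared above, acquired RAII-style. A minimal sketch of the pattern as it appears throughout the new files (the function is illustrative; KScopedSchedulerLock itself lands in k_scheduler.h):

// Illustrative only: the critical-section pattern used by the new scheduler code.
void ExampleSchedulerCriticalSection(Kernel::KernelCore& kernel) {
    // Acquire the global scheduler lock for the lifetime of 'sl'.
    Kernel::KScopedSchedulerLock sl(kernel);

    // Thread state transitions and priority-queue updates belong here;
    // GlobalSchedulerContext::IsLocked() asserts exactly this precondition.
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
} // Releasing the lock may trigger a rescheduling pass.
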
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index fb30b6f8b..f96d34078 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -6,12 +6,12 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
-#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
-#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/svc_results.h"
namespace Kernel {
namespace {
@@ -33,7 +33,7 @@ HandleTable::~HandleTable() = default;
ResultCode HandleTable::SetSize(s32 handle_table_size) {
if (static_cast<u32>(handle_table_size) > MAX_COUNT) {
LOG_ERROR(Kernel, "Handle table size {} is greater than {}", handle_table_size, MAX_COUNT);
- return ERR_OUT_OF_MEMORY;
+ return ResultOutOfMemory;
}
// Values less than or equal to zero indicate to use the maximum allowable
@@ -53,7 +53,7 @@ ResultVal<Handle> HandleTable::Create(std::shared_ptr<Object> obj) {
const u16 slot = next_free_slot;
if (slot >= table_size) {
LOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use.");
- return ERR_HANDLE_TABLE_FULL;
+ return ResultHandleTableFull;
}
next_free_slot = generations[slot];
@@ -76,7 +76,7 @@ ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
std::shared_ptr<Object> object = GetGeneric(handle);
if (object == nullptr) {
LOG_ERROR(Kernel, "Tried to duplicate invalid handle: {:08X}", handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
return Create(std::move(object));
}
@@ -84,11 +84,15 @@ ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
ResultCode HandleTable::Close(Handle handle) {
if (!IsValid(handle)) {
LOG_ERROR(Kernel, "Handle is not valid! handle={:08X}", handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
const u16 slot = GetSlot(handle);
+ if (objects[slot].use_count() == 1) {
+ objects[slot]->Finalize();
+ }
+
objects[slot] = nullptr;
generations[slot] = next_free_slot;
@@ -105,7 +109,7 @@ bool HandleTable::IsValid(Handle handle) const {
std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
if (handle == CurrentThread) {
- return SharedFrom(kernel.CurrentScheduler().GetCurrentThread());
+ return SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
} else if (handle == CurrentProcess) {
return SharedFrom(kernel.CurrentProcess());
}
@@ -118,7 +122,7 @@ std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
void HandleTable::Clear() {
for (u16 i = 0; i < table_size; ++i) {
- generations[i] = i + 1;
+ generations[i] = static_cast<u16>(i + 1);
objects[i] = nullptr;
}
next_free_slot = 0;
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 81f85643b..161d9f782 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -14,18 +14,19 @@
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/hle/ipc_helpers.h"
-#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
+#include "core/hle/kernel/k_readable_event.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/readable_event.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/server_session.h"
-#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/time_manager.h"
-#include "core/hle/kernel/writable_event.h"
#include "core/memory.h"
namespace Kernel {
@@ -45,47 +46,9 @@ void SessionRequestHandler::ClientDisconnected(
boost::range::remove_erase(connected_sessions, server_session);
}
-std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
- const std::string& reason, u64 timeout, WakeupCallback&& callback,
- std::shared_ptr<WritableEvent> writable_event) {
- // Put the client thread to sleep until the wait event is signaled or the timeout expires.
-
- if (!writable_event) {
- // Create event if not provided
- const auto pair = WritableEvent::CreateEventPair(kernel, "HLE Pause Event: " + reason);
- writable_event = pair.writable;
- }
-
- {
- Handle event_handle = InvalidHandle;
- SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout);
- thread->SetHLECallback(
- [context = *this, callback](std::shared_ptr<Thread> thread) mutable -> bool {
- ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT
- ? ThreadWakeupReason::Timeout
- : ThreadWakeupReason::Signal;
- callback(thread, context, reason);
- context.WriteToOutgoingCommandBuffer(*thread);
- return true;
- });
- const auto readable_event{writable_event->GetReadableEvent()};
- writable_event->Clear();
- thread->SetHLESyncObject(readable_event.get());
- thread->SetStatus(ThreadStatus::WaitHLEEvent);
- thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
- readable_event->AddWaitingThread(thread);
- lock.Release();
- thread->SetHLETimeEvent(event_handle);
- }
-
- is_thread_waiting = true;
-
- return writable_event;
-}
-
HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
std::shared_ptr<ServerSession> server_session,
- std::shared_ptr<Thread> thread)
+ std::shared_ptr<KThread> thread)
: server_session(std::move(server_session)),
thread(std::move(thread)), kernel{kernel}, memory{memory} {
cmd_buf[0] = 0;
@@ -219,7 +182,7 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const HandleTabl
return RESULT_SUCCESS;
}
-ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) {
+ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& thread) {
auto& owner_process = *thread.GetOwnerProcess();
auto& handle_table = owner_process.GetHandleTable();
@@ -375,6 +338,28 @@ std::size_t HLERequestContext::GetWriteBufferSize(std::size_t buffer_index) cons
return 0;
}
+bool HLERequestContext::CanReadBuffer(std::size_t buffer_index) const {
+ const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
+ BufferDescriptorA()[buffer_index].Size()};
+
+ if (is_buffer_a) {
+ return BufferDescriptorA().size() > buffer_index;
+ } else {
+ return BufferDescriptorX().size() > buffer_index;
+ }
+}
+
+bool HLERequestContext::CanWriteBuffer(std::size_t buffer_index) const {
+ const bool is_buffer_b{BufferDescriptorB().size() > buffer_index &&
+ BufferDescriptorB()[buffer_index].Size()};
+
+ if (is_buffer_b) {
+ return BufferDescriptorB().size() > buffer_index;
+ } else {
+ return BufferDescriptorC().size() > buffer_index;
+ }
+}
+
std::string HLERequestContext::Description() const {
if (!command_header) {
return "No command header available";
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index f3277b766..9a769781b 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -24,6 +24,10 @@ namespace Core::Memory {
class Memory;
}
+namespace IPC {
+class ResponseBuilder;
+}
+
namespace Service {
class ServiceFrameworkBase;
}
@@ -36,9 +40,9 @@ class HLERequestContext;
class KernelCore;
class Process;
class ServerSession;
-class Thread;
-class ReadableEvent;
-class WritableEvent;
+class KThread;
+class KReadableEvent;
+class KWritableEvent;
enum class ThreadWakeupReason;
@@ -106,7 +110,7 @@ class HLERequestContext {
public:
explicit HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
std::shared_ptr<ServerSession> session,
- std::shared_ptr<Thread> thread);
+ std::shared_ptr<KThread> thread);
~HLERequestContext();
/// Returns a pointer to the IPC command buffer for this request.
@@ -122,32 +126,12 @@ public:
return server_session;
}
- using WakeupCallback = std::function<void(
- std::shared_ptr<Thread> thread, HLERequestContext& context, ThreadWakeupReason reason)>;
-
- /**
- * Puts the specified guest thread to sleep until the returned event is signaled or until the
- * specified timeout expires.
- * @param reason Reason for pausing the thread, to be used for debugging purposes.
- * @param timeout Timeout in nanoseconds after which the thread will be awoken and the callback
- * invoked with a Timeout reason.
- * @param callback Callback to be invoked when the thread is resumed. This callback must write
- * the entire command response once again, regardless of the state of it before this function
- * was called.
- * @param writable_event Event to use to wake up the thread. If unspecified, an event will be
- * created.
- * @returns Event that when signaled will resume the thread and call the callback function.
- */
- std::shared_ptr<WritableEvent> SleepClientThread(
- const std::string& reason, u64 timeout, WakeupCallback&& callback,
- std::shared_ptr<WritableEvent> writable_event = nullptr);
-
/// Populates this context with data from the requesting process/thread.
ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table,
u32_le* src_cmdbuf);
/// Writes data from this context back to the requesting process/thread.
- ResultCode WriteToOutgoingCommandBuffer(Thread& thread);
+ ResultCode WriteToOutgoingCommandBuffer(KThread& thread);
u32_le GetCommand() const {
return command;
@@ -220,6 +204,12 @@ public:
/// Helper function to get the size of the output buffer
std::size_t GetWriteBufferSize(std::size_t buffer_index = 0) const;
+ /// Helper function to test whether the input buffer at buffer_index can be read
+ bool CanReadBuffer(std::size_t buffer_index = 0) const;
+
+ /// Helper function to test whether the output buffer at buffer_index can be written
+ bool CanWriteBuffer(std::size_t buffer_index = 0) const;
+
template <typename T>
std::shared_ptr<T> GetCopyObject(std::size_t index) {
return DynamicObjectCast<T>(copy_objects.at(index));
@@ -274,11 +264,11 @@ public:
std::string Description() const;
- Thread& GetThread() {
+ KThread& GetThread() {
return *thread;
}
- const Thread& GetThread() const {
+ const KThread& GetThread() const {
return *thread;
}
@@ -287,11 +277,13 @@ public:
}
private:
+ friend class IPC::ResponseBuilder;
+
void ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf, bool incoming);
std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
std::shared_ptr<Kernel::ServerSession> server_session;
- std::shared_ptr<Thread> thread;
+ std::shared_ptr<KThread> thread;
// TODO(yuriks): Check common usage of this and optimize size accordingly
boost::container::small_vector<std::shared_ptr<Object>, 8> move_objects;
boost::container::small_vector<std::shared_ptr<Object>, 8> copy_objects;
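
The new CanReadBuffer/CanWriteBuffer helpers let a service handler validate descriptors before touching them. A hypothetical handler sketch (the service and command names are invented; ReadBuffer/WriteBuffer are the existing HLERequestContext helpers):

// Hypothetical HLE command handler guarding buffer access with the new helpers.
void ExampleService::ExampleCommand(Kernel::HLERequestContext& ctx) {
    if (!ctx.CanReadBuffer()) {
        LOG_ERROR(Service, "Expected an input buffer, got none");
        return;
    }
    // Safe: an input descriptor exists at index 0.
    const std::vector<u8> input = ctx.ReadBuffer();
    if (ctx.CanWriteBuffer()) {
        // Echo the data back only when an output buffer was supplied.
        ctx.WriteBuffer(input.data(), input.size());
    }
}
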
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
new file mode 100644
index 000000000..7018f56da
--- /dev/null
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -0,0 +1,341 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/arm/exclusive_monitor.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_address_arbiter.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"
+#include "core/hle/kernel/time_manager.h"
+#include "core/memory.h"
+
+namespace Kernel {
+
+KAddressArbiter::KAddressArbiter(Core::System& system_)
+ : system{system_}, kernel{system.Kernel()} {}
+KAddressArbiter::~KAddressArbiter() = default;
+
+namespace {
+
+bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
+ *out = system.Memory().Read32(address);
+ return true;
+}
+
+bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
+ auto& monitor = system.Monitor();
+ const auto current_core = system.CurrentCoreIndex();
+
+ // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+ // TODO(bunnei): We should call CanAccessAtomic(..) here.
+
+ // Load the value from the address.
+ const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
+
+ // Compare it to the desired one.
+ if (current_value < value) {
+ // If less than, we want to try to decrement.
+ const s32 decrement_value = current_value - 1;
+
+ // Decrement and try to store.
+ if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value))) {
+ // If we failed to store, try again.
+ DecrementIfLessThan(system, out, address, value);
+ }
+ } else {
+ // Otherwise, clear our exclusive hold and finish
+ monitor.ClearExclusive();
+ }
+
+ // We're done.
+ *out = current_value;
+ return true;
+}
+
+bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
+ auto& monitor = system.Monitor();
+ const auto current_core = system.CurrentCoreIndex();
+
+ // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+ // TODO(bunnei): We should call CanAccessAtomic(..) here.
+
+ // Load the value from the address.
+ const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
+
+ // Compare it to the desired one.
+ if (current_value == value) {
+ // If equal, we want to try to write the new value.
+
+ // Try to store.
+ if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(new_value))) {
+ // If we failed to store, try again.
+ UpdateIfEqual(system, out, address, value, new_value);
+ }
+ } else {
+ // Otherwise, clear our exclusive hold and finish.
+ monitor.ClearExclusive();
+ }
+
+ // We're done.
+ *out = current_value;
+ return true;
+}
+
+} // namespace
+
+ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
+ // Perform signaling.
+ s32 num_waiters{};
+ {
+ KScopedSchedulerLock sl(kernel);
+
+ auto it = thread_tree.nfind_light({addr, -1});
+ while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+ (it->GetAddressArbiterKey() == addr)) {
+ KThread* target_thread = std::addressof(*it);
+ target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
+
+ ASSERT(target_thread->IsWaitingForAddressArbiter());
+ target_thread->Wakeup();
+
+ it = thread_tree.erase(it);
+ target_thread->ClearAddressArbiter();
+ ++num_waiters;
+ }
+ }
+ return RESULT_SUCCESS;
+}
+
+ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
+ // Perform signaling.
+ s32 num_waiters{};
+ {
+ KScopedSchedulerLock sl(kernel);
+
+ // Check the userspace value.
+ s32 user_value{};
+ if (!UpdateIfEqual(system, &user_value, addr, value, value + 1)) {
+ LOG_ERROR(Kernel, "Invalid current memory!");
+ return ResultInvalidCurrentMemory;
+ }
+ if (user_value != value) {
+ return ResultInvalidState;
+ }
+
+ auto it = thread_tree.nfind_light({addr, -1});
+ while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+ (it->GetAddressArbiterKey() == addr)) {
+ KThread* target_thread = std::addressof(*it);
+ target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
+
+ ASSERT(target_thread->IsWaitingForAddressArbiter());
+ target_thread->Wakeup();
+
+ it = thread_tree.erase(it);
+ target_thread->ClearAddressArbiter();
+ ++num_waiters;
+ }
+ }
+ return RESULT_SUCCESS;
+}
+
+ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
+ // Perform signaling.
+ s32 num_waiters{};
+ {
+ [[maybe_unused]] const KScopedSchedulerLock sl(kernel);
+
+ auto it = thread_tree.nfind_light({addr, -1});
+ // Determine the updated value.
+ s32 new_value{};
+ if (count <= 0) {
+ if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) {
+ new_value = value - 2;
+ } else {
+ new_value = value + 1;
+ }
+ } else {
+ if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) {
+ auto tmp_it = it;
+ s32 tmp_num_waiters{};
+ while (++tmp_it != thread_tree.end() && tmp_it->GetAddressArbiterKey() == addr) {
+ if (tmp_num_waiters++ >= count) {
+ break;
+ }
+ }
+
+ if (tmp_num_waiters < count) {
+ new_value = value - 1;
+ } else {
+ new_value = value;
+ }
+ } else {
+ new_value = value + 1;
+ }
+ }
+
+ // Check the userspace value.
+ s32 user_value{};
+ bool succeeded{};
+ if (value != new_value) {
+ succeeded = UpdateIfEqual(system, &user_value, addr, value, new_value);
+ } else {
+ succeeded = ReadFromUser(system, &user_value, addr);
+ }
+
+ if (!succeeded) {
+ LOG_ERROR(Kernel, "Invalid current memory!");
+ return ResultInvalidCurrentMemory;
+ }
+ if (user_value != value) {
+ return ResultInvalidState;
+ }
+
+ while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+ (it->GetAddressArbiterKey() == addr)) {
+ KThread* target_thread = std::addressof(*it);
+ target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
+
+ ASSERT(target_thread->IsWaitingForAddressArbiter());
+ target_thread->Wakeup();
+
+ it = thread_tree.erase(it);
+ target_thread->ClearAddressArbiter();
+ ++num_waiters;
+ }
+ }
+ return RESULT_SUCCESS;
+}
+
+ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
+ // Prepare to wait.
+ KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+
+ {
+ KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
+
+ // Check that the thread isn't terminating.
+ if (cur_thread->IsTerminationRequested()) {
+ slp.CancelSleep();
+ return ResultTerminationRequested;
+ }
+
+ // Set the synced object.
+ cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
+
+ // Read the value from userspace.
+ s32 user_value{};
+ bool succeeded{};
+ if (decrement) {
+ succeeded = DecrementIfLessThan(system, &user_value, addr, value);
+ } else {
+ succeeded = ReadFromUser(system, &user_value, addr);
+ }
+
+ if (!succeeded) {
+ slp.CancelSleep();
+ return ResultInvalidCurrentMemory;
+ }
+
+ // Check that the value is less than the specified one.
+ if (user_value >= value) {
+ slp.CancelSleep();
+ return ResultInvalidState;
+ }
+
+ // Check that the timeout is non-zero.
+ if (timeout == 0) {
+ slp.CancelSleep();
+ return ResultTimedOut;
+ }
+
+ // Set the arbiter.
+ cur_thread->SetAddressArbiter(&thread_tree, addr);
+ thread_tree.insert(*cur_thread);
+ cur_thread->SetState(ThreadState::Waiting);
+ cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
+ }
+
+ // Cancel the timer wait.
+ kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
+
+ // Remove from the address arbiter.
+ {
+ KScopedSchedulerLock sl(kernel);
+
+ if (cur_thread->IsWaitingForAddressArbiter()) {
+ thread_tree.erase(thread_tree.iterator_to(*cur_thread));
+ cur_thread->ClearAddressArbiter();
+ }
+ }
+
+ // Get the result.
+ KSynchronizationObject* dummy{};
+ return cur_thread->GetWaitResult(&dummy);
+}
+
+ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
+ // Prepare to wait.
+ KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+
+ {
+ KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
+
+ // Check that the thread isn't terminating.
+ if (cur_thread->IsTerminationRequested()) {
+ slp.CancelSleep();
+ return ResultTerminationRequested;
+ }
+
+ // Set the synced object.
+ cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
+
+ // Read the value from userspace.
+ s32 user_value{};
+ if (!ReadFromUser(system, &user_value, addr)) {
+ slp.CancelSleep();
+ return ResultInvalidCurrentMemory;
+ }
+
+ // Check that the value is equal.
+ if (value != user_value) {
+ slp.CancelSleep();
+ return ResultInvalidState;
+ }
+
+ // Check that the timeout is non-zero.
+ if (timeout == 0) {
+ slp.CancelSleep();
+ return ResultTimedOut;
+ }
+
+ // Set the arbiter.
+ cur_thread->SetAddressArbiter(&thread_tree, addr);
+ thread_tree.insert(*cur_thread);
+ cur_thread->SetState(ThreadState::Waiting);
+ cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
+ }
+
+ // Cancel the timer wait.
+ kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
+
+ // Remove from the address arbiter.
+ {
+ KScopedSchedulerLock sl(kernel);
+
+ if (cur_thread->IsWaitingForAddressArbiter()) {
+ thread_tree.erase(thread_tree.iterator_to(*cur_thread));
+ cur_thread->ClearAddressArbiter();
+ }
+ }
+
+ // Get the result.
+ KSynchronizationObject* dummy{};
+ return cur_thread->GetWaitResult(&dummy);
+}
+
+} // namespace Kernel
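
DecrementIfLessThan and UpdateIfEqual above emulate an LDREX/STREX retry loop against the guest exclusive monitor. On plain host memory the same conditional update is an ordinary compare-exchange loop; a reference sketch in standard C++ (illustrative only — the real code must go through Core::ExclusiveMonitor so guest atomics stay coherent):

#include <atomic>

// Host-side equivalent of the "decrement if less than" primitive above.
// Returns the value observed before any update, mirroring *out.
int DecrementIfLessThanHost(std::atomic<int>& word, int value) {
    int current = word.load();
    while (current < value) {
        // Attempt to swap in current - 1; on failure, 'current' is reloaded
        // and the predicate re-checked, like the ExclusiveRead32/
        // ExclusiveWrite32 retry above.
        if (word.compare_exchange_weak(current, current - 1)) {
            break;
        }
    }
    return current;
}
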
diff --git a/src/core/hle/kernel/k_address_arbiter.h b/src/core/hle/kernel/k_address_arbiter.h
new file mode 100644
index 000000000..8d379b524
--- /dev/null
+++ b/src/core/hle/kernel/k_address_arbiter.h
@@ -0,0 +1,70 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "core/hle/kernel/k_condition_variable.h"
+#include "core/hle/kernel/svc_types.h"
+
+union ResultCode;
+
+namespace Core {
+class System;
+}
+
+namespace Kernel {
+
+class KernelCore;
+
+class KAddressArbiter {
+public:
+ using ThreadTree = KConditionVariable::ThreadTree;
+
+ explicit KAddressArbiter(Core::System& system_);
+ ~KAddressArbiter();
+
+ [[nodiscard]] ResultCode SignalToAddress(VAddr addr, Svc::SignalType type, s32 value,
+ s32 count) {
+ switch (type) {
+ case Svc::SignalType::Signal:
+ return Signal(addr, count);
+ case Svc::SignalType::SignalAndIncrementIfEqual:
+ return SignalAndIncrementIfEqual(addr, value, count);
+ case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
+ return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
+ }
+ UNREACHABLE();
+ return RESULT_UNKNOWN;
+ }
+
+ [[nodiscard]] ResultCode WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
+ s64 timeout) {
+ switch (type) {
+ case Svc::ArbitrationType::WaitIfLessThan:
+ return WaitIfLessThan(addr, value, false, timeout);
+ case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
+ return WaitIfLessThan(addr, value, true, timeout);
+ case Svc::ArbitrationType::WaitIfEqual:
+ return WaitIfEqual(addr, value, timeout);
+ }
+ UNREACHABLE();
+ return RESULT_UNKNOWN;
+ }
+
+private:
+ [[nodiscard]] ResultCode Signal(VAddr addr, s32 count);
+ [[nodiscard]] ResultCode SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
+ [[nodiscard]] ResultCode SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
+ [[nodiscard]] ResultCode WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
+ [[nodiscard]] ResultCode WaitIfEqual(VAddr addr, s32 value, s64 timeout);
+
+ ThreadTree thread_tree;
+
+ Core::System& system;
+ KernelCore& kernel;
+};
+
+} // namespace Kernel
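
A short usage sketch for the dispatch interface above (the free functions are invented for illustration; only KAddressArbiter itself is introduced by this change):

// Wake at most 'count' waiters on addr; count <= 0 wakes them all (see Signal()).
ResultCode SignalExample(Kernel::KAddressArbiter& arbiter, VAddr addr, s32 count) {
    return arbiter.SignalToAddress(addr, Kernel::Svc::SignalType::Signal,
                                   /*value=*/0, count);
}

// Block until signaled, provided *addr == expected; a negative timeout means
// no timeout in the usual Horizon convention.
ResultCode WaitExample(Kernel::KAddressArbiter& arbiter, VAddr addr, s32 expected) {
    return arbiter.WaitForAddress(addr, Kernel::Svc::ArbitrationType::WaitIfEqual,
                                  expected, /*timeout=*/-1);
}
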
diff --git a/src/core/hle/kernel/k_affinity_mask.h b/src/core/hle/kernel/k_affinity_mask.h
new file mode 100644
index 000000000..b906895fc
--- /dev/null
+++ b/src/core/hle/kernel/k_affinity_mask.h
@@ -0,0 +1,58 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "core/hardware_properties.h"
+
+namespace Kernel {
+
+class KAffinityMask {
+public:
+ constexpr KAffinityMask() = default;
+
+ [[nodiscard]] constexpr u64 GetAffinityMask() const {
+ return this->mask;
+ }
+
+ constexpr void SetAffinityMask(u64 new_mask) {
+ ASSERT((new_mask & ~AllowedAffinityMask) == 0);
+ this->mask = new_mask;
+ }
+
+ [[nodiscard]] constexpr bool GetAffinity(s32 core) const {
+ return (this->mask & GetCoreBit(core)) != 0;
+ }
+
+ constexpr void SetAffinity(s32 core, bool set) {
+ ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+
+ if (set) {
+ this->mask |= GetCoreBit(core);
+ } else {
+ this->mask &= ~GetCoreBit(core);
+ }
+ }
+
+ constexpr void SetAll() {
+ this->mask = AllowedAffinityMask;
+ }
+
+private:
+ [[nodiscard]] static constexpr u64 GetCoreBit(s32 core) {
+ ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+ return (1ULL << core);
+ }
+
+ static constexpr u64 AllowedAffinityMask = (1ULL << Core::Hardware::NUM_CPU_CORES) - 1;
+
+ u64 mask{};
+};
+
+} // namespace Kernel
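
A usage sketch for KAffinityMask; the expected values follow directly from the header above:

void AffinityMaskExample() {
    Kernel::KAffinityMask mask;  // default-constructed: no cores allowed
    mask.SetAffinity(0, true);   // allow core 0
    mask.SetAffinity(2, true);   // allow core 2
    ASSERT(mask.GetAffinityMask() == 0b0101ULL);
    mask.SetAll();               // allow all NUM_CPU_CORES cores
    mask.SetAffinity(2, false);  // drop core 2 again
    ASSERT(!mask.GetAffinity(2));
}
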
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
new file mode 100644
index 000000000..170d8fa0d
--- /dev/null
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -0,0 +1,345 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <vector>
+
+#include "core/arm/exclusive_monitor.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_condition_variable.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/svc_common.h"
+#include "core/hle/kernel/svc_results.h"
+#include "core/memory.h"
+
+namespace Kernel {
+
+namespace {
+
+bool ReadFromUser(Core::System& system, u32* out, VAddr address) {
+ *out = system.Memory().Read32(address);
+ return true;
+}
+
+bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
+ system.Memory().Write32(address, *p);
+ return true;
+}
+
+bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
+ u32 new_orr_mask) {
+ auto& monitor = system.Monitor();
+ const auto current_core = system.CurrentCoreIndex();
+
+ // Load the value from the address.
+ const auto expected = monitor.ExclusiveRead32(current_core, address);
+
+ // Orr in the new mask.
+ u32 value = expected | new_orr_mask;
+
+ // If the value is zero, use the if_zero value, otherwise use the newly orr'd value.
+ if (!expected) {
+ value = if_zero;
+ }
+
+ // Try to store.
+ if (!monitor.ExclusiveWrite32(current_core, address, value)) {
+ // If we failed to store, try again.
+ return UpdateLockAtomic(system, out, address, if_zero, new_orr_mask);
+ }
+
+ // We're done.
+ *out = expected;
+ return true;
+}
+
+} // namespace
+
+KConditionVariable::KConditionVariable(Core::System& system_)
+ : system{system_}, kernel{system.Kernel()} {}
+
+KConditionVariable::~KConditionVariable() = default;
+
+ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
+ KThread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread();
+
+ // Signal the address.
+ {
+ KScopedSchedulerLock sl(kernel);
+
+ // Remove waiter thread.
+ s32 num_waiters{};
+ KThread* next_owner_thread =
+ owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
+
+ // Determine the next tag.
+ u32 next_value{};
+ if (next_owner_thread) {
+ next_value = next_owner_thread->GetAddressKeyValue();
+ if (num_waiters > 1) {
+ next_value |= Svc::HandleWaitMask;
+ }
+
+ next_owner_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
+ next_owner_thread->Wakeup();
+ }
+
+ // Write the value to userspace.
+ if (!WriteToUser(system, addr, std::addressof(next_value))) {
+ if (next_owner_thread) {
+ next_owner_thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory);
+ }
+
+ return ResultInvalidCurrentMemory;
+ }
+ }
+
+ return RESULT_SUCCESS;
+}
+
+ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
+ KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+
+ // Wait for the address.
+ {
+ std::shared_ptr<KThread> owner_thread;
+ ASSERT(!owner_thread);
+ {
+ KScopedSchedulerLock sl(kernel);
+ cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
+
+ // Check if the thread should terminate.
+ R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
+
+ {
+ // Read the tag from userspace.
+ u32 test_tag{};
+ R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr),
+ ResultInvalidCurrentMemory);
+
+ // If the tag isn't the handle (with wait mask), we're done.
+ R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS);
+
+ // Get the lock owner thread.
+ owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(handle);
+ R_UNLESS(owner_thread, ResultInvalidHandle);
+
+ // Update the lock.
+ cur_thread->SetAddressKey(addr, value);
+ owner_thread->AddWaiter(cur_thread);
+ cur_thread->SetState(ThreadState::Waiting);
+ cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
+ cur_thread->SetMutexWaitAddressForDebugging(addr);
+ }
+ }
+ ASSERT(owner_thread);
+ }
+
+ // Remove the thread as a waiter from the lock owner.
+ {
+ KScopedSchedulerLock sl(kernel);
+ KThread* owner_thread = cur_thread->GetLockOwner();
+ if (owner_thread != nullptr) {
+ owner_thread->RemoveWaiter(cur_thread);
+ }
+ }
+
+ // Get the wait result.
+ KSynchronizationObject* dummy{};
+ return cur_thread->GetWaitResult(std::addressof(dummy));
+}
+
+KThread* KConditionVariable::SignalImpl(KThread* thread) {
+ // Check pre-conditions.
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Update the tag.
+ VAddr address = thread->GetAddressKey();
+ u32 own_tag = thread->GetAddressKeyValue();
+
+ u32 prev_tag{};
+ bool can_access{};
+ {
+ // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+ // TODO(bunnei): We should call CanAccessAtomic(..) here.
+ can_access = true;
+ if (can_access) {
+ UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
+ Svc::HandleWaitMask);
+ }
+ }
+
+ KThread* thread_to_close = nullptr;
+ if (can_access) {
+ if (prev_tag == InvalidHandle) {
+ // If nobody held the lock previously, we're all good.
+ thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
+ thread->Wakeup();
+ } else {
+ // Get the previous owner.
+ auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(
+ prev_tag & ~Svc::HandleWaitMask);
+
+ if (owner_thread) {
+ // Add the thread as a waiter on the owner.
+ owner_thread->AddWaiter(thread);
+ thread_to_close = owner_thread.get();
+ } else {
+ // The lock was tagged with a thread that doesn't exist.
+ thread->SetSyncedObject(nullptr, ResultInvalidState);
+ thread->Wakeup();
+ }
+ }
+ } else {
+ // If the address wasn't accessible, note so.
+ thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory);
+ thread->Wakeup();
+ }
+
+ return thread_to_close;
+}
+
+void KConditionVariable::Signal(u64 cv_key, s32 count) {
+ // Prepare for signaling.
+ constexpr int MaxThreads = 16;
+
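+    // Most signals wake only a few threads; the fixed array covers that common case
+    // without heap allocation, and the vector is the overflow path.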
+ // TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using
+ // std::shared_ptr.
+ std::vector<std::shared_ptr<KThread>> thread_list;
+ std::array<KThread*, MaxThreads> thread_array;
+ s32 num_to_close{};
+
+ // Perform signaling.
+ s32 num_waiters{};
+ {
+ KScopedSchedulerLock sl(kernel);
+
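+        // Find the first waiter registered under this key; {cv_key, -1} orders before
+        // any real (key, priority) pair, so the search lands at the start of the run.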
+ auto it = thread_tree.nfind_light({cv_key, -1});
+ while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+ (it->GetConditionVariableKey() == cv_key)) {
+ KThread* target_thread = std::addressof(*it);
+
+ if (KThread* thread = SignalImpl(target_thread); thread != nullptr) {
+ if (num_to_close < MaxThreads) {
+ thread_array[num_to_close++] = thread;
+ } else {
+ thread_list.push_back(SharedFrom(thread));
+ }
+ }
+
+ it = thread_tree.erase(it);
+ target_thread->ClearConditionVariable();
+ ++num_waiters;
+ }
+
+ // If we have no waiters, clear the has waiter flag.
+ if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) {
+ const u32 has_waiter_flag{};
+ WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
+ }
+ }
+
+ // Close threads in the array.
+ for (auto i = 0; i < num_to_close; ++i) {
+ thread_array[i]->Close();
+ }
+
+ // Close threads in the list.
+ for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) {
+ (*it)->Close();
+ }
+}
+
+ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
+ // Prepare to wait.
+ KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+
+ {
+ KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
+
+ // Set the synced object.
+ cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
+
+ // Check that the thread isn't terminating.
+ if (cur_thread->IsTerminationRequested()) {
+ slp.CancelSleep();
+ return ResultTerminationRequested;
+ }
+
+ // Update the value and process for the next owner.
+ {
+ // Remove waiter thread.
+ s32 num_waiters{};
+ KThread* next_owner_thread =
+ cur_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
+
+ // Update for the next owner thread.
+ u32 next_value{};
+ if (next_owner_thread != nullptr) {
+ // Get the next tag value.
+ next_value = next_owner_thread->GetAddressKeyValue();
+ if (num_waiters > 1) {
+ next_value |= Svc::HandleWaitMask;
+ }
+
+ // Wake up the next owner.
+ next_owner_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
+ next_owner_thread->Wakeup();
+ }
+
+ // Write to the cv key.
+ {
+ const u32 has_waiter_flag = 1;
+ WriteToUser(system, key, std::addressof(has_waiter_flag));
+ // TODO(bunnei): We should call DataMemoryBarrier(..) here.
+ }
+
+ // Write the value to userspace.
+ if (!WriteToUser(system, addr, std::addressof(next_value))) {
+ slp.CancelSleep();
+ return ResultInvalidCurrentMemory;
+ }
+ }
+
+ // Update condition variable tracking.
+ {
+ cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
+ thread_tree.insert(*cur_thread);
+ }
+
+ // If the timeout is non-zero, set the thread as waiting.
+ if (timeout != 0) {
+ cur_thread->SetState(ThreadState::Waiting);
+ cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
+ cur_thread->SetMutexWaitAddressForDebugging(addr);
+ }
+ }
+
+ // Cancel the timer wait.
+ kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
+
+ // Remove from the condition variable.
+ {
+ KScopedSchedulerLock sl(kernel);
+
+ if (KThread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
+ owner->RemoveWaiter(cur_thread);
+ }
+
+ if (cur_thread->IsWaitingForConditionVariable()) {
+ thread_tree.erase(thread_tree.iterator_to(*cur_thread));
+ cur_thread->ClearConditionVariable();
+ }
+ }
+
+ // Get the result.
+ KSynchronizationObject* dummy{};
+ return cur_thread->GetWaitResult(std::addressof(dummy));
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
new file mode 100644
index 000000000..861dbd420
--- /dev/null
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -0,0 +1,59 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/assert.h"
+#include "common/common_types.h"
+
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/result.h"
+
+namespace Core {
+class System;
+}
+
+namespace Kernel {
+
+class KConditionVariable {
+public:
+ using ThreadTree = typename KThread::ConditionVariableThreadTreeType;
+
+ explicit KConditionVariable(Core::System& system_);
+ ~KConditionVariable();
+
+ // Arbitration
+ [[nodiscard]] ResultCode SignalToAddress(VAddr addr);
+ [[nodiscard]] ResultCode WaitForAddress(Handle handle, VAddr addr, u32 value);
+
+ // Condition variable
+ void Signal(u64 cv_key, s32 count);
+ [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);
+
+private:
+ [[nodiscard]] KThread* SignalImpl(KThread* thread);
+
+ ThreadTree thread_tree;
+
+ Core::System& system;
+ KernelCore& kernel;
+};
+
+inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
+ KThread* thread) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ tree->erase(tree->iterator_to(*thread));
+}
+
+inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
+ KThread* thread) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ tree->insert(*thread);
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp
new file mode 100644
index 000000000..bb2fa4ad5
--- /dev/null
+++ b/src/core/hle/kernel/k_event.cpp
@@ -0,0 +1,32 @@
+// Copyright 2021 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/k_event.h"
+#include "core/hle/kernel/k_readable_event.h"
+#include "core/hle/kernel/k_writable_event.h"
+
+namespace Kernel {
+
+KEvent::KEvent(KernelCore& kernel, std::string&& name) : Object{kernel, std::move(name)} {}
+
+KEvent::~KEvent() = default;
+
+std::shared_ptr<KEvent> KEvent::Create(KernelCore& kernel, std::string&& name) {
+ return std::make_shared<KEvent>(kernel, std::move(name));
+}
+
+void KEvent::Initialize() {
+ // Create our sub events.
+ readable_event = std::make_shared<KReadableEvent>(kernel, GetName() + ":Readable");
+ writable_event = std::make_shared<KWritableEvent>(kernel, GetName() + ":Writable");
+
+    // Initialize our sub events.
+ readable_event->Initialize(this);
+ writable_event->Initialize(this);
+
+ // Mark initialized.
+ initialized = true;
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_event.h b/src/core/hle/kernel/k_event.h
new file mode 100644
index 000000000..2fb887129
--- /dev/null
+++ b/src/core/hle/kernel/k_event.h
@@ -0,0 +1,57 @@
+// Copyright 2021 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "core/hle/kernel/object.h"
+
+namespace Kernel {
+
+class KernelCore;
+class KReadableEvent;
+class KWritableEvent;
+
+class KEvent final : public Object {
+public:
+ explicit KEvent(KernelCore& kernel, std::string&& name);
+ ~KEvent() override;
+
+ static std::shared_ptr<KEvent> Create(KernelCore& kernel, std::string&& name);
+
+ void Initialize();
+
+ void Finalize() override {}
+
+ std::string GetTypeName() const override {
+ return "KEvent";
+ }
+
+ static constexpr HandleType HANDLE_TYPE = HandleType::Event;
+ HandleType GetHandleType() const override {
+ return HANDLE_TYPE;
+ }
+
+ std::shared_ptr<KReadableEvent>& GetReadableEvent() {
+ return readable_event;
+ }
+
+ std::shared_ptr<KWritableEvent>& GetWritableEvent() {
+ return writable_event;
+ }
+
+ const std::shared_ptr<KReadableEvent>& GetReadableEvent() const {
+ return readable_event;
+ }
+
+ const std::shared_ptr<KWritableEvent>& GetWritableEvent() const {
+ return writable_event;
+ }
+
+private:
+ std::shared_ptr<KReadableEvent> readable_event;
+ std::shared_ptr<KWritableEvent> writable_event;
+ bool initialized{};
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_light_condition_variable.h b/src/core/hle/kernel/k_light_condition_variable.h
new file mode 100644
index 000000000..362d0db28
--- /dev/null
+++ b/src/core/hle/kernel/k_light_condition_variable.h
@@ -0,0 +1,57 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread_queue.h"
+#include "core/hle/kernel/time_manager.h"
+
+namespace Kernel {
+class KernelCore;
+
+class KLightConditionVariable {
+public:
+ explicit KLightConditionVariable(KernelCore& kernel) : thread_queue(kernel), kernel(kernel) {}
+
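+    // As with a standard condition variable, the lock is released while the thread
+    // sleeps and re-acquired before Wait returns.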
+ void Wait(KLightLock* lock, s64 timeout = -1) {
+ WaitImpl(lock, timeout);
+ lock->Lock();
+ }
+
+ void Broadcast() {
+ KScopedSchedulerLock lk{kernel};
+ while (thread_queue.WakeupFrontThread() != nullptr) {
+ // We want to signal all threads, and so should continue waking up until there's nothing
+ // to wake.
+ }
+ }
+
+private:
+ void WaitImpl(KLightLock* lock, s64 timeout) {
+ KThread* owner = GetCurrentThreadPointer(kernel);
+
+ // Sleep the thread.
+ {
+ KScopedSchedulerLockAndSleep lk(kernel, owner, timeout);
+ lock->Unlock();
+
+ if (!thread_queue.SleepThread(owner)) {
+ lk.CancelSleep();
+ return;
+ }
+ }
+
+        // Cancel the wakeup task that the sleep set up.
+ kernel.TimeManager().UnscheduleTimeEvent(owner);
+ }
+
+    KThreadQueue thread_queue;
+ KernelCore& kernel;
+};
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp
new file mode 100644
index 000000000..f974022e8
--- /dev/null
+++ b/src/core/hle/kernel/k_light_lock.cpp
@@ -0,0 +1,130 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+
+namespace Kernel {
+
+void KLightLock::Lock() {
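+    // The tag stores the owner KThread pointer; the low bit is set while other
+    // threads are waiting on the lock.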
+ const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
+ const uintptr_t cur_thread_tag = (cur_thread | 1);
+
+ while (true) {
+ uintptr_t old_tag = tag.load(std::memory_order_relaxed);
+
+ while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1,
+ std::memory_order_acquire)) {
+ if ((old_tag | 1) == cur_thread_tag) {
+ return;
+ }
+ }
+
+ if ((old_tag == 0) || ((old_tag | 1) == cur_thread_tag)) {
+ break;
+ }
+
+ LockSlowPath(old_tag | 1, cur_thread);
+ }
+}
+
+void KLightLock::Unlock() {
+ const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
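+    // Fast path: if we own the lock with no waiters (tag == our pointer), swap in 0.
+    // If the waiter bit is set, the failed compare-exchange reloads `expected` and the
+    // next iteration hands off via the slow path.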
+ uintptr_t expected = cur_thread;
+ do {
+ if (expected != cur_thread) {
+ return UnlockSlowPath(cur_thread);
+ }
+ } while (!tag.compare_exchange_weak(expected, 0, std::memory_order_release));
+}
+
+void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
+ KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread);
+
+ // Pend the current thread waiting on the owner thread.
+ {
+ KScopedSchedulerLock sl{kernel};
+
+ // Ensure we actually have locking to do.
+ if (tag.load(std::memory_order_relaxed) != _owner) {
+ return;
+ }
+
+ // Add the current thread as a waiter on the owner.
+ KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL);
+ cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
+ owner_thread->AddWaiter(cur_thread);
+
+ // Set thread states.
+ if (cur_thread->GetState() == ThreadState::Runnable) {
+ cur_thread->SetState(ThreadState::Waiting);
+ } else {
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
+ }
+
+ if (owner_thread->IsSuspended()) {
+ owner_thread->ContinueIfHasKernelWaiters();
+ }
+ }
+
+ // We're no longer waiting on the lock owner.
+ {
+ KScopedSchedulerLock sl{kernel};
+ KThread* owner_thread = cur_thread->GetLockOwner();
+ if (owner_thread) {
+ owner_thread->RemoveWaiter(cur_thread);
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
+ }
+ }
+}
+
+void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
+ KThread* owner_thread = reinterpret_cast<KThread*>(_cur_thread);
+
+ // Unlock.
+ {
+ KScopedSchedulerLock sl{kernel};
+
+ // Get the next owner.
+ s32 num_waiters = 0;
+ KThread* next_owner = owner_thread->RemoveWaiterByKey(
+ std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));
+
+ // Pass the lock to the next owner.
+ uintptr_t next_tag = 0;
+ if (next_owner) {
+ next_tag = reinterpret_cast<uintptr_t>(next_owner);
+ if (num_waiters > 1) {
+ next_tag |= 0x1;
+ }
+
+ if (next_owner->GetState() == ThreadState::Waiting) {
+ next_owner->SetState(ThreadState::Runnable);
+ } else {
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
+ }
+
+ if (next_owner->IsSuspended()) {
+ next_owner->ContinueIfHasKernelWaiters();
+ }
+ }
+
+        // We may have been unsuspended while acquiring the lock, so re-suspend now if
+        // needed.
+ if (owner_thread->IsSuspended()) {
+ owner_thread->TrySuspend();
+ }
+
+ // Write the new tag value.
+ tag.store(next_tag);
+ }
+}
+
+bool KLightLock::IsLockedByCurrentThread() const {
+ return (tag | 1ULL) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)) | 1ULL);
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_light_lock.h b/src/core/hle/kernel/k_light_lock.h
new file mode 100644
index 000000000..f4c45f76a
--- /dev/null
+++ b/src/core/hle/kernel/k_light_lock.h
@@ -0,0 +1,41 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <atomic>
+
+#include "common/common_types.h"
+#include "core/hle/kernel/k_scoped_lock.h"
+
+namespace Kernel {
+
+class KernelCore;
+
+class KLightLock {
+public:
+ explicit KLightLock(KernelCore& kernel_) : kernel{kernel_} {}
+
+ void Lock();
+
+ void Unlock();
+
+ void LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
+
+ void UnlockSlowPath(uintptr_t cur_thread);
+
+ bool IsLocked() const {
+ return tag != 0;
+ }
+
+ bool IsLockedByCurrentThread() const;
+
+private:
+ std::atomic<uintptr_t> tag{};
+ KernelCore& kernel;
+};
+
+using KScopedLightLock = KScopedLock<KLightLock>;
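+
+// Usage sketch: `KScopedLightLock lk{lock};` calls Lock() on construction and
+// Unlock() when `lk` goes out of scope.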
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
new file mode 100644
index 000000000..4aa669d95
--- /dev/null
+++ b/src/core/hle/kernel/k_priority_queue.h
@@ -0,0 +1,451 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include <array>
+#include <bit>
+#include <concepts>
+
+#include "common/assert.h"
+#include "common/bit_set.h"
+#include "common/common_types.h"
+#include "common/concepts.h"
+
+namespace Kernel {
+
+class KThread;
+
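+// These concepts spell out the interface a queue member must provide; in this kernel
+// the member type is KThread.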
+template <typename T>
+concept KPriorityQueueAffinityMask = !std::is_reference_v<T> && requires(T& t) {
+    { t.GetAffinityMask() } -> Common::ConvertibleTo<u64>;
+    { t.SetAffinityMask(0) };
+
+    { t.GetAffinity(0) } -> std::same_as<bool>;
+    { t.SetAffinity(0, false) };
+    { t.SetAll() };
+};
+
+template <typename T>
+concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T& t) {
+    { typename T::QueueEntry() };
+    { (typename T::QueueEntry()).Initialize() };
+    { (typename T::QueueEntry()).SetPrev(std::addressof(t)) };
+    { (typename T::QueueEntry()).SetNext(std::addressof(t)) };
+    { (typename T::QueueEntry()).GetNext() } -> std::same_as<T*>;
+    { (typename T::QueueEntry()).GetPrev() } -> std::same_as<T*>;
+    { t.GetPriorityQueueEntry(0) } -> std::same_as<typename T::QueueEntry&>;
+
+    { t.GetAffinityMask() };
+    { std::remove_cvref_t<decltype(t.GetAffinityMask())>() } -> KPriorityQueueAffinityMask;
+
+    { t.GetActiveCore() } -> Common::ConvertibleTo<s32>;
+    { t.GetPriority() } -> Common::ConvertibleTo<s32>;
+};
+
+template <typename Member, size_t NumCores_, int LowestPriority, int HighestPriority>
+    requires KPriorityQueueMember<Member>
+class KPriorityQueue {
+public:
+ using AffinityMaskType = std::remove_cv_t<
+ std::remove_reference_t<decltype(std::declval<Member>().GetAffinityMask())>>;
+
+ static_assert(LowestPriority >= 0);
+ static_assert(HighestPriority >= 0);
+ static_assert(LowestPriority >= HighestPriority);
+ static constexpr size_t NumPriority = LowestPriority - HighestPriority + 1;
+ static constexpr size_t NumCores = NumCores_;
+
+ static constexpr bool IsValidCore(s32 core) {
+ return 0 <= core && core < static_cast<s32>(NumCores);
+ }
+
+ static constexpr bool IsValidPriority(s32 priority) {
+ return HighestPriority <= priority && priority <= LowestPriority + 1;
+ }
+
+private:
+ using Entry = typename Member::QueueEntry;
+
+public:
+ class KPerCoreQueue {
+ private:
+ std::array<Entry, NumCores> root{};
+
+ public:
+ constexpr KPerCoreQueue() {
+ for (auto& per_core_root : root) {
+ per_core_root.Initialize();
+ }
+ }
+
+ constexpr bool PushBack(s32 core, Member* member) {
+ // Get the entry associated with the member.
+ Entry& member_entry = member->GetPriorityQueueEntry(core);
+
+ // Get the entry associated with the end of the queue.
+ Member* tail = this->root[core].GetPrev();
+ Entry& tail_entry =
+ (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core];
+
+ // Link the entries.
+ member_entry.SetPrev(tail);
+ member_entry.SetNext(nullptr);
+ tail_entry.SetNext(member);
+ this->root[core].SetPrev(member);
+
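+            // Report whether the queue was previously empty.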
+ return tail == nullptr;
+ }
+
+ constexpr bool PushFront(s32 core, Member* member) {
+ // Get the entry associated with the member.
+ Entry& member_entry = member->GetPriorityQueueEntry(core);
+
+ // Get the entry associated with the front of the queue.
+ Member* head = this->root[core].GetNext();
+ Entry& head_entry =
+ (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core];
+
+ // Link the entries.
+ member_entry.SetPrev(nullptr);
+ member_entry.SetNext(head);
+ head_entry.SetPrev(member);
+ this->root[core].SetNext(member);
+
+ return (head == nullptr);
+ }
+
+ constexpr bool Remove(s32 core, Member* member) {
+ // Get the entry associated with the member.
+ Entry& member_entry = member->GetPriorityQueueEntry(core);
+
+ // Get the entries associated with next and prev.
+ Member* prev = member_entry.GetPrev();
+ Member* next = member_entry.GetNext();
+ Entry& prev_entry =
+ (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : this->root[core];
+ Entry& next_entry =
+ (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core];
+
+ // Unlink.
+ prev_entry.SetNext(next);
+ next_entry.SetPrev(prev);
+
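+            // Report whether the queue is now empty.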
+ return (this->GetFront(core) == nullptr);
+ }
+
+ constexpr Member* GetFront(s32 core) const {
+ return this->root[core].GetNext();
+ }
+ };
+
+ class KPriorityQueueImpl {
+ public:
+ constexpr KPriorityQueueImpl() = default;
+
+ constexpr void PushBack(s32 priority, s32 core, Member* member) {
+ ASSERT(IsValidCore(core));
+ ASSERT(IsValidPriority(priority));
+
+ if (priority > LowestPriority) {
+ return;
+ }
+
+ if (this->queues[priority].PushBack(core, member)) {
+ this->available_priorities[core].SetBit(priority);
+ }
+ }
+
+ constexpr void PushFront(s32 priority, s32 core, Member* member) {
+ ASSERT(IsValidCore(core));
+ ASSERT(IsValidPriority(priority));
+
+ if (priority > LowestPriority) {
+ return;
+ }
+
+ if (this->queues[priority].PushFront(core, member)) {
+ this->available_priorities[core].SetBit(priority);
+ }
+ }
+
+ constexpr void Remove(s32 priority, s32 core, Member* member) {
+ ASSERT(IsValidCore(core));
+ ASSERT(IsValidPriority(priority));
+
+ if (priority > LowestPriority) {
+ return;
+ }
+
+ if (this->queues[priority].Remove(core, member)) {
+ this->available_priorities[core].ClearBit(priority);
+ }
+ }
+
+ constexpr Member* GetFront(s32 core) const {
+ ASSERT(IsValidCore(core));
+
+ const s32 priority =
+ static_cast<s32>(this->available_priorities[core].CountLeadingZero());
+ if (priority <= LowestPriority) {
+ return this->queues[priority].GetFront(core);
+ } else {
+ return nullptr;
+ }
+ }
+
+ constexpr Member* GetFront(s32 priority, s32 core) const {
+ ASSERT(IsValidCore(core));
+ ASSERT(IsValidPriority(priority));
+
+ if (priority <= LowestPriority) {
+ return this->queues[priority].GetFront(core);
+ } else {
+ return nullptr;
+ }
+ }
+
+ constexpr Member* GetNext(s32 core, const Member* member) const {
+ ASSERT(IsValidCore(core));
+
+ Member* next = member->GetPriorityQueueEntry(core).GetNext();
+ if (next == nullptr) {
+ const s32 priority = static_cast<s32>(
+ this->available_priorities[core].GetNextSet(member->GetPriority()));
+ if (priority <= LowestPriority) {
+ next = this->queues[priority].GetFront(core);
+ }
+ }
+ return next;
+ }
+
+ constexpr void MoveToFront(s32 priority, s32 core, Member* member) {
+ ASSERT(IsValidCore(core));
+ ASSERT(IsValidPriority(priority));
+
+ if (priority <= LowestPriority) {
+ this->queues[priority].Remove(core, member);
+ this->queues[priority].PushFront(core, member);
+ }
+ }
+
+ constexpr Member* MoveToBack(s32 priority, s32 core, Member* member) {
+ ASSERT(IsValidCore(core));
+ ASSERT(IsValidPriority(priority));
+
+ if (priority <= LowestPriority) {
+ this->queues[priority].Remove(core, member);
+ this->queues[priority].PushBack(core, member);
+ return this->queues[priority].GetFront(core);
+ } else {
+ return nullptr;
+ }
+ }
+
+ private:
+ std::array<KPerCoreQueue, NumPriority> queues{};
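+        // One bit per priority level, per core; a set bit means that priority's queue
+        // is non-empty, letting GetFront locate the best priority via CountLeadingZero.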
+ std::array<Common::BitSet64<NumPriority>, NumCores> available_priorities{};
+ };
+
+private:
+ KPriorityQueueImpl scheduled_queue;
+ KPriorityQueueImpl suggested_queue;
+
+private:
+ constexpr void ClearAffinityBit(u64& affinity, s32 core) {
+ affinity &= ~(u64(1) << core);
+ }
+
+ constexpr s32 GetNextCore(u64& affinity) {
+ const s32 core = std::countr_zero(affinity);
+ ClearAffinityBit(affinity, core);
+ return core;
+ }
+
+ constexpr void PushBack(s32 priority, Member* member) {
+ ASSERT(IsValidPriority(priority));
+
+ // Push onto the scheduled queue for its core, if we can.
+ u64 affinity = member->GetAffinityMask().GetAffinityMask();
+ if (const s32 core = member->GetActiveCore(); core >= 0) {
+ this->scheduled_queue.PushBack(priority, core, member);
+ ClearAffinityBit(affinity, core);
+ }
+
+ // And suggest the thread for all other cores.
+ while (affinity) {
+ this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
+ }
+ }
+
+ constexpr void PushFront(s32 priority, Member* member) {
+ ASSERT(IsValidPriority(priority));
+
+ // Push onto the scheduled queue for its core, if we can.
+ u64 affinity = member->GetAffinityMask().GetAffinityMask();
+ if (const s32 core = member->GetActiveCore(); core >= 0) {
+ this->scheduled_queue.PushFront(priority, core, member);
+ ClearAffinityBit(affinity, core);
+ }
+
+ // And suggest the thread for all other cores.
+ // Note: Nintendo pushes onto the back of the suggested queue, not the front.
+ while (affinity) {
+ this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
+ }
+ }
+
+ constexpr void Remove(s32 priority, Member* member) {
+ ASSERT(IsValidPriority(priority));
+
+ // Remove from the scheduled queue for its core.
+ u64 affinity = member->GetAffinityMask().GetAffinityMask();
+ if (const s32 core = member->GetActiveCore(); core >= 0) {
+ this->scheduled_queue.Remove(priority, core, member);
+ ClearAffinityBit(affinity, core);
+ }
+
+ // Remove from the suggested queue for all other cores.
+ while (affinity) {
+ this->suggested_queue.Remove(priority, GetNextCore(affinity), member);
+ }
+ }
+
+public:
+ constexpr KPriorityQueue() = default;
+
+ // Getters.
+ constexpr Member* GetScheduledFront(s32 core) const {
+ return this->scheduled_queue.GetFront(core);
+ }
+
+ constexpr Member* GetScheduledFront(s32 core, s32 priority) const {
+ return this->scheduled_queue.GetFront(priority, core);
+ }
+
+ constexpr Member* GetSuggestedFront(s32 core) const {
+ return this->suggested_queue.GetFront(core);
+ }
+
+ constexpr Member* GetSuggestedFront(s32 core, s32 priority) const {
+ return this->suggested_queue.GetFront(priority, core);
+ }
+
+ constexpr Member* GetScheduledNext(s32 core, const Member* member) const {
+ return this->scheduled_queue.GetNext(core, member);
+ }
+
+ constexpr Member* GetSuggestedNext(s32 core, const Member* member) const {
+ return this->suggested_queue.GetNext(core, member);
+ }
+
+ constexpr Member* GetSamePriorityNext(s32 core, const Member* member) const {
+ return member->GetPriorityQueueEntry(core).GetNext();
+ }
+
+ // Mutators.
+ constexpr void PushBack(Member* member) {
+ this->PushBack(member->GetPriority(), member);
+ }
+
+ constexpr void Remove(Member* member) {
+ this->Remove(member->GetPriority(), member);
+ }
+
+ constexpr void MoveToScheduledFront(Member* member) {
+ this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
+ }
+
+ constexpr KThread* MoveToScheduledBack(Member* member) {
+ return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(),
+ member);
+ }
+
+ // First class fancy operations.
+ constexpr void ChangePriority(s32 prev_priority, bool is_running, Member* member) {
+ ASSERT(IsValidPriority(prev_priority));
+
+ // Remove the member from the queues.
+ const s32 new_priority = member->GetPriority();
+ this->Remove(prev_priority, member);
+
+ // And enqueue. If the member is running, we want to keep it running.
+ if (is_running) {
+ this->PushFront(new_priority, member);
+ } else {
+ this->PushBack(new_priority, member);
+ }
+ }
+
+ constexpr void ChangeAffinityMask(s32 prev_core, const AffinityMaskType& prev_affinity,
+ Member* member) {
+ // Get the new information.
+ const s32 priority = member->GetPriority();
+ const AffinityMaskType& new_affinity = member->GetAffinityMask();
+ const s32 new_core = member->GetActiveCore();
+
+ // Remove the member from all queues it was in before.
+ for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
+ if (prev_affinity.GetAffinity(core)) {
+ if (core == prev_core) {
+ this->scheduled_queue.Remove(priority, core, member);
+ } else {
+ this->suggested_queue.Remove(priority, core, member);
+ }
+ }
+ }
+
+ // And add the member to all queues it should be in now.
+ for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
+ if (new_affinity.GetAffinity(core)) {
+ if (core == new_core) {
+ this->scheduled_queue.PushBack(priority, core, member);
+ } else {
+ this->suggested_queue.PushBack(priority, core, member);
+ }
+ }
+ }
+ }
+
+ constexpr void ChangeCore(s32 prev_core, Member* member, bool to_front = false) {
+ // Get the new information.
+ const s32 new_core = member->GetActiveCore();
+ const s32 priority = member->GetPriority();
+
+ // We don't need to do anything if the core is the same.
+ if (prev_core != new_core) {
+ // Remove from the scheduled queue for the previous core.
+ if (prev_core >= 0) {
+ this->scheduled_queue.Remove(priority, prev_core, member);
+ }
+
+ // Remove from the suggested queue and add to the scheduled queue for the new core.
+ if (new_core >= 0) {
+ this->suggested_queue.Remove(priority, new_core, member);
+ if (to_front) {
+ this->scheduled_queue.PushFront(priority, new_core, member);
+ } else {
+ this->scheduled_queue.PushBack(priority, new_core, member);
+ }
+ }
+
+ // Add to the suggested queue for the previous core.
+ if (prev_core >= 0) {
+ this->suggested_queue.PushBack(priority, prev_core, member);
+ }
+ }
+ }
+};
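+
+// The scheduler instantiates this as (see global_scheduler_context.h):
+//     KPriorityQueue<KThread, Core::Hardware::NUM_CPU_CORES,
+//                    Svc::LowestThreadPriority, Svc::HighestThreadPriority>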
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_readable_event.cpp b/src/core/hle/kernel/k_readable_event.cpp
new file mode 100644
index 000000000..4b4d34857
--- /dev/null
+++ b/src/core/hle/kernel/k_readable_event.cpp
@@ -0,0 +1,56 @@
+// Copyright 2021 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include "common/assert.h"
+#include "common/common_funcs.h"
+#include "common/logging/log.h"
+#include "core/hle/kernel/k_readable_event.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/object.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+KReadableEvent::KReadableEvent(KernelCore& kernel, std::string&& name)
+ : KSynchronizationObject{kernel, std::move(name)} {}
+
+KReadableEvent::~KReadableEvent() = default;
+
+bool KReadableEvent::IsSignaled() const {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ return is_signaled;
+}
+
+ResultCode KReadableEvent::Signal() {
+ KScopedSchedulerLock lk{kernel};
+
+ if (!is_signaled) {
+ is_signaled = true;
+ NotifyAvailable();
+ }
+
+ return RESULT_SUCCESS;
+}
+
+ResultCode KReadableEvent::Clear() {
+ Reset();
+
+ return RESULT_SUCCESS;
+}
+
+ResultCode KReadableEvent::Reset() {
+ KScopedSchedulerLock lk{kernel};
+
+ if (!is_signaled) {
+ return ResultInvalidState;
+ }
+
+ is_signaled = false;
+ return RESULT_SUCCESS;
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_readable_event.h b/src/core/hle/kernel/k_readable_event.h
new file mode 100644
index 000000000..e6f0fd900
--- /dev/null
+++ b/src/core/hle/kernel/k_readable_event.h
@@ -0,0 +1,51 @@
+// Copyright 2021 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/object.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+class KernelCore;
+class KEvent;
+
+class KReadableEvent final : public KSynchronizationObject {
+public:
+ explicit KReadableEvent(KernelCore& kernel, std::string&& name);
+ ~KReadableEvent() override;
+
+ std::string GetTypeName() const override {
+ return "KReadableEvent";
+ }
+
+ static constexpr HandleType HANDLE_TYPE = HandleType::ReadableEvent;
+ HandleType GetHandleType() const override {
+ return HANDLE_TYPE;
+ }
+
+ KEvent* GetParent() const {
+ return parent;
+ }
+
+ void Initialize(KEvent* parent_) {
+ is_signaled = false;
+ parent = parent_;
+ }
+
+ bool IsSignaled() const override;
+ void Finalize() override {}
+
+ ResultCode Signal();
+ ResultCode Clear();
+ ResultCode Reset();
+
+private:
+ bool is_signaled{};
+ KEvent* parent{};
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp
new file mode 100644
index 000000000..d7a4a38e6
--- /dev/null
+++ b/src/core/hle/kernel/k_resource_limit.cpp
@@ -0,0 +1,152 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#include "common/assert.h"
+#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/core_timing_util.h"
+#include "core/hle/kernel/k_resource_limit.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+constexpr s64 DefaultTimeout = 10000000000; // 10 seconds
+
+KResourceLimit::KResourceLimit(KernelCore& kernel, Core::System& system)
+    : Object{kernel}, lock{kernel}, cond_var{kernel}, kernel{kernel}, system{system} {}
+
+KResourceLimit::~KResourceLimit() = default;
+
+s64 KResourceLimit::GetLimitValue(LimitableResource which) const {
+ const auto index = static_cast<std::size_t>(which);
+ s64 value{};
+ {
+ KScopedLightLock lk{lock};
+ value = limit_values[index];
+ ASSERT(value >= 0);
+ ASSERT(current_values[index] <= limit_values[index]);
+ ASSERT(current_hints[index] <= current_values[index]);
+ }
+ return value;
+}
+
+s64 KResourceLimit::GetCurrentValue(LimitableResource which) const {
+ const auto index = static_cast<std::size_t>(which);
+ s64 value{};
+ {
+ KScopedLightLock lk{lock};
+ value = current_values[index];
+ ASSERT(value >= 0);
+ ASSERT(current_values[index] <= limit_values[index]);
+ ASSERT(current_hints[index] <= current_values[index]);
+ }
+ return value;
+}
+
+s64 KResourceLimit::GetPeakValue(LimitableResource which) const {
+ const auto index = static_cast<std::size_t>(which);
+ s64 value{};
+ {
+ KScopedLightLock lk{lock};
+ value = peak_values[index];
+ ASSERT(value >= 0);
+ ASSERT(current_values[index] <= limit_values[index]);
+ ASSERT(current_hints[index] <= current_values[index]);
+ }
+ return value;
+}
+
+s64 KResourceLimit::GetFreeValue(LimitableResource which) const {
+ const auto index = static_cast<std::size_t>(which);
+ s64 value{};
+ {
+ KScopedLightLock lk(lock);
+ ASSERT(current_values[index] >= 0);
+ ASSERT(current_values[index] <= limit_values[index]);
+ ASSERT(current_hints[index] <= current_values[index]);
+ value = limit_values[index] - current_values[index];
+ }
+
+ return value;
+}
+
+ResultCode KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
+ const auto index = static_cast<std::size_t>(which);
+ KScopedLightLock lk(lock);
+ R_UNLESS(current_values[index] <= value, ResultInvalidState);
+
+ limit_values[index] = value;
+
+ return RESULT_SUCCESS;
+}
+
+bool KResourceLimit::Reserve(LimitableResource which, s64 value) {
+ return Reserve(which, value, system.CoreTiming().GetGlobalTimeNs().count() + DefaultTimeout);
+}
+
+bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) {
+ ASSERT(value >= 0);
+ const auto index = static_cast<std::size_t>(which);
+ KScopedLightLock lk(lock);
+
+ ASSERT(current_hints[index] <= current_values[index]);
+ if (current_hints[index] >= limit_values[index]) {
+ return false;
+ }
+
+ // Loop until we reserve or run out of time.
+ while (true) {
+ ASSERT(current_values[index] <= limit_values[index]);
+ ASSERT(current_hints[index] <= current_values[index]);
+
+        // If incrementing the current value would overflow, fail the reservation.
+ if (current_values[index] + value <= current_values[index]) {
+ break;
+ }
+
+ if (current_values[index] + value <= limit_values[index]) {
+ current_values[index] += value;
+ current_hints[index] += value;
+ peak_values[index] = std::max(peak_values[index], current_values[index]);
+ return true;
+ }
+
+ if (current_hints[index] + value <= limit_values[index] &&
+ (timeout < 0 || system.CoreTiming().GetGlobalTimeNs().count() < timeout)) {
+ waiter_count++;
+ cond_var.Wait(&lock, timeout);
+ waiter_count--;
+ } else {
+ break;
+ }
+ }
+
+ return false;
+}
+
+void KResourceLimit::Release(LimitableResource which, s64 value) {
+ Release(which, value, value);
+}
+
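+// `value` is subtracted from current usage; `hint` is subtracted from the usage hint
+// that Reserve consults before sleeping. The two are equal for a plain release.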
+void KResourceLimit::Release(LimitableResource which, s64 value, s64 hint) {
+ ASSERT(value >= 0);
+ ASSERT(hint >= 0);
+
+ const auto index = static_cast<std::size_t>(which);
+ KScopedLightLock lk(lock);
+ ASSERT(current_values[index] <= limit_values[index]);
+ ASSERT(current_hints[index] <= current_values[index]);
+ ASSERT(value <= current_values[index]);
+ ASSERT(hint <= current_hints[index]);
+
+ current_values[index] -= value;
+ current_hints[index] -= hint;
+
+ if (waiter_count != 0) {
+ cond_var.Broadcast();
+ }
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_resource_limit.h b/src/core/hle/kernel/k_resource_limit.h
new file mode 100644
index 000000000..58ae456f1
--- /dev/null
+++ b/src/core/hle/kernel/k_resource_limit.h
@@ -0,0 +1,81 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include <array>
+#include "common/common_types.h"
+#include "core/hle/kernel/k_light_condition_variable.h"
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/object.h"
+
+union ResultCode;
+
+namespace Core {
+class System;
+}
+
+namespace Kernel {
+class KernelCore;
+enum class LimitableResource : u32 {
+ PhysicalMemory = 0,
+ Threads = 1,
+ Events = 2,
+ TransferMemory = 3,
+ Sessions = 4,
+
+ Count,
+};
+
+constexpr bool IsValidResourceType(LimitableResource type) {
+ return type < LimitableResource::Count;
+}
+
+class KResourceLimit final : public Object {
+public:
+ explicit KResourceLimit(KernelCore& kernel, Core::System& system);
+ ~KResourceLimit();
+
+ s64 GetLimitValue(LimitableResource which) const;
+ s64 GetCurrentValue(LimitableResource which) const;
+ s64 GetPeakValue(LimitableResource which) const;
+ s64 GetFreeValue(LimitableResource which) const;
+
+ ResultCode SetLimitValue(LimitableResource which, s64 value);
+
+ bool Reserve(LimitableResource which, s64 value);
+ bool Reserve(LimitableResource which, s64 value, s64 timeout);
+ void Release(LimitableResource which, s64 value);
+ void Release(LimitableResource which, s64 value, s64 hint);
+
+ std::string GetTypeName() const override {
+ return "KResourceLimit";
+ }
+ std::string GetName() const override {
+ return GetTypeName();
+ }
+
+ static constexpr HandleType HANDLE_TYPE = HandleType::ResourceLimit;
+ HandleType GetHandleType() const override {
+ return HANDLE_TYPE;
+ }
+
+    void Finalize() override {}
+
+private:
+ using ResourceArray = std::array<s64, static_cast<std::size_t>(LimitableResource::Count)>;
+ ResourceArray limit_values{};
+ ResourceArray current_values{};
+ ResourceArray current_hints{};
+ ResourceArray peak_values{};
+ mutable KLightLock lock;
+ s32 waiter_count{};
+ KLightConditionVariable cond_var;
+ KernelCore& kernel;
+ Core::System& system;
+};
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
new file mode 100644
index 000000000..bb5f43b53
--- /dev/null
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -0,0 +1,814 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#include <bit>
+
+#include "common/assert.h"
+#include "common/bit_util.h"
+#include "common/fiber.h"
+#include "common/logging/log.h"
+#include "core/arm/arm_interface.h"
+#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/cpu_manager.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/physical_core.h"
+#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/time_manager.h"
+
+namespace Kernel {
+
+static void IncrementScheduledCount(Kernel::KThread* thread) {
+ if (auto process = thread->GetOwnerProcess(); process) {
+ process->IncrementScheduledCount();
+ }
+}
+
+void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule) {
+ auto scheduler = kernel.CurrentScheduler();
+
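+    // 0xF is a sentinel core index that can never match a real core.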
+ u32 current_core{0xF};
+ bool must_context_switch{};
+ if (scheduler) {
+ current_core = scheduler->core_id;
+ // TODO(bunnei): Should be set to true when we deprecate single core
+ must_context_switch = !kernel.IsPhantomModeForSingleCore();
+ }
+
+ while (cores_pending_reschedule != 0) {
+ const auto core = static_cast<u32>(std::countr_zero(cores_pending_reschedule));
+ ASSERT(core < Core::Hardware::NUM_CPU_CORES);
+ if (!must_context_switch || core != current_core) {
+ auto& phys_core = kernel.PhysicalCore(core);
+ phys_core.Interrupt();
+ } else {
+ must_context_switch = true;
+ }
+ cores_pending_reschedule &= ~(1ULL << core);
+ }
+ if (must_context_switch) {
+ auto core_scheduler = kernel.CurrentScheduler();
+ kernel.ExitSVCProfile();
+ core_scheduler->RescheduleCurrentCore();
+ kernel.EnterSVCProfile();
+ }
+}
+
+u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
+ std::scoped_lock lock{guard};
+ if (KThread* prev_highest_thread = state.highest_priority_thread;
+ prev_highest_thread != highest_thread) {
+ if (prev_highest_thread != nullptr) {
+ IncrementScheduledCount(prev_highest_thread);
+ prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks());
+ }
+ if (state.should_count_idle) {
+ if (highest_thread != nullptr) {
+ if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
+ process->SetRunningThread(core_id, highest_thread, state.idle_count);
+ }
+ } else {
+ state.idle_count++;
+ }
+ }
+
+ state.highest_priority_thread = highest_thread;
+ state.needs_scheduling.store(true);
+ return (1ULL << core_id);
+ } else {
+ return 0;
+ }
+}
+
+u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Clear that we need to update.
+ ClearSchedulerUpdateNeeded(kernel);
+
+ u64 cores_needing_scheduling = 0, idle_cores = 0;
+ KThread* top_threads[Core::Hardware::NUM_CPU_CORES];
+ auto& priority_queue = GetPriorityQueue(kernel);
+
+    // We want to go over all cores, finding the highest priority thread and determining if
+    // scheduling is needed for that core.
+ for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+ KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
+ if (top_thread != nullptr) {
+ // If the thread has no waiters, we need to check if the process has a thread pinned.
+ if (top_thread->GetNumKernelWaiters() == 0) {
+ if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) {
+ if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id));
+ pinned != nullptr && pinned != top_thread) {
+ // We prefer our parent's pinned thread if possible. However, we also don't
+ // want to schedule un-runnable threads.
+ if (pinned->GetRawState() == ThreadState::Runnable) {
+ top_thread = pinned;
+ } else {
+ top_thread = nullptr;
+ }
+ }
+ }
+ }
+ } else {
+ idle_cores |= (1ULL << core_id);
+ }
+
+ top_threads[core_id] = top_thread;
+ cores_needing_scheduling |=
+ kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
+ }
+
+ // Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
+ while (idle_cores != 0) {
+ const auto core_id = static_cast<u32>(std::countr_zero(idle_cores));
+ if (KThread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
+ s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
+ size_t num_candidates = 0;
+
+ // While we have a suggested thread, try to migrate it!
+ while (suggested != nullptr) {
+ // Check if the suggested thread is the top thread on its core.
+ const s32 suggested_core = suggested->GetActiveCore();
+ if (KThread* top_thread =
+ (suggested_core >= 0) ? top_threads[suggested_core] : nullptr;
+ top_thread != suggested) {
+                    // Make sure we're not dealing with threads of too high a priority to migrate.
+ if (top_thread != nullptr &&
+ top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) {
+ break;
+ }
+
+ // The suggested thread isn't bound to its core, so we can migrate it!
+ suggested->SetActiveCore(core_id);
+ priority_queue.ChangeCore(suggested_core, suggested);
+
+ top_threads[core_id] = suggested;
+ cores_needing_scheduling |=
+ kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
+ break;
+ }
+
+ // Note this core as a candidate for migration.
+ ASSERT(num_candidates < Core::Hardware::NUM_CPU_CORES);
+ migration_candidates[num_candidates++] = suggested_core;
+ suggested = priority_queue.GetSuggestedNext(core_id, suggested);
+ }
+
+ // If suggested is nullptr, we failed to migrate a specific thread. So let's try all our
+ // candidate cores' top threads.
+ if (suggested == nullptr) {
+ for (size_t i = 0; i < num_candidates; i++) {
+ // Check if there's some other thread that can run on the candidate core.
+ const s32 candidate_core = migration_candidates[i];
+ suggested = top_threads[candidate_core];
+ if (KThread* next_on_candidate_core =
+ priority_queue.GetScheduledNext(candidate_core, suggested);
+ next_on_candidate_core != nullptr) {
+ // The candidate core can run some other thread! We'll migrate its current
+ // top thread to us.
+ top_threads[candidate_core] = next_on_candidate_core;
+ cores_needing_scheduling |=
+ kernel.Scheduler(candidate_core)
+ .UpdateHighestPriorityThread(top_threads[candidate_core]);
+
+ // Perform the migration.
+ suggested->SetActiveCore(core_id);
+ priority_queue.ChangeCore(candidate_core, suggested);
+
+ top_threads[core_id] = suggested;
+ cores_needing_scheduling |=
+ kernel.Scheduler(core_id).UpdateHighestPriorityThread(
+ top_threads[core_id]);
+ break;
+ }
+ }
+ }
+ }
+
+ idle_cores &= ~(1ULL << core_id);
+ }
+
+ return cores_needing_scheduling;
+}
+
+void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) {
+ // Get an atomic reference to the core scheduler's previous thread.
+ std::atomic_ref<KThread*> prev_thread(kernel.Scheduler(static_cast<s32>(i)).prev_thread);
+ static_assert(std::atomic_ref<KThread*>::is_always_lock_free);
+
+ // Atomically clear the previous thread if it's our target.
+ KThread* compare = thread;
+ prev_thread.compare_exchange_strong(compare, nullptr);
+ }
+}
+
+void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Check if the state has changed, because if it hasn't there's nothing to do.
+ const auto cur_state = thread->GetRawState();
+ if (cur_state == old_state) {
+ return;
+ }
+
+ // Update the priority queues.
+ if (old_state == ThreadState::Runnable) {
+ // If we were previously runnable, then we're not runnable now, and we should remove.
+ GetPriorityQueue(kernel).Remove(thread);
+ IncrementScheduledCount(thread);
+ SetSchedulerUpdateNeeded(kernel);
+ } else if (cur_state == ThreadState::Runnable) {
+ // If we're now runnable, then we weren't previously, and we should add.
+ GetPriorityQueue(kernel).PushBack(thread);
+ IncrementScheduledCount(thread);
+ SetSchedulerUpdateNeeded(kernel);
+ }
+}
+
+void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // If the thread is runnable, we want to change its priority in the queue.
+ if (thread->GetRawState() == ThreadState::Runnable) {
+ GetPriorityQueue(kernel).ChangePriority(
+ old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
+ IncrementScheduledCount(thread);
+ SetSchedulerUpdateNeeded(kernel);
+ }
+}
+
+void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
+ const KAffinityMask& old_affinity, s32 old_core) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // If the thread is runnable, we want to change its affinity in the queue.
+ if (thread->GetRawState() == ThreadState::Runnable) {
+ GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread);
+ IncrementScheduledCount(thread);
+ SetSchedulerUpdateNeeded(kernel);
+ }
+}
+
+void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
+ ASSERT(system.GlobalSchedulerContext().IsLocked());
+
+ // Get a reference to the priority queue.
+ auto& kernel = system.Kernel();
+ auto& priority_queue = GetPriorityQueue(kernel);
+
+ // Rotate the front of the queue to the end.
+ KThread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
+ KThread* next_thread = nullptr;
+ if (top_thread != nullptr) {
+ next_thread = priority_queue.MoveToScheduledBack(top_thread);
+ if (next_thread != top_thread) {
+ IncrementScheduledCount(top_thread);
+ IncrementScheduledCount(next_thread);
+ }
+ }
+
+ // While we have a suggested thread, try to migrate it!
+ {
+ KThread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
+ while (suggested != nullptr) {
+ // Check if the suggested thread is the top thread on its core.
+ const s32 suggested_core = suggested->GetActiveCore();
+ if (KThread* top_on_suggested_core =
+ (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
+ : nullptr;
+ top_on_suggested_core != suggested) {
+ // If the next thread is a new thread that has been waiting longer than our
+ // suggestion, we prefer it to our suggestion.
+ if (top_thread != next_thread && next_thread != nullptr &&
+ next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) {
+ suggested = nullptr;
+ break;
+ }
+
+ // If we're allowed to do a migration, do one.
+ // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion
+ // to the front of the queue.
+ if (top_on_suggested_core == nullptr ||
+ top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
+ suggested->SetActiveCore(core_id);
+ priority_queue.ChangeCore(suggested_core, suggested, true);
+ IncrementScheduledCount(suggested);
+ break;
+ }
+ }
+
+ // Get the next suggestion.
+ suggested = priority_queue.GetSamePriorityNext(core_id, suggested);
+ }
+ }
+
+    // Now that we might have migrated a thread with the same priority, check if we can do better.
+    {
+ KThread* best_thread = priority_queue.GetScheduledFront(core_id);
+ if (best_thread == GetCurrentThread()) {
+ best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
+ }
+
+ // If the best thread we can choose has a priority the same or worse than ours, try to
+ // migrate a higher priority thread.
+ if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
+ KThread* suggested = priority_queue.GetSuggestedFront(core_id);
+ while (suggested != nullptr) {
+                // If the suggestion is no better in priority than the best thread, don't bother.
+ if (suggested->GetPriority() >= best_thread->GetPriority()) {
+ break;
+ }
+
+ // Check if the suggested thread is the top thread on its core.
+ const s32 suggested_core = suggested->GetActiveCore();
+ if (KThread* top_on_suggested_core =
+ (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
+ : nullptr;
+ top_on_suggested_core != suggested) {
+ // If we're allowed to do a migration, do one.
+ // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
+ // suggestion to the front of the queue.
+ if (top_on_suggested_core == nullptr ||
+ top_on_suggested_core->GetPriority() >=
+ HighestCoreMigrationAllowedPriority) {
+ suggested->SetActiveCore(core_id);
+ priority_queue.ChangeCore(suggested_core, suggested, true);
+ IncrementScheduledCount(suggested);
+ break;
+ }
+ }
+
+ // Get the next suggestion.
+ suggested = priority_queue.GetSuggestedNext(core_id, suggested);
+ }
+ }
+ }
+
+ // After a rotation, we need a scheduler update.
+ SetSchedulerUpdateNeeded(kernel);
+}
+
+bool KScheduler::CanSchedule(KernelCore& kernel) {
+ return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1;
+}
+
+bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
+ return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire);
+}
+
+void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) {
+ kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release);
+}
+
+void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
+ kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release);
+}
+
+void KScheduler::DisableScheduling(KernelCore& kernel) {
+ if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
+ ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
+ scheduler->GetCurrentThread()->DisableDispatch();
+ }
+}
+
+void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
+ if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
+ ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
+ if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
+ scheduler->GetCurrentThread()->EnableDispatch();
+ }
+ }
+ RescheduleCores(kernel, cores_needing_scheduling);
+}
+
+u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
+ if (IsSchedulerUpdateNeeded(kernel)) {
+ return UpdateHighestPriorityThreadsImpl(kernel);
+ } else {
+ return 0;
+ }
+}
+
+KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
+ return kernel.GlobalSchedulerContext().priority_queue;
+}
+
+void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
+ // Validate preconditions.
+ ASSERT(CanSchedule(kernel));
+ ASSERT(kernel.CurrentProcess() != nullptr);
+
+ // Get the current thread and process.
+ KThread& cur_thread = Kernel::GetCurrentThread(kernel);
+ Process& cur_process = *kernel.CurrentProcess();
+
+ // If the thread's yield count matches, there's nothing for us to do.
+ if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
+ return;
+ }
+
+ // Get a reference to the priority queue.
+ auto& priority_queue = GetPriorityQueue(kernel);
+
+ // Perform the yield.
+ {
+ KScopedSchedulerLock lock(kernel);
+
+ const auto cur_state = cur_thread.GetRawState();
+ if (cur_state == ThreadState::Runnable) {
+ // Put the current thread at the back of the queue.
+ KThread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
+ IncrementScheduledCount(std::addressof(cur_thread));
+
+ // If the next thread is different, we have an update to perform.
+ if (next_thread != std::addressof(cur_thread)) {
+ SetSchedulerUpdateNeeded(kernel);
+ } else {
+ // Otherwise, set the thread's yield count so that we won't waste work until the
+ // process is scheduled again.
+ cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
+ }
+ }
+ }
+}
+
+void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
+ // Validate preconditions.
+ ASSERT(CanSchedule(kernel));
+ ASSERT(kernel.CurrentProcess() != nullptr);
+
+ // Get the current thread and process.
+ KThread& cur_thread = Kernel::GetCurrentThread(kernel);
+ Process& cur_process = *kernel.CurrentProcess();
+
+ // If the thread's yield count matches, there's nothing for us to do.
+ if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
+ return;
+ }
+
+ // Get a reference to the priority queue.
+ auto& priority_queue = GetPriorityQueue(kernel);
+
+ // Perform the yield.
+ {
+ KScopedSchedulerLock lock(kernel);
+
+ const auto cur_state = cur_thread.GetRawState();
+ if (cur_state == ThreadState::Runnable) {
+ // Get the current active core.
+ const s32 core_id = cur_thread.GetActiveCore();
+
+ // Put the current thread at the back of the queue.
+ KThread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
+ IncrementScheduledCount(std::addressof(cur_thread));
+
+ // While we have a suggested thread, try to migrate it!
+ bool recheck = false;
+ KThread* suggested = priority_queue.GetSuggestedFront(core_id);
+ while (suggested != nullptr) {
+ // Check if the suggested thread is the thread running on its core.
+ const s32 suggested_core = suggested->GetActiveCore();
+
+ if (KThread* running_on_suggested_core =
+ (suggested_core >= 0)
+ ? kernel.Scheduler(suggested_core).state.highest_priority_thread
+ : nullptr;
+ running_on_suggested_core != suggested) {
+                    // If the current thread's priority is higher than our suggestion's,
+                    // we prefer the next thread to the suggestion. We also prefer the
+                    // next thread when the current thread's priority is equal to the
+                    // suggestion's, but the next thread has been waiting longer.
+ if ((suggested->GetPriority() > cur_thread.GetPriority()) ||
+ (suggested->GetPriority() == cur_thread.GetPriority() &&
+ next_thread != std::addressof(cur_thread) &&
+ next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick())) {
+ suggested = nullptr;
+ break;
+ }
+
+ // If we're allowed to do a migration, do one.
+ // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
+ // suggestion to the front of the queue.
+ if (running_on_suggested_core == nullptr ||
+ running_on_suggested_core->GetPriority() >=
+ HighestCoreMigrationAllowedPriority) {
+ suggested->SetActiveCore(core_id);
+ priority_queue.ChangeCore(suggested_core, suggested, true);
+ IncrementScheduledCount(suggested);
+ break;
+ } else {
+ // We couldn't perform a migration, but we should check again on a future
+ // yield.
+ recheck = true;
+ }
+ }
+
+ // Get the next suggestion.
+ suggested = priority_queue.GetSuggestedNext(core_id, suggested);
+ }
+
+ // If we still have a suggestion or the next thread is different, we have an update to
+ // perform.
+ if (suggested != nullptr || next_thread != std::addressof(cur_thread)) {
+ SetSchedulerUpdateNeeded(kernel);
+ } else if (!recheck) {
+ // Otherwise if we don't need to re-check, set the thread's yield count so that we
+ // won't waste work until the process is scheduled again.
+ cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
+ }
+ }
+ }
+}
+
+void KScheduler::YieldToAnyThread(KernelCore& kernel) {
+ // Validate preconditions.
+ ASSERT(CanSchedule(kernel));
+ ASSERT(kernel.CurrentProcess() != nullptr);
+
+ // Get the current thread and process.
+ KThread& cur_thread = Kernel::GetCurrentThread(kernel);
+ Process& cur_process = *kernel.CurrentProcess();
+
+ // If the thread's yield count matches, there's nothing for us to do.
+ if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
+ return;
+ }
+
+ // Get a reference to the priority queue.
+ auto& priority_queue = GetPriorityQueue(kernel);
+
+ // Perform the yield.
+ {
+ KScopedSchedulerLock lock(kernel);
+
+ const auto cur_state = cur_thread.GetRawState();
+ if (cur_state == ThreadState::Runnable) {
+ // Get the current active core.
+ const s32 core_id = cur_thread.GetActiveCore();
+
+ // Migrate the current thread to core -1.
+ cur_thread.SetActiveCore(-1);
+ priority_queue.ChangeCore(core_id, std::addressof(cur_thread));
+ IncrementScheduledCount(std::addressof(cur_thread));
+
+ // If there's nothing scheduled, we can try to perform a migration.
+ if (priority_queue.GetScheduledFront(core_id) == nullptr) {
+ // While we have a suggested thread, try to migrate it!
+ KThread* suggested = priority_queue.GetSuggestedFront(core_id);
+ while (suggested != nullptr) {
+ // Check if the suggested thread is the top thread on its core.
+ const s32 suggested_core = suggested->GetActiveCore();
+ if (KThread* top_on_suggested_core =
+ (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
+ : nullptr;
+ top_on_suggested_core != suggested) {
+ // If we're allowed to do a migration, do one.
+ if (top_on_suggested_core == nullptr ||
+ top_on_suggested_core->GetPriority() >=
+ HighestCoreMigrationAllowedPriority) {
+ suggested->SetActiveCore(core_id);
+ priority_queue.ChangeCore(suggested_core, suggested);
+ IncrementScheduledCount(suggested);
+ }
+
+ // Regardless of whether we migrated, we had a candidate, so we're done.
+ break;
+ }
+
+ // Get the next suggestion.
+ suggested = priority_queue.GetSuggestedNext(core_id, suggested);
+ }
+
+ // If the suggestion is different from the current thread, we need to perform an
+ // update.
+ if (suggested != std::addressof(cur_thread)) {
+ SetSchedulerUpdateNeeded(kernel);
+ } else {
+ // Otherwise, set the thread's yield count so that we won't waste work until the
+ // process is scheduled again.
+ cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
+ }
+ } else {
+ // Otherwise, we have an update to perform.
+ SetSchedulerUpdateNeeded(kernel);
+ }
+ }
+ }
+}
+
+KScheduler::KScheduler(Core::System& system, s32 core_id) : system(system), core_id(core_id) {
+ switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
+ state.needs_scheduling.store(true);
+ state.interrupt_task_thread_runnable = false;
+ state.should_count_idle = false;
+ state.idle_count = 0;
+ state.idle_thread_stack = nullptr;
+ state.highest_priority_thread = nullptr;
+}
+
+KScheduler::~KScheduler() = default;
+
+KThread* KScheduler::GetCurrentThread() const {
+ if (auto result = current_thread.load(); result) {
+ return result;
+ }
+ return idle_thread;
+}
+
+u64 KScheduler::GetLastContextSwitchTicks() const {
+ return last_context_switch_time;
+}
+
+void KScheduler::RescheduleCurrentCore() {
+ ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
+
+ auto& phys_core = system.Kernel().PhysicalCore(core_id);
+ if (phys_core.IsInterrupted()) {
+ phys_core.ClearInterrupt();
+ }
+ guard.lock();
+ if (state.needs_scheduling.load()) {
+ Schedule();
+ } else {
+ guard.unlock();
+ }
+}
+
+void KScheduler::OnThreadStart() {
+ SwitchContextStep2();
+}
+
+void KScheduler::Unload(KThread* thread) {
+ LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
+
+ if (thread) {
+ if (thread->IsCallingSvc()) {
+ system.ArmInterface(core_id).ExceptionalExit();
+ thread->ClearIsCallingSvc();
+ }
+ if (!thread->IsTerminationRequested()) {
+ prev_thread = thread;
+
+ Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+ cpu_core.SaveContext(thread->GetContext32());
+ cpu_core.SaveContext(thread->GetContext64());
+ // Save the TPIDR_EL0 system register in case it was modified.
+ thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+ cpu_core.ClearExclusiveState();
+ } else {
+ prev_thread = nullptr;
+ }
+ thread->context_guard.unlock();
+ }
+}
+
+void KScheduler::Reload(KThread* thread) {
+ LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread ? thread->GetName() : "nullptr");
+
+ if (thread) {
+ ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
+
+ auto* const thread_owner_process = thread->GetOwnerProcess();
+ if (thread_owner_process != nullptr) {
+ system.Kernel().MakeCurrentProcess(thread_owner_process);
+ }
+
+ Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+ cpu_core.LoadContext(thread->GetContext32());
+ cpu_core.LoadContext(thread->GetContext64());
+ cpu_core.SetTlsAddress(thread->GetTLSAddress());
+ cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+ cpu_core.ClearExclusiveState();
+ }
+}
+
+void KScheduler::SwitchContextStep2() {
+ // Load context of new thread
+ Reload(current_thread.load());
+
+ RescheduleCurrentCore();
+}
+
+void KScheduler::ScheduleImpl() {
+ KThread* previous_thread = current_thread.load();
+ KThread* next_thread = state.highest_priority_thread;
+
+ state.needs_scheduling = false;
+
+ // We never want to schedule a null thread, so use the idle thread if we don't have a next.
+ if (next_thread == nullptr) {
+ next_thread = idle_thread;
+ }
+
+    // If we're not actually switching threads, there's nothing to do.
+ if (next_thread == current_thread.load()) {
+ guard.unlock();
+ return;
+ }
+
+ current_thread.store(next_thread);
+
+ Process* const previous_process = system.Kernel().CurrentProcess();
+
+ UpdateLastContextSwitchTime(previous_thread, previous_process);
+
+ // Save context for previous thread
+ Unload(previous_thread);
+
+ std::shared_ptr<Common::Fiber>* old_context;
+ if (previous_thread != nullptr) {
+ old_context = &previous_thread->GetHostContext();
+ } else {
+ old_context = &idle_thread->GetHostContext();
+ }
+ guard.unlock();
+
+ Common::Fiber::YieldTo(*old_context, switch_fiber);
+    // When a thread wakes up, the scheduler it runs under may have changed to that of
+    // another core.
+ auto& next_scheduler = *system.Kernel().CurrentScheduler();
+ next_scheduler.SwitchContextStep2();
+}
+
+void KScheduler::OnSwitch(void* this_scheduler) {
+ KScheduler* sched = static_cast<KScheduler*>(this_scheduler);
+ sched->SwitchToCurrent();
+}
+
+void KScheduler::SwitchToCurrent() {
+ while (true) {
+ {
+ std::scoped_lock lock{guard};
+ current_thread.store(state.highest_priority_thread);
+ state.needs_scheduling.store(false);
+ }
+ const auto is_switch_pending = [this] {
+ std::scoped_lock lock{guard};
+ return state.needs_scheduling.load();
+ };
+ do {
+ auto next_thread = current_thread.load();
+ if (next_thread != nullptr) {
+ next_thread->context_guard.lock();
+ if (next_thread->GetRawState() != ThreadState::Runnable) {
+ next_thread->context_guard.unlock();
+ break;
+ }
+ if (next_thread->GetActiveCore() != core_id) {
+ next_thread->context_guard.unlock();
+ break;
+ }
+ }
+ std::shared_ptr<Common::Fiber>* next_context;
+ if (next_thread != nullptr) {
+ next_context = &next_thread->GetHostContext();
+ } else {
+ next_context = &idle_thread->GetHostContext();
+ }
+ Common::Fiber::YieldTo(switch_fiber, *next_context);
+ } while (!is_switch_pending());
+ }
+}
+
+void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process) {
+ const u64 prev_switch_ticks = last_context_switch_time;
+ const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
+ const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
+
+ if (thread != nullptr) {
+ thread->AddCpuTime(core_id, update_ticks);
+ }
+
+ if (process != nullptr) {
+ process->UpdateCPUTimeTicks(update_ticks);
+ }
+
+ last_context_switch_time = most_recent_switch_ticks;
+}
+
+void KScheduler::Initialize() {
+ std::string name = "Idle Thread Id:" + std::to_string(core_id);
+ std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
+ void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
+ auto thread_res = KThread::Create(system, ThreadType::Main, name, 0,
+ KThread::IdleThreadPriority, 0, static_cast<u32>(core_id), 0,
+ nullptr, std::move(init_func), init_func_parameter);
+ idle_thread = thread_res.Unwrap().get();
+}
+
+KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
+ : KScopedLock(kernel.GlobalSchedulerContext().SchedulerLock()) {}
+
+KScopedSchedulerLock::~KScopedSchedulerLock() = default;
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
new file mode 100644
index 000000000..f595b9a5c
--- /dev/null
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -0,0 +1,207 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include <atomic>
+
+#include "common/common_types.h"
+#include "common/spin_lock.h"
+#include "core/hle/kernel/global_scheduler_context.h"
+#include "core/hle/kernel/k_priority_queue.h"
+#include "core/hle/kernel/k_scheduler_lock.h"
+#include "core/hle/kernel/k_scoped_lock.h"
+
+namespace Common {
+class Fiber;
+}
+
+namespace Core {
+class System;
+}
+
+namespace Kernel {
+
+class KernelCore;
+class Process;
+class SchedulerLock;
+class KThread;
+
+class KScheduler final {
+public:
+ explicit KScheduler(Core::System& system, s32 core_id);
+ ~KScheduler();
+
+ /// Reschedules to the next available thread (call after current thread is suspended)
+ void RescheduleCurrentCore();
+
+ /// Reschedules cores pending reschedule, to be called on EnableScheduling.
+ static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule);
+
+    /// The next two are for single-core mode only.
+    /// Unload the current thread before preempting the core.
+ void Unload(KThread* thread);
+
+    /// Reload the current thread after core preemption.
+ void Reload(KThread* thread);
+
+ /// Gets the current running thread
+ [[nodiscard]] KThread* GetCurrentThread() const;
+
+ /// Returns true if the scheduler is idle
+ [[nodiscard]] bool IsIdle() const {
+ return GetCurrentThread() == idle_thread;
+ }
+
+ /// Gets the timestamp for the last context switch in ticks.
+ [[nodiscard]] u64 GetLastContextSwitchTicks() const;
+
+ [[nodiscard]] bool ContextSwitchPending() const {
+ return state.needs_scheduling.load(std::memory_order_relaxed);
+ }
+
+ void Initialize();
+
+ void OnThreadStart();
+
+ [[nodiscard]] std::shared_ptr<Common::Fiber>& ControlContext() {
+ return switch_fiber;
+ }
+
+ [[nodiscard]] const std::shared_ptr<Common::Fiber>& ControlContext() const {
+ return switch_fiber;
+ }
+
+ [[nodiscard]] u64 UpdateHighestPriorityThread(KThread* highest_thread);
+
+ /**
+     * Takes a thread and moves it to the back of its priority list.
+     *
+     * @note This operation can be redundant, in which case no scheduling change occurs.
+ */
+ static void YieldWithoutCoreMigration(KernelCore& kernel);
+
+ /**
+     * Takes a thread and moves it to the back of its priority list.
+     * Afterwards, tries to pick a suggested thread from the suggested queue that has either
+     * a worse time or a better priority than the next thread on the core.
+     *
+     * @note This operation can be redundant, in which case no scheduling change occurs.
+ */
+ static void YieldWithCoreMigration(KernelCore& kernel);
+
+ /**
+     * Takes a thread and moves it out of the scheduling queue
+     * and into the suggested queue. If no thread can be scheduled afterwards on that core,
+     * a suggested thread is obtained instead.
+     *
+     * @note This operation can be redundant, in which case no scheduling change occurs.
+ */
+ static void YieldToAnyThread(KernelCore& kernel);
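+
+    // Editor's note: these three yields correspond to the special timeout values accepted
+    // by svcSleepThread (0, -1, -2) in this emulator's SVC layer; the exact mapping lives
+    // in the SVC handler rather than in this header, so treat this as a reader aid.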
+
+ static void ClearPreviousThread(KernelCore& kernel, KThread* thread);
+
+ /// Notify the scheduler a thread's status has changed.
+ static void OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state);
+
+ /// Notify the scheduler a thread's priority has changed.
+ static void OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority);
+
+ /// Notify the scheduler a thread's core and/or affinity mask has changed.
+ static void OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
+ const KAffinityMask& old_affinity, s32 old_core);
+
+ static bool CanSchedule(KernelCore& kernel);
+ static bool IsSchedulerUpdateNeeded(const KernelCore& kernel);
+ static void SetSchedulerUpdateNeeded(KernelCore& kernel);
+ static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
+ static void DisableScheduling(KernelCore& kernel);
+ static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling);
+ [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
+
+private:
+ friend class GlobalSchedulerContext;
+
+ /**
+ * Takes care of selecting the new scheduled threads in three steps:
+ *
+     * 1. First, a thread is selected from the top of the priority queue. If no thread
+     *    is obtained, we move on to step two; otherwise, we are done.
+     *
+     * 2. Second, we try to get a suggested thread that's not assigned to any core or
+     *    that is not the top thread on its core.
+     *
+     * 3. Third, if no suggested thread is found, we do a second pass and pick a running
+     *    thread on another core and swap it with that core's current thread.
+     *
+     * Returns the cores needing scheduling.
+ */
+ [[nodiscard]] static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
+
+ [[nodiscard]] static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel);
+
+ void RotateScheduledQueue(s32 core_id, s32 priority);
+
+ void Schedule() {
+ ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
+ this->ScheduleImpl();
+ }
+
+ /// Switches the CPU's active thread context to that of the specified thread
+ void ScheduleImpl();
+
+    /// When a thread wakes up, it must run this through its new scheduler
+ void SwitchContextStep2();
+
+ /**
+ * Called on every context switch to update the internal timestamp
+ * This also updates the running time ticks for the given thread and
+ * process using the following difference:
+ *
+ * ticks += most_recent_ticks - last_context_switch_ticks
+ *
+ * The internal tick timestamp for the scheduler is simply the
+ * most recent tick count retrieved. No special arithmetic is
+ * applied to it.
+ */
+ void UpdateLastContextSwitchTime(KThread* thread, Process* process);
+
+ static void OnSwitch(void* this_scheduler);
+ void SwitchToCurrent();
+
+ KThread* prev_thread{};
+ std::atomic<KThread*> current_thread{};
+
+ KThread* idle_thread;
+
+ std::shared_ptr<Common::Fiber> switch_fiber{};
+
+ struct SchedulingState {
+ std::atomic<bool> needs_scheduling;
+ bool interrupt_task_thread_runnable{};
+ bool should_count_idle{};
+ u64 idle_count{};
+ KThread* highest_priority_thread{};
+ void* idle_thread_stack{};
+ };
+
+ SchedulingState state;
+
+ Core::System& system;
+ u64 last_context_switch_time{};
+ const s32 core_id;
+
+ Common::SpinLock guard{};
+};
+
+class KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {
+public:
+ explicit KScopedSchedulerLock(KernelCore& kernel);
+ ~KScopedSchedulerLock();
+};
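+
+// Illustrative usage (editor's sketch, not part of this change): scheduler state is mutated
+// under a scoped lock, e.g.:
+//
+//     {
+//         KScopedSchedulerLock sl{kernel};
+//         // ... mutate thread state / the priority queue ...
+//     } // unlocking reschedules any cores flagged while the lock was held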
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
new file mode 100644
index 000000000..169455d18
--- /dev/null
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -0,0 +1,75 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include "common/assert.h"
+#include "common/spin_lock.h"
+#include "core/hardware_properties.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+
+namespace Kernel {
+
+class KernelCore;
+
+template <typename SchedulerType>
+class KAbstractSchedulerLock {
+public:
+ explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {}
+
+ bool IsLockedByCurrentThread() const {
+ return owner_thread == GetCurrentThreadPointer(kernel);
+ }
+
+ void Lock() {
+ if (IsLockedByCurrentThread()) {
+ // If we already own the lock, we can just increment the count.
+ ASSERT(lock_count > 0);
+ lock_count++;
+ } else {
+ // Otherwise, we want to disable scheduling and acquire the spinlock.
+ SchedulerType::DisableScheduling(kernel);
+ spin_lock.lock();
+
+ // For debug, ensure that our state is valid.
+ ASSERT(lock_count == 0);
+ ASSERT(owner_thread == nullptr);
+
+ // Increment count, take ownership.
+ lock_count = 1;
+ owner_thread = GetCurrentThreadPointer(kernel);
+ }
+ }
+
+ void Unlock() {
+ ASSERT(IsLockedByCurrentThread());
+ ASSERT(lock_count > 0);
+
+ // Release an instance of the lock.
+ if ((--lock_count) == 0) {
+ // We're no longer going to hold the lock. Take note of what cores need scheduling.
+ const u64 cores_needing_scheduling =
+ SchedulerType::UpdateHighestPriorityThreads(kernel);
+
+ // Note that we no longer hold the lock, and unlock the spinlock.
+ owner_thread = nullptr;
+ spin_lock.unlock();
+
+ // Enable scheduling, and perform a rescheduling operation.
+ SchedulerType::EnableScheduling(kernel, cores_needing_scheduling);
+ }
+ }
+
+private:
+ KernelCore& kernel;
+ Common::SpinLock spin_lock{};
+ s32 lock_count{};
+ KThread* owner_thread{};
+};
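+
+// Editor's sketch of the locking discipline (assuming KScheduler as the SchedulerType): the
+// lock is reentrant per-thread, and only the outermost Unlock() publishes scheduling
+// decisions:
+//
+//     lock.Lock();   // outermost: disables scheduling, takes the spinlock
+//     lock.Lock();   // nested: only increments lock_count
+//     lock.Unlock(); // nested: only decrements lock_count
+//     lock.Unlock(); // outermost: updates highest-priority threads, re-enables scheduling,
+//                    // and reschedules the flagged cores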
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_scoped_lock.h b/src/core/hle/kernel/k_scoped_lock.h
new file mode 100644
index 000000000..d7cc557b2
--- /dev/null
+++ b/src/core/hle/kernel/k_scoped_lock.h
@@ -0,0 +1,41 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Kernel {
+
+template <typename T>
+concept KLockable = !std::is_reference_v<T> && requires(T & t) {
+ { t.Lock() }
+ ->std::same_as<void>;
+ { t.Unlock() }
+ ->std::same_as<void>;
+};
+
+template <typename T>
+requires KLockable<T> class KScopedLock {
+public:
+ explicit KScopedLock(T* l) : lock_ptr(l) {
+ this->lock_ptr->Lock();
+ }
+    explicit KScopedLock(T& l) : KScopedLock(std::addressof(l)) {}
+ ~KScopedLock() {
+ this->lock_ptr->Unlock();
+ }
+
+ KScopedLock(const KScopedLock&) = delete;
+ KScopedLock(KScopedLock&&) = delete;
+
+private:
+ T* lock_ptr;
+};
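+
+// Illustrative usage (editor's sketch): any type with void Lock()/Unlock() satisfies
+// KLockable and can be guarded RAII-style; the scheduler lock is the canonical example:
+//
+//     {
+//         KScopedLock lk{kernel.GlobalSchedulerContext().SchedulerLock()};
+//         // ... critical section; Unlock() runs on scope exit ...
+//     }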
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_scoped_resource_reservation.h b/src/core/hle/kernel/k_scoped_resource_reservation.h
new file mode 100644
index 000000000..c5deca00b
--- /dev/null
+++ b/src/core/hle/kernel/k_scoped_resource_reservation.h
@@ -0,0 +1,67 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "core/hle/kernel/k_resource_limit.h"
+#include "core/hle/kernel/process.h"
+
+namespace Kernel {
+
+class KScopedResourceReservation {
+public:
+ explicit KScopedResourceReservation(std::shared_ptr<KResourceLimit> l, LimitableResource r,
+ s64 v, s64 timeout)
+ : resource_limit(std::move(l)), value(v), resource(r) {
+ if (resource_limit && value) {
+ success = resource_limit->Reserve(resource, value, timeout);
+ } else {
+ success = true;
+ }
+ }
+
+ explicit KScopedResourceReservation(std::shared_ptr<KResourceLimit> l, LimitableResource r,
+ s64 v = 1)
+ : resource_limit(std::move(l)), value(v), resource(r) {
+ if (resource_limit && value) {
+ success = resource_limit->Reserve(resource, value);
+ } else {
+ success = true;
+ }
+ }
+
+ explicit KScopedResourceReservation(const Process* p, LimitableResource r, s64 v, s64 t)
+ : KScopedResourceReservation(p->GetResourceLimit(), r, v, t) {}
+
+ explicit KScopedResourceReservation(const Process* p, LimitableResource r, s64 v = 1)
+ : KScopedResourceReservation(p->GetResourceLimit(), r, v) {}
+
+ ~KScopedResourceReservation() noexcept {
+ if (resource_limit && value && success) {
+            // The resource was not committed; release the reservation.
+ resource_limit->Release(resource, value);
+ }
+ }
+
+    /// Commit the resource reservation; once committed, destruction of this object does not
+    /// release the resource.
+ void Commit() {
+ resource_limit = nullptr;
+ }
+
+ [[nodiscard]] bool Succeeded() const {
+ return success;
+ }
+
+private:
+ std::shared_ptr<KResourceLimit> resource_limit;
+ s64 value;
+ LimitableResource resource;
+ bool success;
+};
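+
+// Illustrative usage (editor's sketch, mirroring how kernel object creation reserves against
+// a limit; ResultLimitReached is assumed from svc_results.h):
+//
+//     KScopedResourceReservation reservation(process, LimitableResource::Threads);
+//     R_UNLESS(reservation.Succeeded(), ResultLimitReached);
+//     // ... create the object ...
+//     reservation.Commit(); // the destructor will no longer release the reserved count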
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
new file mode 100644
index 000000000..f8189e107
--- /dev/null
+++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
@@ -0,0 +1,46 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/time_manager.h"
+
+namespace Kernel {
+
+class KScopedSchedulerLockAndSleep {
+public:
+ explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, KThread* t, s64 timeout)
+ : kernel(kernel), thread(t), timeout_tick(timeout) {
+ // Lock the scheduler.
+ kernel.GlobalSchedulerContext().scheduler_lock.Lock();
+ }
+
+ ~KScopedSchedulerLockAndSleep() {
+ // Register the sleep.
+ if (timeout_tick > 0) {
+ kernel.TimeManager().ScheduleTimeEvent(thread, timeout_tick);
+ }
+
+ // Unlock the scheduler.
+ kernel.GlobalSchedulerContext().scheduler_lock.Unlock();
+ }
+
+ void CancelSleep() {
+ timeout_tick = 0;
+ }
+
+private:
+ KernelCore& kernel;
+ KThread* thread{};
+ s64 timeout_tick{};
+};
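+
+// Illustrative usage (editor's sketch): a wait path locks the scheduler, decides whether to
+// sleep, and cancels the timeout when the wait is satisfied early:
+//
+//     KScopedSchedulerLockAndSleep slp{kernel, thread, timeout};
+//     if (already_signaled) {
+//         slp.CancelSleep(); // destructor unlocks without scheduling a time event
+//         return RESULT_SUCCESS;
+//     }
+//     thread->SetState(ThreadState::Waiting);
+//     // destructor schedules the time event, then unlocks the scheduler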
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
new file mode 100644
index 000000000..82f72a0fe
--- /dev/null
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -0,0 +1,171 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
+ KSynchronizationObject** objects, const s32 num_objects,
+ s64 timeout) {
+ // Allocate space on stack for thread nodes.
+ std::vector<ThreadListNode> thread_nodes(num_objects);
+
+ // Prepare for wait.
+ KThread* thread = kernel.CurrentScheduler()->GetCurrentThread();
+
+ {
+        // Set up the scheduling lock and sleep.
+ KScopedSchedulerLockAndSleep slp{kernel, thread, timeout};
+
+ // Check if any of the objects are already signaled.
+ for (auto i = 0; i < num_objects; ++i) {
+ ASSERT(objects[i] != nullptr);
+
+ if (objects[i]->IsSignaled()) {
+ *out_index = i;
+ slp.CancelSleep();
+ return RESULT_SUCCESS;
+ }
+ }
+
+ // Check if the timeout is zero.
+ if (timeout == 0) {
+ slp.CancelSleep();
+ return ResultTimedOut;
+ }
+
+ // Check if the thread should terminate.
+ if (thread->IsTerminationRequested()) {
+ slp.CancelSleep();
+ return ResultTerminationRequested;
+ }
+
+ // Check if waiting was canceled.
+ if (thread->IsWaitCancelled()) {
+ slp.CancelSleep();
+ thread->ClearWaitCancelled();
+ return ResultCancelled;
+ }
+
+ // Add the waiters.
+ for (auto i = 0; i < num_objects; ++i) {
+ thread_nodes[i].thread = thread;
+ thread_nodes[i].next = nullptr;
+
+ if (objects[i]->thread_list_tail == nullptr) {
+ objects[i]->thread_list_head = std::addressof(thread_nodes[i]);
+ } else {
+ objects[i]->thread_list_tail->next = std::addressof(thread_nodes[i]);
+ }
+
+ objects[i]->thread_list_tail = std::addressof(thread_nodes[i]);
+ }
+
+ // For debugging only
+ thread->SetWaitObjectsForDebugging({objects, static_cast<std::size_t>(num_objects)});
+
+ // Mark the thread as waiting.
+ thread->SetCancellable();
+ thread->SetSyncedObject(nullptr, ResultTimedOut);
+ thread->SetState(ThreadState::Waiting);
+ thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
+ }
+
+ // The lock/sleep is done, so we should be able to get our result.
+
+ // Thread is no longer cancellable.
+ thread->ClearCancellable();
+
+ // For debugging only
+ thread->SetWaitObjectsForDebugging({});
+
+ // Cancel the timer as needed.
+ kernel.TimeManager().UnscheduleTimeEvent(thread);
+
+ // Get the wait result.
+ ResultCode wait_result{RESULT_SUCCESS};
+ s32 sync_index = -1;
+ {
+ KScopedSchedulerLock lock(kernel);
+ KSynchronizationObject* synced_obj;
+ wait_result = thread->GetWaitResult(std::addressof(synced_obj));
+
+ for (auto i = 0; i < num_objects; ++i) {
+ // Unlink the object from the list.
+ ThreadListNode* prev_ptr =
+ reinterpret_cast<ThreadListNode*>(std::addressof(objects[i]->thread_list_head));
+ ThreadListNode* prev_val = nullptr;
+ ThreadListNode *prev, *tail_prev;
+
+ do {
+ prev = prev_ptr;
+ prev_ptr = prev_ptr->next;
+ tail_prev = prev_val;
+ prev_val = prev_ptr;
+ } while (prev_ptr != std::addressof(thread_nodes[i]));
+
+ if (objects[i]->thread_list_tail == std::addressof(thread_nodes[i])) {
+ objects[i]->thread_list_tail = tail_prev;
+ }
+
+ prev->next = thread_nodes[i].next;
+
+ if (objects[i] == synced_obj) {
+ sync_index = i;
+ }
+ }
+ }
+
+ // Set output.
+ *out_index = sync_index;
+ return wait_result;
+}
+
+KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : Object{kernel} {}
+
+KSynchronizationObject::KSynchronizationObject(KernelCore& kernel, std::string&& name)
+ : Object{kernel, std::move(name)} {}
+
+KSynchronizationObject::~KSynchronizationObject() = default;
+
+void KSynchronizationObject::NotifyAvailable(ResultCode result) {
+ KScopedSchedulerLock lock(kernel);
+
+    // If we're not signaled, there's nothing to notify.
+ if (!this->IsSignaled()) {
+ return;
+ }
+
+ // Iterate over each thread.
+ for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
+ KThread* thread = cur_node->thread;
+ if (thread->GetState() == ThreadState::Waiting) {
+ thread->SetSyncedObject(this, result);
+ thread->SetState(ThreadState::Runnable);
+ }
+ }
+}
+
+std::vector<KThread*> KSynchronizationObject::GetWaitingThreadsForDebugging() const {
+ std::vector<KThread*> threads;
+
+ // If debugging, dump the list of waiters.
+ {
+ KScopedSchedulerLock lock(kernel);
+ for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
+ threads.emplace_back(cur_node->thread);
+ }
+ }
+
+ return threads;
+}
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h
new file mode 100644
index 000000000..5803718fd
--- /dev/null
+++ b/src/core/hle/kernel/k_synchronization_object.h
@@ -0,0 +1,59 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <vector>
+
+#include "core/hle/kernel/object.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+class KernelCore;
+class Synchronization;
+class KThread;
+
+/// Class that represents a Kernel object that a thread can be waiting on
+class KSynchronizationObject : public Object {
+public:
+ struct ThreadListNode {
+ ThreadListNode* next{};
+ KThread* thread{};
+ };
+
+ [[nodiscard]] static ResultCode Wait(KernelCore& kernel, s32* out_index,
+ KSynchronizationObject** objects, const s32 num_objects,
+ s64 timeout);
+
+ [[nodiscard]] virtual bool IsSignaled() const = 0;
+
+ [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;
+
+protected:
+ explicit KSynchronizationObject(KernelCore& kernel);
+ explicit KSynchronizationObject(KernelCore& kernel, std::string&& name);
+ virtual ~KSynchronizationObject();
+
+ void NotifyAvailable(ResultCode result);
+ void NotifyAvailable() {
+ return this->NotifyAvailable(RESULT_SUCCESS);
+ }
+
+private:
+ ThreadListNode* thread_list_head{};
+ ThreadListNode* thread_list_tail{};
+};
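+
+// Illustrative usage (editor's sketch; event_a/event_b are hypothetical objects): a
+// multi-wait over two synchronization objects:
+//
+//     s32 out_index{-1};
+//     KSynchronizationObject* objects[]{event_a, event_b};
+//     const ResultCode result =
+//         KSynchronizationObject::Wait(kernel, &out_index, objects, 2, timeout_ns);
+//     // On success, out_index identifies which object was signaled.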
+
+// Specialization of DynamicObjectCast for KSynchronizationObjects
+template <>
+inline std::shared_ptr<KSynchronizationObject> DynamicObjectCast<KSynchronizationObject>(
+ std::shared_ptr<Object> object) {
+ if (object != nullptr && object->IsWaitable()) {
+ return std::static_pointer_cast<KSynchronizationObject>(object);
+ }
+ return nullptr;
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
new file mode 100644
index 000000000..e5620da5a
--- /dev/null
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -0,0 +1,1048 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <cinttypes>
+#include <optional>
+#include <vector>
+
+#include "common/assert.h"
+#include "common/bit_util.h"
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+#include "common/fiber.h"
+#include "common/logging/log.h"
+#include "common/scope_exit.h"
+#include "common/thread_queue_list.h"
+#include "core/core.h"
+#include "core/cpu_manager.h"
+#include "core/hardware_properties.h"
+#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_condition_variable.h"
+#include "core/hle/kernel/k_resource_limit.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_thread_queue.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/memory/memory_layout.h"
+#include "core/hle/kernel/object.h"
+#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/svc_results.h"
+#include "core/hle/kernel/time_manager.h"
+#include "core/hle/result.h"
+#include "core/memory.h"
+
+#ifdef ARCHITECTURE_x86_64
+#include "core/arm/dynarmic/arm_dynarmic_32.h"
+#include "core/arm/dynarmic/arm_dynarmic_64.h"
+#endif
+
+namespace {
+static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
+ u32 entry_point, u32 arg) {
+ context = {};
+ context.cpu_registers[0] = arg;
+ context.cpu_registers[15] = entry_point;
+ context.cpu_registers[13] = stack_top;
+}
+
+static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top,
+ VAddr entry_point, u64 arg) {
+ context = {};
+ context.cpu_registers[0] = arg;
+ context.pc = entry_point;
+ context.sp = stack_top;
+ // TODO(merry): Perform a hardware test to determine the below value.
+ context.fpcr = 0;
+}
+} // namespace
+
+namespace Kernel {
+
+KThread::KThread(KernelCore& kernel)
+ : KSynchronizationObject{kernel}, activity_pause_lock{kernel} {}
+KThread::~KThread() = default;
+
+ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
+ s32 virt_core, Process* owner, ThreadType type) {
+ // Assert parameters are valid.
+ ASSERT((type == ThreadType::Main) ||
+ (Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
+ ASSERT((owner != nullptr) || (type != ThreadType::User));
+ ASSERT(0 <= virt_core && virt_core < static_cast<s32>(Common::BitSize<u64>()));
+
+ // Convert the virtual core to a physical core.
+ const s32 phys_core = Core::Hardware::VirtualToPhysicalCoreMap[virt_core];
+ ASSERT(0 <= phys_core && phys_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+
+ // First, clear the TLS address.
+ tls_address = {};
+
+ // Next, assert things based on the type.
+ switch (type) {
+ case ThreadType::Main:
+ ASSERT(arg == 0);
+ [[fallthrough]];
+ case ThreadType::HighPriority:
+ [[fallthrough]];
+ case ThreadType::User:
+ ASSERT(((owner == nullptr) ||
+ (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
+ ASSERT(((owner == nullptr) ||
+ (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask()));
+ break;
+ case ThreadType::Kernel:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
+ break;
+ }
+ thread_type_for_debugging = type;
+
+ // Set the ideal core ID and affinity mask.
+ virtual_ideal_core_id = virt_core;
+ physical_ideal_core_id = phys_core;
+ virtual_affinity_mask = 1ULL << virt_core;
+ physical_affinity_mask.SetAffinity(phys_core, true);
+
+ // Set the thread state.
+ thread_state = (type == ThreadType::Main) ? ThreadState::Runnable : ThreadState::Initialized;
+
+ // Set TLS address.
+ tls_address = 0;
+
+ // Set parent and condvar tree.
+ parent = nullptr;
+ condvar_tree = nullptr;
+
+ // Set sync booleans.
+ signaled = false;
+ termination_requested = false;
+ wait_cancelled = false;
+ cancellable = false;
+
+ // Set core ID and wait result.
+ core_id = phys_core;
+ wait_result = ResultNoSynchronizationObject;
+
+ // Set priorities.
+ priority = prio;
+ base_priority = prio;
+
+ // Set sync object and waiting lock to null.
+ synced_object = nullptr;
+
+ // Initialize sleeping queue.
+ sleeping_queue = nullptr;
+
+ // Set suspend flags.
+ suspend_request_flags = 0;
+ suspend_allowed_flags = static_cast<u32>(ThreadState::SuspendFlagMask);
+
+ // We're neither debug attached, nor are we nesting our priority inheritance.
+ debug_attached = false;
+ priority_inheritance_count = 0;
+
+ // We haven't been scheduled, and we have done no light IPC.
+ schedule_count = -1;
+ last_scheduled_tick = 0;
+ light_ipc_data = nullptr;
+
+ // We're not waiting for a lock, and we haven't disabled migration.
+ lock_owner = nullptr;
+ num_core_migration_disables = 0;
+
+ // We have no waiters, but we do have an entrypoint.
+ num_kernel_waiters = 0;
+
+ // Set our current core id.
+ current_core_id = phys_core;
+
+ // We haven't released our resource limit hint, and we've spent no time on the cpu.
+ resource_limit_release_hint = false;
+ cpu_time = 0;
+
+ // Clear our stack parameters.
+ std::memset(static_cast<void*>(std::addressof(GetStackParameters())), 0,
+ sizeof(StackParameters));
+
+    // Set up the TLS, if needed.
+ if (type == ThreadType::User) {
+ tls_address = owner->CreateTLSRegion();
+ }
+
+ // Set parent, if relevant.
+ if (owner != nullptr) {
+ parent = owner;
+ parent->IncrementThreadCount();
+ }
+
+ // Initialize thread context.
+ ResetThreadContext64(thread_context_64, user_stack_top, func, arg);
+ ResetThreadContext32(thread_context_32, static_cast<u32>(user_stack_top),
+ static_cast<u32>(func), static_cast<u32>(arg));
+
+    // Set up the stack parameters.
+ StackParameters& sp = GetStackParameters();
+ sp.cur_thread = this;
+ sp.disable_count = 1;
+ SetInExceptionHandler();
+
+ // Set thread ID.
+ thread_id = kernel.CreateNewThreadID();
+
+ // We initialized!
+ initialized = true;
+
+ // Register ourselves with our parent process.
+ if (parent != nullptr) {
+ parent->RegisterThread(this);
+ if (parent->IsSuspended()) {
+ RequestSuspend(SuspendType::Process);
+ }
+ }
+
+ return RESULT_SUCCESS;
+}
+
+ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
+ VAddr user_stack_top, s32 prio, s32 core, Process* owner,
+ ThreadType type) {
+ // Initialize the thread.
+ R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
+
+ return RESULT_SUCCESS;
+}
+
+void KThread::Finalize() {
+ // If the thread has an owner process, unregister it.
+ if (parent != nullptr) {
+ parent->UnregisterThread(this);
+ }
+
+ // If the thread has a local region, delete it.
+ if (tls_address != 0) {
+ parent->FreeTLSRegion(tls_address);
+ }
+
+ // Release any waiters.
+ {
+ ASSERT(lock_owner == nullptr);
+ KScopedSchedulerLock sl{kernel};
+
+ auto it = waiter_list.begin();
+ while (it != waiter_list.end()) {
+ // The thread shouldn't be a kernel waiter.
+ it->SetLockOwner(nullptr);
+ it->SetSyncedObject(nullptr, ResultInvalidState);
+ it->Wakeup();
+ it = waiter_list.erase(it);
+ }
+ }
+
+ // Decrement the parent process's thread count.
+ if (parent != nullptr) {
+ parent->DecrementThreadCount();
+ parent->GetResourceLimit()->Release(LimitableResource::Threads, 1);
+ }
+}
+
+bool KThread::IsSignaled() const {
+ return signaled;
+}
+
+void KThread::Wakeup() {
+ KScopedSchedulerLock sl{kernel};
+
+ if (GetState() == ThreadState::Waiting) {
+ if (sleeping_queue != nullptr) {
+ sleeping_queue->WakeupThread(this);
+ } else {
+ SetState(ThreadState::Runnable);
+ }
+ }
+}
+
+void KThread::StartTermination() {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Release user exception and unpin, if relevant.
+ if (parent != nullptr) {
+ parent->ReleaseUserException(this);
+ if (parent->GetPinnedThread(GetCurrentCoreId(kernel)) == this) {
+ parent->UnpinCurrentThread();
+ }
+ }
+
+ // Set state to terminated.
+ SetState(ThreadState::Terminated);
+
+ // Clear the thread's status as running in parent.
+ if (parent != nullptr) {
+ parent->ClearRunningThread(this);
+ }
+
+ // Signal.
+ signaled = true;
+ NotifyAvailable();
+
+ // Clear previous thread in KScheduler.
+ KScheduler::ClearPreviousThread(kernel, this);
+
+ // Register terminated dpc flag.
+ RegisterDpc(DpcFlag::Terminated);
+}
+
+void KThread::Pin() {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Set ourselves as pinned.
+ GetStackParameters().is_pinned = true;
+
+ // Disable core migration.
+ ASSERT(num_core_migration_disables == 0);
+ {
+ ++num_core_migration_disables;
+
+ // Save our ideal state to restore when we're unpinned.
+ original_physical_ideal_core_id = physical_ideal_core_id;
+ original_physical_affinity_mask = physical_affinity_mask;
+
+ // Bind ourselves to this core.
+ const s32 active_core = GetActiveCore();
+ const s32 current_core = GetCurrentCoreId(kernel);
+
+ SetActiveCore(current_core);
+ physical_ideal_core_id = current_core;
+ physical_affinity_mask.SetAffinityMask(1ULL << current_core);
+
+ if (active_core != current_core || physical_affinity_mask.GetAffinityMask() !=
+ original_physical_affinity_mask.GetAffinityMask()) {
+ KScheduler::OnThreadAffinityMaskChanged(kernel, this, original_physical_affinity_mask,
+ active_core);
+ }
+ }
+
+ // Disallow performing thread suspension.
+ {
+ // Update our allow flags.
+ suspend_allowed_flags &= ~(1 << (static_cast<u32>(SuspendType::Thread) +
+ static_cast<u32>(ThreadState::SuspendShift)));
+
+ // Update our state.
+ const ThreadState old_state = thread_state;
+ thread_state = static_cast<ThreadState>(GetSuspendFlags() |
+ static_cast<u32>(old_state & ThreadState::Mask));
+ if (thread_state != old_state) {
+ KScheduler::OnThreadStateChanged(kernel, this, old_state);
+ }
+ }
+
+ // TODO(bunnei): Update our SVC access permissions.
+ ASSERT(parent != nullptr);
+}
+
+void KThread::Unpin() {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Set ourselves as unpinned.
+ GetStackParameters().is_pinned = false;
+
+ // Enable core migration.
+ ASSERT(num_core_migration_disables == 1);
+ {
+ num_core_migration_disables--;
+
+ // Restore our original state.
+ const KAffinityMask old_mask = physical_affinity_mask;
+
+ physical_ideal_core_id = original_physical_ideal_core_id;
+ physical_affinity_mask = original_physical_affinity_mask;
+
+ if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+ const s32 active_core = GetActiveCore();
+
+ if (!physical_affinity_mask.GetAffinity(active_core)) {
+ if (physical_ideal_core_id >= 0) {
+ SetActiveCore(physical_ideal_core_id);
+ } else {
+ SetActiveCore(static_cast<s32>(
+ Common::BitSize<u64>() - 1 -
+ std::countl_zero(physical_affinity_mask.GetAffinityMask())));
+ }
+ }
+ KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core);
+ }
+ }
+
+ // Allow performing thread suspension (if termination hasn't been requested).
+ {
+ // Update our allow flags.
+ if (!IsTerminationRequested()) {
+ suspend_allowed_flags |= (1 << (static_cast<u32>(SuspendType::Thread) +
+ static_cast<u32>(ThreadState::SuspendShift)));
+ }
+
+ // Update our state.
+ const ThreadState old_state = thread_state;
+ thread_state = static_cast<ThreadState>(GetSuspendFlags() |
+ static_cast<u32>(old_state & ThreadState::Mask));
+ if (thread_state != old_state) {
+ KScheduler::OnThreadStateChanged(kernel, this, old_state);
+ }
+ }
+
+ // TODO(bunnei): Update our SVC access permissions.
+ ASSERT(parent != nullptr);
+
+ // Resume any threads that began waiting on us while we were pinned.
+ for (auto it = pinned_waiter_list.begin(); it != pinned_waiter_list.end(); ++it) {
+ if (it->GetState() == ThreadState::Waiting) {
+ it->SetState(ThreadState::Runnable);
+ }
+ }
+}
+
+ResultCode KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
+ KScopedSchedulerLock sl{kernel};
+
+ // Get the virtual mask.
+ *out_ideal_core = virtual_ideal_core_id;
+ *out_affinity_mask = virtual_affinity_mask;
+
+ return RESULT_SUCCESS;
+}
+
+ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
+ KScopedSchedulerLock sl{kernel};
+ ASSERT(num_core_migration_disables >= 0);
+
+ // Select between core mask and original core mask.
+ if (num_core_migration_disables == 0) {
+ *out_ideal_core = physical_ideal_core_id;
+ *out_affinity_mask = physical_affinity_mask.GetAffinityMask();
+ } else {
+ *out_ideal_core = original_physical_ideal_core_id;
+ *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask();
+ }
+
+ return RESULT_SUCCESS;
+}
+
+ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
+ ASSERT(parent != nullptr);
+ ASSERT(v_affinity_mask != 0);
+ KScopedLightLock lk{activity_pause_lock};
+
+ // Set the core mask.
+ u64 p_affinity_mask = 0;
+ {
+ KScopedSchedulerLock sl{kernel};
+ ASSERT(num_core_migration_disables >= 0);
+
+ // If the core id is no-update magic, preserve the ideal core id.
+ if (core_id == Svc::IdealCoreNoUpdate) {
+ core_id = virtual_ideal_core_id;
+ R_UNLESS(((1ULL << core_id) & v_affinity_mask) != 0, ResultInvalidCombination);
+ }
+
+ // Set the virtual core/affinity mask.
+ virtual_ideal_core_id = core_id;
+ virtual_affinity_mask = v_affinity_mask;
+
+ // Translate the virtual core to a physical core.
+ if (core_id >= 0) {
+ core_id = Core::Hardware::VirtualToPhysicalCoreMap[core_id];
+ }
+
+ // Translate the virtual affinity mask to a physical one.
+ while (v_affinity_mask != 0) {
+ const u64 next = std::countr_zero(v_affinity_mask);
+ v_affinity_mask &= ~(1ULL << next);
+ p_affinity_mask |= (1ULL << Core::Hardware::VirtualToPhysicalCoreMap[next]);
+ }
+
+ // If we haven't disabled migration, perform an affinity change.
+ if (num_core_migration_disables == 0) {
+ const KAffinityMask old_mask = physical_affinity_mask;
+
+ // Set our new ideals.
+ physical_ideal_core_id = core_id;
+ physical_affinity_mask.SetAffinityMask(p_affinity_mask);
+
+ if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+ const s32 active_core = GetActiveCore();
+
+ if (active_core >= 0 && !physical_affinity_mask.GetAffinity(active_core)) {
+ const s32 new_core = static_cast<s32>(
+ physical_ideal_core_id >= 0
+ ? physical_ideal_core_id
+ : Common::BitSize<u64>() - 1 -
+ std::countl_zero(physical_affinity_mask.GetAffinityMask()));
+ SetActiveCore(new_core);
+ }
+ KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core);
+ }
+ } else {
+ // Otherwise, we edit the original affinity for restoration later.
+ original_physical_ideal_core_id = core_id;
+ original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
+ }
+ }
+
+ // Update the pinned waiter list.
+ {
+ bool retry_update{};
+ bool thread_is_pinned{};
+ do {
+ // Lock the scheduler.
+ KScopedSchedulerLock sl{kernel};
+
+ // Don't do any further management if our termination has been requested.
+ R_SUCCEED_IF(IsTerminationRequested());
+
+ // By default, we won't need to retry.
+ retry_update = false;
+
+ // Check if the thread is currently running.
+ bool thread_is_current{};
+ s32 thread_core;
+ for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
+ ++thread_core) {
+ if (kernel.Scheduler(thread_core).GetCurrentThread() == this) {
+ thread_is_current = true;
+ break;
+ }
+ }
+
+ // If the thread is currently running, check whether it's no longer allowed under the
+ // new mask.
+ if (thread_is_current && ((1ULL << thread_core) & p_affinity_mask) == 0) {
+ // If the thread is pinned, we want to wait until it's not pinned.
+ if (GetStackParameters().is_pinned) {
+ // Verify that the current thread isn't terminating.
+ R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
+ ResultTerminationRequested);
+
+ // Note that the thread was pinned.
+ thread_is_pinned = true;
+
+ // Wait until the thread isn't pinned any more.
+ pinned_waiter_list.push_back(GetCurrentThread(kernel));
+ GetCurrentThread(kernel).SetState(ThreadState::Waiting);
+ } else {
+ // If the thread isn't pinned, release the scheduler lock and retry until it's
+ // not current.
+ retry_update = true;
+ }
+ }
+ } while (retry_update);
+
+ // If the thread was pinned, it no longer is, and we should remove the current thread from
+ // our waiter list.
+ if (thread_is_pinned) {
+ // Lock the scheduler.
+ KScopedSchedulerLock sl{kernel};
+
+ // Remove from the list.
+ pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
+ }
+ }
+
+ return RESULT_SUCCESS;
+}
+
+void KThread::SetBasePriority(s32 value) {
+ ASSERT(Svc::HighestThreadPriority <= value && value <= Svc::LowestThreadPriority);
+
+ KScopedSchedulerLock sl{kernel};
+
+ // Change our base priority.
+ base_priority = value;
+
+ // Perform a priority restoration.
+ RestorePriority(kernel, this);
+}
+
+void KThread::RequestSuspend(SuspendType type) {
+ KScopedSchedulerLock sl{kernel};
+
+ // Note the request in our flags.
+ suspend_request_flags |=
+ (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
+
+ // Try to perform the suspend.
+ TrySuspend();
+}
+
+void KThread::Resume(SuspendType type) {
+ KScopedSchedulerLock sl{kernel};
+
+ // Clear the request in our flags.
+ suspend_request_flags &=
+ ~(1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
+
+ // Update our state.
+ const ThreadState old_state = thread_state;
+ thread_state = static_cast<ThreadState>(GetSuspendFlags() |
+ static_cast<u32>(old_state & ThreadState::Mask));
+ if (thread_state != old_state) {
+ KScheduler::OnThreadStateChanged(kernel, this, old_state);
+ }
+}
+
+void KThread::WaitCancel() {
+ KScopedSchedulerLock sl{kernel};
+
+ // Check if we're waiting and cancellable.
+ if (GetState() == ThreadState::Waiting && cancellable) {
+ if (sleeping_queue != nullptr) {
+ sleeping_queue->WakeupThread(this);
+ wait_cancelled = true;
+ } else {
+ SetSyncedObject(nullptr, ResultCancelled);
+ SetState(ThreadState::Runnable);
+ wait_cancelled = false;
+ }
+ } else {
+ // Otherwise, note that we cancelled a wait.
+ wait_cancelled = true;
+ }
+}
+
+void KThread::TrySuspend() {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(IsSuspendRequested());
+
+ // Ensure that we have no waiters.
+ if (GetNumKernelWaiters() > 0) {
+ return;
+ }
+ ASSERT(GetNumKernelWaiters() == 0);
+
+ // Perform the suspend.
+ Suspend();
+}
+
+void KThread::Suspend() {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(IsSuspendRequested());
+
+ // Set our suspend flags in state.
+ const auto old_state = thread_state;
+ thread_state = static_cast<ThreadState>(GetSuspendFlags()) | (old_state & ThreadState::Mask);
+
+ // Note the state change in scheduler.
+ KScheduler::OnThreadStateChanged(kernel, this, old_state);
+}
+
+void KThread::Continue() {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Clear our suspend flags in state.
+ const auto old_state = thread_state;
+ thread_state = old_state & ThreadState::Mask;
+
+ // Note the state change in scheduler.
+ KScheduler::OnThreadStateChanged(kernel, this, old_state);
+}
+
+ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
+ // Lock ourselves.
+ KScopedLightLock lk(activity_pause_lock);
+
+ // Set the activity.
+ {
+ // Lock the scheduler.
+ KScopedSchedulerLock sl{kernel};
+
+ // Verify our state.
+ const auto cur_state = GetState();
+ R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable),
+ ResultInvalidState);
+
+ // Either pause or resume.
+ if (activity == Svc::ThreadActivity::Paused) {
+ // Verify that we're not suspended.
+ R_UNLESS(!IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
+
+ // Suspend.
+ RequestSuspend(SuspendType::Thread);
+ } else {
+ ASSERT(activity == Svc::ThreadActivity::Runnable);
+
+ // Verify that we're suspended.
+ R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
+
+ // Resume.
+ Resume(SuspendType::Thread);
+ }
+ }
+
+ // If the thread is now paused, update the pinned waiter list.
+ if (activity == Svc::ThreadActivity::Paused) {
+ bool thread_is_pinned{};
+ bool thread_is_current{};
+ do {
+ // Lock the scheduler.
+ KScopedSchedulerLock sl{kernel};
+
+ // Don't do any further management if our termination has been requested.
+ R_SUCCEED_IF(IsTerminationRequested());
+
+ // Check whether the thread is pinned.
+ if (GetStackParameters().is_pinned) {
+ // Verify that the current thread isn't terminating.
+ R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
+ ResultTerminationRequested);
+
+ // Note that the thread was pinned and not current.
+ thread_is_pinned = true;
+ thread_is_current = false;
+
+ // Wait until the thread isn't pinned any more.
+ pinned_waiter_list.push_back(GetCurrentThread(kernel));
+ GetCurrentThread(kernel).SetState(ThreadState::Waiting);
+ } else {
+ // Check if the thread is currently running.
+ // If it is, we'll need to retry.
+ thread_is_current = false;
+
+ for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
+ if (kernel.Scheduler(i).GetCurrentThread() == this) {
+ thread_is_current = true;
+ break;
+ }
+ }
+ }
+ } while (thread_is_current);
+
+ // If the thread was pinned, it no longer is, and we should remove the current thread from
+ // our waiter list.
+ if (thread_is_pinned) {
+ // Lock the scheduler.
+ KScopedSchedulerLock sl{kernel};
+
+ // Remove from the list.
+ pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
+ }
+ }
+
+ return RESULT_SUCCESS;
+}
+
+ResultCode KThread::GetThreadContext3(std::vector<u8>& out) {
+ // Lock ourselves.
+ KScopedLightLock lk{activity_pause_lock};
+
+ // Get the context.
+ {
+ // Lock the scheduler.
+ KScopedSchedulerLock sl{kernel};
+
+ // Verify that we're suspended.
+ R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
+
+ // If we're not terminating, get the thread's user context.
+ if (!IsTerminationRequested()) {
+ if (parent->Is64BitProcess()) {
+ // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
+ auto context = GetContext64();
+ context.pstate &= 0xFF0FFE20;
+
+ out.resize(sizeof(context));
+ std::memcpy(out.data(), &context, sizeof(context));
+ } else {
+ // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
+ auto context = GetContext32();
+ context.cpsr &= 0xFF0FFE20;
+
+ out.resize(sizeof(context));
+ std::memcpy(out.data(), &context, sizeof(context));
+ }
+ }
+ }
+
+ return RESULT_SUCCESS;
+}
+
+void KThread::AddWaiterImpl(KThread* thread) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Find the right spot to insert the waiter.
+ auto it = waiter_list.begin();
+ while (it != waiter_list.end()) {
+ if (it->GetPriority() > thread->GetPriority()) {
+ break;
+ }
+ it++;
+ }
+
+ // Keep track of how many kernel waiters we have.
+ if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
+ ASSERT((num_kernel_waiters++) >= 0);
+ }
+
+ // Insert the waiter.
+ waiter_list.insert(it, *thread);
+ thread->SetLockOwner(this);
+}
+
+void KThread::RemoveWaiterImpl(KThread* thread) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Keep track of how many kernel waiters we have.
+ if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
+ ASSERT((num_kernel_waiters--) > 0);
+ }
+
+ // Remove the waiter.
+ waiter_list.erase(waiter_list.iterator_to(*thread));
+ thread->SetLockOwner(nullptr);
+}
+
+void KThread::RestorePriority(KernelCore& kernel, KThread* thread) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ while (true) {
+ // We want to inherit priority where possible.
+ s32 new_priority = thread->GetBasePriority();
+ if (thread->HasWaiters()) {
+ new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
+ }
+
+ // If the priority we would inherit is not different from ours, don't do anything.
+ if (new_priority == thread->GetPriority()) {
+ return;
+ }
+
+ // Ensure we don't violate condition variable red black tree invariants.
+ if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
+ BeforeUpdatePriority(kernel, cv_tree, thread);
+ }
+
+ // Change the priority.
+ const s32 old_priority = thread->GetPriority();
+ thread->SetPriority(new_priority);
+
+ // Restore the condition variable, if relevant.
+ if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
+ AfterUpdatePriority(kernel, cv_tree, thread);
+ }
+
+ // Update the scheduler.
+ KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);
+
+ // Keep the lock owner up to date.
+ KThread* lock_owner = thread->GetLockOwner();
+ if (lock_owner == nullptr) {
+ return;
+ }
+
+ // Update the thread in the lock owner's sorted list, and continue inheriting.
+ lock_owner->RemoveWaiterImpl(thread);
+ lock_owner->AddWaiterImpl(thread);
+ thread = lock_owner;
+ }
+}
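+
+// A minimal sketch of the inheritance walk above, with illustrative values: if
+// thread A (priority 10) blocks on a mutex held by B (base priority 20), A sits
+// at the front of B's priority-sorted waiter list, so for thread == B:
+//
+//   new_priority = std::min(20, 10);    // == 10, inherited from A
+//   B->SetPriority(10);
+//   thread = B->GetLockOwner();         // walk up the chain if B is blocked too
+//
+// The loop stops once a thread's priority is already correct or it owns no lock.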
+
+void KThread::AddWaiter(KThread* thread) {
+ AddWaiterImpl(thread);
+ RestorePriority(kernel, this);
+}
+
+void KThread::RemoveWaiter(KThread* thread) {
+ RemoveWaiterImpl(thread);
+ RestorePriority(kernel, this);
+}
+
+KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ s32 num_waiters{};
+ KThread* next_lock_owner{};
+ auto it = waiter_list.begin();
+ while (it != waiter_list.end()) {
+ if (it->GetAddressKey() == key) {
+ KThread* thread = std::addressof(*it);
+
+ // Keep track of how many kernel waiters we have.
+ if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
+ ASSERT((num_kernel_waiters--) > 0);
+ }
+ it = waiter_list.erase(it);
+
+ // Update the next lock owner.
+ if (next_lock_owner == nullptr) {
+ next_lock_owner = thread;
+ next_lock_owner->SetLockOwner(nullptr);
+ } else {
+ next_lock_owner->AddWaiterImpl(thread);
+ }
+ num_waiters++;
+ } else {
+ it++;
+ }
+ }
+
+ // Do priority updates, if we have a next owner.
+ if (next_lock_owner) {
+ RestorePriority(kernel, this);
+ RestorePriority(kernel, next_lock_owner);
+ }
+
+ // Return output.
+ *out_num_waiters = num_waiters;
+ return next_lock_owner;
+}
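+
+// Hedged usage sketch (the mutex-unlock caller shown here is an assumption):
+//
+//   s32 num_waiters{};
+//   KThread* next_owner = owner->RemoveWaiterByKey(&num_waiters, mutex_addr);
+//   if (next_owner != nullptr) {
+//       // next_owner is the best-priority waiter on mutex_addr; any other
+//       // waiters on the same key were re-parented onto it via AddWaiterImpl.
+//   }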
+
+ResultCode KThread::Run() {
+ while (true) {
+ KScopedSchedulerLock lk{kernel};
+
+ // If either this thread or the current thread is requesting termination, note it.
+ R_UNLESS(!IsTerminationRequested(), ResultTerminationRequested);
+ R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested);
+
+ // Ensure our thread state is correct.
+ R_UNLESS(GetState() == ThreadState::Initialized, ResultInvalidState);
+
+ // If the current thread has been asked to suspend, suspend it and retry.
+ if (GetCurrentThread(kernel).IsSuspended()) {
+ GetCurrentThread(kernel).Suspend();
+ continue;
+ }
+
+ // If we're not a kernel thread and we've been asked to suspend, suspend ourselves.
+ if (IsUserThread() && IsSuspended()) {
+ Suspend();
+ }
+
+ // Set our state and finish.
+ SetState(ThreadState::Runnable);
+ return RESULT_SUCCESS;
+ }
+}
+
+void KThread::Exit() {
+ ASSERT(this == GetCurrentThreadPointer(kernel));
+
+ // Release the thread resource hint from parent.
+ if (parent != nullptr) {
+ // TODO(bunnei): Hint that the resource is about to be released.
+ resource_limit_release_hint = true;
+ }
+
+ // Perform termination.
+ {
+ KScopedSchedulerLock sl{kernel};
+
+ // Disallow all suspension.
+ suspend_allowed_flags = 0;
+
+ // Start termination.
+ StartTermination();
+ }
+}
+
+ResultCode KThread::Sleep(s64 timeout) {
+ ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(this == GetCurrentThreadPointer(kernel));
+ ASSERT(timeout > 0);
+
+ {
+ // Set up the scheduling lock and sleep.
+ KScopedSchedulerLockAndSleep slp{kernel, this, timeout};
+
+ // Check if the thread should terminate.
+ if (IsTerminationRequested()) {
+ slp.CancelSleep();
+ return ResultTerminationRequested;
+ }
+
+ // Mark the thread as waiting.
+ SetState(ThreadState::Waiting);
+ SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
+ }
+
+ // The lock/sleep is done.
+
+ // Cancel the timer.
+ kernel.TimeManager().UnscheduleTimeEvent(this);
+
+ return RESULT_SUCCESS;
+}
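+
+// Illustrative call, assuming the timeout is in nanoseconds as scheduled via
+// the TimeManager above (values are examples only):
+//
+//   if (const auto result = GetCurrentThread(kernel).Sleep(1'000'000); // ~1ms
+//       result.IsError()) { /* ResultTerminationRequested */ }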
+
+void KThread::SetState(ThreadState state) {
+ KScopedSchedulerLock sl{kernel};
+
+ // Clear debugging state
+ SetMutexWaitAddressForDebugging({});
+ SetWaitReasonForDebugging({});
+
+ const ThreadState old_state = thread_state;
+ thread_state =
+ static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
+ if (thread_state != old_state) {
+ KScheduler::OnThreadStateChanged(kernel, this, old_state);
+ }
+}
+
+std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
+ return host_context;
+}
+
+ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags,
+ std::string name, VAddr entry_point,
+ u32 priority, u64 arg, s32 processor_id,
+ VAddr stack_top, Process* owner_process) {
+ std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
+ void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
+ return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top,
+ owner_process, std::move(init_func), init_func_parameter);
+}
+
+ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags,
+ std::string name, VAddr entry_point,
+ u32 priority, u64 arg, s32 processor_id,
+ VAddr stack_top, Process* owner_process,
+ std::function<void(void*)>&& thread_start_func,
+ void* thread_start_parameter) {
+ auto& kernel = system.Kernel();
+
+ std::shared_ptr<KThread> thread = std::make_shared<KThread>(kernel);
+
+ if (const auto result =
+ thread->InitializeThread(thread.get(), entry_point, arg, stack_top, priority,
+ processor_id, owner_process, type_flags);
+ result.IsError()) {
+ return result;
+ }
+
+ thread->name = name;
+
+ auto& scheduler = kernel.GlobalSchedulerContext();
+ scheduler.AddThread(thread);
+
+ thread->host_context =
+ std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
+
+ return MakeResult<std::shared_ptr<KThread>>(std::move(thread));
+}
+
+KThread* GetCurrentThreadPointer(KernelCore& kernel) {
+ return kernel.GetCurrentEmuThread();
+}
+
+KThread& GetCurrentThread(KernelCore& kernel) {
+ return *GetCurrentThreadPointer(kernel);
+}
+
+s32 GetCurrentCoreId(KernelCore& kernel) {
+ return GetCurrentThread(kernel).GetCurrentCore();
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
new file mode 100644
index 000000000..c8ac656a4
--- /dev/null
+++ b/src/core/hle/kernel/k_thread.h
@@ -0,0 +1,768 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <span>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <boost/intrusive/list.hpp>
+
+#include "common/common_types.h"
+#include "common/intrusive_red_black_tree.h"
+#include "common/spin_lock.h"
+#include "core/arm/arm_interface.h"
+#include "core/hle/kernel/k_affinity_mask.h"
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/object.h"
+#include "core/hle/kernel/svc_common.h"
+#include "core/hle/kernel/svc_types.h"
+#include "core/hle/result.h"
+
+namespace Common {
+class Fiber;
+}
+
+namespace Core {
+class ARM_Interface;
+class System;
+} // namespace Core
+
+namespace Kernel {
+
+class GlobalSchedulerContext;
+class KernelCore;
+class Process;
+class KScheduler;
+class KThreadQueue;
+
+using KThreadFunction = VAddr;
+
+enum class ThreadType : u32 {
+ Main = 0,
+ Kernel = 1,
+ HighPriority = 2,
+ User = 3,
+};
+DECLARE_ENUM_FLAG_OPERATORS(ThreadType);
+
+enum class SuspendType : u32 {
+ Process = 0,
+ Thread = 1,
+ Debug = 2,
+ Backtrace = 3,
+ Init = 4,
+
+ Count,
+};
+
+enum class ThreadState : u16 {
+ Initialized = 0,
+ Waiting = 1,
+ Runnable = 2,
+ Terminated = 3,
+
+ SuspendShift = 4,
+ Mask = (1 << SuspendShift) - 1,
+
+ ProcessSuspended = (1 << (0 + SuspendShift)),
+ ThreadSuspended = (1 << (1 + SuspendShift)),
+ DebugSuspended = (1 << (2 + SuspendShift)),
+ BacktraceSuspended = (1 << (3 + SuspendShift)),
+ InitSuspended = (1 << (4 + SuspendShift)),
+
+ SuspendFlagMask = ((1 << 5) - 1) << SuspendShift,
+};
+DECLARE_ENUM_FLAG_OPERATORS(ThreadState);
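+
+// Worked example of the encoding above: the bits below SuspendShift hold the
+// scheduling state, and the bits at SuspendShift and above hold pending
+// suspend reasons. A runnable thread with a thread-suspend pending has raw
+// state 0x22:
+//
+//   raw = ThreadState::Runnable | ThreadState::ThreadSuspended;  // 0x02 | 0x20
+//   raw & ThreadState::Mask;            // ThreadState::Runnable (0x02)
+//   raw & ThreadState::SuspendFlagMask; // ThreadState::ThreadSuspended (0x20)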
+
+enum class DpcFlag : u32 {
+ Terminating = (1 << 0),
+ Terminated = (1 << 1),
+};
+
+enum class ThreadWaitReasonForDebugging : u32 {
+ None, ///< Thread is not waiting
+ Sleep, ///< Thread is waiting due to a SleepThread SVC
+ IPC, ///< Thread is waiting for the reply from an IPC request
+ Synchronization, ///< Thread is waiting due to a WaitSynchronization SVC
+ ConditionVar, ///< Thread is waiting due to a WaitProcessWideKey SVC
+ Arbitration, ///< Thread is waiting due to a SignalToAddress/WaitForAddress SVC
+ Suspended, ///< Thread is waiting due to process suspension
+};
+
+[[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel);
+[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel);
+[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel);
+
+class KThread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> {
+ friend class KScheduler;
+ friend class Process;
+
+public:
+ static constexpr s32 DefaultThreadPriority = 44;
+ static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1;
+
+ explicit KThread(KernelCore& kernel);
+ ~KThread() override;
+
+public:
+ using ThreadContext32 = Core::ARM_Interface::ThreadContext32;
+ using ThreadContext64 = Core::ARM_Interface::ThreadContext64;
+ using WaiterList = boost::intrusive::list<KThread>;
+
+ /**
+ * Creates and returns a new thread. The new thread is immediately scheduled.
+ * @param system The instance of the whole system
+ * @param type_flags The type of thread to create
+ * @param name The friendly name desired for the thread
+ * @param entry_point The address at which the thread should start execution
+ * @param priority The thread's priority
+ * @param arg User data to pass to the thread
+ * @param processor_id The ID(s) of the processors on which the thread should run
+ * @param stack_top The address of the thread's stack top
+ * @param owner_process The parent process for the thread; if null, it's a kernel thread
+ * @return A shared pointer to the newly created thread
+ */
+ [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> Create(
+ Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
+ u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process);
+
+ /**
+ * Creates and returns a new thread. The new thread is immediately scheduled.
+ * @param system The instance of the whole system
+ * @param type_flags The type of thread to create
+ * @param name The friendly name desired for the thread
+ * @param entry_point The address at which the thread should start execution
+ * @param priority The thread's priority
+ * @param arg User data to pass to the thread
+ * @param processor_id The ID(s) of the processors on which the thread should run
+ * @param stack_top The address of the thread's stack top
+ * @param owner_process The parent process for the thread; if null, it's a kernel thread
+ * @param thread_start_func The function where the host context will start
+ * @param thread_start_parameter The parameter which will be passed to the host context on init
+ * @return A shared pointer to the newly created thread
+ */
+ [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> Create(
+ Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
+ u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process,
+ std::function<void(void*)>&& thread_start_func, void* thread_start_parameter);
+
+ [[nodiscard]] std::string GetName() const override {
+ return name;
+ }
+
+ void SetName(std::string new_name) {
+ name = std::move(new_name);
+ }
+
+ [[nodiscard]] std::string GetTypeName() const override {
+ return "Thread";
+ }
+
+ static constexpr HandleType HANDLE_TYPE = HandleType::Thread;
+ [[nodiscard]] HandleType GetHandleType() const override {
+ return HANDLE_TYPE;
+ }
+
+ /**
+ * Gets the thread's current priority
+ * @return The current thread's priority
+ */
+ [[nodiscard]] s32 GetPriority() const {
+ return priority;
+ }
+
+ /**
+ * Sets the thread's current priority.
+ * @param priority The new priority.
+ */
+ void SetPriority(s32 value) {
+ priority = value;
+ }
+
+ /**
+ * Gets the thread's nominal priority.
+ * @return The current thread's nominal priority.
+ */
+ [[nodiscard]] s32 GetBasePriority() const {
+ return base_priority;
+ }
+
+ /**
+ * Gets the thread's thread ID
+ * @return The thread's ID
+ */
+ [[nodiscard]] u64 GetThreadID() const {
+ return thread_id;
+ }
+
+ void ContinueIfHasKernelWaiters() {
+ if (GetNumKernelWaiters() > 0) {
+ Continue();
+ }
+ }
+
+ void Wakeup();
+
+ void SetBasePriority(s32 value);
+
+ [[nodiscard]] ResultCode Run();
+
+ void Exit();
+
+ [[nodiscard]] u32 GetSuspendFlags() const {
+ return suspend_allowed_flags & suspend_request_flags;
+ }
+
+ [[nodiscard]] bool IsSuspended() const {
+ return GetSuspendFlags() != 0;
+ }
+
+ [[nodiscard]] bool IsSuspendRequested(SuspendType type) const {
+ return (suspend_request_flags &
+ (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)))) !=
+ 0;
+ }
+
+ [[nodiscard]] bool IsSuspendRequested() const {
+ return suspend_request_flags != 0;
+ }
+
+ void RequestSuspend(SuspendType type);
+
+ void Resume(SuspendType type);
+
+ void TrySuspend();
+
+ void Continue();
+
+ void Suspend();
+
+ void Finalize() override;
+
+ bool IsSignaled() const override;
+
+ void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) {
+ synced_object = obj;
+ wait_result = wait_res;
+ }
+
+ [[nodiscard]] ResultCode GetWaitResult(KSynchronizationObject** out) const {
+ *out = synced_object;
+ return wait_result;
+ }
+
+ /**
+ * Returns the Thread Local Storage address of this thread
+ * @returns VAddr of the thread's TLS
+ */
+ [[nodiscard]] VAddr GetTLSAddress() const {
+ return tls_address;
+ }
+
+ /**
+ * Returns the value of the TPIDR_EL0 Read/Write system register for this thread.
+ * @returns The value of the TPIDR_EL0 register.
+ */
+ [[nodiscard]] u64 GetTPIDR_EL0() const {
+ return thread_context_64.tpidr;
+ }
+
+ /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread.
+ void SetTPIDR_EL0(u64 value) {
+ thread_context_64.tpidr = value;
+ thread_context_32.tpidr = static_cast<u32>(value);
+ }
+
+ [[nodiscard]] ThreadContext32& GetContext32() {
+ return thread_context_32;
+ }
+
+ [[nodiscard]] const ThreadContext32& GetContext32() const {
+ return thread_context_32;
+ }
+
+ [[nodiscard]] ThreadContext64& GetContext64() {
+ return thread_context_64;
+ }
+
+ [[nodiscard]] const ThreadContext64& GetContext64() const {
+ return thread_context_64;
+ }
+
+ [[nodiscard]] std::shared_ptr<Common::Fiber>& GetHostContext();
+
+ [[nodiscard]] ThreadState GetState() const {
+ return thread_state & ThreadState::Mask;
+ }
+
+ [[nodiscard]] ThreadState GetRawState() const {
+ return thread_state;
+ }
+
+ void SetState(ThreadState state);
+
+ [[nodiscard]] s64 GetLastScheduledTick() const {
+ return last_scheduled_tick;
+ }
+
+ void SetLastScheduledTick(s64 tick) {
+ last_scheduled_tick = tick;
+ }
+
+ void AddCpuTime([[maybe_unused]] s32 core_id_, s64 amount) {
+ cpu_time += amount;
+ // TODO(bunnei): Debug kernels track per-core tick counts. Should we?
+ }
+
+ [[nodiscard]] s64 GetCpuTime() const {
+ return cpu_time;
+ }
+
+ [[nodiscard]] s32 GetActiveCore() const {
+ return core_id;
+ }
+
+ void SetActiveCore(s32 core) {
+ core_id = core;
+ }
+
+ [[nodiscard]] s32 GetCurrentCore() const {
+ return current_core_id;
+ }
+
+ void SetCurrentCore(s32 core) {
+ current_core_id = core;
+ }
+
+ [[nodiscard]] Process* GetOwnerProcess() {
+ return parent;
+ }
+
+ [[nodiscard]] const Process* GetOwnerProcess() const {
+ return parent;
+ }
+
+ [[nodiscard]] bool IsUserThread() const {
+ return parent != nullptr;
+ }
+
+ [[nodiscard]] KThread* GetLockOwner() const {
+ return lock_owner;
+ }
+
+ void SetLockOwner(KThread* owner) {
+ lock_owner = owner;
+ }
+
+ [[nodiscard]] const KAffinityMask& GetAffinityMask() const {
+ return physical_affinity_mask;
+ }
+
+ [[nodiscard]] ResultCode GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
+
+ [[nodiscard]] ResultCode GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
+
+ [[nodiscard]] ResultCode SetCoreMask(s32 core_id, u64 v_affinity_mask);
+
+ [[nodiscard]] ResultCode SetActivity(Svc::ThreadActivity activity);
+
+ [[nodiscard]] ResultCode Sleep(s64 timeout);
+
+ [[nodiscard]] s64 GetYieldScheduleCount() const {
+ return schedule_count;
+ }
+
+ void SetYieldScheduleCount(s64 count) {
+ schedule_count = count;
+ }
+
+ void WaitCancel();
+
+ [[nodiscard]] bool IsWaitCancelled() const {
+ return wait_cancelled;
+ }
+
+ void ClearWaitCancelled() {
+ wait_cancelled = false;
+ }
+
+ [[nodiscard]] bool IsCancellable() const {
+ return cancellable;
+ }
+
+ void SetCancellable() {
+ cancellable = true;
+ }
+
+ void ClearCancellable() {
+ cancellable = false;
+ }
+
+ [[nodiscard]] bool IsTerminationRequested() const {
+ return termination_requested || GetRawState() == ThreadState::Terminated;
+ }
+
+ struct StackParameters {
+ u8 svc_permission[0x10];
+ std::atomic<u8> dpc_flags;
+ u8 current_svc_id;
+ bool is_calling_svc;
+ bool is_in_exception_handler;
+ bool is_pinned;
+ s32 disable_count;
+ KThread* cur_thread;
+ };
+
+ [[nodiscard]] StackParameters& GetStackParameters() {
+ return stack_parameters;
+ }
+
+ [[nodiscard]] const StackParameters& GetStackParameters() const {
+ return stack_parameters;
+ }
+
+ class QueueEntry {
+ public:
+ constexpr QueueEntry() = default;
+
+ constexpr void Initialize() {
+ prev = nullptr;
+ next = nullptr;
+ }
+
+ constexpr KThread* GetPrev() const {
+ return prev;
+ }
+ constexpr KThread* GetNext() const {
+ return next;
+ }
+ constexpr void SetPrev(KThread* thread) {
+ prev = thread;
+ }
+ constexpr void SetNext(KThread* thread) {
+ next = thread;
+ }
+
+ private:
+ KThread* prev{};
+ KThread* next{};
+ };
+
+ [[nodiscard]] QueueEntry& GetPriorityQueueEntry(s32 core) {
+ return per_core_priority_queue_entry[core];
+ }
+
+ [[nodiscard]] const QueueEntry& GetPriorityQueueEntry(s32 core) const {
+ return per_core_priority_queue_entry[core];
+ }
+
+ void SetSleepingQueue(KThreadQueue* q) {
+ sleeping_queue = q;
+ }
+
+ [[nodiscard]] s32 GetDisableDispatchCount() const {
+ return this->GetStackParameters().disable_count;
+ }
+
+ void DisableDispatch() {
+ ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
+ this->GetStackParameters().disable_count++;
+ }
+
+ void EnableDispatch() {
+ ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
+ this->GetStackParameters().disable_count--;
+ }
+
+ void Pin();
+
+ void Unpin();
+
+ void SetInExceptionHandler() {
+ this->GetStackParameters().is_in_exception_handler = true;
+ }
+
+ void ClearInExceptionHandler() {
+ this->GetStackParameters().is_in_exception_handler = false;
+ }
+
+ [[nodiscard]] bool IsInExceptionHandler() const {
+ return this->GetStackParameters().is_in_exception_handler;
+ }
+
+ void SetIsCallingSvc() {
+ this->GetStackParameters().is_calling_svc = true;
+ }
+
+ void ClearIsCallingSvc() {
+ this->GetStackParameters().is_calling_svc = false;
+ }
+
+ [[nodiscard]] bool IsCallingSvc() const {
+ return this->GetStackParameters().is_calling_svc;
+ }
+
+ [[nodiscard]] u8 GetSvcId() const {
+ return this->GetStackParameters().current_svc_id;
+ }
+
+ void RegisterDpc(DpcFlag flag) {
+ this->GetStackParameters().dpc_flags |= static_cast<u8>(flag);
+ }
+
+ void ClearDpc(DpcFlag flag) {
+ this->GetStackParameters().dpc_flags &= ~static_cast<u8>(flag);
+ }
+
+ [[nodiscard]] u8 GetDpc() const {
+ return this->GetStackParameters().dpc_flags;
+ }
+
+ [[nodiscard]] bool HasDpc() const {
+ return this->GetDpc() != 0;
+ }
+
+ void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) {
+ wait_reason_for_debugging = reason;
+ }
+
+ [[nodiscard]] ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const {
+ return wait_reason_for_debugging;
+ }
+
+ [[nodiscard]] ThreadType GetThreadTypeForDebugging() const {
+ return thread_type_for_debugging;
+ }
+
+ void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
+ wait_objects_for_debugging.clear();
+ wait_objects_for_debugging.reserve(objects.size());
+ for (const auto& object : objects) {
+ wait_objects_for_debugging.emplace_back(object);
+ }
+ }
+
+ [[nodiscard]] const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const {
+ return wait_objects_for_debugging;
+ }
+
+ void SetMutexWaitAddressForDebugging(VAddr address) {
+ mutex_wait_address_for_debugging = address;
+ }
+
+ [[nodiscard]] VAddr GetMutexWaitAddressForDebugging() const {
+ return mutex_wait_address_for_debugging;
+ }
+
+ [[nodiscard]] s32 GetIdealCoreForDebugging() const {
+ return virtual_ideal_core_id;
+ }
+
+ void AddWaiter(KThread* thread);
+
+ void RemoveWaiter(KThread* thread);
+
+ [[nodiscard]] ResultCode GetThreadContext3(std::vector<u8>& out);
+
+ [[nodiscard]] KThread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);
+
+ [[nodiscard]] VAddr GetAddressKey() const {
+ return address_key;
+ }
+
+ [[nodiscard]] u32 GetAddressKeyValue() const {
+ return address_key_value;
+ }
+
+ void SetAddressKey(VAddr key) {
+ address_key = key;
+ }
+
+ void SetAddressKey(VAddr key, u32 val) {
+ address_key = key;
+ address_key_value = val;
+ }
+
+ [[nodiscard]] bool HasWaiters() const {
+ return !waiter_list.empty();
+ }
+
+ [[nodiscard]] s32 GetNumKernelWaiters() const {
+ return num_kernel_waiters;
+ }
+
+ [[nodiscard]] u64 GetConditionVariableKey() const {
+ return condvar_key;
+ }
+
+ [[nodiscard]] u64 GetAddressArbiterKey() const {
+ return condvar_key;
+ }
+
+private:
+ static constexpr size_t PriorityInheritanceCountMax = 10;
+ union SyncObjectBuffer {
+ std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> sync_objects{};
+ std::array<Handle,
+ Svc::ArgumentHandleCountMax*(sizeof(KSynchronizationObject*) / sizeof(Handle))>
+ handles;
+ constexpr SyncObjectBuffer() {}
+ };
+ static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));
+
+ struct ConditionVariableComparator {
+ struct LightCompareType {
+ u64 cv_key{};
+ s32 priority{};
+
+ [[nodiscard]] constexpr u64 GetConditionVariableKey() const {
+ return cv_key;
+ }
+
+ [[nodiscard]] constexpr s32 GetPriority() const {
+ return priority;
+ }
+ };
+
+ template <typename T>
+ requires(std::same_as<T, KThread> || std::same_as<T, LightCompareType>)
+ static constexpr int Compare(const T& lhs, const KThread& rhs) {
+ const u64 l_key = lhs.GetConditionVariableKey();
+ const u64 r_key = rhs.GetConditionVariableKey();
+
+ if (l_key < r_key) {
+ // Sort first by key
+ return -1;
+ } else if (l_key == r_key && lhs.GetPriority() < rhs.GetPriority()) {
+ // And then by priority.
+ return -1;
+ } else {
+ return 1;
+ }
+ }
+ };
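+
+ // Ordering example for the comparator above, with illustrative values:
+ // waiters sort first by condition-variable key, then by priority, so
+ // {key=5, prio=3} precedes {key=5, prio=10}, which precedes {key=7, prio=0};
+ // Compare() returns -1 exactly when lhs should precede rhs.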
+
+ void AddWaiterImpl(KThread* thread);
+
+ void RemoveWaiterImpl(KThread* thread);
+
+ void StartTermination();
+
+ [[nodiscard]] ResultCode Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
+ s32 prio, s32 virt_core, Process* owner, ThreadType type);
+
+ [[nodiscard]] static ResultCode InitializeThread(KThread* thread, KThreadFunction func,
+ uintptr_t arg, VAddr user_stack_top, s32 prio,
+ s32 core, Process* owner, ThreadType type);
+
+ static void RestorePriority(KernelCore& kernel, KThread* thread);
+
+ // For core KThread implementation
+ ThreadContext32 thread_context_32{};
+ ThreadContext64 thread_context_64{};
+ Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};
+ s32 priority{};
+ using ConditionVariableThreadTreeTraits =
+ Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<
+ &KThread::condvar_arbiter_tree_node>;
+ using ConditionVariableThreadTree =
+ ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
+ ConditionVariableThreadTree* condvar_tree{};
+ u64 condvar_key{};
+ u64 virtual_affinity_mask{};
+ KAffinityMask physical_affinity_mask{};
+ u64 thread_id{};
+ std::atomic<s64> cpu_time{};
+ KSynchronizationObject* synced_object{};
+ VAddr address_key{};
+ Process* parent{};
+ VAddr kernel_stack_top{};
+ u32* light_ipc_data{};
+ VAddr tls_address{};
+ KLightLock activity_pause_lock;
+ s64 schedule_count{};
+ s64 last_scheduled_tick{};
+ std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
+ KThreadQueue* sleeping_queue{};
+ WaiterList waiter_list{};
+ WaiterList pinned_waiter_list{};
+ KThread* lock_owner{};
+ u32 address_key_value{};
+ u32 suspend_request_flags{};
+ u32 suspend_allowed_flags{};
+ ResultCode wait_result{RESULT_SUCCESS};
+ s32 base_priority{};
+ s32 physical_ideal_core_id{};
+ s32 virtual_ideal_core_id{};
+ s32 num_kernel_waiters{};
+ s32 current_core_id{};
+ s32 core_id{};
+ KAffinityMask original_physical_affinity_mask{};
+ s32 original_physical_ideal_core_id{};
+ s32 num_core_migration_disables{};
+ ThreadState thread_state{};
+ std::atomic<bool> termination_requested{};
+ bool wait_cancelled{};
+ bool cancellable{};
+ bool signaled{};
+ bool initialized{};
+ bool debug_attached{};
+ s8 priority_inheritance_count{};
+ bool resource_limit_release_hint{};
+ StackParameters stack_parameters{};
+ Common::SpinLock context_guard{};
+
+ // For emulation
+ std::shared_ptr<Common::Fiber> host_context{};
+
+ // For debugging
+ std::vector<KSynchronizationObject*> wait_objects_for_debugging;
+ VAddr mutex_wait_address_for_debugging{};
+ ThreadWaitReasonForDebugging wait_reason_for_debugging{};
+ ThreadType thread_type_for_debugging{};
+ std::string name;
+
+public:
+ using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
+
+ void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key,
+ u32 value) {
+ condvar_tree = tree;
+ condvar_key = cv_key;
+ address_key = address;
+ address_key_value = value;
+ }
+
+ void ClearConditionVariable() {
+ condvar_tree = nullptr;
+ }
+
+ [[nodiscard]] bool IsWaitingForConditionVariable() const {
+ return condvar_tree != nullptr;
+ }
+
+ void SetAddressArbiter(ConditionVariableThreadTree* tree, u64 address) {
+ condvar_tree = tree;
+ condvar_key = address;
+ }
+
+ void ClearAddressArbiter() {
+ condvar_tree = nullptr;
+ }
+
+ [[nodiscard]] bool IsWaitingForAddressArbiter() const {
+ return condvar_tree != nullptr;
+ }
+
+ [[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const {
+ return condvar_tree;
+ }
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread_queue.h b/src/core/hle/kernel/k_thread_queue.h
new file mode 100644
index 000000000..c52eba249
--- /dev/null
+++ b/src/core/hle/kernel/k_thread_queue.h
@@ -0,0 +1,81 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "core/hle/kernel/k_thread.h"
+
+namespace Kernel {
+
+class KThreadQueue {
+public:
+ explicit KThreadQueue(KernelCore& kernel) : kernel{kernel} {}
+
+ bool IsEmpty() const {
+ return wait_list.empty();
+ }
+
+ KThread::WaiterList::iterator begin() {
+ return wait_list.begin();
+ }
+ KThread::WaiterList::iterator end() {
+ return wait_list.end();
+ }
+
+ bool SleepThread(KThread* t) {
+ KScopedSchedulerLock sl{kernel};
+
+ // If the thread needs terminating, don't enqueue it.
+ if (t->IsTerminationRequested()) {
+ return false;
+ }
+
+ // Set the thread's queue and mark it as waiting.
+ t->SetSleepingQueue(this);
+ t->SetState(ThreadState::Waiting);
+
+ // Add the thread to the queue.
+ wait_list.push_back(*t);
+
+ return true;
+ }
+
+ void WakeupThread(KThread* t) {
+ KScopedSchedulerLock sl{kernel};
+
+ // Remove the thread from the queue.
+ wait_list.erase(wait_list.iterator_to(*t));
+
+ // Mark the thread as no longer sleeping.
+ t->SetState(ThreadState::Runnable);
+ t->SetSleepingQueue(nullptr);
+ }
+
+ KThread* WakeupFrontThread() {
+ KScopedSchedulerLock sl{kernel};
+
+ if (wait_list.empty()) {
+ return nullptr;
+ } else {
+ // Remove the thread from the queue.
+ auto it = wait_list.begin();
+ KThread* thread = std::addressof(*it);
+ wait_list.erase(it);
+
+ ASSERT(thread->GetState() == ThreadState::Waiting);
+
+ // Mark the thread as no longer sleeping.
+ thread->SetState(ThreadState::Runnable);
+ thread->SetSleepingQueue(nullptr);
+
+ return thread;
+ }
+ }
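+
+ // Hedged usage sketch (the caller shown is an assumption, not part of this
+ // header): a wait primitive can park the current thread and wake one later:
+ //
+ //   KThreadQueue queue{kernel};
+ //   if (queue.SleepThread(GetCurrentThreadPointer(kernel))) {
+ //       // thread is now Waiting until WakeupThread()/WakeupFrontThread()
+ //   }
+ //   KThread* woken = queue.WakeupFrontThread(); // nullptr if queue is empty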
+
+private:
+ KernelCore& kernel;
+ KThread::WaiterList wait_list{};
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_writable_event.cpp b/src/core/hle/kernel/k_writable_event.cpp
new file mode 100644
index 000000000..25c52edb2
--- /dev/null
+++ b/src/core/hle/kernel/k_writable_event.cpp
@@ -0,0 +1,27 @@
+// Copyright 2021 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/k_event.h"
+#include "core/hle/kernel/k_readable_event.h"
+#include "core/hle/kernel/k_writable_event.h"
+
+namespace Kernel {
+
+KWritableEvent::KWritableEvent(KernelCore& kernel, std::string&& name)
+ : Object{kernel, std::move(name)} {}
+KWritableEvent::~KWritableEvent() = default;
+
+void KWritableEvent::Initialize(KEvent* parent_) {
+ parent = parent_;
+}
+
+ResultCode KWritableEvent::Signal() {
+ return parent->GetReadableEvent()->Signal();
+}
+
+ResultCode KWritableEvent::Clear() {
+ return parent->GetReadableEvent()->Clear();
+}
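+
+// Minimal usage sketch, assuming an event pair already wired via Initialize();
+// the variable name is illustrative:
+//
+//   writable_event->Signal(); // signals the parent KEvent's readable event
+//   writable_event->Clear();  // clears it again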
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_writable_event.h b/src/core/hle/kernel/k_writable_event.h
new file mode 100644
index 000000000..518f5448d
--- /dev/null
+++ b/src/core/hle/kernel/k_writable_event.h
@@ -0,0 +1,44 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "core/hle/kernel/object.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+class KernelCore;
+class KEvent;
+
+class KWritableEvent final : public Object {
+public:
+ explicit KWritableEvent(KernelCore& kernel, std::string&& name);
+ ~KWritableEvent() override;
+
+ std::string GetTypeName() const override {
+ return "KWritableEvent";
+ }
+
+ static constexpr HandleType HANDLE_TYPE = HandleType::WritableEvent;
+ HandleType GetHandleType() const override {
+ return HANDLE_TYPE;
+ }
+
+ void Initialize(KEvent* parent_);
+
+ void Finalize() override {}
+
+ ResultCode Signal();
+ ResultCode Clear();
+
+ KEvent* GetParent() const {
+ return parent;
+ }
+
+private:
+ KEvent* parent{};
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index cabe8d418..b6e6f115e 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -7,15 +7,15 @@
#include <bitset>
#include <functional>
#include <memory>
-#include <mutex>
#include <thread>
-#include <unordered_map>
+#include <unordered_set>
#include <utility>
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/thread.h"
+#include "common/thread_worker.h"
#include "core/arm/arm_interface.h"
#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/exclusive_monitor.h"
@@ -26,19 +26,19 @@
#include "core/device_memory.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/client_port.h"
-#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_resource_limit.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_layout.h"
#include "core/hle/kernel/memory/memory_manager.h"
#include "core/hle/kernel/memory/slab_heap.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/resource_limit.h"
-#include "core/hle/kernel/scheduler.h"
+#include "core/hle/kernel/service_thread.h"
#include "core/hle/kernel/shared_memory.h"
-#include "core/hle/kernel/synchronization.h"
-#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/lock.h"
#include "core/hle/result.h"
@@ -50,26 +50,42 @@ namespace Kernel {
struct KernelCore::Impl {
explicit Impl(Core::System& system, KernelCore& kernel)
- : global_scheduler{kernel}, synchronization{system}, time_manager{system},
- global_handle_table{kernel}, system{system} {}
+ : time_manager{system}, global_handle_table{kernel}, system{system} {}
void SetMulticore(bool is_multicore) {
this->is_multicore = is_multicore;
}
void Initialize(KernelCore& kernel) {
- Shutdown();
+ global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
+
RegisterHostThread();
+ service_thread_manager =
+ std::make_unique<Common::ThreadWorker>(1, "yuzu:ServiceThreadManager");
+ is_phantom_mode_for_singlecore = false;
+
InitializePhysicalCores();
- InitializeSystemResourceLimit(kernel);
+ InitializeSystemResourceLimit(kernel, system);
InitializeMemoryLayout();
InitializePreemption(kernel);
InitializeSchedulers();
InitializeSuspendThreads();
}
+ void InitializeCores() {
+ for (auto& core : cores) {
+ core.Initialize(current_process->Is64BitProcess());
+ }
+ }
+
void Shutdown() {
+ process_list.clear();
+
+ // Ensures all service threads gracefully shut down
+ service_thread_manager.reset();
+ service_threads.clear();
+
next_object_id = 0;
next_kernel_process_id = Process::InitialKIPIDMin;
next_user_process_id = Process::ProcessIDMin;
@@ -81,74 +97,69 @@ struct KernelCore::Impl {
}
}
- for (std::size_t i = 0; i < cores.size(); i++) {
- cores[i].Shutdown();
- schedulers[i].reset();
- }
cores.clear();
- registered_core_threads.reset();
-
- process_list.clear();
current_process = nullptr;
system_resource_limit = nullptr;
global_handle_table.Clear();
- preemption_event = nullptr;
- global_scheduler.Shutdown();
+ preemption_event = nullptr;
named_ports.clear();
- for (auto& core : cores) {
- core.Shutdown();
- }
- cores.clear();
-
exclusive_monitor.reset();
- host_thread_ids.clear();
+
+ // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others
+ next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
}
void InitializePhysicalCores() {
exclusive_monitor =
Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
- for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
- schedulers[i] = std::make_unique<Kernel::Scheduler>(system, i);
- cores.emplace_back(system, i, *schedulers[i], interrupts[i]);
+ for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+ schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i);
+ cores.emplace_back(i, system, *schedulers[i], interrupts);
}
}
void InitializeSchedulers() {
- for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+ for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
cores[i].Scheduler().Initialize();
}
}
// Creates the default system resource limit
- void InitializeSystemResourceLimit(KernelCore& kernel) {
- system_resource_limit = ResourceLimit::Create(kernel);
+ void InitializeSystemResourceLimit(KernelCore& kernel, Core::System& system) {
+ system_resource_limit = std::make_shared<KResourceLimit>(kernel, system);
// If setting the default system values fails, then something seriously wrong has occurred.
- ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x100000000)
+ ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, 0x100000000)
.IsSuccess());
- ASSERT(system_resource_limit->SetLimitValue(ResourceType::Threads, 800).IsSuccess());
- ASSERT(system_resource_limit->SetLimitValue(ResourceType::Events, 700).IsSuccess());
- ASSERT(system_resource_limit->SetLimitValue(ResourceType::TransferMemory, 200).IsSuccess());
- ASSERT(system_resource_limit->SetLimitValue(ResourceType::Sessions, 900).IsSuccess());
+ ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Threads, 800).IsSuccess());
+ ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Events, 700).IsSuccess());
+ ASSERT(system_resource_limit->SetLimitValue(LimitableResource::TransferMemory, 200)
+ .IsSuccess());
+ ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Sessions, 933).IsSuccess());
- if (!system_resource_limit->Reserve(ResourceType::PhysicalMemory, 0) ||
- !system_resource_limit->Reserve(ResourceType::PhysicalMemory, 0x60000)) {
+ // Derived from recent software updates. The kernel reserves 27MB.
+ constexpr u64 kernel_size{0x1b00000};
+ if (!system_resource_limit->Reserve(LimitableResource::PhysicalMemory, kernel_size)) {
UNREACHABLE();
}
+ // Reserve secure applet memory, introduced in firmware 5.0.0
+ constexpr u64 secure_applet_memory_size{0x400000};
+ ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemory,
+ secure_applet_memory_size));
}
void InitializePreemption(KernelCore& kernel) {
preemption_event = Core::Timing::CreateEvent(
"PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
{
- SchedulerLock lock(kernel);
- global_scheduler.PreemptThreads();
+ KScopedSchedulerLock lock(kernel);
+ global_scheduler_context->PreemptThreads();
}
const auto time_interval = std::chrono::nanoseconds{
Core::Timing::msToCycles(std::chrono::milliseconds(10))};
@@ -165,11 +176,9 @@ struct KernelCore::Impl {
std::string name = "Suspend Thread Id:" + std::to_string(i);
std::function<void(void*)> init_func = Core::CpuManager::GetSuspendThreadStartFunc();
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
- const auto type =
- static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND);
- auto thread_res =
- Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast<u32>(i), 0,
- nullptr, std::move(init_func), init_func_parameter);
+ auto thread_res = KThread::Create(system, ThreadType::HighPriority, std::move(name), 0,
+ 0, 0, static_cast<u32>(i), 0, nullptr,
+ std::move(init_func), init_func_parameter);
suspend_threads[i] = std::move(thread_res).Unwrap();
}
@@ -177,69 +186,82 @@ struct KernelCore::Impl {
void MakeCurrentProcess(Process* process) {
current_process = process;
-
if (process == nullptr) {
return;
}
- u32 core_id = GetCurrentHostThreadID();
+ const u32 core_id = GetCurrentHostThreadID();
if (core_id < Core::Hardware::NUM_CPU_CORES) {
system.Memory().SetCurrentPageTable(*process, core_id);
}
}
+ /// Creates a new host thread ID; should only be called by GetHostThreadId
+ u32 AllocateHostThreadId(std::optional<std::size_t> core_id) {
+ if (core_id) {
+ // The first four slots are reserved for CPU core threads
+ ASSERT(*core_id < Core::Hardware::NUM_CPU_CORES);
+ return static_cast<u32>(*core_id);
+ } else {
+ return next_host_thread_id++;
+ }
+ }
+
+ /// Gets the host thread ID for the caller, allocating a new one if this is the first time
+ u32 GetHostThreadId(std::optional<std::size_t> core_id = std::nullopt) {
+ const thread_local auto host_thread_id{AllocateHostThreadId(core_id)};
+ return host_thread_id;
+ }
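+
+ // Note: the thread_local above memoizes the first call's result, so each
+ // host thread allocates its ID exactly once; core threads must therefore
+ // call GetHostThreadId(core_id) before any plain GetHostThreadId() call.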
+
+ /// Gets the dummy KThread for the caller, allocating a new one if this is the first time
+ KThread* GetHostDummyThread() {
+ const thread_local auto thread =
+ KThread::Create(
+ system, ThreadType::Main, fmt::format("DummyThread:{}", GetHostThreadId()), 0,
+ KThread::DefaultThreadPriority, 0, static_cast<u32>(3), 0, nullptr,
+ []([[maybe_unused]] void* arg) { UNREACHABLE(); }, nullptr)
+ .Unwrap();
+ return thread.get();
+ }
+
+ /// Registers a CPU core thread by allocating a host thread ID for it
void RegisterCoreThread(std::size_t core_id) {
- std::unique_lock lock{register_thread_mutex};
+ ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+ const auto this_id = GetHostThreadId(core_id);
if (!is_multicore) {
- single_core_thread_id = std::this_thread::get_id();
+ single_core_thread_id = this_id;
}
- const std::thread::id this_id = std::this_thread::get_id();
- const auto it = host_thread_ids.find(this_id);
- ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
- ASSERT(it == host_thread_ids.end());
- ASSERT(!registered_core_threads[core_id]);
- host_thread_ids[this_id] = static_cast<u32>(core_id);
- registered_core_threads.set(core_id);
}
+ /// Registers a new host thread by allocating a host thread ID for it
void RegisterHostThread() {
- std::unique_lock lock{register_thread_mutex};
- const std::thread::id this_id = std::this_thread::get_id();
- const auto it = host_thread_ids.find(this_id);
- if (it != host_thread_ids.end()) {
- return;
- }
- host_thread_ids[this_id] = registered_thread_ids++;
+ [[maybe_unused]] const auto this_id = GetHostThreadId();
+ [[maybe_unused]] const auto dummy_thread = GetHostDummyThread();
}
- u32 GetCurrentHostThreadID() const {
- const std::thread::id this_id = std::this_thread::get_id();
- if (!is_multicore) {
- if (single_core_thread_id == this_id) {
- return static_cast<u32>(system.GetCpuManager().CurrentCore());
- }
- }
- const auto it = host_thread_ids.find(this_id);
- if (it == host_thread_ids.end()) {
- return Core::INVALID_HOST_THREAD_ID;
+ [[nodiscard]] u32 GetCurrentHostThreadID() {
+ const auto this_id = GetHostThreadId();
+ if (!is_multicore && single_core_thread_id == this_id) {
+ return static_cast<u32>(system.GetCpuManager().CurrentCore());
}
- return it->second;
+ return this_id;
}
- Core::EmuThreadHandle GetCurrentEmuThreadID() const {
- Core::EmuThreadHandle result = Core::EmuThreadHandle::InvalidHandle();
- result.host_handle = GetCurrentHostThreadID();
- if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) {
- return result;
- }
- const Kernel::Scheduler& sched = cores[result.host_handle].Scheduler();
- const Kernel::Thread* current = sched.GetCurrentThread();
- if (current != nullptr && !current->IsPhantomMode()) {
- result.guest_handle = current->GetGlobalHandle();
- } else {
- result.guest_handle = InvalidHandle;
+ bool IsPhantomModeForSingleCore() const {
+ return is_phantom_mode_for_singlecore;
+ }
+
+ void SetIsPhantomModeForSingleCore(bool value) {
+ ASSERT(!is_multicore);
+ is_phantom_mode_for_singlecore = value;
+ }
+
+ KThread* GetCurrentEmuThread() {
+ const auto thread_id = GetCurrentHostThreadID();
+ if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
+ return GetHostDummyThread();
}
- return result;
+ return schedulers[thread_id]->GetCurrentThread();
}
void InitializeMemoryLayout() {
@@ -286,8 +308,11 @@ struct KernelCore::Impl {
// Allocate slab heaps
user_slab_heap_pages = std::make_unique<Memory::SlabHeap<Memory::Page>>();
+ constexpr u64 user_slab_heap_size{0x1ef000};
+ // Reserve slab heaps
+ ASSERT(
+ system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size));
// Initialize slab heaps
- constexpr u64 user_slab_heap_size{0x3de000};
user_slab_heap_pages->Initialize(
system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase),
user_slab_heap_size);
@@ -301,11 +326,10 @@ struct KernelCore::Impl {
// Lists all processes that exist in the current session.
std::vector<std::shared_ptr<Process>> process_list;
Process* current_process = nullptr;
- Kernel::GlobalScheduler global_scheduler;
- Kernel::Synchronization synchronization;
+ std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
Kernel::TimeManager time_manager;
- std::shared_ptr<ResourceLimit> system_resource_limit;
+ std::shared_ptr<KResourceLimit> system_resource_limit;
std::shared_ptr<Core::Timing::EventType> preemption_event;
@@ -320,11 +344,8 @@ struct KernelCore::Impl {
std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
std::vector<Kernel::PhysicalCore> cores;
- // 0-3 IDs represent core threads, >3 represent others
- std::unordered_map<std::thread::id, u32> host_thread_ids;
- u32 registered_thread_ids{Core::Hardware::NUM_CPU_CORES};
- std::bitset<Core::Hardware::NUM_CPU_CORES> registered_core_threads;
- std::mutex register_thread_mutex;
+ // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others
+ std::atomic<u32> next_host_thread_id{Core::Hardware::NUM_CPU_CORES};
// Kernel memory management
std::unique_ptr<Memory::MemoryManager> memory_manager;
@@ -336,12 +357,20 @@ struct KernelCore::Impl {
std::shared_ptr<Kernel::SharedMemory> irs_shared_mem;
std::shared_ptr<Kernel::SharedMemory> time_shared_mem;
- std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
+ // Threads used for services
+ std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
+
+ // Service threads are managed by a worker thread, so that a calling service thread can queue up
+ // the release of itself
+ std::unique_ptr<Common::ThreadWorker> service_thread_manager;
+
+ std::array<std::shared_ptr<KThread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
- std::array<std::unique_ptr<Kernel::Scheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
+ std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
bool is_multicore{};
- std::thread::id single_core_thread_id{};
+ bool is_phantom_mode_for_singlecore{};
+ u32 single_core_thread_id{};
std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{};
@@ -362,16 +391,20 @@ void KernelCore::Initialize() {
impl->Initialize(*this);
}
+void KernelCore::InitializeCores() {
+ impl->InitializeCores();
+}
+
void KernelCore::Shutdown() {
impl->Shutdown();
}
-std::shared_ptr<ResourceLimit> KernelCore::GetSystemResourceLimit() const {
+std::shared_ptr<KResourceLimit> KernelCore::GetSystemResourceLimit() const {
return impl->system_resource_limit;
}
-std::shared_ptr<Thread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
- return impl->global_handle_table.Get<Thread>(handle);
+std::shared_ptr<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
+ return impl->global_handle_table.Get<KThread>(handle);
}
void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) {
@@ -394,19 +427,19 @@ const std::vector<std::shared_ptr<Process>>& KernelCore::GetProcessList() const
return impl->process_list;
}
-Kernel::GlobalScheduler& KernelCore::GlobalScheduler() {
- return impl->global_scheduler;
+Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() {
+ return *impl->global_scheduler_context;
}
-const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const {
- return impl->global_scheduler;
+const Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() const {
+ return *impl->global_scheduler_context;
}
-Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) {
+Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) {
return *impl->schedulers[id];
}
-const Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) const {
+const Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) const {
return *impl->schedulers[id];
}
@@ -430,16 +463,13 @@ const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
return impl->cores[core_id];
}
-Kernel::Scheduler& KernelCore::CurrentScheduler() {
+Kernel::KScheduler* KernelCore::CurrentScheduler() {
u32 core_id = impl->GetCurrentHostThreadID();
- ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
- return *impl->schedulers[core_id];
-}
-
-const Kernel::Scheduler& KernelCore::CurrentScheduler() const {
- u32 core_id = impl->GetCurrentHostThreadID();
- ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
- return *impl->schedulers[core_id];
+ if (core_id >= Core::Hardware::NUM_CPU_CORES) {
+ // This is expected when not called from a guest thread
+ return {};
+ }
+ return impl->schedulers[core_id].get();
}
std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
@@ -451,14 +481,6 @@ const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Kern
return impl->interrupts;
}
-Kernel::Synchronization& KernelCore::Synchronization() {
- return impl->synchronization;
-}
-
-const Kernel::Synchronization& KernelCore::Synchronization() const {
- return impl->synchronization;
-}
-
Kernel::TimeManager& KernelCore::TimeManager() {
return impl->time_manager;
}
@@ -476,12 +498,17 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
}
void KernelCore::InvalidateAllInstructionCaches() {
- auto& threads = GlobalScheduler().GetThreadList();
- for (auto& thread : threads) {
- if (!thread->IsHLEThread()) {
- auto& arm_interface = thread->ArmInterface();
- arm_interface.ClearInstructionCache();
+ for (auto& physical_core : impl->cores) {
+ physical_core.ArmInterface().ClearInstructionCache();
+ }
+}
+
+void KernelCore::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
+ for (auto& physical_core : impl->cores) {
+ if (!physical_core.IsInitialized()) {
+ continue;
}
+ physical_core.ArmInterface().InvalidateCacheRange(addr, size);
}
}
@@ -542,8 +569,8 @@ u32 KernelCore::GetCurrentHostThreadID() const {
return impl->GetCurrentHostThreadID();
}
-Core::EmuThreadHandle KernelCore::GetCurrentEmuThreadID() const {
- return impl->GetCurrentEmuThreadID();
+KThread* KernelCore::GetCurrentEmuThread() const {
+ return impl->GetCurrentEmuThread();
}
Memory::MemoryManager& KernelCore::MemoryManager() {
@@ -597,10 +624,12 @@ const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const {
void KernelCore::Suspend(bool in_suspention) {
const bool should_suspend = exception_exited || in_suspention;
{
- SchedulerLock lock(*this);
- ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep;
+ KScopedSchedulerLock lock(*this);
+ const auto state = should_suspend ? ThreadState::Runnable : ThreadState::Waiting;
for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
- impl->suspend_threads[i]->SetStatus(status);
+ impl->suspend_threads[i]->SetState(state);
+ impl->suspend_threads[i]->SetWaitReasonForDebugging(
+ ThreadWaitReasonForDebugging::Suspended);
}
}
}
@@ -624,4 +653,27 @@ void KernelCore::ExitSVCProfile() {
MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
}
+std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
+ auto service_thread = std::make_shared<Kernel::ServiceThread>(*this, 1, name);
+ impl->service_thread_manager->QueueWork(
+ [this, service_thread] { impl->service_threads.emplace(service_thread); });
+ return service_thread;
+}
+
+void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) {
+ impl->service_thread_manager->QueueWork([this, service_thread] {
+ if (auto strong_ptr = service_thread.lock()) {
+ impl->service_threads.erase(strong_ptr);
+ }
+ });
+}
+
+bool KernelCore::IsPhantomModeForSingleCore() const {
+ return impl->IsPhantomModeForSingleCore();
+}
+
+void KernelCore::SetIsPhantomModeForSingleCore(bool value) {
+ impl->SetIsPhantomModeForSingleCore(value);
+}
+
} // namespace Kernel
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 16285c3f0..806a0d986 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -33,19 +33,23 @@ template <typename T>
class SlabHeap;
} // namespace Memory
-class AddressArbiter;
class ClientPort;
-class GlobalScheduler;
+class GlobalSchedulerContext;
class HandleTable;
class PhysicalCore;
class Process;
-class ResourceLimit;
-class Scheduler;
+class KResourceLimit;
+class KScheduler;
class SharedMemory;
+class ServiceThread;
class Synchronization;
-class Thread;
+class KThread;
class TimeManager;
+using EmuThreadHandle = uintptr_t;
+constexpr EmuThreadHandle EmuThreadHandleInvalid{};
+constexpr EmuThreadHandle EmuThreadHandleReserved{1ULL << 63};
+
/// Represents a single instance of the kernel.
class KernelCore {
private:
@@ -74,14 +78,17 @@ public:
/// Resets the kernel to a clean slate for use.
void Initialize();
+ /// Initializes the CPU cores.
+ void InitializeCores();
+
/// Clears all resources in use by the kernel instance.
void Shutdown();
/// Retrieves a shared pointer to the system resource limit instance.
- std::shared_ptr<ResourceLimit> GetSystemResourceLimit() const;
+ std::shared_ptr<KResourceLimit> GetSystemResourceLimit() const;
/// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table.
- std::shared_ptr<Thread> RetrieveThreadFromGlobalHandleTable(Handle handle) const;
+ std::shared_ptr<KThread> RetrieveThreadFromGlobalHandleTable(Handle handle) const;
/// Adds the given shared pointer to an internal list of active processes.
void AppendNewProcess(std::shared_ptr<Process> process);
@@ -99,16 +106,16 @@ public:
const std::vector<std::shared_ptr<Process>>& GetProcessList() const;
/// Gets the sole instance of the global scheduler
- Kernel::GlobalScheduler& GlobalScheduler();
+ Kernel::GlobalSchedulerContext& GlobalSchedulerContext();
/// Gets the sole instance of the global scheduler
- const Kernel::GlobalScheduler& GlobalScheduler() const;
+ const Kernel::GlobalSchedulerContext& GlobalSchedulerContext() const;
/// Gets the sole instance of the Scheduler associated with cpu core 'id'
- Kernel::Scheduler& Scheduler(std::size_t id);
+ Kernel::KScheduler& Scheduler(std::size_t id);
/// Gets the sole instance of the Scheduler associated with cpu core 'id'
- const Kernel::Scheduler& Scheduler(std::size_t id) const;
+ const Kernel::KScheduler& Scheduler(std::size_t id) const;
/// Gets an instance of the respective physical CPU core.
Kernel::PhysicalCore& PhysicalCore(std::size_t id);
@@ -117,10 +124,7 @@ public:
const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
/// Gets the sole instance of the Scheduler at the current running core.
- Kernel::Scheduler& CurrentScheduler();
-
- /// Gets the sole instance of the Scheduler at the current running core.
- const Kernel::Scheduler& CurrentScheduler() const;
+ Kernel::KScheduler* CurrentScheduler();
/// Gets an instance of the current physical CPU core.
Kernel::PhysicalCore& CurrentPhysicalCore();
@@ -128,12 +132,6 @@ public:
/// Gets an instance of the current physical CPU core.
const Kernel::PhysicalCore& CurrentPhysicalCore() const;
- /// Gets the an instance of the Synchronization Interface.
- Kernel::Synchronization& Synchronization();
-
- /// Gets the an instance of the Synchronization Interface.
- const Kernel::Synchronization& Synchronization() const;
-
/// Gets an instance of the TimeManager Interface.
Kernel::TimeManager& TimeManager();
@@ -153,6 +151,8 @@ public:
void InvalidateAllInstructionCaches();
+ void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
+
/// Adds a port to the named port table
void AddNamedPort(std::string name, std::shared_ptr<ClientPort> port);
@@ -165,8 +165,8 @@ public:
/// Determines whether or not the given port is a valid named port.
bool IsValidNamedPort(NamedPortTable::const_iterator port) const;
- /// Gets the current host_thread/guest_thread handle.
- Core::EmuThreadHandle GetCurrentEmuThreadID() const;
+ /// Gets the current host_thread/guest_thread pointer.
+ KThread* GetCurrentEmuThread() const;
/// Gets the current host_thread handle.
u32 GetCurrentHostThreadID() const;
@@ -225,10 +225,30 @@ public:
void ExitSVCProfile();
+ /**
+ * Creates an HLE service thread, which is used to execute service routines asynchronously.
+ * While these are allocated per ServerSession, they need to be owned and managed outside of
+ * ServerSession to avoid a circular dependency.
+ * @param name String name for the ServerSession creating this thread, used for debug purposes.
+ * @returns A weak pointer to the newly created service thread.
+ */
+ std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name);
+
+ /**
+ * Releases an HLE service thread, instructing KernelCore to free it. This should be called when
+ * the ServerSession associated with the thread is destroyed.
+ * @param service_thread Service thread to release.
+ */
+ void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread);
+
+ /// Workaround for single-core mode when preempting threads while idle.
+ bool IsPhantomModeForSingleCore() const;
+ void SetIsPhantomModeForSingleCore(bool value);
+
private:
friend class Object;
friend class Process;
- friend class Thread;
+ friend class KThread;
/// Creates a new object ID, incrementing the internal object ID counter.
u32 CreateNewObjectID();
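To make the service-thread lifecycle above concrete, here is a minimal usage sketch. The ExampleSession class is hypothetical, but the CreateServiceThread/ReleaseServiceThread calls match the declarations in this header:

// Hypothetical owner of a service thread (a stand-in for ServerSession).
// KernelCore owns the thread; the session only holds a weak_ptr to it.
class ExampleSession {
public:
    explicit ExampleSession(Kernel::KernelCore& kernel_)
        : kernel{kernel_}, service_thread{kernel_.CreateServiceThread("ExampleService")} {}

    ~ExampleSession() {
        // Hand the thread back so KernelCore can free it, breaking the
        // would-be circular dependency described in the comment above.
        kernel.ReleaseServiceThread(service_thread);
    }

private:
    Kernel::KernelCore& kernel;
    std::weak_ptr<Kernel::ServiceThread> service_thread;
};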
diff --git a/src/core/hle/kernel/memory/address_space_info.cpp b/src/core/hle/kernel/memory/address_space_info.cpp
index e4288cab4..6cf43ba24 100644
--- a/src/core/hle/kernel/memory/address_space_info.cpp
+++ b/src/core/hle/kernel/memory/address_space_info.cpp
@@ -96,6 +96,7 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
}
UNREACHABLE();
+ return 0;
}
std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
@@ -112,6 +113,7 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
}
UNREACHABLE();
+ return 0;
}
} // namespace Kernel::Memory
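The added return statements matter when UNREACHABLE() does not expand to something the compiler can prove is [[noreturn]]: without a trailing return, a value-returning function that falls off the end triggers -Wreturn-type (GCC/Clang) or C4715 (MSVC). A minimal sketch of the pattern, with UNREACHABLE standing in for the project's assertion macro:

u64 GetWidthValue(int type) {
    switch (type) {
    case 0:
        return 32;
    case 1:
        return 39;
    }
    UNREACHABLE();
    return 0; // Never executed; present only to silence the warning.
}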
diff --git a/src/core/hle/kernel/memory/memory_block.h b/src/core/hle/kernel/memory/memory_block.h
index 9d7839d08..83acece1e 100644
--- a/src/core/hle/kernel/memory/memory_block.h
+++ b/src/core/hle/kernel/memory/memory_block.h
@@ -73,12 +73,12 @@ enum class MemoryState : u32 {
ThreadLocal =
static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted,
- Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
- FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
- FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
+ Transferred = static_cast<u32>(Svc::MemoryState::Transferred) | FlagsMisc |
+ FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
+ FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
- SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc |
- FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
+ SharedTransferred = static_cast<u32>(Svc::MemoryState::SharedTransferred) | FlagsMisc |
+ FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped |
FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
@@ -111,8 +111,8 @@ static_assert(static_cast<u32>(MemoryState::AliasCodeData) == 0x03FFBD09);
static_assert(static_cast<u32>(MemoryState::Ipc) == 0x005C3C0A);
static_assert(static_cast<u32>(MemoryState::Stack) == 0x005C3C0B);
static_assert(static_cast<u32>(MemoryState::ThreadLocal) == 0x0040200C);
-static_assert(static_cast<u32>(MemoryState::Transfered) == 0x015C3C0D);
-static_assert(static_cast<u32>(MemoryState::SharedTransfered) == 0x005C380E);
+static_assert(static_cast<u32>(MemoryState::Transferred) == 0x015C3C0D);
+static_assert(static_cast<u32>(MemoryState::SharedTransferred) == 0x005C380E);
static_assert(static_cast<u32>(MemoryState::SharedCode) == 0x0040380F);
static_assert(static_cast<u32>(MemoryState::Inaccessible) == 0x00000010);
static_assert(static_cast<u32>(MemoryState::NonSecureIpc) == 0x005C3811);
@@ -222,9 +222,9 @@ public:
public:
constexpr MemoryBlock() = default;
- constexpr MemoryBlock(VAddr addr, std::size_t num_pages, MemoryState state,
- MemoryPermission perm, MemoryAttribute attribute)
- : addr{addr}, num_pages(num_pages), state{state}, perm{perm}, attribute{attribute} {}
+ constexpr MemoryBlock(VAddr addr_, std::size_t num_pages_, MemoryState state_,
+ MemoryPermission perm_, MemoryAttribute attribute_)
+ : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}
constexpr VAddr GetAddress() const {
return addr;
diff --git a/src/core/hle/kernel/memory/memory_block_manager.h b/src/core/hle/kernel/memory/memory_block_manager.h
index 6e1d41075..f57d1bbcc 100644
--- a/src/core/hle/kernel/memory/memory_block_manager.h
+++ b/src/core/hle/kernel/memory/memory_block_manager.h
@@ -57,8 +57,8 @@ public:
private:
void MergeAdjacent(iterator it, iterator& next_it);
- const VAddr start_addr;
- const VAddr end_addr;
+ [[maybe_unused]] const VAddr start_addr;
+ [[maybe_unused]] const VAddr end_addr;
MemoryBlockTree memory_block_tree;
};
diff --git a/src/core/hle/kernel/memory/memory_layout.h b/src/core/hle/kernel/memory/memory_layout.h
index 9b3d6267a..c7c0b2f49 100644
--- a/src/core/hle/kernel/memory/memory_layout.h
+++ b/src/core/hle/kernel/memory/memory_layout.h
@@ -5,9 +5,28 @@
#pragma once
#include "common/common_types.h"
+#include "core/device_memory.h"
namespace Kernel::Memory {
+constexpr std::size_t KernelAslrAlignment = 2 * 1024 * 1024;
+constexpr std::size_t KernelVirtualAddressSpaceWidth = 1ULL << 39;
+constexpr std::size_t KernelPhysicalAddressSpaceWidth = 1ULL << 48;
+constexpr std::size_t KernelVirtualAddressSpaceBase = 0ULL - KernelVirtualAddressSpaceWidth;
+constexpr std::size_t KernelVirtualAddressSpaceEnd =
+ KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment);
+constexpr std::size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1;
+constexpr std::size_t KernelVirtualAddressSpaceSize =
+ KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase;
+
+constexpr bool IsKernelAddressKey(VAddr key) {
+ return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
+}
+
+constexpr bool IsKernelAddress(VAddr address) {
+ return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;
+}
+
class MemoryRegion final {
friend class MemoryLayout;
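As a sanity check on the constants above (a sketch, assuming 64-bit std::size_t; these asserts are not part of the patch): the kernel window is the top 2^39 bytes of the address space, with the final 2 MiB ASLR alignment unit trimmed off the end.

static_assert(Kernel::Memory::KernelVirtualAddressSpaceBase == 0xFFFFFF8000000000ULL);
static_assert(Kernel::Memory::KernelVirtualAddressSpaceEnd == 0xFFFFFFFFFFE00000ULL);
static_assert(Kernel::Memory::IsKernelAddress(0xFFFFFF8000000000ULL));
static_assert(!Kernel::Memory::IsKernelAddress(0xFFFFFFFFFFE00000ULL)); // End is exclusive.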
diff --git a/src/core/hle/kernel/memory/memory_manager.cpp b/src/core/hle/kernel/memory/memory_manager.cpp
index acf13585c..77f135cdc 100644
--- a/src/core/hle/kernel/memory/memory_manager.cpp
+++ b/src/core/hle/kernel/memory/memory_manager.cpp
@@ -8,9 +8,9 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/scope_exit.h"
-#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/memory/memory_manager.h"
#include "core/hle/kernel/memory/page_linked_list.h"
+#include "core/hle/kernel/svc_results.h"
namespace Kernel::Memory {
@@ -95,7 +95,7 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa
// Choose a heap based on our page size request
const s32 heap_index{PageHeap::GetBlockIndex(num_pages)};
if (heap_index < 0) {
- return ERR_OUT_OF_MEMORY;
+ return ResultOutOfMemory;
}
// TODO (bunnei): Support multiple managers
@@ -140,7 +140,7 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa
// Only succeed if we allocated as many pages as we wanted
if (num_pages) {
- return ERR_OUT_OF_MEMORY;
+ return ResultOutOfMemory;
}
// We succeeded!
diff --git a/src/core/hle/kernel/memory/page_heap.h b/src/core/hle/kernel/memory/page_heap.h
index 22b0de860..131093284 100644
--- a/src/core/hle/kernel/memory/page_heap.h
+++ b/src/core/hle/kernel/memory/page_heap.h
@@ -8,11 +8,11 @@
#pragma once
#include <array>
+#include <bit>
#include <vector>
#include "common/alignment.h"
#include "common/assert.h"
-#include "common/bit_util.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_types.h"
@@ -105,7 +105,7 @@ private:
ASSERT(depth == 0);
return -1;
}
- offset = offset * 64 + Common::CountTrailingZeroes64(v);
+ offset = offset * 64 + static_cast<u32>(std::countr_zero(v));
++depth;
} while (depth < static_cast<s32>(used_depths));
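The swap from Common::CountTrailingZeroes64 to std::countr_zero (C++20 &lt;bit&gt;) is behavior-preserving for the non-zero words reaching this point: both yield the index of the lowest set bit. A self-contained sketch of the word-level scan:

#include <bit>
#include <cstdint>

// Each u64 word is a 64-entry bitmap; countr_zero finds the first set
// (free) slot, mirroring the offset computation in the loop above.
int FirstSetSlot(std::uint64_t word) {
    if (word == 0) {
        return -1; // No set bit in this word.
    }
    return std::countr_zero(word); // 0..63.
}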
diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
index a3fadb533..00ed9b881 100644
--- a/src/core/hle/kernel/memory/page_table.cpp
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -6,7 +6,7 @@
#include "common/assert.h"
#include "common/scope_exit.h"
#include "core/core.h"
-#include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/address_space_info.h"
#include "core/hle/kernel/memory/memory_block.h"
@@ -15,7 +15,7 @@
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/memory/system_control.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/resource_limit.h"
+#include "core/hle/kernel/svc_results.h"
#include "core/memory.h"
namespace Kernel::Memory {
@@ -141,7 +141,7 @@ ResultCode PageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_t
(alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
if (alloc_size < needed_size) {
UNREACHABLE();
- return ERR_OUT_OF_MEMORY;
+ return ResultOutOfMemory;
}
const std::size_t remaining_size{alloc_size - needed_size};
@@ -265,7 +265,7 @@ ResultCode PageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_t
physical_memory_usage = 0;
memory_pool = pool;
- page_table_impl.Resize(address_space_width, PageBits, true);
+ page_table_impl.Resize(address_space_width, PageBits);
return InitializeMemoryLayout(start, end);
}
@@ -277,11 +277,11 @@ ResultCode PageTable::MapProcessCode(VAddr addr, std::size_t num_pages, MemorySt
const u64 size{num_pages * PageSize};
if (!CanContain(addr, size, state)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (IsRegionMapped(addr, size)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
PageLinkedList page_linked_list;
@@ -307,7 +307,7 @@ ResultCode PageTable::MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::
MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped));
if (IsRegionMapped(dst_addr, size)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
PageLinkedList page_linked_list;
@@ -409,27 +409,25 @@ ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
return RESULT_SUCCESS;
}
- auto process{system.Kernel().CurrentProcess()};
const std::size_t remaining_size{size - mapped_size};
const std::size_t remaining_pages{remaining_size / PageSize};
- if (process->GetResourceLimit() &&
- !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, remaining_size)) {
- return ERR_RESOURCE_LIMIT_EXCEEDED;
+ // Reserve the memory from the process resource limit.
+ KScopedResourceReservation memory_reservation(
+ system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
+ remaining_size);
+ if (!memory_reservation.Succeeded()) {
+ LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size);
+ return ResultResourceLimitedExceeded;
}
PageLinkedList page_linked_list;
- {
- auto block_guard = detail::ScopeExit([&] {
- system.Kernel().MemoryManager().Free(page_linked_list, remaining_pages, memory_pool);
- process->GetResourceLimit()->Release(ResourceType::PhysicalMemory, remaining_size);
- });
- CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages,
- memory_pool));
+ CASCADE_CODE(
+ system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages, memory_pool));
- block_guard.Cancel();
- }
+ // We succeeded, so commit the memory reservation.
+ memory_reservation.Commit();
MapPhysicalMemory(page_linked_list, addr, end_addr);
@@ -454,12 +452,12 @@ ResultCode PageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
block_manager->IterateForRange(addr, end_addr, [&](const MemoryInfo& info) {
if (info.state == MemoryState::Normal) {
if (info.attribute != MemoryAttribute::None) {
- result = ERR_INVALID_ADDRESS_STATE;
+ result = ResultInvalidCurrentMemory;
return;
}
mapped_size += GetSizeInRange(info, addr, end_addr);
} else if (info.state != MemoryState::Free) {
- result = ERR_INVALID_ADDRESS_STATE;
+ result = ResultInvalidCurrentMemory;
}
});
@@ -474,7 +472,7 @@ ResultCode PageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
CASCADE_CODE(UnmapMemory(addr, size));
auto process{system.Kernel().CurrentProcess()};
- process->GetResourceLimit()->Release(ResourceType::PhysicalMemory, mapped_size);
+ process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
physical_memory_usage -= mapped_size;
return RESULT_SUCCESS;
@@ -526,7 +524,7 @@ ResultCode PageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) {
MemoryAttribute::Mask, MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped));
if (IsRegionMapped(dst_addr, size)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
PageLinkedList page_linked_list;
@@ -577,7 +575,7 @@ ResultCode PageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) {
AddRegionToPages(dst_addr, num_pages, dst_pages);
if (!dst_pages.IsEqual(src_pages)) {
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
{
@@ -626,11 +624,11 @@ ResultCode PageTable::MapPages(VAddr addr, PageLinkedList& page_linked_list, Mem
const std::size_t size{num_pages * PageSize};
if (!CanContain(addr, size, state)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (IsRegionMapped(addr, num_pages * PageSize)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
CASCADE_CODE(MapPages(addr, page_linked_list, perm));
@@ -670,6 +668,11 @@ ResultCode PageTable::SetCodeMemoryPermission(VAddr addr, std::size_t size, Memo
return RESULT_SUCCESS;
}
+ if ((prev_perm & MemoryPermission::Execute) != (perm & MemoryPermission::Execute)) {
+ // Memory execution state is changing; invalidate the CPU instruction cache for this range.
+ system.InvalidateCpuInstructionCacheRange(addr, size);
+ }
+
const std::size_t num_pages{size / PageSize};
const OperationType operation{(perm & MemoryPermission::Execute) != MemoryPermission::None
? OperationType::ChangePermissionsAndRefresh
@@ -763,7 +766,7 @@ ResultCode PageTable::SetHeapCapacity(std::size_t new_heap_capacity) {
ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {
if (size > heap_region_end - heap_region_start) {
- return ERR_OUT_OF_MEMORY;
+ return ResultOutOfMemory;
}
const u64 previous_heap_size{GetHeapSize()};
@@ -776,10 +779,14 @@ ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {
const u64 delta{size - previous_heap_size};
- auto process{system.Kernel().CurrentProcess()};
- if (process->GetResourceLimit() && delta != 0 &&
- !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, delta)) {
- return ERR_RESOURCE_LIMIT_EXCEEDED;
+ // Reserve memory for the heap extension.
+ KScopedResourceReservation memory_reservation(
+ system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
+ delta);
+
+ if (!memory_reservation.Succeeded()) {
+ LOG_ERROR(Kernel, "Could not reserve heap extension of size {:X} bytes", delta);
+ return ResultResourceLimitedExceeded;
}
PageLinkedList page_linked_list;
@@ -789,12 +796,15 @@ ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {
system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool));
if (IsRegionMapped(current_heap_addr, delta)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
CASCADE_CODE(
Operate(current_heap_addr, num_pages, page_linked_list, OperationType::MapGroup));
+ // Allocation succeeded; commit the resource reservation.
+ memory_reservation.Commit();
+
block_manager->Update(current_heap_addr, num_pages, MemoryState::Normal,
MemoryPermission::ReadAndWrite);
@@ -811,17 +821,17 @@ ResultVal<VAddr> PageTable::AllocateAndMapMemory(std::size_t needed_num_pages, s
std::lock_guard lock{page_table_lock};
if (!CanContain(region_start, region_num_pages * PageSize, state)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (region_num_pages <= needed_num_pages) {
- return ERR_OUT_OF_MEMORY;
+ return ResultOutOfMemory;
}
const VAddr addr{
AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)};
if (!addr) {
- return ERR_OUT_OF_MEMORY;
+ return ResultOutOfMemory;
}
if (is_map_only) {
@@ -1002,8 +1012,8 @@ constexpr VAddr PageTable::GetRegionAddress(MemoryState state) const {
case MemoryState::Shared:
case MemoryState::AliasCode:
case MemoryState::AliasCodeData:
- case MemoryState::Transfered:
- case MemoryState::SharedTransfered:
+ case MemoryState::Transferred:
+ case MemoryState::SharedTransferred:
case MemoryState::SharedCode:
case MemoryState::GeneratedCode:
case MemoryState::CodeOut:
@@ -1037,8 +1047,8 @@ constexpr std::size_t PageTable::GetRegionSize(MemoryState state) const {
case MemoryState::Shared:
case MemoryState::AliasCode:
case MemoryState::AliasCodeData:
- case MemoryState::Transfered:
- case MemoryState::SharedTransfered:
+ case MemoryState::Transferred:
+ case MemoryState::SharedTransferred:
case MemoryState::SharedCode:
case MemoryState::GeneratedCode:
case MemoryState::CodeOut:
@@ -1075,8 +1085,8 @@ constexpr bool PageTable::CanContain(VAddr addr, std::size_t size, MemoryState s
case MemoryState::AliasCodeData:
case MemoryState::Stack:
case MemoryState::ThreadLocal:
- case MemoryState::Transfered:
- case MemoryState::SharedTransfered:
+ case MemoryState::Transferred:
+ case MemoryState::SharedTransferred:
case MemoryState::SharedCode:
case MemoryState::GeneratedCode:
case MemoryState::CodeOut:
@@ -1100,13 +1110,13 @@ constexpr ResultCode PageTable::CheckMemoryState(const MemoryInfo& info, MemoryS
MemoryAttribute attr) const {
// Validate the states match expectation
if ((info.state & state_mask) != state) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if ((info.perm & perm_mask) != perm) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if ((info.attribute & attr_mask) != attr) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
return RESULT_SUCCESS;
@@ -1133,14 +1143,14 @@ ResultCode PageTable::CheckMemoryState(MemoryState* out_state, MemoryPermission*
while (true) {
// Validate the current block
if (!(info.state == first_state)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (!(info.perm == first_perm)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (!((info.attribute | static_cast<MemoryAttribute>(ignore_attr)) ==
(first_attr | static_cast<MemoryAttribute>(ignore_attr)))) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
// Validate against the provided masks
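The recurring change in this file replaces manual Reserve/Release bookkeeping with KScopedResourceReservation, an RAII guard: the reservation is released automatically on scope exit unless Commit() is called. A condensed sketch of the pattern as used above:

// Reserve up front; any early return (including error paths) releases the
// reservation in the destructor. Commit() makes it permanent on success.
KScopedResourceReservation reservation(
    system.Kernel().CurrentProcess()->GetResourceLimit(),
    LimitableResource::PhysicalMemory, size);
if (!reservation.Succeeded()) {
    return ResultResourceLimitedExceeded; // Nothing reserved, nothing to undo.
}
// ... fallible allocation and mapping work ...
reservation.Commit(); // Success: keep the reservation.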
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
deleted file mode 100644
index 8f6c944d1..000000000
--- a/src/core/hle/kernel/mutex.cpp
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <memory>
-#include <utility>
-#include <vector>
-
-#include "common/assert.h"
-#include "common/logging/log.h"
-#include "core/core.h"
-#include "core/hle/kernel/errors.h"
-#include "core/hle/kernel/handle_table.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/mutex.h"
-#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
-#include "core/hle/kernel/thread.h"
-#include "core/hle/result.h"
-#include "core/memory.h"
-
-namespace Kernel {
-
-/// Returns the number of threads that are waiting for a mutex, and the highest priority one among
-/// those.
-static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
- const std::shared_ptr<Thread>& current_thread, VAddr mutex_addr) {
-
- std::shared_ptr<Thread> highest_priority_thread;
- u32 num_waiters = 0;
-
- for (const auto& thread : current_thread->GetMutexWaitingThreads()) {
- if (thread->GetMutexWaitAddress() != mutex_addr)
- continue;
-
- ++num_waiters;
- if (highest_priority_thread == nullptr ||
- thread->GetPriority() < highest_priority_thread->GetPriority()) {
- highest_priority_thread = thread;
- }
- }
-
- return {highest_priority_thread, num_waiters};
-}
-
-/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner.
-static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread,
- std::shared_ptr<Thread> new_owner) {
- current_thread->RemoveMutexWaiter(new_owner);
- const auto threads = current_thread->GetMutexWaitingThreads();
- for (const auto& thread : threads) {
- if (thread->GetMutexWaitAddress() != mutex_addr)
- continue;
-
- ASSERT(thread->GetLockOwner() == current_thread.get());
- current_thread->RemoveMutexWaiter(thread);
- if (new_owner != thread)
- new_owner->AddMutexWaiter(thread);
- }
-}
-
-Mutex::Mutex(Core::System& system) : system{system} {}
-Mutex::~Mutex() = default;
-
-ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
- Handle requesting_thread_handle) {
- // The mutex address must be 4-byte aligned
- if ((address % sizeof(u32)) != 0) {
- LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
- return ERR_INVALID_ADDRESS;
- }
-
- auto& kernel = system.Kernel();
- std::shared_ptr<Thread> current_thread =
- SharedFrom(kernel.CurrentScheduler().GetCurrentThread());
- {
- SchedulerLock lock(kernel);
- // The mutex address must be 4-byte aligned
- if ((address % sizeof(u32)) != 0) {
- return ERR_INVALID_ADDRESS;
- }
-
- const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
- std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
- std::shared_ptr<Thread> requesting_thread =
- handle_table.Get<Thread>(requesting_thread_handle);
-
- // TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of
- // another thread.
- ASSERT(requesting_thread == current_thread);
-
- current_thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
-
- const u32 addr_value = system.Memory().Read32(address);
-
- // If the mutex isn't being held, just return success.
- if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
- return RESULT_SUCCESS;
- }
-
- if (holding_thread == nullptr) {
- return ERR_INVALID_HANDLE;
- }
-
- // Wait until the mutex is released
- current_thread->SetMutexWaitAddress(address);
- current_thread->SetWaitHandle(requesting_thread_handle);
-
- current_thread->SetStatus(ThreadStatus::WaitMutex);
-
- // Update the lock holder thread's priority to prevent priority inversion.
- holding_thread->AddMutexWaiter(current_thread);
- }
-
- {
- SchedulerLock lock(kernel);
- auto* owner = current_thread->GetLockOwner();
- if (owner != nullptr) {
- owner->RemoveMutexWaiter(current_thread);
- }
- }
- return current_thread->GetSignalingResult();
-}
-
-std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thread> owner,
- VAddr address) {
- // The mutex address must be 4-byte aligned
- if ((address % sizeof(u32)) != 0) {
- LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
- return {ERR_INVALID_ADDRESS, nullptr};
- }
-
- auto [new_owner, num_waiters] = GetHighestPriorityMutexWaitingThread(owner, address);
- if (new_owner == nullptr) {
- system.Memory().Write32(address, 0);
- return {RESULT_SUCCESS, nullptr};
- }
- // Transfer the ownership of the mutex from the previous owner to the new one.
- TransferMutexOwnership(address, owner, new_owner);
- u32 mutex_value = new_owner->GetWaitHandle();
- if (num_waiters >= 2) {
- // Notify the guest that there are still some threads waiting for the mutex
- mutex_value |= Mutex::MutexHasWaitersFlag;
- }
- new_owner->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
- new_owner->SetLockOwner(nullptr);
- new_owner->ResumeFromWait();
-
- system.Memory().Write32(address, mutex_value);
- return {RESULT_SUCCESS, new_owner};
-}
-
-ResultCode Mutex::Release(VAddr address) {
- auto& kernel = system.Kernel();
- SchedulerLock lock(kernel);
-
- std::shared_ptr<Thread> current_thread =
- SharedFrom(kernel.CurrentScheduler().GetCurrentThread());
-
- auto [result, new_owner] = Unlock(current_thread, address);
-
- if (result != RESULT_SUCCESS && new_owner != nullptr) {
- new_owner->SetSynchronizationResults(nullptr, result);
- }
-
- return result;
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/mutex.h b/src/core/hle/kernel/mutex.h
deleted file mode 100644
index 3b81dc3df..000000000
--- a/src/core/hle/kernel/mutex.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include "common/common_types.h"
-
-union ResultCode;
-
-namespace Core {
-class System;
-}
-
-namespace Kernel {
-
-class Mutex final {
-public:
- explicit Mutex(Core::System& system);
- ~Mutex();
-
- /// Flag that indicates that a mutex still has threads waiting for it.
- static constexpr u32 MutexHasWaitersFlag = 0x40000000;
- /// Mask of the bits in a mutex address value that contain the mutex owner.
- static constexpr u32 MutexOwnerMask = 0xBFFFFFFF;
-
- /// Attempts to acquire a mutex at the specified address.
- ResultCode TryAcquire(VAddr address, Handle holding_thread_handle,
- Handle requesting_thread_handle);
-
- /// Unlocks a mutex for owner at address
- std::pair<ResultCode, std::shared_ptr<Thread>> Unlock(std::shared_ptr<Thread> owner,
- VAddr address);
-
- /// Releases the mutex at the specified address.
- ResultCode Release(VAddr address);
-
-private:
- Core::System& system;
-};
-
-} // namespace Kernel
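With Mutex deleted, lock arbitration is now routed through the per-process KConditionVariable (see the Process::SignalToAddress/WaitForAddress forwarders in the process.h hunks below). A sketch of how an SVC handler reaches the same functionality after this change; the wrapper names are illustrative:

// Illustrative SVC-level wrappers: lock words are now arbitrated by the
// process' condition variable rather than a standalone Mutex object.
ResultCode ArbitrateLockSketch(Core::System& system, Handle holder, VAddr address, u32 tag) {
    return system.Kernel().CurrentProcess()->WaitForAddress(holder, address, tag);
}

ResultCode ArbitrateUnlockSketch(Core::System& system, VAddr address) {
    return system.Kernel().CurrentProcess()->SignalToAddress(address);
}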
diff --git a/src/core/hle/kernel/object.cpp b/src/core/hle/kernel/object.cpp
index 2c571792b..d7f40c403 100644
--- a/src/core/hle/kernel/object.cpp
+++ b/src/core/hle/kernel/object.cpp
@@ -8,7 +8,10 @@
namespace Kernel {
-Object::Object(KernelCore& kernel) : kernel{kernel}, object_id{kernel.CreateNewObjectID()} {}
+Object::Object(KernelCore& kernel_)
+ : kernel{kernel_}, object_id{kernel_.CreateNewObjectID()}, name{"[UNKNOWN KERNEL OBJECT]"} {}
+Object::Object(KernelCore& kernel_, std::string&& name_)
+ : kernel{kernel_}, object_id{kernel_.CreateNewObjectID()}, name{std::move(name_)} {}
Object::~Object() = default;
bool Object::IsWaitable() const {
@@ -21,6 +24,7 @@ bool Object::IsWaitable() const {
return true;
case HandleType::Unknown:
+ case HandleType::Event:
case HandleType::WritableEvent:
case HandleType::SharedMemory:
case HandleType::TransferMemory:
diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h
index e3391e2af..501e58b33 100644
--- a/src/core/hle/kernel/object.h
+++ b/src/core/hle/kernel/object.h
@@ -18,6 +18,7 @@ using Handle = u32;
enum class HandleType : u32 {
Unknown,
+ Event,
WritableEvent,
ReadableEvent,
SharedMemory,
@@ -34,7 +35,8 @@ enum class HandleType : u32 {
class Object : NonCopyable, public std::enable_shared_from_this<Object> {
public:
- explicit Object(KernelCore& kernel);
+ explicit Object(KernelCore& kernel_);
+ explicit Object(KernelCore& kernel_, std::string&& name_);
virtual ~Object();
/// Returns a unique identifier for the object. For debugging purposes only.
@@ -46,22 +48,30 @@ public:
return "[BAD KERNEL OBJECT TYPE]";
}
virtual std::string GetName() const {
- return "[UNKNOWN KERNEL OBJECT]";
+ return name;
}
virtual HandleType GetHandleType() const = 0;
+ void Close() {
+ // TODO(bunnei): This is a placeholder to decrement the reference count, which we will use
+ // when we implement KAutoObject instead of using shared_ptr.
+ }
+
/**
* Check if a thread can wait on the object
* @return True if a thread can wait on the object, otherwise false
*/
bool IsWaitable() const;
+ virtual void Finalize() = 0;
+
protected:
/// The kernel instance this object was created under.
KernelCore& kernel;
private:
std::atomic<u32> object_id{0};
+ std::string name;
};
template <typename T>
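The new name-taking constructor lets a derived object carry its debug name from construction instead of overriding GetName(). A hypothetical derived type showing both the naming constructor and the new pure-virtual Finalize():

class ExampleEvent final : public Object {
public:
    ExampleEvent(KernelCore& kernel_, std::string name_)
        : Object{kernel_, std::move(name_)} {} // GetName() now returns this name.

    HandleType GetHandleType() const override {
        return HandleType::Event;
    }

    void Finalize() override {} // Required by the new pure-virtual.
};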
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index c6bbdb080..7fea45f96 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -2,54 +2,60 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include "common/assert.h"
-#include "common/logging/log.h"
#include "common/spin_lock.h"
-#include "core/arm/arm_interface.h"
-#ifdef ARCHITECTURE_x86_64
+#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/dynarmic/arm_dynarmic_32.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
-#endif
-#include "core/arm/cpu_interrupt_handler.h"
-#include "core/arm/exclusive_monitor.h"
-#include "core/arm/unicorn/arm_unicorn.h"
#include "core/core.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
-#include "core/hle/kernel/scheduler.h"
-#include "core/hle/kernel/thread.h"
namespace Kernel {
-PhysicalCore::PhysicalCore(Core::System& system, std::size_t id, Kernel::Scheduler& scheduler,
- Core::CPUInterruptHandler& interrupt_handler)
- : interrupt_handler{interrupt_handler}, core_index{id}, scheduler{scheduler} {
-
- guard = std::make_unique<Common::SpinLock>();
-}
+PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system,
+ Kernel::KScheduler& scheduler, Core::CPUInterrupts& interrupts)
+ : core_index{core_index}, system{system}, scheduler{scheduler},
+ interrupts{interrupts}, guard{std::make_unique<Common::SpinLock>()} {}
PhysicalCore::~PhysicalCore() = default;
-void PhysicalCore::Idle() {
- interrupt_handler.AwaitInterrupt();
+void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
+#ifdef ARCHITECTURE_x86_64
+ auto& kernel = system.Kernel();
+ if (is_64_bit) {
+ arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
+ system, interrupts, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
+ } else {
+ arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
+ system, interrupts, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
+ }
+#else
+#error Platform not supported yet.
+#endif
}
-void PhysicalCore::Shutdown() {
- scheduler.Shutdown();
+void PhysicalCore::Run() {
+ arm_interface->Run();
+}
+
+void PhysicalCore::Idle() {
+ interrupts[core_index].AwaitInterrupt();
}
bool PhysicalCore::IsInterrupted() const {
- return interrupt_handler.IsInterrupted();
+ return interrupts[core_index].IsInterrupted();
}
void PhysicalCore::Interrupt() {
guard->lock();
- interrupt_handler.SetInterrupt(true);
+ interrupts[core_index].SetInterrupt(true);
guard->unlock();
}
void PhysicalCore::ClearInterrupt() {
guard->lock();
- interrupt_handler.SetInterrupt(false);
+ interrupts[core_index].SetInterrupt(false);
guard->unlock();
}
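JIT construction now happens in PhysicalCore::Initialize rather than in the constructor, so the backend can be chosen once the guest bitness is known. A hedged sketch of the driver loop that KernelCore::InitializeCores presumably runs (the loop itself is assumed; the accessors are from this diff):

void InitializeCoresSketch(Kernel::KernelCore& kernel, bool is_64_bit) {
    for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) {
        auto& core = kernel.PhysicalCore(i);
        if (!core.IsInitialized()) {
            core.Initialize(is_64_bit); // Selects ARM_Dynarmic_64 or _32.
        }
    }
}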
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index d7a7a951c..f2b0911aa 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -4,19 +4,21 @@
#pragma once
+#include <array>
#include <cstddef>
#include <memory>
+#include "core/arm/arm_interface.h"
+
namespace Common {
class SpinLock;
}
namespace Kernel {
-class Scheduler;
+class KScheduler;
} // namespace Kernel
namespace Core {
-class ARM_Interface;
class CPUInterruptHandler;
class ExclusiveMonitor;
class System;
@@ -26,17 +28,24 @@ namespace Kernel {
class PhysicalCore {
public:
- PhysicalCore(Core::System& system, std::size_t id, Kernel::Scheduler& scheduler,
- Core::CPUInterruptHandler& interrupt_handler);
+ PhysicalCore(std::size_t core_index, Core::System& system, Kernel::KScheduler& scheduler,
+ Core::CPUInterrupts& interrupts);
~PhysicalCore();
PhysicalCore(const PhysicalCore&) = delete;
PhysicalCore& operator=(const PhysicalCore&) = delete;
PhysicalCore(PhysicalCore&&) = default;
- PhysicalCore& operator=(PhysicalCore&&) = default;
+ PhysicalCore& operator=(PhysicalCore&&) = delete;
+
+ /// Initialize the core with the specified parameters.
+ void Initialize(bool is_64_bit);
+
+ /// Execute the current JIT state.
+ void Run();
void Idle();
+
/// Interrupt this physical core.
void Interrupt();
@@ -46,8 +55,17 @@ public:
/// Check if this core is interrupted
bool IsInterrupted() const;
- // Shutdown this physical core.
- void Shutdown();
+ bool IsInitialized() const {
+ return arm_interface != nullptr;
+ }
+
+ Core::ARM_Interface& ArmInterface() {
+ return *arm_interface;
+ }
+
+ const Core::ARM_Interface& ArmInterface() const {
+ return *arm_interface;
+ }
bool IsMainCore() const {
return core_index == 0;
@@ -61,19 +79,21 @@ public:
return core_index;
}
- Kernel::Scheduler& Scheduler() {
+ Kernel::KScheduler& Scheduler() {
return scheduler;
}
- const Kernel::Scheduler& Scheduler() const {
+ const Kernel::KScheduler& Scheduler() const {
return scheduler;
}
private:
- Core::CPUInterruptHandler& interrupt_handler;
- std::size_t core_index;
- Kernel::Scheduler& scheduler;
+ const std::size_t core_index;
+ Core::System& system;
+ Kernel::KScheduler& scheduler;
+ Core::CPUInterrupts& interrupts;
std::unique_ptr<Common::SpinLock> guard;
+ std::unique_ptr<Core::ARM_Interface> arm_interface;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index ff9d9248b..47b3ac57b 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -4,6 +4,7 @@
#include <algorithm>
#include <bitset>
+#include <ctime>
#include <memory>
#include <random>
#include "common/alignment.h"
@@ -13,15 +14,16 @@
#include "core/device_memory.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/code_set.h"
-#include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/k_resource_limit.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_block_manager.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/memory/slab_heap.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/resource_limit.h"
-#include "core/hle/kernel/scheduler.h"
-#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/svc_results.h"
#include "core/hle/lock.h"
#include "core/memory.h"
#include "core/settings.h"
@@ -37,11 +39,11 @@ namespace {
*/
void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, VAddr stack_top) {
const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
- ThreadType type = THREADTYPE_USER;
- auto thread_res = Thread::Create(system, type, "main", entry_point, priority, 0,
- owner_process.GetIdealCore(), stack_top, &owner_process);
+ ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1));
+ auto thread_res = KThread::Create(system, ThreadType::User, "main", entry_point, priority, 0,
+ owner_process.GetIdealCoreId(), stack_top, &owner_process);
- std::shared_ptr<Thread> thread = std::move(thread_res).Unwrap();
+ std::shared_ptr<KThread> thread = std::move(thread_res).Unwrap();
// Register 1 must be a handle to the main thread
const Handle thread_handle = owner_process.GetHandleTable().Create(thread).Unwrap();
@@ -53,8 +55,8 @@ void SetupMainThread(Core::System& system, Process& owner_process, u32 priority,
auto& kernel = system.Kernel();
// Threads are dormant by default; wake up the main thread so it runs when the scheduler fires
{
- SchedulerLock lock{kernel};
- thread->SetStatus(ThreadStatus::Ready);
+ KScopedSchedulerLock lock{kernel};
+ thread->SetState(ThreadState::Runnable);
}
}
} // Anonymous namespace
@@ -116,14 +118,17 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name,
std::shared_ptr<Process> process = std::make_shared<Process>(system);
process->name = std::move(name);
- process->resource_limit = ResourceLimit::Create(kernel);
+
+ // TODO: This is inaccurate
+ // The process should hold a reference to the kernel-wide resource limit.
+ process->resource_limit = std::make_shared<KResourceLimit>(kernel, system);
process->status = ProcessStatus::Created;
process->program_id = 0;
process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
: kernel.CreateNewUserProcessID();
process->capabilities.InitializeForMetadatalessProcess();
- std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(0));
+ std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr)));
std::uniform_int_distribution<u64> distribution;
std::generate(process->random_entropy.begin(), process->random_entropy.end(),
[&] { return distribution(rng); });
@@ -132,12 +137,32 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name,
return process;
}
-std::shared_ptr<ResourceLimit> Process::GetResourceLimit() const {
+std::shared_ptr<KResourceLimit> Process::GetResourceLimit() const {
return resource_limit;
}
+void Process::IncrementThreadCount() {
+ ASSERT(num_threads >= 0);
+ num_created_threads++;
+
+ if (const auto count = ++num_threads; count > peak_num_threads) {
+ peak_num_threads = count;
+ }
+}
+
+void Process::DecrementThreadCount() {
+ ASSERT(num_threads > 0);
+
+ if (const auto count = --num_threads; count == 0) {
+ UNIMPLEMENTED_MSG("Process termination is not implemented!");
+ }
+}
+
u64 Process::GetTotalPhysicalMemoryAvailable() const {
- const u64 capacity{resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory) +
+ // TODO: This is expected to always return the application memory pool size after accurately
+ // reserving kernel resources. The current workaround uses a process-local resource limit of
+ // application memory pool size, which is inaccurate.
+ const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
main_thread_stack_size};
@@ -161,68 +186,79 @@ u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
}
-void Process::InsertConditionVariableThread(std::shared_ptr<Thread> thread) {
- VAddr cond_var_addr = thread->GetCondVarWaitAddress();
- std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
- auto it = thread_list.begin();
- while (it != thread_list.end()) {
- const std::shared_ptr<Thread> current_thread = *it;
- if (current_thread->GetPriority() > thread->GetPriority()) {
- thread_list.insert(it, thread);
- return;
+bool Process::ReleaseUserException(KThread* thread) {
+ KScopedSchedulerLock sl{kernel};
+
+ if (exception_thread == thread) {
+ exception_thread = nullptr;
+
+ // Remove waiter thread.
+ s32 num_waiters{};
+ KThread* next = thread->RemoveWaiterByKey(
+ std::addressof(num_waiters),
+ reinterpret_cast<uintptr_t>(std::addressof(exception_thread)));
+ if (next != nullptr) {
+ if (next->GetState() == ThreadState::Waiting) {
+ next->SetState(ThreadState::Runnable);
+ } else {
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
+ }
}
- ++it;
+
+ return true;
+ } else {
+ return false;
}
- thread_list.push_back(thread);
}
-void Process::RemoveConditionVariableThread(std::shared_ptr<Thread> thread) {
- VAddr cond_var_addr = thread->GetCondVarWaitAddress();
- std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
- auto it = thread_list.begin();
- while (it != thread_list.end()) {
- const std::shared_ptr<Thread> current_thread = *it;
- if (current_thread.get() == thread.get()) {
- thread_list.erase(it);
- return;
- }
- ++it;
- }
+void Process::PinCurrentThread() {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Get the current thread.
+ const s32 core_id = GetCurrentCoreId(kernel);
+ KThread* cur_thread = GetCurrentThreadPointer(kernel);
+
+ // Pin it.
+ PinThread(core_id, cur_thread);
+ cur_thread->Pin();
+
+ // An update is needed.
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
}
-std::vector<std::shared_ptr<Thread>> Process::GetConditionVariableThreads(
- const VAddr cond_var_addr) {
- std::vector<std::shared_ptr<Thread>> result{};
- std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
- auto it = thread_list.begin();
- while (it != thread_list.end()) {
- std::shared_ptr<Thread> current_thread = *it;
- result.push_back(current_thread);
- ++it;
- }
- return result;
+void Process::UnpinCurrentThread() {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Get the current thread.
+ const s32 core_id = GetCurrentCoreId(kernel);
+ KThread* cur_thread = GetCurrentThreadPointer(kernel);
+
+ // Unpin it.
+ cur_thread->Unpin();
+ UnpinThread(core_id, cur_thread);
+
+ // An update is needed.
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
}
-void Process::RegisterThread(const Thread* thread) {
+void Process::RegisterThread(const KThread* thread) {
thread_list.push_back(thread);
}
-void Process::UnregisterThread(const Thread* thread) {
+void Process::UnregisterThread(const KThread* thread) {
thread_list.remove(thread);
}
-ResultCode Process::ClearSignalState() {
- SchedulerLock lock(system.Kernel());
- if (status == ProcessStatus::Exited) {
- LOG_ERROR(Kernel, "called on a terminated process instance.");
- return ERR_INVALID_STATE;
- }
+ResultCode Process::Reset() {
+ // Lock the process and the scheduler.
+ KScopedLightLock lk(state_lock);
+ KScopedSchedulerLock sl{kernel};
- if (!is_signaled) {
- LOG_ERROR(Kernel, "called on a process instance that isn't signaled.");
- return ERR_INVALID_STATE;
- }
+ // Validate that we're in a state that we can reset.
+ R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
+ R_UNLESS(is_signaled, ResultInvalidState);
+ // Clear signaled.
is_signaled = false;
return RESULT_SUCCESS;
}
@@ -235,6 +271,17 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
system_resource_size = metadata.GetSystemResourceSize();
image_size = code_size;
+ // Set initial resource limits
+ resource_limit->SetLimitValue(
+ LimitableResource::PhysicalMemory,
+ kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application));
+ KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
+ code_size + system_resource_size);
+ if (!memory_reservation.Succeeded()) {
+ LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
+ code_size + system_resource_size);
+ return ResultResourceLimitedExceeded;
+ }
// Initialize process address space
if (const ResultCode result{
page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, 0x8000000,
@@ -276,24 +323,22 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
UNREACHABLE();
}
- // Set initial resource limits
- resource_limit->SetLimitValue(
- ResourceType::PhysicalMemory,
- kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application));
- resource_limit->SetLimitValue(ResourceType::Threads, 608);
- resource_limit->SetLimitValue(ResourceType::Events, 700);
- resource_limit->SetLimitValue(ResourceType::TransferMemory, 128);
- resource_limit->SetLimitValue(ResourceType::Sessions, 894);
- ASSERT(resource_limit->Reserve(ResourceType::PhysicalMemory, code_size));
+ resource_limit->SetLimitValue(LimitableResource::Threads, 608);
+ resource_limit->SetLimitValue(LimitableResource::Events, 700);
+ resource_limit->SetLimitValue(LimitableResource::TransferMemory, 128);
+ resource_limit->SetLimitValue(LimitableResource::Sessions, 894);
// Create TLS region
tls_region_address = CreateTLSRegion();
+ memory_reservation.Commit();
return handle_table.SetSize(capabilities.GetHandleTableSize());
}
void Process::Run(s32 main_thread_priority, u64 stack_size) {
AllocateMainThreadStack(stack_size);
+ resource_limit->Reserve(LimitableResource::Threads, 1);
+ resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
const std::size_t heap_capacity{memory_usage_capacity - main_thread_stack_size - image_size};
ASSERT(!page_table->SetHeapCapacity(heap_capacity).IsError());
@@ -301,34 +346,37 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) {
ChangeStatus(ProcessStatus::Running);
SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top);
- resource_limit->Reserve(ResourceType::Threads, 1);
- resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size);
}
void Process::PrepareForTermination() {
ChangeStatus(ProcessStatus::Exiting);
- const auto stop_threads = [this](const std::vector<std::shared_ptr<Thread>>& thread_list) {
+ const auto stop_threads = [this](const std::vector<std::shared_ptr<KThread>>& thread_list) {
for (auto& thread : thread_list) {
if (thread->GetOwnerProcess() != this)
continue;
- if (thread.get() == system.CurrentScheduler().GetCurrentThread())
+ if (thread.get() == kernel.CurrentScheduler()->GetCurrentThread())
continue;
// TODO(Subv): When are the other running/ready threads terminated?
- ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitSynch,
+ ASSERT_MSG(thread->GetState() == ThreadState::Waiting,
"Exiting processes with non-waiting threads is currently unimplemented");
- thread->Stop();
+ thread->Exit();
}
};
- stop_threads(system.GlobalScheduler().GetThreadList());
+ stop_threads(system.GlobalSchedulerContext().GetThreadList());
FreeTLSRegion(tls_region_address);
tls_region_address = 0;
+ if (resource_limit) {
+ resource_limit->Release(LimitableResource::PhysicalMemory,
+ main_thread_stack_size + image_size);
+ }
+
ChangeStatus(ProcessStatus::Exited);
}
@@ -346,7 +394,7 @@ static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
}
VAddr Process::CreateTLSRegion() {
- SchedulerLock lock(system.Kernel());
+ KScopedSchedulerLock lock(system.Kernel());
if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
tls_page_iter != tls_pages.cend()) {
return *tls_page_iter->ReserveSlot();
@@ -377,7 +425,7 @@ VAddr Process::CreateTLSRegion() {
}
void Process::FreeTLSRegion(VAddr tls_address) {
- SchedulerLock lock(system.Kernel());
+ KScopedSchedulerLock lock(system.Kernel());
const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
auto iter =
std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
@@ -405,21 +453,18 @@ void Process::LoadModule(CodeSet code_set, VAddr base_addr) {
ReprotectSegment(code_set.DataSegment(), Memory::MemoryPermission::ReadAndWrite);
}
+bool Process::IsSignaled() const {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ return is_signaled;
+}
+
Process::Process(Core::System& system)
- : SynchronizationObject{system.Kernel()}, page_table{std::make_unique<Memory::PageTable>(
- system)},
- handle_table{system.Kernel()}, address_arbiter{system}, mutex{system}, system{system} {}
+ : KSynchronizationObject{system.Kernel()},
+ page_table{std::make_unique<Memory::PageTable>(system)}, handle_table{system.Kernel()},
+ address_arbiter{system}, condition_var{system}, state_lock{system.Kernel()}, system{system} {}
Process::~Process() = default;
-void Process::Acquire(Thread* thread) {
- ASSERT_MSG(!ShouldWait(thread), "Object unavailable!");
-}
-
-bool Process::ShouldWait(const Thread* thread) const {
- return !is_signaled;
-}
-
void Process::ChangeStatus(ProcessStatus new_status) {
if (status == new_status) {
return;
@@ -427,7 +472,7 @@ void Process::ChangeStatus(ProcessStatus new_status) {
status = new_status;
is_signaled = true;
- Signal();
+ NotifyAvailable();
}
ResultCode Process::AllocateMainThreadStack(std::size_t stack_size) {
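The pin/unpin pair added above keeps the current thread bound to its core across a critical region such as user exception handling; both sides assert that the scheduler lock is held. An assumed calling pattern, consistent with those assertions:

void WithPinnedThread(Kernel::KernelCore& kernel, Kernel::Process& process) {
    Kernel::KScopedSchedulerLock lock{kernel}; // Satisfies the IsLocked() asserts.
    process.PinCurrentThread();
    // ... work during which the thread must stay pinned to this core ...
    process.UnpinCurrentThread();
}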
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index f45cb5674..320b0f347 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -11,11 +11,11 @@
#include <unordered_map>
#include <vector>
#include "common/common_types.h"
-#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/handle_table.h"
-#include "core/hle/kernel/mutex.h"
+#include "core/hle/kernel/k_address_arbiter.h"
+#include "core/hle/kernel/k_condition_variable.h"
+#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/process_capability.h"
-#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/result.h"
namespace Core {
@@ -29,8 +29,8 @@ class ProgramMetadata;
namespace Kernel {
class KernelCore;
-class ResourceLimit;
-class Thread;
+class KResourceLimit;
+class KThread;
class TLSPage;
struct CodeSet;
@@ -63,7 +63,7 @@ enum class ProcessStatus {
DebugBreak,
};
-class Process final : public SynchronizationObject {
+class Process final : public KSynchronizationObject {
public:
explicit Process(Core::System& system);
~Process() override;
@@ -123,24 +123,30 @@ public:
return handle_table;
}
- /// Gets a reference to the process' address arbiter.
- AddressArbiter& GetAddressArbiter() {
- return address_arbiter;
+ ResultCode SignalToAddress(VAddr address) {
+ return condition_var.SignalToAddress(address);
}
- /// Gets a const reference to the process' address arbiter.
- const AddressArbiter& GetAddressArbiter() const {
- return address_arbiter;
+ ResultCode WaitForAddress(Handle handle, VAddr address, u32 tag) {
+ return condition_var.WaitForAddress(handle, address, tag);
}
- /// Gets a reference to the process' mutex lock.
- Mutex& GetMutex() {
- return mutex;
+ void SignalConditionVariable(u64 cv_key, int32_t count) {
+ return condition_var.Signal(cv_key, count);
}
- /// Gets a const reference to the process' mutex lock
- const Mutex& GetMutex() const {
- return mutex;
+ ResultCode WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
+ return condition_var.Wait(address, cv_key, tag, ns);
+ }
+
+ ResultCode SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value,
+ s32 count) {
+ return address_arbiter.SignalToAddress(address, signal_type, value, count);
+ }
+
+ ResultCode WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
+ s64 timeout) {
+ return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
}
/// Gets the address to the process' dedicated TLS region.
@@ -164,13 +170,18 @@ public:
}
/// Gets the resource limit descriptor for this process
- std::shared_ptr<ResourceLimit> GetResourceLimit() const;
+ std::shared_ptr<KResourceLimit> GetResourceLimit() const;
/// Gets the ideal CPU core ID for this process
- u8 GetIdealCore() const {
+ u8 GetIdealCoreId() const {
return ideal_core;
}
+ /// Checks if the specified thread priority is valid.
+ bool CheckThreadPriority(s32 prio) const {
+ return ((1ULL << prio) & GetPriorityMask()) != 0;
+ }
+
/// Gets the bitmask of allowed cores that this process' threads can run on.
u64 GetCoreMask() const {
return capabilities.GetCoreMask();
@@ -206,6 +217,14 @@ public:
return is_64bit_process;
}
+ [[nodiscard]] bool IsSuspended() const {
+ return is_suspended;
+ }
+
+ void SetSuspended(bool suspended) {
+ is_suspended = suspended;
+ }
+
/// Gets the total running time of the process instance in ticks.
u64 GetCPUTimeTicks() const {
return total_process_running_time_ticks;
@@ -216,6 +235,43 @@ public:
total_process_running_time_ticks += ticks;
}
+ /// Gets the process schedule count, used for thread yielding.
+ s64 GetScheduledCount() const {
+ return schedule_count;
+ }
+
+ /// Increments the process schedule count, used for thread yielding.
+ void IncrementScheduledCount() {
+ ++schedule_count;
+ }
+
+ void IncrementThreadCount();
+ void DecrementThreadCount();
+
+ void SetRunningThread(s32 core, KThread* thread, u64 idle_count) {
+ running_threads[core] = thread;
+ running_thread_idle_counts[core] = idle_count;
+ }
+
+ void ClearRunningThread(KThread* thread) {
+ for (size_t i = 0; i < running_threads.size(); ++i) {
+ if (running_threads[i] == thread) {
+ running_threads[i] = nullptr;
+ }
+ }
+ }
+
+ [[nodiscard]] KThread* GetRunningThread(s32 core) const {
+ return running_threads[core];
+ }
+
+ bool ReleaseUserException(KThread* thread);
+
+ [[nodiscard]] KThread* GetPinnedThread(s32 core_id) const {
+ ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+ return pinned_threads[core_id];
+ }
+
/// Gets 8 bytes of random data for svcGetInfo RandomEntropy
u64 GetRandomEntropy(std::size_t index) const {
return random_entropy.at(index);
@@ -236,26 +292,17 @@ public:
u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;
/// Gets the list of all threads created with this process as their owner.
- const std::list<const Thread*>& GetThreadList() const {
+ const std::list<const KThread*>& GetThreadList() const {
return thread_list;
}
- /// Insert a thread into the condition variable wait container
- void InsertConditionVariableThread(std::shared_ptr<Thread> thread);
-
- /// Remove a thread from the condition variable wait container
- void RemoveConditionVariableThread(std::shared_ptr<Thread> thread);
-
- /// Obtain all condition variable threads waiting for some address
- std::vector<std::shared_ptr<Thread>> GetConditionVariableThreads(VAddr cond_var_addr);
-
/// Registers a thread as being created under this process,
/// adding it to this process' thread list.
- void RegisterThread(const Thread* thread);
+ void RegisterThread(const KThread* thread);
/// Unregisters a thread from this process, removing it
/// from this process' thread list.
- void UnregisterThread(const Thread* thread);
+ void UnregisterThread(const KThread* thread);
/// Clears the signaled state of the process if and only if it's signaled.
///
@@ -265,7 +312,7 @@ public:
/// @pre The process must be in a signaled state. If this is called on a
/// process instance that is not signaled, ERR_INVALID_STATE will be
/// returned.
- ResultCode ClearSignalState();
+ ResultCode Reset();
/**
* Loads process-specifics configuration info with metadata provided
@@ -294,6 +341,17 @@ public:
void LoadModule(CodeSet code_set, VAddr base_addr);
+ bool IsSignaled() const override;
+
+ void Finalize() override {}
+
+ void PinCurrentThread();
+ void UnpinCurrentThread();
+
+ KLightLock& GetStateLock() {
+ return state_lock;
+ }
+
///////////////////////////////////////////////////////////////////////////////////////////////
// Thread-local storage management
@@ -304,11 +362,19 @@ public:
void FreeTLSRegion(VAddr tls_address);
private:
- /// Checks if the specified thread should wait until this process is available.
- bool ShouldWait(const Thread* thread) const override;
+ void PinThread(s32 core_id, KThread* thread) {
+ ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+ ASSERT(thread != nullptr);
+ ASSERT(pinned_threads[core_id] == nullptr);
+ pinned_threads[core_id] = thread;
+ }
- /// Acquires/locks this process for the specified thread if it's available.
- void Acquire(Thread* thread) override;
+ void UnpinThread(s32 core_id, KThread* thread) {
+ ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+ ASSERT(thread != nullptr);
+ ASSERT(pinned_threads[core_id] == thread);
+ pinned_threads[core_id] = nullptr;
+ }
/// Changes the process status. If the status is different
/// from the current process status, then this will trigger
@@ -336,7 +402,7 @@ private:
u32 system_resource_size = 0;
/// Resource limit descriptor for this process
- std::shared_ptr<ResourceLimit> resource_limit;
+ std::shared_ptr<KResourceLimit> resource_limit;
/// The ideal CPU core for this process, threads are scheduled on this core by default.
u8 ideal_core = 0;
@@ -363,12 +429,12 @@ private:
HandleTable handle_table;
/// Per-process address arbiter.
- AddressArbiter address_arbiter;
+ KAddressArbiter address_arbiter;
/// The per-process mutex lock instance used for handling various
/// forms of services, such as lock arbitration, and condition
/// variable related facilities.
- Mutex mutex;
+ KConditionVariable condition_var;
/// Address indicating the location of the process' dedicated TLS region.
VAddr tls_region_address = 0;
@@ -377,10 +443,7 @@ private:
std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
/// List of threads that are running with this process as their owner.
- std::list<const Thread*> thread_list;
-
- /// List of threads waiting for a condition variable
- std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> cond_var_threads;
+ std::list<const KThread*> thread_list;
/// Address of the top of the main thread's stack
VAddr main_thread_stack_top{};
@@ -397,6 +460,24 @@ private:
/// Name of this process
std::string name;
+ /// Schedule count of this process
+ s64 schedule_count{};
+
+ bool is_signaled{};
+ bool is_suspended{};
+
+ std::atomic<s32> num_created_threads{};
+ std::atomic<u16> num_threads{};
+ u16 peak_num_threads{};
+
+ std::array<KThread*, Core::Hardware::NUM_CPU_CORES> running_threads{};
+ std::array<u64, Core::Hardware::NUM_CPU_CORES> running_thread_idle_counts{};
+ std::array<KThread*, Core::Hardware::NUM_CPU_CORES> pinned_threads{};
+
+ KThread* exception_thread{};
+
+ KLightLock state_lock;
+
/// System context
Core::System& system;
};
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
index 63880f13d..7c567049e 100644
--- a/src/core/hle/kernel/process_capability.cpp
+++ b/src/core/hle/kernel/process_capability.cpp
@@ -2,12 +2,14 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <bit>
+
#include "common/bit_util.h"
#include "common/logging/log.h"
-#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/process_capability.h"
+#include "core/hle/kernel/svc_results.h"
namespace Kernel {
namespace {
@@ -60,7 +62,7 @@ constexpr CapabilityType GetCapabilityType(u32 value) {
u32 GetFlagBitOffset(CapabilityType type) {
const auto value = static_cast<u32>(type);
- return static_cast<u32>(Common::BitSize<u32>() - Common::CountLeadingZeroes32(value));
+ return static_cast<u32>(Common::BitSize<u32>() - static_cast<u32>(std::countl_zero(value)));
}
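// A hedged illustration of the std::countl_zero substitution above: for a
// 32-bit value, BitSize<u32>() - countl_zero(v) is the bit width of v, i.e.
// the index one past its most significant set bit. The sample values are
// illustrative only.
#include <bit>
#include <cstdint>

static_assert(32u - std::countl_zero(std::uint32_t{0b111}) == 3);   // three trailing ones
static_assert(32u - std::countl_zero(std::uint32_t{0b11111}) == 5); // five trailing ones
static_assert(std::countl_zero(std::uint32_t{0}) == 32);            // well-defined for zero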
} // Anonymous namespace
@@ -121,13 +123,13 @@ ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
// If there's only one, then there's a problem.
if (i >= num_capabilities) {
LOG_ERROR(Kernel, "Invalid combination! i={}", i);
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
const auto size_flags = capabilities[i];
if (GetCapabilityType(size_flags) != CapabilityType::MapPhysical) {
LOG_ERROR(Kernel, "Invalid capability type! size_flags={}", size_flags);
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table);
@@ -157,7 +159,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
const auto type = GetCapabilityType(flag);
if (type == CapabilityType::Unset) {
- return ERR_INVALID_CAPABILITY_DESCRIPTOR;
+ return ResultInvalidCapabilityDescriptor;
}
// Bail early on ignorable entries, as one would expect,
@@ -174,7 +176,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
LOG_ERROR(Kernel,
"Attempted to initialize flags that may only be initialized once. set_flags={}",
set_flags);
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
set_flags |= set_flag;
@@ -199,8 +201,8 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
break;
}
- LOG_ERROR(Kernel, "Invalid capability type! type={}", static_cast<u32>(type));
- return ERR_INVALID_CAPABILITY_DESCRIPTOR;
+ LOG_ERROR(Kernel, "Invalid capability type! type={}", type);
+ return ResultInvalidCapabilityDescriptor;
}
void ProcessCapabilities::Clear() {
@@ -223,7 +225,7 @@ ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
if (priority_mask != 0 || core_mask != 0) {
LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
priority_mask, core_mask);
- return ERR_INVALID_CAPABILITY_DESCRIPTOR;
+ return ResultInvalidCapabilityDescriptor;
}
const u32 core_num_min = (flags >> 16) & 0xFF;
@@ -231,7 +233,7 @@ ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
if (core_num_min > core_num_max) {
LOG_ERROR(Kernel, "Core min is greater than core max! core_num_min={}, core_num_max={}",
core_num_min, core_num_max);
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
const u32 priority_min = (flags >> 10) & 0x3F;
@@ -240,13 +242,13 @@ ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
LOG_ERROR(Kernel,
"Priority min is greater than priority max! priority_min={}, priority_max={}",
core_num_min, priority_max);
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
// The Switch only has 4 usable cores.
if (core_num_max >= 4) {
LOG_ERROR(Kernel, "Invalid max cores specified! core_num_max={}", core_num_max);
- return ERR_INVALID_PROCESSOR_ID;
+ return ResultInvalidCoreId;
}
const auto make_mask = [](u64 min, u64 max) {
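// The hunk cuts off before make_mask's body, so the following is only a
// sketch of the usual pattern for such flags under stated assumptions: pull
// the min/max bit fields out of `flags`, then build a contiguous mask over
// [min, max]. The helper below is hypothetical, not the file's real lambda.
#include <cstdint>

namespace mask_sketch {
// Set bits min..max inclusive (valid for widths below 64).
constexpr std::uint64_t ContiguousMask(std::uint64_t min, std::uint64_t max) {
    const std::uint64_t width = max - min + 1;
    return ((std::uint64_t{1} << width) - 1) << min;
}
static_assert(ContiguousMask(0, 3) == 0b1111);
static_assert(ContiguousMask(2, 4) == 0b11100);

// Field extraction in the style used above for core/priority ranges:
constexpr std::uint32_t CoreNumMin(std::uint32_t flags) { return (flags >> 16) & 0xFF; }
constexpr std::uint32_t PriorityMin(std::uint32_t flags) { return (flags >> 10) & 0x3F; }
} // namespace mask_sketch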
@@ -267,7 +269,7 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags)
// If we've already set this svc before, bail.
if ((set_svc_bits & svc_bit) != 0) {
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
set_svc_bits |= svc_bit;
@@ -281,7 +283,7 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags)
if (svc_number >= svc_capabilities.size()) {
LOG_ERROR(Kernel, "Process svc capability is out of range! svc_number={}", svc_number);
- return ERR_OUT_OF_RANGE;
+ return ResultOutOfRange;
}
svc_capabilities[svc_number] = true;
@@ -319,7 +321,7 @@ ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) {
if (interrupt >= interrupt_capabilities.size()) {
LOG_ERROR(Kernel, "Process interrupt capability is out of range! svc_number={}",
interrupt);
- return ERR_OUT_OF_RANGE;
+ return ResultOutOfRange;
}
interrupt_capabilities[interrupt] = true;
@@ -332,7 +334,7 @@ ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
const u32 reserved = flags >> 17;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
- return ERR_RESERVED_VALUE;
+ return ResultReservedValue;
}
program_type = static_cast<ProgramType>((flags >> 14) & 0b111);
@@ -352,7 +354,7 @@ ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
LOG_ERROR(Kernel,
"Kernel version is non zero or flags are too small! major_version={}, flags={}",
major_version, flags);
- return ERR_INVALID_CAPABILITY_DESCRIPTOR;
+ return ResultInvalidCapabilityDescriptor;
}
kernel_version = flags;
@@ -363,7 +365,7 @@ ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
const u32 reserved = flags >> 26;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
- return ERR_RESERVED_VALUE;
+ return ResultReservedValue;
}
handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF);
@@ -374,7 +376,7 @@ ResultCode ProcessCapabilities::HandleDebugFlags(u32 flags) {
const u32 reserved = flags >> 19;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
- return ERR_RESERVED_VALUE;
+ return ResultReservedValue;
}
is_debuggable = (flags & 0x20000) != 0;
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
deleted file mode 100644
index 6e286419e..000000000
--- a/src/core/hle/kernel/readable_event.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <algorithm>
-#include "common/assert.h"
-#include "common/logging/log.h"
-#include "core/hle/kernel/errors.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/readable_event.h"
-#include "core/hle/kernel/scheduler.h"
-#include "core/hle/kernel/thread.h"
-
-namespace Kernel {
-
-ReadableEvent::ReadableEvent(KernelCore& kernel) : SynchronizationObject{kernel} {}
-ReadableEvent::~ReadableEvent() = default;
-
-bool ReadableEvent::ShouldWait(const Thread* thread) const {
- return !is_signaled;
-}
-
-void ReadableEvent::Acquire(Thread* thread) {
- ASSERT_MSG(IsSignaled(), "object unavailable!");
-}
-
-void ReadableEvent::Signal() {
- if (is_signaled) {
- return;
- }
-
- is_signaled = true;
- SynchronizationObject::Signal();
-}
-
-void ReadableEvent::Clear() {
- is_signaled = false;
-}
-
-ResultCode ReadableEvent::Reset() {
- SchedulerLock lock(kernel);
- if (!is_signaled) {
- LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
- GetObjectId(), GetTypeName(), GetName());
- return ERR_INVALID_STATE;
- }
-
- Clear();
-
- return RESULT_SUCCESS;
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/readable_event.h b/src/core/hle/kernel/readable_event.h
deleted file mode 100644
index 3264dd066..000000000
--- a/src/core/hle/kernel/readable_event.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/synchronization_object.h"
-
-union ResultCode;
-
-namespace Kernel {
-
-class KernelCore;
-class WritableEvent;
-
-class ReadableEvent final : public SynchronizationObject {
- friend class WritableEvent;
-
-public:
- ~ReadableEvent() override;
-
- std::string GetTypeName() const override {
- return "ReadableEvent";
- }
- std::string GetName() const override {
- return name;
- }
-
- static constexpr HandleType HANDLE_TYPE = HandleType::ReadableEvent;
- HandleType GetHandleType() const override {
- return HANDLE_TYPE;
- }
-
- bool ShouldWait(const Thread* thread) const override;
- void Acquire(Thread* thread) override;
-
- /// Unconditionally clears the readable event's state.
- void Clear();
-
- /// Clears the readable event's state if and only if it
- /// has already been signaled.
- ///
- /// @pre The event must be in a signaled state. If this event
- /// is in an unsignaled state and this function is called,
- /// then ERR_INVALID_STATE will be returned.
- ResultCode Reset();
-
- void Signal() override;
-
-private:
- explicit ReadableEvent(KernelCore& kernel);
-
- std::string name; ///< Name of event (optional)
-};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
deleted file mode 100644
index 212e442f4..000000000
--- a/src/core/hle/kernel/resource_limit.cpp
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2015 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include "core/hle/kernel/errors.h"
-#include "core/hle/kernel/resource_limit.h"
-#include "core/hle/result.h"
-
-namespace Kernel {
-namespace {
-constexpr std::size_t ResourceTypeToIndex(ResourceType type) {
- return static_cast<std::size_t>(type);
-}
-} // Anonymous namespace
-
-ResourceLimit::ResourceLimit(KernelCore& kernel) : Object{kernel} {}
-ResourceLimit::~ResourceLimit() = default;
-
-bool ResourceLimit::Reserve(ResourceType resource, s64 amount) {
- return Reserve(resource, amount, 10000000000);
-}
-
-bool ResourceLimit::Reserve(ResourceType resource, s64 amount, u64 timeout) {
- const std::size_t index{ResourceTypeToIndex(resource)};
-
- s64 new_value = current[index] + amount;
- if (new_value > limit[index] && available[index] + amount <= limit[index]) {
- // TODO(bunnei): This is wrong for multicore, we should wait the calling thread for timeout
- new_value = current[index] + amount;
- }
-
- if (new_value <= limit[index]) {
- current[index] = new_value;
- return true;
- }
- return false;
-}
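// A simplified, single-threaded model of the reservation check above: a
// reserve succeeds only if current + amount stays within the limit, and a
// release gives the amount back. The timeout/retry path noted in the TODO
// is deliberately omitted; all names here are illustrative.
#include <array>
#include <cstddef>
#include <cstdint>

namespace limit_sketch {
enum class Resource : std::size_t { PhysicalMemory, Threads, Count };

struct ResourceLimit {
    std::array<std::int64_t, static_cast<std::size_t>(Resource::Count)> limit{};
    std::array<std::int64_t, static_cast<std::size_t>(Resource::Count)> current{};

    bool Reserve(Resource r, std::int64_t amount) {
        const auto i = static_cast<std::size_t>(r);
        if (current[i] + amount > limit[i]) {
            return false; // would exceed the configured cap
        }
        current[i] += amount;
        return true;
    }
    void Release(Resource r, std::int64_t amount) {
        current[static_cast<std::size_t>(r)] -= amount;
    }
};
} // namespace limit_sketch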
-
-void ResourceLimit::Release(ResourceType resource, u64 amount) {
- Release(resource, amount, amount);
-}
-
-void ResourceLimit::Release(ResourceType resource, u64 used_amount, u64 available_amount) {
- const std::size_t index{ResourceTypeToIndex(resource)};
-
- current[index] -= used_amount;
- available[index] -= available_amount;
-}
-
-std::shared_ptr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) {
- return std::make_shared<ResourceLimit>(kernel);
-}
-
-s64 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const {
- return limit.at(ResourceTypeToIndex(resource)) - current.at(ResourceTypeToIndex(resource));
-}
-
-s64 ResourceLimit::GetMaxResourceValue(ResourceType resource) const {
- return limit.at(ResourceTypeToIndex(resource));
-}
-
-ResultCode ResourceLimit::SetLimitValue(ResourceType resource, s64 value) {
- const std::size_t index{ResourceTypeToIndex(resource)};
- if (current[index] <= value) {
- limit[index] = value;
- return RESULT_SUCCESS;
- } else {
- LOG_ERROR(Kernel, "Limit value is too large! resource={}, value={}, index={}",
- static_cast<u32>(resource), value, index);
- return ERR_INVALID_STATE;
- }
-}
-} // namespace Kernel
diff --git a/src/core/hle/kernel/resource_limit.h b/src/core/hle/kernel/resource_limit.h
deleted file mode 100644
index 936cc4d0f..000000000
--- a/src/core/hle/kernel/resource_limit.h
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2015 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <array>
-#include <memory>
-
-#include "common/common_types.h"
-#include "core/hle/kernel/object.h"
-
-union ResultCode;
-
-namespace Kernel {
-
-class KernelCore;
-
-enum class ResourceType : u32 {
- PhysicalMemory,
- Threads,
- Events,
- TransferMemory,
- Sessions,
-
- // Used as a count, not an actual type.
- ResourceTypeCount
-};
-
-constexpr bool IsValidResourceType(ResourceType type) {
- return type < ResourceType::ResourceTypeCount;
-}
-
-class ResourceLimit final : public Object {
-public:
- explicit ResourceLimit(KernelCore& kernel);
- ~ResourceLimit() override;
-
- /// Creates a resource limit object.
- static std::shared_ptr<ResourceLimit> Create(KernelCore& kernel);
-
- std::string GetTypeName() const override {
- return "ResourceLimit";
- }
- std::string GetName() const override {
- return GetTypeName();
- }
-
- static constexpr HandleType HANDLE_TYPE = HandleType::ResourceLimit;
- HandleType GetHandleType() const override {
- return HANDLE_TYPE;
- }
-
- bool Reserve(ResourceType resource, s64 amount);
- bool Reserve(ResourceType resource, s64 amount, u64 timeout);
- void Release(ResourceType resource, u64 amount);
- void Release(ResourceType resource, u64 used_amount, u64 available_amount);
-
- /**
- * Gets the current value for the specified resource.
- * @param resource Requested resource type
- * @returns The current value of the resource type
- */
- s64 GetCurrentResourceValue(ResourceType resource) const;
-
- /**
- * Gets the max value for the specified resource.
- * @param resource Requested resource type
- * @returns The max value of the resource type
- */
- s64 GetMaxResourceValue(ResourceType resource) const;
-
- /**
- * Sets the limit value for a given resource type.
- *
- * @param resource The resource type to apply the limit to.
- * @param value The limit to apply to the given resource type.
- *
- * @return A result code indicating if setting the limit value
- * was successful or not.
- *
- * @note The supplied limit value *must* be greater than or equal to
- * the current resource value for the given resource type,
- * otherwise ERR_INVALID_STATE will be returned.
- */
- ResultCode SetLimitValue(ResourceType resource, s64 value);
-
-private:
- // TODO(Subv): Increment resource limit current values in their respective Kernel::T::Create
- // functions
- //
- // Currently we have no way of distinguishing if a Create was called by the running application,
- // or by a service module. Approach this once we have separated the service modules into their
- // own processes
-
- using ResourceArray =
- std::array<s64, static_cast<std::size_t>(ResourceType::ResourceTypeCount)>;
-
- ResourceArray limit{};
- ResourceArray current{};
- ResourceArray available{};
-};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
deleted file mode 100644
index a4b234424..000000000
--- a/src/core/hle/kernel/scheduler.cpp
+++ /dev/null
@@ -1,845 +0,0 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-//
-// SelectThreads, Yield functions originally by TuxSH.
-// licensed under GPLv2 or later under exception provided by the author.
-
-#include <algorithm>
-#include <mutex>
-#include <set>
-#include <unordered_set>
-#include <utility>
-
-#include "common/assert.h"
-#include "common/bit_util.h"
-#include "common/fiber.h"
-#include "common/logging/log.h"
-#include "core/arm/arm_interface.h"
-#include "core/core.h"
-#include "core/core_timing.h"
-#include "core/cpu_manager.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/physical_core.h"
-#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
-#include "core/hle/kernel/time_manager.h"
-
-namespace Kernel {
-
-GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {}
-
-GlobalScheduler::~GlobalScheduler() = default;
-
-void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) {
- std::scoped_lock lock{global_list_guard};
- thread_list.push_back(std::move(thread));
-}
-
-void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
- std::scoped_lock lock{global_list_guard};
- thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
- thread_list.end());
-}
-
-u32 GlobalScheduler::SelectThreads() {
- ASSERT(is_locked);
- const auto update_thread = [](Thread* thread, Scheduler& sched) {
- std::scoped_lock lock{sched.guard};
- if (thread != sched.selected_thread_set.get()) {
- if (thread == nullptr) {
- ++sched.idle_selection_count;
- }
- sched.selected_thread_set = SharedFrom(thread);
- }
- const bool reschedule_pending =
- sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread);
- sched.is_context_switch_pending = reschedule_pending;
- std::atomic_thread_fence(std::memory_order_seq_cst);
- return reschedule_pending;
- };
- if (!is_reselection_pending.load()) {
- return 0;
- }
- std::array<Thread*, Core::Hardware::NUM_CPU_CORES> top_threads{};
-
- u32 idle_cores{};
-
- // Step 1: Get top thread in schedule queue.
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- Thread* top_thread =
- scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
- if (top_thread != nullptr) {
- // TODO(Blinkhawk): Implement Thread Pinning
- } else {
- idle_cores |= (1ul << core);
- }
- top_threads[core] = top_thread;
- }
-
- while (idle_cores != 0) {
- u32 core_id = Common::CountTrailingZeroes32(idle_cores);
-
- if (!suggested_queue[core_id].empty()) {
- std::array<s32, Core::Hardware::NUM_CPU_CORES> migration_candidates{};
- std::size_t num_candidates = 0;
- auto iter = suggested_queue[core_id].begin();
- Thread* suggested = nullptr;
- // Step 2: Try selecting a suggested thread.
- while (iter != suggested_queue[core_id].end()) {
- suggested = *iter;
- iter++;
- s32 suggested_core_id = suggested->GetProcessorID();
- Thread* top_thread =
- suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr;
- if (top_thread != suggested) {
- if (top_thread != nullptr &&
- top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
- suggested = nullptr;
- break;
-                        // A higher-priority thread is running on that core, so cancel the migration
- }
- TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested);
- break;
- }
- suggested = nullptr;
- migration_candidates[num_candidates++] = suggested_core_id;
- }
- // Step 3: Select a suggested thread from another core
- if (suggested == nullptr) {
- for (std::size_t i = 0; i < num_candidates; i++) {
- s32 candidate_core = migration_candidates[i];
- suggested = top_threads[candidate_core];
- auto it = scheduled_queue[candidate_core].begin();
- it++;
- Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
- if (next != nullptr) {
- TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
- suggested);
- top_threads[candidate_core] = next;
- break;
- } else {
- suggested = nullptr;
- }
- }
- }
- top_threads[core_id] = suggested;
- }
-
- idle_cores &= ~(1ul << core_id);
- }
- u32 cores_needing_context_switch{};
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- Scheduler& sched = kernel.Scheduler(core);
- ASSERT(top_threads[core] == nullptr ||
- static_cast<u32>(top_threads[core]->GetProcessorID()) == core);
- if (update_thread(top_threads[core], sched)) {
- cores_needing_context_switch |= (1ul << core);
- }
- }
- return cores_needing_context_switch;
-}
-
-bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
- ASSERT(is_locked);
- // Note: caller should use critical section, etc.
- if (!yielding_thread->IsRunnable()) {
- // Normally this case shouldn't happen except for SetThreadActivity.
- is_reselection_pending.store(true, std::memory_order_release);
- return false;
- }
- const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
- const u32 priority = yielding_thread->GetPriority();
-
- // Yield the thread
- Reschedule(priority, core_id, yielding_thread);
- const Thread* const winner = scheduled_queue[core_id].front();
- if (kernel.GetCurrentHostThreadID() != core_id) {
- is_reselection_pending.store(true, std::memory_order_release);
- }
-
- return AskForReselectionOrMarkRedundant(yielding_thread, winner);
-}
-
-bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
- ASSERT(is_locked);
- // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
- // etc.
- if (!yielding_thread->IsRunnable()) {
- // Normally this case shouldn't happen except for SetThreadActivity.
- is_reselection_pending.store(true, std::memory_order_release);
- return false;
- }
- const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
- const u32 priority = yielding_thread->GetPriority();
-
- // Yield the thread
- Reschedule(priority, core_id, yielding_thread);
-
- std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
- for (std::size_t i = 0; i < current_threads.size(); i++) {
- current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
- }
-
- Thread* next_thread = scheduled_queue[core_id].front(priority);
- Thread* winner = nullptr;
- for (auto& thread : suggested_queue[core_id]) {
- const s32 source_core = thread->GetProcessorID();
- if (source_core >= 0) {
- if (current_threads[source_core] != nullptr) {
- if (thread == current_threads[source_core] ||
- current_threads[source_core]->GetPriority() < min_regular_priority) {
- continue;
- }
- }
- }
- if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
- next_thread->GetPriority() < thread->GetPriority()) {
- if (thread->GetPriority() <= priority) {
- winner = thread;
- break;
- }
- }
- }
-
- if (winner != nullptr) {
- if (winner != yielding_thread) {
- TransferToCore(winner->GetPriority(), s32(core_id), winner);
- }
- } else {
- winner = next_thread;
- }
-
- if (kernel.GetCurrentHostThreadID() != core_id) {
- is_reselection_pending.store(true, std::memory_order_release);
- }
-
- return AskForReselectionOrMarkRedundant(yielding_thread, winner);
-}
-
-bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
- ASSERT(is_locked);
- // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
- // etc.
- if (!yielding_thread->IsRunnable()) {
- // Normally this case shouldn't happen except for SetThreadActivity.
- is_reselection_pending.store(true, std::memory_order_release);
- return false;
- }
- Thread* winner = nullptr;
- const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
-
- // Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one instead
- TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread);
-
- // If the core is idle, perform load balancing, excluding the threads that have just used this
- // function...
- if (scheduled_queue[core_id].empty()) {
- // Here, "current_threads" is calculated after the ""yield"", unlike yield -1
- std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
- for (std::size_t i = 0; i < current_threads.size(); i++) {
- current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
- }
- for (auto& thread : suggested_queue[core_id]) {
- const s32 source_core = thread->GetProcessorID();
- if (source_core < 0 || thread == current_threads[source_core]) {
- continue;
- }
- if (current_threads[source_core] == nullptr ||
- current_threads[source_core]->GetPriority() >= min_regular_priority) {
- winner = thread;
- }
- break;
- }
- if (winner != nullptr) {
- if (winner != yielding_thread) {
- TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
- }
- } else {
- winner = yielding_thread;
- }
- } else {
- winner = scheduled_queue[core_id].front();
- }
-
- if (kernel.GetCurrentHostThreadID() != core_id) {
- is_reselection_pending.store(true, std::memory_order_release);
- }
-
- return AskForReselectionOrMarkRedundant(yielding_thread, winner);
-}
-
-void GlobalScheduler::PreemptThreads() {
- ASSERT(is_locked);
- for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
- const u32 priority = preemption_priorities[core_id];
-
- if (scheduled_queue[core_id].size(priority) > 0) {
- if (scheduled_queue[core_id].size(priority) > 1) {
- scheduled_queue[core_id].front(priority)->IncrementYieldCount();
- }
- scheduled_queue[core_id].yield(priority);
- if (scheduled_queue[core_id].size(priority) > 1) {
- scheduled_queue[core_id].front(priority)->IncrementYieldCount();
- }
- }
-
- Thread* current_thread =
- scheduled_queue[core_id].empty() ? nullptr : scheduled_queue[core_id].front();
- Thread* winner = nullptr;
- for (auto& thread : suggested_queue[core_id]) {
- const s32 source_core = thread->GetProcessorID();
- if (thread->GetPriority() != priority) {
- continue;
- }
- if (source_core >= 0) {
- Thread* next_thread = scheduled_queue[source_core].empty()
- ? nullptr
- : scheduled_queue[source_core].front();
- if (next_thread != nullptr && next_thread->GetPriority() < 2) {
- break;
- }
- if (next_thread == thread) {
- continue;
- }
- }
- if (current_thread != nullptr &&
- current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
- winner = thread;
- break;
- }
- }
-
- if (winner != nullptr) {
- TransferToCore(winner->GetPriority(), s32(core_id), winner);
- current_thread =
- winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
- }
-
- if (current_thread != nullptr && current_thread->GetPriority() > priority) {
- for (auto& thread : suggested_queue[core_id]) {
- const s32 source_core = thread->GetProcessorID();
- if (thread->GetPriority() < priority) {
- continue;
- }
- if (source_core >= 0) {
- Thread* next_thread = scheduled_queue[source_core].empty()
- ? nullptr
- : scheduled_queue[source_core].front();
- if (next_thread != nullptr && next_thread->GetPriority() < 2) {
- break;
- }
- if (next_thread == thread) {
- continue;
- }
- }
- if (current_thread != nullptr &&
- current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
- winner = thread;
- break;
- }
- }
-
- if (winner != nullptr) {
- TransferToCore(winner->GetPriority(), s32(core_id), winner);
- current_thread = winner;
- }
- }
-
- is_reselection_pending.store(true, std::memory_order_release);
- }
-}
-
-void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
- Core::EmuThreadHandle global_thread) {
- u32 current_core = global_thread.host_handle;
- bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
- (current_core < Core::Hardware::NUM_CPU_CORES);
- while (cores_pending_reschedule != 0) {
- u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
- ASSERT(core < Core::Hardware::NUM_CPU_CORES);
- if (!must_context_switch || core != current_core) {
- auto& phys_core = kernel.PhysicalCore(core);
- phys_core.Interrupt();
- } else {
- must_context_switch = true;
- }
- cores_pending_reschedule &= ~(1ul << core);
- }
- if (must_context_switch) {
- auto& core_scheduler = kernel.CurrentScheduler();
- kernel.ExitSVCProfile();
- core_scheduler.TryDoContextSwitch();
- kernel.EnterSVCProfile();
- }
-}
-
-void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) {
- ASSERT(is_locked);
- suggested_queue[core].add(thread, priority);
-}
-
-void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) {
- ASSERT(is_locked);
- suggested_queue[core].remove(thread, priority);
-}
-
-void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) {
- ASSERT(is_locked);
- ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
- scheduled_queue[core].add(thread, priority);
-}
-
-void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) {
- ASSERT(is_locked);
- ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
- scheduled_queue[core].add(thread, priority, false);
-}
-
-void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) {
- ASSERT(is_locked);
- scheduled_queue[core].remove(thread, priority);
- scheduled_queue[core].add(thread, priority);
-}
-
-void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) {
- ASSERT(is_locked);
- scheduled_queue[core].remove(thread, priority);
-}
-
-void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
- ASSERT(is_locked);
- const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
- const s32 source_core = thread->GetProcessorID();
- if (source_core == destination_core || !schedulable) {
- return;
- }
- thread->SetProcessorID(destination_core);
- if (source_core >= 0) {
- Unschedule(priority, static_cast<u32>(source_core), thread);
- }
- if (destination_core >= 0) {
- Unsuggest(priority, static_cast<u32>(destination_core), thread);
- Schedule(priority, static_cast<u32>(destination_core), thread);
- }
- if (source_core >= 0) {
- Suggest(priority, static_cast<u32>(source_core), thread);
- }
-}
-
-bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread,
- const Thread* winner) {
- if (current_thread == winner) {
- current_thread->IncrementYieldCount();
- return true;
- } else {
- is_reselection_pending.store(true, std::memory_order_release);
- return false;
- }
-}
-
-void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
- if (old_flags == thread->scheduling_state) {
- return;
- }
- ASSERT(is_locked);
-
- if (old_flags == static_cast<u32>(ThreadSchedStatus::Runnable)) {
-        // In this case the thread was running, and now it's pausing/exiting
- if (thread->processor_id >= 0) {
- Unschedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
- }
-
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (core != static_cast<u32>(thread->processor_id) &&
- ((thread->affinity_mask >> core) & 1) != 0) {
- Unsuggest(thread->current_priority, core, thread);
- }
- }
- } else if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
- // The thread is now set to running from being stopped
- if (thread->processor_id >= 0) {
- Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
- }
-
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (core != static_cast<u32>(thread->processor_id) &&
- ((thread->affinity_mask >> core) & 1) != 0) {
- Suggest(thread->current_priority, core, thread);
- }
- }
- }
-
- SetReselectionPending();
-}
-
-void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priority) {
- if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable)) {
- return;
- }
- ASSERT(is_locked);
- if (thread->processor_id >= 0) {
- Unschedule(old_priority, static_cast<u32>(thread->processor_id), thread);
- }
-
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (core != static_cast<u32>(thread->processor_id) &&
- ((thread->affinity_mask >> core) & 1) != 0) {
- Unsuggest(old_priority, core, thread);
- }
- }
-
- if (thread->processor_id >= 0) {
- if (thread == kernel.CurrentScheduler().GetCurrentThread()) {
- SchedulePrepend(thread->current_priority, static_cast<u32>(thread->processor_id),
- thread);
- } else {
- Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
- }
- }
-
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (core != static_cast<u32>(thread->processor_id) &&
- ((thread->affinity_mask >> core) & 1) != 0) {
- Suggest(thread->current_priority, core, thread);
- }
- }
- thread->IncrementYieldCount();
- SetReselectionPending();
-}
-
-void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask,
- s32 old_core) {
- if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable) ||
- thread->current_priority >= THREADPRIO_COUNT) {
- return;
- }
- ASSERT(is_locked);
-
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (((old_affinity_mask >> core) & 1) != 0) {
- if (core == static_cast<u32>(old_core)) {
- Unschedule(thread->current_priority, core, thread);
- } else {
- Unsuggest(thread->current_priority, core, thread);
- }
- }
- }
-
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (((thread->affinity_mask >> core) & 1) != 0) {
- if (core == static_cast<u32>(thread->processor_id)) {
- Schedule(thread->current_priority, core, thread);
- } else {
- Suggest(thread->current_priority, core, thread);
- }
- }
- }
-
- thread->IncrementYieldCount();
- SetReselectionPending();
-}
-
-void GlobalScheduler::Shutdown() {
- for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- scheduled_queue[core].clear();
- suggested_queue[core].clear();
- }
- thread_list.clear();
-}
-
-void GlobalScheduler::Lock() {
- Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID();
- ASSERT(!current_thread.IsInvalid());
- if (current_thread == current_owner) {
- ++scope_lock;
- } else {
- inner_lock.lock();
- is_locked = true;
- current_owner = current_thread;
- ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle());
- scope_lock = 1;
- }
-}
-
-void GlobalScheduler::Unlock() {
- if (--scope_lock != 0) {
- ASSERT(scope_lock > 0);
- return;
- }
- u32 cores_pending_reschedule = SelectThreads();
- Core::EmuThreadHandle leaving_thread = current_owner;
- current_owner = Core::EmuThreadHandle::InvalidHandle();
- scope_lock = 1;
- is_locked = false;
- inner_lock.unlock();
- EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread);
-}
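// The Lock/Unlock pair above implements a recursive, owner-tracked lock:
// re-entry by the owner only bumps a depth counter, and the final Unlock is
// the one that releases the inner lock and kicks off rescheduling. A generic
// sketch of that shape, with std::thread::id standing in for EmuThreadHandle:
#include <atomic>
#include <cassert>
#include <mutex>
#include <thread>

namespace lock_sketch {
class RecursiveOwnerLock {
public:
    void Lock() {
        const auto me = std::this_thread::get_id();
        if (owner.load(std::memory_order_acquire) == me) {
            ++depth; // re-entry by the current owner deepens the scope
            return;
        }
        inner.lock();
        owner.store(me, std::memory_order_release);
        depth = 1;
    }

    bool Unlock() { // returns true when the lock is actually released
        assert(owner.load(std::memory_order_acquire) == std::this_thread::get_id());
        if (--depth != 0) {
            return false;
        }
        owner.store(std::thread::id{}, std::memory_order_release);
        inner.unlock();
        return true; // the real Unlock reselects threads and interrupts cores here
    }

private:
    std::mutex inner;
    std::atomic<std::thread::id> owner{};
    int depth = 0;
};
} // namespace lock_sketch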
-
-Scheduler::Scheduler(Core::System& system, std::size_t core_id) : system(system), core_id(core_id) {
- switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this);
-}
-
-Scheduler::~Scheduler() = default;
-
-bool Scheduler::HaveReadyThreads() const {
- return system.GlobalScheduler().HaveReadyThreads(core_id);
-}
-
-Thread* Scheduler::GetCurrentThread() const {
- if (current_thread) {
- return current_thread.get();
- }
- return idle_thread.get();
-}
-
-Thread* Scheduler::GetSelectedThread() const {
- return selected_thread.get();
-}
-
-u64 Scheduler::GetLastContextSwitchTicks() const {
- return last_context_switch_time;
-}
-
-void Scheduler::TryDoContextSwitch() {
- auto& phys_core = system.Kernel().CurrentPhysicalCore();
- if (phys_core.IsInterrupted()) {
- phys_core.ClearInterrupt();
- }
- guard.lock();
- if (is_context_switch_pending) {
- SwitchContext();
- } else {
- guard.unlock();
- }
-}
-
-void Scheduler::OnThreadStart() {
- SwitchContextStep2();
-}
-
-void Scheduler::Unload() {
- Thread* thread = current_thread.get();
- if (thread) {
- thread->SetContinuousOnSVC(false);
- thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
- thread->SetIsRunning(false);
- if (!thread->IsHLEThread() && !thread->HasExited()) {
- Core::ARM_Interface& cpu_core = thread->ArmInterface();
- cpu_core.SaveContext(thread->GetContext32());
- cpu_core.SaveContext(thread->GetContext64());
- // Save the TPIDR_EL0 system register in case it was modified.
- thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
- cpu_core.ClearExclusiveState();
- }
- thread->context_guard.unlock();
- }
-}
-
-void Scheduler::Reload() {
- Thread* thread = current_thread.get();
- if (thread) {
- ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
- "Thread must be runnable.");
-
- // Cancel any outstanding wakeup events for this thread
- thread->SetIsRunning(true);
- thread->SetWasRunning(false);
- thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
-
- auto* const thread_owner_process = thread->GetOwnerProcess();
- if (thread_owner_process != nullptr) {
- system.Kernel().MakeCurrentProcess(thread_owner_process);
- }
- if (!thread->IsHLEThread()) {
- Core::ARM_Interface& cpu_core = thread->ArmInterface();
- cpu_core.LoadContext(thread->GetContext32());
- cpu_core.LoadContext(thread->GetContext64());
- cpu_core.SetTlsAddress(thread->GetTLSAddress());
- cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
- cpu_core.ChangeProcessorID(this->core_id);
- cpu_core.ClearExclusiveState();
- }
- }
-}
-
-void Scheduler::SwitchContextStep2() {
- // Load context of new thread
- if (selected_thread) {
- ASSERT_MSG(selected_thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
- "Thread must be runnable.");
-
- // Cancel any outstanding wakeup events for this thread
- selected_thread->SetIsRunning(true);
- selected_thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
- selected_thread->SetWasRunning(false);
-
- auto* const thread_owner_process = current_thread->GetOwnerProcess();
- if (thread_owner_process != nullptr) {
- system.Kernel().MakeCurrentProcess(thread_owner_process);
- }
- if (!selected_thread->IsHLEThread()) {
- Core::ARM_Interface& cpu_core = selected_thread->ArmInterface();
- cpu_core.LoadContext(selected_thread->GetContext32());
- cpu_core.LoadContext(selected_thread->GetContext64());
- cpu_core.SetTlsAddress(selected_thread->GetTLSAddress());
- cpu_core.SetTPIDR_EL0(selected_thread->GetTPIDR_EL0());
- cpu_core.ChangeProcessorID(this->core_id);
- cpu_core.ClearExclusiveState();
- }
- }
-
- TryDoContextSwitch();
-}
-
-void Scheduler::SwitchContext() {
- current_thread_prev = current_thread;
- selected_thread = selected_thread_set;
- Thread* previous_thread = current_thread_prev.get();
- Thread* new_thread = selected_thread.get();
- current_thread = selected_thread;
-
- is_context_switch_pending = false;
-
- if (new_thread == previous_thread) {
- guard.unlock();
- return;
- }
-
- Process* const previous_process = system.Kernel().CurrentProcess();
-
- UpdateLastContextSwitchTime(previous_thread, previous_process);
-
- // Save context for previous thread
- if (previous_thread) {
- if (new_thread != nullptr && new_thread->IsSuspendThread()) {
- previous_thread->SetWasRunning(true);
- }
- previous_thread->SetContinuousOnSVC(false);
- previous_thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
- previous_thread->SetIsRunning(false);
- if (!previous_thread->IsHLEThread() && !previous_thread->HasExited()) {
- Core::ARM_Interface& cpu_core = previous_thread->ArmInterface();
- cpu_core.SaveContext(previous_thread->GetContext32());
- cpu_core.SaveContext(previous_thread->GetContext64());
- // Save the TPIDR_EL0 system register in case it was modified.
- previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
- cpu_core.ClearExclusiveState();
- }
- previous_thread->context_guard.unlock();
- }
-
- std::shared_ptr<Common::Fiber>* old_context;
- if (previous_thread != nullptr) {
- old_context = &previous_thread->GetHostContext();
- } else {
- old_context = &idle_thread->GetHostContext();
- }
- guard.unlock();
-
- Common::Fiber::YieldTo(*old_context, switch_fiber);
-    /// When a thread wakes up, it may resume under a different scheduler on another core.
- auto& next_scheduler = system.Kernel().CurrentScheduler();
- next_scheduler.SwitchContextStep2();
-}
-
-void Scheduler::OnSwitch(void* this_scheduler) {
- Scheduler* sched = static_cast<Scheduler*>(this_scheduler);
- sched->SwitchToCurrent();
-}
-
-void Scheduler::SwitchToCurrent() {
- while (true) {
- {
- std::scoped_lock lock{guard};
- selected_thread = selected_thread_set;
- current_thread = selected_thread;
- is_context_switch_pending = false;
- }
- while (!is_context_switch_pending) {
- if (current_thread != nullptr && !current_thread->IsHLEThread()) {
- current_thread->context_guard.lock();
- if (!current_thread->IsRunnable()) {
- current_thread->context_guard.unlock();
- break;
- }
- if (current_thread->GetProcessorID() != core_id) {
- current_thread->context_guard.unlock();
- break;
- }
- }
- std::shared_ptr<Common::Fiber>* next_context;
- if (current_thread != nullptr) {
- next_context = &current_thread->GetHostContext();
- } else {
- next_context = &idle_thread->GetHostContext();
- }
- Common::Fiber::YieldTo(switch_fiber, *next_context);
- }
- }
-}
-
-void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
- const u64 prev_switch_ticks = last_context_switch_time;
- const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
- const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
-
- if (thread != nullptr) {
- thread->UpdateCPUTimeTicks(update_ticks);
- }
-
- if (process != nullptr) {
- process->UpdateCPUTimeTicks(update_ticks);
- }
-
- last_context_switch_time = most_recent_switch_ticks;
-}
-
-void Scheduler::Initialize() {
- std::string name = "Idle Thread Id:" + std::to_string(core_id);
- std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
- void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
- ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
- auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
- nullptr, std::move(init_func), init_func_parameter);
- idle_thread = std::move(thread_res).Unwrap();
-}
-
-void Scheduler::Shutdown() {
- current_thread = nullptr;
- selected_thread = nullptr;
-}
-
-SchedulerLock::SchedulerLock(KernelCore& kernel) : kernel{kernel} {
- kernel.GlobalScheduler().Lock();
-}
-
-SchedulerLock::~SchedulerLock() {
- kernel.GlobalScheduler().Unlock();
-}
-
-SchedulerLockAndSleep::SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle,
- Thread* time_task, s64 nanoseconds)
- : SchedulerLock{kernel}, event_handle{event_handle}, time_task{time_task}, nanoseconds{
- nanoseconds} {
- event_handle = InvalidHandle;
-}
-
-SchedulerLockAndSleep::~SchedulerLockAndSleep() {
- if (sleep_cancelled) {
- return;
- }
- auto& time_manager = kernel.TimeManager();
- time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
-}
-
-void SchedulerLockAndSleep::Release() {
- if (sleep_cancelled) {
- return;
- }
- auto& time_manager = kernel.TimeManager();
- time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
- sleep_cancelled = true;
-}
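// SchedulerLockAndSleep is a scoped lock plus an armed timeout: leaving the
// scope schedules the wakeup event unless CancelSleep() ran first. A generic
// sketch of that RAII shape, with the timer call abstracted to a callback:
#include <functional>
#include <utility>

namespace timer_sketch {
class ScopedTimeout {
public:
    explicit ScopedTimeout(std::function<void()> arm_timeout)
        : arm{std::move(arm_timeout)} {}

    ~ScopedTimeout() {
        if (!cancelled && arm) {
            arm(); // schedule the wakeup only if the sleep still stands
        }
    }

    void Cancel() {
        cancelled = true;
    }

private:
    std::function<void()> arm;
    bool cancelled = false;
};
} // namespace timer_sketch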
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
deleted file mode 100644
index 36e3c26fb..000000000
--- a/src/core/hle/kernel/scheduler.h
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <atomic>
-#include <memory>
-#include <mutex>
-#include <vector>
-
-#include "common/common_types.h"
-#include "common/multi_level_queue.h"
-#include "common/spin_lock.h"
-#include "core/hardware_properties.h"
-#include "core/hle/kernel/thread.h"
-
-namespace Common {
-class Fiber;
-}
-
-namespace Core {
-class ARM_Interface;
-class System;
-} // namespace Core
-
-namespace Kernel {
-
-class KernelCore;
-class Process;
-class SchedulerLock;
-
-class GlobalScheduler final {
-public:
- explicit GlobalScheduler(KernelCore& kernel);
- ~GlobalScheduler();
-
- /// Adds a new thread to the scheduler
- void AddThread(std::shared_ptr<Thread> thread);
-
- /// Removes a thread from the scheduler
- void RemoveThread(std::shared_ptr<Thread> thread);
-
- /// Returns a list of all threads managed by the scheduler
- const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
- return thread_list;
- }
-
- /// Notify the scheduler a thread's status has changed.
- void AdjustSchedulingOnStatus(Thread* thread, u32 old_flags);
-
- /// Notify the scheduler a thread's priority has changed.
- void AdjustSchedulingOnPriority(Thread* thread, u32 old_priority);
-
- /// Notify the scheduler a thread's core and/or affinity mask has changed.
- void AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask, s32 old_core);
-
- /**
- * Takes care of selecting the new scheduled threads in three steps:
- *
-     * 1. First, a thread is selected from the top of the priority queue. If no thread
-     * is obtained, we move on to step two; otherwise, we are done.
-     *
-     * 2. Second, we try to get a suggested thread that's not assigned to any core or
-     * that is not the top thread on its core.
-     *
-     * 3. Third, if no suggested thread is found, we do a second pass and pick a running
-     * thread on another core and swap it with that core's current thread.
-     *
-     * Returns the cores needing scheduling.
- */
- u32 SelectThreads();
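// A toy rendition of step 1 above, under simplified types: take the front
// of each core's queue and record empty cores in a bitmask that the later
// migration steps can walk with count-trailing-zeroes. Not the emulator's
// MultiLevelQueue; names are illustrative.
#include <array>
#include <bit>
#include <cstdint>
#include <deque>

namespace select_sketch {
constexpr std::uint32_t NumCores = 4;
struct Thread {};
using Queues = std::array<std::deque<Thread*>, NumCores>;

std::uint32_t SelectTopThreads(const Queues& queues,
                               std::array<Thread*, NumCores>& top) {
    std::uint32_t idle_cores = 0;
    for (std::uint32_t core = 0; core < NumCores; ++core) {
        top[core] = queues[core].empty() ? nullptr : queues[core].front();
        if (top[core] == nullptr) {
            idle_cores |= (1u << core); // candidate for steps 2 and 3
        }
    }
    // Callers then iterate idle cores lowest-first:
    //   while (idle_cores != 0) {
    //       const auto core = std::countr_zero(idle_cores);
    //       ... try a suggested or migratable thread for `core` ...
    //       idle_cores &= ~(1u << core);
    //   }
    return idle_cores;
}
} // namespace select_sketch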
-
- bool HaveReadyThreads(std::size_t core_id) const {
- return !scheduled_queue[core_id].empty();
- }
-
- /**
-     * Takes a thread and moves it to the back of its priority list.
- *
-     * @note This operation can be redundant, in which case the scheduling is left unchanged.
- */
- bool YieldThread(Thread* thread);
-
- /**
-     * Takes a thread and moves it to the back of its priority list.
-     * Afterwards, it tries to pick a suggested thread from the suggested queue that has an
-     * older last-run time or a better priority than the next thread in the core.
- *
-     * @note This operation can be redundant, in which case the scheduling is left unchanged.
- */
- bool YieldThreadAndBalanceLoad(Thread* thread);
-
- /**
-     * Takes a thread and moves it out of the scheduling queue
-     * and into the suggested queue. If no thread can be scheduled afterwards on that core,
-     * a suggested thread is obtained instead.
- *
-     * @note This operation can be redundant, in which case the scheduling is left unchanged.
- */
- bool YieldThreadAndWaitForLoadBalancing(Thread* thread);
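// All three yield variants above start from the same primitive: remove the
// thread from its priority level and re-add it at the back. A minimal
// multilevel-queue sketch of that move (types are illustrative):
#include <array>
#include <cstddef>
#include <list>

namespace yield_sketch {
constexpr std::size_t NumPriorities = 64;
struct Thread {};

struct MultiLevelQueue {
    std::array<std::list<Thread*>, NumPriorities> levels;

    void Yield(std::size_t priority, Thread* t) {
        auto& level = levels[priority];
        level.remove(t);    // drop the thread from its priority level...
        level.push_back(t); // ...and requeue it behind its equal-priority peers
    }
};
} // namespace yield_sketch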
-
- /**
- * Rotates the scheduling queues of threads at a preemption priority and then does
- * some core rebalancing. Preemption priorities can be found in the array
- * 'preemption_priorities'.
- *
- * @note This operation happens every 10ms.
- */
- void PreemptThreads();
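// The preemption pass boils down to rotating the run queue at one fixed
// preemption priority (59/59/59/62 per core) every 10 ms, so equal-priority
// threads take turns. A standalone sketch of the rotation itself:
#include <list>

namespace preempt_sketch {
struct Thread {};

// Rotate one priority level: the head goes to the back, the next thread runs.
void RotateLevel(std::list<Thread*>& level) {
    if (level.size() > 1) {
        level.push_back(level.front());
        level.pop_front();
    }
}
} // namespace preempt_sketch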
-
- u32 CpuCoresCount() const {
- return Core::Hardware::NUM_CPU_CORES;
- }
-
- void SetReselectionPending() {
- is_reselection_pending.store(true, std::memory_order_release);
- }
-
- bool IsReselectionPending() const {
- return is_reselection_pending.load(std::memory_order_acquire);
- }
-
- void Shutdown();
-
-private:
- friend class SchedulerLock;
-
- /// Lock the scheduler to the current thread.
- void Lock();
-
- /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling
- /// and reschedules current core if needed.
- void Unlock();
-
- void EnableInterruptAndSchedule(u32 cores_pending_reschedule,
- Core::EmuThreadHandle global_thread);
-
- /**
- * Add a thread to the suggested queue of a cpu core. Suggested threads may be
- * picked if no thread is scheduled to run on the core.
- */
- void Suggest(u32 priority, std::size_t core, Thread* thread);
-
- /**
-     * Remove a thread from the suggested queue of a cpu core. Suggested threads may be
- * picked if no thread is scheduled to run on the core.
- */
- void Unsuggest(u32 priority, std::size_t core, Thread* thread);
-
- /**
- * Add a thread to the scheduling queue of a cpu core. The thread is added at the
-     * back of the queue in its priority level.
- */
- void Schedule(u32 priority, std::size_t core, Thread* thread);
-
- /**
- * Add a thread to the scheduling queue of a cpu core. The thread is added at the
-     * front of the queue in its priority level.
- */
- void SchedulePrepend(u32 priority, std::size_t core, Thread* thread);
-
- /// Reschedule an already scheduled thread based on a new priority
- void Reschedule(u32 priority, std::size_t core, Thread* thread);
-
- /// Unschedules a thread.
- void Unschedule(u32 priority, std::size_t core, Thread* thread);
-
- /**
-     * Transfers a thread to a specific core. If destination_core is -1,
-     * the thread is unscheduled from its source core and added to that
-     * core's suggested queue.
- */
- void TransferToCore(u32 priority, s32 destination_core, Thread* thread);
-
- bool AskForReselectionOrMarkRedundant(Thread* current_thread, const Thread* winner);
-
- static constexpr u32 min_regular_priority = 2;
- std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
- scheduled_queue;
- std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
- suggested_queue;
- std::atomic<bool> is_reselection_pending{false};
-
- // The priority levels at which the global scheduler preempts threads every 10 ms. They are
- // ordered from Core 0 to Core 3.
- std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
-
- /// Scheduler lock mechanisms.
- bool is_locked{};
- Common::SpinLock inner_lock{};
- std::atomic<s64> scope_lock{};
- Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()};
-
- Common::SpinLock global_list_guard{};
-
- /// Lists all thread ids that aren't deleted/etc.
- std::vector<std::shared_ptr<Thread>> thread_list;
- KernelCore& kernel;
-};
-
-class Scheduler final {
-public:
- explicit Scheduler(Core::System& system, std::size_t core_id);
- ~Scheduler();
-
- /// Returns whether there are any threads that are ready to run.
- bool HaveReadyThreads() const;
-
- /// Reschedules to the next available thread (call after current thread is suspended)
- void TryDoContextSwitch();
-
-    /// The next two functions are for single-core mode only.
- /// Unload current thread before preempting core.
- void Unload();
- /// Reload current thread after core preemption.
- void Reload();
-
- /// Gets the current running thread
- Thread* GetCurrentThread() const;
-
- /// Gets the currently selected thread from the top of the multilevel queue
- Thread* GetSelectedThread() const;
-
- /// Gets the timestamp for the last context switch in ticks.
- u64 GetLastContextSwitchTicks() const;
-
- bool ContextSwitchPending() const {
- return is_context_switch_pending;
- }
-
- void Initialize();
-
-    /// Shuts down the scheduler.
- void Shutdown();
-
- void OnThreadStart();
-
- std::shared_ptr<Common::Fiber>& ControlContext() {
- return switch_fiber;
- }
-
- const std::shared_ptr<Common::Fiber>& ControlContext() const {
- return switch_fiber;
- }
-
-private:
- friend class GlobalScheduler;
-
- /// Switches the CPU's active thread context to that of the specified thread
- void SwitchContext();
-
-    /// When a thread wakes up, it must run this through its new scheduler
- void SwitchContextStep2();
-
- /**
- * Called on every context switch to update the internal timestamp
- * This also updates the running time ticks for the given thread and
- * process using the following difference:
- *
- * ticks += most_recent_ticks - last_context_switch_ticks
- *
- * The internal tick timestamp for the scheduler is simply the
- * most recent tick count retrieved. No special arithmetic is
- * applied to it.
- */
- void UpdateLastContextSwitchTime(Thread* thread, Process* process);
-
- static void OnSwitch(void* this_scheduler);
- void SwitchToCurrent();
-
- std::shared_ptr<Thread> current_thread = nullptr;
- std::shared_ptr<Thread> selected_thread = nullptr;
- std::shared_ptr<Thread> current_thread_prev = nullptr;
- std::shared_ptr<Thread> selected_thread_set = nullptr;
- std::shared_ptr<Thread> idle_thread = nullptr;
-
- std::shared_ptr<Common::Fiber> switch_fiber = nullptr;
-
- Core::System& system;
- u64 last_context_switch_time = 0;
- u64 idle_selection_count = 0;
- const std::size_t core_id;
-
- Common::SpinLock guard{};
-
- bool is_context_switch_pending = false;
-};
-
-class SchedulerLock {
-public:
- [[nodiscard]] explicit SchedulerLock(KernelCore& kernel);
- ~SchedulerLock();
-
-protected:
- KernelCore& kernel;
-};
-
-class SchedulerLockAndSleep : public SchedulerLock {
-public:
- explicit SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* time_task,
- s64 nanoseconds);
- ~SchedulerLockAndSleep();
-
- void CancelSleep() {
- sleep_cancelled = true;
- }
-
- void Release();
-
-private:
- Handle& event_handle;
- Thread* time_task;
- s64 nanoseconds;
- bool sleep_cancelled{};
-};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/server_port.cpp b/src/core/hle/kernel/server_port.cpp
index a549ae9d7..5d17346ad 100644
--- a/src/core/hle/kernel/server_port.cpp
+++ b/src/core/hle/kernel/server_port.cpp
@@ -5,20 +5,20 @@
#include <tuple>
#include "common/assert.h"
#include "core/hle/kernel/client_port.h"
-#include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/server_session.h"
-#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/svc_results.h"
namespace Kernel {
-ServerPort::ServerPort(KernelCore& kernel) : SynchronizationObject{kernel} {}
+ServerPort::ServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
ServerPort::~ServerPort() = default;
ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() {
if (pending_sessions.empty()) {
- return ERR_NOT_FOUND;
+ return ResultNotFound;
}
auto session = std::move(pending_sessions.back());
@@ -28,15 +28,9 @@ ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() {
void ServerPort::AppendPendingSession(std::shared_ptr<ServerSession> pending_session) {
pending_sessions.push_back(std::move(pending_session));
-}
-
-bool ServerPort::ShouldWait(const Thread* thread) const {
- // If there are no pending sessions, we wait until a new one is added.
- return pending_sessions.empty();
-}
-
-void ServerPort::Acquire(Thread* thread) {
- ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
+ if (pending_sessions.size() == 1) {
+ NotifyAvailable();
+ }
}
bool ServerPort::IsSignaled() const {
diff --git a/src/core/hle/kernel/server_port.h b/src/core/hle/kernel/server_port.h
index 41b191b86..29b4f2509 100644
--- a/src/core/hle/kernel/server_port.h
+++ b/src/core/hle/kernel/server_port.h
@@ -9,8 +9,8 @@
#include <utility>
#include <vector>
#include "common/common_types.h"
+#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/result.h"
namespace Kernel {
@@ -20,7 +20,7 @@ class KernelCore;
class ServerSession;
class SessionRequestHandler;
-class ServerPort final : public SynchronizationObject {
+class ServerPort final : public KSynchronizationObject {
public:
explicit ServerPort(KernelCore& kernel);
~ServerPort() override;
@@ -79,11 +79,10 @@ public:
/// waiting to be accepted by this port.
void AppendPendingSession(std::shared_ptr<ServerSession> pending_session);
- bool ShouldWait(const Thread* thread) const override;
- void Acquire(Thread* thread) override;
-
bool IsSignaled() const override;
+ void Finalize() override {}
+
private:
/// ServerSessions waiting to be accepted by the port
std::vector<std::shared_ptr<ServerSession>> pending_sessions;
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index 7e6391c6c..790dbb998 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -8,51 +8,40 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
-#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
-#include "core/hle/kernel/thread.h"
#include "core/memory.h"
namespace Kernel {
-ServerSession::ServerSession(KernelCore& kernel) : SynchronizationObject{kernel} {}
-ServerSession::~ServerSession() = default;
+ServerSession::ServerSession(KernelCore& kernel) : KSynchronizationObject{kernel} {}
+
+ServerSession::~ServerSession() {
+ kernel.ReleaseServiceThread(service_thread);
+}
ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kernel,
std::shared_ptr<Session> parent,
std::string name) {
std::shared_ptr<ServerSession> session{std::make_shared<ServerSession>(kernel)};
- session->request_event =
- Core::Timing::CreateEvent(name, [session](std::uintptr_t, std::chrono::nanoseconds) {
- session->CompleteSyncRequest();
- });
session->name = std::move(name);
session->parent = std::move(parent);
+ session->service_thread = kernel.CreateServiceThread(session->name);
return MakeResult(std::move(session));
}
-bool ServerSession::ShouldWait(const Thread* thread) const {
- // Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
- if (!parent->Client()) {
- return false;
- }
-
- // Wait if we have no pending requests, or if we're currently handling a request.
- return pending_requesting_threads.empty() || currently_handling != nullptr;
-}
-
bool ServerSession::IsSignaled() const {
// Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
if (!parent->Client()) {
@@ -63,15 +52,6 @@ bool ServerSession::IsSignaled() const {
return !pending_requesting_threads.empty() && currently_handling == nullptr;
}
-void ServerSession::Acquire(Thread* thread) {
- ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
- // We are now handling a request, pop it from the stack.
- // TODO(Subv): What happens if the client endpoint is closed before any requests are made?
- ASSERT(!pending_requesting_threads.empty());
- currently_handling = pending_requesting_threads.back();
- pending_requesting_threads.pop_back();
-}
-
void ServerSession::ClientDisconnected() {
// We keep a shared pointer to the hle handler to keep it alive throughout
// the call to ClientDisconnected, as ClientDisconnected invalidates the
@@ -131,29 +111,28 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con
}
}
- LOG_CRITICAL(IPC, "Unknown domain command={}",
- static_cast<int>(domain_message_header.command.Value()));
+ LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value());
ASSERT(false);
return RESULT_SUCCESS;
}
-ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<Thread> thread,
+ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<KThread> thread,
Core::Memory::Memory& memory) {
u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
auto context =
std::make_shared<HLERequestContext>(kernel, memory, SharedFrom(this), std::move(thread));
context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
- request_queue.Push(std::move(context));
+
+ if (auto strong_ptr = service_thread.lock()) {
+ strong_ptr->QueueSyncRequest(*this, std::move(context));
+ return RESULT_SUCCESS;
+ }
return RESULT_SUCCESS;
}
-ResultCode ServerSession::CompleteSyncRequest() {
- ASSERT(!request_queue.Empty());
-
- auto& context = *request_queue.Front();
-
+ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) {
ResultCode result = RESULT_SUCCESS;
// If the session has been converted to a domain, handle the domain request
if (IsDomain() && context.HasDomainMessageHeader()) {
@@ -172,24 +151,20 @@ ResultCode ServerSession::CompleteSyncRequest() {
// Some service requests require the thread to block
{
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
if (!context.IsThreadWaiting()) {
- context.GetThread().ResumeFromWait();
- context.GetThread().SetSynchronizationResults(nullptr, result);
+ context.GetThread().Wakeup();
+ context.GetThread().SetSyncedObject(nullptr, result);
}
}
- request_queue.Pop();
-
return result;
}
-ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread,
- Core::Memory::Memory& memory) {
- const ResultCode result = QueueSyncRequest(std::move(thread), memory);
- const auto delay = std::chrono::nanoseconds{kernel.IsMulticore() ? 0 : 20000};
- Core::System::GetInstance().CoreTiming().ScheduleEvent(delay, request_event, {});
- return result;
+ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<KThread> thread,
+ Core::Memory::Memory& memory,
+ Core::Timing::CoreTiming& core_timing) {
+ return QueueSyncRequest(std::move(thread), memory);
}
} // namespace Kernel
diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h
index 403aaf10b..c42d5ee59 100644
--- a/src/core/hle/kernel/server_session.h
+++ b/src/core/hle/kernel/server_session.h
@@ -10,7 +10,8 @@
#include <vector>
#include "common/threadsafe_queue.h"
-#include "core/hle/kernel/synchronization_object.h"
+#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/service_thread.h"
#include "core/hle/result.h"
namespace Core::Memory {
@@ -18,8 +19,9 @@ class Memory;
}
namespace Core::Timing {
+class CoreTiming;
struct EventType;
-}
+} // namespace Core::Timing
namespace Kernel {
@@ -27,7 +29,7 @@ class HLERequestContext;
class KernelCore;
class Session;
class SessionRequestHandler;
-class Thread;
+class KThread;
/**
* Kernel object representing the server endpoint of an IPC session. Sessions are the basic CTR-OS
@@ -41,7 +43,9 @@ class Thread;
* After the server replies to the request, the response is marshalled back to the caller's
* TLS buffer and control is transferred back to it.
*/
-class ServerSession final : public SynchronizationObject {
+class ServerSession final : public KSynchronizationObject {
+ friend class ServiceThread;
+
public:
explicit ServerSession(KernelCore& kernel);
~ServerSession() override;
@@ -73,8 +77,6 @@ public:
return parent.get();
}
- bool IsSignaled() const override;
-
/**
* Sets the HLE handler for the session. This handler will be called to service IPC requests
* instead of the regular IPC machinery. (The regular IPC machinery is currently not
@@ -87,16 +89,14 @@ public:
/**
* Handle a sync request from the emulated application.
*
- * @param thread Thread that initiated the request.
- * @param memory Memory context to handle the sync request under.
+ * @param thread Thread that initiated the request.
+ * @param memory Memory context to handle the sync request under.
+ * @param core_timing Core timing context to schedule the request event under.
*
* @returns ResultCode from the operation.
*/
- ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory);
-
- bool ShouldWait(const Thread* thread) const override;
-
- void Acquire(Thread* thread) override;
+ ResultCode HandleSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory,
+ Core::Timing::CoreTiming& core_timing);
/// Called when a client disconnection occurs.
void ClientDisconnected();
@@ -124,12 +124,16 @@ public:
convert_to_domain = true;
}
+ bool IsSignaled() const override;
+
+ void Finalize() override {}
+
private:
/// Queues a sync request from the emulated application.
- ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory);
+ ResultCode QueueSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory);
/// Completes a sync request from the emulated application.
- ResultCode CompleteSyncRequest();
+ ResultCode CompleteSyncRequest(HLERequestContext& context);
/// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an
/// object handle.
@@ -147,12 +151,12 @@ private:
/// List of threads that are pending a response after a sync request. This list is processed in
/// a LIFO manner, thus, the last request will be dispatched first.
/// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test.
- std::vector<std::shared_ptr<Thread>> pending_requesting_threads;
+ std::vector<std::shared_ptr<KThread>> pending_requesting_threads;
/// Thread whose request is currently being handled. A request is considered "handled" when a
/// response is sent via svcReplyAndReceive.
/// TODO(Subv): Find a better name for this.
- std::shared_ptr<Thread> currently_handling;
+ std::shared_ptr<KThread> currently_handling;
/// When set to True, converts the session to a domain at the end of the command
bool convert_to_domain{};
@@ -160,11 +164,8 @@ private:
/// The name of this session (optional)
std::string name;
- /// Core timing event used to schedule the service request at some point in the future
- std::shared_ptr<Core::Timing::EventType> request_event;
-
- /// Queue of scheduled service requests
- Common::MPSCQueue<std::shared_ptr<Kernel::HLERequestContext>> request_queue;
+ /// Thread to dispatch service requests
+ std::weak_ptr<ServiceThread> service_thread;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp
new file mode 100644
index 000000000..ee46f3e21
--- /dev/null
+++ b/src/core/hle/kernel/service_thread.cpp
@@ -0,0 +1,110 @@
+// Copyright 2020 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <condition_variable>
+#include <functional>
+#include <mutex>
+#include <thread>
+#include <vector>
+#include <queue>
+
+#include "common/assert.h"
+#include "common/scope_exit.h"
+#include "common/thread.h"
+#include "core/core.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/server_session.h"
+#include "core/hle/kernel/service_thread.h"
+#include "core/hle/lock.h"
+#include "video_core/renderer_base.h"
+
+namespace Kernel {
+
+class ServiceThread::Impl final {
+public:
+ explicit Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name);
+ ~Impl();
+
+ void QueueSyncRequest(ServerSession& session, std::shared_ptr<HLERequestContext>&& context);
+
+private:
+ std::vector<std::thread> threads;
+ std::queue<std::function<void()>> requests;
+ std::mutex queue_mutex;
+ std::condition_variable condition;
+ const std::string service_name;
+ bool stop{};
+};
+
+ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name)
+ : service_name{name} {
+ for (std::size_t i = 0; i < num_threads; ++i)
+ threads.emplace_back([this, &kernel] {
+ Common::SetCurrentThreadName(std::string{"yuzu:HleService:" + service_name}.c_str());
+
+ // Wait for first request before trying to acquire a render context
+ {
+ std::unique_lock lock{queue_mutex};
+ condition.wait(lock, [this] { return stop || !requests.empty(); });
+ }
+
+ kernel.RegisterHostThread();
+
+ while (true) {
+ std::function<void()> task;
+
+ {
+ std::unique_lock lock{queue_mutex};
+ condition.wait(lock, [this] { return stop || !requests.empty(); });
+ if (stop || requests.empty()) {
+ return;
+ }
+ task = std::move(requests.front());
+ requests.pop();
+ }
+
+ task();
+ }
+ });
+}
+
+void ServiceThread::Impl::QueueSyncRequest(ServerSession& session,
+ std::shared_ptr<HLERequestContext>&& context) {
+ {
+ std::unique_lock lock{queue_mutex};
+
+ // ServerSession owns the service thread, so we cannot capture a strong pointer here, in the
+ // event that the ServerSession is terminated.
+ std::weak_ptr<ServerSession> weak_ptr{SharedFrom(&session)};
+ requests.emplace([weak_ptr, context{std::move(context)}]() {
+ if (auto strong_ptr = weak_ptr.lock()) {
+ strong_ptr->CompleteSyncRequest(*context);
+ }
+ });
+ }
+ condition.notify_one();
+}
+
+ServiceThread::Impl::~Impl() {
+ {
+ std::unique_lock lock{queue_mutex};
+ stop = true;
+ }
+ condition.notify_all();
+ for (std::thread& thread : threads) {
+ thread.join();
+ }
+}
+
+ServiceThread::ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name)
+ : impl{std::make_unique<Impl>(kernel, num_threads, name)} {}
+
+ServiceThread::~ServiceThread() = default;
+
+void ServiceThread::QueueSyncRequest(ServerSession& session,
+ std::shared_ptr<HLERequestContext>&& context) {
+ impl->QueueSyncRequest(session, std::move(context));
+}
+
+} // namespace Kernel
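
For reference, Impl above is a standard condition-variable worker queue. A self-contained
C++17 sketch of the same technique, using only the standard library (a minimal analogue
independent of yuzu types, not the implementation itself):

    #include <condition_variable>
    #include <functional>
    #include <mutex>
    #include <queue>
    #include <thread>

    class WorkerQueue {
    public:
        WorkerQueue() : worker{[this] { Run(); }} {}

        ~WorkerQueue() {
            {
                std::unique_lock lock{mutex};
                stop = true;
            }
            condition.notify_all();
            worker.join();
        }

        void Push(std::function<void()> task) {
            {
                std::unique_lock lock{mutex};
                tasks.push(std::move(task));
            }
            condition.notify_one();
        }

    private:
        void Run() {
            while (true) {
                std::function<void()> task;
                {
                    std::unique_lock lock{mutex};
                    condition.wait(lock, [this] { return stop || !tasks.empty(); });
                    if (stop) {
                        return; // like Impl, pending tasks may be dropped on shutdown
                    }
                    task = std::move(tasks.front());
                    tasks.pop();
                }
                task(); // run outside the lock so producers are never blocked on it
            }
        }

        std::queue<std::function<void()>> tasks;
        std::mutex mutex;
        std::condition_variable condition;
        bool stop{};
        std::thread worker; // declared last: starts only after the members above exist
    };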
diff --git a/src/core/hle/kernel/service_thread.h b/src/core/hle/kernel/service_thread.h
new file mode 100644
index 000000000..025ab8fb5
--- /dev/null
+++ b/src/core/hle/kernel/service_thread.h
@@ -0,0 +1,28 @@
+// Copyright 2020 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+#include <string>
+
+namespace Kernel {
+
+class HLERequestContext;
+class KernelCore;
+class ServerSession;
+
+class ServiceThread final {
+public:
+ explicit ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name);
+ ~ServiceThread();
+
+ void QueueSyncRequest(ServerSession& session, std::shared_ptr<HLERequestContext>&& context);
+
+private:
+ class Impl;
+ std::unique_ptr<Impl> impl;
+};
+
+} // namespace Kernel
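
Usage sketch for the wrapper above; the KernelCore helpers are inferred from their call
sites in server_session.cpp (CreateServiceThread returning std::weak_ptr<ServiceThread> and
ReleaseServiceThread accepting it are assumptions based on those calls, not declarations
shown in this diff):

    // At session creation:
    std::weak_ptr<ServiceThread> service_thread = kernel.CreateServiceThread(name);

    // Per request; the weak_ptr guards against a thread released during shutdown:
    if (auto strong_ptr = service_thread.lock()) {
        strong_ptr->QueueSyncRequest(session, std::move(context));
    }

    // At session destruction:
    kernel.ReleaseServiceThread(service_thread);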
diff --git a/src/core/hle/kernel/session.cpp b/src/core/hle/kernel/session.cpp
index e4dd53e24..8830d4e91 100644
--- a/src/core/hle/kernel/session.cpp
+++ b/src/core/hle/kernel/session.cpp
@@ -4,15 +4,23 @@
#include "common/assert.h"
#include "core/hle/kernel/client_session.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
namespace Kernel {
-Session::Session(KernelCore& kernel) : SynchronizationObject{kernel} {}
-Session::~Session() = default;
+Session::Session(KernelCore& kernel) : KSynchronizationObject{kernel} {}
+Session::~Session() {
+ // Release the resource that was reserved when the Session pair was created.
+ kernel.GetSystemResourceLimit()->Release(LimitableResource::Sessions, 1);
+}
Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
+ // Reserve a new session from the resource limit.
+ KScopedResourceReservation session_reservation(kernel.GetSystemResourceLimit(),
+ LimitableResource::Sessions);
+ ASSERT(session_reservation.Succeeded());
auto session{std::make_shared<Session>(kernel)};
auto client_session{Kernel::ClientSession::Create(kernel, session, name + "_Client").Unwrap()};
auto server_session{Kernel::ServerSession::Create(kernel, session, name + "_Server").Unwrap()};
@@ -21,21 +29,13 @@ Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
session->client = client_session;
session->server = server_session;
+ session_reservation.Commit();
return std::make_pair(std::move(client_session), std::move(server_session));
}
-bool Session::ShouldWait(const Thread* thread) const {
- UNIMPLEMENTED();
- return {};
-}
-
bool Session::IsSignaled() const {
UNIMPLEMENTED();
return true;
}
-void Session::Acquire(Thread* thread) {
- UNIMPLEMENTED();
-}
-
} // namespace Kernel
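
The reservation type used here is RAII: it releases the reserved resource in its destructor
unless Commit() is called, so error paths need no manual cleanup. A sketch of the intended
idiom, with the interface inferred from the call sites in this diff (the failure result name
is an assumption):

    KScopedResourceReservation reservation(kernel.GetSystemResourceLimit(),
                                           LimitableResource::Sessions);
    if (!reservation.Succeeded()) {
        return ResultLimitReached; // assumed result code; not shown in this diff
    }
    // ... construct whatever consumes the resource ...
    reservation.Commit(); // keep the reservation; otherwise the destructor releases it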
diff --git a/src/core/hle/kernel/session.h b/src/core/hle/kernel/session.h
index 7cd9c0d77..fa3c5651a 100644
--- a/src/core/hle/kernel/session.h
+++ b/src/core/hle/kernel/session.h
@@ -8,7 +8,7 @@
#include <string>
#include <utility>
-#include "core/hle/kernel/synchronization_object.h"
+#include "core/hle/kernel/k_synchronization_object.h"
namespace Kernel {
@@ -19,7 +19,7 @@ class ServerSession;
* Parent structure to link the client and server endpoints of a session with their associated
* client port.
*/
-class Session final : public SynchronizationObject {
+class Session final : public KSynchronizationObject {
public:
explicit Session(KernelCore& kernel);
~Session() override;
@@ -37,11 +37,9 @@ public:
return HANDLE_TYPE;
}
- bool ShouldWait(const Thread* thread) const override;
-
bool IsSignaled() const override;
- void Acquire(Thread* thread) override;
+ void Finalize() override {}
std::shared_ptr<ClientSession> Client() {
if (auto result{client.lock()}) {
diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp
index 0cd467110..2eadd51d7 100644
--- a/src/core/hle/kernel/shared_memory.cpp
+++ b/src/core/hle/kernel/shared_memory.cpp
@@ -4,6 +4,7 @@
#include "common/assert.h"
#include "core/core.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/shared_memory.h"
@@ -13,7 +14,9 @@ namespace Kernel {
SharedMemory::SharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory)
: Object{kernel}, device_memory{device_memory} {}
-SharedMemory::~SharedMemory() = default;
+SharedMemory::~SharedMemory() {
+ kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size);
+}
std::shared_ptr<SharedMemory> SharedMemory::Create(
KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process,
@@ -21,6 +24,11 @@ std::shared_ptr<SharedMemory> SharedMemory::Create(
Memory::MemoryPermission user_permission, PAddr physical_address, std::size_t size,
std::string name) {
+ const auto resource_limit = kernel.GetSystemResourceLimit();
+ KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
+ size);
+ ASSERT(memory_reservation.Succeeded());
+
std::shared_ptr<SharedMemory> shared_memory{
std::make_shared<SharedMemory>(kernel, device_memory)};
@@ -32,6 +40,7 @@ std::shared_ptr<SharedMemory> SharedMemory::Create(
shared_memory->size = size;
shared_memory->name = name;
+ memory_reservation.Commit();
return shared_memory;
}
diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h
index 0ef87235c..623bd8b11 100644
--- a/src/core/hle/kernel/shared_memory.h
+++ b/src/core/hle/kernel/shared_memory.h
@@ -71,6 +71,8 @@ public:
return device_memory.GetPointer(physical_address + offset);
}
+ void Finalize() override {}
+
private:
Core::DeviceMemory& device_memory;
Process* owner_process{};
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 01ae57053..31d899e06 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -10,38 +10,44 @@
#include "common/alignment.h"
#include "common/assert.h"
+#include "common/common_funcs.h"
#include "common/fiber.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
+#include "common/scope_exit.h"
#include "common/string_util.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/cpu_manager.h"
-#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
-#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_address_arbiter.h"
+#include "core/hle/kernel/k_condition_variable.h"
+#include "core/hle/kernel/k_event.h"
+#include "core/hle/kernel/k_readable_event.h"
+#include "core/hle/kernel/k_resource_limit.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_block.h"
+#include "core/hle/kernel/memory/memory_layout.h"
#include "core/hle/kernel/memory/page_table.h"
-#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/readable_event.h"
-#include "core/hle/kernel/resource_limit.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/kernel/svc_wrap.h"
-#include "core/hle/kernel/synchronization.h"
-#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/kernel/transfer_memory.h"
-#include "core/hle/kernel/writable_event.h"
#include "core/hle/lock.h"
#include "core/hle/result.h"
#include "core/hle/service/service.h"
@@ -65,49 +71,49 @@ ResultCode MapUnmapMemorySanityChecks(const Memory::PageTable& manager, VAddr ds
VAddr src_addr, u64 size) {
if (!Common::Is4KBAligned(dst_addr)) {
LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (!Common::Is4KBAligned(src_addr)) {
LOG_ERROR(Kernel_SVC, "Source address is not aligned to 4KB, 0x{:016X}", src_addr);
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (size == 0) {
LOG_ERROR(Kernel_SVC, "Size is 0");
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (!Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:016X}", size);
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (!IsValidAddressRange(dst_addr, size)) {
LOG_ERROR(Kernel_SVC,
"Destination is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
dst_addr, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (!IsValidAddressRange(src_addr, size)) {
LOG_ERROR(Kernel_SVC, "Source is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
src_addr, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (!manager.IsInsideAddressSpace(src_addr, size)) {
LOG_ERROR(Kernel_SVC,
"Source is not within the address space, addr=0x{:016X}, size=0x{:016X}",
src_addr, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (manager.IsOutsideStackRegion(dst_addr, size)) {
LOG_ERROR(Kernel_SVC,
"Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}",
dst_addr, size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
if (manager.IsInsideHeapRegion(dst_addr, size)) {
@@ -115,7 +121,7 @@ ResultCode MapUnmapMemorySanityChecks(const Memory::PageTable& manager, VAddr ds
"Destination does not fit within the heap region, addr=0x{:016X}, "
"size=0x{:016X}",
dst_addr, size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
if (manager.IsInsideAliasRegion(dst_addr, size)) {
@@ -123,7 +129,7 @@ ResultCode MapUnmapMemorySanityChecks(const Memory::PageTable& manager, VAddr ds
"Destination does not fit within the map region, addr=0x{:016X}, "
"size=0x{:016X}",
dst_addr, size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
return RESULT_SUCCESS;
@@ -132,33 +138,40 @@ ResultCode MapUnmapMemorySanityChecks(const Memory::PageTable& manager, VAddr ds
enum class ResourceLimitValueType {
CurrentValue,
LimitValue,
+ PeakValue,
};
ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_limit,
u32 resource_type, ResourceLimitValueType value_type) {
std::lock_guard lock{HLE::g_hle_lock};
- const auto type = static_cast<ResourceType>(resource_type);
+ const auto type = static_cast<LimitableResource>(resource_type);
if (!IsValidResourceType(type)) {
LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type);
- return ERR_INVALID_ENUM_VALUE;
+ return ResultInvalidEnumValue;
}
const auto* const current_process = system.Kernel().CurrentProcess();
ASSERT(current_process != nullptr);
const auto resource_limit_object =
- current_process->GetHandleTable().Get<ResourceLimit>(resource_limit);
+ current_process->GetHandleTable().Get<KResourceLimit>(resource_limit);
if (!resource_limit_object) {
LOG_ERROR(Kernel_SVC, "Handle to non-existent resource limit instance used. Handle={:08X}",
resource_limit);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
- if (value_type == ResourceLimitValueType::CurrentValue) {
- return MakeResult(resource_limit_object->GetCurrentResourceValue(type));
+ switch (value_type) {
+ case ResourceLimitValueType::CurrentValue:
+ return MakeResult(resource_limit_object->GetCurrentValue(type));
+ case ResourceLimitValueType::LimitValue:
+ return MakeResult(resource_limit_object->GetLimitValue(type));
+ case ResourceLimitValueType::PeakValue:
+ return MakeResult(resource_limit_object->GetPeakValue(type));
+ default:
+ LOG_ERROR(Kernel_SVC, "Invalid resource value_type: '{}'", value_type);
+ return ResultInvalidEnumValue;
}
-
- return MakeResult(resource_limit_object->GetMaxResourceValue(type));
}
} // Anonymous namespace
@@ -171,12 +184,12 @@ static ResultCode SetHeapSize(Core::System& system, VAddr* heap_addr, u64 heap_s
if ((heap_size % 0x200000) != 0) {
LOG_ERROR(Kernel_SVC, "The heap size is not a multiple of 2MB, heap_size=0x{:016X}",
heap_size);
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (heap_size >= 0x200000000) {
LOG_ERROR(Kernel_SVC, "The heap size is not less than 8GB, heap_size=0x{:016X}", heap_size);
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
@@ -202,19 +215,19 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si
if (!Common::Is4KBAligned(address)) {
LOG_ERROR(Kernel_SVC, "Address not page aligned (0x{:016X})", address);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (size == 0 || !Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Invalid size (0x{:X}). Size must be non-zero and page aligned.",
size);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (!IsValidAddressRange(address, size)) {
LOG_ERROR(Kernel_SVC, "Address range overflowed (Address: 0x{:016X}, Size: 0x{:016X})",
address, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
const auto attributes{static_cast<Memory::MemoryAttribute>(mask | attribute)};
@@ -223,7 +236,7 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si
LOG_ERROR(Kernel_SVC,
"Memory attribute doesn't match the given mask (Attribute: 0x{:X}, Mask: {:X}",
attribute, mask);
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
@@ -234,8 +247,7 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si
static ResultCode SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask,
u32 attribute) {
- return SetMemoryAttribute(system, static_cast<VAddr>(address), static_cast<std::size_t>(size),
- mask, attribute);
+ return SetMemoryAttribute(system, address, size, mask, attribute);
}
/// Maps a memory range into a different range.
@@ -255,8 +267,7 @@ static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr
}
static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
- return MapMemory(system, static_cast<VAddr>(dst_addr), static_cast<VAddr>(src_addr),
- static_cast<std::size_t>(size));
+ return MapMemory(system, dst_addr, src_addr, size);
}
/// Unmaps a region that was previously mapped with svcMapMemory
@@ -276,8 +287,7 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad
}
static ResultCode UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
- return UnmapMemory(system, static_cast<VAddr>(dst_addr), static_cast<VAddr>(src_addr),
- static_cast<std::size_t>(size));
+ return UnmapMemory(system, dst_addr, src_addr, size);
}
/// Connect to an OS service given the port name, returns the handle to the port to out
@@ -290,7 +300,7 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle,
LOG_ERROR(Kernel_SVC,
"Port Name Address is not a valid virtual address, port_name_address=0x{:016X}",
port_name_address);
- return ERR_NOT_FOUND;
+ return ResultNotFound;
}
static constexpr std::size_t PortNameMaxLength = 11;
@@ -299,7 +309,7 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle,
if (port_name.size() > PortNameMaxLength) {
LOG_ERROR(Kernel_SVC, "Port name is too long, expected {} but got {}", PortNameMaxLength,
port_name.size());
- return ERR_OUT_OF_RANGE;
+ return ResultOutOfRange;
}
LOG_TRACE(Kernel_SVC, "called port_name={}", port_name);
@@ -308,11 +318,9 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle,
const auto it = kernel.FindNamedPort(port_name);
if (!kernel.IsValidNamedPort(it)) {
LOG_WARNING(Kernel_SVC, "tried to connect to unknown port: {}", port_name);
- return ERR_NOT_FOUND;
+ return ResultNotFound;
}
- ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Sessions, 1));
-
auto client_port = it->second;
std::shared_ptr<ClientSession> client_session;
@@ -332,40 +340,26 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
/// Makes a blocking IPC call to an OS service.
static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ auto& kernel = system.Kernel();
+ const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
std::shared_ptr<ClientSession> session = handle_table.Get<ClientSession>(handle);
if (!session) {
LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
- auto thread = system.CurrentScheduler().GetCurrentThread();
+ auto thread = kernel.CurrentScheduler()->GetCurrentThread();
{
- SchedulerLock lock(system.Kernel());
- thread->InvalidateHLECallback();
- thread->SetStatus(ThreadStatus::WaitIPC);
- session->SendSyncRequest(SharedFrom(thread), system.Memory());
- }
-
- if (thread->HasHLECallback()) {
- Handle event_handle = thread->GetHLETimeEvent();
- if (event_handle != InvalidHandle) {
- auto& time_manager = system.Kernel().TimeManager();
- time_manager.UnscheduleTimeEvent(event_handle);
- }
-
- {
- SchedulerLock lock(system.Kernel());
- auto* sync_object = thread->GetHLESyncObject();
- sync_object->RemoveWaitingThread(SharedFrom(thread));
- }
-
- thread->InvokeHLECallback(SharedFrom(thread));
+ KScopedSchedulerLock lock(kernel);
+ thread->SetState(ThreadState::Waiting);
+ thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
+ session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
}
- return thread->GetSignalingResult();
+ KSynchronizationObject* dummy{};
+ return thread->GetWaitResult(std::addressof(dummy));
}
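
For orientation: the blocking IPC round trip now spans two threads. Schematically, with the
names used in this diff (sequencing simplified, the IsThreadWaiting guard elided):

    // Guest thread (SendSyncRequest above): park, then hand off.
    {
        KScopedSchedulerLock lock(kernel);
        thread->SetState(ThreadState::Waiting);
        session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
    }

    // Host ServiceThread (ServerSession::CompleteSyncRequest): finish and wake.
    {
        KScopedSchedulerLock lock(kernel);
        context.GetThread().Wakeup();                          // reschedule the guest thread
        context.GetThread().SetSyncedObject(nullptr, result);  // result surfaced below
    }

    // Guest thread resumes and reports the result set by the host side.
    KSynchronizationObject* dummy{};
    return thread->GetWaitResult(std::addressof(dummy));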
static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
@@ -373,27 +367,29 @@ static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
}
/// Get the ID for the specified thread.
-static ResultCode GetThreadId(Core::System& system, u64* thread_id, Handle thread_handle) {
+static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
+ // Get the thread from its handle.
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
+ const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
if (!thread) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", thread_handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle);
+ return ResultInvalidHandle;
}
- *thread_id = thread->GetThreadID();
+ // Get the thread's id.
+ *out_thread_id = thread->GetThreadID();
return RESULT_SUCCESS;
}
-static ResultCode GetThreadId32(Core::System& system, u32* thread_id_low, u32* thread_id_high,
- Handle thread_handle) {
- u64 thread_id{};
- const ResultCode result{GetThreadId(system, &thread_id, thread_handle)};
+static ResultCode GetThreadId32(Core::System& system, u32* out_thread_id_low,
+ u32* out_thread_id_high, Handle thread_handle) {
+ u64 out_thread_id{};
+ const ResultCode result{GetThreadId(system, &out_thread_id, thread_handle)};
- *thread_id_low = static_cast<u32>(thread_id >> 32);
- *thread_id_high = static_cast<u32>(thread_id & std::numeric_limits<u32>::max());
+ *out_thread_id_low = static_cast<u32>(out_thread_id & std::numeric_limits<u32>::max());
+ *out_thread_id_high = static_cast<u32>(out_thread_id >> 32);
return result;
}
@@ -409,12 +405,12 @@ static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle han
return RESULT_SUCCESS;
}
- const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle);
+ const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle);
if (thread) {
const Process* const owner_process = thread->GetOwnerProcess();
if (!owner_process) {
LOG_ERROR(Kernel_SVC, "Non-existent owning process encountered.");
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
*process_id = owner_process->GetProcessID();
@@ -424,7 +420,7 @@ static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle han
// NOTE: This should also handle debug objects before returning.
LOG_ERROR(Kernel_SVC, "Handle does not exist, handle=0x{:08X}", handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
static ResultCode GetProcessId32(Core::System& system, u32* process_id_low, u32* process_id_high,
@@ -437,7 +433,7 @@ static ResultCode GetProcessId32(Core::System& system, u32* process_id_low, u32*
}
/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
-static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr handles_address,
+static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
u64 handle_count, s64 nano_seconds) {
LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, handle_count={}, nano_seconds={}",
handles_address, handle_count, nano_seconds);
@@ -447,7 +443,7 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr
LOG_ERROR(Kernel_SVC,
"Handle address is not a valid virtual address, handle_address=0x{:016X}",
handles_address);
- return ERR_INVALID_POINTER;
+ return ResultInvalidPointer;
}
static constexpr u64 MaxHandles = 0x40;
@@ -455,32 +451,30 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr
if (handle_count > MaxHandles) {
LOG_ERROR(Kernel_SVC, "Handle count specified is too large, expected {} but got {}",
MaxHandles, handle_count);
- return ERR_OUT_OF_RANGE;
+ return ResultOutOfRange;
}
auto& kernel = system.Kernel();
- Thread::ThreadSynchronizationObjects objects(handle_count);
+ std::vector<KSynchronizationObject*> objects(handle_count);
const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
for (u64 i = 0; i < handle_count; ++i) {
const Handle handle = memory.Read32(handles_address + i * sizeof(Handle));
- const auto object = handle_table.Get<SynchronizationObject>(handle);
+ const auto object = handle_table.Get<KSynchronizationObject>(handle);
if (object == nullptr) {
LOG_ERROR(Kernel_SVC, "Object is a nullptr");
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
- objects[i] = object;
+ objects[i] = object.get();
}
- auto& synchronization = kernel.Synchronization();
- const auto [result, handle_result] = synchronization.WaitFor(objects, nano_seconds);
- *index = handle_result;
- return result;
+ return KSynchronizationObject::Wait(kernel, index, objects.data(),
+ static_cast<s32>(objects.size()), nano_seconds);
}
static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
- s32 handle_count, u32 timeout_high, Handle* index) {
+ s32 handle_count, u32 timeout_high, s32* index) {
const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds);
}
@@ -489,15 +483,17 @@ static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u
static ResultCode CancelSynchronization(Core::System& system, Handle thread_handle) {
LOG_TRACE(Kernel_SVC, "called thread=0x{:X}", thread_handle);
+ // Get the thread from its handle.
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
+ std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
+
if (!thread) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
- thread_handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle);
+ return ResultInvalidHandle;
}
- thread->CancelWait();
+ // Cancel the thread's wait.
+ thread->WaitCancel();
return RESULT_SUCCESS;
}
@@ -505,57 +501,53 @@ static ResultCode CancelSynchronization32(Core::System& system, Handle thread_ha
return CancelSynchronization(system, thread_handle);
}
-/// Attempts to locks a mutex, creating it if it does not already exist
-static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_handle,
- VAddr mutex_addr, Handle requesting_thread_handle) {
- LOG_TRACE(Kernel_SVC,
- "called holding_thread_handle=0x{:08X}, mutex_addr=0x{:X}, "
- "requesting_current_thread_handle=0x{:08X}",
- holding_thread_handle, mutex_addr, requesting_thread_handle);
+/// Attempts to lock a mutex
+static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address,
+ u32 tag) {
+ LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
+ thread_handle, address, tag);
- if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
- LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
- mutex_addr);
- return ERR_INVALID_ADDRESS_STATE;
+ // Validate the input address.
+ if (Memory::IsKernelAddress(address)) {
+ LOG_ERROR(Kernel_SVC, "Attempting to arbitrate a lock on a kernel address (address={:08X})",
+ address);
+ return ResultInvalidCurrentMemory;
}
-
- if (!Common::IsWordAligned(mutex_addr)) {
- LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr);
- return ERR_INVALID_ADDRESS;
+ if (!Common::IsAligned(address, sizeof(u32))) {
+ LOG_ERROR(Kernel_SVC, "Input address must be 4 byte aligned (address: {:08X})", address);
+ return ResultInvalidAddress;
}
- auto* const current_process = system.Kernel().CurrentProcess();
- return current_process->GetMutex().TryAcquire(mutex_addr, holding_thread_handle,
- requesting_thread_handle);
+ return system.Kernel().CurrentProcess()->WaitForAddress(thread_handle, address, tag);
}
-static ResultCode ArbitrateLock32(Core::System& system, Handle holding_thread_handle,
- u32 mutex_addr, Handle requesting_thread_handle) {
- return ArbitrateLock(system, holding_thread_handle, static_cast<VAddr>(mutex_addr),
- requesting_thread_handle);
+static ResultCode ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address,
+ u32 tag) {
+ return ArbitrateLock(system, thread_handle, address, tag);
}
/// Unlock a mutex
-static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) {
- LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr);
+static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) {
+ LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
- if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
- LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
- mutex_addr);
- return ERR_INVALID_ADDRESS_STATE;
- }
+ // Validate the input address.
- if (!Common::IsWordAligned(mutex_addr)) {
- LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr);
- return ERR_INVALID_ADDRESS;
+ if (Memory::IsKernelAddress(address)) {
+ LOG_ERROR(Kernel_SVC,
+ "Attempting to arbitrate an unlock on a kernel address (address={:08X})",
+ address);
+ return ResultInvalidCurrentMemory;
+ }
+ if (!Common::IsAligned(address, sizeof(u32))) {
+ LOG_ERROR(Kernel_SVC, "Input address must be 4 byte aligned (address: {:08X})", address);
+ return ResultInvalidAddress;
}
- auto* const current_process = system.Kernel().CurrentProcess();
- return current_process->GetMutex().Release(mutex_addr);
+ return system.Kernel().CurrentProcess()->SignalToAddress(address);
}
-static ResultCode ArbitrateUnlock32(Core::System& system, u32 mutex_addr) {
- return ArbitrateUnlock(system, static_cast<VAddr>(mutex_addr));
+static ResultCode ArbitrateUnlock32(Core::System& system, u32 address) {
+ return ArbitrateUnlock(system, address);
}
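
ArbitrateLock/ArbitrateUnlock now operate directly on a 32-bit mutex word in guest memory,
with the tag being the owner thread's handle, rather than on a kernel-side Mutex object. A
sketch of the word's states under the Horizon convention this models (the waiter-bit
constant is an assumption; it does not appear in this diff):

    constexpr u32 HandleWaitMask{0x40000000}; // assumed: set while threads wait in-kernel
    // *address == 0                               -> unlocked
    // *address == owner_handle                    -> locked, uncontended
    // *address == (owner_handle | HandleWaitMask) -> locked, waiters parked in the kernel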
enum class BreakType : u32 {
@@ -658,7 +650,6 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt);
if (!break_reason.signal_debugger) {
- SchedulerLock lock(system.Kernel());
LOG_CRITICAL(
Debug_Emulated,
"Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
@@ -666,22 +657,18 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
handle_debug_buffer(info1, info2);
- auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
- const auto thread_processor_id = current_thread->GetProcessorID();
+ auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
+ const auto thread_processor_id = current_thread->GetActiveCore();
system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
-
- // Kill the current thread
- system.Kernel().ExceptionalExit();
- current_thread->Stop();
}
}
static void Break32(Core::System& system, u32 reason, u32 info1, u32 info2) {
- Break(system, reason, static_cast<u64>(info1), static_cast<u64>(info2));
+ Break(system, reason, info1, info2);
}
/// Used to output a message on a debug hardware unit - does nothing on a retail unit
-static void OutputDebugString([[maybe_unused]] Core::System& system, VAddr address, u64 len) {
+static void OutputDebugString(Core::System& system, VAddr address, u64 len) {
if (len == 0) {
return;
}
@@ -755,7 +742,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
if (info_sub_id != 0) {
LOG_ERROR(Kernel_SVC, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
info_sub_id);
- return ERR_INVALID_ENUM_VALUE;
+ return ResultInvalidEnumValue;
}
const auto& current_process_handle_table =
@@ -764,7 +751,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
if (!process) {
LOG_ERROR(Kernel_SVC, "Process is not valid! info_id={}, info_sub_id={}, handle={:08X}",
info_id, info_sub_id, handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
switch (info_id_type) {
@@ -846,7 +833,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
}
LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
- return ERR_INVALID_ENUM_VALUE;
+ return ResultInvalidEnumValue;
}
case GetInfoType::IsCurrentProcessBeingDebugged:
@@ -856,13 +843,13 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
case GetInfoType::RegisterResourceLimit: {
if (handle != 0) {
LOG_ERROR(Kernel, "Handle is non zero! handle={:08X}", handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
if (info_sub_id != 0) {
LOG_ERROR(Kernel, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
info_sub_id);
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
Process* const current_process = system.Kernel().CurrentProcess();
@@ -887,13 +874,13 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
if (handle != 0) {
LOG_ERROR(Kernel_SVC, "Process Handle is non zero, expected 0 result but got {:016X}",
handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
if (info_sub_id >= Process::RANDOM_ENTROPY_SIZE) {
LOG_ERROR(Kernel_SVC, "Entropy size is out of range, expected {} but got {}",
Process::RANDOM_ENTROPY_SIZE, info_sub_id);
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
*result = system.Kernel().CurrentProcess()->GetRandomEntropy(info_sub_id);
@@ -910,26 +897,26 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id >= num_cpus) {
LOG_ERROR(Kernel_SVC, "Core count is out of range, expected {} but got {}", num_cpus,
info_sub_id);
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
- const auto thread = system.Kernel().CurrentProcess()->GetHandleTable().Get<Thread>(
+ const auto thread = system.Kernel().CurrentProcess()->GetHandleTable().Get<KThread>(
static_cast<Handle>(handle));
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}",
static_cast<Handle>(handle));
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
const auto& core_timing = system.CoreTiming();
- const auto& scheduler = system.CurrentScheduler();
+ const auto& scheduler = *system.Kernel().CurrentScheduler();
const auto* const current_thread = scheduler.GetCurrentThread();
const bool same_thread = current_thread == thread.get();
const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks();
u64 out_ticks = 0;
if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
- const u64 thread_ticks = current_thread->GetTotalCPUTimeTicks();
+ const u64 thread_ticks = current_thread->GetCpuTime();
out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
} else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
@@ -942,13 +929,13 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
default:
LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
- return ERR_INVALID_ENUM_VALUE;
+ return ResultInvalidEnumValue;
}
}
static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low,
u32 info_id, u32 handle, u32 sub_id_high) {
- const u64 sub_id{static_cast<u64>(sub_id_low | (static_cast<u64>(sub_id_high) << 32))};
+ const u64 sub_id{u64{sub_id_low} | (u64{sub_id_high} << 32)};
u64 res_value{};
const ResultCode result{GetInfo(system, &res_value, info_id, handle, sub_id)};
@@ -965,22 +952,22 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size)
if (!Common::Is4KBAligned(addr)) {
LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (!Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (size == 0) {
LOG_ERROR(Kernel_SVC, "Size is zero");
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (!(addr < addr + size)) {
LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
Process* const current_process{system.Kernel().CurrentProcess()};
@@ -988,28 +975,28 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size)
if (current_process->GetSystemResourceSize() == 0) {
LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
- return ERR_INVALID_STATE;
+ return ResultInvalidState;
}
if (!page_table.IsInsideAddressSpace(addr, size)) {
LOG_ERROR(Kernel_SVC,
"Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
if (page_table.IsOutsideAliasRegion(addr, size)) {
LOG_ERROR(Kernel_SVC,
"Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
return page_table.MapPhysicalMemory(addr, size);
}
static ResultCode MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
- return MapPhysicalMemory(system, static_cast<VAddr>(addr), static_cast<std::size_t>(size));
+ return MapPhysicalMemory(system, addr, size);
}
/// Unmaps memory previously mapped via MapPhysicalMemory
@@ -1019,22 +1006,22 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size
if (!Common::Is4KBAligned(addr)) {
LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (!Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (size == 0) {
LOG_ERROR(Kernel_SVC, "Size is zero");
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (!(addr < addr + size)) {
LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
Process* const current_process{system.Kernel().CurrentProcess()};
@@ -1042,153 +1029,164 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size
if (current_process->GetSystemResourceSize() == 0) {
LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
- return ERR_INVALID_STATE;
+ return ResultInvalidState;
}
if (!page_table.IsInsideAddressSpace(addr, size)) {
LOG_ERROR(Kernel_SVC,
"Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
if (page_table.IsOutsideAliasRegion(addr, size)) {
LOG_ERROR(Kernel_SVC,
"Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
return page_table.UnmapPhysicalMemory(addr, size);
}
static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
- return UnmapPhysicalMemory(system, static_cast<VAddr>(addr), static_cast<std::size_t>(size));
+ return UnmapPhysicalMemory(system, addr, size);
}
/// Sets the thread activity
-static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 activity) {
- LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", handle, activity);
- if (activity > static_cast<u32>(ThreadActivity::Paused)) {
- return ERR_INVALID_ENUM_VALUE;
+static ResultCode SetThreadActivity(Core::System& system, Handle thread_handle,
+ ThreadActivity thread_activity) {
+ LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", thread_handle,
+ thread_activity);
+
+ // Validate the activity.
+ constexpr auto IsValidThreadActivity = [](ThreadActivity activity) {
+ return activity == ThreadActivity::Runnable || activity == ThreadActivity::Paused;
+ };
+ if (!IsValidThreadActivity(thread_activity)) {
+ LOG_ERROR(Kernel_SVC, "Invalid thread activity value provided (activity={})",
+ thread_activity);
+ return ResultInvalidEnumValue;
}
- const auto* current_process = system.Kernel().CurrentProcess();
- const std::shared_ptr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
+ // Get the thread from its handle.
+ auto& kernel = system.Kernel();
+ const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
+ const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
if (!thread) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle);
+ return ResultInvalidHandle;
}
- if (thread->GetOwnerProcess() != current_process) {
- LOG_ERROR(Kernel_SVC,
- "The current process does not own the current thread, thread_handle={:08X} "
- "thread_pid={}, "
- "current_process_pid={}",
- handle, thread->GetOwnerProcess()->GetProcessID(),
- current_process->GetProcessID());
- return ERR_INVALID_HANDLE;
+ // Check that the activity is being set on a non-current thread for the current process.
+ if (thread->GetOwnerProcess() != kernel.CurrentProcess()) {
+ LOG_ERROR(Kernel_SVC, "Invalid owning process for the specified thread.");
+ return ResultInvalidHandle;
+ }
+ if (thread.get() == GetCurrentThreadPointer(kernel)) {
+ LOG_ERROR(Kernel_SVC, "Thread is busy");
+ return ResultBusy;
}
- if (thread.get() == system.CurrentScheduler().GetCurrentThread()) {
- LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
- return ERR_BUSY;
+ // Set the activity.
+ const auto set_result = thread->SetActivity(thread_activity);
+ if (set_result.IsError()) {
+ LOG_ERROR(Kernel_SVC, "Failed to set thread activity.");
+ return set_result;
}
- return thread->SetActivity(static_cast<ThreadActivity>(activity));
+ return RESULT_SUCCESS;
}
-static ResultCode SetThreadActivity32(Core::System& system, Handle handle, u32 activity) {
- return SetThreadActivity(system, handle, activity);
+static ResultCode SetThreadActivity32(Core::System& system, Handle thread_handle,
+ Svc::ThreadActivity thread_activity) {
+ return SetThreadActivity(system, thread_handle, thread_activity);
}
/// Gets the thread context
-static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, Handle handle) {
- LOG_DEBUG(Kernel_SVC, "called, context=0x{:08X}, thread=0x{:X}", thread_context, handle);
+static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Handle thread_handle) {
+ LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context,
+ thread_handle);
+ // Get the thread from its handle.
const auto* current_process = system.Kernel().CurrentProcess();
- const std::shared_ptr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
+ const std::shared_ptr<KThread> thread =
+ current_process->GetHandleTable().Get<KThread>(thread_handle);
if (!thread) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={})", thread_handle);
+ return ResultInvalidHandle;
}
+ // Require the handle be to a non-current thread in the current process.
if (thread->GetOwnerProcess() != current_process) {
- LOG_ERROR(Kernel_SVC,
- "The current process does not own the current thread, thread_handle={:08X} "
- "thread_pid={}, "
- "current_process_pid={}",
- handle, thread->GetOwnerProcess()->GetProcessID(),
- current_process->GetProcessID());
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Thread owning process is not the current process.");
+ return ResultInvalidHandle;
}
-
- if (thread.get() == system.CurrentScheduler().GetCurrentThread()) {
- LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
- return ERR_BUSY;
+ if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) {
+ LOG_ERROR(Kernel_SVC, "Current thread is busy.");
+ return ResultBusy;
}
- Core::ARM_Interface::ThreadContext64 ctx = thread->GetContext64();
- // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
- ctx.pstate &= 0xFF0FFE20;
-
- // If 64-bit, we can just write the context registers directly and we're good.
- // However, if 32-bit, we have to ensure some registers are zeroed out.
- if (!current_process->Is64BitProcess()) {
- std::fill(ctx.cpu_registers.begin() + 15, ctx.cpu_registers.end(), 0);
- std::fill(ctx.vector_registers.begin() + 16, ctx.vector_registers.end(), u128{});
+ // Get the thread context.
+ std::vector<u8> context;
+ const auto context_result = thread->GetThreadContext3(context);
+ if (context_result.IsError()) {
+ LOG_ERROR(Kernel_SVC, "Unable to successfully retrieve thread context (result: {})",
+ context_result.raw);
+ return context_result;
}
- system.Memory().WriteBlock(thread_context, &ctx, sizeof(ctx));
+ // Copy the thread context to user space.
+ system.Memory().WriteBlock(out_context, context.data(), context.size());
+
return RESULT_SUCCESS;
}
-static ResultCode GetThreadContext32(Core::System& system, u32 thread_context, Handle handle) {
- return GetThreadContext(system, static_cast<VAddr>(thread_context), handle);
+static ResultCode GetThreadContext32(Core::System& system, u32 out_context, Handle thread_handle) {
+ return GetThreadContext(system, out_context, thread_handle);
}
/// Gets the priority for the specified thread
-static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle handle) {
+static ResultCode GetThreadPriority(Core::System& system, u32* out_priority, Handle handle) {
LOG_TRACE(Kernel_SVC, "called");
+ // Get the thread from its handle.
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle);
+ const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle);
if (!thread) {
- *priority = 0;
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", handle);
+ return ResultInvalidHandle;
}
- *priority = thread->GetPriority();
+ // Get the thread's priority.
+ *out_priority = thread->GetPriority();
return RESULT_SUCCESS;
}
-static ResultCode GetThreadPriority32(Core::System& system, u32* priority, Handle handle) {
- return GetThreadPriority(system, priority, handle);
+static ResultCode GetThreadPriority32(Core::System& system, u32* out_priority, Handle handle) {
+ return GetThreadPriority(system, out_priority, handle);
}
/// Sets the priority for the specified thread
static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 priority) {
LOG_TRACE(Kernel_SVC, "called");
- if (priority > THREADPRIO_LOWEST) {
- LOG_ERROR(
- Kernel_SVC,
- "An invalid priority was specified, expected {} but got {} for thread_handle={:08X}",
- THREADPRIO_LOWEST, priority, handle);
- return ERR_INVALID_THREAD_PRIORITY;
+ // Validate the priority.
+ if (HighestThreadPriority > priority || priority > LowestThreadPriority) {
+ LOG_ERROR(Kernel_SVC, "Invalid thread priority specified (priority={})", priority);
+ return ResultInvalidPriority;
}
- const auto* const current_process = system.Kernel().CurrentProcess();
-
- std::shared_ptr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
+ // Get the thread from its handle.
+ const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle);
if (!thread) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Invalid handle provided (handle={:08X})", handle);
+ return ResultInvalidHandle;
}
- thread->SetPriority(priority);
-
+ // Set the thread priority.
+ thread->SetBasePriority(priority);
return RESULT_SUCCESS;
}
@@ -1215,23 +1213,23 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
if (!Common::Is4KBAligned(addr)) {
LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, addr=0x{:016X}", addr);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (size == 0) {
LOG_ERROR(Kernel_SVC, "Size is 0");
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (!Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, size=0x{:016X}", size);
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (!IsValidAddressRange(addr, size)) {
LOG_ERROR(Kernel_SVC, "Region is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
addr, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
const auto permission_type = static_cast<Memory::MemoryPermission>(permissions);
@@ -1239,7 +1237,7 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
Memory::MemoryPermission::ReadAndWrite) {
LOG_ERROR(Kernel_SVC, "Expected Read or ReadWrite permission but got permissions=0x{:08X}",
permissions);
- return ERR_INVALID_MEMORY_PERMISSIONS;
+ return ResultInvalidMemoryPermissions;
}
auto* const current_process{system.Kernel().CurrentProcess()};
@@ -1250,7 +1248,7 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
"Addr does not fit within the valid region, addr=0x{:016X}, "
"size=0x{:016X}",
addr, size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
if (page_table.IsInsideHeapRegion(addr, size)) {
@@ -1258,7 +1256,7 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
"Addr does not fit within the heap region, addr=0x{:016X}, "
"size=0x{:016X}",
addr, size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
if (page_table.IsInsideAliasRegion(addr, size)) {
@@ -1266,14 +1264,14 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
"Address does not fit within the map region, addr=0x{:016X}, "
"size=0x{:016X}",
addr, size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
auto shared_memory{current_process->GetHandleTable().Get<SharedMemory>(shared_memory_handle)};
if (!shared_memory) {
LOG_ERROR(Kernel_SVC, "Shared memory does not exist, shared_memory_handle=0x{:08X}",
shared_memory_handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
return shared_memory->Map(*current_process, addr, size, permission_type);
@@ -1281,8 +1279,7 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
static ResultCode MapSharedMemory32(Core::System& system, Handle shared_memory_handle, u32 addr,
u32 size, u32 permissions) {
- return MapSharedMemory(system, shared_memory_handle, static_cast<VAddr>(addr),
- static_cast<std::size_t>(size), permissions);
+ return MapSharedMemory(system, shared_memory_handle, addr, size, permissions);
}
static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
@@ -1295,7 +1292,7 @@ static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_add
if (!process) {
LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
process_handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
auto& memory{system.Memory()};
@@ -1342,18 +1339,18 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
if (!Common::Is4KBAligned(src_address)) {
LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
src_address);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (!Common::Is4KBAligned(dst_address)) {
LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
dst_address);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (size == 0 || !Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X})", size);
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (!IsValidAddressRange(dst_address, size)) {
@@ -1361,7 +1358,7 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
"Destination address range overflows the address space (dst_address=0x{:016X}, "
"size=0x{:016X}).",
dst_address, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (!IsValidAddressRange(src_address, size)) {
@@ -1369,7 +1366,7 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
"Source address range overflows the address space (src_address=0x{:016X}, "
"size=0x{:016X}).",
src_address, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
@@ -1377,7 +1374,7 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
if (!process) {
LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
process_handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
auto& page_table = process->PageTable();
@@ -1386,7 +1383,7 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
"Source address range is not within the address space (src_address=0x{:016X}, "
"size=0x{:016X}).",
src_address, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (!page_table.IsInsideASLRRegion(dst_address, size)) {
@@ -1394,7 +1391,7 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
"Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
"size=0x{:016X}).",
dst_address, size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
return page_table.MapProcessCodeMemory(dst_address, src_address, size);
@@ -1410,18 +1407,18 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
if (!Common::Is4KBAligned(dst_address)) {
LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
dst_address);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (!Common::Is4KBAligned(src_address)) {
LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
src_address);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
    if (size == 0 || !Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (!IsValidAddressRange(dst_address, size)) {
@@ -1429,7 +1426,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
"Destination address range overflows the address space (dst_address=0x{:016X}, "
"size=0x{:016X}).",
dst_address, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (!IsValidAddressRange(src_address, size)) {
@@ -1437,7 +1434,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
"Source address range overflows the address space (src_address=0x{:016X}, "
"size=0x{:016X}).",
src_address, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
@@ -1445,7 +1442,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
if (!process) {
LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
process_handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
auto& page_table = process->PageTable();
@@ -1454,7 +1451,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
"Source address range is not within the address space (src_address=0x{:016X}, "
"size=0x{:016X}).",
src_address, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (!page_table.IsInsideASLRRegion(dst_address, size)) {
@@ -1462,7 +1459,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
"Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
"size=0x{:016X}).",
dst_address, size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
return page_table.UnmapProcessCodeMemory(dst_address, src_address, size);
@@ -1480,62 +1477,67 @@ static void ExitProcess(Core::System& system) {
current_process->PrepareForTermination();
// Kill the current thread
- system.CurrentScheduler().GetCurrentThread()->Stop();
+ system.Kernel().CurrentScheduler()->GetCurrentThread()->Exit();
}
static void ExitProcess32(Core::System& system) {
ExitProcess(system);
}
+static constexpr bool IsValidCoreId(int32_t core_id) {
+ return (0 <= core_id && core_id < static_cast<int32_t>(Core::Hardware::NUM_CPU_CORES));
+}
+
/// Creates a new thread
static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
- VAddr stack_top, u32 priority, s32 processor_id) {
+ VAddr stack_bottom, u32 priority, s32 core_id) {
LOG_DEBUG(Kernel_SVC,
- "called entrypoint=0x{:08X}, arg=0x{:08X}, stacktop=0x{:08X}, "
- "threadpriority=0x{:08X}, processorid=0x{:08X} : created handle=0x{:08X}",
- entry_point, arg, stack_top, priority, processor_id, *out_handle);
+ "called entry_point=0x{:08X}, arg=0x{:08X}, stack_bottom=0x{:08X}, "
+ "priority=0x{:08X}, core_id=0x{:08X}",
+ entry_point, arg, stack_bottom, priority, core_id);
- auto* const current_process = system.Kernel().CurrentProcess();
-
- if (processor_id == THREADPROCESSORID_IDEAL) {
- // Set the target CPU to the one specified by the process.
- processor_id = current_process->GetIdealCore();
- ASSERT(processor_id != THREADPROCESSORID_IDEAL);
+    // Adjust the core ID if the caller passed the "use process ideal core" magic value.
+ auto& kernel = system.Kernel();
+ auto& process = *kernel.CurrentProcess();
+ if (core_id == IdealCoreUseProcessValue) {
+ core_id = process.GetIdealCoreId();
}
- if (processor_id < THREADPROCESSORID_0 || processor_id > THREADPROCESSORID_3) {
- LOG_ERROR(Kernel_SVC, "Invalid thread processor ID: {}", processor_id);
- return ERR_INVALID_PROCESSOR_ID;
+ // Validate arguments.
+ if (!IsValidCoreId(core_id)) {
+ LOG_ERROR(Kernel_SVC, "Invalid Core ID specified (id={})", core_id);
+ return ResultInvalidCoreId;
}
-
- const u64 core_mask = current_process->GetCoreMask();
- if ((core_mask | (1ULL << processor_id)) != core_mask) {
- LOG_ERROR(Kernel_SVC, "Invalid thread core specified ({})", processor_id);
- return ERR_INVALID_PROCESSOR_ID;
+ if (((1ULL << core_id) & process.GetCoreMask()) == 0) {
+ LOG_ERROR(Kernel_SVC, "Core ID doesn't fall within allowable cores (id={})", core_id);
+ return ResultInvalidCoreId;
}
- if (priority > THREADPRIO_LOWEST) {
- LOG_ERROR(Kernel_SVC,
- "Invalid thread priority specified ({}). Must be within the range 0-64",
- priority);
- return ERR_INVALID_THREAD_PRIORITY;
+ if (HighestThreadPriority > priority || priority > LowestThreadPriority) {
+ LOG_ERROR(Kernel_SVC, "Invalid priority specified (priority={})", priority);
+ return ResultInvalidPriority;
}
-
- if (((1ULL << priority) & current_process->GetPriorityMask()) == 0) {
- LOG_ERROR(Kernel_SVC, "Invalid thread priority specified ({})", priority);
- return ERR_INVALID_THREAD_PRIORITY;
+ if (!process.CheckThreadPriority(priority)) {
+ LOG_ERROR(Kernel_SVC, "Invalid allowable thread priority (priority={})", priority);
+ return ResultInvalidPriority;
}
- auto& kernel = system.Kernel();
-
- ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Threads, 1));
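+    // Reserve a new thread from the process resource limit, giving the reservation up to
+    // 100ms of emulated time to succeed before failing.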
+ KScopedResourceReservation thread_reservation(
+ kernel.CurrentProcess(), LimitableResource::Threads, 1,
+ system.CoreTiming().GetGlobalTimeNs().count() + 100000000);
+ if (!thread_reservation.Succeeded()) {
+ LOG_ERROR(Kernel_SVC, "Could not reserve a new thread");
+ return ResultResourceLimitedExceeded;
+ }
- ThreadType type = THREADTYPE_USER;
- CASCADE_RESULT(std::shared_ptr<Thread> thread,
- Thread::Create(system, type, "", entry_point, priority, arg, processor_id,
- stack_top, current_process));
+ std::shared_ptr<KThread> thread;
+ {
+ KScopedLightLock lk{process.GetStateLock()};
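+        // Hold the process state lock so the process cannot change state (e.g. begin
+        // termination) while the thread is being created.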
+ CASCADE_RESULT(thread, KThread::Create(system, ThreadType::User, "", entry_point, priority,
+ arg, core_id, stack_bottom, &process));
+ }
- const auto new_thread_handle = current_process->GetHandleTable().Create(thread);
+ const auto new_thread_handle = process.GetHandleTable().Create(thread);
if (new_thread_handle.Failed()) {
LOG_ERROR(Kernel_SVC, "Failed to create handle with error=0x{:X}",
new_thread_handle.Code().raw);
@@ -1546,31 +1548,38 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
// Set the thread name for debugging purposes.
thread->SetName(
fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *new_thread_handle));
+ thread_reservation.Commit();
return RESULT_SUCCESS;
}
static ResultCode CreateThread32(Core::System& system, Handle* out_handle, u32 priority,
u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) {
- return CreateThread(system, out_handle, static_cast<VAddr>(entry_point), static_cast<u64>(arg),
- static_cast<VAddr>(stack_top), priority, processor_id);
+ return CreateThread(system, out_handle, entry_point, arg, stack_top, priority, processor_id);
}
/// Starts the thread for the provided handle
static ResultCode StartThread(Core::System& system, Handle thread_handle) {
LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
+ // Get the thread from its handle.
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
+ const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
if (!thread) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
- thread_handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle);
+ return ResultInvalidHandle;
}
- ASSERT(thread->GetStatus() == ThreadStatus::Dormant);
+ // Try to start the thread.
+ const auto run_result = thread->Run();
+ if (run_result.IsError()) {
+ LOG_ERROR(Kernel_SVC,
+ "Unable to successfuly start thread (thread handle={:08X}, result={})",
+ thread_handle, run_result.raw);
+ return run_result;
+ }
- return thread->Start();
+ return RESULT_SUCCESS;
}
static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
@@ -1581,9 +1590,9 @@ static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
static void ExitThread(Core::System& system) {
LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
- auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
- system.GlobalScheduler().RemoveThread(SharedFrom(current_thread));
- current_thread->Stop();
+ auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
+ system.GlobalSchedulerContext().RemoveThread(SharedFrom(current_thread));
+ current_thread->Exit();
}
static void ExitThread32(Core::System& system) {
@@ -1592,277 +1601,190 @@ static void ExitThread32(Core::System& system) {
/// Sleep the current thread
static void SleepThread(Core::System& system, s64 nanoseconds) {
- LOG_DEBUG(Kernel_SVC, "called nanoseconds={}", nanoseconds);
-
- enum class SleepType : s64 {
- YieldWithoutLoadBalancing = 0,
- YieldWithLoadBalancing = -1,
- YieldAndWaitForLoadBalancing = -2,
- };
-
- auto& scheduler = system.CurrentScheduler();
- auto* const current_thread = scheduler.GetCurrentThread();
- bool is_redundant = false;
-
- if (nanoseconds <= 0) {
- switch (static_cast<SleepType>(nanoseconds)) {
- case SleepType::YieldWithoutLoadBalancing: {
- auto pair = current_thread->YieldSimple();
- is_redundant = pair.second;
- break;
- }
- case SleepType::YieldWithLoadBalancing: {
- auto pair = current_thread->YieldAndBalanceLoad();
- is_redundant = pair.second;
- break;
- }
- case SleepType::YieldAndWaitForLoadBalancing: {
- auto pair = current_thread->YieldAndWaitForLoadBalancing();
- is_redundant = pair.second;
- break;
- }
- default:
- UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
- }
+ auto& kernel = system.Kernel();
+ const auto yield_type = static_cast<Svc::YieldType>(nanoseconds);
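+    // Non-positive durations select a scheduler yield strategy rather than a timed sleep.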
+
+ LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);
+
+ // When the input tick is positive, sleep.
+ if (nanoseconds > 0) {
+ // Convert the timeout from nanoseconds to ticks.
+ // NOTE: Nintendo does not use this conversion logic in WaitSynchronization...
+
+ // Sleep.
+ // NOTE: Nintendo does not check the result of this sleep.
+ static_cast<void>(GetCurrentThread(kernel).Sleep(nanoseconds));
+ } else if (yield_type == Svc::YieldType::WithoutCoreMigration) {
+ KScheduler::YieldWithoutCoreMigration(kernel);
+ } else if (yield_type == Svc::YieldType::WithCoreMigration) {
+ KScheduler::YieldWithCoreMigration(kernel);
+ } else if (yield_type == Svc::YieldType::ToAnyThread) {
+ KScheduler::YieldToAnyThread(kernel);
} else {
- current_thread->Sleep(nanoseconds);
- }
-
- if (is_redundant && !system.Kernel().IsMulticore()) {
- system.Kernel().ExitSVCProfile();
- system.CoreTiming().AddTicks(1000U);
- system.GetCpuManager().PreemptSingleCore();
- system.Kernel().EnterSVCProfile();
+        // Nintendo does nothing at all if an otherwise invalid value is passed, but we
+        // assert here so that misbehaving titles are noticed during development.
+        UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
}
}
static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanoseconds_high) {
- const s64 nanoseconds = static_cast<s64>(static_cast<u64>(nanoseconds_low) |
- (static_cast<u64>(nanoseconds_high) << 32));
+ const auto nanoseconds = static_cast<s64>(u64{nanoseconds_low} | (u64{nanoseconds_high} << 32));
SleepThread(system, nanoseconds);
}
/// Wait process wide key atomic
-static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_addr,
- VAddr condition_variable_addr, Handle thread_handle,
- s64 nano_seconds) {
- LOG_TRACE(
- Kernel_SVC,
- "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}",
- mutex_addr, condition_variable_addr, thread_handle, nano_seconds);
-
- if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
- LOG_ERROR(
- Kernel_SVC,
- "Given mutex address must not be within the kernel address space. address=0x{:016X}",
- mutex_addr);
- return ERR_INVALID_ADDRESS_STATE;
- }
-
- if (!Common::IsWordAligned(mutex_addr)) {
- LOG_ERROR(Kernel_SVC, "Given mutex address must be word-aligned. address=0x{:016X}",
- mutex_addr);
- return ERR_INVALID_ADDRESS;
- }
-
- ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
- auto& kernel = system.Kernel();
- Handle event_handle;
- Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
- auto* const current_process = system.Kernel().CurrentProcess();
- {
- SchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
- const auto& handle_table = current_process->GetHandleTable();
- std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
- ASSERT(thread);
-
- current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
-
- if (thread->IsPendingTermination()) {
- lock.CancelSleep();
- return ERR_THREAD_TERMINATING;
- }
-
- const auto release_result = current_process->GetMutex().Release(mutex_addr);
- if (release_result.IsError()) {
- lock.CancelSleep();
- return release_result;
- }
-
- if (nano_seconds == 0) {
- lock.CancelSleep();
- return RESULT_TIMEOUT;
- }
-
- current_thread->SetCondVarWaitAddress(condition_variable_addr);
- current_thread->SetMutexWaitAddress(mutex_addr);
- current_thread->SetWaitHandle(thread_handle);
- current_thread->SetStatus(ThreadStatus::WaitCondVar);
- current_process->InsertConditionVariableThread(SharedFrom(current_thread));
- }
-
- if (event_handle != InvalidHandle) {
- auto& time_manager = kernel.TimeManager();
- time_manager.UnscheduleTimeEvent(event_handle);
- }
-
- {
- SchedulerLock lock(kernel);
-
- auto* owner = current_thread->GetLockOwner();
- if (owner != nullptr) {
- owner->RemoveMutexWaiter(SharedFrom(current_thread));
+static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key,
+ u32 tag, s64 timeout_ns) {
+ LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
+ cv_key, tag, timeout_ns);
+
+ // Validate input.
+ if (Memory::IsKernelAddress(address)) {
+ LOG_ERROR(Kernel_SVC, "Attempted to wait on kernel address (address={:08X})", address);
+ return ResultInvalidCurrentMemory;
+ }
+ if (!Common::IsAligned(address, sizeof(s32))) {
+ LOG_ERROR(Kernel_SVC, "Address must be 4 byte aligned (address={:08X})", address);
+ return ResultInvalidAddress;
+ }
+
+ // Convert timeout from nanoseconds to ticks.
+ s64 timeout{};
+ if (timeout_ns > 0) {
+ const s64 offset_tick(timeout_ns);
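+        // Pad the timeout by two ticks, saturating to the maximum timeout if the addition
+        // overflows.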
+ if (offset_tick > 0) {
+ timeout = offset_tick + 2;
+ if (timeout <= 0) {
+ timeout = std::numeric_limits<s64>::max();
+ }
+ } else {
+ timeout = std::numeric_limits<s64>::max();
}
-
- current_process->RemoveConditionVariableThread(SharedFrom(current_thread));
+ } else {
+ timeout = timeout_ns;
}
- // Note: Deliberately don't attempt to inherit the lock owner's priority.
- return current_thread->GetSignalingResult();
+ // Wait on the condition variable.
+ return system.Kernel().CurrentProcess()->WaitConditionVariable(
+ address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout);
}
-static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 mutex_addr,
- u32 condition_variable_addr, Handle thread_handle,
- u32 nanoseconds_low, u32 nanoseconds_high) {
- const s64 nanoseconds =
- static_cast<s64>(nanoseconds_low | (static_cast<u64>(nanoseconds_high) << 32));
- return WaitProcessWideKeyAtomic(system, static_cast<VAddr>(mutex_addr),
- static_cast<VAddr>(condition_variable_addr), thread_handle,
- nanoseconds);
+static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
+ u32 timeout_ns_low, u32 timeout_ns_high) {
+ const auto timeout_ns = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
+ return WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns);
}
/// Signal process wide key
-static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_addr, s32 target) {
- LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
- condition_variable_addr, target);
+static void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count) {
+ LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count);
- ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
+ // Signal the condition variable.
+ return system.Kernel().CurrentProcess()->SignalConditionVariable(
+ Common::AlignDown(cv_key, sizeof(u32)), count);
+}
- // Retrieve a list of all threads that are waiting for this condition variable.
- auto& kernel = system.Kernel();
- SchedulerLock lock(kernel);
- auto* const current_process = kernel.CurrentProcess();
- std::vector<std::shared_ptr<Thread>> waiting_threads =
- current_process->GetConditionVariableThreads(condition_variable_addr);
-
- // Only process up to 'target' threads, unless 'target' is less equal 0, in which case process
- // them all.
- std::size_t last = waiting_threads.size();
- if (target > 0) {
- last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
- }
- for (std::size_t index = 0; index < last; ++index) {
- auto& thread = waiting_threads[index];
-
- ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);
-
- // liberate Cond Var Thread.
- current_process->RemoveConditionVariableThread(thread);
-
- const std::size_t current_core = system.CurrentCoreIndex();
- auto& monitor = system.Monitor();
-
- // Atomically read the value of the mutex.
- u32 mutex_val = 0;
- u32 update_val = 0;
- const VAddr mutex_address = thread->GetMutexWaitAddress();
- do {
- // If the mutex is not yet acquired, acquire it.
- mutex_val = monitor.ExclusiveRead32(current_core, mutex_address);
-
- if (mutex_val != 0) {
- update_val = mutex_val | Mutex::MutexHasWaitersFlag;
- } else {
- update_val = thread->GetWaitHandle();
- }
- } while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val));
- monitor.ClearExclusive();
- if (mutex_val == 0) {
- // We were able to acquire the mutex, resume this thread.
- auto* const lock_owner = thread->GetLockOwner();
- if (lock_owner != nullptr) {
- lock_owner->RemoveMutexWaiter(thread);
- }
+static void SignalProcessWideKey32(Core::System& system, u32 cv_key, s32 count) {
+ SignalProcessWideKey(system, cv_key, count);
+}
- thread->SetLockOwner(nullptr);
- thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
- thread->ResumeFromWait();
- } else {
- // The mutex is already owned by some other thread, make this thread wait on it.
- const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- auto owner = handle_table.Get<Thread>(owner_handle);
- ASSERT(owner);
- if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
- thread->SetStatus(ThreadStatus::WaitMutex);
- }
+namespace {
- owner->AddMutexWaiter(thread);
- }
+constexpr bool IsValidSignalType(Svc::SignalType type) {
+ switch (type) {
+ case Svc::SignalType::Signal:
+ case Svc::SignalType::SignalAndIncrementIfEqual:
+ case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
+ return true;
+ default:
+ return false;
}
}
-static void SignalProcessWideKey32(Core::System& system, u32 condition_variable_addr, s32 target) {
- SignalProcessWideKey(system, condition_variable_addr, target);
+constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
+ switch (type) {
+ case Svc::ArbitrationType::WaitIfLessThan:
+ case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
+ case Svc::ArbitrationType::WaitIfEqual:
+ return true;
+ default:
+ return false;
+ }
}
-// Wait for an address (via Address Arbiter)
-static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type, s32 value,
- s64 timeout) {
- LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, timeout={}", address,
- type, value, timeout);
-
- // If the passed address is a kernel virtual address, return invalid memory state.
- if (Core::Memory::IsKernelVirtualAddress(address)) {
- LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address);
- return ERR_INVALID_ADDRESS_STATE;
- }
+} // namespace
- // If the address is not properly aligned to 4 bytes, return invalid address.
- if (!Common::IsWordAligned(address)) {
- LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address);
- return ERR_INVALID_ADDRESS;
+// Wait for an address (via Address Arbiter)
+static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type,
+ s32 value, s64 timeout_ns) {
+ LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
+ address, arb_type, value, timeout_ns);
+
+ // Validate input.
+ if (Memory::IsKernelAddress(address)) {
+ LOG_ERROR(Kernel_SVC, "Attempting to wait on kernel address (address={:08X})", address);
+ return ResultInvalidCurrentMemory;
+ }
+ if (!Common::IsAligned(address, sizeof(s32))) {
+ LOG_ERROR(Kernel_SVC, "Wait address must be 4 byte aligned (address={:08X})", address);
+ return ResultInvalidAddress;
+ }
+ if (!IsValidArbitrationType(arb_type)) {
+ LOG_ERROR(Kernel_SVC, "Invalid arbitration type specified (type={})", arb_type);
+ return ResultInvalidEnumValue;
+ }
+
+ // Convert timeout from nanoseconds to ticks.
+ s64 timeout{};
+ if (timeout_ns > 0) {
+ const s64 offset_tick(timeout_ns);
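+        // Pad the timeout by two ticks, saturating to the maximum timeout if the addition
+        // overflows.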
+ if (offset_tick > 0) {
+ timeout = offset_tick + 2;
+ if (timeout <= 0) {
+ timeout = std::numeric_limits<s64>::max();
+ }
+ } else {
+ timeout = std::numeric_limits<s64>::max();
+ }
+ } else {
+ timeout = timeout_ns;
}
- const auto arbitration_type = static_cast<AddressArbiter::ArbitrationType>(type);
- auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
- const ResultCode result =
- address_arbiter.WaitForAddress(address, arbitration_type, value, timeout);
- return result;
+ return system.Kernel().CurrentProcess()->WaitAddressArbiter(address, arb_type, value, timeout);
}
-static ResultCode WaitForAddress32(Core::System& system, u32 address, u32 type, s32 value,
- u32 timeout_low, u32 timeout_high) {
- s64 timeout = static_cast<s64>(timeout_low | (static_cast<u64>(timeout_high) << 32));
- return WaitForAddress(system, static_cast<VAddr>(address), type, value, timeout);
+static ResultCode WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type,
+ s32 value, u32 timeout_ns_low, u32 timeout_ns_high) {
+ const auto timeout = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
+ return WaitForAddress(system, address, arb_type, value, timeout);
}
// Signals to an address (via Address Arbiter)
-static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value,
- s32 num_to_wake) {
- LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, num_to_wake=0x{:X}",
- address, type, value, num_to_wake);
+static ResultCode SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type,
+ s32 value, s32 count) {
+ LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
+ address, signal_type, value, count);
- // If the passed address is a kernel virtual address, return invalid memory state.
- if (Core::Memory::IsKernelVirtualAddress(address)) {
- LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address);
- return ERR_INVALID_ADDRESS_STATE;
+ // Validate input.
+ if (Memory::IsKernelAddress(address)) {
+ LOG_ERROR(Kernel_SVC, "Attempting to signal to a kernel address (address={:08X})", address);
+ return ResultInvalidCurrentMemory;
}
-
- // If the address is not properly aligned to 4 bytes, return invalid address.
- if (!Common::IsWordAligned(address)) {
- LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address);
- return ERR_INVALID_ADDRESS;
+ if (!Common::IsAligned(address, sizeof(s32))) {
+ LOG_ERROR(Kernel_SVC, "Signaled address must be 4 byte aligned (address={:08X})", address);
+ return ResultInvalidAddress;
+ }
+ if (!IsValidSignalType(signal_type)) {
+ LOG_ERROR(Kernel_SVC, "Invalid signal type specified (type={})", signal_type);
+ return ResultInvalidEnumValue;
}
- const auto signal_type = static_cast<AddressArbiter::SignalType>(type);
- auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
- return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake);
+ return system.Kernel().CurrentProcess()->SignalAddressArbiter(address, signal_type, value,
+ count);
}
-static ResultCode SignalToAddress32(Core::System& system, u32 address, u32 type, s32 value,
- s32 num_to_wake) {
- return SignalToAddress(system, static_cast<VAddr>(address), type, value, num_to_wake);
+static ResultCode SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
+ s32 value, s32 count) {
+ return SignalToAddress(system, address, signal_type, value, count);
}
static void KernelDebug([[maybe_unused]] Core::System& system,
@@ -1893,7 +1815,7 @@ static u64 GetSystemTick(Core::System& system) {
}
static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high) {
- u64 time = GetSystemTick(system);
+ const auto time = GetSystemTick(system);
*time_low = static_cast<u32>(time);
*time_high = static_cast<u32>(time >> 32);
}
@@ -1914,20 +1836,28 @@ static ResultCode CloseHandle32(Core::System& system, Handle handle) {
static ResultCode ResetSignal(Core::System& system, Handle handle) {
LOG_DEBUG(Kernel_SVC, "called handle 0x{:08X}", handle);
+ // Get the current handle table.
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- auto event = handle_table.Get<ReadableEvent>(handle);
- if (event) {
- return event->Reset();
+ // Try to reset as readable event.
+ {
+ auto readable_event = handle_table.Get<KReadableEvent>(handle);
+ if (readable_event) {
+ return readable_event->Reset();
+ }
}
- auto process = handle_table.Get<Process>(handle);
- if (process) {
- return process->ClearSignalState();
+ // Try to reset as process.
+ {
+ auto process = handle_table.Get<Process>(handle);
+ if (process) {
+ return process->Reset();
+ }
}
- LOG_ERROR(Kernel_SVC, "Invalid handle (0x{:08X})", handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "invalid handle (0x{:08X})", handle);
+
+ return ResultInvalidHandle;
}
static ResultCode ResetSignal32(Core::System& system, Handle handle) {
@@ -1943,18 +1873,18 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd
if (!Common::Is4KBAligned(addr)) {
LOG_ERROR(Kernel_SVC, "Address ({:016X}) is not page aligned!", addr);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (!Common::Is4KBAligned(size) || size == 0) {
LOG_ERROR(Kernel_SVC, "Size ({:016X}) is not page aligned or equal to zero!", size);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (!IsValidAddressRange(addr, size)) {
LOG_ERROR(Kernel_SVC, "Address and size cause overflow! (address={:016X}, size={:016X})",
addr, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
const auto perms{static_cast<Memory::MemoryPermission>(permissions)};
@@ -1962,10 +1892,17 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd
perms == Memory::MemoryPermission::Write) {
LOG_ERROR(Kernel_SVC, "Invalid memory permissions for transfer memory! (perms={:08X})",
permissions);
- return ERR_INVALID_MEMORY_PERMISSIONS;
+ return ResultInvalidMemoryPermissions;
}
auto& kernel = system.Kernel();
+ // Reserve a new transfer memory from the process resource limit.
+ KScopedResourceReservation trmem_reservation(kernel.CurrentProcess(),
+ LimitableResource::TransferMemory);
+ if (!trmem_reservation.Succeeded()) {
+ LOG_ERROR(Kernel_SVC, "Could not reserve a new transfer memory");
+ return ResultResourceLimitedExceeded;
+ }
auto transfer_mem_handle = TransferMemory::Create(kernel, system.Memory(), addr, size, perms);
if (const auto reserve_result{transfer_mem_handle->Reserve()}; reserve_result.IsError()) {
@@ -1977,6 +1914,7 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd
if (result.Failed()) {
return result.Code();
}
+ trmem_reservation.Commit();
*handle = *result;
return RESULT_SUCCESS;
@@ -1984,176 +1922,207 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd
static ResultCode CreateTransferMemory32(Core::System& system, Handle* handle, u32 addr, u32 size,
u32 permissions) {
- return CreateTransferMemory(system, handle, static_cast<VAddr>(addr),
- static_cast<std::size_t>(size), permissions);
+ return CreateTransferMemory(system, handle, addr, size, permissions);
}
-static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core,
- u64* mask) {
+static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
+ u64* out_affinity_mask) {
LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
+ // Get the thread from its handle.
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
+ const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
if (!thread) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
- thread_handle);
- *core = 0;
- *mask = 0;
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Invalid thread handle specified (handle={:08X})", thread_handle);
+ return ResultInvalidHandle;
}
- *core = thread->GetIdealCore();
- *mask = thread->GetAffinityMask();
+ // Get the core mask.
+ const auto result = thread->GetCoreMask(out_core_id, out_affinity_mask);
+ if (result.IsError()) {
+ LOG_ERROR(Kernel_SVC, "Unable to successfully retrieve core mask (result={})", result.raw);
+ return result;
+ }
return RESULT_SUCCESS;
}
-static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle, u32* core,
- u32* mask_low, u32* mask_high) {
- u64 mask{};
- const auto result = GetThreadCoreMask(system, thread_handle, core, &mask);
- *mask_high = static_cast<u32>(mask >> 32);
- *mask_low = static_cast<u32>(mask);
+static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle, s32* out_core_id,
+ u32* out_affinity_mask_low, u32* out_affinity_mask_high) {
+ u64 out_affinity_mask{};
+ const auto result = GetThreadCoreMask(system, thread_handle, out_core_id, &out_affinity_mask);
+ *out_affinity_mask_high = static_cast<u32>(out_affinity_mask >> 32);
+ *out_affinity_mask_low = static_cast<u32>(out_affinity_mask);
return result;
}
-static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, u32 core,
+static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
u64 affinity_mask) {
- LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core=0x{:X}, affinity_mask=0x{:016X}",
- thread_handle, core, affinity_mask);
-
- const auto* const current_process = system.Kernel().CurrentProcess();
+ LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core_id=0x{:X}, affinity_mask=0x{:016X}",
+ thread_handle, core_id, affinity_mask);
- if (core == static_cast<u32>(THREADPROCESSORID_IDEAL)) {
- const u8 ideal_cpu_core = current_process->GetIdealCore();
+ const auto& current_process = *system.Kernel().CurrentProcess();
- ASSERT(ideal_cpu_core != static_cast<u8>(THREADPROCESSORID_IDEAL));
-
- // Set the target CPU to the ideal core specified by the process.
- core = ideal_cpu_core;
- affinity_mask = 1ULL << core;
+ // Determine the core id/affinity mask.
+ if (core_id == Svc::IdealCoreUseProcessValue) {
+ core_id = current_process.GetIdealCoreId();
+ affinity_mask = (1ULL << core_id);
} else {
- const u64 core_mask = current_process->GetCoreMask();
-
- if ((core_mask | affinity_mask) != core_mask) {
- LOG_ERROR(
- Kernel_SVC,
- "Invalid processor ID specified (core_mask=0x{:08X}, affinity_mask=0x{:016X})",
- core_mask, affinity_mask);
- return ERR_INVALID_PROCESSOR_ID;
+ // Validate the affinity mask.
+ const u64 process_core_mask = current_process.GetCoreMask();
+ if ((affinity_mask | process_core_mask) != process_core_mask) {
+ LOG_ERROR(Kernel_SVC,
+ "Affinity mask does match the process core mask (affinity mask={:016X}, core "
+ "mask={:016X})",
+ affinity_mask, process_core_mask);
+ return ResultInvalidCoreId;
}
-
if (affinity_mask == 0) {
- LOG_ERROR(Kernel_SVC, "Specfified affinity mask is zero.");
- return ERR_INVALID_COMBINATION;
+ LOG_ERROR(Kernel_SVC, "Affinity mask is zero.");
+ return ResultInvalidCombination;
}
- if (core < Core::Hardware::NUM_CPU_CORES) {
- if ((affinity_mask & (1ULL << core)) == 0) {
- LOG_ERROR(Kernel_SVC,
- "Core is not enabled for the current mask, core={}, mask={:016X}", core,
- affinity_mask);
- return ERR_INVALID_COMBINATION;
+ // Validate the core id.
+ if (IsValidCoreId(core_id)) {
+ if (((1ULL << core_id) & affinity_mask) == 0) {
+ LOG_ERROR(Kernel_SVC, "Invalid core ID (ID={})", core_id);
+ return ResultInvalidCombination;
+ }
+ } else {
+ if (core_id != IdealCoreNoUpdate && core_id != IdealCoreDontCare) {
+ LOG_ERROR(Kernel_SVC, "Invalid core ID (ID={})", core_id);
+ return ResultInvalidCoreId;
}
- } else if (core != static_cast<u32>(THREADPROCESSORID_DONT_CARE) &&
- core != static_cast<u32>(THREADPROCESSORID_DONT_UPDATE)) {
- LOG_ERROR(Kernel_SVC, "Invalid processor ID specified (core={}).", core);
- return ERR_INVALID_PROCESSOR_ID;
}
}
- const auto& handle_table = current_process->GetHandleTable();
- const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
+ // Get the thread from its handle.
+ const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
if (!thread) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
- thread_handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Invalid thread handle (handle={:08X})", thread_handle);
+ return ResultInvalidHandle;
}
- return thread->SetCoreAndAffinityMask(core, affinity_mask);
+ // Set the core mask.
+ const auto set_result = thread->SetCoreMask(core_id, affinity_mask);
+ if (set_result.IsError()) {
+ LOG_ERROR(Kernel_SVC, "Unable to successfully set core mask (result={})", set_result.raw);
+ return set_result;
+ }
+ return RESULT_SUCCESS;
}
-static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, u32 core,
+static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, s32 core_id,
u32 affinity_mask_low, u32 affinity_mask_high) {
- const u64 affinity_mask =
- static_cast<u64>(affinity_mask_low) | (static_cast<u64>(affinity_mask_high) << 32);
- return SetThreadCoreMask(system, thread_handle, core, affinity_mask);
+ const auto affinity_mask = u64{affinity_mask_low} | (u64{affinity_mask_high} << 32);
+ return SetThreadCoreMask(system, thread_handle, core_id, affinity_mask);
}
-static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle* read_handle) {
- LOG_DEBUG(Kernel_SVC, "called");
+static ResultCode SignalEvent(Core::System& system, Handle event_handle) {
+ LOG_DEBUG(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
auto& kernel = system.Kernel();
- const auto [readable_event, writable_event] =
- WritableEvent::CreateEventPair(kernel, "CreateEvent");
+ // Get the current handle table.
+ const HandleTable& handle_table = kernel.CurrentProcess()->GetHandleTable();
- HandleTable& handle_table = kernel.CurrentProcess()->GetHandleTable();
-
- const auto write_create_result = handle_table.Create(writable_event);
- if (write_create_result.Failed()) {
- return write_create_result.Code();
-    }
- *write_handle = *write_create_result;
-    const auto read_create_result = handle_table.Create(readable_event);
- if (read_create_result.Failed()) {
- handle_table.Close(*write_create_result);
- return read_create_result.Code();
+ // Get the writable event.
+ auto writable_event = handle_table.Get<KWritableEvent>(event_handle);
+ if (!writable_event) {
+ LOG_ERROR(Kernel_SVC, "Invalid event handle provided (handle={:08X})", event_handle);
+ return ResultInvalidHandle;
}
- *read_handle = *read_create_result;
- LOG_DEBUG(Kernel_SVC,
- "successful. Writable event handle=0x{:08X}, Readable event handle=0x{:08X}",
- *write_create_result, *read_create_result);
- return RESULT_SUCCESS;
+ return writable_event->Signal();
}
-static ResultCode CreateEvent32(Core::System& system, Handle* write_handle, Handle* read_handle) {
- return CreateEvent(system, write_handle, read_handle);
+static ResultCode SignalEvent32(Core::System& system, Handle event_handle) {
+ return SignalEvent(system, event_handle);
}
-static ResultCode ClearEvent(Core::System& system, Handle handle) {
- LOG_TRACE(Kernel_SVC, "called, event=0x{:08X}", handle);
+static ResultCode ClearEvent(Core::System& system, Handle event_handle) {
+ LOG_TRACE(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
+ // Get the current handle table.
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- auto writable_event = handle_table.Get<WritableEvent>(handle);
- if (writable_event) {
- writable_event->Clear();
- return RESULT_SUCCESS;
+ // Try to clear the writable event.
+ {
+ auto writable_event = handle_table.Get<KWritableEvent>(event_handle);
+ if (writable_event) {
+ return writable_event->Clear();
+ }
}
- auto readable_event = handle_table.Get<ReadableEvent>(handle);
- if (readable_event) {
- readable_event->Clear();
- return RESULT_SUCCESS;
+ // Try to clear the readable event.
+ {
+ auto readable_event = handle_table.Get<KReadableEvent>(event_handle);
+ if (readable_event) {
+ return readable_event->Clear();
+ }
}
- LOG_ERROR(Kernel_SVC, "Event handle does not exist, handle=0x{:08X}", handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Event handle does not exist, event_handle=0x{:08X}", event_handle);
+
+ return ResultInvalidHandle;
}
-static ResultCode ClearEvent32(Core::System& system, Handle handle) {
- return ClearEvent(system, handle);
+static ResultCode ClearEvent32(Core::System& system, Handle event_handle) {
+ return ClearEvent(system, event_handle);
}
-static ResultCode SignalEvent(Core::System& system, Handle handle) {
- LOG_DEBUG(Kernel_SVC, "called. Handle=0x{:08X}", handle);
+static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
+ LOG_DEBUG(Kernel_SVC, "called");
- HandleTable& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- auto writable_event = handle_table.Get<WritableEvent>(handle);
+    // Get the kernel reference and handle table.
+    auto& kernel = system.Kernel();
+    HandleTable& handle_table = kernel.CurrentProcess()->GetHandleTable();
+
+    // Reserve a new event from the process resource limit.
+    KScopedResourceReservation event_reservation(kernel.CurrentProcess(),
+                                                 LimitableResource::Events);
+    if (!event_reservation.Succeeded()) {
+        LOG_ERROR(Kernel_SVC, "Could not reserve a new event");
+        return ResultResourceLimitedExceeded;
+    }
-    if (!writable_event) {
-        LOG_ERROR(Kernel_SVC, "Non-existent writable event handle used (0x{:08X})", handle);
-        return ERR_INVALID_HANDLE;
+    // Create a new event.
+    const auto event = KEvent::Create(kernel, "CreateEvent");
+    if (!event) {
+        LOG_ERROR(Kernel_SVC, "Unable to create new events. Event creation limit reached.");
+        return ResultOutOfResource;
+    }
+
+ // Initialize the event.
+ event->Initialize();
+
+ // Add the writable event to the handle table.
+ const auto write_create_result = handle_table.Create(event->GetWritableEvent());
+ if (write_create_result.Failed()) {
+ return write_create_result.Code();
+ }
+ *out_write = *write_create_result;
+
+    // Ensure the writable event handle is closed if adding the readable event fails.
+ auto handle_guard = SCOPE_GUARD({ handle_table.Close(*write_create_result); });
+
+ // Add the readable event to the handle table.
+ const auto read_create_result = handle_table.Create(event->GetReadableEvent());
+ if (read_create_result.Failed()) {
+ return read_create_result.Code();
}
+ *out_read = *read_create_result;
- writable_event->Signal();
+    // We succeeded; commit the event reservation.
+    event_reservation.Commit();
+    handle_guard.Cancel();
return RESULT_SUCCESS;
}
-static ResultCode SignalEvent32(Core::System& system, Handle handle) {
- return SignalEvent(system, handle);
+static ResultCode CreateEvent32(Core::System& system, Handle* out_write, Handle* out_read) {
+ return CreateEvent(system, out_write, out_read);
}
static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {
@@ -2169,13 +2138,13 @@ static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_
if (!process) {
LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
process_handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
const auto info_type = static_cast<InfoType>(type);
if (info_type != InfoType::Status) {
LOG_ERROR(Kernel_SVC, "Expected info_type to be Status but got {} instead", type);
- return ERR_INVALID_ENUM_VALUE;
+ return ResultInvalidEnumValue;
}
*out = static_cast<u64>(process->GetStatus());
@@ -2187,7 +2156,7 @@ static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle)
LOG_DEBUG(Kernel_SVC, "called");
auto& kernel = system.Kernel();
- auto resource_limit = ResourceLimit::Create(kernel);
+ auto resource_limit = std::make_shared<KResourceLimit>(kernel, system);
auto* const current_process = kernel.CurrentProcess();
ASSERT(current_process != nullptr);
@@ -2234,30 +2203,30 @@ static ResultCode SetResourceLimitLimitValue(Core::System& system, Handle resour
LOG_DEBUG(Kernel_SVC, "called. Handle={:08X}, Resource type={}, Value={}", resource_limit,
resource_type, value);
- const auto type = static_cast<ResourceType>(resource_type);
+ const auto type = static_cast<LimitableResource>(resource_type);
if (!IsValidResourceType(type)) {
LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type);
- return ERR_INVALID_ENUM_VALUE;
+ return ResultInvalidEnumValue;
}
auto* const current_process = system.Kernel().CurrentProcess();
ASSERT(current_process != nullptr);
auto resource_limit_object =
- current_process->GetHandleTable().Get<ResourceLimit>(resource_limit);
+ current_process->GetHandleTable().Get<KResourceLimit>(resource_limit);
if (!resource_limit_object) {
LOG_ERROR(Kernel_SVC, "Handle to non-existent resource limit instance used. Handle={:08X}",
resource_limit);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
const auto set_result = resource_limit_object->SetLimitValue(type, static_cast<s64>(value));
if (set_result.IsError()) {
- LOG_ERROR(
- Kernel_SVC,
- "Attempted to lower resource limit ({}) for category '{}' below its current value ({})",
- resource_limit_object->GetMaxResourceValue(type), resource_type,
- resource_limit_object->GetCurrentResourceValue(type));
+ LOG_ERROR(Kernel_SVC,
+ "Attempted to lower resource limit ({}) for category '{}' below its current "
+ "value ({})",
+ resource_limit_object->GetLimitValue(type), resource_type,
+ resource_limit_object->GetCurrentValue(type));
return set_result;
}
@@ -2274,7 +2243,7 @@ static ResultCode GetProcessList(Core::System& system, u32* out_num_processes,
LOG_ERROR(Kernel_SVC,
"Supplied size outside [0, 0x0FFFFFFF] range. out_process_ids_size={}",
out_process_ids_size);
- return ERR_OUT_OF_RANGE;
+ return ResultOutOfRange;
}
const auto& kernel = system.Kernel();
@@ -2284,7 +2253,7 @@ static ResultCode GetProcessList(Core::System& system, u32* out_num_processes,
out_process_ids, total_copy_size)) {
LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
out_process_ids, out_process_ids + total_copy_size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
auto& memory = system.Memory();
@@ -2313,7 +2282,7 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
if ((out_thread_ids_size & 0xF0000000) != 0) {
LOG_ERROR(Kernel_SVC, "Supplied size outside [0, 0x0FFFFFFF] range. size={}",
out_thread_ids_size);
- return ERR_OUT_OF_RANGE;
+ return ResultOutOfRange;
}
const auto* const current_process = system.Kernel().CurrentProcess();
@@ -2323,7 +2292,7 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
!current_process->PageTable().IsInsideAddressSpace(out_thread_ids, total_copy_size)) {
LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
out_thread_ids, out_thread_ids + total_copy_size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
auto& memory = system.Memory();
@@ -2341,9 +2310,10 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
return RESULT_SUCCESS;
}
-static ResultCode FlushProcessDataCache32(Core::System& system, Handle handle, u32 address,
- u32 size) {
- // Note(Blinkhawk): For emulation purposes of the data cache this is mostly a nope
+static ResultCode FlushProcessDataCache32([[maybe_unused]] Core::System& system,
+ [[maybe_unused]] Handle handle,
+ [[maybe_unused]] u32 address, [[maybe_unused]] u32 size) {
+ // Note(Blinkhawk): For emulation purposes of the data cache this is mostly a no-op,
// as all emulation is done in the same cache level in host architecture, thus data cache
// does not need flushing.
LOG_DEBUG(Kernel_SVC, "called");
@@ -2639,6 +2609,9 @@ void Call(Core::System& system, u32 immediate) {
auto& kernel = system.Kernel();
kernel.EnterSVCProfile();
+ auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
+ thread->SetIsCallingSvc();
+
const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)
: GetSVCInfo32(immediate);
if (info) {
@@ -2652,6 +2625,12 @@ void Call(Core::System& system, u32 immediate) {
}
kernel.ExitSVCProfile();
+
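+    // If the thread is no longer marked as executing an SVC (e.g. it exited during the
+    // call), rewind the host context instead of resuming past the call site.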
+ if (!thread->IsCallingSvc()) {
+ auto* host_context = thread->GetHostContext().get();
+ host_context->Rewind();
+ }
+
system.EnterDynarmicProfile();
}
diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h
new file mode 100644
index 000000000..4af049551
--- /dev/null
+++ b/src/core/hle/kernel/svc_common.h
@@ -0,0 +1,14 @@
+// Copyright 2020 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Kernel::Svc {
+
+constexpr s32 ArgumentHandleCountMax = 0x40;
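+
+// Bit set in a mutex/condition-variable tag to indicate that threads are waiting on it.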
+constexpr u32 HandleWaitMask{1u << 30};
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h
new file mode 100644
index 000000000..a26d9f2c9
--- /dev/null
+++ b/src/core/hle/kernel/svc_results.h
@@ -0,0 +1,41 @@
+// Copyright 2018 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+// Confirmed Switch kernel error codes
+
+constexpr ResultCode ResultMaxConnectionsReached{ErrorModule::Kernel, 7};
+constexpr ResultCode ResultInvalidCapabilityDescriptor{ErrorModule::Kernel, 14};
+constexpr ResultCode ResultNoSynchronizationObject{ErrorModule::Kernel, 57};
+constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59};
+constexpr ResultCode ResultInvalidSize{ErrorModule::Kernel, 101};
+constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102};
+constexpr ResultCode ResultOutOfResource{ErrorModule::Kernel, 103};
+constexpr ResultCode ResultOutOfMemory{ErrorModule::Kernel, 104};
+constexpr ResultCode ResultHandleTableFull{ErrorModule::Kernel, 105};
+constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
+constexpr ResultCode ResultInvalidMemoryPermissions{ErrorModule::Kernel, 108};
+constexpr ResultCode ResultInvalidMemoryRange{ErrorModule::Kernel, 110};
+constexpr ResultCode ResultInvalidPriority{ErrorModule::Kernel, 112};
+constexpr ResultCode ResultInvalidCoreId{ErrorModule::Kernel, 113};
+constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114};
+constexpr ResultCode ResultInvalidPointer{ErrorModule::Kernel, 115};
+constexpr ResultCode ResultInvalidCombination{ErrorModule::Kernel, 116};
+constexpr ResultCode ResultTimedOut{ErrorModule::Kernel, 117};
+constexpr ResultCode ResultCancelled{ErrorModule::Kernel, 118};
+constexpr ResultCode ResultOutOfRange{ErrorModule::Kernel, 119};
+constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120};
+constexpr ResultCode ResultNotFound{ErrorModule::Kernel, 121};
+constexpr ResultCode ResultBusy{ErrorModule::Kernel, 122};
+constexpr ResultCode ResultSessionClosedByRemote{ErrorModule::Kernel, 123};
+constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125};
+constexpr ResultCode ResultReservedValue{ErrorModule::Kernel, 126};
+constexpr ResultCode ResultResourceLimitedExceeded{ErrorModule::Kernel, 132};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 986724beb..ec463b97c 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -23,8 +23,8 @@ enum class MemoryState : u32 {
Ipc = 0x0A,
Stack = 0x0B,
ThreadLocal = 0x0C,
- Transfered = 0x0D,
- SharedTransfered = 0x0E,
+ Transferred = 0x0D,
+ SharedTransferred = 0x0E,
SharedCode = 0x0F,
Inaccessible = 0x10,
NonSecureIpc = 0x11,
@@ -65,4 +65,34 @@ struct MemoryInfo {
u32 padding{};
};
+enum class SignalType : u32 {
+ Signal = 0,
+ SignalAndIncrementIfEqual = 1,
+ SignalAndModifyByWaitingCountIfEqual = 2,
+};
+
+enum class ArbitrationType : u32 {
+ WaitIfLessThan = 0,
+ DecrementAndWaitIfLessThan = 1,
+ WaitIfEqual = 2,
+};
+
+enum class YieldType : s64 {
+ WithoutCoreMigration = 0,
+ WithCoreMigration = -1,
+ ToAnyThread = -2,
+};
+
+enum class ThreadActivity : u32 {
+ Runnable = 0,
+ Paused = 1,
+};
+
+constexpr inline s32 IdealCoreDontCare = -1;
+constexpr inline s32 IdealCoreUseProcessValue = -2;
+constexpr inline s32 IdealCoreNoUpdate = -3;
+
+constexpr inline s32 LowestThreadPriority = 63;
+constexpr inline s32 HighestThreadPriority = 0;
+
} // namespace Kernel::Svc
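
The wrappers in svc_wrap.h below cast raw guest registers straight into these enums, so handlers still have to range-check the values themselves. A small sketch of that validation; IsValidArbitrationType is a hypothetical helper, and the enum is restated only to keep the snippet self-contained:

    #include <cstdint>

    enum class ArbitrationType : std::uint32_t {
        WaitIfLessThan = 0,
        DecrementAndWaitIfLessThan = 1,
        WaitIfEqual = 2,
    };

    // Hypothetical helper: the static_cast in the wrapper accepts any u32,
    // so the handler must reject out-of-range values itself.
    constexpr bool IsValidArbitrationType(ArbitrationType type) {
        return type == ArbitrationType::WaitIfLessThan ||
               type == ArbitrationType::DecrementAndWaitIfLessThan ||
               type == ArbitrationType::WaitIfEqual;
    }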
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 0b6dd9df0..96afd544b 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -7,6 +7,7 @@
#include "common/common_types.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
+#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
namespace Kernel {
@@ -57,6 +58,14 @@ void SvcWrap64(Core::System& system) {
func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw);
}
+// Used by SetThreadActivity
+template <ResultCode func(Core::System&, Handle, Svc::ThreadActivity)>
+void SvcWrap64(Core::System& system) {
+ FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
+ static_cast<Svc::ThreadActivity>(Param(system, 1)))
+ .raw);
+}
+
template <ResultCode func(Core::System&, u32, u64, u64, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
@@ -157,9 +166,18 @@ void SvcWrap64(Core::System& system) {
.raw);
}
-template <ResultCode func(Core::System&, u32, u32*, u64*)>
+// Used by SetThreadCoreMask
+template <ResultCode func(Core::System&, Handle, s32, u64)>
void SvcWrap64(Core::System& system) {
- u32 param_1 = 0;
+ FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
+ static_cast<s32>(Param(system, 1)), Param(system, 2))
+ .raw);
+}
+
+// Used by GetThreadCoreMask
+template <ResultCode func(Core::System&, Handle, s32*, u64*)>
+void SvcWrap64(Core::System& system) {
+ s32 param_1 = 0;
u64 param_2 = 0;
const ResultCode retval = func(system, static_cast<u32>(Param(system, 2)), &param_1, &param_2);
@@ -215,9 +233,10 @@ void SvcWrap64(Core::System& system) {
func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2)).raw);
}
-template <ResultCode func(Core::System&, u32*, u64, u64, s64)>
+// Used by WaitSynchronization
+template <ResultCode func(Core::System&, s32*, u64, u64, s64)>
void SvcWrap64(Core::System& system) {
- u32 param_1 = 0;
+ s32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), static_cast<u32>(Param(system, 2)),
static_cast<s64>(Param(system, 3)))
.raw;
@@ -276,18 +295,22 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u64, u32, s32, s64)>
+// Used by WaitForAddress
+template <ResultCode func(Core::System&, u64, Svc::ArbitrationType, s32, s64)>
void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)),
- static_cast<s32>(Param(system, 2)), static_cast<s64>(Param(system, 3)))
- .raw);
+ FuncReturn(system,
+ func(system, Param(system, 0), static_cast<Svc::ArbitrationType>(Param(system, 1)),
+ static_cast<s32>(Param(system, 2)), static_cast<s64>(Param(system, 3)))
+ .raw);
}
-template <ResultCode func(Core::System&, u64, u32, s32, s32)>
+// Used by SignalToAddress
+template <ResultCode func(Core::System&, u64, Svc::SignalType, s32, s32)>
void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)),
- static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
- .raw);
+ FuncReturn(system,
+ func(system, Param(system, 0), static_cast<Svc::SignalType>(Param(system, 1)),
+ static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
+ .raw);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -467,12 +490,35 @@ void SvcWrap32(Core::System& system) {
FuncReturn(system, retval);
}
+// Used by GetThreadCoreMask32
+template <ResultCode func(Core::System&, Handle, s32*, u32*, u32*)>
+void SvcWrap32(Core::System& system) {
+ s32 param_1 = 0;
+ u32 param_2 = 0;
+ u32 param_3 = 0;
+
+ const u32 retval = func(system, Param32(system, 2), &param_1, &param_2, &param_3).raw;
+ system.CurrentArmInterface().SetReg(1, param_1);
+ system.CurrentArmInterface().SetReg(2, param_2);
+ system.CurrentArmInterface().SetReg(3, param_3);
+ FuncReturn(system, retval);
+}
+
// Used by SignalProcessWideKey32
template <void func(Core::System&, u32, s32)>
void SvcWrap32(Core::System& system) {
func(system, static_cast<u32>(Param(system, 0)), static_cast<s32>(Param(system, 1)));
}
+// Used by SetThreadActivity32
+template <ResultCode func(Core::System&, Handle, Svc::ThreadActivity)>
+void SvcWrap32(Core::System& system) {
+ const u32 retval = func(system, static_cast<Handle>(Param(system, 0)),
+ static_cast<Svc::ThreadActivity>(Param(system, 1)))
+ .raw;
+ FuncReturn(system, retval);
+}
+
// Used by SetThreadPriority32
template <ResultCode func(Core::System&, Handle, u32)>
void SvcWrap32(Core::System& system) {
@@ -481,7 +527,7 @@ void SvcWrap32(Core::System& system) {
FuncReturn(system, retval);
}
-// Used by SetThreadCoreMask32
+// Used by SetMemoryAttribute32
template <ResultCode func(Core::System&, Handle, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval =
@@ -491,6 +537,16 @@ void SvcWrap32(Core::System& system) {
FuncReturn(system, retval);
}
+// Used by SetThreadCoreMask32
+template <ResultCode func(Core::System&, Handle, s32, u32, u32)>
+void SvcWrap32(Core::System& system) {
+ const u32 retval =
+ func(system, static_cast<Handle>(Param(system, 0)), static_cast<s32>(Param(system, 1)),
+ static_cast<u32>(Param(system, 2)), static_cast<u32>(Param(system, 3)))
+ .raw;
+ FuncReturn(system, retval);
+}
+
// Used by WaitProcessWideKeyAtomic32
template <ResultCode func(Core::System&, u32, u32, Handle, u32, u32)>
void SvcWrap32(Core::System& system) {
@@ -503,22 +559,23 @@ void SvcWrap32(Core::System& system) {
}
// Used by WaitForAddress32
-template <ResultCode func(Core::System&, u32, u32, s32, u32, u32)>
+template <ResultCode func(Core::System&, u32, Svc::ArbitrationType, s32, u32, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
- static_cast<u32>(Param(system, 1)), static_cast<s32>(Param(system, 2)),
- static_cast<u32>(Param(system, 3)), static_cast<u32>(Param(system, 4)))
+ static_cast<Svc::ArbitrationType>(Param(system, 1)),
+ static_cast<s32>(Param(system, 2)), static_cast<u32>(Param(system, 3)),
+ static_cast<u32>(Param(system, 4)))
.raw;
FuncReturn(system, retval);
}
// Used by SignalToAddress32
-template <ResultCode func(Core::System&, u32, u32, s32, s32)>
+template <ResultCode func(Core::System&, u32, Svc::SignalType, s32, s32)>
void SvcWrap32(Core::System& system) {
- const u32 retval =
- func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
- static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
- .raw;
+ const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
+ static_cast<Svc::SignalType>(Param(system, 1)),
+ static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
+ .raw;
FuncReturn(system, retval);
}
@@ -539,9 +596,9 @@ void SvcWrap32(Core::System& system) {
}
// Used by WaitSynchronization32
-template <ResultCode func(Core::System&, u32, u32, s32, u32, Handle*)>
+template <ResultCode func(Core::System&, u32, u32, s32, u32, s32*)>
void SvcWrap32(Core::System& system) {
- u32 param_1 = 0;
+ s32 param_1 = 0;
const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2),
Param32(system, 3), &param_1)
.raw;
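
All of these wrappers rely on the same mechanism: SvcWrap64/SvcWrap32 are overloaded on the handler signature given as a non-type template argument, so naming the handler selects the wrapper that unpacks the right registers. A minimal model of the dispatch; System, Param, FuncReturn and the handler are stand-ins for the real definitions in this header and svc.cpp:

    #include <cstdint>

    struct System {
        std::uint64_t regs[8]{};
    };

    struct ResultCode {
        std::uint32_t raw;
    };

    enum class ThreadActivity : std::uint32_t { Runnable = 0, Paused = 1 };

    inline std::uint64_t Param(const System& system, int index) {
        return system.regs[index];
    }
    inline void FuncReturn(System& system, std::uint32_t value) {
        system.regs[0] = value;
    }

    // One overload exists per handler signature; naming the handler as the
    // template argument makes the compiler pick the matching wrapper.
    template <ResultCode func(System&, std::uint32_t, ThreadActivity)>
    void SvcWrap64(System& system) {
        FuncReturn(system, func(system, static_cast<std::uint32_t>(Param(system, 0)),
                                static_cast<ThreadActivity>(Param(system, 1)))
                               .raw);
    }

    ResultCode SetThreadActivity(System&, std::uint32_t, ThreadActivity) {
        return ResultCode{0};
    }

    // The SVC table then stores plain function pointers:
    void (*const svc_0x32)(System&) = SvcWrap64<SetThreadActivity>;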
diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp
deleted file mode 100644
index 8b875d853..000000000
--- a/src/core/hle/kernel/synchronization.cpp
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include "core/core.h"
-#include "core/hle/kernel/errors.h"
-#include "core/hle/kernel/handle_table.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/scheduler.h"
-#include "core/hle/kernel/synchronization.h"
-#include "core/hle/kernel/synchronization_object.h"
-#include "core/hle/kernel/thread.h"
-#include "core/hle/kernel/time_manager.h"
-
-namespace Kernel {
-
-Synchronization::Synchronization(Core::System& system) : system{system} {}
-
-void Synchronization::SignalObject(SynchronizationObject& obj) const {
- auto& kernel = system.Kernel();
- SchedulerLock lock(kernel);
- if (obj.IsSignaled()) {
- for (auto thread : obj.GetWaitingThreads()) {
- if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) {
- if (thread->GetStatus() != ThreadStatus::WaitHLEEvent) {
- ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch);
- ASSERT(thread->IsWaitingSync());
- }
- thread->SetSynchronizationResults(&obj, RESULT_SUCCESS);
- thread->ResumeFromWait();
- }
- }
- obj.ClearWaitingThreads();
- }
-}
-
-std::pair<ResultCode, Handle> Synchronization::WaitFor(
- std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) {
- auto& kernel = system.Kernel();
- auto* const thread = system.CurrentScheduler().GetCurrentThread();
- Handle event_handle = InvalidHandle;
- {
- SchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
- const auto itr =
- std::find_if(sync_objects.begin(), sync_objects.end(),
- [thread](const std::shared_ptr<SynchronizationObject>& object) {
- return object->IsSignaled();
- });
-
- if (itr != sync_objects.end()) {
- // We found a ready object, acquire it and set the result value
- SynchronizationObject* object = itr->get();
- object->Acquire(thread);
- const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
- lock.CancelSleep();
- return {RESULT_SUCCESS, index};
- }
-
- if (nano_seconds == 0) {
- lock.CancelSleep();
- return {RESULT_TIMEOUT, InvalidHandle};
- }
-
- if (thread->IsPendingTermination()) {
- lock.CancelSleep();
- return {ERR_THREAD_TERMINATING, InvalidHandle};
- }
-
- if (thread->IsSyncCancelled()) {
- thread->SetSyncCancelled(false);
- lock.CancelSleep();
- return {ERR_SYNCHRONIZATION_CANCELED, InvalidHandle};
- }
-
- for (auto& object : sync_objects) {
- object->AddWaitingThread(SharedFrom(thread));
- }
-
- thread->SetSynchronizationObjects(&sync_objects);
- thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
- thread->SetStatus(ThreadStatus::WaitSynch);
- thread->SetWaitingSync(true);
- }
- thread->SetWaitingSync(false);
-
- if (event_handle != InvalidHandle) {
- auto& time_manager = kernel.TimeManager();
- time_manager.UnscheduleTimeEvent(event_handle);
- }
-
- {
- SchedulerLock lock(kernel);
- ResultCode signaling_result = thread->GetSignalingResult();
- SynchronizationObject* signaling_object = thread->GetSignalingObject();
- thread->SetSynchronizationObjects(nullptr);
- auto shared_thread = SharedFrom(thread);
- for (auto& obj : sync_objects) {
- obj->RemoveWaitingThread(shared_thread);
- }
- if (signaling_object != nullptr) {
- const auto itr = std::find_if(
- sync_objects.begin(), sync_objects.end(),
- [signaling_object](const std::shared_ptr<SynchronizationObject>& object) {
- return object.get() == signaling_object;
- });
- ASSERT(itr != sync_objects.end());
- signaling_object->Acquire(thread);
- const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
- return {signaling_result, index};
- }
- return {signaling_result, -1};
- }
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/synchronization.h b/src/core/hle/kernel/synchronization.h
deleted file mode 100644
index 379f4b1d3..000000000
--- a/src/core/hle/kernel/synchronization.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <memory>
-#include <utility>
-#include <vector>
-
-#include "core/hle/kernel/object.h"
-#include "core/hle/result.h"
-
-namespace Core {
-class System;
-} // namespace Core
-
-namespace Kernel {
-
-class SynchronizationObject;
-
-/**
- * The 'Synchronization' class is an interface for handling synchronization methods
- * used by Synchronization objects and synchronization SVCs. This centralizes processing of
- * such
- */
-class Synchronization {
-public:
- explicit Synchronization(Core::System& system);
-
- /// Signals a synchronization object, waking up all its waiting threads
- void SignalObject(SynchronizationObject& obj) const;
-
- /// Checks whether waiting on any of the sync_objects is necessary. If one is
- /// already signaled, returns Success and the handle index of that object;
- /// otherwise the current thread sleeps until nano_seconds elapse or one of
- /// the objects is signaled.
- std::pair<ResultCode, Handle> WaitFor(
- std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds);
-
-private:
- Core::System& system;
-};
-} // namespace Kernel
diff --git a/src/core/hle/kernel/synchronization_object.cpp b/src/core/hle/kernel/synchronization_object.cpp
deleted file mode 100644
index ba4d39157..000000000
--- a/src/core/hle/kernel/synchronization_object.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <algorithm>
-#include "common/assert.h"
-#include "common/common_types.h"
-#include "common/logging/log.h"
-#include "core/core.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/synchronization.h"
-#include "core/hle/kernel/synchronization_object.h"
-#include "core/hle/kernel/thread.h"
-
-namespace Kernel {
-
-SynchronizationObject::SynchronizationObject(KernelCore& kernel) : Object{kernel} {}
-SynchronizationObject::~SynchronizationObject() = default;
-
-void SynchronizationObject::Signal() {
- kernel.Synchronization().SignalObject(*this);
-}
-
-void SynchronizationObject::AddWaitingThread(std::shared_ptr<Thread> thread) {
- auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread);
- if (itr == waiting_threads.end())
- waiting_threads.push_back(std::move(thread));
-}
-
-void SynchronizationObject::RemoveWaitingThread(std::shared_ptr<Thread> thread) {
- auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread);
- // If a thread passed multiple handles to the same object,
- // the kernel might attempt to remove the thread from the object's
- // waiting threads list multiple times.
- if (itr != waiting_threads.end())
- waiting_threads.erase(itr);
-}
-
-void SynchronizationObject::ClearWaitingThreads() {
- waiting_threads.clear();
-}
-
-const std::vector<std::shared_ptr<Thread>>& SynchronizationObject::GetWaitingThreads() const {
- return waiting_threads;
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/synchronization_object.h b/src/core/hle/kernel/synchronization_object.h
deleted file mode 100644
index f89b24204..000000000
--- a/src/core/hle/kernel/synchronization_object.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <memory>
-#include <vector>
-
-#include "core/hle/kernel/object.h"
-
-namespace Kernel {
-
-class KernelCore;
-class Synchronization;
-class Thread;
-
-/// Class that represents a Kernel object that a thread can be waiting on
-class SynchronizationObject : public Object {
-public:
- explicit SynchronizationObject(KernelCore& kernel);
- ~SynchronizationObject() override;
-
- /**
- * Check if the specified thread should wait until the object is available
- * @param thread The thread about which we're deciding.
- * @return True if the current thread should wait due to this object being unavailable
- */
- virtual bool ShouldWait(const Thread* thread) const = 0;
-
- /// Acquire/lock the object for the specified thread if it is available
- virtual void Acquire(Thread* thread) = 0;
-
- /// Signal this object
- virtual void Signal();
-
- virtual bool IsSignaled() const {
- return is_signaled;
- }
-
- /**
- * Add a thread to wait on this object
- * @param thread Pointer to thread to add
- */
- void AddWaitingThread(std::shared_ptr<Thread> thread);
-
- /**
- * Removes a thread from waiting on this object (e.g. if it was resumed already)
- * @param thread Pointer to thread to remove
- */
- void RemoveWaitingThread(std::shared_ptr<Thread> thread);
-
- /// Get a const reference to the waiting threads list for debug use
- const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const;
-
- void ClearWaitingThreads();
-
-protected:
- bool is_signaled{}; // Whether this sync object is currently signaled
-
-private:
- /// Threads waiting for this object to become available
- std::vector<std::shared_ptr<Thread>> waiting_threads;
-};
-
-// Specialization of DynamicObjectCast for SynchronizationObjects
-template <>
-inline std::shared_ptr<SynchronizationObject> DynamicObjectCast<SynchronizationObject>(
- std::shared_ptr<Object> object) {
- if (object != nullptr && object->IsWaitable()) {
- return std::static_pointer_cast<SynchronizationObject>(object);
- }
- return nullptr;
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
deleted file mode 100644
index d132aba34..000000000
--- a/src/core/hle/kernel/thread.cpp
+++ /dev/null
@@ -1,540 +0,0 @@
-// Copyright 2014 Citra Emulator Project / PPSSPP Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <algorithm>
-#include <cinttypes>
-#include <optional>
-#include <vector>
-
-#include "common/assert.h"
-#include "common/common_types.h"
-#include "common/fiber.h"
-#include "common/logging/log.h"
-#include "common/thread_queue_list.h"
-#include "core/arm/arm_interface.h"
-#include "core/arm/unicorn/arm_unicorn.h"
-#include "core/core.h"
-#include "core/cpu_manager.h"
-#include "core/hardware_properties.h"
-#include "core/hle/kernel/errors.h"
-#include "core/hle/kernel/handle_table.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
-#include "core/hle/kernel/thread.h"
-#include "core/hle/kernel/time_manager.h"
-#include "core/hle/result.h"
-#include "core/memory.h"
-
-#ifdef ARCHITECTURE_x86_64
-#include "core/arm/dynarmic/arm_dynarmic_32.h"
-#include "core/arm/dynarmic/arm_dynarmic_64.h"
-#endif
-
-namespace Kernel {
-
-bool Thread::ShouldWait(const Thread* thread) const {
- return status != ThreadStatus::Dead;
-}
-
-bool Thread::IsSignaled() const {
- return status == ThreadStatus::Dead;
-}
-
-void Thread::Acquire(Thread* thread) {
- ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
-}
-
-Thread::Thread(KernelCore& kernel) : SynchronizationObject{kernel} {}
-Thread::~Thread() = default;
-
-void Thread::Stop() {
- {
- SchedulerLock lock(kernel);
- SetStatus(ThreadStatus::Dead);
- Signal();
- kernel.GlobalHandleTable().Close(global_handle);
-
- if (owner_process) {
- owner_process->UnregisterThread(this);
-
- // Mark the TLS slot in the thread's page as free.
- owner_process->FreeTLSRegion(tls_address);
- }
- arm_interface.reset();
- has_exited = true;
- }
- global_handle = 0;
-}
-
-void Thread::ResumeFromWait() {
- SchedulerLock lock(kernel);
- switch (status) {
- case ThreadStatus::Paused:
- case ThreadStatus::WaitSynch:
- case ThreadStatus::WaitHLEEvent:
- case ThreadStatus::WaitSleep:
- case ThreadStatus::WaitIPC:
- case ThreadStatus::WaitMutex:
- case ThreadStatus::WaitCondVar:
- case ThreadStatus::WaitArb:
- case ThreadStatus::Dormant:
- break;
-
- case ThreadStatus::Ready:
- // The thread's wakeup callback must have already been cleared when the thread was first
- // awoken.
- ASSERT(hle_callback == nullptr);
- // If the thread is waiting on multiple wait objects, it might be awoken more than once
- // before actually resuming. We can ignore subsequent wakeups if the thread status has
- // already been set to ThreadStatus::Ready.
- return;
-
- case ThreadStatus::Running:
- DEBUG_ASSERT_MSG(false, "Thread with object id {} has already resumed.", GetObjectId());
- return;
- case ThreadStatus::Dead:
- // This should never happen, as threads must complete before being stopped.
- DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.",
- GetObjectId());
- return;
- }
-
- SetStatus(ThreadStatus::Ready);
-}
-
-void Thread::OnWakeUp() {
- SchedulerLock lock(kernel);
-
- SetStatus(ThreadStatus::Ready);
-}
-
-ResultCode Thread::Start() {
- SchedulerLock lock(kernel);
- SetStatus(ThreadStatus::Ready);
- return RESULT_SUCCESS;
-}
-
-void Thread::CancelWait() {
- SchedulerLock lock(kernel);
- if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) {
- is_sync_cancelled = true;
- return;
- }
- // TODO(Blinkhawk): Implement cancel of server session
- is_sync_cancelled = false;
- SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED);
- SetStatus(ThreadStatus::Ready);
-}
-
-static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
- u32 entry_point, u32 arg) {
- context = {};
- context.cpu_registers[0] = arg;
- context.cpu_registers[15] = entry_point;
- context.cpu_registers[13] = stack_top;
-}
-
-static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top,
- VAddr entry_point, u64 arg) {
- context = {};
- context.cpu_registers[0] = arg;
- context.pc = entry_point;
- context.sp = stack_top;
- // TODO(merry): Perform a hardware test to determine the below value.
- context.fpcr = 0;
-}
-
-std::shared_ptr<Common::Fiber>& Thread::GetHostContext() {
- return host_context;
-}
-
-ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
- std::string name, VAddr entry_point, u32 priority,
- u64 arg, s32 processor_id, VAddr stack_top,
- Process* owner_process) {
- std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
- void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
- return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top,
- owner_process, std::move(init_func), init_func_parameter);
-}
-
-ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
- std::string name, VAddr entry_point, u32 priority,
- u64 arg, s32 processor_id, VAddr stack_top,
- Process* owner_process,
- std::function<void(void*)>&& thread_start_func,
- void* thread_start_parameter) {
- auto& kernel = system.Kernel();
- // Check if priority is in range. Lowest priority -> highest priority id.
- if (priority > THREADPRIO_LOWEST && ((type_flags & THREADTYPE_IDLE) == 0)) {
- LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority);
- return ERR_INVALID_THREAD_PRIORITY;
- }
-
- if (processor_id > THREADPROCESSORID_MAX) {
- LOG_ERROR(Kernel_SVC, "Invalid processor id: {}", processor_id);
- return ERR_INVALID_PROCESSOR_ID;
- }
-
- if (owner_process) {
- if (!system.Memory().IsValidVirtualAddress(*owner_process, entry_point)) {
- LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point);
- // TODO (bunnei): Find the correct error code to use here
- return RESULT_UNKNOWN;
- }
- }
-
- std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel);
-
- thread->thread_id = kernel.CreateNewThreadID();
- thread->status = ThreadStatus::Dormant;
- thread->entry_point = entry_point;
- thread->stack_top = stack_top;
- thread->tpidr_el0 = 0;
- thread->nominal_priority = thread->current_priority = priority;
- thread->last_running_ticks = 0;
- thread->processor_id = processor_id;
- thread->ideal_core = processor_id;
- thread->affinity_mask = 1ULL << processor_id;
- thread->wait_objects = nullptr;
- thread->mutex_wait_address = 0;
- thread->condvar_wait_address = 0;
- thread->wait_handle = 0;
- thread->name = std::move(name);
- thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap();
- thread->owner_process = owner_process;
- thread->type = type_flags;
- if ((type_flags & THREADTYPE_IDLE) == 0) {
- auto& scheduler = kernel.GlobalScheduler();
- scheduler.AddThread(thread);
- }
- if (owner_process) {
- thread->tls_address = thread->owner_process->CreateTLSRegion();
- thread->owner_process->RegisterThread(thread.get());
- } else {
- thread->tls_address = 0;
- }
- // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
- // to initialize the context
- thread->arm_interface.reset();
- if ((type_flags & THREADTYPE_HLE) == 0) {
-#ifdef ARCHITECTURE_x86_64
- if (owner_process && !owner_process->Is64BitProcess()) {
- thread->arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
- system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(),
- processor_id);
- } else {
- thread->arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
- system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(),
- processor_id);
- }
-
-#else
- if (owner_process && !owner_process->Is64BitProcess()) {
- thread->arm_interface = std::make_shared<Core::ARM_Unicorn>(
- system, kernel.Interrupts(), kernel.IsMulticore(), ARM_Unicorn::Arch::AArch32,
- processor_id);
- } else {
- thread->arm_interface = std::make_shared<Core::ARM_Unicorn>(
- system, kernel.Interrupts(), kernel.IsMulticore(), ARM_Unicorn::Arch::AArch64,
- processor_id);
- }
- LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
-#endif
- ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top),
- static_cast<u32>(entry_point), static_cast<u32>(arg));
- ResetThreadContext64(thread->context_64, stack_top, entry_point, arg);
- }
- thread->host_context =
- std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
-
- return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
-}
-
-void Thread::SetPriority(u32 priority) {
- SchedulerLock lock(kernel);
- ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
- "Invalid priority value.");
- nominal_priority = priority;
- UpdatePriority();
-}
-
-void Thread::SetSynchronizationResults(SynchronizationObject* object, ResultCode result) {
- signaling_object = object;
- signaling_result = result;
-}
-
-s32 Thread::GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const {
- ASSERT_MSG(!wait_objects->empty(), "Thread is not waiting for anything");
- const auto match = std::find(wait_objects->rbegin(), wait_objects->rend(), object);
- return static_cast<s32>(std::distance(match, wait_objects->rend()) - 1);
-}
-
-VAddr Thread::GetCommandBufferAddress() const {
- // Offset from the start of TLS at which the IPC command buffer begins.
- constexpr u64 command_header_offset = 0x80;
- return GetTLSAddress() + command_header_offset;
-}
-
-Core::ARM_Interface& Thread::ArmInterface() {
- return *arm_interface;
-}
-
-const Core::ARM_Interface& Thread::ArmInterface() const {
- return *arm_interface;
-}
-
-void Thread::SetStatus(ThreadStatus new_status) {
- if (new_status == status) {
- return;
- }
-
- switch (new_status) {
- case ThreadStatus::Ready:
- case ThreadStatus::Running:
- SetSchedulingStatus(ThreadSchedStatus::Runnable);
- break;
- case ThreadStatus::Dormant:
- SetSchedulingStatus(ThreadSchedStatus::None);
- break;
- case ThreadStatus::Dead:
- SetSchedulingStatus(ThreadSchedStatus::Exited);
- break;
- default:
- SetSchedulingStatus(ThreadSchedStatus::Paused);
- break;
- }
-
- status = new_status;
-}
-
-void Thread::AddMutexWaiter(std::shared_ptr<Thread> thread) {
- if (thread->lock_owner.get() == this) {
- // If the thread is already waiting for this thread to release the mutex, ensure that the
- // waiters list is consistent and return without doing anything.
- const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
- ASSERT(iter != wait_mutex_threads.end());
- return;
- }
-
- // A thread can't wait on two different mutexes at the same time.
- ASSERT(thread->lock_owner == nullptr);
-
- // Ensure that the thread is not already in the list of mutex waiters
- const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
- ASSERT(iter == wait_mutex_threads.end());
-
- // Keep the list in an ordered fashion
- const auto insertion_point = std::find_if(
- wait_mutex_threads.begin(), wait_mutex_threads.end(),
- [&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); });
- wait_mutex_threads.insert(insertion_point, thread);
- thread->lock_owner = SharedFrom(this);
-
- UpdatePriority();
-}
-
-void Thread::RemoveMutexWaiter(std::shared_ptr<Thread> thread) {
- ASSERT(thread->lock_owner.get() == this);
-
- // Ensure that the thread is in the list of mutex waiters
- const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
- ASSERT(iter != wait_mutex_threads.end());
-
- wait_mutex_threads.erase(iter);
-
- thread->lock_owner = nullptr;
- UpdatePriority();
-}
-
-void Thread::UpdatePriority() {
- // If any of the threads waiting on the mutex have a higher priority
- // (taking into account priority inheritance), then this thread inherits
- // that thread's priority.
- u32 new_priority = nominal_priority;
- if (!wait_mutex_threads.empty()) {
- if (wait_mutex_threads.front()->current_priority < new_priority) {
- new_priority = wait_mutex_threads.front()->current_priority;
- }
- }
-
- if (new_priority == current_priority) {
- return;
- }
-
- if (GetStatus() == ThreadStatus::WaitCondVar) {
- owner_process->RemoveConditionVariableThread(SharedFrom(this));
- }
-
- SetCurrentPriority(new_priority);
-
- if (GetStatus() == ThreadStatus::WaitCondVar) {
- owner_process->InsertConditionVariableThread(SharedFrom(this));
- }
-
- if (!lock_owner) {
- return;
- }
-
- // Ensure that the thread is within the correct location in the waiting list.
- auto old_owner = lock_owner;
- lock_owner->RemoveMutexWaiter(SharedFrom(this));
- old_owner->AddMutexWaiter(SharedFrom(this));
-
- // Recursively update the priority of the thread that depends on the priority of this one.
- lock_owner->UpdatePriority();
-}
-
-bool Thread::AllSynchronizationObjectsReady() const {
- return std::none_of(wait_objects->begin(), wait_objects->end(),
- [this](const std::shared_ptr<SynchronizationObject>& object) {
- return object->ShouldWait(this);
- });
-}
-
-bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
- ASSERT(hle_callback);
- return hle_callback(std::move(thread));
-}
-
-ResultCode Thread::SetActivity(ThreadActivity value) {
- SchedulerLock lock(kernel);
-
- auto sched_status = GetSchedulingStatus();
-
- if (sched_status != ThreadSchedStatus::Runnable && sched_status != ThreadSchedStatus::Paused) {
- return ERR_INVALID_STATE;
- }
-
- if (IsPendingTermination()) {
- return RESULT_SUCCESS;
- }
-
- if (value == ThreadActivity::Paused) {
- if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) != 0) {
- return ERR_INVALID_STATE;
- }
- AddSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
- } else {
- if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) == 0) {
- return ERR_INVALID_STATE;
- }
- RemoveSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
- }
- return RESULT_SUCCESS;
-}
-
-ResultCode Thread::Sleep(s64 nanoseconds) {
- Handle event_handle{};
- {
- SchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
- SetStatus(ThreadStatus::WaitSleep);
- }
-
- if (event_handle != InvalidHandle) {
- auto& time_manager = kernel.TimeManager();
- time_manager.UnscheduleTimeEvent(event_handle);
- }
- return RESULT_SUCCESS;
-}
-
-std::pair<ResultCode, bool> Thread::YieldSimple() {
- bool is_redundant = false;
- {
- SchedulerLock lock(kernel);
- is_redundant = kernel.GlobalScheduler().YieldThread(this);
- }
- return {RESULT_SUCCESS, is_redundant};
-}
-
-std::pair<ResultCode, bool> Thread::YieldAndBalanceLoad() {
- bool is_redundant = false;
- {
- SchedulerLock lock(kernel);
- is_redundant = kernel.GlobalScheduler().YieldThreadAndBalanceLoad(this);
- }
- return {RESULT_SUCCESS, is_redundant};
-}
-
-std::pair<ResultCode, bool> Thread::YieldAndWaitForLoadBalancing() {
- bool is_redundant = false;
- {
- SchedulerLock lock(kernel);
- is_redundant = kernel.GlobalScheduler().YieldThreadAndWaitForLoadBalancing(this);
- }
- return {RESULT_SUCCESS, is_redundant};
-}
-
-void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
- const u32 old_state = scheduling_state;
- pausing_state |= static_cast<u32>(flag);
- const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
- scheduling_state = base_scheduling | pausing_state;
- kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
-}
-
-void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
- const u32 old_state = scheduling_state;
- pausing_state &= ~static_cast<u32>(flag);
- const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
- scheduling_state = base_scheduling | pausing_state;
- kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
-}
-
-void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
- const u32 old_state = scheduling_state;
- scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
- static_cast<u32>(new_status);
- kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
-}
-
-void Thread::SetCurrentPriority(u32 new_priority) {
- const u32 old_priority = std::exchange(current_priority, new_priority);
- kernel.GlobalScheduler().AdjustSchedulingOnPriority(this, old_priority);
-}
-
-ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
- SchedulerLock lock(kernel);
- const auto HighestSetCore = [](u64 mask, u32 max_cores) {
- for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
- if (((mask >> core) & 1) != 0) {
- return core;
- }
- }
- return -1;
- };
-
- const bool use_override = affinity_override_count != 0;
- if (new_core == THREADPROCESSORID_DONT_UPDATE) {
- new_core = use_override ? ideal_core_override : ideal_core;
- if ((new_affinity_mask & (1ULL << new_core)) == 0) {
- LOG_ERROR(Kernel, "New affinity mask is incorrect! new_core={}, new_affinity_mask={}",
- new_core, new_affinity_mask);
- return ERR_INVALID_COMBINATION;
- }
- }
- if (use_override) {
- ideal_core_override = new_core;
- affinity_mask_override = new_affinity_mask;
- } else {
- const u64 old_affinity_mask = std::exchange(affinity_mask, new_affinity_mask);
- ideal_core = new_core;
- if (old_affinity_mask != new_affinity_mask) {
- const s32 old_core = processor_id;
- if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
- if (static_cast<s32>(ideal_core) < 0) {
- processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES);
- } else {
- processor_id = ideal_core;
- }
- }
- kernel.GlobalScheduler().AdjustSchedulingOnAffinity(this, old_affinity_mask, old_core);
- }
- }
- return RESULT_SUCCESS;
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
deleted file mode 100644
index 8daf79fac..000000000
--- a/src/core/hle/kernel/thread.h
+++ /dev/null
@@ -1,681 +0,0 @@
-// Copyright 2014 Citra Emulator Project / PPSSPP Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <functional>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "common/common_types.h"
-#include "common/spin_lock.h"
-#include "core/arm/arm_interface.h"
-#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/synchronization_object.h"
-#include "core/hle/result.h"
-
-namespace Common {
-class Fiber;
-}
-
-namespace Core {
-class ARM_Interface;
-class System;
-} // namespace Core
-
-namespace Kernel {
-
-class GlobalScheduler;
-class KernelCore;
-class Process;
-class Scheduler;
-
-enum ThreadPriority : u32 {
- THREADPRIO_HIGHEST = 0, ///< Highest thread priority
- THREADPRIO_MAX_CORE_MIGRATION = 2, ///< Highest priority for a core migration
- THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps
- THREADPRIO_DEFAULT = 44, ///< Default thread priority for userland apps
- THREADPRIO_LOWEST = 63, ///< Lowest thread priority
- THREADPRIO_COUNT = 64, ///< Total number of possible thread priorities.
-};
-
-enum ThreadType : u32 {
- THREADTYPE_USER = 0x1,
- THREADTYPE_KERNEL = 0x2,
- THREADTYPE_HLE = 0x4,
- THREADTYPE_IDLE = 0x8,
- THREADTYPE_SUSPEND = 0x10,
-};
-
-enum ThreadProcessorId : s32 {
- /// Indicates that no particular processor core is preferred.
- THREADPROCESSORID_DONT_CARE = -1,
-
- /// Run thread on the ideal core specified by the process.
- THREADPROCESSORID_IDEAL = -2,
-
- /// Indicates that the preferred processor ID shouldn't be updated in
- /// a core mask setting operation.
- THREADPROCESSORID_DONT_UPDATE = -3,
-
- THREADPROCESSORID_0 = 0, ///< Run thread on core 0
- THREADPROCESSORID_1 = 1, ///< Run thread on core 1
- THREADPROCESSORID_2 = 2, ///< Run thread on core 2
- THREADPROCESSORID_3 = 3, ///< Run thread on core 3
- THREADPROCESSORID_MAX = 4, ///< Processor ID must be less than this
-
- /// Allowed CPU mask
- THREADPROCESSORID_DEFAULT_MASK = (1 << THREADPROCESSORID_0) | (1 << THREADPROCESSORID_1) |
- (1 << THREADPROCESSORID_2) | (1 << THREADPROCESSORID_3)
-};
-
-enum class ThreadStatus {
- Running, ///< Currently running
- Ready, ///< Ready to run
- Paused, ///< Paused by SetThreadActivity or debug
- WaitHLEEvent, ///< Waiting for hle event to finish
- WaitSleep, ///< Waiting due to a SleepThread SVC
- WaitIPC, ///< Waiting for the reply from an IPC request
- WaitSynch, ///< Waiting due to WaitSynchronization
- WaitMutex, ///< Waiting due to an ArbitrateLock svc
- WaitCondVar, ///< Waiting due to a WaitProcessWideKey svc
- WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc
- Dormant, ///< Created but not yet made ready
- Dead ///< Run to completion, or forcefully terminated
-};
-
-enum class ThreadWakeupReason {
- Signal, // The thread was woken up by WakeupAllWaitingThreads due to an object signal.
- Timeout // The thread was woken up due to a wait timeout.
-};
-
-enum class ThreadActivity : u32 {
- Normal = 0,
- Paused = 1,
-};
-
-enum class ThreadSchedStatus : u32 {
- None = 0,
- Paused = 1,
- Runnable = 2,
- Exited = 3,
-};
-
-enum class ThreadSchedFlags : u32 {
- ProcessPauseFlag = 1 << 4,
- ThreadPauseFlag = 1 << 5,
- ProcessDebugPauseFlag = 1 << 6,
- KernelInitPauseFlag = 1 << 8,
-};
-
-enum class ThreadSchedMasks : u32 {
- LowMask = 0x000f,
- HighMask = 0xfff0,
- ForcePauseMask = 0x0070,
-};
-
-class Thread final : public SynchronizationObject {
-public:
- explicit Thread(KernelCore& kernel);
- ~Thread() override;
-
- using MutexWaitingThreads = std::vector<std::shared_ptr<Thread>>;
-
- using ThreadContext32 = Core::ARM_Interface::ThreadContext32;
- using ThreadContext64 = Core::ARM_Interface::ThreadContext64;
-
- using ThreadSynchronizationObjects = std::vector<std::shared_ptr<SynchronizationObject>>;
-
- using HLECallback = std::function<bool(std::shared_ptr<Thread> thread)>;
-
- /**
- * Creates and returns a new thread. The new thread is immediately scheduled
- * @param system The instance of the whole system
- * @param name The friendly name desired for the thread
- * @param entry_point The address at which the thread should start execution
- * @param priority The thread's priority
- * @param arg User data to pass to the thread
- * @param processor_id The ID(s) of the processors on which the thread is desired to be run
- * @param stack_top The address of the thread's stack top
- * @param owner_process The parent process for the thread; if null, it's a kernel thread
- * @return A shared pointer to the newly created thread
- */
- static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags,
- std::string name, VAddr entry_point,
- u32 priority, u64 arg, s32 processor_id,
- VAddr stack_top, Process* owner_process);
-
- /**
- * Creates and returns a new thread. The new thread is immediately scheduled
- * @param system The instance of the whole system
- * @param name The friendly name desired for the thread
- * @param entry_point The address at which the thread should start execution
- * @param priority The thread's priority
- * @param arg User data to pass to the thread
- * @param processor_id The ID(s) of the processors on which the thread is desired to be run
- * @param stack_top The address of the thread's stack top
- * @param owner_process The parent process for the thread; if null, it's a kernel thread
- * @param thread_start_func The function where the host context will start.
- * @param thread_start_parameter The parameter which will be passed to the host context on init
- * @return A shared pointer to the newly created thread
- */
- static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags,
- std::string name, VAddr entry_point,
- u32 priority, u64 arg, s32 processor_id,
- VAddr stack_top, Process* owner_process,
- std::function<void(void*)>&& thread_start_func,
- void* thread_start_parameter);
-
- std::string GetName() const override {
- return name;
- }
-
- void SetName(std::string new_name) {
- name = std::move(new_name);
- }
-
- std::string GetTypeName() const override {
- return "Thread";
- }
-
- static constexpr HandleType HANDLE_TYPE = HandleType::Thread;
- HandleType GetHandleType() const override {
- return HANDLE_TYPE;
- }
-
- bool ShouldWait(const Thread* thread) const override;
- void Acquire(Thread* thread) override;
- bool IsSignaled() const override;
-
- /**
- * Gets the thread's current priority
- * @return The current thread's priority
- */
- u32 GetPriority() const {
- return current_priority;
- }
-
- /**
- * Gets the thread's nominal priority.
- * @return The current thread's nominal priority.
- */
- u32 GetNominalPriority() const {
- return nominal_priority;
- }
-
- /**
- * Sets the thread's current priority
- * @param priority The new priority
- */
- void SetPriority(u32 priority);
-
- /// Adds a thread to the list of threads that are waiting for a lock held by this thread.
- void AddMutexWaiter(std::shared_ptr<Thread> thread);
-
- /// Removes a thread from the list of threads that are waiting for a lock held by this thread.
- void RemoveMutexWaiter(std::shared_ptr<Thread> thread);
-
- /// Recalculates the current priority taking into account priority inheritance.
- void UpdatePriority();
-
- /// Changes the core that the thread is running or scheduled to run on.
- ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
-
- /**
- * Gets the thread's thread ID
- * @return The thread's ID
- */
- u64 GetThreadID() const {
- return thread_id;
- }
-
- /// Resumes a thread from waiting
- void ResumeFromWait();
-
- void OnWakeUp();
-
- ResultCode Start();
-
- /// Cancels a waiting operation that this thread may or may not be within.
- ///
- /// When the thread is within a waiting state, this will set the thread's
- /// waiting result to signal a canceled wait. The function will then resume
- /// this thread.
- ///
- void CancelWait();
-
- void SetSynchronizationResults(SynchronizationObject* object, ResultCode result);
-
- Core::ARM_Interface& ArmInterface();
-
- const Core::ARM_Interface& ArmInterface() const;
-
- SynchronizationObject* GetSignalingObject() const {
- return signaling_object;
- }
-
- ResultCode GetSignalingResult() const {
- return signaling_result;
- }
-
- /**
- * Retrieves the index that this particular object occupies in the list of objects
- * that the thread passed to WaitSynchronization, starting the search from the last element.
- *
- * It is used to set the output index of WaitSynchronization when the thread is awakened.
- *
- * When a thread wakes up due to an object signal, the kernel will use the index of the last
- * matching object in the wait objects list in case of having multiple instances of the same
- * object in the list.
- *
- * @param object Object to query the index of.
- */
- s32 GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const;
-
- /**
- * Stops a thread, invalidating it from further use
- */
- void Stop();
-
- /*
- * Returns the Thread Local Storage address of the current thread
- * @returns VAddr of the thread's TLS
- */
- VAddr GetTLSAddress() const {
- return tls_address;
- }
-
- /*
- * Returns the value of the TPIDR_EL0 Read/Write system register for this thread.
- * @returns The value of the TPIDR_EL0 register.
- */
- u64 GetTPIDR_EL0() const {
- return tpidr_el0;
- }
-
- /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread.
- void SetTPIDR_EL0(u64 value) {
- tpidr_el0 = value;
- }
-
- /*
- * Returns the address of the current thread's command buffer, located in the TLS.
- * @returns VAddr of the thread's command buffer.
- */
- VAddr GetCommandBufferAddress() const;
-
- ThreadContext32& GetContext32() {
- return context_32;
- }
-
- const ThreadContext32& GetContext32() const {
- return context_32;
- }
-
- ThreadContext64& GetContext64() {
- return context_64;
- }
-
- const ThreadContext64& GetContext64() const {
- return context_64;
- }
-
- bool IsHLEThread() const {
- return (type & THREADTYPE_HLE) != 0;
- }
-
- bool IsSuspendThread() const {
- return (type & THREADTYPE_SUSPEND) != 0;
- }
-
- bool IsIdleThread() const {
- return (type & THREADTYPE_IDLE) != 0;
- }
-
- bool WasRunning() const {
- return was_running;
- }
-
- void SetWasRunning(bool value) {
- was_running = value;
- }
-
- std::shared_ptr<Common::Fiber>& GetHostContext();
-
- ThreadStatus GetStatus() const {
- return status;
- }
-
- void SetStatus(ThreadStatus new_status);
-
- u64 GetLastRunningTicks() const {
- return last_running_ticks;
- }
-
- u64 GetTotalCPUTimeTicks() const {
- return total_cpu_time_ticks;
- }
-
- void UpdateCPUTimeTicks(u64 ticks) {
- total_cpu_time_ticks += ticks;
- }
-
- s32 GetProcessorID() const {
- return processor_id;
- }
-
- void SetProcessorID(s32 new_core) {
- processor_id = new_core;
- }
-
- Process* GetOwnerProcess() {
- return owner_process;
- }
-
- const Process* GetOwnerProcess() const {
- return owner_process;
- }
-
- const ThreadSynchronizationObjects& GetSynchronizationObjects() const {
- return *wait_objects;
- }
-
- void SetSynchronizationObjects(ThreadSynchronizationObjects* objects) {
- wait_objects = objects;
- }
-
- void ClearSynchronizationObjects() {
- for (const auto& waiting_object : *wait_objects) {
- waiting_object->RemoveWaitingThread(SharedFrom(this));
- }
- wait_objects->clear();
- }
-
- /// Determines whether all the objects this thread is waiting on are ready.
- bool AllSynchronizationObjectsReady() const;
-
- const MutexWaitingThreads& GetMutexWaitingThreads() const {
- return wait_mutex_threads;
- }
-
- Thread* GetLockOwner() const {
- return lock_owner.get();
- }
-
- void SetLockOwner(std::shared_ptr<Thread> owner) {
- lock_owner = std::move(owner);
- }
-
- VAddr GetCondVarWaitAddress() const {
- return condvar_wait_address;
- }
-
- void SetCondVarWaitAddress(VAddr address) {
- condvar_wait_address = address;
- }
-
- VAddr GetMutexWaitAddress() const {
- return mutex_wait_address;
- }
-
- void SetMutexWaitAddress(VAddr address) {
- mutex_wait_address = address;
- }
-
- Handle GetWaitHandle() const {
- return wait_handle;
- }
-
- void SetWaitHandle(Handle handle) {
- wait_handle = handle;
- }
-
- VAddr GetArbiterWaitAddress() const {
- return arb_wait_address;
- }
-
- void SetArbiterWaitAddress(VAddr address) {
- arb_wait_address = address;
- }
-
- bool HasHLECallback() const {
- return hle_callback != nullptr;
- }
-
- void SetHLECallback(HLECallback callback) {
- hle_callback = std::move(callback);
- }
-
- void SetHLETimeEvent(Handle time_event) {
- hle_time_event = time_event;
- }
-
- void SetHLESyncObject(SynchronizationObject* object) {
- hle_object = object;
- }
-
- Handle GetHLETimeEvent() const {
- return hle_time_event;
- }
-
- SynchronizationObject* GetHLESyncObject() const {
- return hle_object;
- }
-
- void InvalidateHLECallback() {
- SetHLECallback(nullptr);
- }
-
- bool InvokeHLECallback(std::shared_ptr<Thread> thread);
-
- u32 GetIdealCore() const {
- return ideal_core;
- }
-
- u64 GetAffinityMask() const {
- return affinity_mask;
- }
-
- ResultCode SetActivity(ThreadActivity value);
-
- /// Sleeps this thread for the given amount of nanoseconds.
- ResultCode Sleep(s64 nanoseconds);
-
- /// Yields this thread without rebalancing loads.
- std::pair<ResultCode, bool> YieldSimple();
-
- /// Yields this thread and does a load rebalancing.
- std::pair<ResultCode, bool> YieldAndBalanceLoad();
-
- /// Yields this thread and if the core is left idle, loads are rebalanced
- std::pair<ResultCode, bool> YieldAndWaitForLoadBalancing();
-
- void IncrementYieldCount() {
- yield_count++;
- }
-
- u64 GetYieldCount() const {
- return yield_count;
- }
-
- ThreadSchedStatus GetSchedulingStatus() const {
- return static_cast<ThreadSchedStatus>(scheduling_state &
- static_cast<u32>(ThreadSchedMasks::LowMask));
- }
-
- bool IsRunnable() const {
- return scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable);
- }
-
- bool IsRunning() const {
- return is_running;
- }
-
- void SetIsRunning(bool value) {
- is_running = value;
- }
-
- bool IsSyncCancelled() const {
- return is_sync_cancelled;
- }
-
- void SetSyncCancelled(bool value) {
- is_sync_cancelled = value;
- }
-
- Handle GetGlobalHandle() const {
- return global_handle;
- }
-
- bool IsWaitingForArbitration() const {
- return waiting_for_arbitration;
- }
-
- void WaitForArbitration(bool set) {
- waiting_for_arbitration = set;
- }
-
- bool IsWaitingSync() const {
- return is_waiting_on_sync;
- }
-
- void SetWaitingSync(bool is_waiting) {
- is_waiting_on_sync = is_waiting;
- }
-
- bool IsPendingTermination() const {
- return will_be_terminated || GetSchedulingStatus() == ThreadSchedStatus::Exited;
- }
-
- bool IsPaused() const {
- return pausing_state != 0;
- }
-
- bool IsContinuousOnSVC() const {
- return is_continuous_on_svc;
- }
-
- void SetContinuousOnSVC(bool is_continuous) {
- is_continuous_on_svc = is_continuous;
- }
-
- bool IsPhantomMode() const {
- return is_phantom_mode;
- }
-
- void SetPhantomMode(bool phantom) {
- is_phantom_mode = phantom;
- }
-
- bool HasExited() const {
- return has_exited;
- }
-
-private:
- friend class GlobalScheduler;
- friend class Scheduler;
-
- void SetSchedulingStatus(ThreadSchedStatus new_status);
- void AddSchedulingFlag(ThreadSchedFlags flag);
- void RemoveSchedulingFlag(ThreadSchedFlags flag);
-
- void SetCurrentPriority(u32 new_priority);
-
- Common::SpinLock context_guard{};
- ThreadContext32 context_32{};
- ThreadContext64 context_64{};
- std::unique_ptr<Core::ARM_Interface> arm_interface{};
- std::shared_ptr<Common::Fiber> host_context{};
-
- u64 thread_id = 0;
-
- ThreadStatus status = ThreadStatus::Dormant;
-
- VAddr entry_point = 0;
- VAddr stack_top = 0;
-
- ThreadType type;
-
- /// Nominal thread priority, as set by the emulated application.
- /// The nominal priority is the thread priority without priority
- /// inheritance taken into account.
- u32 nominal_priority = 0;
-
- /// Current thread priority. This may change over the course of the
- /// thread's lifetime in order to facilitate priority inheritance.
- u32 current_priority = 0;
-
- u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
- u64 last_running_ticks = 0; ///< CPU tick when thread was last running
- u64 yield_count = 0; ///< Number of redundant yields carried by this thread.
- ///< A redundant yield is one where no scheduling change occurs.
-
- s32 processor_id = 0;
-
- VAddr tls_address = 0; ///< Virtual address of the Thread Local Storage of the thread
- u64 tpidr_el0 = 0; ///< TPIDR_EL0 read/write system register.
-
- /// Process that owns this thread
- Process* owner_process;
-
- /// Objects that the thread is waiting on, in the same order as they were
- /// passed to WaitSynchronization.
- ThreadSynchronizationObjects* wait_objects;
-
- SynchronizationObject* signaling_object;
- ResultCode signaling_result{RESULT_SUCCESS};
-
- /// List of threads that are waiting for a mutex that is held by this thread.
- MutexWaitingThreads wait_mutex_threads;
-
- /// Thread that owns the lock that this thread is waiting for.
- std::shared_ptr<Thread> lock_owner;
-
- /// If waiting on a ConditionVariable, this is the ConditionVariable address
- VAddr condvar_wait_address = 0;
- /// If waiting on a Mutex, this is the mutex address
- VAddr mutex_wait_address = 0;
- /// The handle used to wait for the mutex.
- Handle wait_handle = 0;
-
- /// If waiting for an AddressArbiter, this is the address being waited on.
- VAddr arb_wait_address{0};
- bool waiting_for_arbitration{};
-
- /// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
- Handle global_handle = 0;
-
- /// Callback for HLE Events
- HLECallback hle_callback;
- Handle hle_time_event;
- SynchronizationObject* hle_object;
-
- Scheduler* scheduler = nullptr;
-
- u32 ideal_core{0xFFFFFFFF};
- u64 affinity_mask{0x1};
-
- s32 ideal_core_override = -1;
- u64 affinity_mask_override = 0x1;
- u32 affinity_override_count = 0;
-
- u32 scheduling_state = 0;
- u32 pausing_state = 0;
- bool is_running = false;
- bool is_waiting_on_sync = false;
- bool is_sync_cancelled = false;
-
- bool is_continuous_on_svc = false;
-
- bool will_be_terminated = false;
- bool is_phantom_mode = false;
- bool has_exited = false;
-
- bool was_running = false;
-
- std::string name;
-};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index 95f2446c9..fd0630019 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -7,9 +7,9 @@
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/scheduler.h"
-#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
namespace Kernel {
@@ -18,41 +18,30 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
time_manager_event_type = Core::Timing::CreateEvent(
"Kernel::TimeManagerCallback",
[this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
- const SchedulerLock lock(system.Kernel());
- const auto proper_handle = static_cast<Handle>(thread_handle);
- if (cancelled_events[proper_handle]) {
- return;
+ std::shared_ptr<KThread> thread;
+ {
+ std::lock_guard lock{mutex};
+ thread = SharedFrom<KThread>(reinterpret_cast<KThread*>(thread_handle));
}
- auto thread = this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
- thread->OnWakeUp();
+ thread->Wakeup();
});
}
-void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds) {
- event_handle = timetask->GetGlobalHandle();
+void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
+ std::lock_guard lock{mutex};
if (nanoseconds > 0) {
- ASSERT(timetask);
- ASSERT(timetask->GetStatus() != ThreadStatus::Ready);
- ASSERT(timetask->GetStatus() != ThreadStatus::WaitMutex);
+ ASSERT(thread);
+ ASSERT(thread->GetState() != ThreadState::Runnable);
system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{nanoseconds},
- time_manager_event_type, event_handle);
- } else {
- event_handle = InvalidHandle;
+ time_manager_event_type,
+ reinterpret_cast<uintptr_t>(thread));
}
- cancelled_events[event_handle] = false;
}
-void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
- if (event_handle == InvalidHandle) {
- return;
- }
- system.CoreTiming().UnscheduleEvent(time_manager_event_type, event_handle);
- cancelled_events[event_handle] = true;
-}
-
-void TimeManager::CancelTimeEvent(Thread* time_task) {
- Handle event_handle = time_task->GetGlobalHandle();
- UnscheduleTimeEvent(event_handle);
+void TimeManager::UnscheduleTimeEvent(KThread* thread) {
+ std::lock_guard lock{mutex};
+ system.CoreTiming().UnscheduleEvent(time_manager_event_type,
+ reinterpret_cast<uintptr_t>(thread));
}
} // namespace Kernel
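The hunk above replaces the old Handle-keyed bookkeeping (a global handle lookup plus a cancelled_events tombstone map) with the KThread pointer itself: the pointer is cast to an integer when scheduled as CoreTiming userdata and cast back inside the callback, with a TimeManager-local mutex serializing schedule, unschedule, and wakeup. A self-contained sketch of that pointer round trip, using stand-in types rather than the emulator's:

    #include <cstdint>

    struct FakeThread { int id; }; // stand-in for KThread

    int main() {
        FakeThread t{42};
        // Scheduling side: pointer smuggled through the timer queue as an integer.
        std::uintptr_t userdata = reinterpret_cast<std::uintptr_t>(&t);
        // Callback side: integer userdata recovered as the original pointer.
        auto* recovered = reinterpret_cast<FakeThread*>(userdata);
        return recovered->id == 42 ? 0 : 1; // round trip preserves identity
    }

The accompanying design choice is that UnscheduleTimeEvent now removes the pending event from the timer queue outright, so the cancelled-event map has nothing left to track.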
diff --git a/src/core/hle/kernel/time_manager.h b/src/core/hle/kernel/time_manager.h
index 307a18765..0d7f05f30 100644
--- a/src/core/hle/kernel/time_manager.h
+++ b/src/core/hle/kernel/time_manager.h
@@ -5,6 +5,7 @@
#pragma once
#include <memory>
+#include <mutex>
#include <unordered_map>
#include "core/hle/kernel/object.h"
@@ -19,7 +20,7 @@ struct EventType;
namespace Kernel {
-class Thread;
+class KThread;
/**
* The `TimeManager` takes care of scheduling time events on threads and executes their TimeUp
@@ -30,18 +31,15 @@ public:
explicit TimeManager(Core::System& system);
/// Schedule a time event on `timetask` thread that will expire in 'nanoseconds'
- /// returns a non-invalid handle in `event_handle` if correctly scheduled
- void ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds);
+ void ScheduleTimeEvent(KThread* time_task, s64 nanoseconds);
/// Unschedule an existing time event
- void UnscheduleTimeEvent(Handle event_handle);
-
- void CancelTimeEvent(Thread* time_task);
+ void UnscheduleTimeEvent(KThread* thread);
private:
Core::System& system;
std::shared_ptr<Core::Timing::EventType> time_manager_event_type;
- std::unordered_map<Handle, bool> cancelled_events;
+ std::mutex mutex;
};
} // namespace Kernel
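On the header side the public surface shrinks to match: no out-parameter handle, no separate CancelTimeEvent, and the only private state besides the event type is the new mutex. A compilable outline of the slimmed interface, with KThread forward-declared as in the hunk and s64 approximated by long long (both stand-ins, not the real header):

    #include <mutex>

    class KThread; // forward declaration, as in the real header

    class TimeManagerSketch {
    public:
        void ScheduleTimeEvent(KThread* thread, long long nanoseconds);
        void UnscheduleTimeEvent(KThread* thread);

    private:
        std::mutex mutex; // serializes schedule/unschedule against the timer callback
    };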
diff --git a/src/core/hle/kernel/transfer_memory.cpp b/src/core/hle/kernel/transfer_memory.cpp
index 765f408c3..6b0fc1591 100644
--- a/src/core/hle/kernel/transfer_memory.cpp
+++ b/src/core/hle/kernel/transfer_memory.cpp
@@ -2,6 +2,7 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/process.h"
@@ -17,6 +18,7 @@ TransferMemory::TransferMemory(KernelCore& kernel, Core::Memory::Memory& memory)
TransferMemory::~TransferMemory() {
// Release memory region when transfer memory is destroyed
Reset();
+ owner_process->GetResourceLimit()->Release(LimitableResource::TransferMemory, 1);
}
std::shared_ptr<TransferMemory> TransferMemory::Create(KernelCore& kernel,
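The destructor change above pairs a Release of one TransferMemory unit against the owning process's resource limit, presumably balancing a reservation made when the object was created (the creation side is not visible in this hunk). A minimal sketch of that reserve-on-create, release-on-destroy pairing, with stub types in place of the kernel's KResourceLimit:

    #include <cassert>

    enum class LimitableResource { TransferMemory };

    struct ResourceLimitStub {
        int in_use = 0;
        bool Reserve(LimitableResource, int n) { in_use += n; return true; } // creation side (assumed)
        void Release(LimitableResource, int n) { in_use -= n; }              // mirrors the new dtor line
    };

    struct TransferMemoryStub {
        ResourceLimitStub* limit;
        explicit TransferMemoryStub(ResourceLimitStub* l) : limit{l} {
            limit->Reserve(LimitableResource::TransferMemory, 1);
        }
        ~TransferMemoryStub() {
            limit->Release(LimitableResource::TransferMemory, 1);
        }
    };

    int main() {
        ResourceLimitStub limit;
        {
            TransferMemoryStub tm{&limit};
            assert(limit.in_use == 1); // one unit held while the object lives
        }
        assert(limit.in_use == 0);     // returned on destruction
    }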
diff --git a/src/core/hle/kernel/transfer_memory.h b/src/core/hle/kernel/transfer_memory.h
index 05e9f7464..777799d12 100644
--- a/src/core/hle/kernel/transfer_memory.h
+++ b/src/core/hle/kernel/transfer_memory.h
@@ -72,6 +72,8 @@ public:
/// is closed.
ResultCode Reset();
+ void Finalize() override {}
+
private:
/// The base address for the memory managed by this instance.
VAddr base_address{};
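The new no-op Finalize() override suggests the Object base class gains a Finalize virtual elsewhere in this changeset; TransferMemory has nothing extra to do because its cleanup already runs in Reset() and the destructor. A distilled sketch of the pattern, assuming the base declares it pure virtual (not confirmed by this hunk):

    struct ObjectStub {
        virtual ~ObjectStub() = default;
        virtual void Finalize() = 0; // assumption: pure virtual in the base
    };

    struct TransferMemoryStub : ObjectStub {
        void Finalize() override {} // no-op: cleanup already happens in Reset()/dtor
    };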
diff --git a/src/core/hle/kernel/writable_event.cpp b/src/core/hle/kernel/writable_event.cpp
deleted file mode 100644
index fc2f7c424..000000000
--- a/src/core/hle/kernel/writable_event.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <algorithm>
-#include "common/assert.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/readable_event.h"
-#include "core/hle/kernel/thread.h"
-#include "core/hle/kernel/writable_event.h"
-
-namespace Kernel {
-
-WritableEvent::WritableEvent(KernelCore& kernel) : Object{kernel} {}
-WritableEvent::~WritableEvent() = default;
-
-EventPair WritableEvent::CreateEventPair(KernelCore& kernel, std::string name) {
- std::shared_ptr<WritableEvent> writable_event(new WritableEvent(kernel));
- std::shared_ptr<ReadableEvent> readable_event(new ReadableEvent(kernel));
-
- writable_event->name = name + ":Writable";
- writable_event->readable = readable_event;
- readable_event->name = name + ":Readable";
-
- return {std::move(readable_event), std::move(writable_event)};
-}
-
-std::shared_ptr<ReadableEvent> WritableEvent::GetReadableEvent() const {
- return readable;
-}
-
-void WritableEvent::Signal() {
- readable->Signal();
-}
-
-void WritableEvent::Clear() {
- readable->Clear();
-}
-
-bool WritableEvent::IsSignaled() const {
- return readable->IsSignaled();
-}
-
-} // namespace Kernel
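The deleted writable_event.cpp implemented the readable/writable event pair: a WritableEvent that forwards Signal/Clear/IsSignaled to its ReadableEvent twin, with both created together by CreateEventPair. That split is presumably superseded by the new KEvent family introduced in this changeset, which keeps both sides under one object. A compressed sketch of the ownership shape being removed, with stub types:

    #include <memory>
    #include <utility>

    struct ReadableStub {
        bool signaled = false;
        void Signal() { signaled = true; }
        void Clear() { signaled = false; }
    };

    struct WritableStub {
        std::shared_ptr<ReadableStub> readable;
        void Signal() { readable->Signal(); } // forwards to the readable side
        void Clear() { readable->Clear(); }
    };

    // Both halves are created together and share the readable end,
    // mirroring the deleted CreateEventPair.
    std::pair<std::shared_ptr<ReadableStub>, std::shared_ptr<WritableStub>> CreatePair() {
        auto r = std::make_shared<ReadableStub>();
        auto w = std::make_shared<WritableStub>();
        w->readable = r;
        return {std::move(r), std::move(w)};
    }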
diff --git a/src/core/hle/kernel/writable_event.h b/src/core/hle/kernel/writable_event.h
deleted file mode 100644
index 6189cf65c..000000000
--- a/src/core/hle/kernel/writable_event.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <memory>
-
-#include "core/hle/kernel/object.h"
-
-namespace Kernel {
-
-class KernelCore;
-class ReadableEvent;
-class WritableEvent;
-
-struct EventPair {
- std::shared_ptr<ReadableEvent> readable;
- std::shared_ptr<WritableEvent> writable;
-};
-
-class WritableEvent final : public Object {
-public:
- ~WritableEvent() override;
-
- /**
- * Creates an event
- * @param kernel The kernel instance to create this event under.
- * @param name Optional name of event
- */
- static EventPair CreateEventPair(KernelCore& kernel, std::string name = "Unknown");
-
- std::string GetTypeName() const override {
- return "WritableEvent";
- }
- std::string GetName() const override {
- return name;
- }
-
- static constexpr HandleType HANDLE_TYPE = HandleType::WritableEvent;
- HandleType GetHandleType() const override {
- return HANDLE_TYPE;
- }
-
- std::shared_ptr<ReadableEvent> GetReadableEvent() const;
-
- void Signal();
- void Clear();
- bool IsSignaled() const;
-
-private:
- explicit WritableEvent(KernelCore& kernel);
-
- std::shared_ptr<ReadableEvent> readable;
-
- std::string name; ///< Name of event (optional)
-};
-
-} // namespace Kernel
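The deleted header also illustrates the kernel's handle-type tagging convention: each Object subtype exposes a static HANDLE_TYPE plus a virtual getter so that handle-table lookups can perform a checked downcast. A distilled sketch of that pattern (enum values and helper names below are placeholders, not the kernel's API):

    enum class HandleType { WritableEvent, ReadableEvent };

    struct ObjectStub {
        virtual ~ObjectStub() = default;
        virtual HandleType GetHandleType() const = 0;
    };

    struct WritableEventStub final : ObjectStub {
        static constexpr HandleType HANDLE_TYPE = HandleType::WritableEvent;
        HandleType GetHandleType() const override { return HANDLE_TYPE; }
    };

    // Checked downcast in the style of a handle-table lookup: returns null
    // unless the stored object's tag matches the requested type.
    template <typename T>
    T* DynamicObjectCast(ObjectStub* obj) {
        if (obj != nullptr && obj->GetHandleType() == T::HANDLE_TYPE) {
            return static_cast<T*>(obj);
        }
        return nullptr;
    }

With both writable_event files gone, that tag value and this lookup path retire along with the class.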