author     Liam <byteslice@airmail.cc>  2022-06-16 16:35:52 +0200
committer  Liam <byteslice@airmail.cc>  2022-06-23 06:28:00 +0200
commit     2c56e94702e897c609711d82057d8267d8f4d0b3 (patch)
tree       b037c6951383408517b460577b709f4383a61da0 /src/core/hle/kernel
parent     Merge pull request #8491 from Morph1984/extra-assert (diff)
Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.cpp    |  4
-rw-r--r--  src/core/hle/kernel/k_condition_variable.cpp |  4
-rw-r--r--  src/core/hle/kernel/k_interrupt_manager.cpp  |  5
-rw-r--r--  src/core/hle/kernel/k_process.cpp            | 10
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp          | 25
-rw-r--r--  src/core/hle/kernel/k_scheduler.h            | 12
-rw-r--r--  src/core/hle/kernel/k_thread.cpp             | 12
-rw-r--r--  src/core/hle/kernel/k_thread.h               |  1
-rw-r--r--  src/core/hle/kernel/kernel.cpp               | 13
-rw-r--r--  src/core/hle/kernel/kernel.h                 |  3
-rw-r--r--  src/core/hle/kernel/svc.cpp                  | 13
11 files changed, 60 insertions(+), 42 deletions(-)
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 04cf86d52..5fa67bae1 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -234,7 +234,7 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
// Prepare to wait.
- KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+ KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
{
@@ -287,7 +287,7 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
// Prepare to wait.
- KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+ KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
{
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 43bcd253d..a8b5411e3 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -106,7 +106,7 @@ KConditionVariable::KConditionVariable(Core::System& system_)
KConditionVariable::~KConditionVariable() = default;
ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
- KThread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread();
+ KThread* owner_thread = GetCurrentThreadPointer(kernel);
// Signal the address.
{
@@ -147,7 +147,7 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
}
ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
- KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+ KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
// Wait for the address.
diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp
index cf9ed80d0..d606a7f86 100644
--- a/src/core/hle/kernel/k_interrupt_manager.cpp
+++ b/src/core/hle/kernel/k_interrupt_manager.cpp
@@ -15,8 +15,7 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
return;
}
- auto& scheduler = kernel.Scheduler(core_id);
- auto& current_thread = *scheduler.GetCurrentThread();
+ auto& current_thread = GetCurrentThread(kernel);
// If the user disable count is set, we may need to pin the current thread.
if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
@@ -26,7 +25,7 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
process->PinCurrentThread(core_id);
// Set the interrupt flag for the thread.
- scheduler.GetCurrentThread()->SetInterruptFlag();
+ GetCurrentThread(kernel).SetInterruptFlag();
}
}
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index cb84c20e3..b477c6e55 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -176,7 +176,8 @@ void KProcess::PinCurrentThread(s32 core_id) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Get the current thread.
- KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
+ KThread* cur_thread =
+ kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
// If the thread isn't terminated, pin it.
if (!cur_thread->IsTerminationRequested()) {
@@ -193,7 +194,8 @@ void KProcess::UnpinCurrentThread(s32 core_id) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Get the current thread.
- KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
+ KThread* cur_thread =
+ kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
// Unpin it.
cur_thread->Unpin();
@@ -420,11 +422,11 @@ void KProcess::PrepareForTermination() {
ChangeStatus(ProcessStatus::Exiting);
const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
- for (auto& thread : in_thread_list) {
+ for (auto* thread : in_thread_list) {
if (thread->GetOwnerProcess() != this)
continue;
- if (thread == kernel.CurrentScheduler()->GetCurrentThread())
+ if (thread == GetCurrentThreadPointer(kernel))
continue;
// TODO(Subv): When are the other running/ready threads terminated?
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index fb3b84f3d..d586b3f5c 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -317,7 +317,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
{
KThread* best_thread = priority_queue.GetScheduledFront(cpu_core_id);
- if (best_thread == GetCurrentThread()) {
+ if (best_thread == GetCurrentThreadPointer(kernel)) {
best_thread = priority_queue.GetScheduledNext(cpu_core_id, best_thread);
}
@@ -424,7 +424,7 @@ void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
- KThread& cur_thread = Kernel::GetCurrentThread(kernel);
+ KThread& cur_thread = GetCurrentThread(kernel);
KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -463,7 +463,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
- KThread& cur_thread = Kernel::GetCurrentThread(kernel);
+ KThread& cur_thread = GetCurrentThread(kernel);
KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -551,7 +551,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
- KThread& cur_thread = Kernel::GetCurrentThread(kernel);
+ KThread& cur_thread = GetCurrentThread(kernel);
KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -642,7 +642,7 @@ KScheduler::~KScheduler() {
ASSERT(!idle_thread);
}
-KThread* KScheduler::GetCurrentThread() const {
+KThread* KScheduler::GetSchedulerCurrentThread() const {
if (auto result = current_thread.load(); result) {
return result;
}
@@ -654,7 +654,7 @@ u64 KScheduler::GetLastContextSwitchTicks() const {
}
void KScheduler::RescheduleCurrentCore() {
- ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
+ ASSERT(GetCurrentThread(system.Kernel()).GetDisableDispatchCount() == 1);
auto& phys_core = system.Kernel().PhysicalCore(core_id);
if (phys_core.IsInterrupted()) {
@@ -665,7 +665,7 @@ void KScheduler::RescheduleCurrentCore() {
if (state.needs_scheduling.load()) {
Schedule();
} else {
- GetCurrentThread()->EnableDispatch();
+ GetCurrentThread(system.Kernel()).EnableDispatch();
guard.Unlock();
}
}
@@ -718,13 +718,18 @@ void KScheduler::Reload(KThread* thread) {
void KScheduler::SwitchContextStep2() {
// Load context of new thread
- Reload(GetCurrentThread());
+ Reload(GetCurrentThreadPointer(system.Kernel()));
RescheduleCurrentCore();
}
+void KScheduler::Schedule() {
+ ASSERT(GetCurrentThread(system.Kernel()).GetDisableDispatchCount() == 1);
+ this->ScheduleImpl();
+}
+
void KScheduler::ScheduleImpl() {
- KThread* previous_thread = GetCurrentThread();
+ KThread* previous_thread = GetCurrentThreadPointer(system.Kernel());
KThread* next_thread = state.highest_priority_thread;
state.needs_scheduling.store(false);
@@ -762,6 +767,7 @@ void KScheduler::ScheduleImpl() {
old_context = &previous_thread->GetHostContext();
// Set the new thread.
+ SetCurrentThread(system.Kernel(), next_thread);
current_thread.store(next_thread);
guard.Unlock();
@@ -805,6 +811,7 @@ void KScheduler::SwitchToCurrent() {
}
}
auto thread = next_thread ? next_thread : idle_thread;
+ SetCurrentThread(system.Kernel(), thread);
Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());
} while (!is_switch_pending());
}
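
Note on the k_scheduler.cpp hunks above: the switch paths now publish the incoming
thread through the new SetCurrentThread() helper before yielding to its fiber, so
the thread-local lookup (GetCurrentThread / GetCurrentThreadPointer) stays in sync
with what the core is actually running, while the scheduler's own atomic
current_thread member remains per-core bookkeeping, exposed via the renamed
GetSchedulerCurrentThread(). A minimal, self-contained sketch of that pattern,
using stand-in names rather than yuzu's real types:

    // Sketch only: stand-ins for KThread, KernelCore and the fiber switch.
    #include <atomic>

    namespace sketch {

    struct KThread {};

    thread_local KThread* g_current_thread = nullptr; // one slot per host thread

    inline void SetCurrentThread(KThread* thread) {
        g_current_thread = thread;
    }

    inline KThread* GetCurrentThreadPointer() {
        return g_current_thread;
    }

    struct Scheduler {
        std::atomic<KThread*> current_thread{nullptr}; // per-core bookkeeping

        KThread* GetSchedulerCurrentThread() const {
            return current_thread.load();
        }

        void SwitchTo(KThread* next) {
            SetCurrentThread(next);     // publish before the context switch
            current_thread.store(next); // keep the per-core view consistent
            // Common::Fiber::YieldTo(...) would run the thread here.
        }
    };

    } // namespace sketch

Callers that mean "the thread running on this host thread" use the free functions;
callers that mean "the thread core N has selected" go through
Scheduler(core_id).GetSchedulerCurrentThread(), matching the substitutions above.
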
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 729e006f2..3f90656ee 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -48,18 +48,13 @@ public:
void Reload(KThread* thread);
/// Gets the current running thread
- [[nodiscard]] KThread* GetCurrentThread() const;
+ [[nodiscard]] KThread* GetSchedulerCurrentThread() const;
/// Gets the idle thread
[[nodiscard]] KThread* GetIdleThread() const {
return idle_thread;
}
- /// Returns true if the scheduler is idle
- [[nodiscard]] bool IsIdle() const {
- return GetCurrentThread() == idle_thread;
- }
-
/// Gets the timestamp for the last context switch in ticks.
[[nodiscard]] u64 GetLastContextSwitchTicks() const;
@@ -149,10 +144,7 @@ private:
void RotateScheduledQueue(s32 cpu_core_id, s32 priority);
- void Schedule() {
- ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
- this->ScheduleImpl();
- }
+ void Schedule();
/// Switches the CPU's active thread context to that of the specified thread
void ScheduleImpl();
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index c0a091bb6..fa5352847 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -382,7 +382,7 @@ void KThread::FinishTermination() {
for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
KThread* core_thread{};
do {
- core_thread = kernel.Scheduler(i).GetCurrentThread();
+ core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
} while (core_thread == this);
}
}
@@ -631,7 +631,7 @@ ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
s32 thread_core;
for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
++thread_core) {
- if (kernel.Scheduler(thread_core).GetCurrentThread() == this) {
+ if (kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) {
thread_is_current = true;
break;
}
@@ -756,7 +756,7 @@ void KThread::WaitUntilSuspended() {
for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
KThread* core_thread{};
do {
- core_thread = kernel.Scheduler(i).GetCurrentThread();
+ core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
} while (core_thread == this);
}
}
@@ -822,7 +822,7 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
// Check if the thread is currently running.
// If it is, we'll need to retry.
for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
- if (kernel.Scheduler(i).GetCurrentThread() == this) {
+ if (kernel.Scheduler(i).GetSchedulerCurrentThread() == this) {
thread_is_current = true;
break;
}
@@ -1175,6 +1175,10 @@ std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
return host_context;
}
+void SetCurrentThread(KernelCore& kernel, KThread* thread) {
+ kernel.SetCurrentEmuThread(thread);
+}
+
KThread* GetCurrentThreadPointer(KernelCore& kernel) {
return kernel.GetCurrentEmuThread();
}
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index 8c1f8a344..c6ca37f56 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -106,6 +106,7 @@ enum class StepState : u32 {
StepPerformed, ///< Thread has stepped, waiting to be scheduled again
};
+void SetCurrentThread(KernelCore& kernel, KThread* thread);
[[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel);
[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel);
[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel);
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 94953e257..0009193be 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -331,6 +331,8 @@ struct KernelCore::Impl {
return is_shutting_down.load(std::memory_order_relaxed);
}
+ static inline thread_local KThread* current_thread{nullptr};
+
KThread* GetCurrentEmuThread() {
// If we are shutting down the kernel, none of this is relevant anymore.
if (IsShuttingDown()) {
@@ -341,7 +343,12 @@ struct KernelCore::Impl {
if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
return GetHostDummyThread();
}
- return schedulers[thread_id]->GetCurrentThread();
+
+ return current_thread;
+ }
+
+ void SetCurrentEmuThread(KThread* thread) {
+ current_thread = thread;
}
void DeriveInitialMemoryLayout() {
@@ -1024,6 +1031,10 @@ KThread* KernelCore::GetCurrentEmuThread() const {
return impl->GetCurrentEmuThread();
}
+void KernelCore::SetCurrentEmuThread(KThread* thread) {
+ impl->SetCurrentEmuThread(thread);
+}
+
KMemoryManager& KernelCore::MemoryManager() {
return *impl->memory_manager;
}
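
The kernel.cpp hunk above moves the emulated-thread lookup off the per-core
schedulers and onto a thread_local pointer owned by KernelCore::Impl, keeping the
existing fallback to the host dummy thread for host threads that are not emulated
CPU cores. A compilable sketch of that shape, with assumed stand-in types and
without the shutdown check:

    // Sketch only: simplified stand-in for KernelCore::Impl's accessor pair.
    #include <cstdint>

    namespace sketch {

    struct KThread {};

    constexpr std::uint32_t NUM_CPU_CORES = 4;

    struct KernelImpl {
        static inline thread_local KThread* current_thread{nullptr};

        KThread* dummy_thread{nullptr};    // stand-in for GetHostDummyThread()
        std::uint32_t host_thread_id{~0u}; // stand-in for the host thread ID lookup

        KThread* GetCurrentEmuThread() {
            // Host threads that are not emulated cores still get the dummy thread.
            if (host_thread_id >= NUM_CPU_CORES) {
                return dummy_thread;
            }
            return current_thread;
        }

        void SetCurrentEmuThread(KThread* thread) {
            current_thread = thread;
        }
    };

    } // namespace sketch
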
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 4e7beab0e..aa0ebaa02 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -226,6 +226,9 @@ public:
/// Gets the current host_thread/guest_thread pointer.
KThread* GetCurrentEmuThread() const;
+ /// Sets the current guest_thread pointer.
+ void SetCurrentEmuThread(KThread* thread);
+
/// Gets the current host_thread handle.
u32 GetCurrentHostThreadID() const;
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 2ff6d5fa6..2b34fc19d 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -327,7 +327,6 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
- auto thread = kernel.CurrentScheduler()->GetCurrentThread();
{
KScopedSchedulerLock lock(kernel);
@@ -337,7 +336,7 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
session->SendSyncRequest(&GetCurrentThread(kernel), system.Memory(), system.CoreTiming());
}
- return thread->GetWaitResult();
+ return GetCurrentThread(kernel).GetWaitResult();
}
static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
@@ -624,7 +623,7 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
handle_debug_buffer(info1, info2);
- auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
+ auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
const auto thread_processor_id = current_thread->GetActiveCore();
system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
}
@@ -884,7 +883,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
const auto& core_timing = system.CoreTiming();
const auto& scheduler = *system.Kernel().CurrentScheduler();
- const auto* const current_thread = scheduler.GetCurrentThread();
+ const auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
const bool same_thread = current_thread == thread.GetPointerUnsafe();
const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks();
@@ -1103,7 +1102,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Hand
if (thread->GetRawState() != ThreadState::Runnable) {
bool current = false;
for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
- if (thread.GetPointerUnsafe() == kernel.Scheduler(i).GetCurrentThread()) {
+ if (thread.GetPointerUnsafe() == kernel.Scheduler(i).GetSchedulerCurrentThread()) {
current = true;
break;
}
@@ -1851,7 +1850,7 @@ static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
static void ExitThread(Core::System& system) {
LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
- auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
+ auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
system.GlobalSchedulerContext().RemoveThread(current_thread);
current_thread->Exit();
system.Kernel().UnregisterInUseObject(current_thread);
@@ -2993,7 +2992,7 @@ void Call(Core::System& system, u32 immediate) {
auto& kernel = system.Kernel();
kernel.EnterSVCProfile();
- auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
+ auto* thread = GetCurrentThreadPointer(kernel);
thread->SetIsCallingSvc();
const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)