path: root/src/core/hle/kernel/k_scheduler.cpp
author     bunnei <bunneidev@gmail.com>    2021-08-26 05:59:28 +0200
committer  GitHub <noreply@github.com>     2021-08-26 05:59:28 +0200
commit     0c8594b2251cf600afc3a89ecf1114eb3a25f700 (patch)
tree       d6f7d065cd8e2f00c85b9749076ef303a705c864 /src/core/hle/kernel/k_scheduler.cpp
parent     Merge pull request #6919 from ameerj/vk-int8-capability (diff)
Diffstat (limited to 'src/core/hle/kernel/k_scheduler.cpp')
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp  85
1 file changed, 39 insertions(+), 46 deletions(-)
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 6ddbae52c..6a7d80d03 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -376,18 +376,20 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
}
void KScheduler::DisableScheduling(KernelCore& kernel) {
- ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
- GetCurrentThreadPointer(kernel)->DisableDispatch();
+ if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
+ ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
+ scheduler->GetCurrentThread()->DisableDispatch();
+ }
}
void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
- ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 1);
-
- if (GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() > 1) {
- GetCurrentThreadPointer(kernel)->EnableDispatch();
- } else {
- RescheduleCores(kernel, cores_needing_scheduling);
+ if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
+ ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
+ if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
+ scheduler->GetCurrentThread()->EnableDispatch();
+ }
}
+ RescheduleCores(kernel, cores_needing_scheduling);
}
u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
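For readers unfamiliar with the dispatch-disable counting in the hunk above, here is a minimal sketch, not part of this commit, of how DisableScheduling and EnableScheduling are typically paired around a critical section. The RAII helper name, the Example namespace, and the zero core mask are illustrative assumptions; the two calls are assumed to be static members, as their KernelCore& parameter suggests.

// Hypothetical RAII pairing of the calls shown in the hunk above.
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"

namespace Example {

class ScopedDispatchDisable {
public:
    explicit ScopedDispatchDisable(Kernel::KernelCore& kernel_) : kernel{kernel_} {
        // Raises the current thread's disable-dispatch count; after this change
        // the call is a no-op when no scheduler is active on this host thread.
        Kernel::KScheduler::DisableScheduling(kernel);
    }

    ~ScopedDispatchDisable() {
        // A zero mask means "no cores need rescheduling"; real callers pass the
        // mask produced by UpdateHighestPriorityThreads().
        Kernel::KScheduler::EnableScheduling(kernel, 0);
    }

private:
    Kernel::KernelCore& kernel;
};

} // namespace Example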
@@ -615,17 +617,13 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
state.highest_priority_thread = nullptr;
}
-void KScheduler::Finalize() {
+KScheduler::~KScheduler() {
if (idle_thread) {
idle_thread->Close();
idle_thread = nullptr;
}
}
-KScheduler::~KScheduler() {
- ASSERT(!idle_thread);
-}
-
KThread* KScheduler::GetCurrentThread() const {
if (auto result = current_thread.load(); result) {
return result;
@@ -644,12 +642,10 @@ void KScheduler::RescheduleCurrentCore() {
if (phys_core.IsInterrupted()) {
phys_core.ClearInterrupt();
}
-
guard.Lock();
if (state.needs_scheduling.load()) {
Schedule();
} else {
- GetCurrentThread()->EnableDispatch();
guard.Unlock();
}
}
@@ -659,33 +655,26 @@ void KScheduler::OnThreadStart() {
}
void KScheduler::Unload(KThread* thread) {
- ASSERT(thread);
-
LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
- if (thread->IsCallingSvc()) {
- thread->ClearIsCallingSvc();
- }
-
- auto& physical_core = system.Kernel().PhysicalCore(core_id);
- if (!physical_core.IsInitialized()) {
- return;
- }
-
- Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
- cpu_core.SaveContext(thread->GetContext32());
- cpu_core.SaveContext(thread->GetContext64());
- // Save the TPIDR_EL0 system register in case it was modified.
- thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
- cpu_core.ClearExclusiveState();
-
- if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
- prev_thread = thread;
- } else {
- prev_thread = nullptr;
+ if (thread) {
+ if (thread->IsCallingSvc()) {
+ thread->ClearIsCallingSvc();
+ }
+ if (!thread->IsTerminationRequested()) {
+ prev_thread = thread;
+
+ Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+ cpu_core.SaveContext(thread->GetContext32());
+ cpu_core.SaveContext(thread->GetContext64());
+ // Save the TPIDR_EL0 system register in case it was modified.
+ thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+ cpu_core.ClearExclusiveState();
+ } else {
+ prev_thread = nullptr;
+ }
+ thread->context_guard.Unlock();
}
-
- thread->context_guard.Unlock();
}
void KScheduler::Reload(KThread* thread) {
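As a reading aid, a minimal sketch, not part of this commit, of the save/restore symmetry that Unload and Reload implement around a context switch. The helper names and the Example namespace are hypothetical; the ARM_Interface and KThread accessors are the ones visible in the hunks above and below.

// Hypothetical helpers mirroring the Unload/Reload CPU-state handling.
#include "core/arm/arm_interface.h"
#include "core/hle/kernel/k_thread.h"

namespace Example {

// Roughly what Unload does with the outgoing thread's CPU state.
void SaveGuestContext(Core::ARM_Interface& cpu_core, Kernel::KThread& thread) {
    cpu_core.SaveContext(thread.GetContext32());
    cpu_core.SaveContext(thread.GetContext64());
    // TPIDR_EL0 (the guest's thread-local pointer) is written back in case the
    // guest modified it while running.
    thread.SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
    cpu_core.ClearExclusiveState();
}

// Roughly what Reload does with the incoming thread's CPU state.
void LoadGuestContext(Core::ARM_Interface& cpu_core, Kernel::KThread& thread) {
    cpu_core.LoadContext(thread.GetContext32());
    cpu_core.LoadContext(thread.GetContext64());
}

} // namespace Example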
@@ -694,6 +683,11 @@ void KScheduler::Reload(KThread* thread) {
if (thread) {
ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
+ auto* const thread_owner_process = thread->GetOwnerProcess();
+ if (thread_owner_process != nullptr) {
+ system.Kernel().MakeCurrentProcess(thread_owner_process);
+ }
+
Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
cpu_core.LoadContext(thread->GetContext32());
cpu_core.LoadContext(thread->GetContext64());
@@ -711,7 +705,7 @@ void KScheduler::SwitchContextStep2() {
}
void KScheduler::ScheduleImpl() {
- KThread* previous_thread = GetCurrentThread();
+ KThread* previous_thread = current_thread.load();
KThread* next_thread = state.highest_priority_thread;
state.needs_scheduling = false;
@@ -723,15 +717,10 @@ void KScheduler::ScheduleImpl() {
// If we're not actually switching thread, there's nothing to do.
if (next_thread == current_thread.load()) {
- previous_thread->EnableDispatch();
guard.Unlock();
return;
}
- if (next_thread->GetCurrentCore() != core_id) {
- next_thread->SetCurrentCore(core_id);
- }
-
current_thread.store(next_thread);
KProcess* const previous_process = system.Kernel().CurrentProcess();
@@ -742,7 +731,11 @@ void KScheduler::ScheduleImpl() {
Unload(previous_thread);
std::shared_ptr<Common::Fiber>* old_context;
- old_context = &previous_thread->GetHostContext();
+ if (previous_thread != nullptr) {
+ old_context = &previous_thread->GetHostContext();
+ } else {
+ old_context = &idle_thread->GetHostContext();
+ }
guard.Unlock();
Common::Fiber::YieldTo(*old_context, *switch_fiber);