author     Fernando Sahmkow <fsahmkow27@gmail.com>    2019-09-11 18:14:37 +0200
committer  FernandoS27 <fsahmkow27@gmail.com>         2019-10-15 17:55:16 +0200
commit     0cf26cee593c3c6abe909f3db52d972f846b13a9 (patch)
tree       6e8e4b08271d1c3bd2348ef2bdd3cf5c4912dc9f /src
parent     Scheduler: Corrections to YieldAndBalanceLoad and Yield bombing protection. (diff)
Diffstat (limited to 'src')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  81
-rw-r--r--  src/core/hle/kernel/thread.h        9
2 files changed, 85 insertions, 5 deletions
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 78463cef5..5581c43bf 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -241,10 +241,83 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
void GlobalScheduler::PreemptThreads() {
for (std::size_t core_id = 0; core_id < NUM_CPU_CORES; core_id++) {
const u32 priority = preemption_priorities[core_id];
- if (scheduled_queue[core_id].size(priority) > 1) {
+
+ if (scheduled_queue[core_id].size(priority) > 0) {
+ scheduled_queue[core_id].front(priority)->IncrementYieldCount();
scheduled_queue[core_id].yield(priority);
- reselection_pending.store(true, std::memory_order_release);
+ if (scheduled_queue[core_id].size(priority) > 1) {
+ scheduled_queue[core_id].front(priority)->IncrementYieldCount();
+ }
}
+
+ Thread* current_thread =
+ scheduled_queue[core_id].empty() ? nullptr : scheduled_queue[core_id].front();
+ Thread* winner = nullptr;
+ for (auto& thread : suggested_queue[core_id]) {
+ const s32 source_core = thread->GetProcessorID();
+ if (thread->GetPriority() != priority) {
+ continue;
+ }
+ if (source_core >= 0) {
+ Thread* next_thread = scheduled_queue[source_core].empty()
+ ? nullptr
+ : scheduled_queue[source_core].front();
+ if (next_thread != nullptr && next_thread->GetPriority() < 2) {
+ break;
+ }
+ if (next_thread == thread) {
+ continue;
+ }
+ }
+ if (current_thread != nullptr &&
+ current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
+ winner = thread;
+ break;
+ }
+ }
+
+ if (winner != nullptr) {
+ if (winner->IsRunning()) {
+ UnloadThread(winner->GetProcessorID());
+ }
+ TransferToCore(winner->GetPriority(), core_id, winner);
+            current_thread =
+                winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
+ }
+
+ if (current_thread != nullptr && current_thread->GetPriority() > priority) {
+ for (auto& thread : suggested_queue[core_id]) {
+ const s32 source_core = thread->GetProcessorID();
+ if (thread->GetPriority() > priority) {
+ continue;
+ }
+ if (source_core >= 0) {
+ Thread* next_thread = scheduled_queue[source_core].empty()
+ ? nullptr
+ : scheduled_queue[source_core].front();
+ if (next_thread != nullptr && next_thread->GetPriority() < 2) {
+ break;
+ }
+ if (next_thread == thread) {
+ continue;
+ }
+ }
+ if (current_thread != nullptr &&
+ current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
+ winner = thread;
+ break;
+ }
+ }
+
+ if (winner != nullptr) {
+ if (winner->IsRunning()) {
+ UnloadThread(winner->GetProcessorID());
+ }
+ TransferToCore(winner->GetPriority(), core_id, winner);
+ current_thread = winner;
+ }
+ }
+
+ reselection_pending.store(true, std::memory_order_release);
}
}
@@ -260,9 +333,7 @@ void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) {
bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
if (current_thread == winner) {
- // TODO(blinkhawk): manage redundant operations, this is not implemented.
- // as its mostly an optimization.
- // current_thread->SetRedundantSchedulerOperation();
+ current_thread->IncrementYieldCount();
return true;
} else {
reselection_pending.store(true, std::memory_order_release);
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 88255099f..bec23a0e0 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -416,6 +416,14 @@ public:
/// Yields this thread and if the core is left idle, loads are rebalanced
bool YieldAndWaitForLoadBalancing();
+ void IncrementYieldCount() {
+ yield_count++;
+ }
+
+ u64 GetYieldCount() const {
+ return yield_count;
+ }
+
ThreadSchedStatus GetSchedulingStatus() const {
return static_cast<ThreadSchedStatus>(scheduling_state & ThreadSchedMasks::LowMask);
}
@@ -460,6 +468,7 @@ private:
u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
u64 last_running_ticks = 0; ///< CPU tick when thread was last running
+    u64 yield_count = 0; ///< Number of redundant yields that have occurred.
s32 processor_id = 0;
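
Note: the sketch below is a minimal, standalone illustration of the idea behind this commit, not yuzu code; the names FakeThread and PickWinner and the priority value 59 are hypothetical. It shows the two ingredients the patch adds to PreemptThreads: counting yields that do not change the schedule (yield_count), and, when preempting at a core's preemption priority, pulling in the suggested thread that has been off the CPU at least as long as the currently scheduled one (compared via last_running_ticks).

// Minimal sketch, assuming a simplified thread model. Not the yuzu implementation.
#include <cstdint>
#include <iostream>
#include <vector>

struct FakeThread {
    int id;
    std::uint32_t priority;
    std::uint64_t last_running_ticks; // CPU tick when the thread last ran
    std::uint64_t yield_count = 0;    // yields that did not change the schedule
};

// Returns the first suggested thread that matches the preemption priority and
// has been off the CPU at least as long as the currently scheduled thread,
// mirroring the GetLastRunningTicks() comparison in the patch; nullptr if none.
FakeThread* PickWinner(FakeThread* current, std::vector<FakeThread>& suggested,
                       std::uint32_t preemption_priority) {
    for (auto& thread : suggested) {
        if (thread.priority != preemption_priority) {
            continue;
        }
        if (current != nullptr && current->last_running_ticks >= thread.last_running_ticks) {
            return &thread; // candidate has waited as long or longer; migrate it
        }
    }
    return nullptr;
}

int main() {
    FakeThread current{0, 59, 1000};
    std::vector<FakeThread> suggested{{1, 59, 400}, {2, 59, 900}};
    if (FakeThread* winner = PickWinner(&current, suggested, 59)) {
        std::cout << "would migrate thread " << winner->id << '\n';
    }
    current.yield_count++; // the rotation at the preemption priority counts as a yield
}

Built with a C++17 compiler this prints "would migrate thread 1", since thread 1 has the oldest last_running_ticks among the suggestions; in the real scheduler the winner is then moved with TransferToCore and reselection_pending is set.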