path: root/src
author     Fernando Sahmkow <fsahmkow27@gmail.com>   2019-09-10 16:23:43 +0200
committer  FernandoS27 <fsahmkow27@gmail.com>        2019-10-15 17:55:13 +0200
commit     103f3a2fe51a09caf3f478226b6957b23c6eff79
tree       5304773e043ed3db6a55e4666fc25143560396af /src
parent     Kernel: Style and Corrections
Diffstat (limited to 'src')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  16
-rw-r--r--  src/core/hle/kernel/scheduler.h     8
-rw-r--r--  src/core/hle/kernel/svc.cpp        13
-rw-r--r--  src/core/hle/kernel/thread.cpp     12
-rw-r--r--  src/core/hle/kernel/thread.h        6
5 files changed, 31 insertions, 24 deletions
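
Taken together, the hunks below change the three scheduler yield paths from returning void to returning bool, so callers can tell whether the yield turned out to be redundant. A signature-level sketch of the resulting interface (simplified; member variables and the documentation comments are omitted here):

class Thread;

class GlobalScheduler {
public:
    // Each yield operation now reports whether it was redundant, i.e. whether
    // the yielding thread is still the best candidate to keep running.
    bool YieldThread(Thread* thread);
    bool YieldThreadAndBalanceLoad(Thread* thread);
    bool YieldThreadAndWaitForLoadBalancing(Thread* thread);

private:
    // Common tail of the three yields: true means the operation was redundant,
    // false means a core reselection has been requested.
    bool AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner);
};
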
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index df4e9b799..451fd8077 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -118,7 +118,7 @@ void GlobalScheduler::SelectThread(u32 core) {
* YieldThread takes a thread and moves it to the back of its priority list.
* This operation can be redundant and no scheduling is changed if marked as such.
*/
-void GlobalScheduler::YieldThread(Thread* yielding_thread) {
+bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
// Note: caller should use critical section, etc.
const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
const u32 priority = yielding_thread->GetPriority();
@@ -129,7 +129,7 @@ void GlobalScheduler::YieldThread(Thread* yielding_thread) {
scheduled_queue[core_id].yield(priority);
Thread* winner = scheduled_queue[core_id].front(priority);
- AskForReselectionOrMarkRedundant(yielding_thread, winner);
+ return AskForReselectionOrMarkRedundant(yielding_thread, winner);
}
/*
@@ -138,7 +138,7 @@ void GlobalScheduler::YieldThread(Thread* yielding_thread) {
* a better priority than the next thread in the core.
* This operation can be redundant and no scheduling is changed if marked as such.
*/
-void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
+bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
// Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
// etc.
const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
@@ -186,7 +186,7 @@ void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
winner = next_thread;
}
- AskForReselectionOrMarkRedundant(yielding_thread, winner);
+ return AskForReselectionOrMarkRedundant(yielding_thread, winner);
}
/*
@@ -195,7 +195,7 @@ void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
* a suggested thread is obtained instead.
* This operation can be redundant and no scheduling is changed if marked as such.
*/
-void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
+bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
// Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
// etc.
Thread* winner = nullptr;
@@ -235,7 +235,7 @@ void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
}
}
- AskForReselectionOrMarkRedundant(yielding_thread, winner);
+ return AskForReselectionOrMarkRedundant(yielding_thread, winner);
}
void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) {
@@ -248,13 +248,15 @@ void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) {
scheduled_queue[core].add(thread, priority, false);
}
-void GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
+bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
if (current_thread == winner) {
// TODO(blinkhawk): manage redundant operations; this is not implemented,
// as it's mostly an optimization.
// current_thread->SetRedundantSchedulerOperation();
+ return true;
} else {
reselection_pending.store(true, std::memory_order_release);
+ return false;
}
}
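
As a hedged, stand-alone illustration of the contract AskForReselectionOrMarkRedundant now exposes (simplified stand-in types, queue handling elided; not the actual yuzu code):

#include <atomic>
#include <cassert>

struct Thread {};

struct MiniGlobalScheduler {
    std::atomic<bool> reselection_pending{false};

    // Mirrors the hunk above: returns true when the yield changed nothing
    // (the yielding thread is still the winner); otherwise flags that a core
    // reselection is pending and returns false.
    bool AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
        if (current_thread == winner) {
            return true;
        }
        reselection_pending.store(true, std::memory_order_release);
        return false;
    }
};

int main() {
    MiniGlobalScheduler scheduler;
    Thread a;
    Thread b;
    assert(scheduler.AskForReselectionOrMarkRedundant(&a, &a));   // redundant yield
    assert(!scheduler.AskForReselectionOrMarkRedundant(&a, &b));  // reselection requested
    assert(scheduler.reselection_pending.load(std::memory_order_acquire));
    return 0;
}
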
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 1c9d8a30f..8fcc86bae 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -115,7 +115,7 @@ public:
* YieldThread takes a thread and moves it to the back of its priority list.
* This operation can be redundant and no scheduling is changed if marked as such.
*/
- void YieldThread(Thread* thread);
+ bool YieldThread(Thread* thread);
/*
* YieldThreadAndBalanceLoad takes a thread and moves it to the back of its priority list.
@@ -123,7 +123,7 @@ public:
* a better priority than the next thread in the core.
* This operation can be redundant and no scheduling is changed if marked as such.
*/
- void YieldThreadAndBalanceLoad(Thread* thread);
+ bool YieldThreadAndBalanceLoad(Thread* thread);
/*
* YieldThreadAndWaitForLoadBalancing takes a thread and moves it out of the scheduling queue
@@ -131,7 +131,7 @@ public:
* a suggested thread is obtained instead.
* This operation can be redundant and no scheduling is changed if marked as such.
*/
- void YieldThreadAndWaitForLoadBalancing(Thread* thread);
+ bool YieldThreadAndWaitForLoadBalancing(Thread* thread);
u32 CpuCoresCount() const {
return NUM_CPU_CORES;
@@ -146,7 +146,7 @@ public:
}
private:
- void AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner);
+ bool AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner);
static constexpr u32 min_regular_priority = 2;
std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> scheduled_queue;
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index d520ed033..bd67fc96d 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1556,17 +1556,18 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
auto& scheduler = system.CurrentScheduler();
auto* const current_thread = scheduler.GetCurrentThread();
+ bool redundant = false;
if (nanoseconds <= 0) {
switch (static_cast<SleepType>(nanoseconds)) {
case SleepType::YieldWithoutLoadBalancing:
- current_thread->YieldSimple();
+ redundant = current_thread->YieldSimple();
break;
case SleepType::YieldWithLoadBalancing:
- current_thread->YieldAndBalanceLoad();
+ redundant = current_thread->YieldAndBalanceLoad();
break;
case SleepType::YieldAndWaitForLoadBalancing:
- current_thread->YieldAndWaitForLoadBalancing();
+ redundant = current_thread->YieldAndWaitForLoadBalancing();
break;
default:
UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
@@ -1575,7 +1576,11 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
current_thread->Sleep(nanoseconds);
}
- system.PrepareReschedule(current_thread->GetProcessorID());
+ if (redundant) {
+ system.CoreTiming().Idle();
+ } else {
+ system.PrepareReschedule(current_thread->GetProcessorID());
+ }
}
/// Wait process wide key atomic
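
svc::SleepThread above is the consumer of that result: when the yield was redundant it only idles core timing instead of preparing a reschedule. A minimal sketch of that decision, assuming simplified stand-ins for the system and thread objects (only the names YieldSimple, Idle, and PrepareReschedule correspond to the hunk above):

#include <cstdio>

struct SketchSystem {
    void IdleCoreTiming() { std::puts("core timing idled"); }       // stands in for CoreTiming().Idle()
    void PrepareReschedule(int core) { std::printf("reschedule core %d\n", core); }
};

struct SketchThread {
    int processor_id = 0;
    bool yield_is_redundant = true;
    bool YieldSimple() { return yield_is_redundant; }
};

void SleepThreadTail(SketchSystem& system, SketchThread& current_thread) {
    const bool redundant = current_thread.YieldSimple();
    if (redundant) {
        // Nothing changed on this core, so skip the reschedule and just let
        // core timing advance to the next event.
        system.IdleCoreTiming();
    } else {
        system.PrepareReschedule(current_thread.processor_id);
    }
}

int main() {
    SketchSystem system;
    SketchThread thread;
    SleepThreadTail(system, thread);      // prints "core timing idled"
    thread.yield_is_redundant = false;
    SleepThreadTail(system, thread);      // prints "reschedule core 0"
    return 0;
}
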
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 8cf0a7ec7..ae62609e3 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -373,19 +373,19 @@ void Thread::Sleep(s64 nanoseconds) {
WakeAfterDelay(nanoseconds);
}
-void Thread::YieldSimple() {
+bool Thread::YieldSimple() {
auto& scheduler = kernel.GlobalScheduler();
- scheduler.YieldThread(this);
+ return scheduler.YieldThread(this);
}
-void Thread::YieldAndBalanceLoad() {
+bool Thread::YieldAndBalanceLoad() {
auto& scheduler = kernel.GlobalScheduler();
- scheduler.YieldThreadAndBalanceLoad(this);
+ return scheduler.YieldThreadAndBalanceLoad(this);
}
-void Thread::YieldAndWaitForLoadBalancing() {
+bool Thread::YieldAndWaitForLoadBalancing() {
auto& scheduler = kernel.GlobalScheduler();
- scheduler.YieldThreadAndWaitForLoadBalancing(this);
+ return scheduler.YieldThreadAndWaitForLoadBalancing(this);
}
void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
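
The Thread-level wrappers in thread.cpp stay thin: each one forwards to the global scheduler and passes its redundancy result through unchanged. A minimal sketch under that assumption (the scheduler reference is a stand-in for kernel.GlobalScheduler()):

struct MiniThread;

struct MiniScheduler {
    // Stand-in for GlobalScheduler::YieldThread; pretends every yield is redundant.
    bool YieldThread(MiniThread*) {
        return true;
    }
};

struct MiniThread {
    MiniScheduler& scheduler;  // stand-in for kernel.GlobalScheduler()

    // Thread::YieldSimple in the hunk above is exactly this: forward the call
    // and return the scheduler's answer untouched.
    bool YieldSimple() {
        return scheduler.YieldThread(this);
    }
};
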
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index bf0cae959..88255099f 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -408,13 +408,13 @@ public:
void Sleep(s64 nanoseconds);
/// Yields this thread without rebalancing loads.
- void YieldSimple();
+ bool YieldSimple();
/// Yields this thread and performs load rebalancing.
- void YieldAndBalanceLoad();
+ bool YieldAndBalanceLoad();
/// Yields this thread; if the core is left idle, loads are rebalanced.
- void YieldAndWaitForLoadBalancing();
+ bool YieldAndWaitForLoadBalancing();
ThreadSchedStatus GetSchedulingStatus() const {
return static_cast<ThreadSchedStatus>(scheduling_state & ThreadSchedMasks::LowMask);