author     Fernando Sahmkow <fsahmkow27@gmail.com>   2020-02-12 00:56:24 +0100
committer  FernandoS27 <fsahmkow27@gmail.com>        2020-02-12 01:19:11 +0100
commit     1e6f8aba04b7be0f90b97aed2527558c755935d6
tree       dea6dc5efd324eb6bc8667a114e20c6e91a28195 /src/core/hle/kernel
parent     Kernel: Refactor synchronization to better match RE
Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  | 12
-rw-r--r--  src/core/hle/kernel/scheduler.h    | 13
-rw-r--r--  src/core/hle/kernel/thread.cpp     | 15
3 files changed, 21 insertions(+), 19 deletions(-)
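
For context, this commit replaces the scheduler-local GlobalScheduler::NUM_CPU_CORES constant with the project-wide Core::Hardware::NUM_CPU_CORES. The header providing it is not part of this diff; a minimal sketch of what core/hardware_properties.h is assumed to declare looks roughly like this:

// Assumed shape of src/core/hardware_properties.h (not shown in this diff):
// one shared definition of the emulated core count, usable from any kernel
// component instead of a per-class constant.
#pragma once

#include "common/common_types.h"

namespace Core::Hardware {

// Matches the value of the removed GlobalScheduler constant (4 cores).
constexpr u32 NUM_CPU_CORES = 4;

} // namespace Core::Hardware

With the constant defined once under Core::Hardware, scheduler.h and thread.cpp no longer each depend on GlobalScheduler to agree on the core count.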
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index eb196a690..b5ffa5418 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -124,8 +124,8 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
                "Thread yielding without being in front");
     scheduled_queue[core_id].yield(priority);
 
-    std::array<Thread*, NUM_CPU_CORES> current_threads;
-    for (u32 i = 0; i < NUM_CPU_CORES; i++) {
+    std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
+    for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
         current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
     }
 
@@ -177,8 +177,8 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
     // function...
     if (scheduled_queue[core_id].empty()) {
         // Here, "current_threads" is calculated after the ""yield"", unlike yield -1
-        std::array<Thread*, NUM_CPU_CORES> current_threads;
-        for (u32 i = 0; i < NUM_CPU_CORES; i++) {
+        std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
+        for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
             current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
         }
         for (auto& thread : suggested_queue[core_id]) {
@@ -208,7 +208,7 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
 }
 
 void GlobalScheduler::PreemptThreads() {
-    for (std::size_t core_id = 0; core_id < NUM_CPU_CORES; core_id++) {
+    for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
         const u32 priority = preemption_priorities[core_id];
 
         if (scheduled_queue[core_id].size(priority) > 0) {
@@ -349,7 +349,7 @@ bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread,
 }
 
 void GlobalScheduler::Shutdown() {
-    for (std::size_t core = 0; core < NUM_CPU_CORES; core++) {
+    for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         scheduled_queue[core].clear();
         suggested_queue[core].clear();
     }
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 14b77960a..96db049cb 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -10,6 +10,7 @@
 
 #include "common/common_types.h"
 #include "common/multi_level_queue.h"
+#include "core/hardware_properties.h"
 #include "core/hle/kernel/thread.h"
 
 namespace Core {
@@ -23,8 +24,6 @@ class Process;
 
 class GlobalScheduler final {
 public:
-    static constexpr u32 NUM_CPU_CORES = 4;
-
     explicit GlobalScheduler(Core::System& system);
     ~GlobalScheduler();
 
@@ -125,7 +124,7 @@ public:
     void PreemptThreads();
 
     u32 CpuCoresCount() const {
-        return NUM_CPU_CORES;
+        return Core::Hardware::NUM_CPU_CORES;
     }
 
     void SetReselectionPending() {
@@ -149,13 +148,15 @@ private:
     bool AskForReselectionOrMarkRedundant(Thread* current_thread, const Thread* winner);
 
     static constexpr u32 min_regular_priority = 2;
-    std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> scheduled_queue;
-    std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> suggested_queue;
+    std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
+        scheduled_queue;
+    std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
+        suggested_queue;
     std::atomic<bool> is_reselection_pending{false};
 
     // The priority levels at which the global scheduler preempts threads every 10 ms. They are
     // ordered from Core 0 to Core 3.
-    std::array<u32, NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
+    std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
 
     /// Lists all thread ids that aren't deleted/etc.
     std::vector<std::shared_ptr<Thread>> thread_list;
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index ee9ea7d67..43b30dd3d 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -15,6 +15,7 @@
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/core_timing_util.h"
+#include "core/hardware_properties.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/kernel.h"
@@ -431,7 +432,7 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
         const s32 old_core = processor_id;
         if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
             if (static_cast<s32>(ideal_core) < 0) {
-                processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES);
+                processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES);
             } else {
                 processor_id = ideal_core;
             }
@@ -455,7 +456,7 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
             scheduler.Unschedule(current_priority, static_cast<u32>(processor_id), this);
         }
 
-        for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+        for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
             if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
                 scheduler.Unsuggest(current_priority, core, this);
             }
@@ -466,7 +467,7 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
             scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this);
         }
 
-        for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+        for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
             if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
                 scheduler.Suggest(current_priority, core, this);
             }
@@ -485,7 +486,7 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
         scheduler.Unschedule(old_priority, static_cast<u32>(processor_id), this);
     }
 
-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
             scheduler.Unsuggest(old_priority, core, this);
         }
@@ -502,7 +503,7 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
         }
     }
 
-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
             scheduler.Suggest(current_priority, core, this);
         }
@@ -518,7 +519,7 @@ void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
         return;
     }
 
-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (((old_affinity_mask >> core) & 1) != 0) {
             if (core == static_cast<u32>(old_core)) {
                 scheduler.Unschedule(current_priority, core, this);
@@ -528,7 +529,7 @@ void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
         }
     }
 
-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (((affinity_mask >> core) & 1) != 0) {
             if (core == static_cast<u32>(processor_id)) {
                 scheduler.Schedule(current_priority, core, this);
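
The thread.cpp hunks all share one pattern: iterate core indices up to Core::Hardware::NUM_CPU_CORES and test the corresponding bit of the thread's affinity mask. A standalone sketch of that bit test, with hypothetical names and plain integer types rather than yuzu's own:

#include <cstdint>
#include <cstdio>

// Stand-in for Core::Hardware::NUM_CPU_CORES; illustrative only.
constexpr std::uint32_t kNumCpuCores = 4;

// Prints which cores a thread may run on, mirroring the
// ((affinity_mask >> core) & 1) check used in thread.cpp above.
void PrintAllowedCores(std::uint64_t affinity_mask) {
    for (std::uint32_t core = 0; core < kNumCpuCores; ++core) {
        if (((affinity_mask >> core) & 1) != 0) {
            std::printf("may run on core %u\n", core);
        }
    }
}

int main() {
    PrintAllowedCores(0b1011); // bits 0, 1 and 3 set: cores 0, 1 and 3 allowed
    return 0;
}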