From b164d8ee536dba526f9da2083433d529daf7b37b Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Fri, 29 Mar 2019 17:01:17 -0400
Subject: Implement a new Core Scheduler

---
 src/core/hle/kernel/scheduler.h | 220 +++++++++++++++++++++-------------------
 1 file changed, 117 insertions(+), 103 deletions(-)

(limited to 'src/core/hle/kernel/scheduler.h')

diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index b29bf7be8..50fa7376b 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -20,124 +20,141 @@ namespace Kernel {
 
 class Process;
 
-class Scheduler final {
+class GlobalScheduler final {
 public:
-    explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core);
-    ~Scheduler();
-
-    /// Returns whether there are any threads that are ready to run.
-    bool HaveReadyThreads() const;
-
-    /// Reschedules to the next available thread (call after current thread is suspended)
-    void Reschedule();
-
-    /// Gets the current running thread
-    Thread* GetCurrentThread() const;
-
-    /// Gets the timestamp for the last context switch in ticks.
-    u64 GetLastContextSwitchTicks() const;
+    static constexpr u32 NUM_CPU_CORES = 4;
 
+    GlobalScheduler() {
+        reselection_pending = false;
+    }
+    ~GlobalScheduler();
     /// Adds a new thread to the scheduler
     void AddThread(SharedPtr<Thread> thread);
 
     /// Removes a thread from the scheduler
     void RemoveThread(Thread* thread);
 
-    /// Schedules a thread that has become "ready"
-    void ScheduleThread(Thread* thread, u32 priority);
+    /// Returns a list of all threads managed by the scheduler
+    const std::vector<SharedPtr<Thread>>& GetThreadList() const {
+        return thread_list;
+    }
 
-    /// Unschedules a thread that was already scheduled
-    void UnscheduleThread(Thread* thread, u32 priority);
+    void Suggest(u32 priority, u32 core, Thread* thread) {
+        suggested_queue[core].add(thread, priority);
+    }
 
-    /// Sets the priority of a thread in the scheduler
-    void SetThreadPriority(Thread* thread, u32 priority);
+    void Unsuggest(u32 priority, u32 core, Thread* thread) {
+        suggested_queue[core].remove(thread, priority);
+    }
 
-    /// Gets the next suggested thread for load balancing
-    Thread* GetNextSuggestedThread(u32 core, u32 minimum_priority) const;
+    void Schedule(u32 priority, u32 core, Thread* thread) {
+        ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core.");
+        scheduled_queue[core].add(thread, priority);
+    }
 
-    /**
-     * YieldWithoutLoadBalancing -- analogous to normal yield on a system
-     * Moves the thread to the end of the ready queue for its priority, and then reschedules the
-     * system to the new head of the queue.
-     *
-     * Example (Single Core -- but can be extrapolated to multi):
-     * ready_queue[prio=0]: ThreadA, ThreadB, ThreadC (->exec order->)
-     * Currently Running: ThreadR
-     *
-     * ThreadR calls YieldWithoutLoadBalancing
-     *
-     * ThreadR is moved to the end of ready_queue[prio=0]:
-     * ready_queue[prio=0]: ThreadA, ThreadB, ThreadC, ThreadR (->exec order->)
-     * Currently Running: Nothing
-     *
-     * System is rescheduled (ThreadA is popped off of queue):
-     * ready_queue[prio=0]: ThreadB, ThreadC, ThreadR (->exec order->)
-     * Currently Running: ThreadA
-     *
-     * If the queue is empty at time of call, no yielding occurs. This does not cross between cores
-     * or priorities at all.
-     */
-    void YieldWithoutLoadBalancing(Thread* thread);
+    void SchedulePrepend(u32 priority, u32 core, Thread* thread) {
+        ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core.");
+        scheduled_queue[core].add(thread, priority, false);
+    }
 
-    /**
-     * YieldWithLoadBalancing -- yield but with better selection of the new running thread
-     * Moves the current thread to the end of the ready queue for its priority, then selects a
-     * 'suggested thread' (a thread on a different core that could run on this core) from the
-     * scheduler, changes its core, and reschedules the current core to that thread.
-     *
-     * Example (Dual Core -- can be extrapolated to Quad Core, this is just normal yield if it were
-     * single core):
-     * ready_queue[core=0][prio=0]: ThreadA, ThreadB (affinities not pictured as irrelevant
-     * ready_queue[core=1][prio=0]: ThreadC[affinity=both], ThreadD[affinity=core1only]
-     * Currently Running: ThreadQ on Core 0 || ThreadP on Core 1
-     *
-     * ThreadQ calls YieldWithLoadBalancing
-     *
-     * ThreadQ is moved to the end of ready_queue[core=0][prio=0]:
-     * ready_queue[core=0][prio=0]: ThreadA, ThreadB
-     * ready_queue[core=1][prio=0]: ThreadC[affinity=both], ThreadD[affinity=core1only]
-     * Currently Running: ThreadQ on Core 0 || ThreadP on Core 1
-     *
-     * A list of suggested threads for each core is compiled
-     * Suggested Threads: {ThreadC on Core 1}
-     * If this were quad core (as the switch is), there could be between 0 and 3 threads in this
-     * list. If there are more than one, the thread is selected by highest prio.
-     *
-     * ThreadC is core changed to Core 0:
-     * ready_queue[core=0][prio=0]: ThreadC, ThreadA, ThreadB, ThreadQ
-     * ready_queue[core=1][prio=0]: ThreadD
-     * Currently Running: None on Core 0 || ThreadP on Core 1
-     *
-     * System is rescheduled (ThreadC is popped off of queue):
-     * ready_queue[core=0][prio=0]: ThreadA, ThreadB, ThreadQ
-     * ready_queue[core=1][prio=0]: ThreadD
-     * Currently Running: ThreadC on Core 0 || ThreadP on Core 1
-     *
-     * If no suggested threads can be found this will behave just as normal yield. If there are
-     * multiple candidates for the suggested thread on a core, the highest prio is taken.
-     */
-    void YieldWithLoadBalancing(Thread* thread);
+    void Reschedule(u32 priority, u32 core, Thread* thread) {
+        scheduled_queue[core].remove(thread, priority);
+        scheduled_queue[core].add(thread, priority);
+    }
 
-    /// Currently unknown -- asserts as unimplemented on call
-    void YieldAndWaitForLoadBalancing(Thread* thread);
+    void Unschedule(u32 priority, u32 core, Thread* thread) {
+        scheduled_queue[core].remove(thread, priority);
+    }
 
-    /// Returns a list of all threads managed by the scheduler
-    const std::vector<SharedPtr<Thread>>& GetThreadList() const {
-        return thread_list;
+    void TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
+        bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
+        s32 source_core = thread->GetProcessorID();
+        if (source_core == destination_core || !schedulable)
+            return;
+        thread->SetProcessorID(destination_core);
+        if (source_core >= 0)
+            Unschedule(priority, source_core, thread);
+        if (destination_core >= 0) {
+            Unsuggest(priority, destination_core, thread);
+            Schedule(priority, destination_core, thread);
+        }
+        if (source_core >= 0)
+            Suggest(priority, source_core, thread);
+    }
+
+    void UnloadThread(s32 core);
+
+    void SelectThreads();
+    void SelectThread(u32 core);
+
+    bool HaveReadyThreads(u32 core_id) {
+        return !scheduled_queue[core_id].empty();
+    }
+
+    void YieldThread(Thread* thread);
+    void YieldThreadAndBalanceLoad(Thread* thread);
+    void YieldThreadAndWaitForLoadBalancing(Thread* thread);
+
+    u32 CpuCoresCount() const {
+        return NUM_CPU_CORES;
+    }
+
+    void SetReselectionPending() {
+        reselection_pending.store(true, std::memory_order_release);
+    }
+
+    bool IsReselectionPending() {
+        return reselection_pending.load(std::memory_order_acquire);
     }
 
 private:
-    /**
-     * Pops and returns the next thread from the thread queue
-     * @return A pointer to the next ready thread
-     */
-    Thread* PopNextReadyThread();
+    void AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner);
+
+    static constexpr u32 min_regular_priority = 2;
+    std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> scheduled_queue;
+    std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> suggested_queue;
+    std::atomic<bool> reselection_pending;
+
+    /// Lists all thread ids that aren't deleted/etc.
+    std::vector<SharedPtr<Thread>> thread_list;
+};
+
+class Scheduler final {
+public:
+    explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, const u32 id);
+    ~Scheduler();
+
+    /// Returns whether there are any threads that are ready to run.
+    bool HaveReadyThreads() const;
+
+    /// Reschedules to the next available thread (call after current thread is suspended)
+    void TryDoContextSwitch();
+
+    void UnloadThread();
+
+    void SelectThreads();
+
+    /// Gets the current running thread
+    Thread* GetCurrentThread() const;
+
+    Thread* GetSelectedThread() const;
+
+    /// Gets the timestamp for the last context switch in ticks.
+    u64 GetLastContextSwitchTicks() const;
+
+    bool ContextSwitchPending() const {
+        return context_switch_pending;
+    }
+private:
+    friend class GlobalScheduler;
 
     /**
      * Switches the CPU's active thread context to that of the specified thread
     * @param new_thread The thread to switch to
     */
-    void SwitchContext(Thread* new_thread);
+    void SwitchContext();
 
     /**
      * Called on every context switch to update the internal timestamp
      *
@@ -152,19 +169,16 @@ private:
      */
     void UpdateLastContextSwitchTime(Thread* thread, Process* process);
 
-    /// Lists all thread ids that aren't deleted/etc.
-    std::vector<SharedPtr<Thread>> thread_list;
-
-    /// Lists only ready thread ids.
-    Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT> ready_queue;
 
-    SharedPtr<Thread> current_thread = nullptr;
+    SharedPtr<Thread> selected_thread = nullptr;
 
+    Core::System& system;
     Core::ARM_Interface& cpu_core;
     u64 last_context_switch_time = 0;
+    u64 idle_selection_count = 0;
+    const u32 id;
 
-    Core::System& system;
-    static std::mutex scheduler_mutex;
+    bool context_switch_pending = false;
 };
 
 } // namespace Kernel
--
cgit v1.2.3
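
Illustration (not part of the patch above): the sketch below is a minimal, self-contained C++17 approximation of the per-core bookkeeping that GlobalScheduler::TransferToCore performs in this header. It substitutes plain std::deque priority buckets for Common::MultiLevelQueue, and every name in it (MiniThread, MiniGlobalScheduler, ScheduledCount) is hypothetical and exists only for this example.

// Minimal sketch of the TransferToCore bookkeeping, assuming simple std::deque
// priority buckets instead of Common::MultiLevelQueue. Hypothetical names only.
#include <array>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <iostream>

constexpr std::uint32_t NUM_CPU_CORES = 4;
constexpr std::uint32_t THREADPRIO_COUNT = 64;

struct MiniThread {
    std::int32_t processor_id = -1; // -1: not currently assigned to a core
    std::uint32_t priority = 44;
};

// One bucket per priority level, one set of buckets per core.
using PriorityBuckets = std::array<std::deque<MiniThread*>, THREADPRIO_COUNT>;

class MiniGlobalScheduler {
public:
    void Schedule(std::uint32_t prio, std::uint32_t core, MiniThread* t) {
        scheduled[core][prio].push_back(t);
    }
    void Unschedule(std::uint32_t prio, std::uint32_t core, MiniThread* t) {
        Remove(scheduled[core][prio], t);
    }
    void Suggest(std::uint32_t prio, std::uint32_t core, MiniThread* t) {
        suggested[core][prio].push_back(t);
    }
    void Unsuggest(std::uint32_t prio, std::uint32_t core, MiniThread* t) {
        Remove(suggested[core][prio], t);
    }

    // Mirrors the patch: pull the thread off its source core, queue it on the
    // destination core, and leave it behind as a migration suggestion.
    void TransferToCore(std::uint32_t prio, std::int32_t dest, MiniThread* t) {
        const std::int32_t src = t->processor_id;
        const bool schedulable = t->priority < THREADPRIO_COUNT;
        if (src == dest || !schedulable) {
            return;
        }
        t->processor_id = dest;
        if (src >= 0) {
            Unschedule(prio, static_cast<std::uint32_t>(src), t);
        }
        if (dest >= 0) {
            Unsuggest(prio, static_cast<std::uint32_t>(dest), t);
            Schedule(prio, static_cast<std::uint32_t>(dest), t);
        }
        if (src >= 0) {
            Suggest(prio, static_cast<std::uint32_t>(src), t);
        }
    }

    std::size_t ScheduledCount(std::uint32_t core) const {
        std::size_t total = 0;
        for (const auto& bucket : scheduled[core]) {
            total += bucket.size();
        }
        return total;
    }

private:
    static void Remove(std::deque<MiniThread*>& bucket, MiniThread* t) {
        for (auto it = bucket.begin(); it != bucket.end(); ++it) {
            if (*it == t) {
                bucket.erase(it);
                return;
            }
        }
    }

    std::array<PriorityBuckets, NUM_CPU_CORES> scheduled{};
    std::array<PriorityBuckets, NUM_CPU_CORES> suggested{};
};

int main() {
    MiniGlobalScheduler gs;
    MiniThread t{/*processor_id=*/0, /*priority=*/44};
    gs.Schedule(t.priority, 0, &t);

    gs.TransferToCore(t.priority, 1, &t); // migrate from core 0 to core 1
    std::cout << "core 0 scheduled: " << gs.ScheduledCount(0) << '\n'; // prints 0
    std::cout << "core 1 scheduled: " << gs.ScheduledCount(1) << '\n'; // prints 1
}

After the transfer, the thread no longer appears in core 0's scheduled set and shows up once in core 1's, while remaining visible to core 0 only as a migration suggestion, which is the same invariant the patch maintains through its Unschedule/Schedule/Suggest sequence.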