-rw-r--r--  src/core/core_timing.cpp           | 11
-rw-r--r--  src/core/core_timing.h             |  2
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  | 26
-rw-r--r--  src/core/hle/kernel/scheduler.h    |  2
-rw-r--r--  src/core/hle/kernel/svc.cpp        |  1
5 files changed, 23 insertions, 19 deletions
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index 5a7abcfca..c91ae9975 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -154,7 +154,7 @@ void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
basic_lock.unlock();
}
-std::optional<u64> CoreTiming::Advance() {
+std::optional<s64> CoreTiming::Advance() {
advance_lock.lock();
basic_lock.lock();
global_timer = GetGlobalTimeNs().count();
@@ -170,10 +170,11 @@ std::optional<u64> CoreTiming::Advance() {
}
basic_lock.lock();
+ global_timer = GetGlobalTimeNs().count();
}
if (!event_queue.empty()) {
- const u64 next_time = event_queue.front().time - global_timer;
+ const s64 next_time = event_queue.front().time - global_timer;
basic_lock.unlock();
advance_lock.unlock();
return next_time;
@@ -191,8 +192,10 @@ void CoreTiming::ThreadLoop() {
paused_set = false;
const auto next_time = Advance();
if (next_time) {
- std::chrono::nanoseconds next_time_ns = std::chrono::nanoseconds(*next_time);
- event.WaitFor(next_time_ns);
+ if (*next_time > 0) {
+ std::chrono::nanoseconds next_time_ns = std::chrono::nanoseconds(*next_time);
+ event.WaitFor(next_time_ns);
+ }
} else {
wait_set = true;
event.Wait();
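
The core_timing change above is easiest to read with signed arithmetic in mind: by the time Advance() computes event_queue.front().time - global_timer, the deadline may already have passed, so the delta can be negative; with u64 the subtraction wraps to a huge value and the timer thread oversleeps. A minimal sketch of the intended behaviour, using simplified stand-ins for the real CoreTiming members rather than the actual yuzu code:

#include <chrono>
#include <cstdint>
#include <optional>
#include <thread>

// Simplified stand-in for a CoreTiming event queue entry.
struct Event {
    std::int64_t time; // absolute deadline in nanoseconds
};

// Signed delta to the next event; may be negative when the event is overdue.
std::optional<std::int64_t> NextDelta(const Event* front, std::int64_t global_timer) {
    if (front == nullptr) {
        return std::nullopt; // nothing queued
    }
    return front->time - global_timer;
}

// Mirrors the ThreadLoop fix: only sleep for strictly positive deltas, so an
// overdue event is serviced immediately instead of waiting on a wrapped
// unsigned value.
void SleepUntilNext(std::optional<std::int64_t> next_time) {
    if (next_time && *next_time > 0) {
        std::this_thread::sleep_for(std::chrono::nanoseconds{*next_time});
    }
}
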
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index c70b605c8..032eb08aa 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -110,7 +110,7 @@ public:
std::chrono::nanoseconds GetGlobalTimeNs() const;
/// Checks for events manually and returns time in nanoseconds for next event, threadsafe.
- std::optional<u64> Advance();
+ std::optional<s64> Advance();
private:
struct Event;
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index d67d3c5cd..da77967dd 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -47,13 +47,13 @@ u32 GlobalScheduler::SelectThreads() {
ASSERT(is_locked);
const auto update_thread = [](Thread* thread, Scheduler& sched) {
sched.guard.lock();
- if (thread != sched.selected_thread.get()) {
+ if (thread != sched.selected_thread_set.get()) {
if (thread == nullptr) {
++sched.idle_selection_count;
}
- sched.selected_thread = SharedFrom(thread);
+ sched.selected_thread_set = SharedFrom(thread);
}
- const bool reschedule_pending = sched.selected_thread != sched.current_thread;
+ const bool reschedule_pending = sched.selected_thread_set != sched.current_thread;
sched.is_context_switch_pending = reschedule_pending;
std::atomic_thread_fence(std::memory_order_seq_cst);
sched.guard.unlock();
@@ -118,6 +118,8 @@ u32 GlobalScheduler::SelectThreads() {
suggested);
top_threads[candidate_core] = next;
break;
+ } else {
+ suggested = nullptr;
}
}
}
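
The added else branch in SelectThreads() guards against a stale candidate: once a suggested thread fails the migration check, it has to be cleared so the code after the loop does not treat the last-examined thread as viable. A compact sketch of that pattern; Thread, CanMigrate and the container are illustrative, not the scheduler's real API:

#include <vector>

struct Thread {
    bool migratable = false;
};

// Hypothetical predicate standing in for the scheduler's suitability checks.
bool CanMigrate(const Thread* thread) {
    return thread != nullptr && thread->migratable;
}

Thread* PickSuggested(const std::vector<Thread*>& suggestions) {
    Thread* suggested = nullptr;
    for (Thread* candidate : suggestions) {
        suggested = candidate;
        if (CanMigrate(suggested)) {
            break; // keep the viable candidate
        }
        suggested = nullptr; // reset, so a rejected candidate does not leak out of the loop
    }
    return suggested;
}
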
@@ -590,7 +592,7 @@ void Scheduler::OnThreadStart() {
}
void Scheduler::SwitchContextStep2() {
- Thread* previous_thread = current_thread.get();
+ Thread* previous_thread = current_thread_prev.get();
Thread* new_thread = selected_thread.get();
// Load context of new thread
@@ -606,8 +608,6 @@ void Scheduler::SwitchContextStep2() {
"Thread must be ready to become running.");
// Cancel any outstanding wakeup events for this thread
- current_thread = SharedFrom(new_thread);
- new_thread->SetStatus(ThreadStatus::Running);
new_thread->SetIsRunning(true);
auto* const thread_owner_process = current_thread->GetOwnerProcess();
@@ -622,21 +622,21 @@ void Scheduler::SwitchContextStep2() {
cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
cpu_core.ClearExclusiveState();
}
- } else {
- current_thread = nullptr;
- // Note: We do not reset the current process and current page table when idling because
- // technically we haven't changed processes, our threads are just paused.
}
- guard.unlock();
+
+ TryDoContextSwitch();
}
void Scheduler::SwitchContext() {
- Thread* previous_thread = current_thread.get();
+ current_thread_prev = current_thread;
+ selected_thread = selected_thread_set;
+ Thread* previous_thread = current_thread_prev.get();
Thread* new_thread = selected_thread.get();
+ current_thread = selected_thread;
is_context_switch_pending = false;
+ guard.unlock();
if (new_thread == previous_thread) {
- guard.unlock();
return;
}
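
The scheduler.cpp changes split selection from switching: SelectThreads() publishes its choice into selected_thread_set under the per-core guard, and SwitchContext() snapshots that choice (and the outgoing thread into current_thread_prev) before dropping the guard and doing the actual switch. A rough sketch of that snapshot-then-switch shape, assuming the guard is already held on entry as in the patch; the member names mirror the diff, the rest is illustrative:

#include <memory>
#include <mutex>

struct Thread {};

struct Scheduler {
    std::mutex guard;
    std::shared_ptr<Thread> current_thread;
    std::shared_ptr<Thread> selected_thread;      // frozen copy used while switching
    std::shared_ptr<Thread> current_thread_prev;  // thread being switched away from
    std::shared_ptr<Thread> selected_thread_set;  // written by selection under guard
    bool is_context_switch_pending = false;

    // Called with guard held, as in the patched SwitchContext().
    void SwitchContext() {
        current_thread_prev = current_thread;   // remember the outgoing thread
        selected_thread = selected_thread_set;  // freeze the scheduler's selection
        current_thread = selected_thread;
        is_context_switch_pending = false;
        guard.unlock();                         // heavy save/restore runs unlocked

        if (selected_thread == current_thread_prev) {
            return; // same thread selected: nothing to switch
        }
        // ...save the previous context, load the new one (elided)...
    }
};
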
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index f26a554f5..f73ca777e 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -249,6 +249,8 @@ private:
std::shared_ptr<Thread> current_thread = nullptr;
std::shared_ptr<Thread> selected_thread = nullptr;
+ std::shared_ptr<Thread> current_thread_prev = nullptr;
+ std::shared_ptr<Thread> selected_thread_set = nullptr;
std::shared_ptr<Thread> idle_thread = nullptr;
Core::System& system;
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 9f46a1758..5e9dd43bf 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -316,7 +316,6 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
/// Makes a blocking IPC call to an OS service.
static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
- std::lock_guard lock{HLE::g_hle_lock};
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
std::shared_ptr<ClientSession> session = handle_table.Get<ClientSession>(handle);
if (!session) {
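
Finally, dropping std::lock_guard lock{HLE::g_hle_lock} means SendSyncRequest no longer funnels every HLE service call through one global mutex; requests from different cores can proceed concurrently and rely on the kernel's existing, finer-grained locking. The contrast, as a minimal illustration only; the per-session mutex below is an assumption for the sketch, not something this patch adds:

#include <mutex>

std::mutex g_hle_lock; // old scheme: one lock serializes every HLE call

struct ServiceSession {
    std::mutex lock;   // illustrative per-object lock
    void Process() {}
};

void SendSyncRequestGlobal(ServiceSession& session) {
    std::lock_guard lk{g_hle_lock};   // all sessions contend on the same mutex
    session.Process();
}

void SendSyncRequestFineGrained(ServiceSession& session) {
    std::lock_guard lk{session.lock}; // independent sessions proceed in parallel
    session.Process();
}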