author     bunnei <bunneidev@gmail.com>   2020-06-28 18:37:50 +0200
committer  GitHub <noreply@github.com>    2020-06-28 18:37:50 +0200
commit     b05795d704e0c194215f815a5703db09e524b59a (patch)
tree       ecf4023b4ee0c91555c1d8263762fcb9dcb04a17 /src/core
parent     Merge pull request #4196 from ogniK5377/nrr-nro-fixes (diff)
parent     Core/Common: Address Feedback. (diff)
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/CMakeLists.txt | 14
-rw-r--r--  src/core/arm/arm_interface.cpp | 57
-rw-r--r--  src/core/arm/arm_interface.h | 20
-rw-r--r--  src/core/arm/cpu_interrupt_handler.cpp | 29
-rw-r--r--  src/core/arm/cpu_interrupt_handler.h | 39
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.cpp | 82
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.h | 7
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.cpp | 106
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.h | 26
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_cp15.cpp | 2
-rw-r--r--  src/core/arm/dynarmic/arm_exclusive_monitor.cpp | 76
-rw-r--r--  src/core/arm/dynarmic/arm_exclusive_monitor.h | 48
-rw-r--r--  src/core/arm/exclusive_monitor.cpp | 2
-rw-r--r--  src/core/arm/exclusive_monitor.h | 6
-rw-r--r--  src/core/arm/unicorn/arm_unicorn.cpp | 19
-rw-r--r--  src/core/arm/unicorn/arm_unicorn.h | 5
-rw-r--r--  src/core/core.cpp | 128
-rw-r--r--  src/core/core.h | 48
-rw-r--r--  src/core/core_manager.cpp | 67
-rw-r--r--  src/core/core_manager.h | 63
-rw-r--r--  src/core/core_timing.cpp | 256
-rw-r--r--  src/core/core_timing.h | 123
-rw-r--r--  src/core/core_timing_util.cpp | 29
-rw-r--r--  src/core/core_timing_util.h | 15
-rw-r--r--  src/core/cpu_manager.cpp | 368
-rw-r--r--  src/core/cpu_manager.h | 80
-rw-r--r--  src/core/gdbstub/gdbstub.cpp | 1
-rw-r--r--  src/core/hardware_properties.h | 4
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp | 212
-rw-r--r--  src/core/hle/kernel/address_arbiter.h | 3
-rw-r--r--  src/core/hle/kernel/client_port.cpp | 2
-rw-r--r--  src/core/hle/kernel/errors.h | 1
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp | 40
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 255
-rw-r--r--  src/core/hle/kernel/kernel.h | 39
-rw-r--r--  src/core/hle/kernel/mutex.cpp | 118
-rw-r--r--  src/core/hle/kernel/mutex.h | 4
-rw-r--r--  src/core/hle/kernel/physical_core.cpp | 52
-rw-r--r--  src/core/hle/kernel/physical_core.h | 44
-rw-r--r--  src/core/hle/kernel/process.cpp | 23
-rw-r--r--  src/core/hle/kernel/readable_event.cpp | 3
-rw-r--r--  src/core/hle/kernel/scheduler.cpp | 576
-rw-r--r--  src/core/hle/kernel/scheduler.h | 123
-rw-r--r--  src/core/hle/kernel/server_session.cpp | 16
-rw-r--r--  src/core/hle/kernel/svc.cpp | 464
-rw-r--r--  src/core/hle/kernel/svc_wrap.h | 137
-rw-r--r--  src/core/hle/kernel/synchronization.cpp | 137
-rw-r--r--  src/core/hle/kernel/synchronization_object.cpp | 64
-rw-r--r--  src/core/hle/kernel/synchronization_object.h | 18
-rw-r--r--  src/core/hle/kernel/thread.cpp | 424
-rw-r--r--  src/core/hle/kernel/thread.h | 277
-rw-r--r--  src/core/hle/kernel/time_manager.cpp | 23
-rw-r--r--  src/core/hle/kernel/time_manager.h | 4
-rw-r--r--  src/core/hle/service/hid/controllers/debug_pad.cpp | 2
-rw-r--r--  src/core/hle/service/hid/controllers/gesture.cpp | 2
-rw-r--r--  src/core/hle/service/hid/controllers/keyboard.cpp | 2
-rw-r--r--  src/core/hle/service/hid/controllers/mouse.cpp | 2
-rw-r--r--  src/core/hle/service/hid/controllers/npad.cpp | 2
-rw-r--r--  src/core/hle/service/hid/controllers/stubbed.cpp | 2
-rw-r--r--  src/core/hle/service/hid/controllers/touchscreen.cpp | 4
-rw-r--r--  src/core/hle/service/hid/controllers/xpad.cpp | 2
-rw-r--r--  src/core/hle/service/hid/hid.cpp | 16
-rw-r--r--  src/core/hle/service/hid/irs.cpp | 2
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp | 3
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.cpp | 64
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.h | 21
-rw-r--r--  src/core/hle/service/sm/sm.cpp | 2
-rw-r--r--  src/core/hle/service/time/standard_steady_clock_core.cpp | 5
-rw-r--r--  src/core/hle/service/time/tick_based_steady_clock_core.cpp | 5
-rw-r--r--  src/core/hle/service/time/time.cpp | 5
-rw-r--r--  src/core/hle/service/time/time_sharedmemory.cpp | 3
-rw-r--r--  src/core/hle/service/vi/vi.cpp | 2
-rw-r--r--  src/core/host_timing.cpp | 206
-rw-r--r--  src/core/host_timing.h | 160
-rw-r--r--  src/core/memory.cpp | 109
-rw-r--r--  src/core/memory.h | 67
-rw-r--r--  src/core/memory/cheat_engine.cpp | 8
-rw-r--r--  src/core/perf_stats.cpp | 2
-rw-r--r--  src/core/tools/freezer.cpp | 8
79 files changed, 3502 insertions, 1983 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index efbad628f..f87d67db5 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -7,6 +7,16 @@ endif()
add_library(core STATIC
arm/arm_interface.h
arm/arm_interface.cpp
+ arm/cpu_interrupt_handler.cpp
+ arm/cpu_interrupt_handler.h
+ arm/dynarmic/arm_dynarmic_32.cpp
+ arm/dynarmic/arm_dynarmic_32.h
+ arm/dynarmic/arm_dynarmic_64.cpp
+ arm/dynarmic/arm_dynarmic_64.h
+ arm/dynarmic/arm_dynarmic_cp15.cpp
+ arm/dynarmic/arm_dynarmic_cp15.h
+ arm/dynarmic/arm_exclusive_monitor.cpp
+ arm/dynarmic/arm_exclusive_monitor.h
arm/exclusive_monitor.cpp
arm/exclusive_monitor.h
arm/unicorn/arm_unicorn.cpp
@@ -15,8 +25,6 @@ add_library(core STATIC
constants.h
core.cpp
core.h
- core_manager.cpp
- core_manager.h
core_timing.cpp
core_timing.h
core_timing_util.cpp
@@ -547,8 +555,6 @@ add_library(core STATIC
hle/service/vi/vi_u.h
hle/service/wlan/wlan.cpp
hle/service/wlan/wlan.h
- host_timing.cpp
- host_timing.h
loader/deconstructed_rom_directory.cpp
loader/deconstructed_rom_directory.h
loader/elf.cpp
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp
index d079a1bc8..d2295ed90 100644
--- a/src/core/arm/arm_interface.cpp
+++ b/src/core/arm/arm_interface.cpp
@@ -139,6 +139,63 @@ std::optional<std::string> GetSymbolName(const Symbols& symbols, VAddr func_addr
constexpr u64 SEGMENT_BASE = 0x7100000000ull;
+std::vector<ARM_Interface::BacktraceEntry> ARM_Interface::GetBacktraceFromContext(
+ System& system, const ThreadContext64& ctx) {
+ std::vector<BacktraceEntry> out;
+ auto& memory = system.Memory();
+
+ auto fp = ctx.cpu_registers[29];
+ auto lr = ctx.cpu_registers[30];
+ while (true) {
+ out.push_back({"", 0, lr, 0});
+ if (!fp) {
+ break;
+ }
+ lr = memory.Read64(fp + 8) - 4;
+ fp = memory.Read64(fp);
+ }
+
+ std::map<VAddr, std::string> modules;
+ auto& loader{system.GetAppLoader()};
+ if (loader.ReadNSOModules(modules) != Loader::ResultStatus::Success) {
+ return {};
+ }
+
+ std::map<std::string, Symbols> symbols;
+ for (const auto& module : modules) {
+ symbols.insert_or_assign(module.second, GetSymbols(module.first, memory));
+ }
+
+ for (auto& entry : out) {
+ VAddr base = 0;
+ for (auto iter = modules.rbegin(); iter != modules.rend(); ++iter) {
+ const auto& module{*iter};
+ if (entry.original_address >= module.first) {
+ entry.module = module.second;
+ base = module.first;
+ break;
+ }
+ }
+
+ entry.offset = entry.original_address - base;
+ entry.address = SEGMENT_BASE + entry.offset;
+
+ if (entry.module.empty())
+ entry.module = "unknown";
+
+ const auto symbol_set = symbols.find(entry.module);
+ if (symbol_set != symbols.end()) {
+ const auto symbol = GetSymbolName(symbol_set->second, entry.offset);
+ if (symbol.has_value()) {
+ // TODO(DarkLordZach): Add demangling of symbol names.
+ entry.name = *symbol;
+ }
+ }
+ }
+
+ return out;
+}
+
std::vector<ARM_Interface::BacktraceEntry> ARM_Interface::GetBacktrace() const {
std::vector<BacktraceEntry> out;
auto& memory = system.Memory();
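For context, the walk in GetBacktraceFromContext above relies on the AArch64 frame-record layout: x29 (fp) points to a pair {previous fp, saved lr}, so each step reads the return address at fp+8 (minus 4 to land on the call site) and then follows fp to the caller's record, stopping at a null fp. A minimal standalone sketch of the same loop; read64 is a hypothetical stand-in for guest-memory access, not yuzu's API:

    #include <cstdint>
    #include <functional>
    #include <vector>

    // Sketch of the AArch64 frame-record walk used by the backtrace code.
    std::vector<std::uint64_t> WalkReturnAddresses(
        std::uint64_t fp, std::uint64_t lr,
        const std::function<std::uint64_t(std::uint64_t)>& read64) {
        std::vector<std::uint64_t> out;
        while (true) {
            out.push_back(lr); // record the current return address
            if (fp == 0) {
                break; // a null frame pointer terminates the chain
            }
            lr = read64(fp + 8) - 4; // saved lr; -4 points at the branch itself
            fp = read64(fp);         // follow to the previous frame record
        }
        return out;
    }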
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index cb2e640e2..1f24051e4 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -7,6 +7,7 @@
#include <array>
#include <vector>
#include "common/common_types.h"
+#include "core/hardware_properties.h"
namespace Common {
struct PageTable;
@@ -18,25 +19,29 @@ enum class VMAPermission : u8;
namespace Core {
class System;
+class CPUInterruptHandler;
+
+using CPUInterrupts = std::array<CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>;
/// Generic ARMv8 CPU interface
class ARM_Interface : NonCopyable {
public:
- explicit ARM_Interface(System& system_) : system{system_} {}
+ explicit ARM_Interface(System& system_, CPUInterrupts& interrupt_handlers, bool uses_wall_clock)
+ : system{system_}, interrupt_handlers{interrupt_handlers}, uses_wall_clock{
+ uses_wall_clock} {}
virtual ~ARM_Interface() = default;
struct ThreadContext32 {
std::array<u32, 16> cpu_registers{};
+ std::array<u32, 64> extension_registers{};
u32 cpsr{};
- std::array<u8, 4> padding{};
- std::array<u64, 32> fprs{};
u32 fpscr{};
u32 fpexc{};
u32 tpidr{};
};
// Internally within the kernel, it expects the AArch32 version of the
// thread context to be 344 bytes in size.
- static_assert(sizeof(ThreadContext32) == 0x158);
+ static_assert(sizeof(ThreadContext32) == 0x150);
struct ThreadContext64 {
std::array<u64, 31> cpu_registers{};
@@ -143,6 +148,8 @@ public:
*/
virtual void SetTPIDR_EL0(u64 value) = 0;
+ virtual void ChangeProcessorID(std::size_t new_core_id) = 0;
+
virtual void SaveContext(ThreadContext32& ctx) = 0;
virtual void SaveContext(ThreadContext64& ctx) = 0;
virtual void LoadContext(const ThreadContext32& ctx) = 0;
@@ -162,6 +169,9 @@ public:
std::string name;
};
+ static std::vector<BacktraceEntry> GetBacktraceFromContext(System& system,
+ const ThreadContext64& ctx);
+
std::vector<BacktraceEntry> GetBacktrace() const;
/// fp (= r29) points to the last frame record.
@@ -175,6 +185,8 @@ public:
protected:
/// System context that this ARM interface is running under.
System& system;
+ CPUInterrupts& interrupt_handlers;
+ bool uses_wall_clock;
};
} // namespace Core
diff --git a/src/core/arm/cpu_interrupt_handler.cpp b/src/core/arm/cpu_interrupt_handler.cpp
new file mode 100644
index 000000000..2f1a1a269
--- /dev/null
+++ b/src/core/arm/cpu_interrupt_handler.cpp
@@ -0,0 +1,29 @@
+// Copyright 2020 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/thread.h"
+#include "core/arm/cpu_interrupt_handler.h"
+
+namespace Core {
+
+CPUInterruptHandler::CPUInterruptHandler() : is_interrupted{} {
+ interrupt_event = std::make_unique<Common::Event>();
+}
+
+CPUInterruptHandler::~CPUInterruptHandler() = default;
+
+void CPUInterruptHandler::SetInterrupt(bool is_interrupted_) {
+ if (is_interrupted_) {
+ interrupt_event->Set();
+ }
+ this->is_interrupted = is_interrupted_;
+}
+
+void CPUInterruptHandler::AwaitInterrupt() {
+ interrupt_event->Wait();
+}
+
+} // namespace Core
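SetInterrupt pairs a plain flag with a Common::Event: the JIT polls IsInterrupted() between blocks, while an idle core thread parks in AwaitInterrupt() until another thread raises the flag. A rough usage sketch under those assumptions (illustration only, not yuzu's actual core loop):

    #include "core/arm/cpu_interrupt_handler.h"

    // Hedged sketch of the handshake between an idle core and a waker thread.
    void IdleUntilInterrupted(Core::CPUInterruptHandler& handler) {
        handler.AwaitInterrupt();    // blocks on the internal event until signalled
        handler.SetInterrupt(false); // acknowledge: clear the flag for the next run
        // ...select a thread and re-enter the JIT...
    }

    void InterruptCore(Core::CPUInterruptHandler& handler) {
        handler.SetInterrupt(true);  // sets the flag and signals the event
    }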
diff --git a/src/core/arm/cpu_interrupt_handler.h b/src/core/arm/cpu_interrupt_handler.h
new file mode 100644
index 000000000..3d062d326
--- /dev/null
+++ b/src/core/arm/cpu_interrupt_handler.h
@@ -0,0 +1,39 @@
+// Copyright 2020 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+
+namespace Common {
+class Event;
+}
+
+namespace Core {
+
+class CPUInterruptHandler {
+public:
+ CPUInterruptHandler();
+ ~CPUInterruptHandler();
+
+ CPUInterruptHandler(const CPUInterruptHandler&) = delete;
+ CPUInterruptHandler& operator=(const CPUInterruptHandler&) = delete;
+
+ CPUInterruptHandler(CPUInterruptHandler&&) = default;
+ CPUInterruptHandler& operator=(CPUInterruptHandler&&) = default;
+
+ bool IsInterrupted() const {
+ return is_interrupted;
+ }
+
+ void SetInterrupt(bool is_interrupted);
+
+ void AwaitInterrupt();
+
+private:
+ bool is_interrupted{};
+ std::unique_ptr<Common::Event> interrupt_event;
+};
+
+} // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 4c8663d03..0d4ab95b7 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -7,15 +7,17 @@
#include <dynarmic/A32/a32.h>
#include <dynarmic/A32/config.h>
#include <dynarmic/A32/context.h>
-#include "common/microprofile.h"
+#include "common/logging/log.h"
+#include "common/page_table.h"
+#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/dynarmic/arm_dynarmic_32.h"
-#include "core/arm/dynarmic/arm_dynarmic_64.h"
#include "core/arm/dynarmic/arm_dynarmic_cp15.h"
+#include "core/arm/dynarmic/arm_exclusive_monitor.h"
#include "core/core.h"
-#include "core/core_manager.h"
#include "core/core_timing.h"
#include "core/hle/kernel/svc.h"
#include "core/memory.h"
+#include "core/settings.h"
namespace Core {
@@ -49,6 +51,19 @@ public:
parent.system.Memory().Write64(vaddr, value);
}
+ bool MemoryWriteExclusive8(u32 vaddr, u8 value, u8 expected) override {
+ return parent.system.Memory().WriteExclusive8(vaddr, value, expected);
+ }
+ bool MemoryWriteExclusive16(u32 vaddr, u16 value, u16 expected) override {
+ return parent.system.Memory().WriteExclusive16(vaddr, value, expected);
+ }
+ bool MemoryWriteExclusive32(u32 vaddr, u32 value, u32 expected) override {
+ return parent.system.Memory().WriteExclusive32(vaddr, value, expected);
+ }
+ bool MemoryWriteExclusive64(u32 vaddr, u64 value, u64 expected) override {
+ return parent.system.Memory().WriteExclusive64(vaddr, value, expected);
+ }
+
void InterpreterFallback(u32 pc, std::size_t num_instructions) override {
UNIMPLEMENTED_MSG("This should never happen, pc = {:08X}, code = {:08X}", pc,
MemoryReadCode(pc));
@@ -72,24 +87,36 @@ public:
}
void AddTicks(u64 ticks) override {
+ if (parent.uses_wall_clock) {
+ return;
+ }
// Divide the number of ticks by the number of CPU cores. TODO(Subv): This yields only a
// rough approximation of the number of executed ticks in the system, and it may be thrown off
// if not all cores are doing a similar amount of work. Instead of doing this, we should
// devise a way to keep timing consistent across all cores without increasing the ticks 4
// times.
- u64 amortized_ticks = (ticks - num_interpreted_instructions) / Core::NUM_CPU_CORES;
+ u64 amortized_ticks =
+ (ticks - num_interpreted_instructions) / Core::Hardware::NUM_CPU_CORES;
// Always execute at least one tick.
amortized_ticks = std::max<u64>(amortized_ticks, 1);
parent.system.CoreTiming().AddTicks(amortized_ticks);
num_interpreted_instructions = 0;
}
+
u64 GetTicksRemaining() override {
- return std::max(parent.system.CoreTiming().GetDowncount(), {});
+ if (parent.uses_wall_clock) {
+ if (!parent.interrupt_handlers[parent.core_index].IsInterrupted()) {
+ return minimum_run_cycles;
+ }
+ return 0U;
+ }
+ return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
}
ARM_Dynarmic_32& parent;
std::size_t num_interpreted_instructions{};
+ static constexpr u64 minimum_run_cycles = 1000U;
};
std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable& page_table,
@@ -100,13 +127,31 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable&
// config.page_table = &page_table.pointers;
config.coprocessors[15] = cp15;
config.define_unpredictable_behaviour = true;
+ static constexpr std::size_t PAGE_BITS = 12;
+ static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - PAGE_BITS);
+ config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
+ page_table.pointers.data());
+ config.absolute_offset_page_table = true;
+ config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
+ config.only_detect_misalignment_via_page_table_on_page_boundary = true;
+
+ // Multi-process state
+ config.processor_id = core_index;
+ config.global_monitor = &exclusive_monitor.monitor;
+
+ // Timing
+ config.wall_clock_cntpct = uses_wall_clock;
+
+ // Optimizations
+ if (Settings::values.disable_cpu_opt) {
+ config.enable_optimizations = false;
+ config.enable_fast_dispatch = false;
+ }
+
return std::make_unique<Dynarmic::A32::Jit>(config);
}
-MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_32, "ARM JIT", "Dynarmic", MP_RGB(255, 64, 64));
-
void ARM_Dynarmic_32::Run() {
- MICROPROFILE_SCOPE(ARM_Jit_Dynarmic_32);
jit->Run();
}
@@ -114,9 +159,11 @@ void ARM_Dynarmic_32::Step() {
jit->Step();
}
-ARM_Dynarmic_32::ARM_Dynarmic_32(System& system, ExclusiveMonitor& exclusive_monitor,
+ARM_Dynarmic_32::ARM_Dynarmic_32(System& system, CPUInterrupts& interrupt_handlers,
+ bool uses_wall_clock, ExclusiveMonitor& exclusive_monitor,
std::size_t core_index)
- : ARM_Interface{system}, cb(std::make_unique<DynarmicCallbacks32>(*this)),
+ : ARM_Interface{system, interrupt_handlers, uses_wall_clock},
+ cb(std::make_unique<DynarmicCallbacks32>(*this)),
cp15(std::make_shared<DynarmicCP15>(*this)), core_index{core_index},
exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
@@ -168,17 +215,25 @@ void ARM_Dynarmic_32::SetTPIDR_EL0(u64 value) {
cp15->uprw = static_cast<u32>(value);
}
+void ARM_Dynarmic_32::ChangeProcessorID(std::size_t new_core_id) {
+ jit->ChangeProcessorID(new_core_id);
+}
+
void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
Dynarmic::A32::Context context;
jit->SaveContext(context);
ctx.cpu_registers = context.Regs();
+ ctx.extension_registers = context.ExtRegs();
ctx.cpsr = context.Cpsr();
+ ctx.fpscr = context.Fpscr();
}
void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) {
Dynarmic::A32::Context context;
context.Regs() = ctx.cpu_registers;
+ context.ExtRegs() = ctx.extension_registers;
context.SetCpsr(ctx.cpsr);
+ context.SetFpscr(ctx.fpscr);
jit->LoadContext(context);
}
@@ -187,10 +242,15 @@ void ARM_Dynarmic_32::PrepareReschedule() {
}
void ARM_Dynarmic_32::ClearInstructionCache() {
+ if (!jit) {
+ return;
+ }
jit->ClearCache();
}
-void ARM_Dynarmic_32::ClearExclusiveState() {}
+void ARM_Dynarmic_32::ClearExclusiveState() {
+ jit->ClearExclusiveState();
+}
void ARM_Dynarmic_32::PageTableChanged(Common::PageTable& page_table,
std::size_t new_address_space_size_in_bits) {
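Two timing modes meet in the callbacks above. In the legacy mode, every JIT block reports its ticks and AddTicks divides them by NUM_CPU_CORES (4) so that four cores advancing in parallel approximate one shared timeline; in wall-clock mode AddTicks is a no-op and GetTicksRemaining simply grants minimum_run_cycles (1000) until an interrupt is pending. A worked example of the amortization arithmetic:

    #include <algorithm>
    #include <cstdint>

    constexpr std::uint64_t NUM_CPU_CORES = 4; // Core::Hardware::NUM_CPU_CORES

    // Same arithmetic as AddTicks: interpreted instructions were already
    // counted, and every block advances the shared clock by at least one tick.
    std::uint64_t AmortizedTicks(std::uint64_t ticks, std::uint64_t interpreted) {
        const std::uint64_t amortized = (ticks - interpreted) / NUM_CPU_CORES;
        return std::max<std::uint64_t>(amortized, 1);
    }

    // e.g. a block of 10000 ticks with 200 interpreted instructions advances
    // CoreTiming by (10000 - 200) / 4 = 2450 ticks.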
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.h b/src/core/arm/dynarmic/arm_dynarmic_32.h
index e5b92d7bb..2bab31b92 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.h
@@ -9,7 +9,7 @@
#include <dynarmic/A32/a32.h>
#include <dynarmic/A64/a64.h>
-#include <dynarmic/A64/exclusive_monitor.h>
+#include <dynarmic/exclusive_monitor.h>
#include "common/common_types.h"
#include "common/hash.h"
#include "core/arm/arm_interface.h"
@@ -21,6 +21,7 @@ class Memory;
namespace Core {
+class CPUInterruptHandler;
class DynarmicCallbacks32;
class DynarmicCP15;
class DynarmicExclusiveMonitor;
@@ -28,7 +29,8 @@ class System;
class ARM_Dynarmic_32 final : public ARM_Interface {
public:
- ARM_Dynarmic_32(System& system, ExclusiveMonitor& exclusive_monitor, std::size_t core_index);
+ ARM_Dynarmic_32(System& system, CPUInterrupts& interrupt_handlers, bool uses_wall_clock,
+ ExclusiveMonitor& exclusive_monitor, std::size_t core_index);
~ARM_Dynarmic_32() override;
void SetPC(u64 pc) override;
@@ -45,6 +47,7 @@ public:
void SetTlsAddress(VAddr address) override;
void SetTPIDR_EL0(u64 value) override;
u64 GetTPIDR_EL0() const override;
+ void ChangeProcessorID(std::size_t new_core_id) override;
void SaveContext(ThreadContext32& ctx) override;
void SaveContext(ThreadContext64& ctx) override {}
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 5f5e36d94..790981034 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -7,11 +7,11 @@
#include <dynarmic/A64/a64.h>
#include <dynarmic/A64/config.h>
#include "common/logging/log.h"
-#include "common/microprofile.h"
#include "common/page_table.h"
+#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
+#include "core/arm/dynarmic/arm_exclusive_monitor.h"
#include "core/core.h"
-#include "core/core_manager.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/gdbstub/gdbstub.h"
@@ -65,6 +65,22 @@ public:
memory.Write64(vaddr + 8, value[1]);
}
+ bool MemoryWriteExclusive8(u64 vaddr, std::uint8_t value, std::uint8_t expected) override {
+ return parent.system.Memory().WriteExclusive8(vaddr, value, expected);
+ }
+ bool MemoryWriteExclusive16(u64 vaddr, std::uint16_t value, std::uint16_t expected) override {
+ return parent.system.Memory().WriteExclusive16(vaddr, value, expected);
+ }
+ bool MemoryWriteExclusive32(u64 vaddr, std::uint32_t value, std::uint32_t expected) override {
+ return parent.system.Memory().WriteExclusive32(vaddr, value, expected);
+ }
+ bool MemoryWriteExclusive64(u64 vaddr, std::uint64_t value, std::uint64_t expected) override {
+ return parent.system.Memory().WriteExclusive64(vaddr, value, expected);
+ }
+ bool MemoryWriteExclusive128(u64 vaddr, Vector value, Vector expected) override {
+ return parent.system.Memory().WriteExclusive128(vaddr, value, expected);
+ }
+
void InterpreterFallback(u64 pc, std::size_t num_instructions) override {
LOG_INFO(Core_ARM, "Unicorn fallback @ 0x{:X} for {} instructions (instr = {:08X})", pc,
num_instructions, MemoryReadCode(pc));
@@ -108,29 +124,42 @@ public:
}
void AddTicks(u64 ticks) override {
+ if (parent.uses_wall_clock) {
+ return;
+ }
// Divide the number of ticks by the number of CPU cores. TODO(Subv): This yields only a
// rough approximation of the number of executed ticks in the system, and it may be thrown off
// if not all cores are doing a similar amount of work. Instead of doing this, we should
// devise a way to keep timing consistent across all cores without increasing the ticks 4
// times.
- u64 amortized_ticks = (ticks - num_interpreted_instructions) / Core::NUM_CPU_CORES;
+ u64 amortized_ticks =
+ (ticks - num_interpreted_instructions) / Core::Hardware::NUM_CPU_CORES;
// Always execute at least one tick.
amortized_ticks = std::max<u64>(amortized_ticks, 1);
parent.system.CoreTiming().AddTicks(amortized_ticks);
num_interpreted_instructions = 0;
}
+
u64 GetTicksRemaining() override {
- return std::max(parent.system.CoreTiming().GetDowncount(), s64{0});
+ if (parent.uses_wall_clock) {
+ if (!parent.interrupt_handlers[parent.core_index].IsInterrupted()) {
+ return minimum_run_cycles;
+ }
+ return 0U;
+ }
+ return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
}
+
u64 GetCNTPCT() override {
- return Timing::CpuCyclesToClockCycles(parent.system.CoreTiming().GetTicks());
+ return parent.system.CoreTiming().GetClockTicks();
}
ARM_Dynarmic_64& parent;
std::size_t num_interpreted_instructions = 0;
u64 tpidrro_el0 = 0;
u64 tpidr_el0 = 0;
+ static constexpr u64 minimum_run_cycles = 1000U;
};
std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable& page_table,
@@ -168,14 +197,13 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
config.enable_fast_dispatch = false;
}
+ // Timing
+ config.wall_clock_cntpct = uses_wall_clock;
+
return std::make_shared<Dynarmic::A64::Jit>(config);
}
-MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_64, "ARM JIT", "Dynarmic", MP_RGB(255, 64, 64));
-
void ARM_Dynarmic_64::Run() {
- MICROPROFILE_SCOPE(ARM_Jit_Dynarmic_64);
-
jit->Run();
}
@@ -183,11 +211,16 @@ void ARM_Dynarmic_64::Step() {
cb->InterpreterFallback(jit->GetPC(), 1);
}
-ARM_Dynarmic_64::ARM_Dynarmic_64(System& system, ExclusiveMonitor& exclusive_monitor,
+ARM_Dynarmic_64::ARM_Dynarmic_64(System& system, CPUInterrupts& interrupt_handlers,
+ bool uses_wall_clock, ExclusiveMonitor& exclusive_monitor,
std::size_t core_index)
- : ARM_Interface{system}, cb(std::make_unique<DynarmicCallbacks64>(*this)),
- inner_unicorn{system, ARM_Unicorn::Arch::AArch64}, core_index{core_index},
- exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
+ : ARM_Interface{system, interrupt_handlers, uses_wall_clock},
+ cb(std::make_unique<DynarmicCallbacks64>(*this)), inner_unicorn{system, interrupt_handlers,
+ uses_wall_clock,
+ ARM_Unicorn::Arch::AArch64,
+ core_index},
+ core_index{core_index}, exclusive_monitor{
+ dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
ARM_Dynarmic_64::~ARM_Dynarmic_64() = default;
@@ -239,6 +272,10 @@ void ARM_Dynarmic_64::SetTPIDR_EL0(u64 value) {
cb->tpidr_el0 = value;
}
+void ARM_Dynarmic_64::ChangeProcessorID(std::size_t new_core_id) {
+ jit->ChangeProcessorID(new_core_id);
+}
+
void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) {
ctx.cpu_registers = jit->GetRegisters();
ctx.sp = jit->GetSP();
@@ -266,6 +303,9 @@ void ARM_Dynarmic_64::PrepareReschedule() {
}
void ARM_Dynarmic_64::ClearInstructionCache() {
+ if (!jit) {
+ return;
+ }
jit->ClearCache();
}
@@ -285,44 +325,4 @@ void ARM_Dynarmic_64::PageTableChanged(Common::PageTable& page_table,
jit_cache.emplace(key, jit);
}
-DynarmicExclusiveMonitor::DynarmicExclusiveMonitor(Memory::Memory& memory, std::size_t core_count)
- : monitor(core_count), memory{memory} {}
-
-DynarmicExclusiveMonitor::~DynarmicExclusiveMonitor() = default;
-
-void DynarmicExclusiveMonitor::SetExclusive(std::size_t core_index, VAddr addr) {
- // Size doesn't actually matter.
- monitor.Mark(core_index, addr, 16);
-}
-
-void DynarmicExclusiveMonitor::ClearExclusive() {
- monitor.Clear();
-}
-
-bool DynarmicExclusiveMonitor::ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) {
- return monitor.DoExclusiveOperation(core_index, vaddr, 1, [&] { memory.Write8(vaddr, value); });
-}
-
-bool DynarmicExclusiveMonitor::ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) {
- return monitor.DoExclusiveOperation(core_index, vaddr, 2,
- [&] { memory.Write16(vaddr, value); });
-}
-
-bool DynarmicExclusiveMonitor::ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) {
- return monitor.DoExclusiveOperation(core_index, vaddr, 4,
- [&] { memory.Write32(vaddr, value); });
-}
-
-bool DynarmicExclusiveMonitor::ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) {
- return monitor.DoExclusiveOperation(core_index, vaddr, 8,
- [&] { memory.Write64(vaddr, value); });
-}
-
-bool DynarmicExclusiveMonitor::ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) {
- return monitor.DoExclusiveOperation(core_index, vaddr, 16, [&] {
- memory.Write64(vaddr + 0, value[0]);
- memory.Write64(vaddr + 8, value[1]);
- });
-}
-
} // namespace Core
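Note that GetCNTPCT (the guest's CNTPCT_EL0 counter) is now read straight from CoreTiming::GetClockTicks() instead of being converted at the call site. For reference, the conversion the removed path performed, using the constants from core/hardware_properties.h; a sketch only, since the in-tree helper also guards against 64-bit overflow:

    #include <cstdint>

    // Constants as in core/hardware_properties.h (Switch CPU and counter rates).
    constexpr std::uint64_t BASE_CLOCK_RATE = 1019215872; // CPU cycles per second
    constexpr std::uint64_t CNTFREQ = 19200000;           // counter ticks per second

    // CPU-cycle -> counter-tick conversion in the style of CpuCyclesToClockCycles.
    constexpr std::uint64_t CpuCyclesToClockCycles(std::uint64_t cycles) {
        return (cycles * CNTFREQ) / BASE_CLOCK_RATE;
    }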
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.h b/src/core/arm/dynarmic/arm_dynarmic_64.h
index 647cecaf0..403c55961 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.h
@@ -8,7 +8,6 @@
#include <unordered_map>
#include <dynarmic/A64/a64.h>
-#include <dynarmic/A64/exclusive_monitor.h>
#include "common/common_types.h"
#include "common/hash.h"
#include "core/arm/arm_interface.h"
@@ -22,12 +21,14 @@ class Memory;
namespace Core {
class DynarmicCallbacks64;
+class CPUInterruptHandler;
class DynarmicExclusiveMonitor;
class System;
class ARM_Dynarmic_64 final : public ARM_Interface {
public:
- ARM_Dynarmic_64(System& system, ExclusiveMonitor& exclusive_monitor, std::size_t core_index);
+ ARM_Dynarmic_64(System& system, CPUInterrupts& interrupt_handlers, bool uses_wall_clock,
+ ExclusiveMonitor& exclusive_monitor, std::size_t core_index);
~ARM_Dynarmic_64() override;
void SetPC(u64 pc) override;
@@ -44,6 +45,7 @@ public:
void SetTlsAddress(VAddr address) override;
void SetTPIDR_EL0(u64 value) override;
u64 GetTPIDR_EL0() const override;
+ void ChangeProcessorID(std::size_t new_core_id) override;
void SaveContext(ThreadContext32& ctx) override {}
void SaveContext(ThreadContext64& ctx) override;
@@ -75,24 +77,4 @@ private:
DynarmicExclusiveMonitor& exclusive_monitor;
};
-class DynarmicExclusiveMonitor final : public ExclusiveMonitor {
-public:
- explicit DynarmicExclusiveMonitor(Memory::Memory& memory, std::size_t core_count);
- ~DynarmicExclusiveMonitor() override;
-
- void SetExclusive(std::size_t core_index, VAddr addr) override;
- void ClearExclusive() override;
-
- bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) override;
- bool ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) override;
- bool ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) override;
- bool ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) override;
- bool ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) override;
-
-private:
- friend class ARM_Dynarmic_64;
- Dynarmic::A64::ExclusiveMonitor monitor;
- Core::Memory::Memory& memory;
-};
-
} // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp b/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp
index d43e4dd70..54556e0f9 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp
@@ -97,7 +97,7 @@ CallbackOrAccessTwoWords DynarmicCP15::CompileGetTwoWords(bool two, unsigned opc
const auto callback = static_cast<u64 (*)(Dynarmic::A32::Jit*, void*, u32, u32)>(
[](Dynarmic::A32::Jit*, void* arg, u32, u32) -> u64 {
ARM_Dynarmic_32& parent = *(ARM_Dynarmic_32*)arg;
- return Timing::CpuCyclesToClockCycles(parent.system.CoreTiming().GetTicks());
+ return parent.system.CoreTiming().GetClockTicks();
});
return Dynarmic::A32::Coprocessor::Callback{callback, (void*)&parent};
}
diff --git a/src/core/arm/dynarmic/arm_exclusive_monitor.cpp b/src/core/arm/dynarmic/arm_exclusive_monitor.cpp
new file mode 100644
index 000000000..4e209f6a5
--- /dev/null
+++ b/src/core/arm/dynarmic/arm_exclusive_monitor.cpp
@@ -0,0 +1,76 @@
+// Copyright 2018 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <cinttypes>
+#include <memory>
+#include "core/arm/dynarmic/arm_exclusive_monitor.h"
+#include "core/memory.h"
+
+namespace Core {
+
+DynarmicExclusiveMonitor::DynarmicExclusiveMonitor(Memory::Memory& memory, std::size_t core_count)
+ : monitor(core_count), memory{memory} {}
+
+DynarmicExclusiveMonitor::~DynarmicExclusiveMonitor() = default;
+
+u8 DynarmicExclusiveMonitor::ExclusiveRead8(std::size_t core_index, VAddr addr) {
+ return monitor.ReadAndMark<u8>(core_index, addr, [&]() -> u8 { return memory.Read8(addr); });
+}
+
+u16 DynarmicExclusiveMonitor::ExclusiveRead16(std::size_t core_index, VAddr addr) {
+ return monitor.ReadAndMark<u16>(core_index, addr, [&]() -> u16 { return memory.Read16(addr); });
+}
+
+u32 DynarmicExclusiveMonitor::ExclusiveRead32(std::size_t core_index, VAddr addr) {
+ return monitor.ReadAndMark<u32>(core_index, addr, [&]() -> u32 { return memory.Read32(addr); });
+}
+
+u64 DynarmicExclusiveMonitor::ExclusiveRead64(std::size_t core_index, VAddr addr) {
+ return monitor.ReadAndMark<u64>(core_index, addr, [&]() -> u64 { return memory.Read64(addr); });
+}
+
+u128 DynarmicExclusiveMonitor::ExclusiveRead128(std::size_t core_index, VAddr addr) {
+ return monitor.ReadAndMark<u128>(core_index, addr, [&]() -> u128 {
+ u128 result;
+ result[0] = memory.Read64(addr);
+ result[1] = memory.Read64(addr + 8);
+ return result;
+ });
+}
+
+void DynarmicExclusiveMonitor::ClearExclusive() {
+ monitor.Clear();
+}
+
+bool DynarmicExclusiveMonitor::ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) {
+ return monitor.DoExclusiveOperation<u8>(core_index, vaddr, [&](u8 expected) -> bool {
+ return memory.WriteExclusive8(vaddr, value, expected);
+ });
+}
+
+bool DynarmicExclusiveMonitor::ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) {
+ return monitor.DoExclusiveOperation<u16>(core_index, vaddr, [&](u16 expected) -> bool {
+ return memory.WriteExclusive16(vaddr, value, expected);
+ });
+}
+
+bool DynarmicExclusiveMonitor::ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) {
+ return monitor.DoExclusiveOperation<u32>(core_index, vaddr, [&](u32 expected) -> bool {
+ return memory.WriteExclusive32(vaddr, value, expected);
+ });
+}
+
+bool DynarmicExclusiveMonitor::ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) {
+ return monitor.DoExclusiveOperation<u64>(core_index, vaddr, [&](u64 expected) -> bool {
+ return memory.WriteExclusive64(vaddr, value, expected);
+ });
+}
+
+bool DynarmicExclusiveMonitor::ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) {
+ return monitor.DoExclusiveOperation<u128>(core_index, vaddr, [&](u128 expected) -> bool {
+ return memory.WriteExclusive128(vaddr, value, expected);
+ });
+}
+
+} // namespace Core
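The read/write pairs above mirror LDAXR/STLXR semantics: ExclusiveRead* marks the address for a core and returns the current value, and the matching ExclusiveWrite* commits only if memory still holds the expected value (Memory::WriteExclusive* performs the compare-and-swap). A hedged sketch of the retry loop a guest atomic increment reduces to; the wiring and names here are assumptions for illustration:

    #include <cstddef>
    #include <cstdint>

    #include "core/arm/dynarmic/arm_exclusive_monitor.h"

    // Sketch: atomic 32-bit increment built on the mark/conditional-write pair.
    std::uint32_t AtomicIncrement(Core::DynarmicExclusiveMonitor& monitor,
                                  std::size_t core_index, std::uint64_t addr) {
        while (true) {
            // Mark the address for this core and read the current value.
            const std::uint32_t old_value = monitor.ExclusiveRead32(core_index, addr);
            // Commit only if no other core wrote the location in the meantime.
            if (monitor.ExclusiveWrite32(core_index, addr, old_value + 1)) {
                return old_value + 1;
            }
            // Lost the race: the mark was invalidated, so retry.
        }
    }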
diff --git a/src/core/arm/dynarmic/arm_exclusive_monitor.h b/src/core/arm/dynarmic/arm_exclusive_monitor.h
new file mode 100644
index 000000000..964f4a55d
--- /dev/null
+++ b/src/core/arm/dynarmic/arm_exclusive_monitor.h
@@ -0,0 +1,48 @@
+// Copyright 2020 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+#include <unordered_map>
+
+#include <dynarmic/exclusive_monitor.h>
+
+#include "common/common_types.h"
+#include "core/arm/dynarmic/arm_dynarmic_32.h"
+#include "core/arm/dynarmic/arm_dynarmic_64.h"
+#include "core/arm/exclusive_monitor.h"
+
+namespace Core::Memory {
+class Memory;
+}
+
+namespace Core {
+
+class DynarmicExclusiveMonitor final : public ExclusiveMonitor {
+public:
+ explicit DynarmicExclusiveMonitor(Memory::Memory& memory, std::size_t core_count);
+ ~DynarmicExclusiveMonitor() override;
+
+ u8 ExclusiveRead8(std::size_t core_index, VAddr addr) override;
+ u16 ExclusiveRead16(std::size_t core_index, VAddr addr) override;
+ u32 ExclusiveRead32(std::size_t core_index, VAddr addr) override;
+ u64 ExclusiveRead64(std::size_t core_index, VAddr addr) override;
+ u128 ExclusiveRead128(std::size_t core_index, VAddr addr) override;
+ void ClearExclusive() override;
+
+ bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) override;
+ bool ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) override;
+ bool ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) override;
+ bool ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) override;
+ bool ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) override;
+
+private:
+ friend class ARM_Dynarmic_32;
+ friend class ARM_Dynarmic_64;
+ Dynarmic::ExclusiveMonitor monitor;
+ Core::Memory::Memory& memory;
+};
+
+} // namespace Core
diff --git a/src/core/arm/exclusive_monitor.cpp b/src/core/arm/exclusive_monitor.cpp
index b32401e0b..d8cba369d 100644
--- a/src/core/arm/exclusive_monitor.cpp
+++ b/src/core/arm/exclusive_monitor.cpp
@@ -3,7 +3,7 @@
// Refer to the license.txt file included.
#ifdef ARCHITECTURE_x86_64
-#include "core/arm/dynarmic/arm_dynarmic_64.h"
+#include "core/arm/dynarmic/arm_exclusive_monitor.h"
#endif
#include "core/arm/exclusive_monitor.h"
#include "core/memory.h"
diff --git a/src/core/arm/exclusive_monitor.h b/src/core/arm/exclusive_monitor.h
index ccd73b80f..62f6e6023 100644
--- a/src/core/arm/exclusive_monitor.h
+++ b/src/core/arm/exclusive_monitor.h
@@ -18,7 +18,11 @@ class ExclusiveMonitor {
public:
virtual ~ExclusiveMonitor();
- virtual void SetExclusive(std::size_t core_index, VAddr addr) = 0;
+ virtual u8 ExclusiveRead8(std::size_t core_index, VAddr addr) = 0;
+ virtual u16 ExclusiveRead16(std::size_t core_index, VAddr addr) = 0;
+ virtual u32 ExclusiveRead32(std::size_t core_index, VAddr addr) = 0;
+ virtual u64 ExclusiveRead64(std::size_t core_index, VAddr addr) = 0;
+ virtual u128 ExclusiveRead128(std::size_t core_index, VAddr addr) = 0;
virtual void ClearExclusive() = 0;
virtual bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) = 0;
diff --git a/src/core/arm/unicorn/arm_unicorn.cpp b/src/core/arm/unicorn/arm_unicorn.cpp
index e40e9626a..1df3f3ed1 100644
--- a/src/core/arm/unicorn/arm_unicorn.cpp
+++ b/src/core/arm/unicorn/arm_unicorn.cpp
@@ -6,6 +6,7 @@
#include <unicorn/arm64.h>
#include "common/assert.h"
#include "common/microprofile.h"
+#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/unicorn/arm_unicorn.h"
#include "core/core.h"
#include "core/core_timing.h"
@@ -62,7 +63,9 @@ static bool UnmappedMemoryHook(uc_engine* uc, uc_mem_type type, u64 addr, int si
return false;
}
-ARM_Unicorn::ARM_Unicorn(System& system, Arch architecture) : ARM_Interface{system} {
+ARM_Unicorn::ARM_Unicorn(System& system, CPUInterrupts& interrupt_handlers, bool uses_wall_clock,
+ Arch architecture, std::size_t core_index)
+ : ARM_Interface{system, interrupt_handlers, uses_wall_clock}, core_index{core_index} {
const auto arch = architecture == Arch::AArch32 ? UC_ARCH_ARM : UC_ARCH_ARM64;
CHECKED(uc_open(arch, UC_MODE_ARM, &uc));
@@ -156,12 +159,20 @@ void ARM_Unicorn::SetTPIDR_EL0(u64 value) {
CHECKED(uc_reg_write(uc, UC_ARM64_REG_TPIDR_EL0, &value));
}
+void ARM_Unicorn::ChangeProcessorID(std::size_t new_core_id) {
+ core_index = new_core_id;
+}
+
void ARM_Unicorn::Run() {
if (GDBStub::IsServerEnabled()) {
ExecuteInstructions(std::max(4000000U, 0U));
} else {
- ExecuteInstructions(
- std::max(std::size_t(system.CoreTiming().GetDowncount()), std::size_t{0}));
+ while (true) {
+ if (interrupt_handlers[core_index].IsInterrupted()) {
+ return;
+ }
+ ExecuteInstructions(10);
+ }
}
}
@@ -183,8 +194,6 @@ void ARM_Unicorn::ExecuteInstructions(std::size_t num_instructions) {
UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, page_buffer.data()));
CHECKED(uc_emu_start(uc, GetPC(), 1ULL << 63, 0, num_instructions));
CHECKED(uc_mem_unmap(uc, map_addr, page_buffer.size()));
-
- system.CoreTiming().AddTicks(num_instructions);
if (GDBStub::IsServerEnabled()) {
if (last_bkpt_hit && last_bkpt.type == GDBStub::BreakpointType::Execute) {
uc_reg_write(uc, UC_ARM64_REG_PC, &last_bkpt.address);
diff --git a/src/core/arm/unicorn/arm_unicorn.h b/src/core/arm/unicorn/arm_unicorn.h
index 725c65085..810aff311 100644
--- a/src/core/arm/unicorn/arm_unicorn.h
+++ b/src/core/arm/unicorn/arm_unicorn.h
@@ -20,7 +20,8 @@ public:
AArch64, // 64-bit ARM
};
- explicit ARM_Unicorn(System& system, Arch architecture);
+ explicit ARM_Unicorn(System& system, CPUInterrupts& interrupt_handlers, bool uses_wall_clock,
+ Arch architecture, std::size_t core_index);
~ARM_Unicorn() override;
void SetPC(u64 pc) override;
@@ -35,6 +36,7 @@ public:
void SetTlsAddress(VAddr address) override;
void SetTPIDR_EL0(u64 value) override;
u64 GetTPIDR_EL0() const override;
+ void ChangeProcessorID(std::size_t new_core_id) override;
void PrepareReschedule() override;
void ClearExclusiveState() override;
void ExecuteInstructions(std::size_t num_instructions);
@@ -55,6 +57,7 @@ private:
uc_engine* uc{};
GDBStub::BreakpointAddress last_bkpt{};
bool last_bkpt_hit = false;
+ std::size_t core_index;
};
} // namespace Core
diff --git a/src/core/core.cpp b/src/core/core.cpp
index f9f8a3000..1a243c515 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -8,10 +8,10 @@
#include "common/file_util.h"
#include "common/logging/log.h"
+#include "common/microprofile.h"
#include "common/string_util.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
-#include "core/core_manager.h"
#include "core/core_timing.h"
#include "core/cpu_manager.h"
#include "core/device_memory.h"
@@ -51,6 +51,11 @@
#include "video_core/renderer_base.h"
#include "video_core/video_core.h"
+MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_CPU0, "ARM JIT", "Dynarmic CPU 0", MP_RGB(255, 64, 64));
+MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_CPU1, "ARM JIT", "Dynarmic CPU 1", MP_RGB(255, 64, 64));
+MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_CPU2, "ARM JIT", "Dynarmic CPU 2", MP_RGB(255, 64, 64));
+MICROPROFILE_DEFINE(ARM_Jit_Dynarmic_CPU3, "ARM JIT", "Dynarmic CPU 3", MP_RGB(255, 64, 64));
+
namespace Core {
namespace {
@@ -117,23 +122,22 @@ struct System::Impl {
: kernel{system}, fs_controller{system}, memory{system},
cpu_manager{system}, reporter{system}, applet_manager{system} {}
- CoreManager& CurrentCoreManager() {
- return cpu_manager.GetCurrentCoreManager();
- }
+ ResultStatus Run() {
+ status = ResultStatus::Success;
- Kernel::PhysicalCore& CurrentPhysicalCore() {
- const auto index = cpu_manager.GetActiveCoreIndex();
- return kernel.PhysicalCore(index);
- }
+ kernel.Suspend(false);
+ core_timing.SyncPause(false);
+ cpu_manager.Pause(false);
- Kernel::PhysicalCore& GetPhysicalCore(std::size_t index) {
- return kernel.PhysicalCore(index);
+ return status;
}
- ResultStatus RunLoop(bool tight_loop) {
+ ResultStatus Pause() {
status = ResultStatus::Success;
- cpu_manager.RunLoop(tight_loop);
+ core_timing.SyncPause(true);
+ kernel.Suspend(true);
+ cpu_manager.Pause(true);
return status;
}
@@ -143,7 +147,15 @@ struct System::Impl {
device_memory = std::make_unique<Core::DeviceMemory>(system);
- core_timing.Initialize();
+ is_multicore = Settings::values.use_multi_core;
+ is_async_gpu = is_multicore || Settings::values.use_asynchronous_gpu_emulation;
+
+ kernel.SetMulticore(is_multicore);
+ cpu_manager.SetMulticore(is_multicore);
+ cpu_manager.SetAsyncGpu(is_async_gpu);
+ core_timing.SetMulticore(is_multicore);
+
+ core_timing.Initialize([&system]() { system.RegisterHostThread(); });
kernel.Initialize();
cpu_manager.Initialize();
@@ -180,6 +192,11 @@ struct System::Impl {
is_powered_on = true;
exit_lock = false;
+ microprofile_dynarmic[0] = MICROPROFILE_TOKEN(ARM_Jit_Dynarmic_CPU0);
+ microprofile_dynarmic[1] = MICROPROFILE_TOKEN(ARM_Jit_Dynarmic_CPU1);
+ microprofile_dynarmic[2] = MICROPROFILE_TOKEN(ARM_Jit_Dynarmic_CPU2);
+ microprofile_dynarmic[3] = MICROPROFILE_TOKEN(ARM_Jit_Dynarmic_CPU3);
+
LOG_DEBUG(Core, "Initialized OK");
return ResultStatus::Success;
@@ -277,8 +294,6 @@ struct System::Impl {
service_manager.reset();
cheat_engine.reset();
telemetry_session.reset();
- perf_stats.reset();
- gpu_core.reset();
device_memory.reset();
// Close all CPU/threading state
@@ -290,6 +305,8 @@ struct System::Impl {
// Close app loader
app_loader.reset();
+ gpu_core.reset();
+ perf_stats.reset();
// Clear all applets
applet_manager.ClearAll();
@@ -382,25 +399,35 @@ struct System::Impl {
std::unique_ptr<Core::PerfStats> perf_stats;
Core::FrameLimiter frame_limiter;
+
+ bool is_multicore{};
+ bool is_async_gpu{};
+
+ std::array<u64, Core::Hardware::NUM_CPU_CORES> dynarmic_ticks{};
+ std::array<MicroProfileToken, Core::Hardware::NUM_CPU_CORES> microprofile_dynarmic{};
};
System::System() : impl{std::make_unique<Impl>(*this)} {}
System::~System() = default;
-CoreManager& System::CurrentCoreManager() {
- return impl->CurrentCoreManager();
+CpuManager& System::GetCpuManager() {
+ return impl->cpu_manager;
+}
+
+const CpuManager& System::GetCpuManager() const {
+ return impl->cpu_manager;
}
-const CoreManager& System::CurrentCoreManager() const {
- return impl->CurrentCoreManager();
+System::ResultStatus System::Run() {
+ return impl->Run();
}
-System::ResultStatus System::RunLoop(bool tight_loop) {
- return impl->RunLoop(tight_loop);
+System::ResultStatus System::Pause() {
+ return impl->Pause();
}
System::ResultStatus System::SingleStep() {
- return RunLoop(false);
+ return ResultStatus::Success;
}
void System::InvalidateCpuInstructionCaches() {
@@ -416,7 +443,7 @@ bool System::IsPoweredOn() const {
}
void System::PrepareReschedule() {
- impl->CurrentPhysicalCore().Stop();
+ // Deprecated, does nothing, kept for backward compatibility.
}
void System::PrepareReschedule(const u32 core_index) {
@@ -436,31 +463,41 @@ const TelemetrySession& System::TelemetrySession() const {
}
ARM_Interface& System::CurrentArmInterface() {
- return impl->CurrentPhysicalCore().ArmInterface();
+ return impl->kernel.CurrentScheduler().GetCurrentThread()->ArmInterface();
}
const ARM_Interface& System::CurrentArmInterface() const {
- return impl->CurrentPhysicalCore().ArmInterface();
+ return impl->kernel.CurrentScheduler().GetCurrentThread()->ArmInterface();
}
std::size_t System::CurrentCoreIndex() const {
- return impl->cpu_manager.GetActiveCoreIndex();
+ std::size_t core = impl->kernel.GetCurrentHostThreadID();
+ ASSERT(core < Core::Hardware::NUM_CPU_CORES);
+ return core;
}
Kernel::Scheduler& System::CurrentScheduler() {
- return impl->CurrentPhysicalCore().Scheduler();
+ return impl->kernel.CurrentScheduler();
}
const Kernel::Scheduler& System::CurrentScheduler() const {
- return impl->CurrentPhysicalCore().Scheduler();
+ return impl->kernel.CurrentScheduler();
+}
+
+Kernel::PhysicalCore& System::CurrentPhysicalCore() {
+ return impl->kernel.CurrentPhysicalCore();
+}
+
+const Kernel::PhysicalCore& System::CurrentPhysicalCore() const {
+ return impl->kernel.CurrentPhysicalCore();
}
Kernel::Scheduler& System::Scheduler(std::size_t core_index) {
- return impl->GetPhysicalCore(core_index).Scheduler();
+ return impl->kernel.Scheduler(core_index);
}
const Kernel::Scheduler& System::Scheduler(std::size_t core_index) const {
- return impl->GetPhysicalCore(core_index).Scheduler();
+ return impl->kernel.Scheduler(core_index);
}
/// Gets the global scheduler
@@ -490,20 +527,15 @@ const Kernel::Process* System::CurrentProcess() const {
}
ARM_Interface& System::ArmInterface(std::size_t core_index) {
- return impl->GetPhysicalCore(core_index).ArmInterface();
+ auto* thread = impl->kernel.Scheduler(core_index).GetCurrentThread();
+ ASSERT(thread && !thread->IsHLEThread());
+ return thread->ArmInterface();
}
const ARM_Interface& System::ArmInterface(std::size_t core_index) const {
- return impl->GetPhysicalCore(core_index).ArmInterface();
-}
-
-CoreManager& System::GetCoreManager(std::size_t core_index) {
- return impl->cpu_manager.GetCoreManager(core_index);
-}
-
-const CoreManager& System::GetCoreManager(std::size_t core_index) const {
- ASSERT(core_index < NUM_CPU_CORES);
- return impl->cpu_manager.GetCoreManager(core_index);
+ auto* thread = impl->kernel.Scheduler(core_index).GetCurrentThread();
+ ASSERT(thread && !thread->IsHLEThread());
+ return thread->ArmInterface();
}
ExclusiveMonitor& System::Monitor() {
@@ -722,4 +754,18 @@ void System::RegisterHostThread() {
impl->kernel.RegisterHostThread();
}
+void System::EnterDynarmicProfile() {
+ std::size_t core = impl->kernel.GetCurrentHostThreadID();
+ impl->dynarmic_ticks[core] = MicroProfileEnter(impl->microprofile_dynarmic[core]);
+}
+
+void System::ExitDynarmicProfile() {
+ std::size_t core = impl->kernel.GetCurrentHostThreadID();
+ MicroProfileLeave(impl->microprofile_dynarmic[core], impl->dynarmic_ticks[core]);
+}
+
+bool System::IsMulticore() const {
+ return impl->is_multicore;
+}
+
} // namespace Core
diff --git a/src/core/core.h b/src/core/core.h
index acc53d6a1..5c6cfbffe 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -27,6 +27,7 @@ class VfsFilesystem;
namespace Kernel {
class GlobalScheduler;
class KernelCore;
+class PhysicalCore;
class Process;
class Scheduler;
} // namespace Kernel
@@ -90,7 +91,7 @@ class InterruptManager;
namespace Core {
class ARM_Interface;
-class CoreManager;
+class CpuManager;
class DeviceMemory;
class ExclusiveMonitor;
class FrameLimiter;
@@ -136,16 +137,16 @@ public:
};
/**
- * Run the core CPU loop
- * This function runs the core for the specified number of CPU instructions before trying to
- * update hardware. This is much faster than SingleStep (and should be equivalent), as the CPU
- * is not required to do a full dispatch with each instruction. NOTE: the number of instructions
- * requested is not guaranteed to run, as this will be interrupted preemptively if a hardware
- * update is requested (e.g. on a thread switch).
- * @param tight_loop If false, the CPU single-steps.
- * @return Result status, indicating whether or not the operation succeeded.
+ * Run the OS and application.
+ * This function starts emulation and runs the relevant devices.
+ */
+ ResultStatus Run();
+
+ /**
+ * Pause the OS and application.
+ * This function pauses emulation and stops the relevant devices.
*/
- ResultStatus RunLoop(bool tight_loop = true);
+ ResultStatus Pause();
/**
* Step the CPU one instruction
@@ -209,17 +210,21 @@ public:
/// Gets the scheduler for the CPU core that is currently running
const Kernel::Scheduler& CurrentScheduler() const;
+ /// Gets the physical core for the CPU core that is currently running
+ Kernel::PhysicalCore& CurrentPhysicalCore();
+
+ /// Gets the physical core for the CPU core that is currently running
+ const Kernel::PhysicalCore& CurrentPhysicalCore() const;
+
/// Gets a reference to an ARM interface for the CPU core with the specified index
ARM_Interface& ArmInterface(std::size_t core_index);
/// Gets a const reference to an ARM interface from the CPU core with the specified index
const ARM_Interface& ArmInterface(std::size_t core_index) const;
- /// Gets a CPU interface to the CPU core with the specified index
- CoreManager& GetCoreManager(std::size_t core_index);
+ CpuManager& GetCpuManager();
- /// Gets a CPU interface to the CPU core with the specified index
- const CoreManager& GetCoreManager(std::size_t core_index) const;
+ const CpuManager& GetCpuManager() const;
/// Gets a reference to the exclusive monitor
ExclusiveMonitor& Monitor();
@@ -370,14 +375,17 @@ public:
/// Register a host thread as an auxiliary thread.
void RegisterHostThread();
-private:
- System();
+ /// Enter Dynarmic Microprofile
+ void EnterDynarmicProfile();
+
+ /// Exit Dynarmic Microprofile
+ void ExitDynarmicProfile();
- /// Returns the currently running CPU core
- CoreManager& CurrentCoreManager();
+ /// Tells whether the system is running in multicore mode.
+ bool IsMulticore() const;
- /// Returns the currently running CPU core
- const CoreManager& CurrentCoreManager() const;
+private:
+ System();
/**
* Initialize the emulated system.
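With RunLoop gone, a frontend drives emulation purely through this pair: Run() unsuspends the kernel, resumes host timing, and unpauses the core threads, while Pause() does the reverse in the opposite order (see System::Impl above). A hedged sketch of the calling pattern, assuming an initialized system with a title already loaded:

    #include "core/core.h"

    // Sketch only: `system` must be initialized and have a game loaded.
    void SetEmulationRunning(Core::System& system, bool running) {
        const auto status = running ? system.Run() : system.Pause();
        if (status != Core::System::ResultStatus::Success) {
            // Frontend-specific error handling would go here.
        }
    }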
diff --git a/src/core/core_manager.cpp b/src/core/core_manager.cpp
deleted file mode 100644
index b6b797c80..000000000
--- a/src/core/core_manager.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <condition_variable>
-#include <mutex>
-
-#include "common/logging/log.h"
-#include "core/arm/exclusive_monitor.h"
-#include "core/arm/unicorn/arm_unicorn.h"
-#include "core/core.h"
-#include "core/core_manager.h"
-#include "core/core_timing.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/physical_core.h"
-#include "core/hle/kernel/scheduler.h"
-#include "core/hle/kernel/thread.h"
-#include "core/hle/lock.h"
-#include "core/settings.h"
-
-namespace Core {
-
-CoreManager::CoreManager(System& system, std::size_t core_index)
- : global_scheduler{system.GlobalScheduler()}, physical_core{system.Kernel().PhysicalCore(
- core_index)},
- core_timing{system.CoreTiming()}, core_index{core_index} {}
-
-CoreManager::~CoreManager() = default;
-
-void CoreManager::RunLoop(bool tight_loop) {
- Reschedule();
-
- // If we don't have a currently active thread then don't execute instructions,
- // instead advance to the next event and try to yield to the next thread
- if (Kernel::GetCurrentThread() == nullptr) {
- LOG_TRACE(Core, "Core-{} idling", core_index);
- core_timing.Idle();
- } else {
- if (tight_loop) {
- physical_core.Run();
- } else {
- physical_core.Step();
- }
- }
- core_timing.Advance();
-
- Reschedule();
-}
-
-void CoreManager::SingleStep() {
- return RunLoop(false);
-}
-
-void CoreManager::PrepareReschedule() {
- physical_core.Stop();
-}
-
-void CoreManager::Reschedule() {
- // Lock the global kernel mutex when we manipulate the HLE state
- std::lock_guard lock(HLE::g_hle_lock);
-
- global_scheduler.SelectThread(core_index);
-
- physical_core.Scheduler().TryDoContextSwitch();
-}
-
-} // namespace Core
diff --git a/src/core/core_manager.h b/src/core/core_manager.h
deleted file mode 100644
index d525de00a..000000000
--- a/src/core/core_manager.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <atomic>
-#include <cstddef>
-#include <memory>
-#include "common/common_types.h"
-
-namespace Kernel {
-class GlobalScheduler;
-class PhysicalCore;
-} // namespace Kernel
-
-namespace Core {
-class System;
-}
-
-namespace Core::Timing {
-class CoreTiming;
-}
-
-namespace Core::Memory {
-class Memory;
-}
-
-namespace Core {
-
-constexpr unsigned NUM_CPU_CORES{4};
-
-class CoreManager {
-public:
- CoreManager(System& system, std::size_t core_index);
- ~CoreManager();
-
- void RunLoop(bool tight_loop = true);
-
- void SingleStep();
-
- void PrepareReschedule();
-
- bool IsMainCore() const {
- return core_index == 0;
- }
-
- std::size_t CoreIndex() const {
- return core_index;
- }
-
-private:
- void Reschedule();
-
- Kernel::GlobalScheduler& global_scheduler;
- Kernel::PhysicalCore& physical_core;
- Timing::CoreTiming& core_timing;
-
- std::atomic<bool> reschedule_pending = false;
- std::size_t core_index;
-};
-
-} // namespace Core
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index 46d4178c4..5c83c41a4 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -1,29 +1,27 @@
-// Copyright 2008 Dolphin Emulator Project / 2017 Citra Emulator Project
-// Licensed under GPLv2+
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include "core/core_timing.h"
-
#include <algorithm>
#include <mutex>
#include <string>
#include <tuple>
#include "common/assert.h"
-#include "common/thread.h"
+#include "common/microprofile.h"
+#include "core/core_timing.h"
#include "core/core_timing_util.h"
-#include "core/hardware_properties.h"
namespace Core::Timing {
-constexpr int MAX_SLICE_LENGTH = 10000;
+constexpr u64 MAX_SLICE_LENGTH = 4000;
std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback) {
return std::make_shared<EventType>(std::move(callback), std::move(name));
}
struct CoreTiming::Event {
- s64 time;
+ u64 time;
u64 fifo_order;
u64 userdata;
std::weak_ptr<EventType> type;
@@ -39,51 +37,90 @@ struct CoreTiming::Event {
}
};
-CoreTiming::CoreTiming() = default;
-CoreTiming::~CoreTiming() = default;
+CoreTiming::CoreTiming() {
+ clock =
+ Common::CreateBestMatchingClock(Core::Hardware::BASE_CLOCK_RATE, Core::Hardware::CNTFREQ);
+}
-void CoreTiming::Initialize() {
- downcounts.fill(MAX_SLICE_LENGTH);
- time_slice.fill(MAX_SLICE_LENGTH);
- slice_length = MAX_SLICE_LENGTH;
- global_timer = 0;
- idled_cycles = 0;
- current_context = 0;
+CoreTiming::~CoreTiming() = default;
- // The time between CoreTiming being initialized and the first call to Advance() is considered
- // the slice boundary between slice -1 and slice 0. Dispatcher loops must call Advance() before
- // executing the first cycle of each slice to prepare the slice length and downcount for
- // that slice.
- is_global_timer_sane = true;
+void CoreTiming::ThreadEntry(CoreTiming& instance) {
+ constexpr char name[] = "yuzu:HostTiming";
+ MicroProfileOnThreadCreate(name);
+ Common::SetCurrentThreadName(name);
+ Common::SetCurrentThreadPriority(Common::ThreadPriority::VeryHigh);
+ instance.on_thread_init();
+ instance.ThreadLoop();
+}
+void CoreTiming::Initialize(std::function<void(void)>&& on_thread_init_) {
+ on_thread_init = std::move(on_thread_init_);
event_fifo_id = 0;
-
+ shutting_down = false;
+ ticks = 0;
const auto empty_timed_callback = [](u64, s64) {};
ev_lost = CreateEvent("_lost_event", empty_timed_callback);
+ if (is_multicore) {
+ timer_thread = std::make_unique<std::thread>(ThreadEntry, std::ref(*this));
+ }
}
void CoreTiming::Shutdown() {
+ paused = true;
+ shutting_down = true;
+ pause_event.Set();
+ event.Set();
+ if (timer_thread) {
+ timer_thread->join();
+ }
ClearPendingEvents();
+ timer_thread.reset();
+ has_started = false;
}
-void CoreTiming::ScheduleEvent(s64 cycles_into_future, const std::shared_ptr<EventType>& event_type,
- u64 userdata) {
- std::lock_guard guard{inner_mutex};
- const s64 timeout = GetTicks() + cycles_into_future;
+void CoreTiming::Pause(bool is_paused) {
+ paused = is_paused;
+ pause_event.Set();
+}
- // If this event needs to be scheduled before the next advance(), force one early
- if (!is_global_timer_sane) {
- ForceExceptionCheck(cycles_into_future);
+void CoreTiming::SyncPause(bool is_paused) {
+ if (is_paused == paused && paused_set == paused) {
+ return;
+ }
+ Pause(is_paused);
+ if (timer_thread) {
+ if (!is_paused) {
+ pause_event.Set();
+ }
+ event.Set();
+ while (paused_set != is_paused)
+ ;
}
+}
- event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
+bool CoreTiming::IsRunning() const {
+ return !paused_set;
+}
- std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
+bool CoreTiming::HasPendingEvents() const {
+ return !(wait_set && event_queue.empty());
}
-void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata) {
- std::lock_guard guard{inner_mutex};
+void CoreTiming::ScheduleEvent(s64 ns_into_future, const std::shared_ptr<EventType>& event_type,
+ u64 userdata) {
+ {
+ std::scoped_lock scope{basic_lock};
+ const u64 timeout = static_cast<u64>(GetGlobalTimeNs().count() + ns_into_future);
+
+ event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
+ std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
+ }
+ event.Set();
+}
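
The queue above is a plain std::vector kept in min-heap order through std::push_heap with std::greater<>, so the front is always the earliest (time, fifo_order) pair; the FIFO counter breaks ties between events scheduled for the same instant. A minimal, self-contained sketch of that pattern (the names mirror the patch, but the types here are illustrative, not yuzu's):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <tuple>
#include <vector>

struct Event {
    std::uint64_t time;       // absolute deadline
    std::uint64_t fifo_order; // insertion counter; breaks ties FIFO
};

// operator> is what std::greater<> invokes; comparing the (time, fifo_order)
// tuple makes the heap a min-heap on deadline, FIFO among equal deadlines.
bool operator>(const Event& lhs, const Event& rhs) {
    return std::tie(lhs.time, lhs.fifo_order) > std::tie(rhs.time, rhs.fifo_order);
}

int main() {
    std::vector<Event> event_queue;
    std::uint64_t event_fifo_id = 0;

    // Schedule: append, then restore the heap property.
    for (std::uint64_t t : {300ULL, 100ULL, 300ULL, 200ULL}) {
        event_queue.push_back(Event{t, event_fifo_id++});
        std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
    }

    // Dispatch: the front is always the earliest pending event.
    while (!event_queue.empty()) {
        std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
        const Event evt = event_queue.back();
        event_queue.pop_back();
        std::cout << "time=" << evt.time << " fifo=" << evt.fifo_order << '\n';
    }
}

A std::priority_queue would hide the underlying container, which this code needs direct access to: UnscheduleEvent and RemoveEvent erase arbitrary entries with std::remove_if and then re-heapify.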
+
+void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata) {
+ std::scoped_lock scope{basic_lock};
const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
return e.type.lock().get() == event_type.get() && e.userdata == userdata;
});
@@ -95,21 +132,39 @@ void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u
}
}
-u64 CoreTiming::GetTicks() const {
- u64 ticks = static_cast<u64>(global_timer);
- if (!is_global_timer_sane) {
- ticks += accumulated_ticks;
+void CoreTiming::AddTicks(u64 ticks) {
+ this->ticks += ticks;
+ downcount -= ticks;
+}
+
+void CoreTiming::Idle() {
+ if (!event_queue.empty()) {
+ const u64 next_event_time = event_queue.front().time;
+ const u64 next_ticks = nsToCycles(std::chrono::nanoseconds(next_event_time)) + 10U;
+ if (next_ticks > ticks) {
+ ticks = next_ticks;
+ }
+ return;
}
- return ticks;
+ ticks += 1000U;
}
-u64 CoreTiming::GetIdleTicks() const {
- return static_cast<u64>(idled_cycles);
+void CoreTiming::ResetTicks() {
+ downcount = MAX_SLICE_LENGTH;
}
-void CoreTiming::AddTicks(u64 ticks) {
- accumulated_ticks += ticks;
- downcounts[current_context] -= static_cast<s64>(ticks);
+u64 CoreTiming::GetCPUTicks() const {
+ if (is_multicore) {
+ return clock->GetCPUCycles();
+ }
+ return ticks;
+}
+
+u64 CoreTiming::GetClockTicks() const {
+ if (is_multicore) {
+ return clock->GetClockCycles();
+ }
+ return CpuCyclesToClockCycles(ticks);
}
void CoreTiming::ClearPendingEvents() {
@@ -117,7 +172,7 @@ void CoreTiming::ClearPendingEvents() {
}
void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
- std::lock_guard guard{inner_mutex};
+ basic_lock.lock();
const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
return e.type.lock().get() == event_type.get();
@@ -128,99 +183,72 @@ void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
event_queue.erase(itr, event_queue.end());
std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
}
+ basic_lock.unlock();
}
-void CoreTiming::ForceExceptionCheck(s64 cycles) {
- cycles = std::max<s64>(0, cycles);
- if (downcounts[current_context] <= cycles) {
- return;
- }
-
- // downcount is always (much) smaller than MAX_INT so we can safely cast cycles to an int
- // here. Account for cycles already executed by adjusting the g.slice_length
- downcounts[current_context] = static_cast<int>(cycles);
-}
-
-std::optional<u64> CoreTiming::NextAvailableCore(const s64 needed_ticks) const {
- const u64 original_context = current_context;
- u64 next_context = (original_context + 1) % num_cpu_cores;
- while (next_context != original_context) {
- if (time_slice[next_context] >= needed_ticks) {
- return {next_context};
- } else if (time_slice[next_context] >= 0) {
- return std::nullopt;
- }
- next_context = (next_context + 1) % num_cpu_cores;
- }
- return std::nullopt;
-}
-
-void CoreTiming::Advance() {
- std::unique_lock<std::mutex> guard(inner_mutex);
-
- const u64 cycles_executed = accumulated_ticks;
- time_slice[current_context] = std::max<s64>(0, time_slice[current_context] - accumulated_ticks);
- global_timer += cycles_executed;
-
- is_global_timer_sane = true;
+std::optional<s64> CoreTiming::Advance() {
+ std::scoped_lock advance_scope{advance_lock};
+ std::scoped_lock basic_scope{basic_lock};
+ global_timer = GetGlobalTimeNs().count();
while (!event_queue.empty() && event_queue.front().time <= global_timer) {
Event evt = std::move(event_queue.front());
std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
event_queue.pop_back();
- inner_mutex.unlock();
+ basic_lock.unlock();
if (auto event_type{evt.type.lock()}) {
event_type->callback(evt.userdata, global_timer - evt.time);
}
- inner_mutex.lock();
+ basic_lock.lock();
+ global_timer = GetGlobalTimeNs().count();
}
- is_global_timer_sane = false;
-
- // Still events left (scheduled in the future)
if (!event_queue.empty()) {
- const s64 needed_ticks =
- std::min<s64>(event_queue.front().time - global_timer, MAX_SLICE_LENGTH);
- const auto next_core = NextAvailableCore(needed_ticks);
- if (next_core) {
- downcounts[*next_core] = needed_ticks;
- }
+ const s64 next_time = event_queue.front().time - global_timer;
+ return next_time;
+ } else {
+ return std::nullopt;
}
-
- accumulated_ticks = 0;
-
- downcounts[current_context] = time_slice[current_context];
}
-void CoreTiming::ResetRun() {
- downcounts.fill(MAX_SLICE_LENGTH);
- time_slice.fill(MAX_SLICE_LENGTH);
- current_context = 0;
- // Still events left (scheduled in the future)
- if (!event_queue.empty()) {
- const s64 needed_ticks =
- std::min<s64>(event_queue.front().time - global_timer, MAX_SLICE_LENGTH);
- downcounts[current_context] = needed_ticks;
+void CoreTiming::ThreadLoop() {
+ has_started = true;
+ while (!shutting_down) {
+ while (!paused) {
+ paused_set = false;
+ const auto next_time = Advance();
+ if (next_time) {
+ if (*next_time > 0) {
+ std::chrono::nanoseconds next_time_ns = std::chrono::nanoseconds(*next_time);
+ event.WaitFor(next_time_ns);
+ }
+ } else {
+ wait_set = true;
+ event.Wait();
+ }
+ wait_set = false;
+ }
+ paused_set = true;
+ clock->Pause(true);
+ pause_event.Wait();
+ clock->Pause(false);
}
-
- is_global_timer_sane = false;
- accumulated_ticks = 0;
}
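
ThreadLoop parks the dedicated host-timing thread on a Common::Event: when a deadline is known it waits at most that long, otherwise it blocks until ScheduleEvent signals. Common::Event is yuzu's own primitive; the same wait-with-deadline shape can be sketched with a standard std::condition_variable (everything below is an illustrative analog, not the patch's code):

#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <optional>
#include <thread>

class TimerLoop {
public:
    void Run() {
        std::unique_lock lock{mutex};
        while (!shutting_down) {
            if (next_deadline_ns) {
                // Sleep until the deadline, or earlier if a new event arrives.
                cv.wait_for(lock, std::chrono::nanoseconds(*next_deadline_ns));
                next_deadline_ns.reset();
                std::cout << "dispatching due events\n";
            } else {
                // Queue empty: block until something is scheduled.
                cv.wait(lock); // production code would guard with a predicate
            }
        }
    }

    void Schedule(std::int64_t ns_into_future) {
        {
            std::scoped_lock lock{mutex};
            next_deadline_ns = ns_into_future;
        }
        cv.notify_one(); // plays the role of event.Set()
    }

    void Shutdown() {
        {
            std::scoped_lock lock{mutex};
            shutting_down = true;
        }
        cv.notify_one();
    }

private:
    std::mutex mutex;
    std::condition_variable cv;
    std::optional<std::int64_t> next_deadline_ns;
    bool shutting_down = false;
};

int main() {
    TimerLoop loop;
    std::thread timer{[&] { loop.Run(); }};
    loop.Schedule(5'000'000); // an event 5 ms out
    std::this_thread::sleep_for(std::chrono::milliseconds(20));
    loop.Shutdown();
    timer.join();
}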
-void CoreTiming::Idle() {
- accumulated_ticks += downcounts[current_context];
- idled_cycles += downcounts[current_context];
- downcounts[current_context] = 0;
+std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
+ if (is_multicore) {
+ return clock->GetTimeNS();
+ }
+ return CyclesToNs(ticks);
}
std::chrono::microseconds CoreTiming::GetGlobalTimeUs() const {
- return std::chrono::microseconds{GetTicks() * 1000000 / Hardware::BASE_CLOCK_RATE};
-}
-
-s64 CoreTiming::GetDowncount() const {
- return downcounts[current_context];
+ if (is_multicore) {
+ return clock->GetTimeUS();
+ }
+ return CyclesToUs(ticks);
}
} // namespace Core::Timing
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index d50f4eb8a..72faaab64 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -1,19 +1,25 @@
-// Copyright 2008 Dolphin Emulator Project / 2017 Citra Emulator Project
-// Licensed under GPLv2+
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
+#include <atomic>
#include <chrono>
#include <functional>
#include <memory>
#include <mutex>
#include <optional>
#include <string>
+#include <thread>
#include <vector>
#include "common/common_types.h"
+#include "common/spin_lock.h"
+#include "common/thread.h"
#include "common/threadsafe_queue.h"
+#include "common/wall_clock.h"
+#include "core/hardware_properties.h"
namespace Core::Timing {
@@ -56,16 +62,40 @@ public:
/// CoreTiming begins at the boundary of timing slice -1. An initial call to Advance() is
/// required to end slice - 1 and start slice 0 before the first cycle of code is executed.
- void Initialize();
+ void Initialize(std::function<void(void)>&& on_thread_init_);
/// Tears down all timing related functionality.
void Shutdown();
- /// After the first Advance, the slice lengths and the downcount will be reduced whenever an
- /// event is scheduled earlier than the current values.
- ///
- /// Scheduling from a callback will not update the downcount until the Advance() completes.
- void ScheduleEvent(s64 cycles_into_future, const std::shared_ptr<EventType>& event_type,
+    /// Sets whether emulation is multicore or single core. Must be called before Initialize.
+ void SetMulticore(bool is_multicore) {
+ this->is_multicore = is_multicore;
+ }
+
+    /// Checks if host timing is being used (i.e. multicore mode).
+ bool IsHostTiming() const {
+ return is_multicore;
+ }
+
+ /// Pauses/Unpauses the execution of the timer thread.
+ void Pause(bool is_paused);
+
+ /// Pauses/Unpauses the execution of the timer thread and waits until paused.
+ void SyncPause(bool is_paused);
+
+ /// Checks if core timing is running.
+ bool IsRunning() const;
+
+ /// Checks if the timer thread has started.
+ bool HasStarted() const {
+ return has_started;
+ }
+
+ /// Checks if there are any pending time events.
+ bool HasPendingEvents() const;
+
+    /// Schedules an event to fire ns_into_future nanoseconds from now.
+ void ScheduleEvent(s64 ns_into_future, const std::shared_ptr<EventType>& event_type,
u64 userdata = 0);
void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata);
@@ -73,41 +103,30 @@ public:
/// We only permit one event of each type in the queue at a time.
void RemoveEvent(const std::shared_ptr<EventType>& event_type);
- void ForceExceptionCheck(s64 cycles);
-
- /// This should only be called from the emu thread, if you are calling it any other thread,
- /// you are doing something evil
- u64 GetTicks() const;
-
- u64 GetIdleTicks() const;
-
void AddTicks(u64 ticks);
- /// Advance must be called at the beginning of dispatcher loops, not the end. Advance() ends
- /// the previous timing slice and begins the next one, you must Advance from the previous
- /// slice to the current one before executing any cycles. CoreTiming starts in slice -1 so an
- /// Advance() is required to initialize the slice length before the first cycle of emulated
- /// instructions is executed.
- void Advance();
+ void ResetTicks();
- /// Pretend that the main CPU has executed enough cycles to reach the next event.
void Idle();
- std::chrono::microseconds GetGlobalTimeUs() const;
+ s64 GetDowncount() const {
+ return downcount;
+ }
- void ResetRun();
+ /// Returns current time in emulated CPU cycles
+ u64 GetCPUTicks() const;
- s64 GetDowncount() const;
+    /// Returns current time in emulated clock cycles.
+ u64 GetClockTicks() const;
- void SwitchContext(u64 new_context) {
- current_context = new_context;
- }
+ /// Returns current time in microseconds.
+ std::chrono::microseconds GetGlobalTimeUs() const;
- bool CanCurrentContextRun() const {
- return time_slice[current_context] > 0;
- }
+ /// Returns current time in nanoseconds.
+ std::chrono::nanoseconds GetGlobalTimeNs() const;
- std::optional<u64> NextAvailableCore(const s64 needed_ticks) const;
+    /// Dispatches any due events and returns the time in nanoseconds until the next event; thread-safe.
+ std::optional<s64> Advance();
private:
struct Event;
@@ -115,21 +134,14 @@ private:
/// Clear all pending events. This should ONLY be done on exit.
void ClearPendingEvents();
- static constexpr u64 num_cpu_cores = 4;
+ static void ThreadEntry(CoreTiming& instance);
+ void ThreadLoop();
- s64 global_timer = 0;
- s64 idled_cycles = 0;
- s64 slice_length = 0;
- u64 accumulated_ticks = 0;
- std::array<s64, num_cpu_cores> downcounts{};
- // Slice of time assigned to each core per run.
- std::array<s64, num_cpu_cores> time_slice{};
- u64 current_context = 0;
+ std::unique_ptr<Common::WallClock> clock;
- // Are we in a function that has been called from Advance()
- // If events are scheduled from a function that gets called from Advance(),
- // don't change slice_length and downcount.
- bool is_global_timer_sane = false;
+ u64 global_timer = 0;
+
+ std::chrono::nanoseconds start_point;
// The queue is a min-heap using std::make_heap/push_heap/pop_heap.
// We don't use std::priority_queue because we need to be able to serialize, unserialize and
@@ -139,8 +151,23 @@ private:
u64 event_fifo_id = 0;
std::shared_ptr<EventType> ev_lost;
-
- std::mutex inner_mutex;
+ Common::Event event{};
+ Common::Event pause_event{};
+ Common::SpinLock basic_lock{};
+ Common::SpinLock advance_lock{};
+ std::unique_ptr<std::thread> timer_thread;
+ std::atomic<bool> paused{};
+ std::atomic<bool> paused_set{};
+ std::atomic<bool> wait_set{};
+ std::atomic<bool> shutting_down{};
+ std::atomic<bool> has_started{};
+ std::function<void(void)> on_thread_init{};
+
+ bool is_multicore{};
+
+ /// Cycle timing
+ u64 ticks{};
+ s64 downcount{};
};
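
In single-core mode the tick members at the bottom of the class are the emulated clock itself: AddTicks consumes the fixed execution slice (MAX_SLICE_LENGTH in the .cpp), ResetTicks rearms the downcount, and Idle fast-forwards straight to the next deadline instead of emulating empty cycles. A toy standalone model of that accounting (the slice length mirrors the patch; the rest is illustrative):

#include <cstdint>
#include <iostream>

struct TickAccounting {
    std::uint64_t ticks = 0;       // emulated CPU cycles so far
    std::int64_t downcount = 4000; // MAX_SLICE_LENGTH in the patch

    // Executed cycles advance time and burn the slice budget.
    void AddTicks(std::uint64_t t) {
        ticks += t;
        downcount -= static_cast<std::int64_t>(t);
    }
    void ResetTicks() { downcount = 4000; }
    // Nothing to run: jump straight to the next event's deadline.
    void Idle(std::uint64_t next_event_ticks) {
        if (next_event_ticks > ticks) {
            ticks = next_event_ticks;
        }
    }
};

int main() {
    TickAccounting cpu;
    cpu.AddTicks(1500);
    std::cout << "downcount after slice: " << cpu.downcount << '\n'; // 2500
    cpu.Idle(10'000);
    std::cout << "ticks after idle: " << cpu.ticks << '\n'; // 10000
}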
/// Creates a core timing event with the given name and callback.
diff --git a/src/core/core_timing_util.cpp b/src/core/core_timing_util.cpp
index be34b26fe..aefc63663 100644
--- a/src/core/core_timing_util.cpp
+++ b/src/core/core_timing_util.cpp
@@ -38,15 +38,8 @@ s64 usToCycles(std::chrono::microseconds us) {
}
s64 nsToCycles(std::chrono::nanoseconds ns) {
- if (static_cast<u64>(ns.count() / 1000000000) > MAX_VALUE_TO_MULTIPLY) {
- LOG_ERROR(Core_Timing, "Integer overflow, use max value");
- return std::numeric_limits<s64>::max();
- }
- if (static_cast<u64>(ns.count()) > MAX_VALUE_TO_MULTIPLY) {
- LOG_DEBUG(Core_Timing, "Time very big, do rounding");
- return Hardware::BASE_CLOCK_RATE * (ns.count() / 1000000000);
- }
- return (Hardware::BASE_CLOCK_RATE * ns.count()) / 1000000000;
+ const u128 temporal = Common::Multiply64Into128(ns.count(), Hardware::BASE_CLOCK_RATE);
+ return Common::Divide128On32(temporal, static_cast<u32>(1000000000)).first;
}
u64 msToClockCycles(std::chrono::milliseconds ns) {
@@ -69,4 +62,22 @@ u64 CpuCyclesToClockCycles(u64 ticks) {
return Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
}
+std::chrono::milliseconds CyclesToMs(s64 cycles) {
+ const u128 temporal = Common::Multiply64Into128(cycles, 1000);
+ u64 ms = Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
+ return std::chrono::milliseconds(ms);
+}
+
+std::chrono::nanoseconds CyclesToNs(s64 cycles) {
+ const u128 temporal = Common::Multiply64Into128(cycles, 1000000000);
+ u64 ns = Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
+ return std::chrono::nanoseconds(ns);
+}
+
+std::chrono::microseconds CyclesToUs(s64 cycles) {
+ const u128 temporal = Common::Multiply64Into128(cycles, 1000000);
+ u64 us = Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
+ return std::chrono::microseconds(us);
+}
+
} // namespace Core::Timing
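
These out-of-line conversions fix a real overflow in the old inline versions: with a base clock around 1.02 GHz, cycles * 1'000'000'000 exceeds 2^64 after roughly 18 seconds of emulated time. Multiply64Into128 and Divide128On32 are yuzu's helpers; the same widening trick can be sketched on GCC/Clang with the built-in unsigned __int128 (the clock rate below is an assumed illustrative value):

#include <cstdint>
#include <iostream>

// Assumed stand-in for Hardware::BASE_CLOCK_RATE.
constexpr std::uint64_t BASE_CLOCK_RATE = 1'020'000'000; // ~1020 MHz

// 64x64 -> 128-bit multiply, then divide back down: no intermediate overflow.
std::uint64_t CyclesToNs(std::uint64_t cycles) {
    const unsigned __int128 wide =
        static_cast<unsigned __int128>(cycles) * 1'000'000'000u;
    return static_cast<std::uint64_t>(wide / BASE_CLOCK_RATE);
}

// The naive version silently wraps once cycles * 1e9 exceeds 2^64.
std::uint64_t CyclesToNsNaive(std::uint64_t cycles) {
    return cycles * 1'000'000'000u / BASE_CLOCK_RATE;
}

int main() {
    const std::uint64_t one_minute = BASE_CLOCK_RATE * 60;
    std::cout << CyclesToNs(one_minute) << " ns (correct)\n";
    std::cout << CyclesToNsNaive(one_minute) << " ns (overflowed)\n";
}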
diff --git a/src/core/core_timing_util.h b/src/core/core_timing_util.h
index b3c58447d..2ed979e14 100644
--- a/src/core/core_timing_util.h
+++ b/src/core/core_timing_util.h
@@ -16,18 +16,9 @@ s64 nsToCycles(std::chrono::nanoseconds ns);
u64 msToClockCycles(std::chrono::milliseconds ns);
u64 usToClockCycles(std::chrono::microseconds ns);
u64 nsToClockCycles(std::chrono::nanoseconds ns);
-
-inline std::chrono::milliseconds CyclesToMs(s64 cycles) {
- return std::chrono::milliseconds(cycles * 1000 / Hardware::BASE_CLOCK_RATE);
-}
-
-inline std::chrono::nanoseconds CyclesToNs(s64 cycles) {
- return std::chrono::nanoseconds(cycles * 1000000000 / Hardware::BASE_CLOCK_RATE);
-}
-
-inline std::chrono::microseconds CyclesToUs(s64 cycles) {
- return std::chrono::microseconds(cycles * 1000000 / Hardware::BASE_CLOCK_RATE);
-}
+std::chrono::milliseconds CyclesToMs(s64 cycles);
+std::chrono::nanoseconds CyclesToNs(s64 cycles);
+std::chrono::microseconds CyclesToUs(s64 cycles);
u64 CpuCyclesToClockCycles(u64 ticks);
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 70ddbdcca..32afcf3ae 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -2,80 +2,372 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include "common/fiber.h"
+#include "common/microprofile.h"
+#include "common/thread.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
-#include "core/core_manager.h"
#include "core/core_timing.h"
#include "core/cpu_manager.h"
#include "core/gdbstub/gdbstub.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/physical_core.h"
+#include "core/hle/kernel/scheduler.h"
+#include "core/hle/kernel/thread.h"
+#include "video_core/gpu.h"
namespace Core {
CpuManager::CpuManager(System& system) : system{system} {}
CpuManager::~CpuManager() = default;
+void CpuManager::ThreadStart(CpuManager& cpu_manager, std::size_t core) {
+ cpu_manager.RunThread(core);
+}
+
void CpuManager::Initialize() {
- for (std::size_t index = 0; index < core_managers.size(); ++index) {
- core_managers[index] = std::make_unique<CoreManager>(system, index);
+ running_mode = true;
+ if (is_multicore) {
+ for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+ core_data[core].host_thread =
+ std::make_unique<std::thread>(ThreadStart, std::ref(*this), core);
+ }
+ } else {
+ core_data[0].host_thread = std::make_unique<std::thread>(ThreadStart, std::ref(*this), 0);
}
}
void CpuManager::Shutdown() {
- for (auto& cpu_core : core_managers) {
- cpu_core.reset();
+ running_mode = false;
+ Pause(false);
+ if (is_multicore) {
+ for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+ core_data[core].host_thread->join();
+ core_data[core].host_thread.reset();
+ }
+ } else {
+ core_data[0].host_thread->join();
+ core_data[0].host_thread.reset();
}
}
-CoreManager& CpuManager::GetCoreManager(std::size_t index) {
- return *core_managers.at(index);
+std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() {
+ return std::function<void(void*)>(GuestThreadFunction);
}
-const CoreManager& CpuManager::GetCoreManager(std::size_t index) const {
- return *core_managers.at(index);
+std::function<void(void*)> CpuManager::GetIdleThreadStartFunc() {
+ return std::function<void(void*)>(IdleThreadFunction);
}
-CoreManager& CpuManager::GetCurrentCoreManager() {
- // Otherwise, use single-threaded mode active_core variable
- return *core_managers[active_core];
+std::function<void(void*)> CpuManager::GetSuspendThreadStartFunc() {
+ return std::function<void(void*)>(SuspendThreadFunction);
}
-const CoreManager& CpuManager::GetCurrentCoreManager() const {
- // Otherwise, use single-threaded mode active_core variable
- return *core_managers[active_core];
+void CpuManager::GuestThreadFunction(void* cpu_manager_) {
+ CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
+ if (cpu_manager->is_multicore) {
+ cpu_manager->MultiCoreRunGuestThread();
+ } else {
+ cpu_manager->SingleCoreRunGuestThread();
+ }
}
-void CpuManager::RunLoop(bool tight_loop) {
- if (GDBStub::IsServerEnabled()) {
- GDBStub::HandlePacket();
+void CpuManager::GuestRewindFunction(void* cpu_manager_) {
+ CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
+ if (cpu_manager->is_multicore) {
+ cpu_manager->MultiCoreRunGuestLoop();
+ } else {
+ cpu_manager->SingleCoreRunGuestLoop();
+ }
+}
- // If the loop is halted and we want to step, use a tiny (1) number of instructions to
- // execute. Otherwise, get out of the loop function.
- if (GDBStub::GetCpuHaltFlag()) {
- if (GDBStub::GetCpuStepFlag()) {
- tight_loop = false;
- } else {
- return;
+void CpuManager::IdleThreadFunction(void* cpu_manager_) {
+ CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
+ if (cpu_manager->is_multicore) {
+ cpu_manager->MultiCoreRunIdleThread();
+ } else {
+ cpu_manager->SingleCoreRunIdleThread();
+ }
+}
+
+void CpuManager::SuspendThreadFunction(void* cpu_manager_) {
+ CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
+ if (cpu_manager->is_multicore) {
+ cpu_manager->MultiCoreRunSuspendThread();
+ } else {
+ cpu_manager->SingleCoreRunSuspendThread();
+ }
+}
+
+void* CpuManager::GetStartFuncParamater() {
+ return static_cast<void*>(this);
+}
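
The entry points above are all static trampolines: the fiber/thread start API takes a plain void(void*) function, so the CpuManager instance is smuggled through the void* parameter and recovered with a static_cast. A minimal standalone sketch of that trampoline idiom (names are illustrative, including the corrected spelling of the parameter getter):

#include <iostream>

// Stand-in for a C-style callback API that only accepts void(void*).
using StartFunc = void (*)(void*);

class Manager {
public:
    static void ThreadFunction(void* manager_) {
        // Recover the instance smuggled through the void* parameter.
        auto* manager = static_cast<Manager*>(manager_);
        manager->Run();
    }
    void* GetStartFuncParameter() { return this; }

private:
    void Run() { std::cout << "running on instance " << this << '\n'; }
};

int main() {
    Manager manager;
    StartFunc func = &Manager::ThreadFunction;
    func(manager.GetStartFuncParameter());
}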
+
+///////////////////////////////////////////////////////////////////////////////
+/// MultiCore ///
+///////////////////////////////////////////////////////////////////////////////
+
+void CpuManager::MultiCoreRunGuestThread() {
+ auto& kernel = system.Kernel();
+ {
+ auto& sched = kernel.CurrentScheduler();
+ sched.OnThreadStart();
+ }
+ MultiCoreRunGuestLoop();
+}
+
+void CpuManager::MultiCoreRunGuestLoop() {
+ auto& kernel = system.Kernel();
+ auto* thread = kernel.CurrentScheduler().GetCurrentThread();
+ while (true) {
+ auto* physical_core = &kernel.CurrentPhysicalCore();
+ auto& arm_interface = thread->ArmInterface();
+ system.EnterDynarmicProfile();
+ while (!physical_core->IsInterrupted()) {
+ arm_interface.Run();
+ physical_core = &kernel.CurrentPhysicalCore();
+ }
+ system.ExitDynarmicProfile();
+ arm_interface.ClearExclusiveState();
+ auto& scheduler = kernel.CurrentScheduler();
+ scheduler.TryDoContextSwitch();
+ }
+}
+
+void CpuManager::MultiCoreRunIdleThread() {
+ auto& kernel = system.Kernel();
+ while (true) {
+ auto& physical_core = kernel.CurrentPhysicalCore();
+ physical_core.Idle();
+ auto& scheduler = kernel.CurrentScheduler();
+ scheduler.TryDoContextSwitch();
+ }
+}
+
+void CpuManager::MultiCoreRunSuspendThread() {
+ auto& kernel = system.Kernel();
+ {
+ auto& sched = kernel.CurrentScheduler();
+ sched.OnThreadStart();
+ }
+ while (true) {
+ auto core = kernel.GetCurrentHostThreadID();
+ auto& scheduler = kernel.CurrentScheduler();
+ Kernel::Thread* current_thread = scheduler.GetCurrentThread();
+ Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context);
+ ASSERT(scheduler.ContextSwitchPending());
+ ASSERT(core == kernel.GetCurrentHostThreadID());
+ scheduler.TryDoContextSwitch();
+ }
+}
+
+void CpuManager::MultiCorePause(bool paused) {
+ if (!paused) {
+ bool all_not_barrier = false;
+ while (!all_not_barrier) {
+ all_not_barrier = true;
+ for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+ all_not_barrier &=
+ !core_data[core].is_running.load() && core_data[core].initialized.load();
+ }
+ }
+ for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+ core_data[core].enter_barrier->Set();
+ }
+ if (paused_state.load()) {
+ bool all_barrier = false;
+ while (!all_barrier) {
+ all_barrier = true;
+ for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+ all_barrier &=
+ core_data[core].is_paused.load() && core_data[core].initialized.load();
+ }
+ }
+ for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+ core_data[core].exit_barrier->Set();
+ }
+ }
+ } else {
+ /// Wait until all cores are paused.
+ bool all_barrier = false;
+ while (!all_barrier) {
+ all_barrier = true;
+ for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+ all_barrier &=
+ core_data[core].is_paused.load() && core_data[core].initialized.load();
}
}
+ /// Don't release the barrier
}
+ paused_state = paused;
+}
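
MultiCorePause reaches consensus by spinning until every core reports the expected state before releasing the barriers. A stripped-down standalone sketch of that all-cores handshake (core count and flags are illustrative, and the worker body is reduced to setting its flag):

#include <array>
#include <atomic>
#include <cstddef>
#include <iostream>
#include <thread>

constexpr std::size_t NUM_CPU_CORES = 4;
std::array<std::atomic<bool>, NUM_CPU_CORES> is_paused{};

// Spin until every core has observed the pause request.
void WaitAllPaused() {
    bool all_paused = false;
    while (!all_paused) {
        all_paused = true;
        for (std::size_t core = 0; core < NUM_CPU_CORES; ++core) {
            all_paused &= is_paused[core].load();
        }
    }
}

int main() {
    std::array<std::thread, NUM_CPU_CORES> workers;
    for (std::size_t core = 0; core < NUM_CPU_CORES; ++core) {
        workers[core] = std::thread([core] {
            // ... a real core would park on its exit_barrier here ...
            is_paused[core].store(true);
        });
    }
    WaitAllPaused();
    std::cout << "all cores paused\n";
    for (auto& w : workers) {
        w.join();
    }
}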
+
+///////////////////////////////////////////////////////////////////////////////
+/// SingleCore ///
+///////////////////////////////////////////////////////////////////////////////
- auto& core_timing = system.CoreTiming();
- core_timing.ResetRun();
- bool keep_running{};
- do {
- keep_running = false;
- for (active_core = 0; active_core < NUM_CPU_CORES; ++active_core) {
- core_timing.SwitchContext(active_core);
- if (core_timing.CanCurrentContextRun()) {
- core_managers[active_core]->RunLoop(tight_loop);
+void CpuManager::SingleCoreRunGuestThread() {
+ auto& kernel = system.Kernel();
+ {
+ auto& sched = kernel.CurrentScheduler();
+ sched.OnThreadStart();
+ }
+ SingleCoreRunGuestLoop();
+}
+
+void CpuManager::SingleCoreRunGuestLoop() {
+ auto& kernel = system.Kernel();
+ auto* thread = kernel.CurrentScheduler().GetCurrentThread();
+ while (true) {
+ auto* physical_core = &kernel.CurrentPhysicalCore();
+ auto& arm_interface = thread->ArmInterface();
+ system.EnterDynarmicProfile();
+ if (!physical_core->IsInterrupted()) {
+ arm_interface.Run();
+ physical_core = &kernel.CurrentPhysicalCore();
+ }
+ system.ExitDynarmicProfile();
+ thread->SetPhantomMode(true);
+ system.CoreTiming().Advance();
+ thread->SetPhantomMode(false);
+ arm_interface.ClearExclusiveState();
+ PreemptSingleCore();
+ auto& scheduler = kernel.Scheduler(current_core);
+ scheduler.TryDoContextSwitch();
+ }
+}
+
+void CpuManager::SingleCoreRunIdleThread() {
+ auto& kernel = system.Kernel();
+ while (true) {
+ auto& physical_core = kernel.CurrentPhysicalCore();
+ PreemptSingleCore(false);
+ system.CoreTiming().AddTicks(1000U);
+ idle_count++;
+ auto& scheduler = physical_core.Scheduler();
+ scheduler.TryDoContextSwitch();
+ }
+}
+
+void CpuManager::SingleCoreRunSuspendThread() {
+ auto& kernel = system.Kernel();
+ {
+ auto& sched = kernel.CurrentScheduler();
+ sched.OnThreadStart();
+ }
+ while (true) {
+ auto core = kernel.GetCurrentHostThreadID();
+ auto& scheduler = kernel.CurrentScheduler();
+ Kernel::Thread* current_thread = scheduler.GetCurrentThread();
+ Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context);
+ ASSERT(scheduler.ContextSwitchPending());
+ ASSERT(core == kernel.GetCurrentHostThreadID());
+ scheduler.TryDoContextSwitch();
+ }
+}
+
+void CpuManager::PreemptSingleCore(bool from_running_environment) {
+ std::size_t old_core = current_core;
+ auto& scheduler = system.Kernel().Scheduler(old_core);
+ Kernel::Thread* current_thread = scheduler.GetCurrentThread();
+    if (idle_count >= 4 || from_running_environment) {
+        if (!from_running_environment) {
+ system.CoreTiming().Idle();
+ idle_count = 0;
+ }
+ current_thread->SetPhantomMode(true);
+ system.CoreTiming().Advance();
+ current_thread->SetPhantomMode(false);
+ }
+ current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
+ system.CoreTiming().ResetTicks();
+ scheduler.Unload();
+ auto& next_scheduler = system.Kernel().Scheduler(current_core);
+ Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext());
+    /// The current scheduler may have changed while this fiber was parked.
+ auto& current_scheduler = system.Kernel().Scheduler(current_core);
+ current_scheduler.Reload();
+    auto* current_thread2 = current_scheduler.GetCurrentThread();
+    if (!current_thread2->IsIdleThread()) {
+ idle_count = 0;
+ }
+}
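
PreemptSingleCore rotates one host thread across the four emulated cores, only advancing wall time once every core has gone idle. A toy standalone model of the rotation and idle gating (this simplifies the fiber handoff away; constants are illustrative):

#include <cstddef>
#include <iostream>

constexpr std::size_t NUM_CPU_CORES = 4;

struct SingleCorePreemption {
    std::size_t current_core = 0;
    std::size_t idle_count = 0;

    // Returns the next core to run; fast-forwards time only once every
    // core has idled in a row, mirroring the idle_count >= 4 check above.
    std::size_t Preempt(bool core_was_idle) {
        idle_count = core_was_idle ? idle_count + 1 : 0;
        if (idle_count >= NUM_CPU_CORES) {
            std::cout << "all cores idle -> fast-forward core timing\n";
            idle_count = 0;
        }
        current_core = (current_core + 1) % NUM_CPU_CORES;
        return current_core;
    }
};

int main() {
    SingleCorePreemption sched;
    for (bool idle : {true, true, true, true, false}) {
        std::cout << "next core: " << sched.Preempt(idle) << '\n';
    }
}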
+
+void CpuManager::SingleCorePause(bool paused) {
+ if (!paused) {
+ bool all_not_barrier = false;
+ while (!all_not_barrier) {
+ all_not_barrier = !core_data[0].is_running.load() && core_data[0].initialized.load();
+ }
+ core_data[0].enter_barrier->Set();
+ if (paused_state.load()) {
+ bool all_barrier = false;
+ while (!all_barrier) {
+ all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
}
- keep_running |= core_timing.CanCurrentContextRun();
+ core_data[0].exit_barrier->Set();
}
- } while (keep_running);
+ } else {
+ /// Wait until all cores are paused.
+ bool all_barrier = false;
+ while (!all_barrier) {
+ all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
+ }
+ /// Don't release the barrier
+ }
+ paused_state = paused;
+}
+
+void CpuManager::Pause(bool paused) {
+ if (is_multicore) {
+ MultiCorePause(paused);
+ } else {
+ SingleCorePause(paused);
+ }
+}
- if (GDBStub::IsServerEnabled()) {
- GDBStub::SetCpuStepFlag(false);
+void CpuManager::RunThread(std::size_t core) {
+ /// Initialization
+ system.RegisterCoreThread(core);
+ std::string name;
+ if (is_multicore) {
+ name = "yuzu:CoreCPUThread_" + std::to_string(core);
+ } else {
+ name = "yuzu:CPUThread";
+ }
+ MicroProfileOnThreadCreate(name.c_str());
+ Common::SetCurrentThreadName(name.c_str());
+ Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
+ auto& data = core_data[core];
+ data.enter_barrier = std::make_unique<Common::Event>();
+ data.exit_barrier = std::make_unique<Common::Event>();
+ data.host_context = Common::Fiber::ThreadToFiber();
+ data.is_running = false;
+ data.initialized = true;
+ const bool sc_sync = !is_async_gpu && !is_multicore;
+ bool sc_sync_first_use = sc_sync;
+ /// Running
+ while (running_mode) {
+ data.is_running = false;
+ data.enter_barrier->Wait();
+ if (sc_sync_first_use) {
+ system.GPU().ObtainContext();
+ sc_sync_first_use = false;
+ }
+ auto& scheduler = system.Kernel().CurrentScheduler();
+ Kernel::Thread* current_thread = scheduler.GetCurrentThread();
+ data.is_running = true;
+ Common::Fiber::YieldTo(data.host_context, current_thread->GetHostContext());
+ data.is_running = false;
+ data.is_paused = true;
+ data.exit_barrier->Wait();
+ data.is_paused = false;
}
+ /// Time to cleanup
+ data.host_context->Exit();
+ data.enter_barrier.reset();
+ data.exit_barrier.reset();
+ data.initialized = false;
}
} // namespace Core
diff --git a/src/core/cpu_manager.h b/src/core/cpu_manager.h
index 97554d1bb..35929ed94 100644
--- a/src/core/cpu_manager.h
+++ b/src/core/cpu_manager.h
@@ -5,12 +5,19 @@
#pragma once
#include <array>
+#include <atomic>
+#include <functional>
#include <memory>
+#include <thread>
#include "core/hardware_properties.h"
+namespace Common {
+class Event;
+class Fiber;
+} // namespace Common
+
namespace Core {
-class CoreManager;
class System;
class CpuManager {
@@ -24,24 +31,75 @@ public:
CpuManager& operator=(const CpuManager&) = delete;
CpuManager& operator=(CpuManager&&) = delete;
+    /// Sets whether emulation is multicore or single core. Must be called before Initialize.
+ void SetMulticore(bool is_multicore) {
+ this->is_multicore = is_multicore;
+ }
+
+ /// Sets if emulation is using an asynchronous GPU.
+ void SetAsyncGpu(bool is_async_gpu) {
+ this->is_async_gpu = is_async_gpu;
+ }
+
void Initialize();
void Shutdown();
- CoreManager& GetCoreManager(std::size_t index);
- const CoreManager& GetCoreManager(std::size_t index) const;
+ void Pause(bool paused);
- CoreManager& GetCurrentCoreManager();
- const CoreManager& GetCurrentCoreManager() const;
+ std::function<void(void*)> GetGuestThreadStartFunc();
+ std::function<void(void*)> GetIdleThreadStartFunc();
+ std::function<void(void*)> GetSuspendThreadStartFunc();
+ void* GetStartFuncParamater();
- std::size_t GetActiveCoreIndex() const {
- return active_core;
- }
+    void PreemptSingleCore(bool from_running_environment = true);
- void RunLoop(bool tight_loop);
+ std::size_t CurrentCore() const {
+ return current_core.load();
+ }
private:
- std::array<std::unique_ptr<CoreManager>, Hardware::NUM_CPU_CORES> core_managers;
- std::size_t active_core{}; ///< Active core, only used in single thread mode
+ static void GuestThreadFunction(void* cpu_manager);
+ static void GuestRewindFunction(void* cpu_manager);
+ static void IdleThreadFunction(void* cpu_manager);
+ static void SuspendThreadFunction(void* cpu_manager);
+
+ void MultiCoreRunGuestThread();
+ void MultiCoreRunGuestLoop();
+ void MultiCoreRunIdleThread();
+ void MultiCoreRunSuspendThread();
+ void MultiCorePause(bool paused);
+
+ void SingleCoreRunGuestThread();
+ void SingleCoreRunGuestLoop();
+ void SingleCoreRunIdleThread();
+ void SingleCoreRunSuspendThread();
+ void SingleCorePause(bool paused);
+
+ static void ThreadStart(CpuManager& cpu_manager, std::size_t core);
+
+ void RunThread(std::size_t core);
+
+ struct CoreData {
+ std::shared_ptr<Common::Fiber> host_context;
+ std::unique_ptr<Common::Event> enter_barrier;
+ std::unique_ptr<Common::Event> exit_barrier;
+ std::atomic<bool> is_running;
+ std::atomic<bool> is_paused;
+ std::atomic<bool> initialized;
+ std::unique_ptr<std::thread> host_thread;
+ };
+
+ std::atomic<bool> running_mode{};
+ std::atomic<bool> paused_state{};
+
+ std::array<CoreData, Core::Hardware::NUM_CPU_CORES> core_data{};
+
+ bool is_async_gpu{};
+ bool is_multicore{};
+ std::atomic<std::size_t> current_core{};
+ std::size_t preemption_count{};
+ std::size_t idle_count{};
+ static constexpr std::size_t max_cycle_runs = 5;
System& system;
};
diff --git a/src/core/gdbstub/gdbstub.cpp b/src/core/gdbstub/gdbstub.cpp
index 70c0f8b80..79f22a403 100644
--- a/src/core/gdbstub/gdbstub.cpp
+++ b/src/core/gdbstub/gdbstub.cpp
@@ -35,7 +35,6 @@
#include "common/swap.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
-#include "core/core_manager.h"
#include "core/gdbstub/gdbstub.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/process.h"
diff --git a/src/core/hardware_properties.h b/src/core/hardware_properties.h
index b04e046ed..456b41e1b 100644
--- a/src/core/hardware_properties.h
+++ b/src/core/hardware_properties.h
@@ -42,6 +42,10 @@ struct EmuThreadHandle {
constexpr u32 invalid_handle = 0xFFFFFFFF;
return {invalid_handle, invalid_handle};
}
+
+ bool IsInvalid() const {
+ return (*this) == InvalidHandle();
+ }
};
} // namespace Core
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 8475b698c..4d2a9b35d 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -7,11 +7,15 @@
#include "common/assert.h"
#include "common/common_types.h"
+#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
#include "core/memory.h"
@@ -20,6 +24,7 @@ namespace Kernel {
// Wake up num_to_wake (or all) threads in a vector.
void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads,
s32 num_to_wake) {
+ auto& time_manager = system.Kernel().TimeManager();
// Only process up to 'target' threads, unless 'target' is <= 0, in which case process
// them all.
std::size_t last = waiting_threads.size();
@@ -29,12 +34,10 @@ void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& wai
// Signal the waiting threads.
for (std::size_t i = 0; i < last; i++) {
- ASSERT(waiting_threads[i]->GetStatus() == ThreadStatus::WaitArb);
- waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS);
+ waiting_threads[i]->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
RemoveThread(waiting_threads[i]);
- waiting_threads[i]->SetArbiterWaitAddress(0);
+ waiting_threads[i]->WaitForArbitration(false);
waiting_threads[i]->ResumeFromWait();
- system.PrepareReschedule(waiting_threads[i]->GetProcessorID());
}
}
@@ -56,6 +59,7 @@ ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 v
}
ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
+ SchedulerLock lock(system.Kernel());
const std::vector<std::shared_ptr<Thread>> waiting_threads =
GetThreadsWaitingOnAddress(address);
WakeThreads(waiting_threads, num_to_wake);
@@ -64,6 +68,7 @@ ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
s32 num_to_wake) {
+ SchedulerLock lock(system.Kernel());
auto& memory = system.Memory();
// Ensure that we can write to the address.
@@ -71,16 +76,24 @@ ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32
return ERR_INVALID_ADDRESS_STATE;
}
- if (static_cast<s32>(memory.Read32(address)) != value) {
- return ERR_INVALID_STATE;
- }
+ const std::size_t current_core = system.CurrentCoreIndex();
+ auto& monitor = system.Monitor();
+ u32 current_value;
+ do {
+ current_value = monitor.ExclusiveRead32(current_core, address);
+
+ if (current_value != value) {
+ return ERR_INVALID_STATE;
+ }
+ current_value++;
+ } while (!monitor.ExclusiveWrite32(current_core, address, current_value));
- memory.Write32(address, static_cast<u32>(value + 1));
return SignalToAddressOnly(address, num_to_wake);
}
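
The exclusive-monitor loop above emulates an LL/SC read-modify-write: retry until the store lands with no intervening write to the address. Against plain host memory the equivalent shape is a compare-and-swap loop; a hedged standalone sketch where std::atomic stands in for ExclusiveRead32/ExclusiveWrite32:

#include <atomic>
#include <cstdint>
#include <iostream>

// Increment `word` only if it still equals `expected_value`, retrying on
// contention; returns false if the observed value no longer matches.
bool IncrementIfEqual(std::atomic<std::uint32_t>& word, std::uint32_t expected_value) {
    std::uint32_t current = word.load();
    do {
        if (current != expected_value) {
            return false; // mirrors the ERR_INVALID_STATE early-out
        }
        // On failure, compare_exchange re-reads the value into `current`,
        // like a failed exclusive write forcing another ExclusiveRead32.
    } while (!word.compare_exchange_weak(current, current + 1));
    return true;
}

int main() {
    std::atomic<std::uint32_t> value{41};
    std::cout << std::boolalpha << IncrementIfEqual(value, 41) << ' '
              << value.load() << '\n'; // true 42
    std::cout << IncrementIfEqual(value, 41) << ' ' << value.load() << '\n'; // false 42
}

compare_exchange_weak is fine here because the loop already retries; a single-shot check would want the strong variant.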
ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
s32 num_to_wake) {
+ SchedulerLock lock(system.Kernel());
auto& memory = system.Memory();
// Ensure that we can write to the address.
@@ -92,29 +105,33 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
const std::vector<std::shared_ptr<Thread>> waiting_threads =
GetThreadsWaitingOnAddress(address);
- // Determine the modified value depending on the waiting count.
+ const std::size_t current_core = system.CurrentCoreIndex();
+ auto& monitor = system.Monitor();
s32 updated_value;
- if (num_to_wake <= 0) {
- if (waiting_threads.empty()) {
- updated_value = value + 1;
- } else {
- updated_value = value - 1;
+ do {
+ updated_value = monitor.ExclusiveRead32(current_core, address);
+
+ if (updated_value != value) {
+ return ERR_INVALID_STATE;
}
- } else {
- if (waiting_threads.empty()) {
- updated_value = value + 1;
- } else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
- updated_value = value - 1;
+ // Determine the modified value depending on the waiting count.
+ if (num_to_wake <= 0) {
+ if (waiting_threads.empty()) {
+ updated_value = value + 1;
+ } else {
+ updated_value = value - 1;
+ }
} else {
- updated_value = value;
+ if (waiting_threads.empty()) {
+ updated_value = value + 1;
+ } else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
+ updated_value = value - 1;
+ } else {
+ updated_value = value;
+ }
}
- }
+ } while (!monitor.ExclusiveWrite32(current_core, address, updated_value));
- if (static_cast<s32>(memory.Read32(address)) != value) {
- return ERR_INVALID_STATE;
- }
-
- memory.Write32(address, static_cast<u32>(updated_value));
WakeThreads(waiting_threads, num_to_wake);
return RESULT_SUCCESS;
}
@@ -136,60 +153,127 @@ ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s
ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
bool should_decrement) {
auto& memory = system.Memory();
+ auto& kernel = system.Kernel();
+ Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
- // Ensure that we can read the address.
- if (!memory.IsValidVirtualAddress(address)) {
- return ERR_INVALID_ADDRESS_STATE;
- }
+ Handle event_handle = InvalidHandle;
+ {
+ SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
+
+ if (current_thread->IsPendingTermination()) {
+ lock.CancelSleep();
+ return ERR_THREAD_TERMINATING;
+ }
+
+ // Ensure that we can read the address.
+ if (!memory.IsValidVirtualAddress(address)) {
+ lock.CancelSleep();
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ s32 current_value = static_cast<s32>(memory.Read32(address));
+ if (current_value >= value) {
+ lock.CancelSleep();
+ return ERR_INVALID_STATE;
+ }
+
+ current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
+
+ s32 decrement_value;
+
+ const std::size_t current_core = system.CurrentCoreIndex();
+ auto& monitor = system.Monitor();
+ do {
+ current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
+ if (should_decrement) {
+ decrement_value = current_value - 1;
+ } else {
+ decrement_value = current_value;
+ }
+ } while (
+ !monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value)));
+
+ // Short-circuit without rescheduling, if timeout is zero.
+ if (timeout == 0) {
+ lock.CancelSleep();
+ return RESULT_TIMEOUT;
+ }
- const s32 cur_value = static_cast<s32>(memory.Read32(address));
- if (cur_value >= value) {
- return ERR_INVALID_STATE;
+ current_thread->SetArbiterWaitAddress(address);
+ InsertThread(SharedFrom(current_thread));
+ current_thread->SetStatus(ThreadStatus::WaitArb);
+ current_thread->WaitForArbitration(true);
}
- if (should_decrement) {
- memory.Write32(address, static_cast<u32>(cur_value - 1));
+ if (event_handle != InvalidHandle) {
+ auto& time_manager = kernel.TimeManager();
+ time_manager.UnscheduleTimeEvent(event_handle);
}
- // Short-circuit without rescheduling, if timeout is zero.
- if (timeout == 0) {
- return RESULT_TIMEOUT;
+ {
+ SchedulerLock lock(kernel);
+ if (current_thread->IsWaitingForArbitration()) {
+ RemoveThread(SharedFrom(current_thread));
+ current_thread->WaitForArbitration(false);
+ }
}
- return WaitForAddressImpl(address, timeout);
+ return current_thread->GetSignalingResult();
}
ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
auto& memory = system.Memory();
+ auto& kernel = system.Kernel();
+ Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
- // Ensure that we can read the address.
- if (!memory.IsValidVirtualAddress(address)) {
- return ERR_INVALID_ADDRESS_STATE;
- }
+ Handle event_handle = InvalidHandle;
+ {
+ SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
+
+ if (current_thread->IsPendingTermination()) {
+ lock.CancelSleep();
+ return ERR_THREAD_TERMINATING;
+ }
+
+ // Ensure that we can read the address.
+ if (!memory.IsValidVirtualAddress(address)) {
+ lock.CancelSleep();
+ return ERR_INVALID_ADDRESS_STATE;
+ }
- // Only wait for the address if equal.
- if (static_cast<s32>(memory.Read32(address)) != value) {
- return ERR_INVALID_STATE;
+ s32 current_value = static_cast<s32>(memory.Read32(address));
+ if (current_value != value) {
+ lock.CancelSleep();
+ return ERR_INVALID_STATE;
+ }
+
+ // Short-circuit without rescheduling, if timeout is zero.
+ if (timeout == 0) {
+ lock.CancelSleep();
+ return RESULT_TIMEOUT;
+ }
+
+ current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
+ current_thread->SetArbiterWaitAddress(address);
+ InsertThread(SharedFrom(current_thread));
+ current_thread->SetStatus(ThreadStatus::WaitArb);
+ current_thread->WaitForArbitration(true);
}
- // Short-circuit without rescheduling if timeout is zero.
- if (timeout == 0) {
- return RESULT_TIMEOUT;
+ if (event_handle != InvalidHandle) {
+ auto& time_manager = kernel.TimeManager();
+ time_manager.UnscheduleTimeEvent(event_handle);
}
- return WaitForAddressImpl(address, timeout);
-}
+ {
+ SchedulerLock lock(kernel);
+ if (current_thread->IsWaitingForArbitration()) {
+ RemoveThread(SharedFrom(current_thread));
+ current_thread->WaitForArbitration(false);
+ }
+ }
-ResultCode AddressArbiter::WaitForAddressImpl(VAddr address, s64 timeout) {
- Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
- current_thread->SetArbiterWaitAddress(address);
- InsertThread(SharedFrom(current_thread));
- current_thread->SetStatus(ThreadStatus::WaitArb);
- current_thread->InvalidateWakeupCallback();
- current_thread->WakeAfterDelay(timeout);
-
- system.PrepareReschedule(current_thread->GetProcessorID());
- return RESULT_TIMEOUT;
+ return current_thread->GetSignalingResult();
}
void AddressArbiter::HandleWakeupThread(std::shared_ptr<Thread> thread) {
@@ -221,9 +305,9 @@ void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) {
const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(),
[&thread](const auto& entry) { return thread == entry; });
- ASSERT(iter != thread_list.cend());
-
- thread_list.erase(iter);
+ if (iter != thread_list.cend()) {
+ thread_list.erase(iter);
+ }
}
std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(
diff --git a/src/core/hle/kernel/address_arbiter.h b/src/core/hle/kernel/address_arbiter.h
index f958eee5a..0b05d533c 100644
--- a/src/core/hle/kernel/address_arbiter.h
+++ b/src/core/hle/kernel/address_arbiter.h
@@ -73,9 +73,6 @@ private:
/// Waits on an address if the value passed is equal to the argument value.
ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);
- // Waits on the given address with a timeout in nanoseconds
- ResultCode WaitForAddressImpl(VAddr address, s64 timeout);
-
/// Wake up num_to_wake (or all) threads in a vector.
void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake);
diff --git a/src/core/hle/kernel/client_port.cpp b/src/core/hle/kernel/client_port.cpp
index 5498fd313..8aff2227a 100644
--- a/src/core/hle/kernel/client_port.cpp
+++ b/src/core/hle/kernel/client_port.cpp
@@ -34,7 +34,7 @@ ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
}
// Wake the threads waiting on the ServerPort
- server_port->WakeupAllWaitingThreads();
+ server_port->Signal();
return MakeResult(std::move(client));
}
diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h
index 29bfa3621..d4e5d88cf 100644
--- a/src/core/hle/kernel/errors.h
+++ b/src/core/hle/kernel/errors.h
@@ -12,6 +12,7 @@ namespace Kernel {
constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7};
constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14};
+constexpr ResultCode ERR_THREAD_TERMINATING{ErrorModule::Kernel, 59};
constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101};
constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102};
constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103};
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 0d01a7047..9277b5d08 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -14,14 +14,17 @@
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/hle/ipc_helpers.h"
+#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/readable_event.h"
+#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/time_manager.h"
#include "core/hle/kernel/writable_event.h"
#include "core/memory.h"
@@ -46,15 +49,6 @@ std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
const std::string& reason, u64 timeout, WakeupCallback&& callback,
std::shared_ptr<WritableEvent> writable_event) {
// Put the client thread to sleep until the wait event is signaled or the timeout expires.
- thread->SetWakeupCallback(
- [context = *this, callback](ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
- std::shared_ptr<SynchronizationObject> object,
- std::size_t index) mutable -> bool {
- ASSERT(thread->GetStatus() == ThreadStatus::WaitHLEEvent);
- callback(thread, context, reason);
- context.WriteToOutgoingCommandBuffer(*thread);
- return true;
- });
if (!writable_event) {
// Create event if not provided
@@ -62,14 +56,26 @@ std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
writable_event = pair.writable;
}
- const auto readable_event{writable_event->GetReadableEvent()};
- writable_event->Clear();
- thread->SetStatus(ThreadStatus::WaitHLEEvent);
- thread->SetSynchronizationObjects({readable_event});
- readable_event->AddWaitingThread(thread);
-
- if (timeout > 0) {
- thread->WakeAfterDelay(timeout);
+ {
+ Handle event_handle = InvalidHandle;
+ SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout);
+ thread->SetHLECallback(
+ [context = *this, callback](std::shared_ptr<Thread> thread) mutable -> bool {
+ ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT
+ ? ThreadWakeupReason::Timeout
+ : ThreadWakeupReason::Signal;
+ callback(thread, context, reason);
+ context.WriteToOutgoingCommandBuffer(*thread);
+ return true;
+ });
+ const auto readable_event{writable_event->GetReadableEvent()};
+ writable_event->Clear();
+ thread->SetHLESyncObject(readable_event.get());
+ thread->SetStatus(ThreadStatus::WaitHLEEvent);
+ thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
+ readable_event->AddWaitingThread(thread);
+ lock.Release();
+ thread->SetHLETimeEvent(event_handle);
}
is_thread_waiting = true;
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 7655382fa..1f2af7a1b 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -2,6 +2,7 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <array>
#include <atomic>
#include <bitset>
#include <functional>
@@ -13,11 +14,15 @@
#include "common/assert.h"
#include "common/logging/log.h"
+#include "common/microprofile.h"
+#include "common/thread.h"
#include "core/arm/arm_interface.h"
+#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
+#include "core/cpu_manager.h"
#include "core/device_memory.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/client_port.h"
@@ -39,85 +44,28 @@
#include "core/hle/result.h"
#include "core/memory.h"
-namespace Kernel {
-
-/**
- * Callback that will wake up the thread it was scheduled for
- * @param thread_handle The handle of the thread that's been awoken
- * @param cycles_late The number of CPU cycles that have passed since the desired wakeup time
- */
-static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
- const auto proper_handle = static_cast<Handle>(thread_handle);
- const auto& system = Core::System::GetInstance();
-
- // Lock the global kernel mutex when we enter the kernel HLE.
- std::lock_guard lock{HLE::g_hle_lock};
-
- std::shared_ptr<Thread> thread =
- system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
- if (thread == nullptr) {
- LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", proper_handle);
- return;
- }
-
- bool resume = true;
-
- if (thread->GetStatus() == ThreadStatus::WaitSynch ||
- thread->GetStatus() == ThreadStatus::WaitHLEEvent) {
- // Remove the thread from each of its waiting objects' waitlists
- for (const auto& object : thread->GetSynchronizationObjects()) {
- object->RemoveWaitingThread(thread);
- }
- thread->ClearSynchronizationObjects();
-
- // Invoke the wakeup callback before clearing the wait objects
- if (thread->HasWakeupCallback()) {
- resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Timeout, thread, nullptr, 0);
- }
- } else if (thread->GetStatus() == ThreadStatus::WaitMutex ||
- thread->GetStatus() == ThreadStatus::WaitCondVar) {
- thread->SetMutexWaitAddress(0);
- thread->SetWaitHandle(0);
- if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
- thread->GetOwnerProcess()->RemoveConditionVariableThread(thread);
- thread->SetCondVarWaitAddress(0);
- }
-
- auto* const lock_owner = thread->GetLockOwner();
- // Threads waking up by timeout from WaitProcessWideKey do not perform priority inheritance
- // and don't have a lock owner unless SignalProcessWideKey was called first and the thread
- // wasn't awakened due to the mutex already being acquired.
- if (lock_owner != nullptr) {
- lock_owner->RemoveMutexWaiter(thread);
- }
- }
+MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));
- if (thread->GetStatus() == ThreadStatus::WaitArb) {
- auto& address_arbiter = thread->GetOwnerProcess()->GetAddressArbiter();
- address_arbiter.HandleWakeupThread(thread);
- }
-
- if (resume) {
- if (thread->GetStatus() == ThreadStatus::WaitCondVar ||
- thread->GetStatus() == ThreadStatus::WaitArb) {
- thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
- }
- thread->ResumeFromWait();
- }
-}
+namespace Kernel {
struct KernelCore::Impl {
explicit Impl(Core::System& system, KernelCore& kernel)
: global_scheduler{kernel}, synchronization{system}, time_manager{system}, system{system} {}
+ void SetMulticore(bool is_multicore) {
+ this->is_multicore = is_multicore;
+ }
+
void Initialize(KernelCore& kernel) {
Shutdown();
+ RegisterHostThread();
InitializePhysicalCores();
InitializeSystemResourceLimit(kernel);
InitializeMemoryLayout();
- InitializeThreads();
- InitializePreemption();
+ InitializePreemption(kernel);
+ InitializeSchedulers();
+ InitializeSuspendThreads();
}
void Shutdown() {
@@ -126,13 +74,26 @@ struct KernelCore::Impl {
next_user_process_id = Process::ProcessIDMin;
next_thread_id = 1;
+ for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+ if (suspend_threads[i]) {
+ suspend_threads[i].reset();
+ }
+ }
+
+ for (std::size_t i = 0; i < cores.size(); i++) {
+ cores[i].Shutdown();
+ schedulers[i].reset();
+ }
+ cores.clear();
+
+ registered_core_threads.reset();
+
process_list.clear();
current_process = nullptr;
system_resource_limit = nullptr;
global_handle_table.Clear();
- thread_wakeup_event_type = nullptr;
preemption_event = nullptr;
global_scheduler.Shutdown();
@@ -145,13 +106,21 @@ struct KernelCore::Impl {
cores.clear();
exclusive_monitor.reset();
+ host_thread_ids.clear();
}
void InitializePhysicalCores() {
exclusive_monitor =
Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
- cores.emplace_back(system, i, *exclusive_monitor);
+ schedulers[i] = std::make_unique<Kernel::Scheduler>(system, i);
+ cores.emplace_back(system, i, *schedulers[i], interrupts[i]);
+ }
+ }
+
+ void InitializeSchedulers() {
+ for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+ cores[i].Scheduler().Initialize();
}
}
@@ -173,15 +142,13 @@ struct KernelCore::Impl {
}
}
- void InitializeThreads() {
- thread_wakeup_event_type =
- Core::Timing::CreateEvent("ThreadWakeupCallback", ThreadWakeupCallback);
- }
-
- void InitializePreemption() {
- preemption_event =
- Core::Timing::CreateEvent("PreemptionCallback", [this](u64 userdata, s64 cycles_late) {
- global_scheduler.PreemptThreads();
+ void InitializePreemption(KernelCore& kernel) {
+ preemption_event = Core::Timing::CreateEvent(
+ "PreemptionCallback", [this, &kernel](u64 userdata, s64 cycles_late) {
+ {
+ SchedulerLock lock(kernel);
+ global_scheduler.PreemptThreads();
+ }
s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
});
@@ -190,6 +157,20 @@ struct KernelCore::Impl {
system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
}
+ void InitializeSuspendThreads() {
+ for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+ std::string name = "Suspend Thread Id:" + std::to_string(i);
+ std::function<void(void*)> init_func =
+ system.GetCpuManager().GetSuspendThreadStartFunc();
+ void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
+ ThreadType type =
+ static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND);
+ auto thread_res = Thread::Create(system, type, name, 0, 0, 0, static_cast<u32>(i), 0,
+ nullptr, std::move(init_func), init_func_parameter);
+ suspend_threads[i] = std::move(thread_res).Unwrap();
+ }
+ }
+
void MakeCurrentProcess(Process* process) {
current_process = process;
@@ -197,15 +178,17 @@ struct KernelCore::Impl {
return;
}
- for (auto& core : cores) {
- core.SetIs64Bit(process->Is64BitProcess());
+ u32 core_id = GetCurrentHostThreadID();
+ if (core_id < Core::Hardware::NUM_CPU_CORES) {
+ system.Memory().SetCurrentPageTable(*process, core_id);
}
-
- system.Memory().SetCurrentPageTable(*process);
}
void RegisterCoreThread(std::size_t core_id) {
std::unique_lock lock{register_thread_mutex};
+ if (!is_multicore) {
+ single_core_thread_id = std::this_thread::get_id();
+ }
const std::thread::id this_id = std::this_thread::get_id();
const auto it = host_thread_ids.find(this_id);
ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
@@ -219,12 +202,19 @@ struct KernelCore::Impl {
std::unique_lock lock{register_thread_mutex};
const std::thread::id this_id = std::this_thread::get_id();
const auto it = host_thread_ids.find(this_id);
- ASSERT(it == host_thread_ids.end());
+ if (it != host_thread_ids.end()) {
+ return;
+ }
host_thread_ids[this_id] = registered_thread_ids++;
}
u32 GetCurrentHostThreadID() const {
const std::thread::id this_id = std::this_thread::get_id();
+ if (!is_multicore) {
+ if (single_core_thread_id == this_id) {
+ return static_cast<u32>(system.GetCpuManager().CurrentCore());
+ }
+ }
const auto it = host_thread_ids.find(this_id);
if (it == host_thread_ids.end()) {
return Core::INVALID_HOST_THREAD_ID;
@@ -240,7 +230,7 @@ struct KernelCore::Impl {
}
const Kernel::Scheduler& sched = cores[result.host_handle].Scheduler();
const Kernel::Thread* current = sched.GetCurrentThread();
- if (current != nullptr) {
+ if (current != nullptr && !current->IsPhantomMode()) {
result.guest_handle = current->GetGlobalHandle();
} else {
result.guest_handle = InvalidHandle;
@@ -313,7 +303,6 @@ struct KernelCore::Impl {
std::shared_ptr<ResourceLimit> system_resource_limit;
- std::shared_ptr<Core::Timing::EventType> thread_wakeup_event_type;
std::shared_ptr<Core::Timing::EventType> preemption_event;
// This is the kernel's handle table or supervisor handle table which
@@ -343,6 +332,15 @@ struct KernelCore::Impl {
std::shared_ptr<Kernel::SharedMemory> irs_shared_mem;
std::shared_ptr<Kernel::SharedMemory> time_shared_mem;
+ std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
+ std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
+ std::array<std::unique_ptr<Kernel::Scheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
+
+ bool is_multicore{};
+ std::thread::id single_core_thread_id{};
+
+ std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{};
+
// System context
Core::System& system;
};
@@ -352,6 +350,10 @@ KernelCore::~KernelCore() {
Shutdown();
}
+void KernelCore::SetMulticore(bool is_multicore) {
+ impl->SetMulticore(is_multicore);
+}
+
void KernelCore::Initialize() {
impl->Initialize(*this);
}
@@ -397,11 +399,11 @@ const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const {
}
Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) {
- return impl->cores[id].Scheduler();
+ return *impl->schedulers[id];
}
const Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) const {
- return impl->cores[id].Scheduler();
+ return *impl->schedulers[id];
}
Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) {
@@ -412,6 +414,39 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
return impl->cores[id];
}
+Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
+ u32 core_id = impl->GetCurrentHostThreadID();
+ ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+ return impl->cores[core_id];
+}
+
+const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
+ u32 core_id = impl->GetCurrentHostThreadID();
+ ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+ return impl->cores[core_id];
+}
+
+Kernel::Scheduler& KernelCore::CurrentScheduler() {
+ u32 core_id = impl->GetCurrentHostThreadID();
+ ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+ return *impl->schedulers[core_id];
+}
+
+const Kernel::Scheduler& KernelCore::CurrentScheduler() const {
+ u32 core_id = impl->GetCurrentHostThreadID();
+ ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+ return *impl->schedulers[core_id];
+}
+
+std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
+ return impl->interrupts;
+}
+
+const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts()
+ const {
+ return impl->interrupts;
+}
+
Kernel::Synchronization& KernelCore::Synchronization() {
return impl->synchronization;
}
@@ -437,15 +472,17 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
}
void KernelCore::InvalidateAllInstructionCaches() {
- for (std::size_t i = 0; i < impl->global_scheduler.CpuCoresCount(); i++) {
- PhysicalCore(i).ArmInterface().ClearInstructionCache();
+ auto& threads = GlobalScheduler().GetThreadList();
+ for (auto& thread : threads) {
+ if (!thread->IsHLEThread()) {
+ auto& arm_interface = thread->ArmInterface();
+ arm_interface.ClearInstructionCache();
+ }
}
}
void KernelCore::PrepareReschedule(std::size_t id) {
- if (id < impl->global_scheduler.CpuCoresCount()) {
- impl->cores[id].Stop();
- }
+ // TODO: Reimplement this.
}
void KernelCore::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) {
@@ -481,10 +518,6 @@ u64 KernelCore::CreateNewUserProcessID() {
return impl->next_user_process_id++;
}
-const std::shared_ptr<Core::Timing::EventType>& KernelCore::ThreadWakeupCallbackEventType() const {
- return impl->thread_wakeup_event_type;
-}
-
Kernel::HandleTable& KernelCore::GlobalHandleTable() {
return impl->global_handle_table;
}
@@ -557,4 +590,34 @@ const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const {
return *impl->time_shared_mem;
}
+void KernelCore::Suspend(bool in_suspension) {
+ const bool should_suspend = exception_exited || in_suspension;
+ {
+ SchedulerLock lock(*this);
+ ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep;
+ for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+ impl->suspend_threads[i]->SetStatus(status);
+ }
+ }
+}
+
+bool KernelCore::IsMulticore() const {
+ return impl->is_multicore;
+}
+
+void KernelCore::ExceptionalExit() {
+ exception_exited = true;
+ Suspend(true);
+}
+
+void KernelCore::EnterSVCProfile() {
+ std::size_t core = impl->GetCurrentHostThreadID();
+ impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
+}
+
+void KernelCore::ExitSVCProfile() {
+ std::size_t core = impl->GetCurrentHostThreadID();
+ MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
+}
+
} // namespace Kernel
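
For context on the GetCurrentHostThreadID logic above: each host thread that drives an emulated CPU core registers itself exactly once, and later lookups resolve std::this_thread::get_id() back to a core index (with the single-core path short-circuiting through the CpuManager). A minimal sketch of that registry pattern, using illustrative names rather than the actual yuzu API:

    #include <cassert>
    #include <mutex>
    #include <thread>
    #include <unordered_map>

    // Sketch: map host std::thread::ids to emulated core indices.
    class HostThreadRegistry {
    public:
        static constexpr unsigned INVALID_ID = 0xFFFFFFFF;

        // Called once from each host thread that emulates a CPU core.
        void RegisterCoreThread(unsigned core_id) {
            std::scoped_lock lock{mutex};
            // A host thread may only register itself once.
            assert(ids.find(std::this_thread::get_id()) == ids.end());
            ids[std::this_thread::get_id()] = core_id;
        }

        // Returns the emulated core this host thread drives, or INVALID_ID.
        unsigned CurrentCoreId() const {
            std::scoped_lock lock{mutex};
            const auto it = ids.find(std::this_thread::get_id());
            return it == ids.end() ? INVALID_ID : it->second;
        }

    private:
        mutable std::mutex mutex;
        std::unordered_map<std::thread::id, unsigned> ids;
    };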
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 83de1f542..49bd47e89 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -4,15 +4,17 @@
#pragma once
+#include <array>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
+#include "core/hardware_properties.h"
#include "core/hle/kernel/memory/memory_types.h"
#include "core/hle/kernel/object.h"
namespace Core {
-struct EmuThreadHandle;
+class CPUInterruptHandler;
class ExclusiveMonitor;
class System;
} // namespace Core
@@ -65,6 +67,9 @@ public:
KernelCore(KernelCore&&) = delete;
KernelCore& operator=(KernelCore&&) = delete;
+ /// Sets whether emulation is multicore or single-core. Must be called before Initialize.
+ void SetMulticore(bool is_multicore);
+
/// Resets the kernel to a clean slate for use.
void Initialize();
@@ -110,6 +115,18 @@ public:
/// Gets an instance of the respective physical CPU core.
const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
+ /// Gets the Scheduler instance for the core the caller is currently running on.
+ Kernel::Scheduler& CurrentScheduler();
+
+ /// Gets the Scheduler instance for the core the caller is currently running on.
+ const Kernel::Scheduler& CurrentScheduler() const;
+
+ /// Gets an instance of the current physical CPU core.
+ Kernel::PhysicalCore& CurrentPhysicalCore();
+
+ /// Gets an instance of the current physical CPU core.
+ const Kernel::PhysicalCore& CurrentPhysicalCore() const;
+
/// Gets an instance of the Synchronization Interface.
Kernel::Synchronization& Synchronization();
@@ -129,6 +146,10 @@ public:
const Core::ExclusiveMonitor& GetExclusiveMonitor() const;
+ std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts();
+
+ const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts() const;
+
void InvalidateAllInstructionCaches();
/// Adds a port to the named port table
@@ -191,6 +212,18 @@ public:
/// Gets the shared memory object for Time services.
const Kernel::SharedMemory& GetTimeSharedMem() const;
+ /// Suspends or resumes the OS.
+ void Suspend(bool in_suspension);
+
+ /// Performs an exceptional exit of the OS.
+ void ExceptionalExit();
+
+ bool IsMulticore() const;
+
+ void EnterSVCProfile();
+
+ void ExitSVCProfile();
+
private:
friend class Object;
friend class Process;
@@ -208,9 +241,6 @@ private:
/// Creates a new thread ID, incrementing the internal thread ID counter.
u64 CreateNewThreadID();
- /// Retrieves the event type used for thread wakeup callbacks.
- const std::shared_ptr<Core::Timing::EventType>& ThreadWakeupCallbackEventType() const;
-
/// Provides a reference to the global handle table.
Kernel::HandleTable& GlobalHandleTable();
@@ -219,6 +249,7 @@ private:
struct Impl;
std::unique_ptr<Impl> impl;
+ bool exception_exited{};
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index 7869eb32b..8f6c944d1 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -34,8 +34,6 @@ static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThr
if (thread->GetMutexWaitAddress() != mutex_addr)
continue;
- ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
-
++num_waiters;
if (highest_priority_thread == nullptr ||
thread->GetPriority() < highest_priority_thread->GetPriority()) {
@@ -49,6 +47,7 @@ static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThr
/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner.
static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread,
std::shared_ptr<Thread> new_owner) {
+ current_thread->RemoveMutexWaiter(new_owner);
const auto threads = current_thread->GetMutexWaitingThreads();
for (const auto& thread : threads) {
if (thread->GetMutexWaitAddress() != mutex_addr)
@@ -72,85 +71,100 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
return ERR_INVALID_ADDRESS;
}
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ auto& kernel = system.Kernel();
std::shared_ptr<Thread> current_thread =
- SharedFrom(system.CurrentScheduler().GetCurrentThread());
- std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
- std::shared_ptr<Thread> requesting_thread = handle_table.Get<Thread>(requesting_thread_handle);
+ SharedFrom(kernel.CurrentScheduler().GetCurrentThread());
+ {
+ SchedulerLock lock(kernel);
+ // The mutex address must be 4-byte aligned
+ if ((address % sizeof(u32)) != 0) {
+ return ERR_INVALID_ADDRESS;
+ }
- // TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of another
- // thread.
- ASSERT(requesting_thread == current_thread);
+ const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
+ std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
+ std::shared_ptr<Thread> requesting_thread =
+ handle_table.Get<Thread>(requesting_thread_handle);
- const u32 addr_value = system.Memory().Read32(address);
+ // TODO(Subv): It is currently unknown if it is possible to lock a mutex on behalf of
+ // another thread.
+ ASSERT(requesting_thread == current_thread);
- // If the mutex isn't being held, just return success.
- if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
- return RESULT_SUCCESS;
- }
+ current_thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
- if (holding_thread == nullptr) {
- LOG_ERROR(Kernel, "Holding thread does not exist! thread_handle={:08X}",
- holding_thread_handle);
- return ERR_INVALID_HANDLE;
- }
+ const u32 addr_value = system.Memory().Read32(address);
+
+ // If the mutex isn't being held, just return success.
+ if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
+ return RESULT_SUCCESS;
+ }
- // Wait until the mutex is released
- current_thread->SetMutexWaitAddress(address);
- current_thread->SetWaitHandle(requesting_thread_handle);
+ if (holding_thread == nullptr) {
+ return ERR_INVALID_HANDLE;
+ }
- current_thread->SetStatus(ThreadStatus::WaitMutex);
- current_thread->InvalidateWakeupCallback();
+ // Wait until the mutex is released
+ current_thread->SetMutexWaitAddress(address);
+ current_thread->SetWaitHandle(requesting_thread_handle);
- // Update the lock holder thread's priority to prevent priority inversion.
- holding_thread->AddMutexWaiter(current_thread);
+ current_thread->SetStatus(ThreadStatus::WaitMutex);
- system.PrepareReschedule();
+ // Update the lock holder thread's priority to prevent priority inversion.
+ holding_thread->AddMutexWaiter(current_thread);
+ }
- return RESULT_SUCCESS;
+ {
+ SchedulerLock lock(kernel);
+ auto* owner = current_thread->GetLockOwner();
+ if (owner != nullptr) {
+ owner->RemoveMutexWaiter(current_thread);
+ }
+ }
+ return current_thread->GetSignalingResult();
}
-ResultCode Mutex::Release(VAddr address) {
+std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thread> owner,
+ VAddr address) {
// The mutex address must be 4-byte aligned
if ((address % sizeof(u32)) != 0) {
LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
- return ERR_INVALID_ADDRESS;
+ return {ERR_INVALID_ADDRESS, nullptr};
}
- std::shared_ptr<Thread> current_thread =
- SharedFrom(system.CurrentScheduler().GetCurrentThread());
- auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(current_thread, address);
-
- // There are no more threads waiting for the mutex, release it completely.
- if (thread == nullptr) {
+ auto [new_owner, num_waiters] = GetHighestPriorityMutexWaitingThread(owner, address);
+ if (new_owner == nullptr) {
system.Memory().Write32(address, 0);
- return RESULT_SUCCESS;
+ return {RESULT_SUCCESS, nullptr};
}
-
// Transfer the ownership of the mutex from the previous owner to the new one.
- TransferMutexOwnership(address, current_thread, thread);
-
- u32 mutex_value = thread->GetWaitHandle();
-
+ TransferMutexOwnership(address, owner, new_owner);
+ u32 mutex_value = new_owner->GetWaitHandle();
if (num_waiters >= 2) {
// Notify the guest that there are still some threads waiting for the mutex
mutex_value |= Mutex::MutexHasWaitersFlag;
}
+ new_owner->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
+ new_owner->SetLockOwner(nullptr);
+ new_owner->ResumeFromWait();
- // Grant the mutex to the next waiting thread and resume it.
system.Memory().Write32(address, mutex_value);
+ return {RESULT_SUCCESS, new_owner};
+}
- ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
- thread->ResumeFromWait();
+ResultCode Mutex::Release(VAddr address) {
+ auto& kernel = system.Kernel();
+ SchedulerLock lock(kernel);
- thread->SetLockOwner(nullptr);
- thread->SetCondVarWaitAddress(0);
- thread->SetMutexWaitAddress(0);
- thread->SetWaitHandle(0);
- thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
+ std::shared_ptr<Thread> current_thread =
+ SharedFrom(kernel.CurrentScheduler().GetCurrentThread());
- system.PrepareReschedule();
+ auto [result, new_owner] = Unlock(current_thread, address);
- return RESULT_SUCCESS;
+ if (result != RESULT_SUCCESS && new_owner != nullptr) {
+ new_owner->SetSynchronizationResults(nullptr, result);
+ }
+
+ return result;
}
+
} // namespace Kernel
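
Both TryAcquire and Unlock above work on the guest-visible 32-bit mutex word, which packs the owner's thread handle together with a has-waiters bit (Mutex::MutexHasWaitersFlag). A standalone sketch of that encoding; the flag value here is an assumption for illustration:

    #include <cstdint>

    // Sketch of the guest mutex word: owner handle OR'd with a waiters bit.
    constexpr std::uint32_t HasWaitersFlag = 0x40000000; // illustrative value

    constexpr std::uint32_t MakeMutexValue(std::uint32_t owner_handle, bool has_waiters) {
        return owner_handle | (has_waiters ? HasWaitersFlag : 0);
    }

    constexpr std::uint32_t OwnerOf(std::uint32_t mutex_value) {
        return mutex_value & ~HasWaitersFlag;
    }

    constexpr bool HasWaiters(std::uint32_t mutex_value) {
        return (mutex_value & HasWaitersFlag) != 0;
    }

    // A free mutex is simply 0; unlocking with no remaining waiters writes 0
    // back, matching the Write32(address, 0) path in Mutex::Unlock above.
    static_assert(OwnerOf(MakeMutexValue(0x1234, true)) == 0x1234);
    static_assert(HasWaiters(MakeMutexValue(0x1234, true)));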
diff --git a/src/core/hle/kernel/mutex.h b/src/core/hle/kernel/mutex.h
index b904de2e8..3b81dc3df 100644
--- a/src/core/hle/kernel/mutex.h
+++ b/src/core/hle/kernel/mutex.h
@@ -28,6 +28,10 @@ public:
ResultCode TryAcquire(VAddr address, Handle holding_thread_handle,
Handle requesting_thread_handle);
+ /// Unlocks the mutex at the given address on behalf of the given owner thread.
+ std::pair<ResultCode, std::shared_ptr<Thread>> Unlock(std::shared_ptr<Thread> owner,
+ VAddr address);
+
/// Releases the mutex at the specified address.
ResultCode Release(VAddr address);
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index a15011076..c6bbdb080 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -2,12 +2,15 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include "common/assert.h"
#include "common/logging/log.h"
+#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
#ifdef ARCHITECTURE_x86_64
#include "core/arm/dynarmic/arm_dynarmic_32.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
#endif
+#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/exclusive_monitor.h"
#include "core/arm/unicorn/arm_unicorn.h"
#include "core/core.h"
@@ -17,50 +20,37 @@
namespace Kernel {
-PhysicalCore::PhysicalCore(Core::System& system, std::size_t id,
- Core::ExclusiveMonitor& exclusive_monitor)
- : core_index{id} {
-#ifdef ARCHITECTURE_x86_64
- arm_interface_32 =
- std::make_unique<Core::ARM_Dynarmic_32>(system, exclusive_monitor, core_index);
- arm_interface_64 =
- std::make_unique<Core::ARM_Dynarmic_64>(system, exclusive_monitor, core_index);
-
-#else
- using Core::ARM_Unicorn;
- arm_interface_32 = std::make_unique<ARM_Unicorn>(system, ARM_Unicorn::Arch::AArch32);
- arm_interface_64 = std::make_unique<ARM_Unicorn>(system, ARM_Unicorn::Arch::AArch64);
- LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
-#endif
+PhysicalCore::PhysicalCore(Core::System& system, std::size_t id, Kernel::Scheduler& scheduler,
+ Core::CPUInterruptHandler& interrupt_handler)
+ : interrupt_handler{interrupt_handler}, core_index{id}, scheduler{scheduler} {
- scheduler = std::make_unique<Kernel::Scheduler>(system, core_index);
+ guard = std::make_unique<Common::SpinLock>();
}
PhysicalCore::~PhysicalCore() = default;
-void PhysicalCore::Run() {
- arm_interface->Run();
- arm_interface->ClearExclusiveState();
+void PhysicalCore::Idle() {
+ interrupt_handler.AwaitInterrupt();
}
-void PhysicalCore::Step() {
- arm_interface->Step();
+void PhysicalCore::Shutdown() {
+ scheduler.Shutdown();
}
-void PhysicalCore::Stop() {
- arm_interface->PrepareReschedule();
+bool PhysicalCore::IsInterrupted() const {
+ return interrupt_handler.IsInterrupted();
}
-void PhysicalCore::Shutdown() {
- scheduler->Shutdown();
+void PhysicalCore::Interrupt() {
+ guard->lock();
+ interrupt_handler.SetInterrupt(true);
+ guard->unlock();
}
-void PhysicalCore::SetIs64Bit(bool is_64_bit) {
- if (is_64_bit) {
- arm_interface = arm_interface_64.get();
- } else {
- arm_interface = arm_interface_32.get();
- }
+void PhysicalCore::ClearInterrupt() {
+ guard->lock();
+ interrupt_handler.SetInterrupt(false);
+ guard->unlock();
}
} // namespace Kernel
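
The rewritten PhysicalCore above no longer runs a JIT directly; Idle() parks the core in Core::CPUInterruptHandler::AwaitInterrupt() until another core flips its interrupt flag. A minimal sketch of that latch pattern, an assumption about the mechanism rather than the actual cpu_interrupt_handler code:

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    // Sketch: a per-core interrupt latch that an idle core can block on.
    class InterruptLatch {
    public:
        void SetInterrupt(bool active) {
            {
                std::scoped_lock lock{mutex};
                interrupted.store(active, std::memory_order_release);
            }
            if (active) {
                cv.notify_all(); // wake any core parked in AwaitInterrupt()
            }
        }

        bool IsInterrupted() const {
            return interrupted.load(std::memory_order_acquire);
        }

        void AwaitInterrupt() {
            std::unique_lock lock{mutex};
            cv.wait(lock, [this] { return interrupted.load(); });
        }

    private:
        std::mutex mutex;
        std::condition_variable cv;
        std::atomic<bool> interrupted{false};
    };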
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index 3269166be..d7a7a951c 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -7,12 +7,17 @@
#include <cstddef>
#include <memory>
+namespace Common {
+class SpinLock;
+}
+
namespace Kernel {
class Scheduler;
} // namespace Kernel
namespace Core {
class ARM_Interface;
+class CPUInterruptHandler;
class ExclusiveMonitor;
class System;
} // namespace Core
@@ -21,7 +26,8 @@ namespace Kernel {
class PhysicalCore {
public:
- PhysicalCore(Core::System& system, std::size_t id, Core::ExclusiveMonitor& exclusive_monitor);
+ PhysicalCore(Core::System& system, std::size_t id, Kernel::Scheduler& scheduler,
+ Core::CPUInterruptHandler& interrupt_handler);
~PhysicalCore();
PhysicalCore(const PhysicalCore&) = delete;
@@ -30,23 +36,18 @@ public:
PhysicalCore(PhysicalCore&&) = default;
PhysicalCore& operator=(PhysicalCore&&) = default;
- /// Execute current jit state
- void Run();
- /// Execute a single instruction in current jit.
- void Step();
- /// Stop JIT execution/exit
- void Stop();
+ void Idle();
+ /// Interrupt this physical core.
+ void Interrupt();
- // Shutdown this physical core.
- void Shutdown();
+ /// Clears this core's interrupt.
+ void ClearInterrupt();
- Core::ARM_Interface& ArmInterface() {
- return *arm_interface;
- }
+ /// Checks whether this core is interrupted.
+ bool IsInterrupted() const;
- const Core::ARM_Interface& ArmInterface() const {
- return *arm_interface;
- }
+ /// Shuts down this physical core.
+ void Shutdown();
bool IsMainCore() const {
return core_index == 0;
@@ -61,21 +62,18 @@ public:
}
Kernel::Scheduler& Scheduler() {
- return *scheduler;
+ return scheduler;
}
const Kernel::Scheduler& Scheduler() const {
- return *scheduler;
+ return scheduler;
}
- void SetIs64Bit(bool is_64_bit);
-
private:
+ Core::CPUInterruptHandler& interrupt_handler;
std::size_t core_index;
- std::unique_ptr<Core::ARM_Interface> arm_interface_32;
- std::unique_ptr<Core::ARM_Interface> arm_interface_64;
- std::unique_ptr<Kernel::Scheduler> scheduler;
- Core::ARM_Interface* arm_interface{};
+ Kernel::Scheduler& scheduler;
+ std::unique_ptr<Common::SpinLock> guard;
};
} // namespace Kernel
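
Note that physical_core.h above only forward-declares Common::SpinLock, so PhysicalCore holds its guard behind a std::unique_ptr instead of by value. A minimal test-and-set spin lock sketch, an assumed shape rather than yuzu's actual Common::SpinLock:

    #include <atomic>

    // Sketch: a minimal test-and-set spin lock.
    class SpinLockSketch {
    public:
        void lock() {
            while (flag.test_and_set(std::memory_order_acquire)) {
                // Busy-wait; a production lock would pause or yield here.
            }
        }

        void unlock() {
            flag.clear(std::memory_order_release);
        }

    private:
        std::atomic_flag flag = ATOMIC_FLAG_INIT;
    };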
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index c4c5199b1..f9d7c024d 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -22,6 +22,7 @@
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
+#include "core/hle/lock.h"
#include "core/memory.h"
#include "core/settings.h"
@@ -30,14 +31,15 @@ namespace {
/**
* Sets up the primary application thread
*
+ * @param system The system instance to create the main thread under.
* @param owner_process The parent process for the main thread
- * @param kernel The kernel instance to create the main thread under.
* @param priority The priority to give the main thread
*/
-void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority, VAddr stack_top) {
+void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, VAddr stack_top) {
const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
- auto thread_res = Thread::Create(kernel, "main", entry_point, priority, 0,
- owner_process.GetIdealCore(), stack_top, owner_process);
+ ThreadType type = THREADTYPE_USER;
+ auto thread_res = Thread::Create(system, type, "main", entry_point, priority, 0,
+ owner_process.GetIdealCore(), stack_top, &owner_process);
std::shared_ptr<Thread> thread = std::move(thread_res).Unwrap();
@@ -48,8 +50,12 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority, V
thread->GetContext32().cpu_registers[1] = thread_handle;
thread->GetContext64().cpu_registers[1] = thread_handle;
+ auto& kernel = system.Kernel();
// Threads are dormant by default; wake the main thread so it runs once the scheduler fires.
- thread->ResumeFromWait();
+ {
+ SchedulerLock lock{kernel};
+ thread->SetStatus(ThreadStatus::Ready);
+ }
}
} // Anonymous namespace
@@ -182,7 +188,6 @@ void Process::RemoveConditionVariableThread(std::shared_ptr<Thread> thread) {
}
++it;
}
- UNREACHABLE();
}
std::vector<std::shared_ptr<Thread>> Process::GetConditionVariableThreads(
@@ -207,6 +212,7 @@ void Process::UnregisterThread(const Thread* thread) {
}
ResultCode Process::ClearSignalState() {
+ SchedulerLock lock(system.Kernel());
if (status == ProcessStatus::Exited) {
LOG_ERROR(Kernel, "called on a terminated process instance.");
return ERR_INVALID_STATE;
@@ -294,7 +300,7 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) {
ChangeStatus(ProcessStatus::Running);
- SetupMainThread(*this, kernel, main_thread_priority, main_thread_stack_top);
+ SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top);
resource_limit->Reserve(ResourceType::Threads, 1);
resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size);
}
@@ -340,6 +346,7 @@ static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
}
VAddr Process::CreateTLSRegion() {
+ SchedulerLock lock(system.Kernel());
if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
tls_page_iter != tls_pages.cend()) {
return *tls_page_iter->ReserveSlot();
@@ -370,6 +377,7 @@ VAddr Process::CreateTLSRegion() {
}
void Process::FreeTLSRegion(VAddr tls_address) {
+ SchedulerLock lock(system.Kernel());
const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
auto iter =
std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
@@ -384,6 +392,7 @@ void Process::FreeTLSRegion(VAddr tls_address) {
}
void Process::LoadModule(CodeSet code_set, VAddr base_addr) {
+ std::lock_guard lock{HLE::g_hle_lock};
const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
Memory::MemoryPermission permission) {
page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission);
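
CreateTLSRegion and FreeTLSRegion above now take the scheduler lock around the TLS page list. The reservation flow they guard is essentially "first page with a free slot wins"; a sketch of that flow, with the slot count and names being illustrative:

    #include <array>
    #include <cstddef>
    #include <optional>
    #include <vector>

    // Sketch: reserve a slot in the first TLS page that has one free.
    struct TlsPageSketch {
        std::array<bool, 8> used{}; // illustrative slot count per page

        std::optional<std::size_t> ReserveSlot() {
            for (std::size_t i = 0; i < used.size(); ++i) {
                if (!used[i]) {
                    used[i] = true;
                    return i;
                }
            }
            return std::nullopt; // page is full
        }
    };

    std::optional<std::size_t> ReserveFirstFree(std::vector<TlsPageSketch>& pages) {
        for (auto& page : pages) {
            if (const auto slot = page.ReserveSlot()) {
                return slot;
            }
        }
        return std::nullopt; // caller would allocate a fresh page here
    }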
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index ef5e19e63..6e286419e 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -6,8 +6,10 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/readable_event.h"
+#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
namespace Kernel {
@@ -37,6 +39,7 @@ void ReadableEvent::Clear() {
}
ResultCode ReadableEvent::Reset() {
+ SchedulerLock lock(kernel);
if (!is_signaled) {
LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
GetObjectId(), GetTypeName(), GetName());
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 1140c72a3..2b12c0dbf 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -11,11 +11,15 @@
#include <utility>
#include "common/assert.h"
+#include "common/bit_util.h"
+#include "common/fiber.h"
#include "common/logging/log.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
#include "core/core_timing.h"
+#include "core/cpu_manager.h"
#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/time_manager.h"
@@ -27,103 +31,151 @@ GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {}
GlobalScheduler::~GlobalScheduler() = default;
void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) {
+ global_list_guard.lock();
thread_list.push_back(std::move(thread));
+ global_list_guard.unlock();
}
void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
+ global_list_guard.lock();
thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
thread_list.end());
+ global_list_guard.unlock();
}
-void GlobalScheduler::UnloadThread(std::size_t core) {
- Scheduler& sched = kernel.Scheduler(core);
- sched.UnloadThread();
-}
-
-void GlobalScheduler::SelectThread(std::size_t core) {
+u32 GlobalScheduler::SelectThreads() {
+ ASSERT(is_locked);
const auto update_thread = [](Thread* thread, Scheduler& sched) {
- if (thread != sched.selected_thread.get()) {
+ sched.guard.lock();
+ if (thread != sched.selected_thread_set.get()) {
if (thread == nullptr) {
++sched.idle_selection_count;
}
- sched.selected_thread = SharedFrom(thread);
+ sched.selected_thread_set = SharedFrom(thread);
}
- sched.is_context_switch_pending = sched.selected_thread != sched.current_thread;
+ const bool reschedule_pending =
+ sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread);
+ sched.is_context_switch_pending = reschedule_pending;
std::atomic_thread_fence(std::memory_order_seq_cst);
+ sched.guard.unlock();
+ return reschedule_pending;
};
- Scheduler& sched = kernel.Scheduler(core);
- Thread* current_thread = nullptr;
- // Step 1: Get top thread in schedule queue.
- current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
- if (current_thread) {
- update_thread(current_thread, sched);
- return;
+ if (!is_reselection_pending.load()) {
+ return 0;
}
- // Step 2: Try selecting a suggested thread.
- Thread* winner = nullptr;
- std::set<s32> sug_cores;
- for (auto thread : suggested_queue[core]) {
- s32 this_core = thread->GetProcessorID();
- Thread* thread_on_core = nullptr;
- if (this_core >= 0) {
- thread_on_core = scheduled_queue[this_core].front();
- }
- if (this_core < 0 || thread != thread_on_core) {
- winner = thread;
- break;
+ std::array<Thread*, Core::Hardware::NUM_CPU_CORES> top_threads{};
+
+ u32 idle_cores{};
+
+ // Step 1: Get top thread in schedule queue.
+ for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+ Thread* top_thread =
+ scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
+ if (top_thread != nullptr) {
+ // TODO(Blinkhawk): Implement Thread Pinning
+ } else {
+ idle_cores |= (1ul << core);
}
- sug_cores.insert(this_core);
+ top_threads[core] = top_thread;
}
- // if we got a suggested thread, select it, else do a second pass.
- if (winner && winner->GetPriority() > 2) {
- if (winner->IsRunning()) {
- UnloadThread(static_cast<u32>(winner->GetProcessorID()));
+
+ while (idle_cores != 0) {
+ u32 core_id = Common::CountTrailingZeroes32(idle_cores);
+
+ if (!suggested_queue[core_id].empty()) {
+ std::array<s32, Core::Hardware::NUM_CPU_CORES> migration_candidates{};
+ std::size_t num_candidates = 0;
+ auto iter = suggested_queue[core_id].begin();
+ Thread* suggested = nullptr;
+ // Step 2: Try selecting a suggested thread.
+ while (iter != suggested_queue[core_id].end()) {
+ suggested = *iter;
+ iter++;
+ s32 suggested_core_id = suggested->GetProcessorID();
+ Thread* top_thread =
+ suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr;
+ if (top_thread != suggested) {
+ if (top_thread != nullptr &&
+ top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
+ // A thread with a higher priority is in the way; cancel the core migration.
+ suggested = nullptr;
+ break;
+ }
+ TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested);
+ break;
+ }
+ suggested = nullptr;
+ migration_candidates[num_candidates++] = suggested_core_id;
+ }
+ // Step 3: Select a suggested thread from another core
+ if (suggested == nullptr) {
+ for (std::size_t i = 0; i < num_candidates; i++) {
+ s32 candidate_core = migration_candidates[i];
+ suggested = top_threads[candidate_core];
+ auto it = scheduled_queue[candidate_core].begin();
+ it++;
+ Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
+ if (next != nullptr) {
+ TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
+ suggested);
+ top_threads[candidate_core] = next;
+ break;
+ } else {
+ suggested = nullptr;
+ }
+ }
+ }
+ top_threads[core_id] = suggested;
}
- TransferToCore(winner->GetPriority(), static_cast<s32>(core), winner);
- update_thread(winner, sched);
- return;
+
+ idle_cores &= ~(1ul << core_id);
}
- // Step 3: Select a suggested thread from another core
- for (auto& src_core : sug_cores) {
- auto it = scheduled_queue[src_core].begin();
- it++;
- if (it != scheduled_queue[src_core].end()) {
- Thread* thread_on_core = scheduled_queue[src_core].front();
- Thread* to_change = *it;
- if (thread_on_core->IsRunning() || to_change->IsRunning()) {
- UnloadThread(static_cast<u32>(src_core));
- }
- TransferToCore(thread_on_core->GetPriority(), static_cast<s32>(core), thread_on_core);
- current_thread = thread_on_core;
- break;
+ u32 cores_needing_context_switch{};
+ for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+ Scheduler& sched = kernel.Scheduler(core);
+ ASSERT(top_threads[core] == nullptr || top_threads[core]->GetProcessorID() == core);
+ if (update_thread(top_threads[core], sched)) {
+ cores_needing_context_switch |= (1ul << core);
}
}
- update_thread(current_thread, sched);
+ return cores_needing_context_switch;
}
bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
+ ASSERT(is_locked);
// Note: caller should use critical section, etc.
+ if (!yielding_thread->IsRunnable()) {
+ // Normally this case shouldn't happen except for SetThreadActivity.
+ is_reselection_pending.store(true, std::memory_order_release);
+ return false;
+ }
const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
const u32 priority = yielding_thread->GetPriority();
// Yield the thread
- const Thread* const winner = scheduled_queue[core_id].front(priority);
- ASSERT_MSG(yielding_thread == winner, "Thread yielding without being in front");
- scheduled_queue[core_id].yield(priority);
+ Reschedule(priority, core_id, yielding_thread);
+ const Thread* const winner = scheduled_queue[core_id].front();
+ if (kernel.GetCurrentHostThreadID() != core_id) {
+ is_reselection_pending.store(true, std::memory_order_release);
+ }
return AskForReselectionOrMarkRedundant(yielding_thread, winner);
}
bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
+ ASSERT(is_locked);
// Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
// etc.
+ if (!yielding_thread->IsRunnable()) {
+ // Normally this case shouldn't happen except for SetThreadActivity.
+ is_reselection_pending.store(true, std::memory_order_release);
+ return false;
+ }
const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
const u32 priority = yielding_thread->GetPriority();
// Yield the thread
- ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority),
- "Thread yielding without being in front");
- scheduled_queue[core_id].yield(priority);
+ Reschedule(priority, core_id, yielding_thread);
std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
for (std::size_t i = 0; i < current_threads.size(); i++) {
@@ -153,21 +205,28 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
if (winner != nullptr) {
if (winner != yielding_thread) {
- if (winner->IsRunning()) {
- UnloadThread(static_cast<u32>(winner->GetProcessorID()));
- }
TransferToCore(winner->GetPriority(), s32(core_id), winner);
}
} else {
winner = next_thread;
}
+ if (kernel.GetCurrentHostThreadID() != core_id) {
+ is_reselection_pending.store(true, std::memory_order_release);
+ }
+
return AskForReselectionOrMarkRedundant(yielding_thread, winner);
}
bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
+ ASSERT(is_locked);
// Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
// etc.
+ if (!yielding_thread->IsRunnable()) {
+ // Normally this case shouldn't happen except for SetThreadActivity.
+ is_reselection_pending.store(true, std::memory_order_release);
+ return false;
+ }
Thread* winner = nullptr;
const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
@@ -195,25 +254,31 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
}
if (winner != nullptr) {
if (winner != yielding_thread) {
- if (winner->IsRunning()) {
- UnloadThread(static_cast<u32>(winner->GetProcessorID()));
- }
TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
}
} else {
winner = yielding_thread;
}
+ } else {
+ winner = scheduled_queue[core_id].front();
+ }
+
+ if (kernel.GetCurrentHostThreadID() != core_id) {
+ is_reselection_pending.store(true, std::memory_order_release);
}
return AskForReselectionOrMarkRedundant(yielding_thread, winner);
}
void GlobalScheduler::PreemptThreads() {
+ ASSERT(is_locked);
for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
const u32 priority = preemption_priorities[core_id];
if (scheduled_queue[core_id].size(priority) > 0) {
- scheduled_queue[core_id].front(priority)->IncrementYieldCount();
+ if (scheduled_queue[core_id].size(priority) > 1) {
+ scheduled_queue[core_id].front(priority)->IncrementYieldCount();
+ }
scheduled_queue[core_id].yield(priority);
if (scheduled_queue[core_id].size(priority) > 1) {
scheduled_queue[core_id].front(priority)->IncrementYieldCount();
@@ -247,9 +312,6 @@ void GlobalScheduler::PreemptThreads() {
}
if (winner != nullptr) {
- if (winner->IsRunning()) {
- UnloadThread(static_cast<u32>(winner->GetProcessorID()));
- }
TransferToCore(winner->GetPriority(), s32(core_id), winner);
current_thread =
winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
@@ -280,9 +342,6 @@ void GlobalScheduler::PreemptThreads() {
}
if (winner != nullptr) {
- if (winner->IsRunning()) {
- UnloadThread(static_cast<u32>(winner->GetProcessorID()));
- }
TransferToCore(winner->GetPriority(), s32(core_id), winner);
current_thread = winner;
}
@@ -292,34 +351,65 @@ void GlobalScheduler::PreemptThreads() {
}
}
+void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
+ Core::EmuThreadHandle global_thread) {
+ u32 current_core = global_thread.host_handle;
+ bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
+ (current_core < Core::Hardware::NUM_CPU_CORES);
+ while (cores_pending_reschedule != 0) {
+ u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
+ ASSERT(core < Core::Hardware::NUM_CPU_CORES);
+ if (!must_context_switch || core != current_core) {
+ auto& phys_core = kernel.PhysicalCore(core);
+ phys_core.Interrupt();
+ } else {
+ must_context_switch = true;
+ }
+ cores_pending_reschedule &= ~(1ul << core);
+ }
+ if (must_context_switch) {
+ auto& core_scheduler = kernel.CurrentScheduler();
+ kernel.ExitSVCProfile();
+ core_scheduler.TryDoContextSwitch();
+ kernel.EnterSVCProfile();
+ }
+}
+
void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) {
+ ASSERT(is_locked);
suggested_queue[core].add(thread, priority);
}
void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) {
+ ASSERT(is_locked);
suggested_queue[core].remove(thread, priority);
}
void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) {
+ ASSERT(is_locked);
ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
scheduled_queue[core].add(thread, priority);
}
void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) {
+ ASSERT(is_locked);
ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
scheduled_queue[core].add(thread, priority, false);
}
void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) {
+ ASSERT(is_locked);
scheduled_queue[core].remove(thread, priority);
scheduled_queue[core].add(thread, priority);
}
void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) {
+ ASSERT(is_locked);
scheduled_queue[core].remove(thread, priority);
}
void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
+ ASSERT(is_locked);
const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
const s32 source_core = thread->GetProcessorID();
if (source_core == destination_core || !schedulable) {
@@ -349,6 +439,108 @@ bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread,
}
}
+void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
+ if (old_flags == thread->scheduling_state) {
+ return;
+ }
+ ASSERT(is_locked);
+
+ if (old_flags == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ // In this case the thread was running; now it is pausing or exiting.
+ if (thread->processor_id >= 0) {
+ Unschedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
+ }
+
+ for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+ if (core != static_cast<u32>(thread->processor_id) &&
+ ((thread->affinity_mask >> core) & 1) != 0) {
+ Unsuggest(thread->current_priority, core, thread);
+ }
+ }
+ } else if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ // The thread is now set to running from being stopped
+ if (thread->processor_id >= 0) {
+ Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
+ }
+
+ for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+ if (core != static_cast<u32>(thread->processor_id) &&
+ ((thread->affinity_mask >> core) & 1) != 0) {
+ Suggest(thread->current_priority, core, thread);
+ }
+ }
+ }
+
+ SetReselectionPending();
+}
+
+void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priority) {
+ if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ return;
+ }
+ ASSERT(is_locked);
+ if (thread->processor_id >= 0) {
+ Unschedule(old_priority, static_cast<u32>(thread->processor_id), thread);
+ }
+
+ for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+ if (core != static_cast<u32>(thread->processor_id) &&
+ ((thread->affinity_mask >> core) & 1) != 0) {
+ Unsuggest(old_priority, core, thread);
+ }
+ }
+
+ if (thread->processor_id >= 0) {
+ if (thread == kernel.CurrentScheduler().GetCurrentThread()) {
+ SchedulePrepend(thread->current_priority, static_cast<u32>(thread->processor_id),
+ thread);
+ } else {
+ Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
+ }
+ }
+
+ for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+ if (core != static_cast<u32>(thread->processor_id) &&
+ ((thread->affinity_mask >> core) & 1) != 0) {
+ Suggest(thread->current_priority, core, thread);
+ }
+ }
+ thread->IncrementYieldCount();
+ SetReselectionPending();
+}
+
+void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask,
+ s32 old_core) {
+ if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable) ||
+ thread->current_priority >= THREADPRIO_COUNT) {
+ return;
+ }
+ ASSERT(is_locked);
+
+ for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+ if (((old_affinity_mask >> core) & 1) != 0) {
+ if (core == static_cast<u32>(old_core)) {
+ Unschedule(thread->current_priority, core, thread);
+ } else {
+ Unsuggest(thread->current_priority, core, thread);
+ }
+ }
+ }
+
+ for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+ if (((thread->affinity_mask >> core) & 1) != 0) {
+ if (core == static_cast<u32>(thread->processor_id)) {
+ Schedule(thread->current_priority, core, thread);
+ } else {
+ Suggest(thread->current_priority, core, thread);
+ }
+ }
+ }
+
+ thread->IncrementYieldCount();
+ SetReselectionPending();
+}
+
void GlobalScheduler::Shutdown() {
for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
scheduled_queue[core].clear();
@@ -359,10 +551,12 @@ void GlobalScheduler::Shutdown() {
void GlobalScheduler::Lock() {
Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID();
+ ASSERT(!current_thread.IsInvalid());
if (current_thread == current_owner) {
++scope_lock;
} else {
inner_lock.lock();
+ is_locked = true;
current_owner = current_thread;
ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle());
scope_lock = 1;
@@ -374,17 +568,18 @@ void GlobalScheduler::Unlock() {
ASSERT(scope_lock > 0);
return;
}
- for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
- SelectThread(i);
- }
+ u32 cores_pending_reschedule = SelectThreads();
+ Core::EmuThreadHandle leaving_thread = current_owner;
current_owner = Core::EmuThreadHandle::InvalidHandle();
scope_lock = 1;
+ is_locked = false;
inner_lock.unlock();
- // TODO(Blinkhawk): Setup the interrupts and change context on current core.
+ EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread);
}
-Scheduler::Scheduler(Core::System& system, std::size_t core_id)
- : system{system}, core_id{core_id} {}
+Scheduler::Scheduler(Core::System& system, std::size_t core_id) : system(system), core_id(core_id) {
+ switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this);
+}
Scheduler::~Scheduler() = default;
@@ -393,56 +588,128 @@ bool Scheduler::HaveReadyThreads() const {
}
Thread* Scheduler::GetCurrentThread() const {
- return current_thread.get();
+ if (current_thread) {
+ return current_thread.get();
+ }
+ return idle_thread.get();
}
Thread* Scheduler::GetSelectedThread() const {
return selected_thread.get();
}
-void Scheduler::SelectThreads() {
- system.GlobalScheduler().SelectThread(core_id);
-}
-
u64 Scheduler::GetLastContextSwitchTicks() const {
return last_context_switch_time;
}
void Scheduler::TryDoContextSwitch() {
+ auto& phys_core = system.Kernel().CurrentPhysicalCore();
+ if (phys_core.IsInterrupted()) {
+ phys_core.ClearInterrupt();
+ }
+ guard.lock();
if (is_context_switch_pending) {
SwitchContext();
+ } else {
+ guard.unlock();
}
}
-void Scheduler::UnloadThread() {
- Thread* const previous_thread = GetCurrentThread();
- Process* const previous_process = system.Kernel().CurrentProcess();
+void Scheduler::OnThreadStart() {
+ SwitchContextStep2();
+}
- UpdateLastContextSwitchTime(previous_thread, previous_process);
+void Scheduler::Unload() {
+ Thread* thread = current_thread.get();
+ if (thread) {
+ thread->SetContinuousOnSVC(false);
+ thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
+ thread->SetIsRunning(false);
+ if (!thread->IsHLEThread() && !thread->HasExited()) {
+ Core::ARM_Interface& cpu_core = thread->ArmInterface();
+ cpu_core.SaveContext(thread->GetContext32());
+ cpu_core.SaveContext(thread->GetContext64());
+ // Save the TPIDR_EL0 system register in case it was modified.
+ thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+ cpu_core.ClearExclusiveState();
+ }
+ thread->context_guard.unlock();
+ }
+}
- // Save context for previous thread
- if (previous_thread) {
- system.ArmInterface(core_id).SaveContext(previous_thread->GetContext32());
- system.ArmInterface(core_id).SaveContext(previous_thread->GetContext64());
- // Save the TPIDR_EL0 system register in case it was modified.
- previous_thread->SetTPIDR_EL0(system.ArmInterface(core_id).GetTPIDR_EL0());
+void Scheduler::Reload() {
+ Thread* thread = current_thread.get();
+ if (thread) {
+ ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
+ "Thread must be runnable.");
- if (previous_thread->GetStatus() == ThreadStatus::Running) {
- // This is only the case when a reschedule is triggered without the current thread
- // yielding execution (i.e. an event triggered, system core time-sliced, etc)
- previous_thread->SetStatus(ThreadStatus::Ready);
+ // Mark the thread as running again on this core.
+ thread->SetIsRunning(true);
+ thread->SetWasRunning(false);
+ thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
+
+ auto* const thread_owner_process = thread->GetOwnerProcess();
+ if (thread_owner_process != nullptr) {
+ system.Kernel().MakeCurrentProcess(thread_owner_process);
+ }
+ if (!thread->IsHLEThread()) {
+ Core::ARM_Interface& cpu_core = thread->ArmInterface();
+ cpu_core.LoadContext(thread->GetContext32());
+ cpu_core.LoadContext(thread->GetContext64());
+ cpu_core.SetTlsAddress(thread->GetTLSAddress());
+ cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+ cpu_core.ChangeProcessorID(this->core_id);
+ cpu_core.ClearExclusiveState();
}
- previous_thread->SetIsRunning(false);
}
- current_thread = nullptr;
+}
+
+void Scheduler::SwitchContextStep2() {
+ Thread* previous_thread = current_thread_prev.get();
+ Thread* new_thread = selected_thread.get();
+
+ // Load context of new thread
+ Process* const previous_process =
+ previous_thread != nullptr ? previous_thread->GetOwnerProcess() : nullptr;
+
+ if (new_thread) {
+ ASSERT_MSG(new_thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
+ "Thread must be runnable.");
+
+ // Mark the new thread as running on this core.
+ new_thread->SetIsRunning(true);
+ new_thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
+ new_thread->SetWasRunning(false);
+
+ auto* const thread_owner_process = current_thread->GetOwnerProcess();
+ if (thread_owner_process != nullptr) {
+ system.Kernel().MakeCurrentProcess(thread_owner_process);
+ }
+ if (!new_thread->IsHLEThread()) {
+ Core::ARM_Interface& cpu_core = new_thread->ArmInterface();
+ cpu_core.LoadContext(new_thread->GetContext32());
+ cpu_core.LoadContext(new_thread->GetContext64());
+ cpu_core.SetTlsAddress(new_thread->GetTLSAddress());
+ cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
+ cpu_core.ChangeProcessorID(this->core_id);
+ cpu_core.ClearExclusiveState();
+ }
+ }
+
+ TryDoContextSwitch();
}
void Scheduler::SwitchContext() {
- Thread* const previous_thread = GetCurrentThread();
- Thread* const new_thread = GetSelectedThread();
+ current_thread_prev = current_thread;
+ selected_thread = selected_thread_set;
+ Thread* previous_thread = current_thread_prev.get();
+ Thread* new_thread = selected_thread.get();
+ current_thread = selected_thread;
is_context_switch_pending = false;
+
if (new_thread == previous_thread) {
+ guard.unlock();
return;
}
@@ -452,51 +719,75 @@ void Scheduler::SwitchContext() {
// Save context for previous thread
if (previous_thread) {
- system.ArmInterface(core_id).SaveContext(previous_thread->GetContext32());
- system.ArmInterface(core_id).SaveContext(previous_thread->GetContext64());
- // Save the TPIDR_EL0 system register in case it was modified.
- previous_thread->SetTPIDR_EL0(system.ArmInterface(core_id).GetTPIDR_EL0());
-
- if (previous_thread->GetStatus() == ThreadStatus::Running) {
- // This is only the case when a reschedule is triggered without the current thread
- // yielding execution (i.e. an event triggered, system core time-sliced, etc)
- previous_thread->SetStatus(ThreadStatus::Ready);
+ if (new_thread != nullptr && new_thread->IsSuspendThread()) {
+ previous_thread->SetWasRunning(true);
}
+ previous_thread->SetContinuousOnSVC(false);
+ previous_thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
previous_thread->SetIsRunning(false);
- }
-
- // Load context of new thread
- if (new_thread) {
- ASSERT_MSG(new_thread->GetProcessorID() == s32(this->core_id),
- "Thread must be assigned to this core.");
- ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready,
- "Thread must be ready to become running.");
-
- // Cancel any outstanding wakeup events for this thread
- new_thread->CancelWakeupTimer();
- current_thread = SharedFrom(new_thread);
- new_thread->SetStatus(ThreadStatus::Running);
- new_thread->SetIsRunning(true);
-
- auto* const thread_owner_process = current_thread->GetOwnerProcess();
- if (previous_process != thread_owner_process) {
- system.Kernel().MakeCurrentProcess(thread_owner_process);
+ if (!previous_thread->IsHLEThread() && !previous_thread->HasExited()) {
+ Core::ARM_Interface& cpu_core = previous_thread->ArmInterface();
+ cpu_core.SaveContext(previous_thread->GetContext32());
+ cpu_core.SaveContext(previous_thread->GetContext64());
+ // Save the TPIDR_EL0 system register in case it was modified.
+ previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+ cpu_core.ClearExclusiveState();
}
+ previous_thread->context_guard.unlock();
+ }
- system.ArmInterface(core_id).LoadContext(new_thread->GetContext32());
- system.ArmInterface(core_id).LoadContext(new_thread->GetContext64());
- system.ArmInterface(core_id).SetTlsAddress(new_thread->GetTLSAddress());
- system.ArmInterface(core_id).SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
+ std::shared_ptr<Common::Fiber>* old_context;
+ if (previous_thread != nullptr) {
+ old_context = &previous_thread->GetHostContext();
} else {
- current_thread = nullptr;
- // Note: We do not reset the current process and current page table when idling because
- // technically we haven't changed processes, our threads are just paused.
+ old_context = &idle_thread->GetHostContext();
+ }
+ guard.unlock();
+
+ Common::Fiber::YieldTo(*old_context, switch_fiber);
+ // When a thread wakes up, it may resume under a different scheduler on another core.
+ auto& next_scheduler = system.Kernel().CurrentScheduler();
+ next_scheduler.SwitchContextStep2();
+}
+
+void Scheduler::OnSwitch(void* this_scheduler) {
+ Scheduler* sched = static_cast<Scheduler*>(this_scheduler);
+ sched->SwitchToCurrent();
+}
+
+void Scheduler::SwitchToCurrent() {
+ while (true) {
+ guard.lock();
+ selected_thread = selected_thread_set;
+ current_thread = selected_thread;
+ is_context_switch_pending = false;
+ guard.unlock();
+ while (!is_context_switch_pending) {
+ if (current_thread != nullptr && !current_thread->IsHLEThread()) {
+ current_thread->context_guard.lock();
+ if (!current_thread->IsRunnable()) {
+ current_thread->context_guard.unlock();
+ break;
+ }
+ if (current_thread->GetProcessorID() != core_id) {
+ current_thread->context_guard.unlock();
+ break;
+ }
+ }
+ std::shared_ptr<Common::Fiber>* next_context;
+ if (current_thread != nullptr) {
+ next_context = &current_thread->GetHostContext();
+ } else {
+ next_context = &idle_thread->GetHostContext();
+ }
+ Common::Fiber::YieldTo(switch_fiber, *next_context);
+ }
}
}
void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
const u64 prev_switch_ticks = last_context_switch_time;
- const u64 most_recent_switch_ticks = system.CoreTiming().GetTicks();
+ const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
if (thread != nullptr) {
@@ -510,6 +801,16 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
last_context_switch_time = most_recent_switch_ticks;
}
+void Scheduler::Initialize() {
+ std::string name = "Idle Thread Id:" + std::to_string(core_id);
+ std::function<void(void*)> init_func = system.GetCpuManager().GetIdleThreadStartFunc();
+ void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
+ ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
+ auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
+ nullptr, std::move(init_func), init_func_parameter);
+ idle_thread = std::move(thread_res).Unwrap();
+}
+
void Scheduler::Shutdown() {
current_thread = nullptr;
selected_thread = nullptr;
@@ -538,4 +839,13 @@ SchedulerLockAndSleep::~SchedulerLockAndSleep() {
time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
}
+void SchedulerLockAndSleep::Release() {
+ if (sleep_cancelled) {
+ return;
+ }
+ auto& time_manager = kernel.TimeManager();
+ time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
+ sleep_cancelled = true;
+}
+
} // namespace Kernel
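
SelectThreads and EnableInterruptAndSchedule above both walk a u32 core bitmask by repeatedly extracting the lowest set bit with Common::CountTrailingZeroes32 and then clearing it. The same iteration pattern in standard C++20, using std::countr_zero in place of the yuzu helper:

    #include <bit>
    #include <cstdint>
    #include <cstdio>

    int main() {
        // Example: cores 0, 2 and 3 are idle.
        std::uint32_t idle_cores = 0b1101;

        while (idle_cores != 0) {
            // Index of the lowest set bit, i.e. the next idle core.
            const unsigned core = static_cast<unsigned>(std::countr_zero(idle_cores));
            std::printf("servicing idle core %u\n", core);
            idle_cores &= ~(1u << core); // clear it and continue with the rest
        }
        return 0;
    }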
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 07df33f9c..b3b4b5169 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -11,9 +11,14 @@
#include "common/common_types.h"
#include "common/multi_level_queue.h"
+#include "common/spin_lock.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/thread.h"
+namespace Common {
+class Fiber;
+}
+
namespace Core {
class ARM_Interface;
class System;
@@ -41,41 +46,17 @@ public:
return thread_list;
}
- /**
- * Add a thread to the suggested queue of a cpu core. Suggested threads may be
- * picked if no thread is scheduled to run on the core.
- */
- void Suggest(u32 priority, std::size_t core, Thread* thread);
-
- /**
- * Remove a thread to the suggested queue of a cpu core. Suggested threads may be
- * picked if no thread is scheduled to run on the core.
- */
- void Unsuggest(u32 priority, std::size_t core, Thread* thread);
-
- /**
- * Add a thread to the scheduling queue of a cpu core. The thread is added at the
- * back the queue in its priority level.
- */
- void Schedule(u32 priority, std::size_t core, Thread* thread);
-
- /**
- * Add a thread to the scheduling queue of a cpu core. The thread is added at the
- * front the queue in its priority level.
- */
- void SchedulePrepend(u32 priority, std::size_t core, Thread* thread);
+ /// Notify the scheduler a thread's status has changed.
+ void AdjustSchedulingOnStatus(Thread* thread, u32 old_flags);
- /// Reschedule an already scheduled thread based on a new priority
- void Reschedule(u32 priority, std::size_t core, Thread* thread);
-
- /// Unschedules a thread.
- void Unschedule(u32 priority, std::size_t core, Thread* thread);
+ /// Notify the scheduler a thread's priority has changed.
+ void AdjustSchedulingOnPriority(Thread* thread, u32 old_priority);
- /// Selects a core and forces it to unload its current thread's context
- void UnloadThread(std::size_t core);
+ /// Notify the scheduler a thread's core and/or affinity mask has changed.
+ void AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask, s32 old_core);
/**
- * Takes care of selecting the new scheduled thread in three steps:
+ * Takes care of selecting the new scheduled threads in three steps:
*
* 1. First a thread is selected from the top of the priority queue. If no thread
* is obtained then we move to step two, else we are done.
@@ -85,8 +66,10 @@ public:
*
* 3. Third, if no suggested thread is found, we do a second pass and pick a running
* thread in another core and swap it with its current thread.
+ *
+ * Returns the cores needing scheduling.
*/
- void SelectThread(std::size_t core);
+ u32 SelectThreads();
bool HaveReadyThreads(std::size_t core_id) const {
return !scheduled_queue[core_id].empty();
@@ -149,6 +132,40 @@ private:
/// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling
/// and reschedules current core if needed.
void Unlock();
+
+ void EnableInterruptAndSchedule(u32 cores_pending_reschedule,
+ Core::EmuThreadHandle global_thread);
+
+ /**
+ * Add a thread to the suggested queue of a cpu core. Suggested threads may be
+ * picked if no thread is scheduled to run on the core.
+ */
+ void Suggest(u32 priority, std::size_t core, Thread* thread);
+
+ /**
+ * Remove a thread from the suggested queue of a cpu core. Suggested threads may be
+ * picked if no thread is scheduled to run on the core.
+ */
+ void Unsuggest(u32 priority, std::size_t core, Thread* thread);
+
+ /**
+ * Add a thread to the scheduling queue of a cpu core. The thread is added at the
+ * back of the queue in its priority level.
+ */
+ void Schedule(u32 priority, std::size_t core, Thread* thread);
+
+ /**
+ * Add a thread to the scheduling queue of a cpu core. The thread is added at the
+ * front of the queue in its priority level.
+ */
+ void SchedulePrepend(u32 priority, std::size_t core, Thread* thread);
+
+ /// Reschedule an already scheduled thread based on a new priority
+ void Reschedule(u32 priority, std::size_t core, Thread* thread);
+
+ /// Unschedules a thread.
+ void Unschedule(u32 priority, std::size_t core, Thread* thread);
+
/**
* Transfers a thread into a specific core. If the destination_core is -1
* it will be unscheduled from its source core and added into its suggested
@@ -170,10 +187,13 @@ private:
std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
/// Scheduler lock mechanisms.
- std::mutex inner_lock{}; // TODO(Blinkhawk): Replace for a SpinLock
+ bool is_locked{};
+ Common::SpinLock inner_lock{};
std::atomic<s64> scope_lock{};
Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()};
+ Common::SpinLock global_list_guard{};
+
/// Lists all thread ids that aren't deleted/etc.
std::vector<std::shared_ptr<Thread>> thread_list;
KernelCore& kernel;
@@ -190,11 +210,11 @@ public:
/// Reschedules to the next available thread (call after current thread is suspended)
void TryDoContextSwitch();
- /// Unloads currently running thread
- void UnloadThread();
-
- /// Select the threads in top of the scheduling multilist.
- void SelectThreads();
+ /// The next two are for single-core mode only.
+ /// Unload current thread before preempting core.
+ void Unload();
+ /// Reload current thread after core preemption.
+ void Reload();
/// Gets the current running thread
Thread* GetCurrentThread() const;
@@ -209,15 +229,30 @@ public:
return is_context_switch_pending;
}
+ void Initialize();
+
/// Shuts down the scheduler.
void Shutdown();
+ void OnThreadStart();
+
+ std::shared_ptr<Common::Fiber>& ControlContext() {
+ return switch_fiber;
+ }
+
+ const std::shared_ptr<Common::Fiber>& ControlContext() const {
+ return switch_fiber;
+ }
+
private:
friend class GlobalScheduler;
/// Switches the CPU's active thread context to that of the specified thread
void SwitchContext();
+ /// When a thread wakes up, it must run this through its new scheduler
+ void SwitchContextStep2();
+
/**
* Called on every context switch to update the internal timestamp
* This also updates the running time ticks for the given thread and
@@ -231,14 +266,24 @@ private:
*/
void UpdateLastContextSwitchTime(Thread* thread, Process* process);
+ static void OnSwitch(void* this_scheduler);
+ void SwitchToCurrent();
+
std::shared_ptr<Thread> current_thread = nullptr;
std::shared_ptr<Thread> selected_thread = nullptr;
+ std::shared_ptr<Thread> current_thread_prev = nullptr;
+ std::shared_ptr<Thread> selected_thread_set = nullptr;
+ std::shared_ptr<Thread> idle_thread = nullptr;
+
+ std::shared_ptr<Common::Fiber> switch_fiber = nullptr;
Core::System& system;
u64 last_context_switch_time = 0;
u64 idle_selection_count = 0;
const std::size_t core_id;
+ Common::SpinLock guard{};
+
bool is_context_switch_pending = false;
};
@@ -261,6 +306,8 @@ public:
sleep_cancelled = true;
}
+ void Release();
+
private:
Handle& event_handle;
Thread* time_task;
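
As a rough illustration of the three-step selection pass documented in the header comment above, here is a minimal, self-contained sketch of per-core scheduled/suggested priority queues whose select pass returns a bitmask of cores to reschedule. All names and types are simplified stand-ins, not yuzu's actual scheduler:

// Sketch only: illustrates steps 1 and 2 of SelectThreads(); the real
// scheduler's third step (swapping with a thread running on another core)
// is omitted for brevity.
#include <array>
#include <cstdint>
#include <deque>
#include <map>

constexpr std::size_t kNumCores = 4;

struct ThreadSketch {
    std::uint32_t priority;
    int core; // core the thread is scheduled on, or -1 if unscheduled
};

struct CoreQueues {
    // priority -> FIFO of threads; lower priority value runs first
    std::map<std::uint32_t, std::deque<ThreadSketch*>> scheduled;
    std::map<std::uint32_t, std::deque<ThreadSketch*>> suggested;

    ThreadSketch* FrontScheduled() const {
        for (const auto& [prio, queue] : scheduled) {
            if (!queue.empty()) {
                return queue.front();
            }
        }
        return nullptr;
    }

    ThreadSketch* PopSuggested() {
        for (auto& [prio, queue] : suggested) {
            if (!queue.empty()) {
                ThreadSketch* thread = queue.front();
                queue.pop_front();
                return thread;
            }
        }
        return nullptr;
    }
};

struct GlobalSchedulerSketch {
    std::array<CoreQueues, kNumCores> cores{};
    std::array<ThreadSketch*, kNumCores> selected{};

    // Returns a bitmask of the cores whose selected thread changed.
    std::uint32_t SelectThreads() {
        std::uint32_t cores_needing_reschedule = 0;
        for (std::size_t core = 0; core < kNumCores; ++core) {
            // Step 1: take the highest-priority thread scheduled on this core.
            ThreadSketch* next = cores[core].FrontScheduled();
            // Step 2: otherwise adopt a thread suggested for this core.
            if (next == nullptr) {
                next = cores[core].PopSuggested();
                if (next != nullptr) {
                    next->core = static_cast<int>(core);
                }
            }
            if (selected[core] != next) {
                selected[core] = next;
                cores_needing_reschedule |= 1U << core;
            }
        }
        return cores_needing_reschedule;
    }
};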
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index 25438b86b..7b23a6889 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -17,6 +17,7 @@
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/thread.h"
@@ -168,9 +169,12 @@ ResultCode ServerSession::CompleteSyncRequest() {
}
// Some service requests require the thread to block
- if (!context.IsThreadWaiting()) {
- context.GetThread().ResumeFromWait();
- context.GetThread().SetWaitSynchronizationResult(result);
+ {
+ SchedulerLock lock(kernel);
+ if (!context.IsThreadWaiting()) {
+ context.GetThread().ResumeFromWait();
+ context.GetThread().SetSynchronizationResults(nullptr, result);
+ }
}
request_queue.Pop();
@@ -180,8 +184,10 @@ ResultCode ServerSession::CompleteSyncRequest() {
ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread,
Core::Memory::Memory& memory) {
- Core::System::GetInstance().CoreTiming().ScheduleEvent(20000, request_event, {});
- return QueueSyncRequest(std::move(thread), memory);
+ ResultCode result = QueueSyncRequest(std::move(thread), memory);
+ const u64 delay = kernel.IsMulticore() ? 0U : 20000U;
+ Core::System::GetInstance().CoreTiming().ScheduleEvent(delay, request_event, {});
+ return result;
}
} // namespace Kernel
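
The reordering above — queue the sync request first and only then arm its completion event — can be summarized in a small sketch; the std::function parameters stand in for QueueSyncRequest and CoreTiming::ScheduleEvent and are assumptions for illustration:

#include <cstdint>
#include <functional>

using u64 = std::uint64_t;
using ResultCode = std::uint32_t; // stand-in; 0 == RESULT_SUCCESS

// Queue first so the request exists before its completion event can fire.
// Multicore needs no artificial latency (host threads run concurrently);
// single-core models IPC latency with the fixed 20000-cycle delay above.
ResultCode HandleSyncRequestSketch(bool is_multicore,
                                   const std::function<ResultCode()>& queue_request,
                                   const std::function<void(u64)>& schedule_event) {
    const ResultCode result = queue_request();
    const u64 delay = is_multicore ? 0U : 20000U;
    schedule_event(delay);
    return result;
}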
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 4ae4529f5..5db19dcf3 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -10,14 +10,15 @@
#include "common/alignment.h"
#include "common/assert.h"
+#include "common/fiber.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/string_util.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
-#include "core/core_manager.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
+#include "core/cpu_manager.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
@@ -27,6 +28,7 @@
#include "core/hle/kernel/memory/memory_block.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/mutex.h"
+#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/resource_limit.h"
@@ -37,6 +39,7 @@
#include "core/hle/kernel/svc_wrap.h"
#include "core/hle/kernel/synchronization.h"
#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/time_manager.h"
#include "core/hle/kernel/transfer_memory.h"
#include "core/hle/kernel/writable_event.h"
#include "core/hle/lock.h"
@@ -133,6 +136,7 @@ enum class ResourceLimitValueType {
ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_limit,
u32 resource_type, ResourceLimitValueType value_type) {
+ std::lock_guard lock{HLE::g_hle_lock};
const auto type = static_cast<ResourceType>(resource_type);
if (!IsValidResourceType(type)) {
LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type);
@@ -160,6 +164,7 @@ ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_
/// Set the process heap to a given size. It can both extend and shrink the heap.
static ResultCode SetHeapSize(Core::System& system, VAddr* heap_addr, u64 heap_size) {
+ std::lock_guard lock{HLE::g_hle_lock};
LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size);
// Size must be a multiple of 0x200000 (2MB) and be equal to or less than 8GB.
@@ -190,6 +195,7 @@ static ResultCode SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_s
static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
u32 attribute) {
+ std::lock_guard lock{HLE::g_hle_lock};
LOG_DEBUG(Kernel_SVC,
"called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
size, mask, attribute);
@@ -226,8 +232,15 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si
static_cast<Memory::MemoryAttribute>(attribute));
}
+static ResultCode SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask,
+ u32 attribute) {
+ return SetMemoryAttribute(system, static_cast<VAddr>(address), static_cast<std::size_t>(size),
+ mask, attribute);
+}
+
/// Maps a memory range into a different range.
static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
+ std::lock_guard lock{HLE::g_hle_lock};
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
@@ -241,8 +254,14 @@ static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr
return page_table.Map(dst_addr, src_addr, size);
}
+static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
+ return MapMemory(system, static_cast<VAddr>(dst_addr), static_cast<VAddr>(src_addr),
+ static_cast<std::size_t>(size));
+}
+
/// Unmaps a region that was previously mapped with svcMapMemory
static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
+ std::lock_guard lock{HLE::g_hle_lock};
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
@@ -256,9 +275,15 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad
return page_table.Unmap(dst_addr, src_addr, size);
}
+static ResultCode UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
+ return UnmapMemory(system, static_cast<VAddr>(dst_addr), static_cast<VAddr>(src_addr),
+ static_cast<std::size_t>(size));
+}
+
/// Connect to an OS service given the port name, returns the handle to the port to out
static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle,
VAddr port_name_address) {
+ std::lock_guard lock{HLE::g_hle_lock};
auto& memory = system.Memory();
if (!memory.IsValidVirtualAddress(port_name_address)) {
@@ -317,11 +342,30 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
auto thread = system.CurrentScheduler().GetCurrentThread();
- thread->InvalidateWakeupCallback();
- thread->SetStatus(ThreadStatus::WaitIPC);
- system.PrepareReschedule(thread->GetProcessorID());
+ {
+ SchedulerLock lock(system.Kernel());
+ thread->InvalidateHLECallback();
+ thread->SetStatus(ThreadStatus::WaitIPC);
+ session->SendSyncRequest(SharedFrom(thread), system.Memory());
+ }
+
+ if (thread->HasHLECallback()) {
+ Handle event_handle = thread->GetHLETimeEvent();
+ if (event_handle != InvalidHandle) {
+ auto& time_manager = system.Kernel().TimeManager();
+ time_manager.UnscheduleTimeEvent(event_handle);
+ }
+
+ {
+ SchedulerLock lock(system.Kernel());
+ auto* sync_object = thread->GetHLESyncObject();
+ sync_object->RemoveWaitingThread(SharedFrom(thread));
+ }
+
+ thread->InvokeHLECallback(SharedFrom(thread));
+ }
- return session->SendSyncRequest(SharedFrom(thread), system.Memory());
+ return thread->GetSignalingResult();
}
static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
@@ -383,6 +427,15 @@ static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle han
return ERR_INVALID_HANDLE;
}
+static ResultCode GetProcessId32(Core::System& system, u32* process_id_low, u32* process_id_high,
+ Handle handle) {
+ u64 process_id{};
+ const auto result = GetProcessId(system, &process_id, handle);
+ *process_id_low = static_cast<u32>(process_id);
+ *process_id_high = static_cast<u32>(process_id >> 32);
+ return result;
+}
+
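
All of the new 32-bit entry points follow one ABI convention: a 64-bit value crosses the boundary as a low/high pair of 32-bit registers. A self-contained sketch of the split/join helpers these wrappers effectively inline (SplitU64/JoinU64 are hypothetical names):

#include <cstdint>

using u32 = std::uint32_t;
using u64 = std::uint64_t;

// Split a 64-bit value into the low/high halves returned in two registers.
constexpr void SplitU64(u64 value, u32* low, u32* high) {
    *low = static_cast<u32>(value);
    *high = static_cast<u32>(value >> 32);
}

// Join the low/high halves passed in two registers back into a 64-bit value.
constexpr u64 JoinU64(u32 low, u32 high) {
    return static_cast<u64>(low) | (static_cast<u64>(high) << 32);
}

static_assert(JoinU64(0xDEADBEEFu, 0x0BADF00Du) == 0x0BADF00DDEADBEEFull);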
/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr handles_address,
u64 handle_count, s64 nano_seconds) {
@@ -447,10 +500,13 @@ static ResultCode CancelSynchronization(Core::System& system, Handle thread_hand
}
thread->CancelWait();
- system.PrepareReschedule(thread->GetProcessorID());
return RESULT_SUCCESS;
}
+static ResultCode CancelSynchronization32(Core::System& system, Handle thread_handle) {
+ return CancelSynchronization(system, thread_handle);
+}
+
/// Attempts to lock a mutex, creating it if it does not already exist
static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_handle,
VAddr mutex_addr, Handle requesting_thread_handle) {
@@ -475,6 +531,12 @@ static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_hand
requesting_thread_handle);
}
+static ResultCode ArbitrateLock32(Core::System& system, Handle holding_thread_handle,
+ u32 mutex_addr, Handle requesting_thread_handle) {
+ return ArbitrateLock(system, holding_thread_handle, static_cast<VAddr>(mutex_addr),
+ requesting_thread_handle);
+}
+
/// Unlock a mutex
static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) {
LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr);
@@ -494,6 +556,10 @@ static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) {
return current_process->GetMutex().Release(mutex_addr);
}
+static ResultCode ArbitrateUnlock32(Core::System& system, u32 mutex_addr) {
+ return ArbitrateUnlock(system, static_cast<VAddr>(mutex_addr));
+}
+
enum class BreakType : u32 {
Panic = 0,
AssertionFailed = 1,
@@ -594,6 +660,7 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt);
if (!break_reason.signal_debugger) {
+ SchedulerLock lock(system.Kernel());
LOG_CRITICAL(
Debug_Emulated,
"Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
@@ -605,14 +672,16 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
const auto thread_processor_id = current_thread->GetProcessorID();
system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
- system.Kernel().CurrentProcess()->PrepareForTermination();
-
// Kill the current thread
+ system.Kernel().ExceptionalExit();
current_thread->Stop();
- system.PrepareReschedule();
}
}
+static void Break32(Core::System& system, u32 reason, u32 info1, u32 info2) {
+ Break(system, reason, static_cast<u64>(info1), static_cast<u64>(info2));
+}
+
/// Used to output a message on a debug hardware unit - does nothing on a retail unit
static void OutputDebugString([[maybe_unused]] Core::System& system, VAddr address, u64 len) {
if (len == 0) {
@@ -627,6 +696,7 @@ static void OutputDebugString([[maybe_unused]] Core::System& system, VAddr addre
/// Gets system/memory information for the current process
static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 handle,
u64 info_sub_id) {
+ std::lock_guard lock{HLE::g_hle_lock};
LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
info_sub_id, handle);
@@ -863,9 +933,9 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
const u64 thread_ticks = current_thread->GetTotalCPUTimeTicks();
- out_ticks = thread_ticks + (core_timing.GetTicks() - prev_ctx_ticks);
+ out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
} else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
- out_ticks = core_timing.GetTicks() - prev_ctx_ticks;
+ out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
}
*result = out_ticks;
@@ -892,6 +962,7 @@ static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_h
/// Maps memory at a desired address
static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
+ std::lock_guard lock{HLE::g_hle_lock};
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
if (!Common::Is4KBAligned(addr)) {
@@ -939,8 +1010,13 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size)
return page_table.MapPhysicalMemory(addr, size);
}
+static ResultCode MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
+ return MapPhysicalMemory(system, static_cast<VAddr>(addr), static_cast<std::size_t>(size));
+}
+
/// Unmaps memory previously mapped via MapPhysicalMemory
static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
+ std::lock_guard lock{HLE::g_hle_lock};
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
if (!Common::Is4KBAligned(addr)) {
@@ -988,6 +1064,10 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size
return page_table.UnmapPhysicalMemory(addr, size);
}
+static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
+ return UnmapPhysicalMemory(system, static_cast<VAddr>(addr), static_cast<std::size_t>(size));
+}
+
/// Sets the thread activity
static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 activity) {
LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", handle, activity);
@@ -1017,10 +1097,11 @@ static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act
return ERR_BUSY;
}
- thread->SetActivity(static_cast<ThreadActivity>(activity));
+ return thread->SetActivity(static_cast<ThreadActivity>(activity));
+}
- system.PrepareReschedule(thread->GetProcessorID());
- return RESULT_SUCCESS;
+static ResultCode SetThreadActivity32(Core::System& system, Handle handle, u32 activity) {
+ return SetThreadActivity(system, handle, activity);
}
/// Gets the thread context
@@ -1064,6 +1145,10 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H
return RESULT_SUCCESS;
}
+static ResultCode GetThreadContext32(Core::System& system, u32 thread_context, Handle handle) {
+ return GetThreadContext(system, static_cast<VAddr>(thread_context), handle);
+}
+
/// Gets the priority for the specified thread
static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle handle) {
LOG_TRACE(Kernel_SVC, "called");
@@ -1071,6 +1156,7 @@ static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle);
if (!thread) {
+ *priority = 0;
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
return ERR_INVALID_HANDLE;
}
@@ -1105,18 +1191,26 @@ static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 pri
thread->SetPriority(priority);
- system.PrepareReschedule(thread->GetProcessorID());
return RESULT_SUCCESS;
}
+static ResultCode SetThreadPriority32(Core::System& system, Handle handle, u32 priority) {
+ return SetThreadPriority(system, handle, priority);
+}
+
/// Get which CPU core is executing the current thread
static u32 GetCurrentProcessorNumber(Core::System& system) {
LOG_TRACE(Kernel_SVC, "called");
- return system.CurrentScheduler().GetCurrentThread()->GetProcessorID();
+ return static_cast<u32>(system.CurrentPhysicalCore().CoreIndex());
+}
+
+static u32 GetCurrentProcessorNumber32(Core::System& system) {
+ return GetCurrentProcessorNumber(system);
}
static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_handle, VAddr addr,
u64 size, u32 permissions) {
+ std::lock_guard lock{HLE::g_hle_lock};
LOG_TRACE(Kernel_SVC,
"called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
shared_memory_handle, addr, size, permissions);
@@ -1187,9 +1281,16 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
return shared_memory->Map(*current_process, addr, size, permission_type);
}
+static ResultCode MapSharedMemory32(Core::System& system, Handle shared_memory_handle, u32 addr,
+ u32 size, u32 permissions) {
+ return MapSharedMemory(system, shared_memory_handle, static_cast<VAddr>(addr),
+ static_cast<std::size_t>(size), permissions);
+}
+
static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
VAddr page_info_address, Handle process_handle,
VAddr address) {
+ std::lock_guard lock{HLE::g_hle_lock};
LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
std::shared_ptr<Process> process = handle_table.Get<Process>(process_handle);
@@ -1372,6 +1473,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
/// Exits the current process
static void ExitProcess(Core::System& system) {
auto* current_process = system.Kernel().CurrentProcess();
+ UNIMPLEMENTED();
LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running,
@@ -1381,8 +1483,10 @@ static void ExitProcess(Core::System& system) {
// Kill the current thread
system.CurrentScheduler().GetCurrentThread()->Stop();
+}
- system.PrepareReschedule();
+static void ExitProcess32(Core::System& system) {
+ ExitProcess(system);
}
/// Creates a new thread
@@ -1428,9 +1532,10 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Threads, 1));
+ ThreadType type = THREADTYPE_USER;
CASCADE_RESULT(std::shared_ptr<Thread> thread,
- Thread::Create(kernel, "", entry_point, priority, arg, processor_id, stack_top,
- *current_process));
+ Thread::Create(system, type, "", entry_point, priority, arg, processor_id,
+ stack_top, current_process));
const auto new_thread_handle = current_process->GetHandleTable().Create(thread);
if (new_thread_handle.Failed()) {
@@ -1444,11 +1549,15 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
thread->SetName(
fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *new_thread_handle));
- system.PrepareReschedule(thread->GetProcessorID());
-
return RESULT_SUCCESS;
}
+static ResultCode CreateThread32(Core::System& system, Handle* out_handle, u32 priority,
+ u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) {
+ return CreateThread(system, out_handle, static_cast<VAddr>(entry_point), static_cast<u64>(arg),
+ static_cast<VAddr>(stack_top), priority, processor_id);
+}
+
/// Starts the thread for the provided handle
static ResultCode StartThread(Core::System& system, Handle thread_handle) {
LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
@@ -1463,13 +1572,11 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) {
ASSERT(thread->GetStatus() == ThreadStatus::Dormant);
- thread->ResumeFromWait();
-
- if (thread->GetStatus() == ThreadStatus::Ready) {
- system.PrepareReschedule(thread->GetProcessorID());
- }
+ return thread->Start();
+}
- return RESULT_SUCCESS;
+static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
+ return StartThread(system, thread_handle);
}
/// Called when a thread exits
@@ -1477,9 +1584,12 @@ static void ExitThread(Core::System& system) {
LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
- current_thread->Stop();
system.GlobalScheduler().RemoveThread(SharedFrom(current_thread));
- system.PrepareReschedule();
+ current_thread->Stop();
+}
+
+static void ExitThread32(Core::System& system) {
+ ExitThread(system);
}
/// Sleep the current thread
@@ -1498,15 +1608,21 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
if (nanoseconds <= 0) {
switch (static_cast<SleepType>(nanoseconds)) {
- case SleepType::YieldWithoutLoadBalancing:
- is_redundant = current_thread->YieldSimple();
+ case SleepType::YieldWithoutLoadBalancing: {
+ auto pair = current_thread->YieldSimple();
+ is_redundant = pair.second;
break;
- case SleepType::YieldWithLoadBalancing:
- is_redundant = current_thread->YieldAndBalanceLoad();
+ }
+ case SleepType::YieldWithLoadBalancing: {
+ auto pair = current_thread->YieldAndBalanceLoad();
+ is_redundant = pair.second;
break;
- case SleepType::YieldAndWaitForLoadBalancing:
- is_redundant = current_thread->YieldAndWaitForLoadBalancing();
+ }
+ case SleepType::YieldAndWaitForLoadBalancing: {
+ auto pair = current_thread->YieldAndWaitForLoadBalancing();
+ is_redundant = pair.second;
break;
+ }
default:
UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
}
@@ -1514,13 +1630,18 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
current_thread->Sleep(nanoseconds);
}
- if (is_redundant) {
- // If it's redundant, the core is pretty much idle. Some games keep idling
- // a core while it's doing nothing, we advance timing to avoid costly continuous
- // calls.
- system.CoreTiming().AddTicks(2000);
+ if (is_redundant && !system.Kernel().IsMulticore()) {
+ system.Kernel().ExitSVCProfile();
+ system.CoreTiming().AddTicks(1000U);
+ system.GetCpuManager().PreemptSingleCore();
+ system.Kernel().EnterSVCProfile();
}
- system.PrepareReschedule(current_thread->GetProcessorID());
+}
+
+static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanoseconds_high) {
+ const s64 nanoseconds = static_cast<s64>(static_cast<u64>(nanoseconds_low) |
+ (static_cast<u64>(nanoseconds_high) << 32));
+ SleepThread(system, nanoseconds);
}
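
The redundant-yield branch above only matters in single-core mode, where one host thread emulates every guest core. A hedged sketch of that fast path; the callbacks stand in for CoreTiming::AddTicks and CpuManager::PreemptSingleCore:

#include <cstdint>
#include <functional>

// Sketch of the single-core idle-yield path: when a yield is redundant and
// all guest cores share one host thread, advance guest time and preempt so
// the other guest cores make progress instead of busy-spinning.
void OnRedundantYieldSketch(bool is_multicore,
                            const std::function<void(std::uint64_t)>& add_ticks,
                            const std::function<void()>& preempt_single_core) {
    if (!is_multicore) {
        add_ticks(1000); // matches the 1000-tick advance in the change above
        preempt_single_core();
    }
}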
/// Wait process wide key atomic
@@ -1547,31 +1668,69 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
}
ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
-
+ auto& kernel = system.Kernel();
+ Handle event_handle;
+ Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
auto* const current_process = system.Kernel().CurrentProcess();
- const auto& handle_table = current_process->GetHandleTable();
- std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
- ASSERT(thread);
+ {
+ SchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
+ const auto& handle_table = current_process->GetHandleTable();
+ std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
+ ASSERT(thread);
+
+ current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
+
+ if (thread->IsPendingTermination()) {
+ lock.CancelSleep();
+ return ERR_THREAD_TERMINATING;
+ }
+
+ const auto release_result = current_process->GetMutex().Release(mutex_addr);
+ if (release_result.IsError()) {
+ lock.CancelSleep();
+ return release_result;
+ }
+
+ if (nano_seconds == 0) {
+ lock.CancelSleep();
+ return RESULT_TIMEOUT;
+ }
- const auto release_result = current_process->GetMutex().Release(mutex_addr);
- if (release_result.IsError()) {
- return release_result;
+ current_thread->SetCondVarWaitAddress(condition_variable_addr);
+ current_thread->SetMutexWaitAddress(mutex_addr);
+ current_thread->SetWaitHandle(thread_handle);
+ current_thread->SetStatus(ThreadStatus::WaitCondVar);
+ current_process->InsertConditionVariableThread(SharedFrom(current_thread));
}
- Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
- current_thread->SetCondVarWaitAddress(condition_variable_addr);
- current_thread->SetMutexWaitAddress(mutex_addr);
- current_thread->SetWaitHandle(thread_handle);
- current_thread->SetStatus(ThreadStatus::WaitCondVar);
- current_thread->InvalidateWakeupCallback();
- current_process->InsertConditionVariableThread(SharedFrom(current_thread));
+ if (event_handle != InvalidHandle) {
+ auto& time_manager = kernel.TimeManager();
+ time_manager.UnscheduleTimeEvent(event_handle);
+ }
+
+ {
+ SchedulerLock lock(kernel);
- current_thread->WakeAfterDelay(nano_seconds);
+ auto* owner = current_thread->GetLockOwner();
+ if (owner != nullptr) {
+ owner->RemoveMutexWaiter(SharedFrom(current_thread));
+ }
+ current_process->RemoveConditionVariableThread(SharedFrom(current_thread));
+ }
// Note: Deliberately don't attempt to inherit the lock owner's priority.
- system.PrepareReschedule(current_thread->GetProcessorID());
- return RESULT_SUCCESS;
+ return current_thread->GetSignalingResult();
+}
+
+static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 mutex_addr,
+ u32 condition_variable_addr, Handle thread_handle,
+ u32 nanoseconds_low, u32 nanoseconds_high) {
+ const s64 nanoseconds =
+ static_cast<s64>(nanoseconds_low | (static_cast<u64>(nanoseconds_high) << 32));
+ return WaitProcessWideKeyAtomic(system, static_cast<VAddr>(mutex_addr),
+ static_cast<VAddr>(condition_variable_addr), thread_handle,
+ nanoseconds);
}
/// Signal process wide key
@@ -1582,7 +1741,9 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
// Retrieve a list of all threads that are waiting for this condition variable.
- auto* const current_process = system.Kernel().CurrentProcess();
+ auto& kernel = system.Kernel();
+ SchedulerLock lock(kernel);
+ auto* const current_process = kernel.CurrentProcess();
std::vector<std::shared_ptr<Thread>> waiting_threads =
current_process->GetConditionVariableThreads(condition_variable_addr);
@@ -1591,7 +1752,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
std::size_t last = waiting_threads.size();
if (target > 0)
last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
-
+ auto& time_manager = kernel.TimeManager();
for (std::size_t index = 0; index < last; ++index) {
auto& thread = waiting_threads[index];
@@ -1599,7 +1760,6 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
// liberate Cond Var Thread.
current_process->RemoveConditionVariableThread(thread);
- thread->SetCondVarWaitAddress(0);
const std::size_t current_core = system.CurrentCoreIndex();
auto& monitor = system.Monitor();
@@ -1610,10 +1770,8 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
u32 update_val = 0;
const VAddr mutex_address = thread->GetMutexWaitAddress();
do {
- monitor.SetExclusive(current_core, mutex_address);
-
// If the mutex is not yet acquired, acquire it.
- mutex_val = memory.Read32(mutex_address);
+ mutex_val = monitor.ExclusiveRead32(current_core, mutex_address);
if (mutex_val != 0) {
update_val = mutex_val | Mutex::MutexHasWaitersFlag;
@@ -1621,33 +1779,28 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
update_val = thread->GetWaitHandle();
}
} while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val));
+ monitor.ClearExclusive();
if (mutex_val == 0) {
// We were able to acquire the mutex, resume this thread.
- ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
- thread->ResumeFromWait();
-
auto* const lock_owner = thread->GetLockOwner();
if (lock_owner != nullptr) {
lock_owner->RemoveMutexWaiter(thread);
}
thread->SetLockOwner(nullptr);
- thread->SetMutexWaitAddress(0);
- thread->SetWaitHandle(0);
- thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
- system.PrepareReschedule(thread->GetProcessorID());
+ thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
+ thread->ResumeFromWait();
} else {
// The mutex is already owned by some other thread, make this thread wait on it.
const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
auto owner = handle_table.Get<Thread>(owner_handle);
ASSERT(owner);
- ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
- thread->InvalidateWakeupCallback();
- thread->SetStatus(ThreadStatus::WaitMutex);
+ if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
+ thread->SetStatus(ThreadStatus::WaitMutex);
+ }
owner->AddMutexWaiter(thread);
- system.PrepareReschedule(thread->GetProcessorID());
}
}
}
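
The rewritten loop reads the mutex word through the exclusive monitor (ExclusiveRead32/ExclusiveWrite32). On the host, the same acquire-or-mark-waiters step can be modeled as a compare-and-swap; a sketch under that assumption, with constants mirroring Mutex::MutexHasWaitersFlag and Mutex::MutexOwnerMask:

#include <atomic>
#include <cstdint>

using u32 = std::uint32_t;

constexpr u32 kMutexHasWaitersFlag = 0x40000000; // mirrors Mutex::MutexHasWaitersFlag
constexpr u32 kMutexOwnerMask = 0xBFFFFFFF;      // mirrors Mutex::MutexOwnerMask

// If the mutex word is free (0), acquire it for wait_handle; otherwise set
// the has-waiters flag on the current owner. Returns the value observed
// before the update, matching the mutex_val the loop above branches on.
u32 AcquireOrMarkWaiters(std::atomic<u32>& mutex_word, u32 wait_handle) {
    u32 observed = mutex_word.load();
    u32 desired;
    do {
        desired = (observed != 0) ? (observed | kMutexHasWaitersFlag) : wait_handle;
    } while (!mutex_word.compare_exchange_weak(observed, desired));
    return observed;
}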
@@ -1678,12 +1831,15 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type,
auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
const ResultCode result =
address_arbiter.WaitForAddress(address, arbitration_type, value, timeout);
- if (result == RESULT_SUCCESS) {
- system.PrepareReschedule();
- }
return result;
}
+static ResultCode WaitForAddress32(Core::System& system, u32 address, u32 type, s32 value,
+ u32 timeout_low, u32 timeout_high) {
+ s64 timeout = static_cast<s64>(timeout_low | (static_cast<u64>(timeout_high) << 32));
+ return WaitForAddress(system, static_cast<VAddr>(address), type, value, timeout);
+}
+
// Signals to an address (via Address Arbiter)
static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value,
s32 num_to_wake) {
@@ -1707,6 +1863,11 @@ static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type,
return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake);
}
+static ResultCode SignalToAddress32(Core::System& system, u32 address, u32 type, s32 value,
+ s32 num_to_wake) {
+ return SignalToAddress(system, static_cast<VAddr>(address), type, value, num_to_wake);
+}
+
static void KernelDebug([[maybe_unused]] Core::System& system,
[[maybe_unused]] u32 kernel_debug_type, [[maybe_unused]] u64 param1,
[[maybe_unused]] u64 param2, [[maybe_unused]] u64 param3) {
@@ -1725,14 +1886,21 @@ static u64 GetSystemTick(Core::System& system) {
auto& core_timing = system.CoreTiming();
// Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick)
- const u64 result{Core::Timing::CpuCyclesToClockCycles(system.CoreTiming().GetTicks())};
+ const u64 result{system.CoreTiming().GetClockTicks()};
- // Advance time to defeat dumb games that busy-wait for the frame to end.
- core_timing.AddTicks(400);
+ if (!system.Kernel().IsMulticore()) {
+ core_timing.AddTicks(400U);
+ }
return result;
}
+static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high) {
+ u64 time = GetSystemTick(system);
+ *time_low = static_cast<u32>(time);
+ *time_high = static_cast<u32>(time >> 32);
+}
+
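
GetClockTicks() above returns the counter value directly in cntpct_el0's 19.2 MHz domain rather than converting CPU cycles on the fly. A sketch of the conversion it supersedes; the frequencies are hardware constants quoted for illustration, and the real implementation uses wider intermediate math to avoid overflow:

#include <cstdint>

// The system tick counter (cntpct_el0) runs at 19.2 MHz, not at the CPU
// clock. Constants here are illustrative; the naive multiply below can
// overflow u64 for very large cycle counts.
constexpr std::uint64_t kCpuClockHz = 1'020'000'000; // approximate Tegra X1 CPU clock
constexpr std::uint64_t kCounterHz = 19'200'000;     // cntpct_el0 frequency

constexpr std::uint64_t CpuCyclesToClockCycles(std::uint64_t cpu_cycles) {
    return cpu_cycles * kCounterHz / kCpuClockHz;
}

static_assert(CpuCyclesToClockCycles(kCpuClockHz) == kCounterHz);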
/// Close a handle
static ResultCode CloseHandle(Core::System& system, Handle handle) {
LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle);
@@ -1765,9 +1933,14 @@ static ResultCode ResetSignal(Core::System& system, Handle handle) {
return ERR_INVALID_HANDLE;
}
+static ResultCode ResetSignal32(Core::System& system, Handle handle) {
+ return ResetSignal(system, handle);
+}
+
/// Creates a TransferMemory object
static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAddr addr, u64 size,
u32 permissions) {
+ std::lock_guard lock{HLE::g_hle_lock};
LOG_DEBUG(Kernel_SVC, "called addr=0x{:X}, size=0x{:X}, perms=0x{:08X}", addr, size,
permissions);
@@ -1812,6 +1985,12 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd
return RESULT_SUCCESS;
}
+static ResultCode CreateTransferMemory32(Core::System& system, Handle* handle, u32 addr, u32 size,
+ u32 permissions) {
+ return CreateTransferMemory(system, handle, static_cast<VAddr>(addr),
+ static_cast<std::size_t>(size), permissions);
+}
+
static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core,
u64* mask) {
LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
@@ -1821,6 +2000,8 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
thread_handle);
+ *core = 0;
+ *mask = 0;
return ERR_INVALID_HANDLE;
}
@@ -1830,6 +2011,15 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
return RESULT_SUCCESS;
}
+static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle, u32* core,
+ u32* mask_low, u32* mask_high) {
+ u64 mask{};
+ const auto result = GetThreadCoreMask(system, thread_handle, core, &mask);
+ *mask_high = static_cast<u32>(mask >> 32);
+ *mask_low = static_cast<u32>(mask);
+ return result;
+}
+
static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, u32 core,
u64 affinity_mask) {
LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core=0x{:X}, affinity_mask=0x{:016X}",
@@ -1861,7 +2051,7 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
return ERR_INVALID_COMBINATION;
}
- if (core < Core::NUM_CPU_CORES) {
+ if (core < Core::Hardware::NUM_CPU_CORES) {
if ((affinity_mask & (1ULL << core)) == 0) {
LOG_ERROR(Kernel_SVC,
"Core is not enabled for the current mask, core={}, mask={:016X}", core,
@@ -1883,11 +2073,14 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
return ERR_INVALID_HANDLE;
}
- system.PrepareReschedule(thread->GetProcessorID());
- thread->ChangeCore(core, affinity_mask);
- system.PrepareReschedule(thread->GetProcessorID());
+ return thread->SetCoreAndAffinityMask(core, affinity_mask);
+}
- return RESULT_SUCCESS;
+static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, u32 core,
+ u32 affinity_mask_low, u32 affinity_mask_high) {
+ const u64 affinity_mask =
+ static_cast<u64>(affinity_mask_low) | (static_cast<u64>(affinity_mask_high) << 32);
+ return SetThreadCoreMask(system, thread_handle, core, affinity_mask);
}
static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle* read_handle) {
@@ -1918,6 +2111,10 @@ static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle
return RESULT_SUCCESS;
}
+static ResultCode CreateEvent32(Core::System& system, Handle* write_handle, Handle* read_handle) {
+ return CreateEvent(system, write_handle, read_handle);
+}
+
static ResultCode ClearEvent(Core::System& system, Handle handle) {
LOG_TRACE(Kernel_SVC, "called, event=0x{:08X}", handle);
@@ -1939,6 +2136,10 @@ static ResultCode ClearEvent(Core::System& system, Handle handle) {
return ERR_INVALID_HANDLE;
}
+static ResultCode ClearEvent32(Core::System& system, Handle handle) {
+ return ClearEvent(system, handle);
+}
+
static ResultCode SignalEvent(Core::System& system, Handle handle) {
LOG_DEBUG(Kernel_SVC, "called. Handle=0x{:08X}", handle);
@@ -1951,10 +2152,13 @@ static ResultCode SignalEvent(Core::System& system, Handle handle) {
}
writable_event->Signal();
- system.PrepareReschedule();
return RESULT_SUCCESS;
}
+static ResultCode SignalEvent32(Core::System& system, Handle handle) {
+ return SignalEvent(system, handle);
+}
+
static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {
LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type);
@@ -1982,6 +2186,7 @@ static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_
}
static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle) {
+ std::lock_guard lock{HLE::g_hle_lock};
LOG_DEBUG(Kernel_SVC, "called");
auto& kernel = system.Kernel();
@@ -2139,6 +2344,15 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
return RESULT_SUCCESS;
}
+static ResultCode FlushProcessDataCache32(Core::System& system, Handle handle, u32 address,
+ u32 size) {
+ // Note(Blinkhawk): For emulation purposes this is mostly a no-op, as all
+ // emulation is done at the same cache level on the host architecture, so the
+ // data cache does not need flushing.
+ LOG_DEBUG(Kernel_SVC, "called");
+ return RESULT_SUCCESS;
+}
+
namespace {
struct FunctionDef {
using Func = void(Core::System&);
@@ -2153,57 +2367,57 @@ static const FunctionDef SVC_Table_32[] = {
{0x00, nullptr, "Unknown"},
{0x01, SvcWrap32<SetHeapSize32>, "SetHeapSize32"},
{0x02, nullptr, "Unknown"},
- {0x03, nullptr, "SetMemoryAttribute32"},
- {0x04, nullptr, "MapMemory32"},
- {0x05, nullptr, "UnmapMemory32"},
+ {0x03, SvcWrap32<SetMemoryAttribute32>, "SetMemoryAttribute32"},
+ {0x04, SvcWrap32<MapMemory32>, "MapMemory32"},
+ {0x05, SvcWrap32<UnmapMemory32>, "UnmapMemory32"},
{0x06, SvcWrap32<QueryMemory32>, "QueryMemory32"},
- {0x07, nullptr, "ExitProcess32"},
- {0x08, nullptr, "CreateThread32"},
- {0x09, nullptr, "StartThread32"},
- {0x0a, nullptr, "ExitThread32"},
- {0x0b, nullptr, "SleepThread32"},
+ {0x07, SvcWrap32<ExitProcess32>, "ExitProcess32"},
+ {0x08, SvcWrap32<CreateThread32>, "CreateThread32"},
+ {0x09, SvcWrap32<StartThread32>, "StartThread32"},
+ {0x0a, SvcWrap32<ExitThread32>, "ExitThread32"},
+ {0x0b, SvcWrap32<SleepThread32>, "SleepThread32"},
{0x0c, SvcWrap32<GetThreadPriority32>, "GetThreadPriority32"},
- {0x0d, nullptr, "SetThreadPriority32"},
- {0x0e, nullptr, "GetThreadCoreMask32"},
- {0x0f, nullptr, "SetThreadCoreMask32"},
- {0x10, nullptr, "GetCurrentProcessorNumber32"},
- {0x11, nullptr, "SignalEvent32"},
- {0x12, nullptr, "ClearEvent32"},
- {0x13, nullptr, "MapSharedMemory32"},
+ {0x0d, SvcWrap32<SetThreadPriority32>, "SetThreadPriority32"},
+ {0x0e, SvcWrap32<GetThreadCoreMask32>, "GetThreadCoreMask32"},
+ {0x0f, SvcWrap32<SetThreadCoreMask32>, "SetThreadCoreMask32"},
+ {0x10, SvcWrap32<GetCurrentProcessorNumber32>, "GetCurrentProcessorNumber32"},
+ {0x11, SvcWrap32<SignalEvent32>, "SignalEvent32"},
+ {0x12, SvcWrap32<ClearEvent32>, "ClearEvent32"},
+ {0x13, SvcWrap32<MapSharedMemory32>, "MapSharedMemory32"},
{0x14, nullptr, "UnmapSharedMemory32"},
- {0x15, nullptr, "CreateTransferMemory32"},
+ {0x15, SvcWrap32<CreateTransferMemory32>, "CreateTransferMemory32"},
{0x16, SvcWrap32<CloseHandle32>, "CloseHandle32"},
- {0x17, nullptr, "ResetSignal32"},
+ {0x17, SvcWrap32<ResetSignal32>, "ResetSignal32"},
{0x18, SvcWrap32<WaitSynchronization32>, "WaitSynchronization32"},
- {0x19, nullptr, "CancelSynchronization32"},
- {0x1a, nullptr, "ArbitrateLock32"},
- {0x1b, nullptr, "ArbitrateUnlock32"},
- {0x1c, nullptr, "WaitProcessWideKeyAtomic32"},
+ {0x19, SvcWrap32<CancelSynchronization32>, "CancelSynchronization32"},
+ {0x1a, SvcWrap32<ArbitrateLock32>, "ArbitrateLock32"},
+ {0x1b, SvcWrap32<ArbitrateUnlock32>, "ArbitrateUnlock32"},
+ {0x1c, SvcWrap32<WaitProcessWideKeyAtomic32>, "WaitProcessWideKeyAtomic32"},
{0x1d, SvcWrap32<SignalProcessWideKey32>, "SignalProcessWideKey32"},
- {0x1e, nullptr, "GetSystemTick32"},
+ {0x1e, SvcWrap32<GetSystemTick32>, "GetSystemTick32"},
{0x1f, SvcWrap32<ConnectToNamedPort32>, "ConnectToNamedPort32"},
{0x20, nullptr, "Unknown"},
{0x21, SvcWrap32<SendSyncRequest32>, "SendSyncRequest32"},
{0x22, nullptr, "SendSyncRequestWithUserBuffer32"},
{0x23, nullptr, "Unknown"},
- {0x24, nullptr, "GetProcessId32"},
+ {0x24, SvcWrap32<GetProcessId32>, "GetProcessId32"},
{0x25, SvcWrap32<GetThreadId32>, "GetThreadId32"},
- {0x26, nullptr, "Break32"},
+ {0x26, SvcWrap32<Break32>, "Break32"},
{0x27, nullptr, "OutputDebugString32"},
{0x28, nullptr, "Unknown"},
{0x29, SvcWrap32<GetInfo32>, "GetInfo32"},
{0x2a, nullptr, "Unknown"},
{0x2b, nullptr, "Unknown"},
- {0x2c, nullptr, "MapPhysicalMemory32"},
- {0x2d, nullptr, "UnmapPhysicalMemory32"},
+ {0x2c, SvcWrap32<MapPhysicalMemory32>, "MapPhysicalMemory32"},
+ {0x2d, SvcWrap32<UnmapPhysicalMemory32>, "UnmapPhysicalMemory32"},
{0x2e, nullptr, "Unknown"},
{0x2f, nullptr, "Unknown"},
{0x30, nullptr, "Unknown"},
{0x31, nullptr, "Unknown"},
- {0x32, nullptr, "SetThreadActivity32"},
- {0x33, nullptr, "GetThreadContext32"},
- {0x34, nullptr, "WaitForAddress32"},
- {0x35, nullptr, "SignalToAddress32"},
+ {0x32, SvcWrap32<SetThreadActivity32>, "SetThreadActivity32"},
+ {0x33, SvcWrap32<GetThreadContext32>, "GetThreadContext32"},
+ {0x34, SvcWrap32<WaitForAddress32>, "WaitForAddress32"},
+ {0x35, SvcWrap32<SignalToAddress32>, "SignalToAddress32"},
{0x36, nullptr, "Unknown"},
{0x37, nullptr, "Unknown"},
{0x38, nullptr, "Unknown"},
@@ -2219,7 +2433,7 @@ static const FunctionDef SVC_Table_32[] = {
{0x42, nullptr, "Unknown"},
{0x43, nullptr, "ReplyAndReceive32"},
{0x44, nullptr, "Unknown"},
- {0x45, nullptr, "CreateEvent32"},
+ {0x45, SvcWrap32<CreateEvent32>, "CreateEvent32"},
{0x46, nullptr, "Unknown"},
{0x47, nullptr, "Unknown"},
{0x48, nullptr, "Unknown"},
@@ -2245,7 +2459,7 @@ static const FunctionDef SVC_Table_32[] = {
{0x5c, nullptr, "Unknown"},
{0x5d, nullptr, "Unknown"},
{0x5e, nullptr, "Unknown"},
- {0x5F, nullptr, "FlushProcessDataCache32"},
+ {0x5F, SvcWrap32<FlushProcessDataCache32>, "FlushProcessDataCache32"},
{0x60, nullptr, "Unknown"},
{0x61, nullptr, "Unknown"},
{0x62, nullptr, "Unknown"},
@@ -2423,13 +2637,10 @@ static const FunctionDef* GetSVCInfo64(u32 func_num) {
return &SVC_Table_64[func_num];
}
-MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));
-
void Call(Core::System& system, u32 immediate) {
- MICROPROFILE_SCOPE(Kernel_SVC);
-
- // Lock the global kernel mutex when we enter the kernel HLE.
- std::lock_guard lock{HLE::g_hle_lock};
+ system.ExitDynarmicProfile();
+ auto& kernel = system.Kernel();
+ kernel.EnterSVCProfile();
const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)
: GetSVCInfo32(immediate);
@@ -2442,6 +2653,9 @@ void Call(Core::System& system, u32 immediate) {
} else {
LOG_CRITICAL(Kernel_SVC, "Unknown SVC function 0x{:X}", immediate);
}
+
+ kernel.ExitSVCProfile();
+ system.EnterDynarmicProfile();
}
} // namespace Kernel::Svc
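
The table edits above all slot handlers into the FunctionDef dispatch pattern this file already uses: index a static array of {id, handler, name} entries by the SVC immediate. A trimmed, self-contained sketch of that pattern with simplified types:

#include <cstdint>
#include <cstdio>
#include <iterator>

struct System {}; // stand-in for Core::System

struct FunctionDef {
    using Func = void (*)(System&);
    std::uint32_t id;
    Func func;
    const char* name;
};

static void SetHeapSize32Sketch(System&) { std::puts("SetHeapSize32 called"); }

static constexpr FunctionDef kSvcTable32[] = {
    {0x00, nullptr, "Unknown"},
    {0x01, &SetHeapSize32Sketch, "SetHeapSize32"},
};

void CallSketch(System& system, std::uint32_t immediate) {
    if (immediate >= std::size(kSvcTable32) || kSvcTable32[immediate].func == nullptr) {
        std::printf("Unknown or unimplemented SVC 0x%X\n", immediate);
        return;
    }
    kSvcTable32[immediate].func(system);
}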
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 7d735e3fa..0b6dd9df0 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -350,13 +350,50 @@ void SvcWrap64(Core::System& system) {
func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2));
}
-// Used by QueryMemory32
+// Used by QueryMemory32, ArbitrateLock32
template <ResultCode func(Core::System&, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
FuncReturn32(system,
func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2)).raw);
}
+// Used by Break32
+template <void func(Core::System&, u32, u32, u32)>
+void SvcWrap32(Core::System& system) {
+ func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2));
+}
+
+// Used by ExitProcess32, ExitThread32
+template <void func(Core::System&)>
+void SvcWrap32(Core::System& system) {
+ func(system);
+}
+
+// Used by GetCurrentProcessorNumber32
+template <u32 func(Core::System&)>
+void SvcWrap32(Core::System& system) {
+ FuncReturn32(system, func(system));
+}
+
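
Each of these SvcWrap32 additions relies on the same trick: the wrapper template is overloaded on the signature of its non-type template parameter, so every SVC picks a matching marshaling shim at compile time. A reduced sketch (SvcWrap32Sketch and the stub SVCs are hypothetical):

#include <cstdint>
#include <cstdio>

struct System {}; // stand-in for Core::System

// Overload selected for SVCs that take no parameters and return nothing,
// e.g. the ExitProcess32/ExitThread32 wrappers above.
template <void (*func)(System&)>
void SvcWrap32Sketch(System& system) {
    func(system);
}

// Overload selected for SVCs returning a u32 in W0, e.g.
// GetCurrentProcessorNumber32.
template <std::uint32_t (*func)(System&)>
void SvcWrap32Sketch(System& system) {
    std::printf("W0 <- %u\n", func(system));
}

static void ExitThreadSketch(System&) {}
static std::uint32_t GetCurrentProcessorNumberSketch(System&) { return 0; }

int main() {
    System system;
    SvcWrap32Sketch<&ExitThreadSketch>(system);              // picks the void overload
    SvcWrap32Sketch<&GetCurrentProcessorNumberSketch>(system); // picks the u32 overload
}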
+// Used by SleepThread32
+template <void func(Core::System&, u32, u32)>
+void SvcWrap32(Core::System& system) {
+ func(system, Param32(system, 0), Param32(system, 1));
+}
+
+// Used by CreateThread32
+template <ResultCode func(Core::System&, Handle*, u32, u32, u32, u32, s32)>
+void SvcWrap32(Core::System& system) {
+ Handle param_1 = 0;
+
+ const u32 retval = func(system, &param_1, Param32(system, 0), Param32(system, 1),
+ Param32(system, 2), Param32(system, 3), Param32(system, 4))
+ .raw;
+
+ system.CurrentArmInterface().SetReg(1, param_1);
+ FuncReturn(system, retval);
+}
+
// Used by GetInfo32
template <ResultCode func(Core::System&, u32*, u32*, u32, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
@@ -393,18 +430,114 @@ void SvcWrap32(Core::System& system) {
FuncReturn(system, retval);
}
+// Used by GetSystemTick32
+template <void func(Core::System&, u32*, u32*)>
+void SvcWrap32(Core::System& system) {
+ u32 param_1 = 0;
+ u32 param_2 = 0;
+
+ func(system, &param_1, &param_2);
+ system.CurrentArmInterface().SetReg(0, param_1);
+ system.CurrentArmInterface().SetReg(1, param_2);
+}
+
+// Used by CreateEvent32
+template <ResultCode func(Core::System&, Handle*, Handle*)>
+void SvcWrap32(Core::System& system) {
+ Handle param_1 = 0;
+ Handle param_2 = 0;
+
+ const u32 retval = func(system, &param_1, &param_2).raw;
+ system.CurrentArmInterface().SetReg(1, param_1);
+ system.CurrentArmInterface().SetReg(2, param_2);
+ FuncReturn(system, retval);
+}
+
+// Used by GetThreadId32
+template <ResultCode func(Core::System&, Handle, u32*, u32*, u32*)>
+void SvcWrap32(Core::System& system) {
+ u32 param_1 = 0;
+ u32 param_2 = 0;
+ u32 param_3 = 0;
+
+ const u32 retval = func(system, Param32(system, 2), &param_1, &param_2, &param_3).raw;
+ system.CurrentArmInterface().SetReg(1, param_1);
+ system.CurrentArmInterface().SetReg(2, param_2);
+ system.CurrentArmInterface().SetReg(3, param_3);
+ FuncReturn(system, retval);
+}
+
// Used by SignalProcessWideKey32
template <void func(Core::System&, u32, s32)>
void SvcWrap32(Core::System& system) {
func(system, static_cast<u32>(Param(system, 0)), static_cast<s32>(Param(system, 1)));
}
-// Used by SendSyncRequest32
+// Used by SetThreadPriority32
+template <ResultCode func(Core::System&, Handle, u32)>
+void SvcWrap32(Core::System& system) {
+ const u32 retval =
+ func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw;
+ FuncReturn(system, retval);
+}
+
+// Used by SetThreadCoreMask32
+template <ResultCode func(Core::System&, Handle, u32, u32, u32)>
+void SvcWrap32(Core::System& system) {
+ const u32 retval =
+ func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
+ static_cast<u32>(Param(system, 2)), static_cast<u32>(Param(system, 3)))
+ .raw;
+ FuncReturn(system, retval);
+}
+
+// Used by WaitProcessWideKeyAtomic32
+template <ResultCode func(Core::System&, u32, u32, Handle, u32, u32)>
+void SvcWrap32(Core::System& system) {
+ const u32 retval =
+ func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
+ static_cast<Handle>(Param(system, 2)), static_cast<u32>(Param(system, 3)),
+ static_cast<u32>(Param(system, 4)))
+ .raw;
+ FuncReturn(system, retval);
+}
+
+// Used by WaitForAddress32
+template <ResultCode func(Core::System&, u32, u32, s32, u32, u32)>
+void SvcWrap32(Core::System& system) {
+ const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
+ static_cast<u32>(Param(system, 1)), static_cast<s32>(Param(system, 2)),
+ static_cast<u32>(Param(system, 3)), static_cast<u32>(Param(system, 4)))
+ .raw;
+ FuncReturn(system, retval);
+}
+
+// Used by SignalToAddress32
+template <ResultCode func(Core::System&, u32, u32, s32, s32)>
+void SvcWrap32(Core::System& system) {
+ const u32 retval =
+ func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
+ static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
+ .raw;
+ FuncReturn(system, retval);
+}
+
+// Used by SendSyncRequest32, ArbitrateUnlock32
template <ResultCode func(Core::System&, u32)>
void SvcWrap32(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw);
}
+// Used by CreateTransferMemory32
+template <ResultCode func(Core::System&, Handle*, u32, u32, u32)>
+void SvcWrap32(Core::System& system) {
+ Handle handle = 0;
+ const u32 retval =
+ func(system, &handle, Param32(system, 1), Param32(system, 2), Param32(system, 3)).raw;
+ system.CurrentArmInterface().SetReg(1, handle);
+ FuncReturn(system, retval);
+}
+
// Used by WaitSynchronization32
template <ResultCode func(Core::System&, u32, u32, s32, u32, Handle*)>
void SvcWrap32(Core::System& system) {
diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp
index dc37fad1a..851b702a5 100644
--- a/src/core/hle/kernel/synchronization.cpp
+++ b/src/core/hle/kernel/synchronization.cpp
@@ -10,78 +10,107 @@
#include "core/hle/kernel/synchronization.h"
#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/time_manager.h"
namespace Kernel {
-/// Default thread wakeup callback for WaitSynchronization
-static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
- std::shared_ptr<SynchronizationObject> object,
- std::size_t index) {
- ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch);
-
- if (reason == ThreadWakeupReason::Timeout) {
- thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
- return true;
- }
-
- ASSERT(reason == ThreadWakeupReason::Signal);
- thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
- thread->SetWaitSynchronizationOutput(static_cast<u32>(index));
- return true;
-}
-
Synchronization::Synchronization(Core::System& system) : system{system} {}
void Synchronization::SignalObject(SynchronizationObject& obj) const {
+ auto& kernel = system.Kernel();
+ SchedulerLock lock(kernel);
+ auto& time_manager = kernel.TimeManager();
if (obj.IsSignaled()) {
- obj.WakeupAllWaitingThreads();
+ for (auto thread : obj.GetWaitingThreads()) {
+ if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) {
+ if (thread->GetStatus() != ThreadStatus::WaitHLEEvent) {
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch);
+ ASSERT(thread->IsWaitingSync());
+ }
+ thread->SetSynchronizationResults(&obj, RESULT_SUCCESS);
+ thread->ResumeFromWait();
+ }
+ }
+ obj.ClearWaitingThreads();
}
}
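
A compact model of the new SignalObject() flow for reference; WaiterSketch is a hypothetical stand-in for Kernel::Thread and its Paused scheduling status:

#include <memory>
#include <vector>

struct WaiterSketch {
    bool paused = true;
    const void* wake_object = nullptr;
};

// Mirrors the new SignalObject() flow: hand every paused waiter the signaled
// object as its wake-up result, resume it, then clear the whole waiter list
// (this replaces the old priority-ordered WakeupAllWaitingThreads() walk).
void SignalObjectSketch(const void* object,
                        std::vector<std::shared_ptr<WaiterSketch>>& waiters) {
    for (const auto& waiter : waiters) {
        if (waiter->paused) {
            waiter->wake_object = object; // ~ SetSynchronizationResults(&obj, RESULT_SUCCESS)
            waiter->paused = false;       // ~ ResumeFromWait()
        }
    }
    waiters.clear(); // ~ obj.ClearWaitingThreads()
}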
std::pair<ResultCode, Handle> Synchronization::WaitFor(
std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) {
+ auto& kernel = system.Kernel();
auto* const thread = system.CurrentScheduler().GetCurrentThread();
- // Find the first object that is acquirable in the provided list of objects
- const auto itr = std::find_if(sync_objects.begin(), sync_objects.end(),
- [thread](const std::shared_ptr<SynchronizationObject>& object) {
- return object->IsSignaled();
- });
-
- if (itr != sync_objects.end()) {
- // We found a ready object, acquire it and set the result value
- SynchronizationObject* object = itr->get();
- object->Acquire(thread);
- const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
- return {RESULT_SUCCESS, index};
+ Handle event_handle = InvalidHandle;
+ {
+ SchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
+ const auto itr =
+ std::find_if(sync_objects.begin(), sync_objects.end(),
+ [thread](const std::shared_ptr<SynchronizationObject>& object) {
+ return object->IsSignaled();
+ });
+
+ if (itr != sync_objects.end()) {
+ // We found a ready object, acquire it and set the result value
+ SynchronizationObject* object = itr->get();
+ object->Acquire(thread);
+ const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
+ lock.CancelSleep();
+ return {RESULT_SUCCESS, index};
+ }
+
+ if (nano_seconds == 0) {
+ lock.CancelSleep();
+ return {RESULT_TIMEOUT, InvalidHandle};
+ }
+
+ if (thread->IsPendingTermination()) {
+ lock.CancelSleep();
+ return {ERR_THREAD_TERMINATING, InvalidHandle};
+ }
+
+ if (thread->IsSyncCancelled()) {
+ thread->SetSyncCancelled(false);
+ lock.CancelSleep();
+ return {ERR_SYNCHRONIZATION_CANCELED, InvalidHandle};
+ }
+
+ for (auto& object : sync_objects) {
+ object->AddWaitingThread(SharedFrom(thread));
+ }
+
+ thread->SetSynchronizationObjects(&sync_objects);
+ thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
+ thread->SetStatus(ThreadStatus::WaitSynch);
+ thread->SetWaitingSync(true);
}
+ thread->SetWaitingSync(false);
- // No objects were ready to be acquired, prepare to suspend the thread.
-
- // If a timeout value of 0 was provided, just return the Timeout error code instead of
- // suspending the thread.
- if (nano_seconds == 0) {
- return {RESULT_TIMEOUT, InvalidHandle};
+ if (event_handle != InvalidHandle) {
+ auto& time_manager = kernel.TimeManager();
+ time_manager.UnscheduleTimeEvent(event_handle);
}
- if (thread->IsSyncCancelled()) {
- thread->SetSyncCancelled(false);
- return {ERR_SYNCHRONIZATION_CANCELED, InvalidHandle};
+ {
+ SchedulerLock lock(kernel);
+ ResultCode signaling_result = thread->GetSignalingResult();
+ SynchronizationObject* signaling_object = thread->GetSignalingObject();
+ thread->SetSynchronizationObjects(nullptr);
+ auto shared_thread = SharedFrom(thread);
+ for (auto& obj : sync_objects) {
+ obj->RemoveWaitingThread(shared_thread);
+ }
+ if (signaling_object != nullptr) {
+ const auto itr = std::find_if(
+ sync_objects.begin(), sync_objects.end(),
+ [signaling_object](const std::shared_ptr<SynchronizationObject>& object) {
+ return object.get() == signaling_object;
+ });
+ ASSERT(itr != sync_objects.end());
+ signaling_object->Acquire(thread);
+ const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
+ return {signaling_result, index};
+ }
+ return {signaling_result, -1};
}
-
- for (auto& object : sync_objects) {
- object->AddWaitingThread(SharedFrom(thread));
- }
-
- thread->SetSynchronizationObjects(std::move(sync_objects));
- thread->SetStatus(ThreadStatus::WaitSynch);
-
- // Create an event to wake the thread up after the specified nanosecond delay has passed
- thread->WakeAfterDelay(nano_seconds);
- thread->SetWakeupCallback(DefaultThreadWakeupCallback);
-
- system.PrepareReschedule(thread->GetProcessorID());
-
- return {RESULT_TIMEOUT, InvalidHandle};
}
} // namespace Kernel
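
Both WaitFor() above and the reworked WaitProcessWideKeyAtomic share one shape: register the wait and arm the timeout while holding the scheduler lock, sleep, then re-take the lock to resolve the result. A host-analogue sketch using standard primitives in place of SchedulerLockAndSleep (illustrative only):

#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <vector>

struct SyncObjectSketch {
    bool signaled = false;
};

// Returns the index of the first signaled object, or -1 on timeout.
int WaitForSketch(std::mutex& guard, std::condition_variable& cv,
                  std::vector<SyncObjectSketch*>& objects,
                  std::chrono::nanoseconds timeout) {
    std::unique_lock lock(guard); // plays the role of SchedulerLockAndSleep
    const auto find_signaled = [&objects] {
        for (std::size_t i = 0; i < objects.size(); ++i) {
            if (objects[i]->signaled) {
                return static_cast<int>(i);
            }
        }
        return -1;
    };
    const int ready = find_signaled();
    if (ready >= 0) {
        return ready; // fast path: an object was already signaled
    }
    if (timeout.count() == 0) {
        return -1; // zero timeout: report the timeout immediately
    }
    // Sleep until a signaler wakes us or the timeout fires, then resolve the
    // result while the lock is held again, as the second locked block above
    // does with GetSignalingResult()/GetSignalingObject().
    cv.wait_for(lock, timeout, [&] { return find_signaled() >= 0; });
    return find_signaled();
}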
diff --git a/src/core/hle/kernel/synchronization_object.cpp b/src/core/hle/kernel/synchronization_object.cpp
index 43f3eef18..ba4d39157 100644
--- a/src/core/hle/kernel/synchronization_object.cpp
+++ b/src/core/hle/kernel/synchronization_object.cpp
@@ -38,68 +38,8 @@ void SynchronizationObject::RemoveWaitingThread(std::shared_ptr<Thread> thread)
waiting_threads.erase(itr);
}
-std::shared_ptr<Thread> SynchronizationObject::GetHighestPriorityReadyThread() const {
- Thread* candidate = nullptr;
- u32 candidate_priority = THREADPRIO_LOWEST + 1;
-
- for (const auto& thread : waiting_threads) {
- const ThreadStatus thread_status = thread->GetStatus();
-
- // The list of waiting threads must not contain threads that are not waiting to be awakened.
- ASSERT_MSG(thread_status == ThreadStatus::WaitSynch ||
- thread_status == ThreadStatus::WaitHLEEvent,
- "Inconsistent thread statuses in waiting_threads");
-
- if (thread->GetPriority() >= candidate_priority)
- continue;
-
- if (ShouldWait(thread.get()))
- continue;
-
- candidate = thread.get();
- candidate_priority = thread->GetPriority();
- }
-
- return SharedFrom(candidate);
-}
-
-void SynchronizationObject::WakeupWaitingThread(std::shared_ptr<Thread> thread) {
- ASSERT(!ShouldWait(thread.get()));
-
- if (!thread) {
- return;
- }
-
- if (thread->IsSleepingOnWait()) {
- for (const auto& object : thread->GetSynchronizationObjects()) {
- ASSERT(!object->ShouldWait(thread.get()));
- object->Acquire(thread.get());
- }
- } else {
- Acquire(thread.get());
- }
-
- const std::size_t index = thread->GetSynchronizationObjectIndex(SharedFrom(this));
-
- thread->ClearSynchronizationObjects();
-
- thread->CancelWakeupTimer();
-
- bool resume = true;
- if (thread->HasWakeupCallback()) {
- resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Signal, thread, SharedFrom(this),
- index);
- }
- if (resume) {
- thread->ResumeFromWait();
- kernel.PrepareReschedule(thread->GetProcessorID());
- }
-}
-
-void SynchronizationObject::WakeupAllWaitingThreads() {
- while (auto thread = GetHighestPriorityReadyThread()) {
- WakeupWaitingThread(thread);
- }
+void SynchronizationObject::ClearWaitingThreads() {
+ waiting_threads.clear();
}
const std::vector<std::shared_ptr<Thread>>& SynchronizationObject::GetWaitingThreads() const {
diff --git a/src/core/hle/kernel/synchronization_object.h b/src/core/hle/kernel/synchronization_object.h
index 741c31faf..f89b24204 100644
--- a/src/core/hle/kernel/synchronization_object.h
+++ b/src/core/hle/kernel/synchronization_object.h
@@ -12,6 +12,7 @@
namespace Kernel {
class KernelCore;
+class Synchronization;
class Thread;
/// Class that represents a Kernel object that a thread can be waiting on
@@ -49,24 +50,11 @@ public:
*/
void RemoveWaitingThread(std::shared_ptr<Thread> thread);
- /**
- * Wake up all threads waiting on this object that can be awoken, in priority order,
- * and set the synchronization result and output of the thread.
- */
- void WakeupAllWaitingThreads();
-
- /**
- * Wakes up a single thread waiting on this object.
- * @param thread Thread that is waiting on this object to wakeup.
- */
- void WakeupWaitingThread(std::shared_ptr<Thread> thread);
-
- /// Obtains the highest priority thread that is ready to run from this object's waiting list.
- std::shared_ptr<Thread> GetHighestPriorityReadyThread() const;
-
/// Get a const reference to the waiting threads list for debug use
const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const;
+ void ClearWaitingThreads();
+
protected:
bool is_signaled{}; // Whether this sync object is signaled
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index db7f379ac..2b1092697 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -9,12 +9,21 @@
#include "common/assert.h"
#include "common/common_types.h"
+#include "common/fiber.h"
#include "common/logging/log.h"
#include "common/thread_queue_list.h"
#include "core/arm/arm_interface.h"
+#ifdef ARCHITECTURE_x86_64
+#include "core/arm/dynarmic/arm_dynarmic_32.h"
+#include "core/arm/dynarmic/arm_dynarmic_64.h"
+#endif
+#include "core/arm/cpu_interrupt_handler.h"
+#include "core/arm/exclusive_monitor.h"
+#include "core/arm/unicorn/arm_unicorn.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
+#include "core/cpu_manager.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
@@ -23,6 +32,7 @@
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
#include "core/memory.h"
@@ -44,46 +54,26 @@ Thread::Thread(KernelCore& kernel) : SynchronizationObject{kernel} {}
Thread::~Thread() = default;
void Thread::Stop() {
- // Cancel any outstanding wakeup events for this thread
- Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(),
- global_handle);
- kernel.GlobalHandleTable().Close(global_handle);
- global_handle = 0;
- SetStatus(ThreadStatus::Dead);
- Signal();
-
- // Clean up any dangling references in objects that this thread was waiting for
- for (auto& wait_object : wait_objects) {
- wait_object->RemoveWaitingThread(SharedFrom(this));
- }
- wait_objects.clear();
-
- owner_process->UnregisterThread(this);
-
- // Mark the TLS slot in the thread's page as free.
- owner_process->FreeTLSRegion(tls_address);
-}
-
-void Thread::WakeAfterDelay(s64 nanoseconds) {
- // Don't schedule a wakeup if the thread wants to wait forever
- if (nanoseconds == -1)
- return;
+ {
+ SchedulerLock lock(kernel);
+ SetStatus(ThreadStatus::Dead);
+ Signal();
+ kernel.GlobalHandleTable().Close(global_handle);
- // This function might be called from any thread so we have to be cautious and use the
- // thread-safe version of ScheduleEvent.
- const s64 cycles = Core::Timing::nsToCycles(std::chrono::nanoseconds{nanoseconds});
- Core::System::GetInstance().CoreTiming().ScheduleEvent(
- cycles, kernel.ThreadWakeupCallbackEventType(), global_handle);
-}
+ if (owner_process) {
+ owner_process->UnregisterThread(this);
-void Thread::CancelWakeupTimer() {
- Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(),
- global_handle);
+ // Mark the TLS slot in the thread's page as free.
+ owner_process->FreeTLSRegion(tls_address);
+ }
+ arm_interface.reset();
+ has_exited = true;
+ }
+ global_handle = 0;
}
void Thread::ResumeFromWait() {
- ASSERT_MSG(wait_objects.empty(), "Thread is waking up while waiting for objects");
-
+ SchedulerLock lock(kernel);
switch (status) {
case ThreadStatus::Paused:
case ThreadStatus::WaitSynch:
@@ -99,7 +89,7 @@ void Thread::ResumeFromWait() {
case ThreadStatus::Ready:
// The thread's wakeup callback must have already been cleared when the thread was first
// awoken.
- ASSERT(wakeup_callback == nullptr);
+ ASSERT(hle_callback == nullptr);
// If the thread is waiting on multiple wait objects, it might be awoken more than once
// before actually resuming. We can ignore subsequent wakeups if the thread status has
// already been set to ThreadStatus::Ready.
@@ -115,24 +105,31 @@ void Thread::ResumeFromWait() {
return;
}
- wakeup_callback = nullptr;
+ SetStatus(ThreadStatus::Ready);
+}
+
+void Thread::OnWakeUp() {
+ SchedulerLock lock(kernel);
- if (activity == ThreadActivity::Paused) {
- SetStatus(ThreadStatus::Paused);
- return;
- }
+ SetStatus(ThreadStatus::Ready);
+}
+ResultCode Thread::Start() {
+ SchedulerLock lock(kernel);
SetStatus(ThreadStatus::Ready);
+ return RESULT_SUCCESS;
}
void Thread::CancelWait() {
- if (GetSchedulingStatus() != ThreadSchedStatus::Paused) {
+ SchedulerLock lock(kernel);
+ if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) {
is_sync_cancelled = true;
return;
}
+ // TODO(Blinkhawk): Implement cancellation of server sessions
is_sync_cancelled = false;
- SetWaitSynchronizationResult(ERR_SYNCHRONIZATION_CANCELED);
- ResumeFromWait();
+ SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED);
+ SetStatus(ThreadStatus::Ready);
}
static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
@@ -153,12 +150,29 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context,
context.fpcr = 0;
}
-ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::string name,
- VAddr entry_point, u32 priority, u64 arg,
- s32 processor_id, VAddr stack_top,
- Process& owner_process) {
+std::shared_ptr<Common::Fiber>& Thread::GetHostContext() {
+ return host_context;
+}
+
+ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
+ std::string name, VAddr entry_point, u32 priority,
+ u64 arg, s32 processor_id, VAddr stack_top,
+ Process* owner_process) {
+ std::function<void(void*)> init_func = system.GetCpuManager().GetGuestThreadStartFunc();
+ void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
+ return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top,
+ owner_process, std::move(init_func), init_func_parameter);
+}
+
+ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
+ std::string name, VAddr entry_point, u32 priority,
+ u64 arg, s32 processor_id, VAddr stack_top,
+ Process* owner_process,
+ std::function<void(void*)>&& thread_start_func,
+ void* thread_start_parameter) {
+ auto& kernel = system.Kernel();
// Check if priority is in range. Lowest priority -> highest priority id.
- if (priority > THREADPRIO_LOWEST) {
+ if (priority > THREADPRIO_LOWEST && ((type_flags & THREADTYPE_IDLE) == 0)) {
LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority);
return ERR_INVALID_THREAD_PRIORITY;
}
@@ -168,11 +182,12 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::strin
return ERR_INVALID_PROCESSOR_ID;
}
- auto& system = Core::System::GetInstance();
- if (!system.Memory().IsValidVirtualAddress(owner_process, entry_point)) {
- LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point);
- // TODO (bunnei): Find the correct error code to use here
- return RESULT_UNKNOWN;
+ if (owner_process) {
+ if (!system.Memory().IsValidVirtualAddress(*owner_process, entry_point)) {
+ LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point);
+ // TODO (bunnei): Find the correct error code to use here
+ return RESULT_UNKNOWN;
+ }
}
std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel);
@@ -183,51 +198,82 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::strin
thread->stack_top = stack_top;
thread->tpidr_el0 = 0;
thread->nominal_priority = thread->current_priority = priority;
- thread->last_running_ticks = system.CoreTiming().GetTicks();
+ thread->last_running_ticks = 0;
thread->processor_id = processor_id;
thread->ideal_core = processor_id;
thread->affinity_mask = 1ULL << processor_id;
- thread->wait_objects.clear();
+ thread->wait_objects = nullptr;
thread->mutex_wait_address = 0;
thread->condvar_wait_address = 0;
thread->wait_handle = 0;
thread->name = std::move(name);
thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap();
- thread->owner_process = &owner_process;
- auto& scheduler = kernel.GlobalScheduler();
- scheduler.AddThread(thread);
- thread->tls_address = thread->owner_process->CreateTLSRegion();
-
- thread->owner_process->RegisterThread(thread.get());
+ thread->owner_process = owner_process;
+ thread->type = type_flags;
+ if ((type_flags & THREADTYPE_IDLE) == 0) {
+ auto& scheduler = kernel.GlobalScheduler();
+ scheduler.AddThread(thread);
+ }
+ if (owner_process) {
+ thread->tls_address = thread->owner_process->CreateTLSRegion();
+ thread->owner_process->RegisterThread(thread.get());
+ } else {
+ thread->tls_address = 0;
+ }
+ // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
+ // to initialize the context
+ thread->arm_interface.reset();
+ if ((type_flags & THREADTYPE_HLE) == 0) {
+#ifdef ARCHITECTURE_x86_64
+ if (owner_process && !owner_process->Is64BitProcess()) {
+ thread->arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
+ system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(),
+ processor_id);
+ } else {
+ thread->arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
+ system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(),
+ processor_id);
+ }
- ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top),
- static_cast<u32>(entry_point), static_cast<u32>(arg));
- ResetThreadContext64(thread->context_64, stack_top, entry_point, arg);
+#else
+ if (owner_process && !owner_process->Is64BitProcess()) {
+ thread->arm_interface = std::make_unique<Core::ARM_Unicorn>(
+ system, kernel.Interrupts(), kernel.IsMulticore(), Core::ARM_Unicorn::Arch::AArch32,
+ processor_id);
+ } else {
+ thread->arm_interface = std::make_unique<Core::ARM_Unicorn>(
+ system, kernel.Interrupts(), kernel.IsMulticore(), Core::ARM_Unicorn::Arch::AArch64,
+ processor_id);
+ }
+ LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
+#endif
+ ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top),
+ static_cast<u32>(entry_point), static_cast<u32>(arg));
+ ResetThreadContext64(thread->context_64, stack_top, entry_point, arg);
+ }
+ thread->host_context =
+ std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
}
void Thread::SetPriority(u32 priority) {
+ SchedulerLock lock(kernel);
ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
"Invalid priority value.");
nominal_priority = priority;
UpdatePriority();
}
-void Thread::SetWaitSynchronizationResult(ResultCode result) {
- context_32.cpu_registers[0] = result.raw;
- context_64.cpu_registers[0] = result.raw;
-}
-
-void Thread::SetWaitSynchronizationOutput(s32 output) {
- context_32.cpu_registers[1] = output;
- context_64.cpu_registers[1] = output;
+void Thread::SetSynchronizationResults(SynchronizationObject* object, ResultCode result) {
+ signaling_object = object;
+ signaling_result = result;
}
s32 Thread::GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const {
- ASSERT_MSG(!wait_objects.empty(), "Thread is not waiting for anything");
- const auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object);
- return static_cast<s32>(std::distance(match, wait_objects.rend()) - 1);
+ ASSERT_MSG(!wait_objects->empty(), "Thread is not waiting for anything");
+ const auto match = std::find(wait_objects->rbegin(), wait_objects->rend(), object);
+ return static_cast<s32>(std::distance(match, wait_objects->rend()) - 1);
}
VAddr Thread::GetCommandBufferAddress() const {
@@ -236,6 +282,14 @@ VAddr Thread::GetCommandBufferAddress() const {
return GetTLSAddress() + command_header_offset;
}
+Core::ARM_Interface& Thread::ArmInterface() {
+ return *arm_interface;
+}
+
+const Core::ARM_Interface& Thread::ArmInterface() const {
+ return *arm_interface;
+}
+
void Thread::SetStatus(ThreadStatus new_status) {
if (new_status == status) {
return;
@@ -257,10 +311,6 @@ void Thread::SetStatus(ThreadStatus new_status) {
break;
}
- if (status == ThreadStatus::Running) {
- last_running_ticks = Core::System::GetInstance().CoreTiming().GetTicks();
- }
-
status = new_status;
}
@@ -341,75 +391,116 @@ void Thread::UpdatePriority() {
lock_owner->UpdatePriority();
}
-void Thread::ChangeCore(u32 core, u64 mask) {
- SetCoreAndAffinityMask(core, mask);
-}
-
bool Thread::AllSynchronizationObjectsReady() const {
- return std::none_of(wait_objects.begin(), wait_objects.end(),
+ return std::none_of(wait_objects->begin(), wait_objects->end(),
[this](const std::shared_ptr<SynchronizationObject>& object) {
return object->ShouldWait(this);
});
}
-bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
- std::shared_ptr<SynchronizationObject> object,
- std::size_t index) {
- ASSERT(wakeup_callback);
- return wakeup_callback(reason, std::move(thread), std::move(object), index);
+bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
+ ASSERT(hle_callback);
+ return hle_callback(std::move(thread));
}
-void Thread::SetActivity(ThreadActivity value) {
- activity = value;
+ResultCode Thread::SetActivity(ThreadActivity value) {
+ SchedulerLock lock(kernel);
+
+ auto sched_status = GetSchedulingStatus();
+
+ if (sched_status != ThreadSchedStatus::Runnable && sched_status != ThreadSchedStatus::Paused) {
+ return ERR_INVALID_STATE;
+ }
+
+ if (IsPendingTermination()) {
+ return RESULT_SUCCESS;
+ }
if (value == ThreadActivity::Paused) {
- // Set status if not waiting
- if (status == ThreadStatus::Ready || status == ThreadStatus::Running) {
- SetStatus(ThreadStatus::Paused);
- kernel.PrepareReschedule(processor_id);
+ if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) != 0) {
+ return ERR_INVALID_STATE;
+ }
+ AddSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
+ } else {
+ if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) == 0) {
+ return ERR_INVALID_STATE;
}
- } else if (status == ThreadStatus::Paused) {
- // Ready to reschedule
- ResumeFromWait();
+ RemoveSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
}
+ return RESULT_SUCCESS;
}
-void Thread::Sleep(s64 nanoseconds) {
- // Sleep current thread and check for next thread to schedule
- SetStatus(ThreadStatus::WaitSleep);
+ResultCode Thread::Sleep(s64 nanoseconds) {
+ Handle event_handle{};
+ {
+ SchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
+ SetStatus(ThreadStatus::WaitSleep);
+ }
- // Create an event to wake the thread up after the specified nanosecond delay has passed
- WakeAfterDelay(nanoseconds);
+ if (event_handle != InvalidHandle) {
+ auto& time_manager = kernel.TimeManager();
+ time_manager.UnscheduleTimeEvent(event_handle);
+ }
+ return RESULT_SUCCESS;
+}
+
+std::pair<ResultCode, bool> Thread::YieldSimple() {
+ bool is_redundant = false;
+ {
+ SchedulerLock lock(kernel);
+ is_redundant = kernel.GlobalScheduler().YieldThread(this);
+ }
+ return {RESULT_SUCCESS, is_redundant};
+}
+
+std::pair<ResultCode, bool> Thread::YieldAndBalanceLoad() {
+ bool is_redundant = false;
+ {
+ SchedulerLock lock(kernel);
+ is_redundant = kernel.GlobalScheduler().YieldThreadAndBalanceLoad(this);
+ }
+ return {RESULT_SUCCESS, is_redundant};
}
-bool Thread::YieldSimple() {
- auto& scheduler = kernel.GlobalScheduler();
- return scheduler.YieldThread(this);
+std::pair<ResultCode, bool> Thread::YieldAndWaitForLoadBalancing() {
+ bool is_redundant = false;
+ {
+ SchedulerLock lock(kernel);
+ is_redundant = kernel.GlobalScheduler().YieldThreadAndWaitForLoadBalancing(this);
+ }
+ return {RESULT_SUCCESS, is_redundant};
}
-bool Thread::YieldAndBalanceLoad() {
- auto& scheduler = kernel.GlobalScheduler();
- return scheduler.YieldThreadAndBalanceLoad(this);
+void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
+ const u32 old_state = scheduling_state;
+ pausing_state |= static_cast<u32>(flag);
+ const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
+ scheduling_state = base_scheduling | pausing_state;
+ kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
}
-bool Thread::YieldAndWaitForLoadBalancing() {
- auto& scheduler = kernel.GlobalScheduler();
- return scheduler.YieldThreadAndWaitForLoadBalancing(this);
+void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
+ const u32 old_state = scheduling_state;
+ pausing_state &= ~static_cast<u32>(flag);
+ const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
+ scheduling_state = base_scheduling | pausing_state;
+ kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
}
void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
- const u32 old_flags = scheduling_state;
+ const u32 old_state = scheduling_state;
scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
static_cast<u32>(new_status);
- AdjustSchedulingOnStatus(old_flags);
+ kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
}
void Thread::SetCurrentPriority(u32 new_priority) {
const u32 old_priority = std::exchange(current_priority, new_priority);
- AdjustSchedulingOnPriority(old_priority);
+ kernel.GlobalScheduler().AdjustSchedulingOnPriority(this, old_priority);
}
ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
+ SchedulerLock lock(kernel);
const auto HighestSetCore = [](u64 mask, u32 max_cores) {
for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
if (((mask >> core) & 1) != 0) {
@@ -443,111 +534,12 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
processor_id = ideal_core;
}
}
- AdjustSchedulingOnAffinity(old_affinity_mask, old_core);
+ kernel.GlobalScheduler().AdjustSchedulingOnAffinity(this, old_affinity_mask, old_core);
}
}
return RESULT_SUCCESS;
}
-void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
- if (old_flags == scheduling_state) {
- return;
- }
-
- auto& scheduler = kernel.GlobalScheduler();
- if (static_cast<ThreadSchedStatus>(old_flags & static_cast<u32>(ThreadSchedMasks::LowMask)) ==
- ThreadSchedStatus::Runnable) {
- // In this case the thread was running, now it's pausing/exitting
- if (processor_id >= 0) {
- scheduler.Unschedule(current_priority, static_cast<u32>(processor_id), this);
- }
-
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
- scheduler.Unsuggest(current_priority, core, this);
- }
- }
- } else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) {
- // The thread is now set to running from being stopped
- if (processor_id >= 0) {
- scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this);
- }
-
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
- scheduler.Suggest(current_priority, core, this);
- }
- }
- }
-
- scheduler.SetReselectionPending();
-}
-
-void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
- if (GetSchedulingStatus() != ThreadSchedStatus::Runnable) {
- return;
- }
- auto& scheduler = kernel.GlobalScheduler();
- if (processor_id >= 0) {
- scheduler.Unschedule(old_priority, static_cast<u32>(processor_id), this);
- }
-
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
- scheduler.Unsuggest(old_priority, core, this);
- }
- }
-
- // Add thread to the new priority queues.
- Thread* current_thread = GetCurrentThread();
-
- if (processor_id >= 0) {
- if (current_thread == this) {
- scheduler.SchedulePrepend(current_priority, static_cast<u32>(processor_id), this);
- } else {
- scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this);
- }
- }
-
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
- scheduler.Suggest(current_priority, core, this);
- }
- }
-
- scheduler.SetReselectionPending();
-}
-
-void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
- auto& scheduler = kernel.GlobalScheduler();
- if (GetSchedulingStatus() != ThreadSchedStatus::Runnable ||
- current_priority >= THREADPRIO_COUNT) {
- return;
- }
-
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (((old_affinity_mask >> core) & 1) != 0) {
- if (core == static_cast<u32>(old_core)) {
- scheduler.Unschedule(current_priority, core, this);
- } else {
- scheduler.Unsuggest(current_priority, core, this);
- }
- }
- }
-
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (((affinity_mask >> core) & 1) != 0) {
- if (core == static_cast<u32>(processor_id)) {
- scheduler.Schedule(current_priority, core, this);
- } else {
- scheduler.Suggest(current_priority, core, this);
- }
- }
- }
-
- scheduler.SetReselectionPending();
-}
-
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
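
Thread::Sleep above shows the shape every timed wait now takes: SchedulerLockAndSleep arms a one-shot time event while the scheduler lock is held, and the caller unschedules that event once it resumes. A condensed restatement of that function, hedged in that it uses the kernel's own types and only compiles against these headers:

    ResultCode TimedWait(KernelCore& kernel, Thread* thread, s64 nanoseconds) {
        Handle event_handle{InvalidHandle};
        {
            // RAII: locks the scheduler and schedules a wakeup after 'nanoseconds'.
            SchedulerLockAndSleep lock(kernel, event_handle, thread, nanoseconds);
            thread->SetStatus(ThreadStatus::WaitSleep);
        } // Releasing the lock publishes the status change and arms the timer.
        if (event_handle != InvalidHandle) {
            // The thread has resumed; drop the (possibly already fired) time event.
            kernel.TimeManager().UnscheduleTimeEvent(event_handle);
        }
        return RESULT_SUCCESS;
    }
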
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 23fdef8a4..c0342c462 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -6,26 +6,47 @@
#include <functional>
#include <string>
+#include <utility>
#include <vector>
#include "common/common_types.h"
+#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/result.h"
+namespace Common {
+class Fiber;
+}
+
+namespace Core {
+class ARM_Interface;
+class System;
+} // namespace Core
+
namespace Kernel {
+class GlobalScheduler;
class KernelCore;
class Process;
class Scheduler;
enum ThreadPriority : u32 {
- THREADPRIO_HIGHEST = 0, ///< Highest thread priority
- THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps
- THREADPRIO_DEFAULT = 44, ///< Default thread priority for userland apps
- THREADPRIO_LOWEST = 63, ///< Lowest thread priority
- THREADPRIO_COUNT = 64, ///< Total number of possible thread priorities.
+ THREADPRIO_HIGHEST = 0, ///< Highest thread priority
+ THREADPRIO_MAX_CORE_MIGRATION = 2, ///< Highest priority for a core migration
+ THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps
+ THREADPRIO_DEFAULT = 44, ///< Default thread priority for userland apps
+ THREADPRIO_LOWEST = 63, ///< Lowest thread priority
+ THREADPRIO_COUNT = 64, ///< Total number of possible thread priorities.
+};
+
+enum ThreadType : u32 {
+ THREADTYPE_USER = 0x1,
+ THREADTYPE_KERNEL = 0x2,
+ THREADTYPE_HLE = 0x4,
+ THREADTYPE_IDLE = 0x8,
+ THREADTYPE_SUSPEND = 0x10,
};
enum ThreadProcessorId : s32 {
@@ -107,26 +128,45 @@ public:
using ThreadSynchronizationObjects = std::vector<std::shared_ptr<SynchronizationObject>>;
- using WakeupCallback =
- std::function<bool(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
- std::shared_ptr<SynchronizationObject> object, std::size_t index)>;
+ using HLECallback = std::function<bool(std::shared_ptr<Thread> thread)>;
+
+ /**
+ * Creates and returns a new thread. The new thread is immediately scheduled.
+ * @param system The instance of the whole system
+ * @param type_flags Flags that describe the kind of thread (user, kernel, HLE, idle, suspend)
+ * @param name The friendly name desired for the thread
+ * @param entry_point The address at which the thread should start execution
+ * @param priority The thread's priority
+ * @param arg User data to pass to the thread
+ * @param processor_id The ID of the processor on which the thread is desired to run
+ * @param stack_top The address of the thread's stack top
+ * @param owner_process The parent process for the thread; if null, the thread is a kernel thread
+ * @return A shared pointer to the newly created thread
+ */
+ static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags,
+ std::string name, VAddr entry_point,
+ u32 priority, u64 arg, s32 processor_id,
+ VAddr stack_top, Process* owner_process);
/**
* Creates and returns a new thread. The new thread is immediately scheduled
- * @param kernel The kernel instance this thread will be created under.
+ * @param system The instance of the whole system
* @param name The friendly name desired for the thread
* @param entry_point The address at which the thread should start execution
* @param priority The thread's priority
* @param arg User data to pass to the thread
* @param processor_id The ID of the processor on which the thread is desired to run
* @param stack_top The address of the thread's stack top
- * @param owner_process The parent process for the thread
+ * @param owner_process The parent process for the thread; if null, the thread is a kernel thread
+ * @param thread_start_func The function at which the host context will start execution
+ * @param thread_start_parameter The parameter which will be passed to the host context on init
* @return A shared pointer to the newly created thread
*/
- static ResultVal<std::shared_ptr<Thread>> Create(KernelCore& kernel, std::string name,
- VAddr entry_point, u32 priority, u64 arg,
- s32 processor_id, VAddr stack_top,
- Process& owner_process);
+ static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags,
+ std::string name, VAddr entry_point,
+ u32 priority, u64 arg, s32 processor_id,
+ VAddr stack_top, Process* owner_process,
+ std::function<void(void*)>&& thread_start_func,
+ void* thread_start_parameter);
std::string GetName() const override {
return name;
@@ -181,7 +221,7 @@ public:
void UpdatePriority();
/// Changes the core that the thread is running or scheduled to run on.
- void ChangeCore(u32 core, u64 mask);
+ ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
/**
* Gets the thread's thread ID
@@ -194,6 +234,10 @@ public:
/// Resumes a thread from waiting
void ResumeFromWait();
+ void OnWakeUp();
+
+ ResultCode Start();
+
/// Cancels a waiting operation that this thread may or may not be within.
///
/// When the thread is within a waiting state, this will set the thread's
@@ -202,26 +246,19 @@ public:
///
void CancelWait();
- /**
- * Schedules an event to wake up the specified thread after the specified delay
- * @param nanoseconds The time this thread will be allowed to sleep for
- */
- void WakeAfterDelay(s64 nanoseconds);
+ void SetSynchronizationResults(SynchronizationObject* object, ResultCode result);
- /// Cancel any outstanding wakeup events for this thread
- void CancelWakeupTimer();
+ Core::ARM_Interface& ArmInterface();
- /**
- * Sets the result after the thread awakens (from svcWaitSynchronization)
- * @param result Value to set to the returned result
- */
- void SetWaitSynchronizationResult(ResultCode result);
+ const Core::ARM_Interface& ArmInterface() const;
- /**
- * Sets the output parameter value after the thread awakens (from svcWaitSynchronization)
- * @param output Value to set to the output parameter
- */
- void SetWaitSynchronizationOutput(s32 output);
+ SynchronizationObject* GetSignalingObject() const {
+ return signaling_object;
+ }
+
+ ResultCode GetSignalingResult() const {
+ return signaling_result;
+ }
/**
* Retrieves the index that this particular object occupies in the list of objects
@@ -269,11 +306,6 @@ public:
*/
VAddr GetCommandBufferAddress() const;
- /// Returns whether this thread is waiting on objects from a WaitSynchronization call.
- bool IsSleepingOnWait() const {
- return status == ThreadStatus::WaitSynch;
- }
-
ThreadContext32& GetContext32() {
return context_32;
}
@@ -290,6 +322,28 @@ public:
return context_64;
}
+ bool IsHLEThread() const {
+ return (type & THREADTYPE_HLE) != 0;
+ }
+
+ bool IsSuspendThread() const {
+ return (type & THREADTYPE_SUSPEND) != 0;
+ }
+
+ bool IsIdleThread() const {
+ return (type & THREADTYPE_IDLE) != 0;
+ }
+
+ bool WasRunning() const {
+ return was_running;
+ }
+
+ void SetWasRunning(bool value) {
+ was_running = value;
+ }
+
+ std::shared_ptr<Common::Fiber>& GetHostContext();
+
ThreadStatus GetStatus() const {
return status;
}
@@ -325,18 +379,18 @@ public:
}
const ThreadSynchronizationObjects& GetSynchronizationObjects() const {
- return wait_objects;
+ return *wait_objects;
}
- void SetSynchronizationObjects(ThreadSynchronizationObjects objects) {
- wait_objects = std::move(objects);
+ void SetSynchronizationObjects(ThreadSynchronizationObjects* objects) {
+ wait_objects = objects;
}
void ClearSynchronizationObjects() {
- for (const auto& waiting_object : wait_objects) {
+ for (const auto& waiting_object : *wait_objects) {
waiting_object->RemoveWaitingThread(SharedFrom(this));
}
- wait_objects.clear();
+ wait_objects->clear();
}
/// Determines whether all the objects this thread is waiting on are ready.
@@ -386,26 +440,35 @@ public:
arb_wait_address = address;
}
- bool HasWakeupCallback() const {
- return wakeup_callback != nullptr;
+ bool HasHLECallback() const {
+ return hle_callback != nullptr;
}
- void SetWakeupCallback(WakeupCallback callback) {
- wakeup_callback = std::move(callback);
+ void SetHLECallback(HLECallback callback) {
+ hle_callback = std::move(callback);
}
- void InvalidateWakeupCallback() {
- SetWakeupCallback(nullptr);
+ void SetHLETimeEvent(Handle time_event) {
+ hle_time_event = time_event;
}
- /**
- * Invokes the thread's wakeup callback.
- *
- * @pre A valid wakeup callback has been set. Violating this precondition
- * will cause an assertion to trigger.
- */
- bool InvokeWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
- std::shared_ptr<SynchronizationObject> object, std::size_t index);
+ void SetHLESyncObject(SynchronizationObject* object) {
+ hle_object = object;
+ }
+
+ Handle GetHLETimeEvent() const {
+ return hle_time_event;
+ }
+
+ SynchronizationObject* GetHLESyncObject() const {
+ return hle_object;
+ }
+
+ void InvalidateHLECallback() {
+ SetHLECallback(nullptr);
+ }
+
+ bool InvokeHLECallback(std::shared_ptr<Thread> thread);
u32 GetIdealCore() const {
return ideal_core;
@@ -415,23 +478,19 @@ public:
return affinity_mask;
}
- ThreadActivity GetActivity() const {
- return activity;
- }
-
- void SetActivity(ThreadActivity value);
+ ResultCode SetActivity(ThreadActivity value);
/// Sleeps this thread for the given amount of nanoseconds.
- void Sleep(s64 nanoseconds);
+ ResultCode Sleep(s64 nanoseconds);
/// Yields this thread without rebalancing loads.
- bool YieldSimple();
+ std::pair<ResultCode, bool> YieldSimple();
/// Yields this thread and does a load rebalancing.
- bool YieldAndBalanceLoad();
+ std::pair<ResultCode, bool> YieldAndBalanceLoad();
/// Yields this thread and if the core is left idle, loads are rebalanced
- bool YieldAndWaitForLoadBalancing();
+ std::pair<ResultCode, bool> YieldAndWaitForLoadBalancing();
void IncrementYieldCount() {
yield_count++;
@@ -446,6 +505,10 @@ public:
static_cast<u32>(ThreadSchedMasks::LowMask));
}
+ bool IsRunnable() const {
+ return scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable);
+ }
+
bool IsRunning() const {
return is_running;
}
@@ -466,17 +529,67 @@ public:
return global_handle;
}
+ bool IsWaitingForArbitration() const {
+ return waiting_for_arbitration;
+ }
+
+ void WaitForArbitration(bool set) {
+ waiting_for_arbitration = set;
+ }
+
+ bool IsWaitingSync() const {
+ return is_waiting_on_sync;
+ }
+
+ void SetWaitingSync(bool is_waiting) {
+ is_waiting_on_sync = is_waiting;
+ }
+
+ bool IsPendingTermination() const {
+ return will_be_terminated || GetSchedulingStatus() == ThreadSchedStatus::Exited;
+ }
+
+ bool IsPaused() const {
+ return pausing_state != 0;
+ }
+
+ bool IsContinuousOnSVC() const {
+ return is_continuous_on_svc;
+ }
+
+ void SetContinuousOnSVC(bool is_continuous) {
+ is_continuous_on_svc = is_continuous;
+ }
+
+ bool IsPhantomMode() const {
+ return is_phantom_mode;
+ }
+
+ void SetPhantomMode(bool phantom) {
+ is_phantom_mode = phantom;
+ }
+
+ bool HasExited() const {
+ return has_exited;
+ }
+
private:
+ friend class GlobalScheduler;
+ friend class Scheduler;
+
void SetSchedulingStatus(ThreadSchedStatus new_status);
+ void AddSchedulingFlag(ThreadSchedFlags flag);
+ void RemoveSchedulingFlag(ThreadSchedFlags flag);
+
void SetCurrentPriority(u32 new_priority);
- ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
- void AdjustSchedulingOnStatus(u32 old_flags);
- void AdjustSchedulingOnPriority(u32 old_priority);
void AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core);
+ Common::SpinLock context_guard{};
ThreadContext32 context_32{};
ThreadContext64 context_64{};
+ std::unique_ptr<Core::ARM_Interface> arm_interface{};
+ std::shared_ptr<Common::Fiber> host_context{};
u64 thread_id = 0;
@@ -485,6 +598,8 @@ private:
VAddr entry_point = 0;
VAddr stack_top = 0;
+ ThreadType type;
+
/// Nominal thread priority, as set by the emulated application.
/// The nominal priority is the thread priority without priority
/// inheritance taken into account.
@@ -509,7 +624,10 @@ private:
/// Objects that the thread is waiting on, in the same order as they were
/// passed to WaitSynchronization.
- ThreadSynchronizationObjects wait_objects;
+ ThreadSynchronizationObjects* wait_objects;
+
+ SynchronizationObject* signaling_object{};
+ ResultCode signaling_result{RESULT_SUCCESS};
/// List of threads that are waiting for a mutex that is held by this thread.
MutexWaitingThreads wait_mutex_threads;
@@ -526,30 +644,39 @@ private:
/// If waiting for an AddressArbiter, this is the address being waited on.
VAddr arb_wait_address{0};
+ bool waiting_for_arbitration{};
/// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
Handle global_handle = 0;
- /// Callback that will be invoked when the thread is resumed from a waiting state. If the thread
- /// was waiting via WaitSynchronization then the object will be the last object that became
- /// available. In case of a timeout, the object will be nullptr.
- WakeupCallback wakeup_callback;
+ /// Callback for HLE Events
+ HLECallback hle_callback;
+ Handle hle_time_event;
+ SynchronizationObject* hle_object;
Scheduler* scheduler = nullptr;
u32 ideal_core{0xFFFFFFFF};
u64 affinity_mask{0x1};
- ThreadActivity activity = ThreadActivity::Normal;
-
s32 ideal_core_override = -1;
u64 affinity_mask_override = 0x1;
u32 affinity_override_count = 0;
u32 scheduling_state = 0;
+ u32 pausing_state = 0;
bool is_running = false;
+ bool is_waiting_on_sync = false;
bool is_sync_cancelled = false;
+ bool is_continuous_on_svc = false;
+
+ bool will_be_terminated = false;
+ bool is_phantom_mode = false;
+ bool has_exited = false;
+
+ bool was_running = false;
+
std::string name;
};
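
Note that the yield family now returns std::pair<ResultCode, bool> rather than a bare bool, separating failure from a merely redundant yield. Assuming a Thread* named current in scope, a call site would unpack it roughly like this (illustrative; not a specific SVC handler from this change):

    const auto [result, is_redundant] = current->YieldSimple();
    if (result.IsSuccess() && is_redundant) {
        // No other runnable thread was available on this core; the yield was a no-op.
    }
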
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index 21b290468..941305e8e 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -8,30 +8,37 @@
#include "core/core_timing_util.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
namespace Kernel {
-TimeManager::TimeManager(Core::System& system) : system{system} {
+TimeManager::TimeManager(Core::System& system_) : system{system_} {
time_manager_event_type = Core::Timing::CreateEvent(
"Kernel::TimeManagerCallback", [this](u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
+ SchedulerLock lock(system.Kernel());
Handle proper_handle = static_cast<Handle>(thread_handle);
+ if (cancelled_events[proper_handle]) {
+ return;
+ }
std::shared_ptr<Thread> thread =
this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
- thread->ResumeFromWait();
+ thread->OnWakeUp();
});
}
void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds) {
+ event_handle = timetask->GetGlobalHandle();
if (nanoseconds > 0) {
ASSERT(timetask);
- event_handle = timetask->GetGlobalHandle();
- const s64 cycles = Core::Timing::nsToCycles(std::chrono::nanoseconds{nanoseconds});
- system.CoreTiming().ScheduleEvent(cycles, time_manager_event_type, event_handle);
+ ASSERT(timetask->GetStatus() != ThreadStatus::Ready);
+ ASSERT(timetask->GetStatus() != ThreadStatus::WaitMutex);
+ system.CoreTiming().ScheduleEvent(nanoseconds, time_manager_event_type, event_handle);
} else {
event_handle = InvalidHandle;
}
+ cancelled_events[event_handle] = false;
}
void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
@@ -39,6 +46,12 @@ void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
return;
}
system.CoreTiming().UnscheduleEvent(time_manager_event_type, event_handle);
+ cancelled_events[event_handle] = true;
+}
+
+void TimeManager::CancelTimeEvent(Thread* time_task) {
+ Handle event_handle = time_task->GetGlobalHandle();
+ UnscheduleTimeEvent(event_handle);
}
} // namespace Kernel
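
The cancelled_events map above is a tombstone guard: a time event unscheduled after its callback was already dispatched must not wake the thread. The generic shape, standalone and with stand-in types (the real code keys on kernel Handles and runs under the scheduler lock):

    #include <unordered_map>

    using Handle = unsigned int;

    struct CancellationMap {
        std::unordered_map<Handle, bool> cancelled;
        void Arm(Handle h) { cancelled[h] = false; }        // as in ScheduleTimeEvent
        void Cancel(Handle h) { cancelled[h] = true; }      // as in UnscheduleTimeEvent
        bool ShouldFire(Handle h) { return !cancelled[h]; } // checked in the timer callback
    };
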
diff --git a/src/core/hle/kernel/time_manager.h b/src/core/hle/kernel/time_manager.h
index eaec486d1..307a18765 100644
--- a/src/core/hle/kernel/time_manager.h
+++ b/src/core/hle/kernel/time_manager.h
@@ -5,6 +5,7 @@
#pragma once
#include <memory>
+#include <unordered_map>
#include "core/hle/kernel/object.h"
@@ -35,9 +36,12 @@ public:
/// Unschedule an existing time event
void UnscheduleTimeEvent(Handle event_handle);
+ void CancelTimeEvent(Thread* time_task);
+
private:
Core::System& system;
std::shared_ptr<Core::Timing::EventType> time_manager_event_type;
+ std::unordered_map<Handle, bool> cancelled_events;
};
} // namespace Kernel
diff --git a/src/core/hle/service/hid/controllers/debug_pad.cpp b/src/core/hle/service/hid/controllers/debug_pad.cpp
index 1f2131ec8..cb35919e9 100644
--- a/src/core/hle/service/hid/controllers/debug_pad.cpp
+++ b/src/core/hle/service/hid/controllers/debug_pad.cpp
@@ -23,7 +23,7 @@ void Controller_DebugPad::OnRelease() {}
void Controller_DebugPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
std::size_t size) {
- shared_memory.header.timestamp = core_timing.GetTicks();
+ shared_memory.header.timestamp = core_timing.GetCPUTicks();
shared_memory.header.total_entry_count = 17;
if (!IsControllerActivated()) {
diff --git a/src/core/hle/service/hid/controllers/gesture.cpp b/src/core/hle/service/hid/controllers/gesture.cpp
index 6e990dd00..b7b7bfeae 100644
--- a/src/core/hle/service/hid/controllers/gesture.cpp
+++ b/src/core/hle/service/hid/controllers/gesture.cpp
@@ -19,7 +19,7 @@ void Controller_Gesture::OnRelease() {}
void Controller_Gesture::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
std::size_t size) {
- shared_memory.header.timestamp = core_timing.GetTicks();
+ shared_memory.header.timestamp = core_timing.GetCPUTicks();
shared_memory.header.total_entry_count = 17;
if (!IsControllerActivated()) {
diff --git a/src/core/hle/service/hid/controllers/keyboard.cpp b/src/core/hle/service/hid/controllers/keyboard.cpp
index 9a8d354ba..feae89525 100644
--- a/src/core/hle/service/hid/controllers/keyboard.cpp
+++ b/src/core/hle/service/hid/controllers/keyboard.cpp
@@ -21,7 +21,7 @@ void Controller_Keyboard::OnRelease() {}
void Controller_Keyboard::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
std::size_t size) {
- shared_memory.header.timestamp = core_timing.GetTicks();
+ shared_memory.header.timestamp = core_timing.GetCPUTicks();
shared_memory.header.total_entry_count = 17;
if (!IsControllerActivated()) {
diff --git a/src/core/hle/service/hid/controllers/mouse.cpp b/src/core/hle/service/hid/controllers/mouse.cpp
index 93d88ea50..ac40989c5 100644
--- a/src/core/hle/service/hid/controllers/mouse.cpp
+++ b/src/core/hle/service/hid/controllers/mouse.cpp
@@ -19,7 +19,7 @@ void Controller_Mouse::OnRelease() {}
void Controller_Mouse::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
std::size_t size) {
- shared_memory.header.timestamp = core_timing.GetTicks();
+ shared_memory.header.timestamp = core_timing.GetCPUTicks();
shared_memory.header.total_entry_count = 17;
if (!IsControllerActivated()) {
diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp
index 6fbee7efa..ef67ad690 100644
--- a/src/core/hle/service/hid/controllers/npad.cpp
+++ b/src/core/hle/service/hid/controllers/npad.cpp
@@ -328,7 +328,7 @@ void Controller_NPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8*
const auto& last_entry =
main_controller->npad[main_controller->common.last_entry_index];
- main_controller->common.timestamp = core_timing.GetTicks();
+ main_controller->common.timestamp = core_timing.GetCPUTicks();
main_controller->common.last_entry_index =
(main_controller->common.last_entry_index + 1) % 17;
diff --git a/src/core/hle/service/hid/controllers/stubbed.cpp b/src/core/hle/service/hid/controllers/stubbed.cpp
index 9e527d176..e7483bfa2 100644
--- a/src/core/hle/service/hid/controllers/stubbed.cpp
+++ b/src/core/hle/service/hid/controllers/stubbed.cpp
@@ -23,7 +23,7 @@ void Controller_Stubbed::OnUpdate(const Core::Timing::CoreTiming& core_timing, u
}
CommonHeader header{};
- header.timestamp = core_timing.GetTicks();
+ header.timestamp = core_timing.GetCPUTicks();
header.total_entry_count = 17;
header.entry_count = 0;
header.last_entry_index = 0;
diff --git a/src/core/hle/service/hid/controllers/touchscreen.cpp b/src/core/hle/service/hid/controllers/touchscreen.cpp
index 1c6e55566..e326f8f5c 100644
--- a/src/core/hle/service/hid/controllers/touchscreen.cpp
+++ b/src/core/hle/service/hid/controllers/touchscreen.cpp
@@ -22,7 +22,7 @@ void Controller_Touchscreen::OnRelease() {}
void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
std::size_t size) {
- shared_memory.header.timestamp = core_timing.GetTicks();
+ shared_memory.header.timestamp = core_timing.GetCPUTicks();
shared_memory.header.total_entry_count = 17;
if (!IsControllerActivated()) {
@@ -49,7 +49,7 @@ void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timin
touch_entry.diameter_x = Settings::values.touchscreen.diameter_x;
touch_entry.diameter_y = Settings::values.touchscreen.diameter_y;
touch_entry.rotation_angle = Settings::values.touchscreen.rotation_angle;
- const u64 tick = core_timing.GetTicks();
+ const u64 tick = core_timing.GetCPUTicks();
touch_entry.delta_time = tick - last_touch;
last_touch = tick;
touch_entry.finger = Settings::values.touchscreen.finger;
diff --git a/src/core/hle/service/hid/controllers/xpad.cpp b/src/core/hle/service/hid/controllers/xpad.cpp
index 27511b27b..2503ef241 100644
--- a/src/core/hle/service/hid/controllers/xpad.cpp
+++ b/src/core/hle/service/hid/controllers/xpad.cpp
@@ -20,7 +20,7 @@ void Controller_XPad::OnRelease() {}
void Controller_XPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
std::size_t size) {
for (auto& xpad_entry : shared_memory.shared_memory_entries) {
- xpad_entry.header.timestamp = core_timing.GetTicks();
+ xpad_entry.header.timestamp = core_timing.GetCPUTicks();
xpad_entry.header.total_entry_count = 17;
if (!IsControllerActivated()) {
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index 57d5edea7..e9020e0dc 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -39,11 +39,9 @@ namespace Service::HID {
// Updating period for each HID device.
// TODO(ogniK): Find actual polling rate of hid
-constexpr s64 pad_update_ticks = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 66);
-[[maybe_unused]] constexpr s64 accelerometer_update_ticks =
- static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 100);
-[[maybe_unused]] constexpr s64 gyroscope_update_ticks =
- static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 100);
+constexpr s64 pad_update_ticks = static_cast<s64>(1000000000 / 66);
+[[maybe_unused]] constexpr s64 accelerometer_update_ticks = static_cast<s64>(1000000000 / 100);
+[[maybe_unused]] constexpr s64 gyroscope_update_ticks = static_cast<s64>(1000000000 / 100);
constexpr std::size_t SHARED_MEMORY_SIZE = 0x40000;
IAppletResource::IAppletResource(Core::System& system)
@@ -78,8 +76,8 @@ IAppletResource::IAppletResource(Core::System& system)
// Register update callbacks
pad_update_event =
- Core::Timing::CreateEvent("HID::UpdatePadCallback", [this](u64 userdata, s64 cycles_late) {
- UpdateControllers(userdata, cycles_late);
+ Core::Timing::CreateEvent("HID::UpdatePadCallback", [this](u64 userdata, s64 ns_late) {
+ UpdateControllers(userdata, ns_late);
});
// TODO(shinyquagsire23): Other update callbacks? (accel, gyro?)
@@ -109,7 +107,7 @@ void IAppletResource::GetSharedMemoryHandle(Kernel::HLERequestContext& ctx) {
rb.PushCopyObjects(shared_mem);
}
-void IAppletResource::UpdateControllers(u64 userdata, s64 cycles_late) {
+void IAppletResource::UpdateControllers(u64 userdata, s64 ns_late) {
auto& core_timing = system.CoreTiming();
const bool should_reload = Settings::values.is_device_reload_pending.exchange(false);
@@ -120,7 +118,7 @@ void IAppletResource::UpdateControllers(u64 userdata, s64 cycles_late) {
controller->OnUpdate(core_timing, shared_mem->GetPointer(), SHARED_MEMORY_SIZE);
}
- core_timing.ScheduleEvent(pad_update_ticks - cycles_late, pad_update_event);
+ core_timing.ScheduleEvent(pad_update_ticks - ns_late, pad_update_event);
}
class IActiveVibrationDeviceList final : public ServiceFramework<IActiveVibrationDeviceList> {
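
The HID constants above switch from CPU cycles at BASE_CLOCK_RATE to plain nanoseconds, matching the core-timing rework; the polling rate itself is unchanged. A quick standalone arithmetic check of the 66 Hz pad period:

    // 66 Hz polling: one update every ~15.15 ms.
    constexpr long long pad_update_ns = 1'000'000'000LL / 66;
    static_assert(pad_update_ns == 15'151'515);
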
diff --git a/src/core/hle/service/hid/irs.cpp b/src/core/hle/service/hid/irs.cpp
index 36ed6f7da..e82fd031b 100644
--- a/src/core/hle/service/hid/irs.cpp
+++ b/src/core/hle/service/hid/irs.cpp
@@ -98,7 +98,7 @@ void IRS::GetImageTransferProcessorState(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 5};
rb.Push(RESULT_SUCCESS);
- rb.PushRaw<u64>(system.CoreTiming().GetTicks());
+ rb.PushRaw<u64>(system.CoreTiming().GetCPUTicks());
rb.PushRaw<u32>(0);
}
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
index 0d913334e..fba89e7a6 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
@@ -200,8 +200,7 @@ u32 nvhost_ctrl_gpu::GetGpuTime(const std::vector<u8>& input, std::vector<u8>& o
IoctlGetGpuTime params{};
std::memcpy(&params, input.data(), input.size());
- const auto ns = Core::Timing::CyclesToNs(system.CoreTiming().GetTicks());
- params.gpu_time = static_cast<u64_le>(ns.count());
+ params.gpu_time = static_cast<u64_le>(system.CoreTiming().GetGlobalTimeNs().count());
std::memcpy(output.data(), &params, output.size());
return 0;
}
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index 437bc5dee..2f44d3779 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -9,6 +9,7 @@
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/scope_exit.h"
+#include "common/thread.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
@@ -27,8 +28,35 @@
namespace Service::NVFlinger {
-constexpr s64 frame_ticks = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 60);
-constexpr s64 frame_ticks_30fps = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 30);
+constexpr s64 frame_ticks = static_cast<s64>(1000000000 / 60);
+constexpr s64 frame_ticks_30fps = static_cast<s64>(1000000000 / 30);
+
+void NVFlinger::VSyncThread(NVFlinger& nv_flinger) {
+ nv_flinger.SplitVSync();
+}
+
+void NVFlinger::SplitVSync() {
+ system.RegisterHostThread();
+ std::string name = "yuzu:VSyncThread";
+ MicroProfileOnThreadCreate(name.c_str());
+ Common::SetCurrentThreadName(name.c_str());
+ Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
+ s64 delay = 0;
+ while (is_running) {
+ guard->lock();
+ const s64 time_start = system.CoreTiming().GetGlobalTimeNs().count();
+ Compose();
+ const auto ticks = GetNextTicks();
+ const s64 time_end = system.CoreTiming().GetGlobalTimeNs().count();
+ const s64 time_passed = time_end - time_start;
+ const s64 next_time = std::max<s64>(0, ticks - time_passed - delay);
+ guard->unlock();
+ if (next_time > 0) {
+ wait_event->WaitFor(std::chrono::nanoseconds{next_time});
+ }
+ delay = (system.CoreTiming().GetGlobalTimeNs().count() - time_end) - next_time;
+ }
+}
NVFlinger::NVFlinger(Core::System& system) : system(system) {
displays.emplace_back(0, "Default", system);
@@ -36,22 +64,36 @@ NVFlinger::NVFlinger(Core::System& system) : system(system) {
displays.emplace_back(2, "Edid", system);
displays.emplace_back(3, "Internal", system);
displays.emplace_back(4, "Null", system);
+ guard = std::make_shared<std::mutex>();
// Schedule the screen composition events
composition_event =
- Core::Timing::CreateEvent("ScreenComposition", [this](u64 userdata, s64 cycles_late) {
+ Core::Timing::CreateEvent("ScreenComposition", [this](u64 userdata, s64 ns_late) {
+ Lock();
Compose();
- const auto ticks =
- Settings::values.force_30fps_mode ? frame_ticks_30fps : GetNextTicks();
- this->system.CoreTiming().ScheduleEvent(std::max<s64>(0LL, ticks - cycles_late),
+ const auto ticks = GetNextTicks();
+ this->system.CoreTiming().ScheduleEvent(std::max<s64>(0LL, ticks - ns_late),
composition_event);
});
-
- system.CoreTiming().ScheduleEvent(frame_ticks, composition_event);
+ if (system.IsMulticore()) {
+ is_running = true;
+ wait_event = std::make_unique<Common::Event>();
+ vsync_thread = std::make_unique<std::thread>(VSyncThread, std::ref(*this));
+ } else {
+ system.CoreTiming().ScheduleEvent(frame_ticks, composition_event);
+ }
}
NVFlinger::~NVFlinger() {
- system.CoreTiming().UnscheduleEvent(composition_event, 0);
+ if (system.IsMulticore()) {
+ is_running = false;
+ wait_event->Set();
+ vsync_thread->join();
+ vsync_thread.reset();
+ wait_event.reset();
+ } else {
+ system.CoreTiming().UnscheduleEvent(composition_event, 0);
+ }
}
void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) {
@@ -199,10 +241,12 @@ void NVFlinger::Compose() {
auto& gpu = system.GPU();
const auto& multi_fence = buffer->get().multi_fence;
+ guard->unlock();
for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) {
const auto& fence = multi_fence.fences[fence_id];
gpu.WaitFence(fence.id, fence.value);
}
+ guard->lock();
MicroProfileFlip();
@@ -223,7 +267,7 @@ void NVFlinger::Compose() {
s64 NVFlinger::GetNextTicks() const {
constexpr s64 max_hertz = 120LL;
- return (Core::Hardware::BASE_CLOCK_RATE * (1LL << swap_interval)) / max_hertz;
+ return (1000000000 * (1LL << swap_interval)) / max_hertz;
}
} // namespace Service::NVFlinger
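
SplitVSync paces the host vsync thread by sleeping for whatever remains of the frame period after composition, then feeding the measured oversleep of that wait back into the next iteration. The computation, distilled into a standalone helper with illustrative names:

    #include <algorithm>
    #include <cstdint>

    // frame_period_ns: target interval from GetNextTicks(); compose_ns: time spent in
    // Compose(); prev_oversleep_ns: how late the previous wait actually woke up.
    int64_t NextSleepNs(int64_t frame_period_ns, int64_t compose_ns, int64_t prev_oversleep_ns) {
        return std::max<int64_t>(int64_t{0}, frame_period_ns - compose_ns - prev_oversleep_ns);
    }
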
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h
index 57a21f33b..e4959a9af 100644
--- a/src/core/hle/service/nvflinger/nvflinger.h
+++ b/src/core/hle/service/nvflinger/nvflinger.h
@@ -4,15 +4,22 @@
#pragma once
+#include <atomic>
#include <memory>
+#include <mutex>
#include <optional>
#include <string>
#include <string_view>
+#include <thread>
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
+namespace Common {
+class Event;
+} // namespace Common
+
namespace Core::Timing {
class CoreTiming;
struct EventType;
@@ -79,6 +86,10 @@ public:
s64 GetNextTicks() const;
+ std::unique_lock<std::mutex> Lock() {
+ return std::unique_lock{*guard};
+ }
+
private:
/// Finds the display identified by the specified ID.
VI::Display* FindDisplay(u64 display_id);
@@ -92,6 +103,10 @@ private:
/// Finds the layer identified by the specified ID in the desired display.
const VI::Layer* FindLayer(u64 display_id, u64 layer_id) const;
+ static void VSyncThread(NVFlinger& nv_flinger);
+
+ void SplitVSync();
+
std::shared_ptr<Nvidia::Module> nvdrv;
std::vector<VI::Display> displays;
@@ -108,7 +123,13 @@ private:
/// Event that handles screen composition.
std::shared_ptr<Core::Timing::EventType> composition_event;
+ std::shared_ptr<std::mutex> guard;
+
Core::System& system;
+
+ std::unique_ptr<std::thread> vsync_thread;
+ std::unique_ptr<Common::Event> wait_event;
+ std::atomic<bool> is_running{};
};
} // namespace Service::NVFlinger
diff --git a/src/core/hle/service/sm/sm.cpp b/src/core/hle/service/sm/sm.cpp
index 6ada13be4..d872de16c 100644
--- a/src/core/hle/service/sm/sm.cpp
+++ b/src/core/hle/service/sm/sm.cpp
@@ -142,7 +142,7 @@ void SM::GetService(Kernel::HLERequestContext& ctx) {
}
// Wake the threads waiting on the ServerPort
- server_port->WakeupAllWaitingThreads();
+ server_port->Signal();
LOG_DEBUG(Service_SM, "called service={} -> session={}", name, client->GetObjectId());
IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles};
diff --git a/src/core/hle/service/time/standard_steady_clock_core.cpp b/src/core/hle/service/time/standard_steady_clock_core.cpp
index 1575f0b49..59a272f4a 100644
--- a/src/core/hle/service/time/standard_steady_clock_core.cpp
+++ b/src/core/hle/service/time/standard_steady_clock_core.cpp
@@ -11,9 +11,8 @@
namespace Service::Time::Clock {
TimeSpanType StandardSteadyClockCore::GetCurrentRawTimePoint(Core::System& system) {
- const TimeSpanType ticks_time_span{TimeSpanType::FromTicks(
- Core::Timing::CpuCyclesToClockCycles(system.CoreTiming().GetTicks()),
- Core::Hardware::CNTFREQ)};
+ const TimeSpanType ticks_time_span{
+ TimeSpanType::FromTicks(system.CoreTiming().GetClockTicks(), Core::Hardware::CNTFREQ)};
TimeSpanType raw_time_point{setup_value.nanoseconds + ticks_time_span.nanoseconds};
if (raw_time_point.nanoseconds < cached_raw_time_point.nanoseconds) {
diff --git a/src/core/hle/service/time/tick_based_steady_clock_core.cpp b/src/core/hle/service/time/tick_based_steady_clock_core.cpp
index 44d5bc651..8baaa2a6a 100644
--- a/src/core/hle/service/time/tick_based_steady_clock_core.cpp
+++ b/src/core/hle/service/time/tick_based_steady_clock_core.cpp
@@ -11,9 +11,8 @@
namespace Service::Time::Clock {
SteadyClockTimePoint TickBasedSteadyClockCore::GetTimePoint(Core::System& system) {
- const TimeSpanType ticks_time_span{TimeSpanType::FromTicks(
- Core::Timing::CpuCyclesToClockCycles(system.CoreTiming().GetTicks()),
- Core::Hardware::CNTFREQ)};
+ const TimeSpanType ticks_time_span{
+ TimeSpanType::FromTicks(system.CoreTiming().GetClockTicks(), Core::Hardware::CNTFREQ)};
return {ticks_time_span.ToSeconds(), GetClockSourceId()};
}
diff --git a/src/core/hle/service/time/time.cpp b/src/core/hle/service/time/time.cpp
index 67f1bbcf3..4cf58a61a 100644
--- a/src/core/hle/service/time/time.cpp
+++ b/src/core/hle/service/time/time.cpp
@@ -234,9 +234,8 @@ void Module::Interface::CalculateMonotonicSystemClockBaseTimePoint(Kernel::HLERe
const auto current_time_point{steady_clock_core.GetCurrentTimePoint(system)};
if (current_time_point.clock_source_id == context.steady_time_point.clock_source_id) {
- const auto ticks{Clock::TimeSpanType::FromTicks(
- Core::Timing::CpuCyclesToClockCycles(system.CoreTiming().GetTicks()),
- Core::Hardware::CNTFREQ)};
+ const auto ticks{Clock::TimeSpanType::FromTicks(system.CoreTiming().GetClockTicks(),
+ Core::Hardware::CNTFREQ)};
const s64 base_time_point{context.offset + current_time_point.time_point -
ticks.ToSeconds()};
IPC::ResponseBuilder rb{ctx, (sizeof(s64) / 4) + 2};
diff --git a/src/core/hle/service/time/time_sharedmemory.cpp b/src/core/hle/service/time/time_sharedmemory.cpp
index 999ec1e51..e0ae9f874 100644
--- a/src/core/hle/service/time/time_sharedmemory.cpp
+++ b/src/core/hle/service/time/time_sharedmemory.cpp
@@ -30,8 +30,7 @@ void SharedMemory::SetupStandardSteadyClock(Core::System& system,
const Common::UUID& clock_source_id,
Clock::TimeSpanType current_time_point) {
const Clock::TimeSpanType ticks_time_span{Clock::TimeSpanType::FromTicks(
- Core::Timing::CpuCyclesToClockCycles(system.CoreTiming().GetTicks()),
- Core::Hardware::CNTFREQ)};
+ system.CoreTiming().GetClockTicks(), Core::Hardware::CNTFREQ)};
const Clock::SteadyClockContext context{
static_cast<u64>(current_time_point.nanoseconds - ticks_time_span.nanoseconds),
clock_source_id};
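Note on the four conversions above: CoreTiming::GetClockTicks() now reports 19.2 MHz counter ticks directly, where these call sites previously rescaled raw CPU cycles by hand. A minimal sketch of the arithmetic being retired, assuming the Core::Hardware constants used elsewhere in this tree (BASE_CLOCK_RATE = 1019215872 Hz, CNTFREQ = 19200000 Hz); the helper name is illustrative:

    #include <cstdint>

    // Assumed constants, matching Core::Hardware in this tree.
    constexpr std::uint64_t BASE_CLOCK_RATE = 1019215872; // emulated CPU frequency, Hz
    constexpr std::uint64_t CNTFREQ = 19200000;           // ARM generic-timer frequency, Hz

    // What the retired two-step call sites computed: rescale CPU cycles onto
    // the 19.2 MHz counter. The real helper widens to 128 bits first; this
    // 64-bit form is exact only while cpu_cycles stays below roughly 2^39.
    constexpr std::uint64_t CpuCyclesToClockCyclesSketch(std::uint64_t cpu_cycles) {
        return cpu_cycles * CNTFREQ / BASE_CLOCK_RATE;
    }

    static_assert(CpuCyclesToClockCyclesSketch(BASE_CLOCK_RATE) == CNTFREQ);

With a host-clock-backed CoreTiming the counter value no longer has to be derived from an emulated cycle count, which is why every call site collapses to a single GetClockTicks() query.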
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index 46e14c2a3..157092074 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -511,6 +511,7 @@ private:
LOG_DEBUG(Service_VI, "called. id=0x{:08X} transaction={:X}, flags=0x{:08X}", id,
static_cast<u32>(transaction), flags);
+ nv_flinger->Lock();
auto& buffer_queue = nv_flinger->FindBufferQueue(id);
switch (transaction) {
@@ -550,6 +551,7 @@ private:
[=](std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx,
Kernel::ThreadWakeupReason reason) {
// Repeat TransactParcel DequeueBuffer when a buffer is available
+ nv_flinger->Lock();
auto& buffer_queue = nv_flinger->FindBufferQueue(id);
auto result = buffer_queue.DequeueBuffer(width, height);
ASSERT_MSG(result != std::nullopt, "Could not dequeue buffer.");
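Both TransactParcel paths now take the NVFlinger lock before FindBufferQueue: with host-threaded multicore, the HLE request and its retry callback can race NVFlinger's own composition pass over the same queue container. A sketch of why lookup-under-lock matters, using hypothetical stand-ins for the NVFlinger internals (the real Lock() signature may differ):

    #include <cstddef>
    #include <mutex>
    #include <vector>

    struct BufferQueue {
        int id;
    };

    class FlingerSketch {
    public:
        // The reference returned by FindBufferQueue aliases an element of
        // `queues`; it is only safe to use while the same mutex the
        // composition thread takes is held, which is what locking before
        // the lookup enforces.
        std::unique_lock<std::mutex> Lock() {
            return std::unique_lock{mutex};
        }

        BufferQueue& FindBufferQueue(std::size_t id) {
            return queues[id];
        }

    private:
        std::mutex mutex;
        std::vector<BufferQueue> queues{{0}, {1}};
    };

    int main() {
        FlingerSketch flinger;
        const auto guard = flinger.Lock(); // hold for the whole transaction
        auto& queue = flinger.FindBufferQueue(0);
        return queue.id;
    }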
diff --git a/src/core/host_timing.cpp b/src/core/host_timing.cpp
deleted file mode 100644
index 2f40de1a1..000000000
--- a/src/core/host_timing.cpp
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include "core/host_timing.h"
-
-#include <algorithm>
-#include <mutex>
-#include <string>
-#include <tuple>
-
-#include "common/assert.h"
-#include "core/core_timing_util.h"
-
-namespace Core::HostTiming {
-
-std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback) {
- return std::make_shared<EventType>(std::move(callback), std::move(name));
-}
-
-struct CoreTiming::Event {
- u64 time;
- u64 fifo_order;
- u64 userdata;
- std::weak_ptr<EventType> type;
-
- // Sort by time, unless the times are the same, in which case sort by
- // the order added to the queue
- friend bool operator>(const Event& left, const Event& right) {
- return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order);
- }
-
- friend bool operator<(const Event& left, const Event& right) {
- return std::tie(left.time, left.fifo_order) < std::tie(right.time, right.fifo_order);
- }
-};
-
-CoreTiming::CoreTiming() {
- clock =
- Common::CreateBestMatchingClock(Core::Hardware::BASE_CLOCK_RATE, Core::Hardware::CNTFREQ);
-}
-
-CoreTiming::~CoreTiming() = default;
-
-void CoreTiming::ThreadEntry(CoreTiming& instance) {
- instance.ThreadLoop();
-}
-
-void CoreTiming::Initialize() {
- event_fifo_id = 0;
- const auto empty_timed_callback = [](u64, s64) {};
- ev_lost = CreateEvent("_lost_event", empty_timed_callback);
- timer_thread = std::make_unique<std::thread>(ThreadEntry, std::ref(*this));
-}
-
-void CoreTiming::Shutdown() {
- paused = true;
- shutting_down = true;
- event.Set();
- timer_thread->join();
- ClearPendingEvents();
- timer_thread.reset();
- has_started = false;
-}
-
-void CoreTiming::Pause(bool is_paused) {
- paused = is_paused;
-}
-
-void CoreTiming::SyncPause(bool is_paused) {
- if (is_paused == paused && paused_set == paused) {
- return;
- }
- Pause(is_paused);
- event.Set();
- while (paused_set != is_paused)
- ;
-}
-
-bool CoreTiming::IsRunning() const {
- return !paused_set;
-}
-
-bool CoreTiming::HasPendingEvents() const {
- return !(wait_set && event_queue.empty());
-}
-
-void CoreTiming::ScheduleEvent(s64 ns_into_future, const std::shared_ptr<EventType>& event_type,
- u64 userdata) {
- basic_lock.lock();
- const u64 timeout = static_cast<u64>(GetGlobalTimeNs().count() + ns_into_future);
-
- event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
-
- std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
- basic_lock.unlock();
- event.Set();
-}
-
-void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata) {
- basic_lock.lock();
- const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
- return e.type.lock().get() == event_type.get() && e.userdata == userdata;
- });
-
- // Removing random items breaks the invariant so we have to re-establish it.
- if (itr != event_queue.end()) {
- event_queue.erase(itr, event_queue.end());
- std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
- }
- basic_lock.unlock();
-}
-
-void CoreTiming::AddTicks(std::size_t core_index, u64 ticks) {
- ticks_count[core_index] += ticks;
-}
-
-void CoreTiming::ResetTicks(std::size_t core_index) {
- ticks_count[core_index] = 0;
-}
-
-u64 CoreTiming::GetCPUTicks() const {
- return clock->GetCPUCycles();
-}
-
-u64 CoreTiming::GetClockTicks() const {
- return clock->GetClockCycles();
-}
-
-void CoreTiming::ClearPendingEvents() {
- event_queue.clear();
-}
-
-void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
- basic_lock.lock();
-
- const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
- return e.type.lock().get() == event_type.get();
- });
-
- // Removing random items breaks the invariant so we have to re-establish it.
- if (itr != event_queue.end()) {
- event_queue.erase(itr, event_queue.end());
- std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
- }
- basic_lock.unlock();
-}
-
-std::optional<u64> CoreTiming::Advance() {
- advance_lock.lock();
- basic_lock.lock();
- global_timer = GetGlobalTimeNs().count();
-
- while (!event_queue.empty() && event_queue.front().time <= global_timer) {
- Event evt = std::move(event_queue.front());
- std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
- event_queue.pop_back();
- basic_lock.unlock();
-
- if (auto event_type{evt.type.lock()}) {
- event_type->callback(evt.userdata, global_timer - evt.time);
- }
-
- basic_lock.lock();
- }
-
- if (!event_queue.empty()) {
- const u64 next_time = event_queue.front().time - global_timer;
- basic_lock.unlock();
- advance_lock.unlock();
- return next_time;
- } else {
- basic_lock.unlock();
- advance_lock.unlock();
- return std::nullopt;
- }
-}
-
-void CoreTiming::ThreadLoop() {
- has_started = true;
- while (!shutting_down) {
- while (!paused) {
- paused_set = false;
- const auto next_time = Advance();
- if (next_time) {
- std::chrono::nanoseconds next_time_ns = std::chrono::nanoseconds(*next_time);
- event.WaitFor(next_time_ns);
- } else {
- wait_set = true;
- event.Wait();
- }
- wait_set = false;
- }
- paused_set = true;
- }
-}
-
-std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
- return clock->GetTimeNS();
-}
-
-std::chrono::microseconds CoreTiming::GetGlobalTimeUs() const {
- return clock->GetTimeUS();
-}
-
-} // namespace Core::HostTiming
diff --git a/src/core/host_timing.h b/src/core/host_timing.h
deleted file mode 100644
index be6b68d7c..000000000
--- a/src/core/host_timing.h
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <atomic>
-#include <chrono>
-#include <functional>
-#include <memory>
-#include <mutex>
-#include <optional>
-#include <string>
-#include <thread>
-#include <vector>
-
-#include "common/common_types.h"
-#include "common/spin_lock.h"
-#include "common/thread.h"
-#include "common/threadsafe_queue.h"
-#include "common/wall_clock.h"
-#include "core/hardware_properties.h"
-
-namespace Core::HostTiming {
-
-/// A callback that may be scheduled for a particular core timing event.
-using TimedCallback = std::function<void(u64 userdata, s64 cycles_late)>;
-
-/// Contains the characteristics of a particular event.
-struct EventType {
- EventType(TimedCallback&& callback, std::string&& name)
- : callback{std::move(callback)}, name{std::move(name)} {}
-
- /// The event's callback function.
- TimedCallback callback;
- /// A pointer to the name of the event.
- const std::string name;
-};
-
-/**
- * This is a system to schedule events into the emulated machine's future. Time is measured
- * in main CPU clock cycles.
- *
- * To schedule an event, you first have to register its type. This is where you pass in the
- * callback. You then schedule events using the type id you get back.
- *
- * The s64 cycles_late argument passed to each callback is how many cycles late
- * the event actually fired. So, to run an event on a regular basis, reschedule
- * it from inside its own callback:
- *   ScheduleEvent(period - cycles_late, event_type)
- */
-class CoreTiming {
-public:
- CoreTiming();
- ~CoreTiming();
-
- CoreTiming(const CoreTiming&) = delete;
- CoreTiming(CoreTiming&&) = delete;
-
- CoreTiming& operator=(const CoreTiming&) = delete;
- CoreTiming& operator=(CoreTiming&&) = delete;
-
- /// CoreTiming begins at the boundary of timing slice -1. An initial call to Advance() is
- /// required to end slice - 1 and start slice 0 before the first cycle of code is executed.
- void Initialize();
-
- /// Tears down all timing related functionality.
- void Shutdown();
-
- /// Pauses/Unpauses the execution of the timer thread.
- void Pause(bool is_paused);
-
- /// Pauses/Unpauses the execution of the timer thread and waits until paused.
- void SyncPause(bool is_paused);
-
- /// Checks if core timing is running.
- bool IsRunning() const;
-
- /// Checks if the timer thread has started.
- bool HasStarted() const {
- return has_started;
- }
-
- /// Checks if there are any pending time events.
- bool HasPendingEvents() const;
-
- /// Schedules an event in core timing
- void ScheduleEvent(s64 ns_into_future, const std::shared_ptr<EventType>& event_type,
- u64 userdata = 0);
-
- void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata);
-
- /// We only permit one event of each type in the queue at a time.
- void RemoveEvent(const std::shared_ptr<EventType>& event_type);
-
- void AddTicks(std::size_t core_index, u64 ticks);
-
- void ResetTicks(std::size_t core_index);
-
- /// Returns current time in emulated CPU cycles
- u64 GetCPUTicks() const;
-
- /// Returns current time in emulated clock cycles
- u64 GetClockTicks() const;
-
- /// Returns current time in microseconds.
- std::chrono::microseconds GetGlobalTimeUs() const;
-
- /// Returns current time in nanoseconds.
- std::chrono::nanoseconds GetGlobalTimeNs() const;
-
- /// Checks for events manually and returns time in nanoseconds for next event, threadsafe.
- std::optional<u64> Advance();
-
-private:
- struct Event;
-
- /// Clear all pending events. This should ONLY be done on exit.
- void ClearPendingEvents();
-
- static void ThreadEntry(CoreTiming& instance);
- void ThreadLoop();
-
- std::unique_ptr<Common::WallClock> clock;
-
- u64 global_timer = 0;
-
- std::chrono::nanoseconds start_point;
-
- // The queue is a min-heap using std::make_heap/push_heap/pop_heap.
- // We don't use std::priority_queue because we need to be able to serialize, unserialize and
- // erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't
- // accommodated by the standard adaptor class.
- std::vector<Event> event_queue;
- u64 event_fifo_id = 0;
-
- std::shared_ptr<EventType> ev_lost;
- Common::Event event{};
- Common::SpinLock basic_lock{};
- Common::SpinLock advance_lock{};
- std::unique_ptr<std::thread> timer_thread;
- std::atomic<bool> paused{};
- std::atomic<bool> paused_set{};
- std::atomic<bool> wait_set{};
- std::atomic<bool> shutting_down{};
- std::atomic<bool> has_started{};
-
- std::array<std::atomic<u64>, Core::Hardware::NUM_CPU_CORES> ticks_count{};
-};
-
-/// Creates a core timing event with the given name and callback.
-///
-/// @param name The name of the core timing event to create.
-/// @param callback The callback to execute for the event.
-///
-/// @returns An EventType instance representing the created event.
-///
-std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback);
-
-} // namespace Core::HostTiming
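The two deleted files above are not lost functionality: the host-clock timer design they prototyped (a wall-clock-driven worker thread draining a heap of deadline events) has evidently been folded into the main Core::Timing, which the call sites in this diff now drive with nanosecond deadlines and GetClockTicks(). The queue discipline is worth noting: a min-heap over std::vector via std::push_heap/std::pop_heap with std::greater<>, keeping the soonest event at front() while still allowing arbitrary erasure for UnscheduleEvent/RemoveEvent. A self-contained sketch:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <functional>
    #include <tuple>
    #include <vector>

    struct Event {
        std::uint64_t time;       // absolute deadline
        std::uint64_t fifo_order; // tie-breaker: insertion order

        friend bool operator>(const Event& l, const Event& r) {
            return std::tie(l.time, l.fifo_order) > std::tie(r.time, r.fifo_order);
        }
    };

    int main() {
        std::vector<Event> queue;
        std::uint64_t fifo_id = 0;

        const auto schedule = [&](std::uint64_t deadline) {
            queue.push_back({deadline, fifo_id++});
            // std::greater<> flips the default max-heap into a min-heap.
            std::push_heap(queue.begin(), queue.end(), std::greater<>());
        };

        schedule(300);
        schedule(100);
        schedule(100); // equal deadlines resolve in FIFO order

        assert(queue.front().time == 100 && queue.front().fifo_order == 1);

        // Erasing from the middle (as UnscheduleEvent does) breaks the heap
        // invariant, so it must be re-established afterwards.
        queue.erase(queue.begin() + 1);
        std::make_heap(queue.begin(), queue.end(), std::greater<>());

        // Consume the earliest event, as Advance() does.
        std::pop_heap(queue.begin(), queue.end(), std::greater<>());
        queue.pop_back();
        return static_cast<int>(queue.size()); // 1
    }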
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 9d87045a0..7def00768 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -8,6 +8,7 @@
#include <utility>
#include "common/assert.h"
+#include "common/atomic_ops.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/page_table.h"
@@ -29,15 +30,12 @@ namespace Core::Memory {
struct Memory::Impl {
explicit Impl(Core::System& system_) : system{system_} {}
- void SetCurrentPageTable(Kernel::Process& process) {
+ void SetCurrentPageTable(Kernel::Process& process, u32 core_id) {
current_page_table = &process.PageTable().PageTableImpl();
const std::size_t address_space_width = process.PageTable().GetAddressSpaceWidth();
- system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width);
- system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width);
- system.ArmInterface(2).PageTableChanged(*current_page_table, address_space_width);
- system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width);
+ system.ArmInterface(core_id).PageTableChanged(*current_page_table, address_space_width);
}
void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
@@ -179,6 +177,22 @@ struct Memory::Impl {
}
}
+ bool WriteExclusive8(const VAddr addr, const u8 data, const u8 expected) {
+ return WriteExclusive<u8>(addr, data, expected);
+ }
+
+ bool WriteExclusive16(const VAddr addr, const u16 data, const u16 expected) {
+ return WriteExclusive<u16_le>(addr, data, expected);
+ }
+
+ bool WriteExclusive32(const VAddr addr, const u32 data, const u32 expected) {
+ return WriteExclusive<u32_le>(addr, data, expected);
+ }
+
+ bool WriteExclusive64(const VAddr addr, const u64 data, const u64 expected) {
+ return WriteExclusive<u64_le>(addr, data, expected);
+ }
+
std::string ReadCString(VAddr vaddr, std::size_t max_length) {
std::string string;
string.reserve(max_length);
@@ -682,6 +696,67 @@ struct Memory::Impl {
}
}
+ template <typename T>
+ bool WriteExclusive(const VAddr vaddr, const T data, const T expected) {
+ u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
+ if (page_pointer != nullptr) {
+ // NOTE: Avoid adding any extra logic to this fast-path block
+ T volatile* pointer = reinterpret_cast<T volatile*>(&page_pointer[vaddr]);
+ return Common::AtomicCompareAndSwap(pointer, data, expected);
+ }
+
+ const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
+ switch (type) {
+ case Common::PageType::Unmapped:
+ LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
+ static_cast<u32>(data), vaddr);
+ return true;
+ case Common::PageType::Memory:
+ ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
+ break;
+ case Common::PageType::RasterizerCachedMemory: {
+ u8* host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
+ system.GPU().InvalidateRegion(vaddr, sizeof(T));
+ // host_ptr already addresses the byte for vaddr; taking &host_ptr would
+ // CAS the local pointer variable itself instead of guest memory.
+ T volatile* pointer = reinterpret_cast<T volatile*>(host_ptr);
+ return Common::AtomicCompareAndSwap(pointer, data, expected);
+ }
+ default:
+ UNREACHABLE();
+ }
+ return true;
+ }
+
+ bool WriteExclusive128(const VAddr vaddr, const u128 data, const u128 expected) {
+ u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
+ if (page_pointer != nullptr) {
+ // NOTE: Avoid adding any extra logic to this fast-path block
+ u64 volatile* pointer = reinterpret_cast<u64 volatile*>(&page_pointer[vaddr]);
+ return Common::AtomicCompareAndSwap(pointer, data, expected);
+ }
+
+ const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
+ switch (type) {
+ case Common::PageType::Unmapped:
+ LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}{:016X}", sizeof(data) * 8,
+ static_cast<u64>(data[1]), static_cast<u64>(data[0]), vaddr);
+ return true;
+ case Common::PageType::Memory:
+ ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
+ break;
+ case Common::PageType::RasterizerCachedMemory: {
+ u8* host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
+ system.GPU().InvalidateRegion(vaddr, sizeof(u128));
+ // As above: cast the pointed-to bytes, not the address of the local pointer.
+ u64 volatile* pointer = reinterpret_cast<u64 volatile*>(host_ptr);
+ return Common::AtomicCompareAndSwap(pointer, data, expected);
+ }
+ default:
+ UNREACHABLE();
+ }
+ return true;
+ }
+
Common::PageTable* current_page_table = nullptr;
Core::System& system;
};
@@ -689,8 +764,8 @@ struct Memory::Impl {
Memory::Memory(Core::System& system) : impl{std::make_unique<Impl>(system)} {}
Memory::~Memory() = default;
-void Memory::SetCurrentPageTable(Kernel::Process& process) {
- impl->SetCurrentPageTable(process);
+void Memory::SetCurrentPageTable(Kernel::Process& process, u32 core_id) {
+ impl->SetCurrentPageTable(process, core_id);
}
void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
@@ -764,6 +839,26 @@ void Memory::Write64(VAddr addr, u64 data) {
impl->Write64(addr, data);
}
+bool Memory::WriteExclusive8(VAddr addr, u8 data, u8 expected) {
+ return impl->WriteExclusive8(addr, data, expected);
+}
+
+bool Memory::WriteExclusive16(VAddr addr, u16 data, u16 expected) {
+ return impl->WriteExclusive16(addr, data, expected);
+}
+
+bool Memory::WriteExclusive32(VAddr addr, u32 data, u32 expected) {
+ return impl->WriteExclusive32(addr, data, expected);
+}
+
+bool Memory::WriteExclusive64(VAddr addr, u64 data, u64 expected) {
+ return impl->WriteExclusive64(addr, data, expected);
+}
+
+bool Memory::WriteExclusive128(VAddr addr, u128 data, u128 expected) {
+ return impl->WriteExclusive128(addr, data, expected);
+}
+
std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) {
return impl->ReadCString(vaddr, max_length);
}
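All five WriteExclusive wrappers reduce to Common::AtomicCompareAndSwap on a host pointer. The contract assumed throughout this diff, restated against the standard library (std::atomic_ref is C++20; the function name here is illustrative):

    #include <atomic>
    #include <cstdint>

    // Assumed semantics: store `data` to *pointer iff *pointer still holds
    // `expected`, returning whether the store happened.
    bool CompareAndSwapSketch(std::uint32_t* pointer, std::uint32_t data,
                              std::uint32_t expected) {
        std::atomic_ref<std::uint32_t> ref{*pointer};
        // compare_exchange_strong rewrites `expected` with the observed value
        // on failure; callers of WriteExclusive* simply re-read and retry, so
        // the updated value is discarded here.
        return ref.compare_exchange_strong(expected, data);
    }

    int main() {
        std::uint32_t word = 5;
        const bool swapped = CompareAndSwapSketch(&word, 6, 5); // true;  word == 6
        const bool stale = CompareAndSwapSketch(&word, 7, 5);   // false; word == 6
        return (swapped && !stale && word == 6) ? 0 : 1;
    }

The 128-bit variant follows the same pattern but needs a double-width CAS (cmpxchg16b on x86-64), which is why its fast path casts to a u64 volatile pointer and hands the whole u128 pair to the common helper.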
diff --git a/src/core/memory.h b/src/core/memory.h
index 9292f3b0a..4a1cc63f4 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -64,7 +64,7 @@ public:
*
* @param process The process to use the page table of.
* @param core_id The index of the CPU core whose ARM interface is notified of the change.
*/
- void SetCurrentPageTable(Kernel::Process& process);
+ void SetCurrentPageTable(Kernel::Process& process, u32 core_id);
/**
* Maps an allocated buffer onto a region of the emulated process address space.
@@ -245,6 +245,71 @@ public:
void Write64(VAddr addr, u64 data);
/**
+ * Writes an 8-bit unsigned integer to the given virtual address in
+ * the current process' address space if and only if the address contains
+ * the expected value. This operation is atomic.
+ *
+ * @param addr The virtual address to write the 8-bit unsigned integer to.
+ * @param data The 8-bit unsigned integer to write to the given virtual address.
+ * @param expected The 8-bit unsigned integer to check against the given virtual address.
+ *
+ * @post On success, the memory range [addr, addr + sizeof(data)) contains the given data value.
+ *
+ * @returns True if the address held the expected value and the data was written.
+ */
+ bool WriteExclusive8(VAddr addr, u8 data, u8 expected);
+
+ /**
+ * Writes a 16-bit unsigned integer to the given virtual address in
+ * the current process' address space if and only if the address contains
+ * the expected value. This operation is atomic.
+ *
+ * @param addr The virtual address to write the 16-bit unsigned integer to.
+ * @param data The 16-bit unsigned integer to write to the given virtual address.
+ * @param expected The 16-bit unsigned integer to check against the given virtual address.
+ *
+ * @post On success, the memory range [addr, addr + sizeof(data)) contains the given data value.
+ *
+ * @returns True if the address held the expected value and the data was written.
+ */
+ bool WriteExclusive16(VAddr addr, u16 data, u16 expected);
+
+ /**
+ * Writes a 32-bit unsigned integer to the given virtual address in
+ * the current process' address space if and only if the address contains
+ * the expected value. This operation is atomic.
+ *
+ * @param addr The virtual address to write the 32-bit unsigned integer to.
+ * @param data The 32-bit unsigned integer to write to the given virtual address.
+ * @param expected The 32-bit unsigned integer to check against the given virtual address.
+ *
+ * @post On success, the memory range [addr, addr + sizeof(data)) contains the given data value.
+ *
+ * @returns True if the address held the expected value and the data was written.
+ */
+ bool WriteExclusive32(VAddr addr, u32 data, u32 expected);
+
+ /**
+ * Writes a 64-bit unsigned integer to the given virtual address in
+ * the current process' address space if and only if the address contains
+ * the expected value. This operation is atomic.
+ *
+ * @param addr The virtual address to write the 64-bit unsigned integer to.
+ * @param data The 64-bit unsigned integer to write to the given virtual address.
+ * @param expected The 64-bit unsigned integer to check against the given virtual address.
+ *
+ * @post On success, the memory range [addr, addr + sizeof(data)) contains the given data value.
+ *
+ * @returns True if the address held the expected value and the data was written.
+ */
+ bool WriteExclusive64(VAddr addr, u64 data, u64 expected);
+
+ /**
+ * Writes a 128-bit unsigned integer to the given virtual address in
+ * the current process' address space if and only if the address contains
+ * the expected value. This operation is atomic.
+ *
+ * @param addr The virtual address to write the 128-bit unsigned integer to.
+ * @param data The 128-bit unsigned integer to write to the given virtual address.
+ * @param expected The 128-bit unsigned integer to check against the given virtual address.
+ *
+ * @post On success, the memory range [addr, addr + sizeof(data)) contains the given data value.
+ *
+ * @returns True if the address held the expected value and the data was written.
+ */
+ bool WriteExclusive128(VAddr addr, u128 data, u128 expected);
+
+ /**
* Reads a null-terminated string from the given virtual address.
* This function will continually read characters until either:
*
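A typical consumer of these declarations is an exclusive-monitor retry loop: read, compute, attempt the conditional store, and loop while another core won the race. The ToyMemory type below is purely a stand-in so the pattern compiles on its own; the real functions operate on the current process' page table:

    #include <atomic>
    #include <cstdint>

    // Illustrative stand-in for Core::Memory::Memory.
    struct ToyMemory {
        std::atomic<std::uint32_t> word{0};

        std::uint32_t Read32(std::uint64_t /*addr*/) {
            return word.load();
        }
        bool WriteExclusive32(std::uint64_t /*addr*/, std::uint32_t data,
                              std::uint32_t expected) {
            return word.compare_exchange_strong(expected, data);
        }
    };

    // Retry until no other core modified the word between the read and the
    // conditional store.
    std::uint32_t AtomicIncrement(ToyMemory& memory, std::uint64_t addr) {
        std::uint32_t value;
        do {
            value = memory.Read32(addr);
        } while (!memory.WriteExclusive32(addr, value + 1, value));
        return value + 1;
    }

    int main() {
        ToyMemory memory;
        return AtomicIncrement(memory, 0) == 1 ? 0 : 1;
    }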
diff --git a/src/core/memory/cheat_engine.cpp b/src/core/memory/cheat_engine.cpp
index b139e8465..53d27859b 100644
--- a/src/core/memory/cheat_engine.cpp
+++ b/src/core/memory/cheat_engine.cpp
@@ -20,7 +20,7 @@
namespace Core::Memory {
-constexpr s64 CHEAT_ENGINE_TICKS = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 12);
+constexpr s64 CHEAT_ENGINE_TICKS = static_cast<s64>(1000000000 / 12); // period is now in nanoseconds (~12 Hz)
constexpr u32 KEYPAD_BITMASK = 0x3FFFFFF;
StandardVmCallbacks::StandardVmCallbacks(Core::System& system, const CheatProcessMetadata& metadata)
@@ -190,7 +190,7 @@ CheatEngine::~CheatEngine() {
void CheatEngine::Initialize() {
event = Core::Timing::CreateEvent(
"CheatEngine::FrameCallback::" + Common::HexToString(metadata.main_nso_build_id),
- [this](u64 userdata, s64 cycles_late) { FrameCallback(userdata, cycles_late); });
+ [this](u64 userdata, s64 ns_late) { FrameCallback(userdata, ns_late); });
core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS, event);
metadata.process_id = system.CurrentProcess()->GetProcessID();
@@ -217,7 +217,7 @@ void CheatEngine::Reload(std::vector<CheatEntry> cheats) {
MICROPROFILE_DEFINE(Cheat_Engine, "Add-Ons", "Cheat Engine", MP_RGB(70, 200, 70));
-void CheatEngine::FrameCallback(u64 userdata, s64 cycles_late) {
+void CheatEngine::FrameCallback(u64 userdata, s64 ns_late) {
if (is_pending_reload.exchange(false)) {
vm.LoadProgram(cheats);
}
@@ -230,7 +230,7 @@ void CheatEngine::FrameCallback(u64 userdata, s64 cycles_late) {
vm.Execute(metadata);
- core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS - cycles_late, event);
+ core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS - ns_late, event);
}
} // namespace Core::Memory
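The parameter rename above is the visible edge of the timing rework: ScheduleEvent delays are now nanoseconds of host time, and the lateness passed back to the callback is in the same unit, so the existing subtract-the-slippage reschedule keeps its meaning. A minimal sketch of that self-correcting 12 Hz loop (ScheduleEvent here is a hypothetical stand-in for the CoreTiming call):

    #include <cstdint>
    #include <iostream>

    constexpr std::int64_t PERIOD_NS = 1000000000 / 12;

    // Stand-in for CoreTiming::ScheduleEvent(ns_into_future, event).
    void ScheduleEvent(std::int64_t ns_into_future) {
        std::cout << "next callback in " << ns_into_future << " ns\n";
    }

    // If this invocation fired ns_late behind its deadline, shortening the
    // next delay by the same amount keeps the long-run rate at 12 Hz.
    void FrameCallback(std::int64_t ns_late) {
        // ... per-frame work would run here ...
        ScheduleEvent(PERIOD_NS - ns_late);
    }

    int main() {
        FrameCallback(250000); // fired 0.25 ms late; the next delay is shortened
    }

The same change applies verbatim to the memory freezer below.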
diff --git a/src/core/perf_stats.cpp b/src/core/perf_stats.cpp
index f1ae9d4df..9f3a6b811 100644
--- a/src/core/perf_stats.cpp
+++ b/src/core/perf_stats.cpp
@@ -119,7 +119,7 @@ double PerfStats::GetLastFrameTimeScale() {
}
void FrameLimiter::DoFrameLimiting(microseconds current_system_time_us) {
- if (!Settings::values.use_frame_limit) {
+ if (!Settings::values.use_frame_limit || Settings::values.use_multi_core) {
return;
}
diff --git a/src/core/tools/freezer.cpp b/src/core/tools/freezer.cpp
index b2c6c537e..8b0c50d11 100644
--- a/src/core/tools/freezer.cpp
+++ b/src/core/tools/freezer.cpp
@@ -14,7 +14,7 @@
namespace Tools {
namespace {
-constexpr s64 MEMORY_FREEZER_TICKS = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 60);
+constexpr s64 MEMORY_FREEZER_TICKS = static_cast<s64>(1000000000 / 60); // period is now in nanoseconds (60 Hz)
u64 MemoryReadWidth(Core::Memory::Memory& memory, u32 width, VAddr addr) {
switch (width) {
@@ -57,7 +57,7 @@ Freezer::Freezer(Core::Timing::CoreTiming& core_timing_, Core::Memory::Memory& m
: core_timing{core_timing_}, memory{memory_} {
event = Core::Timing::CreateEvent(
"MemoryFreezer::FrameCallback",
- [this](u64 userdata, s64 cycles_late) { FrameCallback(userdata, cycles_late); });
+ [this](u64 userdata, s64 ns_late) { FrameCallback(userdata, ns_late); });
core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS, event);
}
@@ -158,7 +158,7 @@ std::vector<Freezer::Entry> Freezer::GetEntries() const {
return entries;
}
-void Freezer::FrameCallback(u64 userdata, s64 cycles_late) {
+void Freezer::FrameCallback(u64 userdata, s64 ns_late) {
if (!IsActive()) {
LOG_DEBUG(Common_Memory, "Memory freezer has been deactivated, ending callback events.");
return;
@@ -173,7 +173,7 @@ void Freezer::FrameCallback(u64 userdata, s64 cycles_late) {
MemoryWriteWidth(memory, entry.width, entry.address, entry.value);
}
- core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS - cycles_late, event);
+ core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS - ns_late, event);
}
void Freezer::FillEntryReads() {