author     Fernando Sahmkow <fsahmkow27@gmail.com>   2020-03-09 03:39:41 +0100
committer  Fernando Sahmkow <fsahmkow27@gmail.com>   2020-06-27 17:35:42 +0200
commit     ab9aae28bf5daa5fa7105bb4ef41b6d0b3c9cdc1 (patch)
tree       ec11dc90eb2cad49237eecc98766f61b04724254 /src/core
parent     Scheduler: Set last running time on thread. (diff)
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/core.cpp                 3
-rw-r--r--  src/core/cpu_manager.cpp        186
-rw-r--r--  src/core/cpu_manager.h           30
-rw-r--r--  src/core/hle/kernel/kernel.cpp   19
-rw-r--r--  src/core/hle/kernel/kernel.h      3
-rw-r--r--  src/core/memory.cpp               8
6 files changed, 215 insertions(+), 34 deletions(-)
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 5d4ecdce5..fd1bdcaf0 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -149,6 +149,9 @@ struct System::Impl {
device_memory = std::make_unique<Core::DeviceMemory>(system);
+ kernel.SetMulticore(Settings::values.use_multi_core);
+ cpu_manager.SetMulticore(Settings::values.use_multi_core);
+
core_timing.Initialize([&system]() { system.RegisterHostThread(); });
kernel.Initialize();
cpu_manager.Initialize();
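
The hunk above threads Settings::values.use_multi_core into both the kernel and the CPU manager before either one is initialized. A minimal sketch of why that ordering matters (hypothetical class and core count, not yuzu's API): the flag is sampled exactly once, when Initialize() lays out the host threads.

#include <cstddef>
#include <thread>
#include <vector>

class SketchCpuManager {
public:
    void SetMulticore(bool multicore) {
        is_multicore = multicore;
    }

    void Initialize() {
        // The host thread layout is fixed here; flipping the flag afterwards
        // has no effect until the manager is torn down and rebuilt.
        const std::size_t threads = is_multicore ? 4 : 1; // 4 stands in for NUM_CPU_CORES
        for (std::size_t core = 0; core < threads; ++core) {
            workers.emplace_back([] { /* emulate one core here */ });
        }
    }

    ~SketchCpuManager() {
        for (auto& worker : workers) {
            worker.join();
        }
    }

private:
    bool is_multicore{};
    std::vector<std::thread> workers;
};

int main() {
    SketchCpuManager manager;
    manager.SetMulticore(false); // analogous to cpu_manager.SetMulticore(use_multi_core)
    manager.Initialize();        // spawns one worker instead of four
    return 0;
}
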
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 9a261968a..e72f89808 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -26,9 +26,13 @@ void CpuManager::ThreadStart(CpuManager& cpu_manager, std::size_t core) {
void CpuManager::Initialize() {
running_mode = true;
- for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- core_data[core].host_thread =
- std::make_unique<std::thread>(ThreadStart, std::ref(*this), core);
+ if (is_multicore) {
+ for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+ core_data[core].host_thread =
+ std::make_unique<std::thread>(ThreadStart, std::ref(*this), core);
+ }
+ } else {
+ core_data[0].host_thread = std::make_unique<std::thread>(ThreadStart, std::ref(*this), 0);
}
}
@@ -41,52 +45,72 @@ void CpuManager::Shutdown() {
}
}
+std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() {
+ return std::function<void(void*)>(GuestThreadFunction);
+}
+
+std::function<void(void*)> CpuManager::GetIdleThreadStartFunc() {
+ return std::function<void(void*)>(IdleThreadFunction);
+}
+
+std::function<void(void*)> CpuManager::GetSuspendThreadStartFunc() {
+ return std::function<void(void*)>(SuspendThreadFunction);
+}
+
void CpuManager::GuestThreadFunction(void* cpu_manager_) {
CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
- cpu_manager->RunGuestThread();
+ if (cpu_manager->is_multicore) {
+ cpu_manager->MultiCoreRunGuestThread();
+ } else {
+ cpu_manager->SingleCoreRunGuestThread();
+ }
}
void CpuManager::GuestRewindFunction(void* cpu_manager_) {
CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
- cpu_manager->RunGuestLoop();
+ if (cpu_manager->is_multicore) {
+ cpu_manager->MultiCoreRunGuestLoop();
+ } else {
+ cpu_manager->SingleCoreRunGuestLoop();
+ }
}
void CpuManager::IdleThreadFunction(void* cpu_manager_) {
CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
- cpu_manager->RunIdleThread();
+ if (cpu_manager->is_multicore) {
+ cpu_manager->MultiCoreRunIdleThread();
+ } else {
+ cpu_manager->SingleCoreRunIdleThread();
+ }
}
void CpuManager::SuspendThreadFunction(void* cpu_manager_) {
CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
- cpu_manager->RunSuspendThread();
-}
-
-std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() {
- return std::function<void(void*)>(GuestThreadFunction);
-}
-
-std::function<void(void*)> CpuManager::GetIdleThreadStartFunc() {
- return std::function<void(void*)>(IdleThreadFunction);
-}
-
-std::function<void(void*)> CpuManager::GetSuspendThreadStartFunc() {
- return std::function<void(void*)>(SuspendThreadFunction);
+ if (cpu_manager->is_multicore) {
+ cpu_manager->MultiCoreRunSuspendThread();
+ } else {
+ cpu_manager->SingleCoreRunSuspendThread();
+ }
}
void* CpuManager::GetStartFuncParamater() {
return static_cast<void*>(this);
}
-void CpuManager::RunGuestThread() {
+///////////////////////////////////////////////////////////////////////////////
+/// MultiCore ///
+///////////////////////////////////////////////////////////////////////////////
+
+void CpuManager::MultiCoreRunGuestThread() {
auto& kernel = system.Kernel();
{
auto& sched = kernel.CurrentScheduler();
sched.OnThreadStart();
}
- RunGuestLoop();
+ MultiCoreRunGuestLoop();
}
-void CpuManager::RunGuestLoop() {
+void CpuManager::MultiCoreRunGuestLoop() {
auto& kernel = system.Kernel();
auto* thread = kernel.CurrentScheduler().GetCurrentThread();
auto host_context = thread->GetHostContext();
@@ -103,7 +127,7 @@ void CpuManager::RunGuestLoop() {
}
}
-void CpuManager::RunIdleThread() {
+void CpuManager::MultiCoreRunIdleThread() {
auto& kernel = system.Kernel();
while (true) {
auto& physical_core = kernel.CurrentPhysicalCore();
@@ -113,7 +137,7 @@ void CpuManager::RunIdleThread() {
}
}
-void CpuManager::RunSuspendThread() {
+void CpuManager::MultiCoreRunSuspendThread() {
auto& kernel = system.Kernel();
{
auto& sched = kernel.CurrentScheduler();
@@ -130,7 +154,7 @@ void CpuManager::RunSuspendThread() {
}
}
-void CpuManager::Pause(bool paused) {
+void CpuManager::MultiCorePause(bool paused) {
if (!paused) {
bool all_not_barrier = false;
while (!all_not_barrier) {
@@ -171,10 +195,120 @@ void CpuManager::Pause(bool paused) {
paused_state = paused;
}
+///////////////////////////////////////////////////////////////////////////////
+/// SingleCore ///
+///////////////////////////////////////////////////////////////////////////////
+
+void CpuManager::SingleCoreRunGuestThread() {
+ auto& kernel = system.Kernel();
+ {
+ auto& sched = kernel.CurrentScheduler();
+ sched.OnThreadStart();
+ }
+ SingleCoreRunGuestLoop();
+}
+
+void CpuManager::SingleCoreRunGuestLoop() {
+ auto& kernel = system.Kernel();
+ auto* thread = kernel.CurrentScheduler().GetCurrentThread();
+ auto host_context = thread->GetHostContext();
+ host_context->SetRewindPoint(std::function<void(void*)>(GuestRewindFunction), this);
+ host_context.reset();
+ while (true) {
+ auto& physical_core = kernel.CurrentPhysicalCore();
+ while (!physical_core.IsInterrupted()) {
+ physical_core.Run();
+ preemption_count++;
+ if (preemption_count % max_cycle_runs == 0) {
+ break;
+ }
+ }
+ physical_core.ClearExclusive();
+ PreemptSingleCore();
+ auto& scheduler = physical_core.Scheduler();
+ scheduler.TryDoContextSwitch();
+ }
+}
+
+void CpuManager::SingleCoreRunIdleThread() {
+ auto& kernel = system.Kernel();
+ while (true) {
+ auto& physical_core = kernel.CurrentPhysicalCore();
+ PreemptSingleCore();
+ auto& scheduler = physical_core.Scheduler();
+ scheduler.TryDoContextSwitch();
+ }
+}
+
+void CpuManager::SingleCoreRunSuspendThread() {
+ auto& kernel = system.Kernel();
+ {
+ auto& sched = kernel.CurrentScheduler();
+ sched.OnThreadStart();
+ }
+ while (true) {
+ auto core = kernel.GetCurrentHostThreadID();
+ auto& scheduler = kernel.CurrentScheduler();
+ Kernel::Thread* current_thread = scheduler.GetCurrentThread();
+ Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context);
+ ASSERT(scheduler.ContextSwitchPending());
+ ASSERT(core == kernel.GetCurrentHostThreadID());
+ scheduler.TryDoContextSwitch();
+ }
+}
+
+void CpuManager::PreemptSingleCore() {
+ preemption_count = 0;
+ std::size_t old_core = current_core;
+ current_core = (current_core + 1) % Core::Hardware::NUM_CPU_CORES;
+ auto& scheduler = system.Kernel().Scheduler(old_core);
+ Kernel::Thread* current_thread = system.Kernel().Scheduler(old_core).GetCurrentThread();
+ Kernel::Thread* next_thread = system.Kernel().Scheduler(current_core).GetCurrentThread();
+ Common::Fiber::YieldTo(current_thread->GetHostContext(), next_thread->GetHostContext());
+}
+
+void CpuManager::SingleCorePause(bool paused) {
+ if (!paused) {
+ bool all_not_barrier = false;
+ while (!all_not_barrier) {
+ all_not_barrier = !core_data[0].is_running.load() && core_data[0].initialized.load();
+ }
+ core_data[0].enter_barrier->Set();
+ if (paused_state.load()) {
+ bool all_barrier = false;
+ while (!all_barrier) {
+ all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
+ }
+ core_data[0].exit_barrier->Set();
+ }
+ } else {
+ /// Wait until all cores are paused.
+ bool all_barrier = false;
+ while (!all_barrier) {
+ all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
+ }
+ /// Don't release the barrier
+ }
+ paused_state = paused;
+}
+
+void CpuManager::Pause(bool paused) {
+ if (is_multicore) {
+ MultiCorePause(paused);
+ } else {
+ SingleCorePause(paused);
+ }
+}
+
void CpuManager::RunThread(std::size_t core) {
/// Initialization
system.RegisterCoreThread(core);
- std::string name = "yuzu:CoreHostThread_" + std::to_string(core);
+ std::string name;
+ if (is_multicore) {
+ name = "yuzu:CoreCPUThread_" + std::to_string(core);
+ } else {
+ name = "yuzu:CPUThread";
+ }
MicroProfileOnThreadCreate(name.c_str());
Common::SetCurrentThreadName(name.c_str());
auto& data = core_data[core];
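
In the single-core path above, every emulated core shares one host thread: SingleCoreRunGuestLoop runs the current core until it is interrupted or has used up max_cycle_runs slices, then PreemptSingleCore rotates current_core and yields to the next core's current thread fiber. A self-contained sketch of that rotation, with a plain loop standing in for the fiber switches (illustration only, not yuzu code):

#include <array>
#include <cstddef>
#include <cstdio>

constexpr std::size_t NUM_CPU_CORES = 4;  // mirrors Core::Hardware::NUM_CPU_CORES
constexpr std::size_t max_cycle_runs = 5; // same slice budget as the patch

int main() {
    std::size_t current_core = 0;
    std::size_t preemption_count = 0;
    std::array<int, NUM_CPU_CORES> slices_run{};

    for (int step = 0; step < 40; ++step) {
        // "Run" the current core's guest thread for one slice.
        ++slices_run[current_core];
        ++preemption_count;

        // After max_cycle_runs slices, hand the host thread to the next core,
        // the way SingleCoreRunGuestLoop breaks out and PreemptSingleCore()
        // advances current_core before switching fibers.
        if (preemption_count % max_cycle_runs == 0) {
            preemption_count = 0;
            current_core = (current_core + 1) % NUM_CPU_CORES;
        }
    }

    for (std::size_t core = 0; core < NUM_CPU_CORES; ++core) {
        std::printf("core %zu ran %d slices\n", core, slices_run[core]);
    }
    return 0;
}

With 40 steps and a budget of 5, each of the 4 cores ends up with 10 slices, which is the round-robin fairness the preemption counter is there to provide.
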
diff --git a/src/core/cpu_manager.h b/src/core/cpu_manager.h
index e83ab20f9..1e81481ec 100644
--- a/src/core/cpu_manager.h
+++ b/src/core/cpu_manager.h
@@ -30,6 +30,10 @@ public:
CpuManager& operator=(const CpuManager&) = delete;
CpuManager& operator=(CpuManager&&) = delete;
+ /// Sets if emulation is multicore or single core, must be set before Initialize
+ void SetMulticore(bool is_multicore) {
+ this->is_multicore = is_multicore;
+ }
void Initialize();
void Shutdown();
@@ -40,21 +44,34 @@ public:
std::function<void(void*)> GetSuspendThreadStartFunc();
void* GetStartFuncParamater();
+ std::size_t CurrentCore() const {
+ return current_core;
+ }
+
private:
static void GuestThreadFunction(void* cpu_manager);
static void GuestRewindFunction(void* cpu_manager);
static void IdleThreadFunction(void* cpu_manager);
static void SuspendThreadFunction(void* cpu_manager);
- void RunGuestThread();
- void RunGuestLoop();
- void RunIdleThread();
- void RunSuspendThread();
+ void MultiCoreRunGuestThread();
+ void MultiCoreRunGuestLoop();
+ void MultiCoreRunIdleThread();
+ void MultiCoreRunSuspendThread();
+ void MultiCorePause(bool paused);
+
+ void SingleCoreRunGuestThread();
+ void SingleCoreRunGuestLoop();
+ void SingleCoreRunIdleThread();
+ void SingleCoreRunSuspendThread();
+ void SingleCorePause(bool paused);
static void ThreadStart(CpuManager& cpu_manager, std::size_t core);
void RunThread(std::size_t core);
+ void PreemptSingleCore();
+
struct CoreData {
std::shared_ptr<Common::Fiber> host_context;
std::unique_ptr<Common::Event> enter_barrier;
@@ -70,6 +87,11 @@ private:
std::array<CoreData, Core::Hardware::NUM_CPU_CORES> core_data{};
+ bool is_multicore{};
+ std::size_t current_core{};
+ std::size_t preemption_count{};
+ static constexpr std::size_t max_cycle_runs = 5;
+
System& system;
};
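
The header keeps the thread entry points as static void(void*) trampolines, and GetGuestThreadStartFunc and friends still return a std::function<void(void*)>; the multicore/single-core decision is made inside each trampoline. A reduced sketch of that dispatch pattern, assuming a made-up class in place of CpuManager:

#include <cstdio>
#include <functional>

class DispatchSketch {
public:
    explicit DispatchSketch(bool multicore) : is_multicore{multicore} {}

    // Same shape as CpuManager::GuestThreadFunction: a static trampoline that
    // recovers the object from the opaque pointer and branches on the flag.
    static void GuestThreadFunction(void* self_) {
        auto* self = static_cast<DispatchSketch*>(self_);
        if (self->is_multicore) {
            self->MultiCoreRun();
        } else {
            self->SingleCoreRun();
        }
    }

    std::function<void(void*)> GetGuestThreadStartFunc() {
        return std::function<void(void*)>(GuestThreadFunction);
    }

private:
    void MultiCoreRun() {
        std::puts("multicore path");
    }
    void SingleCoreRun() {
        std::puts("single-core path");
    }

    bool is_multicore{};
};

int main() {
    DispatchSketch manager{false};
    const auto start = manager.GetGuestThreadStartFunc();
    start(&manager); // prints "single-core path"
    return 0;
}

Keeping the signature static means callers that only see the start function and its void* parameter (fibers, host threads) never need to know which mode is active.
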
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 721ab1e70..4a091ea38 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -113,6 +113,10 @@ struct KernelCore::Impl {
explicit Impl(Core::System& system, KernelCore& kernel)
: global_scheduler{kernel}, synchronization{system}, time_manager{system}, system{system} {}
+ void SetMulticore(bool is_multicore) {
+ this->is_multicore = is_multicore;
+ }
+
void Initialize(KernelCore& kernel) {
Shutdown();
@@ -237,6 +241,9 @@ struct KernelCore::Impl {
void RegisterCoreThread(std::size_t core_id) {
std::unique_lock lock{register_thread_mutex};
+ if (!is_multicore) {
+ single_core_thread_id = std::this_thread::get_id();
+ }
const std::thread::id this_id = std::this_thread::get_id();
const auto it = host_thread_ids.find(this_id);
ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
@@ -258,6 +265,11 @@ struct KernelCore::Impl {
u32 GetCurrentHostThreadID() const {
const std::thread::id this_id = std::this_thread::get_id();
+ if (!is_multicore) {
+ if (single_core_thread_id == this_id) {
+ return static_cast<u32>(system.GetCpuManager().CurrentCore());
+ }
+ }
const auto it = host_thread_ids.find(this_id);
if (it == host_thread_ids.end()) {
return Core::INVALID_HOST_THREAD_ID;
@@ -378,6 +390,9 @@ struct KernelCore::Impl {
std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
+ bool is_multicore{};
+ std::thread::id single_core_thread_id{};
+
// System context
Core::System& system;
};
@@ -387,6 +402,10 @@ KernelCore::~KernelCore() {
Shutdown();
}
+void KernelCore::SetMulticore(bool is_multicore) {
+ impl->SetMulticore(is_multicore);
+}
+
void KernelCore::Initialize() {
impl->Initialize(*this);
}
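
With only one host CPU thread in single-core mode, a fixed thread-id-to-core mapping no longer describes reality, so GetCurrentHostThreadID above special-cases the registered single-core thread and asks the CpuManager which core it is currently emulating. A simplified sketch of that lookup, with hypothetical types standing in for the kernel's bookkeeping:

#include <cstdint>
#include <thread>
#include <unordered_map>

struct ThreadIdSketch {
    static constexpr std::uint32_t INVALID_HOST_THREAD_ID = 0xFFFFFFFFu;

    bool is_multicore{};
    std::thread::id single_core_thread_id{};
    std::uint32_t current_core{}; // stands in for CpuManager::CurrentCore()
    std::unordered_map<std::thread::id, std::uint32_t> host_thread_ids;

    std::uint32_t GetCurrentHostThreadID() const {
        const std::thread::id this_id = std::this_thread::get_id();
        if (!is_multicore && this_id == single_core_thread_id) {
            // The single host thread "is" whichever core is being emulated
            // right now, not a fixed entry in the map.
            return current_core;
        }
        const auto it = host_thread_ids.find(this_id);
        return it == host_thread_ids.end() ? INVALID_HOST_THREAD_ID : it->second;
    }
};

int main() {
    ThreadIdSketch kernel_sketch;
    kernel_sketch.is_multicore = false;
    kernel_sketch.single_core_thread_id = std::this_thread::get_id();
    kernel_sketch.current_core = 2; // whichever core the CPU manager rotated to
    return kernel_sketch.GetCurrentHostThreadID() == 2 ? 0 : 1;
}
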
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 5d32a8329..162bbd2f8 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -65,6 +65,9 @@ public:
KernelCore(KernelCore&&) = delete;
KernelCore& operator=(KernelCore&&) = delete;
+ /// Sets if emulation is multicore or single core, must be set before Initialize
+ void SetMulticore(bool is_multicore);
+
/// Resets the kernel to a clean slate for use.
void Initialize();
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 4cb5d05e5..7def00768 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -715,8 +715,8 @@ struct Memory::Impl {
ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
break;
case Common::PageType::RasterizerCachedMemory: {
- u8* host_ptr{GetPointerFromVMA(vaddr)};
- system.GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(T));
+ u8* host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
+ system.GPU().InvalidateRegion(vaddr, sizeof(T));
T volatile* pointer = reinterpret_cast<T volatile*>(&host_ptr);
return Common::AtomicCompareAndSwap(pointer, data, expected);
break;
@@ -745,8 +745,8 @@ struct Memory::Impl {
ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
break;
case Common::PageType::RasterizerCachedMemory: {
- u8* host_ptr{GetPointerFromVMA(vaddr)};
- system.GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(u128));
+ u8* host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
+ system.GPU().InvalidateRegion(vaddr, sizeof(u128));
u64 volatile* pointer = reinterpret_cast<u64 volatile*>(&host_ptr);
return Common::AtomicCompareAndSwap(pointer, data, expected);
break;
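
Both atomic-write hunks stop translating the guest address into a cache pointer before invalidating: GetPointerFromRasterizerCachedMemory resolves the host pointer for the write itself, and the GPU is told which guest virtual-address range changed. A tiny sketch of the new call shape, with a stand-in for the rasterizer's invalidation entry point (illustrative, not the real VideoCore interface):

#include <cstdint>
#include <cstdio>

using VAddr = std::uint64_t;

// Stand-in for system.GPU().InvalidateRegion(): keyed by guest address now.
void InvalidateRegion(VAddr addr, std::uint64_t size) {
    std::printf("invalidate guest range [0x%llx, 0x%llx)\n",
                static_cast<unsigned long long>(addr),
                static_cast<unsigned long long>(addr + size));
}

int main() {
    const VAddr vaddr = 0x80000000;
    // Before this patch: InvalidateRegion(ToCacheAddr(host_ptr), sizeof(u32));
    // After: the guest virtual address itself identifies the dirty range.
    InvalidateRegion(vaddr, sizeof(std::uint32_t));
    return 0;
}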