Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp |  6
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.h             |  3
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp                            |  5
-rw-r--r--  src/core/hle/kernel/k_auto_object.h                        |  7
-rw-r--r--  src/core/hle/kernel/k_code_memory.cpp                      | 11
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp                       | 11
-rw-r--r--  src/core/hle/kernel/k_page_table.h                         |  5
-rw-r--r--  src/core/hle/kernel/k_process.h                            |  2
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp                        |  8
-rw-r--r--  src/core/hle/kernel/k_scheduler_lock.h                     |  3
-rw-r--r--  src/core/hle/kernel/k_server_port.cpp                      |  6
-rw-r--r--  src/core/hle/kernel/k_server_session.cpp                   |  3
-rw-r--r--  src/core/hle/kernel/k_spin_lock.cpp                        | 39
-rw-r--r--  src/core/hle/kernel/k_spin_lock.h                          |  4
-rw-r--r--  src/core/hle/kernel/k_thread.cpp                           | 39
-rw-r--r--  src/core/hle/kernel/k_thread.h                             | 16
-rw-r--r--  src/core/hle/kernel/kernel.cpp                             | 49
-rw-r--r--  src/core/hle/kernel/kernel.h                               |  8
-rw-r--r--  src/core/hle/kernel/physical_core.cpp                      |  3
-rw-r--r--  src/core/hle/kernel/physical_core.h                        |  7
-rw-r--r--  src/core/hle/kernel/svc.cpp                                |  3
21 files changed, 123 insertions(+), 115 deletions(-)
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
index 8027bec00..7765e7848 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -148,9 +148,9 @@ u64 GenerateUniformRange(u64 min, u64 max, F f) {
} // Anonymous namespace
u64 KSystemControl::GenerateRandomU64() {
- static std::random_device device;
- static std::mt19937 gen(device());
- static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
+ std::random_device device;
+ std::mt19937 gen(device());
+ std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
return distribution(gen);
}
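
Note: dropping `static` here fixes a data race: every caller previously shared one `std::mt19937` with no synchronization. The per-call locals are race-free but reseed from `std::random_device` on each invocation. A minimal standalone sketch of a middle ground, not the committed code, assuming race-freedom is the only requirement (`u64` is defined locally to stand in for yuzu's common_types alias):

    #include <cstdint>
    #include <limits>
    #include <random>

    using u64 = std::uint64_t;

    // One generator per thread: no shared state to race on, and the seeding
    // cost of std::random_device is paid once per thread, not once per call.
    u64 GenerateRandomU64ThreadLocal() {
        thread_local std::mt19937_64 gen{std::random_device{}()};
        thread_local std::uniform_int_distribution<u64> dist(1, std::numeric_limits<u64>::max());
        return dist(gen);
    }
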
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
index 6f44b534f..47425a3a1 100644
--- a/src/core/hle/kernel/global_scheduler_context.h
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -8,7 +8,6 @@
#include <vector>
#include "common/common_types.h"
-#include "common/spin_lock.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_priority_queue.h"
#include "core/hle/kernel/k_scheduler_lock.h"
@@ -80,7 +79,7 @@ private:
/// Lists all thread ids that aren't deleted/etc.
std::vector<KThread*> thread_list;
- Common::SpinLock global_list_guard{};
+ std::mutex global_list_guard;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index b547a3463..5828ac923 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -51,7 +51,7 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co
LOG_CRITICAL(IPC, "object_id {} is too big!", object_id);
return false;
}
- return DomainHandler(object_id - 1).lock() != nullptr;
+ return !DomainHandler(object_id - 1).expired();
} else {
return session_handler != nullptr;
}
@@ -59,6 +59,9 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co
void SessionRequestHandler::ClientConnected(KServerSession* session) {
session->ClientConnected(shared_from_this());
+
+ // Ensure our server session is tracked globally.
+ kernel.RegisterServerObject(session);
}
void SessionRequestHandler::ClientDisconnected(KServerSession* session) {
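
Note: the `expired()` form is equivalent to the old null check but cheaper: `lock()` must materialize a `shared_ptr` (an atomic ref-count increment and decrement) just to compare it against `nullptr`, while `expired()` only reads the control block. A standalone illustration:

    #include <memory>

    bool HasHandler(const std::weak_ptr<void>& handler) {
        // Before: return handler.lock() != nullptr;  // constructs a shared_ptr
        return !handler.expired();  // inspects the control block only
    }
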
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index 05779f2d5..423e8d8f5 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -89,9 +89,7 @@ public:
explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {
RegisterWithKernel();
}
- virtual ~KAutoObject() {
- UnregisterWithKernel();
- }
+ virtual ~KAutoObject() = default;
static KAutoObject* Create(KAutoObject* ptr);
@@ -163,11 +161,12 @@ public:
do {
ASSERT(cur_ref_count > 0);
} while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1,
- std::memory_order_relaxed));
+ std::memory_order_acq_rel));
// If ref count hits zero, destroy the object.
if (cur_ref_count - 1 == 0) {
this->Destroy();
+ this->UnregisterWithKernel();
}
}
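
Note: two things change in `Close()`: the decrement is promoted from relaxed to acq_rel, so the thread dropping the last reference observes every write made before other threads released theirs, and kernel unregistration moves from the destructor to the point of destruction. A standalone sketch of the release pattern, assuming a simple intrusive count:

    #include <atomic>
    #include <cassert>

    struct RefCounted {
        std::atomic<unsigned> ref_count{1};

        void Close() {
            unsigned cur = ref_count.load(std::memory_order_relaxed);
            do {
                assert(cur > 0);
            } while (!ref_count.compare_exchange_weak(cur, cur - 1, std::memory_order_acq_rel));
            // compare_exchange_weak reloads cur on failure, so on success cur
            // holds the pre-decrement value: whoever took it from 1 to 0 is
            // the unique owner and may tear the object down.
            if (cur - 1 == 0) {
                delete this;
            }
        }
    };

Usage assumes heap allocation (`new RefCounted`), mirroring how KAutoObject instances are created and destroyed.
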
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index 63bbe02e9..09eaf004c 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -35,9 +35,14 @@ ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr
R_TRY(page_table.LockForCodeMemory(addr, size))
// Clear the memory.
- for (const auto& block : m_page_group.Nodes()) {
- std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
- }
+ //
+ // FIXME: this ends up clobbering address ranges outside the scope of the mapping within
+ // guest memory, and is not specifically required if the guest program is correctly
+ // written, so disable until this is further investigated.
+ //
+ // for (const auto& block : m_page_group.Nodes()) {
+ // std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
+ // }
// Set remaining tracking members.
m_address = addr;
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 599013cf6..47ea3c89c 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -346,7 +346,8 @@ ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::
return ResultSuccess;
}
-ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
+ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+ ICacheInvalidationStrategy icache_invalidation_strategy) {
// Validate the mapping request.
R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
ResultInvalidMemoryRegion);
@@ -396,7 +397,11 @@ ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std
bool reprotected_pages = false;
SCOPE_EXIT({
if (reprotected_pages && any_code_pages) {
- system.InvalidateCpuInstructionCacheRange(dst_address, size);
+ if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) {
+ system.InvalidateCpuInstructionCacheRange(dst_address, size);
+ } else {
+ system.InvalidateCpuInstructionCaches();
+ }
}
});
@@ -563,6 +568,8 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
block_manager->Update(dst_addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
KMemoryAttribute::None);
+ system.InvalidateCpuInstructionCaches();
+
return ResultSuccess;
}
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index bfabdf38c..dd6022975 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -26,6 +26,8 @@ class KMemoryBlockManager;
class KPageTable final {
public:
+ enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll };
+
YUZU_NON_COPYABLE(KPageTable);
YUZU_NON_MOVEABLE(KPageTable);
@@ -38,7 +40,8 @@ public:
ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
KMemoryPermission perm);
ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
- ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
+ ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+ ICacheInvalidationStrategy icache_invalidation_strategy);
ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
VAddr src_addr);
ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 48b17fc74..9f171e3da 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -422,7 +422,7 @@ private:
bool is_64bit_process = true;
/// Total running time for the process in ticks.
- u64 total_process_running_time_ticks = 0;
+ std::atomic<u64> total_process_running_time_ticks = 0;
/// Per-process handle table for storing created object handles in.
KHandleTable handle_table;
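
Note: `total_process_running_time_ticks` is updated by the scheduler during context switches and may be read concurrently from other threads, so a plain `u64` was racy. For a monotonic counter, relaxed atomics suffice; a standalone sketch (names are illustrative, not yuzu's):

    #include <atomic>
    #include <cstdint>

    using u64 = std::uint64_t;

    std::atomic<u64> running_time_ticks{0};

    // Writer (scheduler side): accumulate with no ordering requirements.
    void AddTicks(u64 delta) {
        running_time_ticks.fetch_add(delta, std::memory_order_relaxed);
    }

    // Reader (query side): the load cannot tear, unlike a plain u64 on
    // platforms without guaranteed atomic 64-bit accesses.
    u64 GetTicks() {
        return running_time_ticks.load(std::memory_order_relaxed);
    }
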
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 6c0bb1672..526eb4b70 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -705,7 +705,7 @@ void KScheduler::Unload(KThread* thread) {
prev_thread = nullptr;
}
- thread->context_guard.Unlock();
+ thread->context_guard.unlock();
}
void KScheduler::Reload(KThread* thread) {
@@ -794,13 +794,13 @@ void KScheduler::SwitchToCurrent() {
do {
auto next_thread = current_thread.load();
if (next_thread != nullptr) {
- const auto locked = next_thread->context_guard.TryLock();
+ const auto locked = next_thread->context_guard.try_lock();
if (state.needs_scheduling.load()) {
- next_thread->context_guard.Unlock();
+ next_thread->context_guard.unlock();
break;
}
if (next_thread->GetActiveCore() != core_id) {
- next_thread->context_guard.Unlock();
+ next_thread->context_guard.unlock();
break;
}
if (!locked) {
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 93c47f1b1..016e0a818 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -4,6 +4,7 @@
#pragma once
+#include <atomic>
#include "common/assert.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/k_thread.h"
@@ -75,7 +76,7 @@ private:
KernelCore& kernel;
KAlignedSpinLock spin_lock{};
s32 lock_count{};
- KThread* owner_thread{};
+ std::atomic<KThread*> owner_thread{};
};
} // namespace Kernel
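
Note: making `owner_thread` atomic suggests it is read without holding `spin_lock`, e.g. by an is-locked-by-current-thread style check; a plain pointer read racing `Lock()`'s store would be undefined behavior. A hedged standalone sketch of that access pattern (the checking method is an assumption, it does not appear in this hunk):

    #include <atomic>

    struct Thread;

    struct SchedulerLockShape {
        std::atomic<Thread*> owner_thread{nullptr};

        // Safe without holding the lock: an atomic load cannot tear and is
        // defined even while another core concurrently stores a new owner.
        bool IsLockedBy(const Thread* current) const {
            return owner_thread.load(std::memory_order_relaxed) == current;
        }
    };
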
diff --git a/src/core/hle/kernel/k_server_port.cpp b/src/core/hle/kernel/k_server_port.cpp
index 433fc98e1..e66c0c992 100644
--- a/src/core/hle/kernel/k_server_port.cpp
+++ b/src/core/hle/kernel/k_server_port.cpp
@@ -62,6 +62,12 @@ void KServerPort::Destroy() {
// Close our reference to our parent.
parent->Close();
+
+ // Release host emulation members.
+ session_handler.reset();
+
+ // Ensure that the global list tracking server objects does not hold on to a reference.
+ kernel.UnregisterServerObject(this);
}
bool KServerPort::IsSignaled() const {
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index 30c56ff29..7ac2ef254 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -49,6 +49,9 @@ void KServerSession::Destroy() {
// Release host emulation members.
manager.reset();
+
+ // Ensure that the global list tracking server objects does not hold on to a reference.
+ kernel.UnregisterServerObject(this);
}
void KServerSession::OnClientClosed() {
diff --git a/src/core/hle/kernel/k_spin_lock.cpp b/src/core/hle/kernel/k_spin_lock.cpp
index 4412aa4bb..527ff0f9f 100644
--- a/src/core/hle/kernel/k_spin_lock.cpp
+++ b/src/core/hle/kernel/k_spin_lock.cpp
@@ -4,51 +4,18 @@
#include "core/hle/kernel/k_spin_lock.h"
-#if _MSC_VER
-#include <intrin.h>
-#if _M_AMD64
-#define __x86_64__ 1
-#endif
-#if _M_ARM64
-#define __aarch64__ 1
-#endif
-#else
-#if __x86_64__
-#include <xmmintrin.h>
-#endif
-#endif
-
-namespace {
-
-void ThreadPause() {
-#if __x86_64__
- _mm_pause();
-#elif __aarch64__ && _MSC_VER
- __yield();
-#elif __aarch64__
- asm("yield");
-#endif
-}
-
-} // namespace
-
namespace Kernel {
void KSpinLock::Lock() {
- while (lck.test_and_set(std::memory_order_acquire)) {
- ThreadPause();
- }
+ lck.lock();
}
void KSpinLock::Unlock() {
- lck.clear(std::memory_order_release);
+ lck.unlock();
}
bool KSpinLock::TryLock() {
- if (lck.test_and_set(std::memory_order_acquire)) {
- return false;
- }
- return true;
+ return lck.try_lock();
}
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_spin_lock.h b/src/core/hle/kernel/k_spin_lock.h
index 4d87d006a..7868b25a5 100644
--- a/src/core/hle/kernel/k_spin_lock.h
+++ b/src/core/hle/kernel/k_spin_lock.h
@@ -4,7 +4,7 @@
#pragma once
-#include <atomic>
+#include <mutex>
#include "core/hle/kernel/k_scoped_lock.h"
@@ -25,7 +25,7 @@ public:
[[nodiscard]] bool TryLock();
private:
- std::atomic_flag lck = ATOMIC_FLAG_INIT;
+ std::mutex lck;
};
// TODO(bunnei): Alias for now, in case we want to implement these accurately in the future.
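
Note: with the `std::atomic_flag` loop gone, `KSpinLock` is now a thin shim over `std::mutex`: a blocked waiter sleeps in the OS instead of burning a host core, which matters when the emulator runs more guest threads than there are host cores. The kernel-facing interface is unchanged, so `KScopedSpinLock` callers compile as before. The resulting shape, as a standalone sketch:

    #include <mutex>

    class HostBackedSpinLock {
    public:
        void Lock() { lck.lock(); }
        void Unlock() { lck.unlock(); }
        [[nodiscard]] bool TryLock() { return lck.try_lock(); }

    private:
        std::mutex lck;
    };
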
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 94c8faf68..af71987e8 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -723,10 +723,10 @@ void KThread::UpdateState() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Set our suspend flags in state.
- const auto old_state = thread_state;
+ const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
const auto new_state =
static_cast<ThreadState>(this->GetSuspendFlags()) | (old_state & ThreadState::Mask);
- thread_state = new_state;
+ thread_state.store(new_state, std::memory_order_relaxed);
// Note the state change in scheduler.
if (new_state != old_state) {
@@ -738,8 +738,8 @@ void KThread::Continue() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Clear our suspend flags in state.
- const auto old_state = thread_state;
- thread_state = old_state & ThreadState::Mask;
+ const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
+ thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed);
// Note the state change in scheduler.
KScheduler::OnThreadStateChanged(kernel, this, old_state);
@@ -1079,17 +1079,10 @@ void KThread::IfDummyThreadTryWait() {
return;
}
- // Block until we can grab the lock.
- KScopedSpinLock lk{dummy_wait_lock};
-}
-
-void KThread::IfDummyThreadBeginWait() {
- if (!IsDummyThread()) {
- return;
- }
-
- // Ensure the thread will block when IfDummyThreadTryWait is called.
- dummy_wait_lock.Lock();
+ // Block until we are no longer waiting.
+ std::unique_lock lk(dummy_wait_lock);
+ dummy_wait_cv.wait(
+ lk, [&] { return GetState() != ThreadState::Waiting || kernel.IsShuttingDown(); });
}
void KThread::IfDummyThreadEndWait() {
@@ -1097,8 +1090,8 @@ void KThread::IfDummyThreadEndWait() {
return;
}
- // Ensure the thread will no longer block.
- dummy_wait_lock.Unlock();
+ // Wake up the waiting thread.
+ dummy_wait_cv.notify_one();
}
void KThread::BeginWait(KThreadQueue* queue) {
@@ -1107,9 +1100,6 @@ void KThread::BeginWait(KThreadQueue* queue) {
// Set our wait queue.
wait_queue = queue;
-
- // Special case for dummy threads to ensure they block.
- IfDummyThreadBeginWait();
}
void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
@@ -1158,10 +1148,11 @@ void KThread::SetState(ThreadState state) {
SetMutexWaitAddressForDebugging({});
SetWaitReasonForDebugging({});
- const ThreadState old_state = thread_state;
- thread_state =
- static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
- if (thread_state != old_state) {
+ const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
+ thread_state.store(
+ static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)),
+ std::memory_order_relaxed);
+ if (thread_state.load(std::memory_order_relaxed) != old_state) {
KScheduler::OnThreadStateChanged(kernel, this, old_state);
}
}
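
Note: the dummy-thread handshake changes shape: instead of a lock taken in `IfDummyThreadBeginWait` and held across the wait, the thread now sleeps on a condition variable until its state leaves `Waiting` (or the kernel shuts down), which removes the need for the begin-wait half entirely. A standalone sketch of the pattern, reduced to one waiter and a boolean predicate:

    #include <condition_variable>
    #include <mutex>

    std::mutex wait_lock;
    std::condition_variable wait_cv;
    bool waiting = true;

    // Dummy-thread side: the predicate overload of wait() rechecks the
    // condition on every wakeup, so spurious wakeups are harmless.
    void TryWait() {
        std::unique_lock lk(wait_lock);
        wait_cv.wait(lk, [] { return !waiting; });
    }

    // Waker side: publish the state change, then notify.
    void EndWait() {
        {
            std::scoped_lock lk(wait_lock);
            waiting = false;
        }
        wait_cv.notify_one();
    }
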
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index f46db7298..4892fdf76 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -5,6 +5,9 @@
#pragma once
#include <array>
+#include <atomic>
+#include <condition_variable>
+#include <mutex>
#include <span>
#include <string>
#include <utility>
@@ -14,6 +17,7 @@
#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
+#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
#include "core/hle/kernel/k_affinity_mask.h"
#include "core/hle/kernel/k_light_lock.h"
@@ -255,11 +259,11 @@ public:
[[nodiscard]] std::shared_ptr<Common::Fiber>& GetHostContext();
[[nodiscard]] ThreadState GetState() const {
- return thread_state & ThreadState::Mask;
+ return thread_state.load(std::memory_order_relaxed) & ThreadState::Mask;
}
[[nodiscard]] ThreadState GetRawState() const {
- return thread_state;
+ return thread_state.load(std::memory_order_relaxed);
}
void SetState(ThreadState state);
@@ -641,7 +645,6 @@ public:
// blocking as needed.
void IfDummyThreadTryWait();
- void IfDummyThreadBeginWait();
void IfDummyThreadEndWait();
private:
@@ -751,7 +754,7 @@ private:
KAffinityMask original_physical_affinity_mask{};
s32 original_physical_ideal_core_id{};
s32 num_core_migration_disables{};
- ThreadState thread_state{};
+ std::atomic<ThreadState> thread_state{};
std::atomic<bool> termination_requested{};
bool wait_cancelled{};
bool cancellable{};
@@ -761,13 +764,14 @@ private:
s8 priority_inheritance_count{};
bool resource_limit_release_hint{};
StackParameters stack_parameters{};
- KSpinLock context_guard{};
- KSpinLock dummy_wait_lock{};
+ Common::SpinLock context_guard{};
// For emulation
std::shared_ptr<Common::Fiber> host_context{};
bool is_single_core{};
ThreadType thread_type{};
+ std::mutex dummy_wait_lock;
+ std::condition_variable dummy_wait_cv;
// For debugging
std::vector<KSynchronizationObject*> wait_objects_for_debugging;
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 134a0b8e9..d840d44e6 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -85,7 +85,7 @@ struct KernelCore::Impl {
void InitializeCores() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
- cores[core_id].Initialize(current_process->Is64BitProcess());
+ cores[core_id].Initialize((*current_process).Is64BitProcess());
system.Memory().SetCurrentPageTable(*current_process, core_id);
}
}
@@ -96,15 +96,15 @@ struct KernelCore::Impl {
process_list.clear();
- // Close all open server ports.
- std::unordered_set<KServerPort*> server_ports_;
+ // Close all open server sessions and ports.
+ std::unordered_set<KAutoObject*> server_objects_;
{
- std::scoped_lock lk{server_ports_lock};
- server_ports_ = server_ports;
- server_ports.clear();
+ std::scoped_lock lk(server_objects_lock);
+ server_objects_ = server_objects;
+ server_objects.clear();
}
- for (auto* server_port : server_ports_) {
- server_port->Close();
+ for (auto* server_object : server_objects_) {
+ server_object->Close();
}
// Ensures all service threads gracefully shutdown.
@@ -168,11 +168,11 @@ struct KernelCore::Impl {
// Shutdown all processes.
if (current_process) {
- current_process->Finalize();
+ (*current_process).Finalize();
// current_process->Close();
// TODO: The current process should be destroyed based on accurate ref counting after
// calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
- current_process->Destroy();
+ (*current_process).Destroy();
current_process = nullptr;
}
@@ -659,13 +659,20 @@ struct KernelCore::Impl {
}
KClientPort* port = &search->second(system.ServiceManager(), system);
- {
- std::scoped_lock lk{server_ports_lock};
- server_ports.insert(&port->GetParent()->GetServerPort());
- }
+ RegisterServerObject(&port->GetParent()->GetServerPort());
return port;
}
+ void RegisterServerObject(KAutoObject* server_object) {
+ std::scoped_lock lk(server_objects_lock);
+ server_objects.insert(server_object);
+ }
+
+ void UnregisterServerObject(KAutoObject* server_object) {
+ std::scoped_lock lk(server_objects_lock);
+ server_objects.erase(server_object);
+ }
+
std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(KernelCore& kernel,
const std::string& name) {
auto service_thread = std::make_shared<Kernel::ServiceThread>(kernel, 1, name);
@@ -693,7 +700,7 @@ struct KernelCore::Impl {
service_threads_manager.QueueWork([this]() { service_threads.clear(); });
}
- std::mutex server_ports_lock;
+ std::mutex server_objects_lock;
std::mutex registered_objects_lock;
std::mutex registered_in_use_objects_lock;
@@ -704,7 +711,7 @@ struct KernelCore::Impl {
// Lists all processes that exist in the current session.
std::vector<KProcess*> process_list;
- KProcess* current_process{};
+ std::atomic<KProcess*> current_process{};
std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
Kernel::TimeManager time_manager;
@@ -723,7 +730,7 @@ struct KernelCore::Impl {
/// the ConnectToPort SVC.
std::unordered_map<std::string, ServiceInterfaceFactory> service_interface_factory;
NamedPortTable named_ports;
- std::unordered_set<KServerPort*> server_ports;
+ std::unordered_set<KAutoObject*> server_objects;
std::unordered_set<KAutoObject*> registered_objects;
std::unordered_set<KAutoObject*> registered_in_use_objects;
@@ -928,6 +935,14 @@ KClientPort* KernelCore::CreateNamedServicePort(std::string name) {
return impl->CreateNamedServicePort(std::move(name));
}
+void KernelCore::RegisterServerObject(KAutoObject* server_object) {
+ impl->RegisterServerObject(server_object);
+}
+
+void KernelCore::UnregisterServerObject(KAutoObject* server_object) {
+ impl->UnregisterServerObject(server_object);
+}
+
void KernelCore::RegisterKernelObject(KAutoObject* object) {
std::scoped_lock lk{impl->registered_objects_lock};
impl->registered_objects.insert(object);
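
Note: the shutdown path above copies the registry under `server_objects_lock` and only then calls `Close()` on each entry. The ordering matters: `Close()` can drop the last reference, which runs `Destroy()` and thus `UnregisterServerObject()`, and that callback retakes the same mutex. A standalone sketch of the snapshot-then-close pattern, with `Object` standing in for `KAutoObject`:

    #include <mutex>
    #include <unordered_set>

    struct Object;

    std::mutex registry_lock;
    std::unordered_set<Object*> registry;

    void Unregister(Object* obj) {
        std::scoped_lock lk(registry_lock);
        registry.erase(obj);
    }

    struct Object {
        void Close() {
            Unregister(this);  // retakes registry_lock
            delete this;
        }
    };

    void CloseAll() {
        // Snapshot under the lock, close outside it: Close() -> Unregister()
        // re-locks registry_lock and would deadlock if we still held it here
        // (std::mutex is non-recursive).
        std::unordered_set<Object*> snapshot;
        {
            std::scoped_lock lk(registry_lock);
            snapshot.swap(registry);
        }
        for (auto* obj : snapshot) {
            obj->Close();
        }
    }
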
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 24e26fa44..d709c368b 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -195,6 +195,14 @@ public:
/// Opens a port to a service previously registered with RegisterNamedService.
KClientPort* CreateNamedServicePort(std::string name);
+ /// Registers a server session or port with the global emulation state, to be freed on shutdown.
+ /// This is necessary because we do not emulate processes for HLE sessions and ports.
+ void RegisterServerObject(KAutoObject* server_object);
+
+ /// Unregisters a server session or port previously registered with RegisterServerObject when
+ /// it was destroyed during the current emulation session.
+ void UnregisterServerObject(KAutoObject* server_object);
+
/// Registers all kernel objects with the global emulation state, this is purely for tracking
/// leaks after emulation has been shutdown.
void RegisterKernelObject(KAutoObject* object);
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index 18a5f40f8..cc49e8c7e 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -2,7 +2,6 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include "common/spin_lock.h"
#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/dynarmic/arm_dynarmic_32.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
@@ -16,7 +15,7 @@ namespace Kernel {
PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_,
Core::CPUInterrupts& interrupts_)
: core_index{core_index_}, system{system_}, scheduler{scheduler_},
- interrupts{interrupts_}, guard{std::make_unique<Common::SpinLock>()} {
+ interrupts{interrupts_}, guard{std::make_unique<std::mutex>()} {
#ifdef ARCHITECTURE_x86_64
// TODO(bunnei): Initialization relies on a core being available. We may later replace this with
// a 32-bit instance of Dynarmic. This should be abstracted out to a CPU manager.
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index 16a032e89..f2112fc1d 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -6,13 +6,10 @@
#include <cstddef>
#include <memory>
+#include <mutex>
#include "core/arm/arm_interface.h"
-namespace Common {
-class SpinLock;
-}
-
namespace Kernel {
class KScheduler;
} // namespace Kernel
@@ -91,7 +88,7 @@ private:
Core::System& system;
Kernel::KScheduler& scheduler;
Core::CPUInterrupts& interrupts;
- std::unique_ptr<Common::SpinLock> guard;
+ std::unique_ptr<std::mutex> guard;
std::unique_ptr<Core::ARM_Interface> arm_interface;
};
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 976d63234..0c86435b5 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1713,7 +1713,8 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
return ResultInvalidMemoryRegion;
}
- return page_table.UnmapCodeMemory(dst_address, src_address, size);
+ return page_table.UnmapCodeMemory(dst_address, src_address, size,
+ KPageTable::ICacheInvalidationStrategy::InvalidateAll);
}
/// Exits the current process