Diffstat (limited to 'src/core/hle/kernel')
 src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp | 6
 src/core/hle/kernel/hle_ipc.cpp | 11
 src/core/hle/kernel/hle_ipc.h | 8
 src/core/hle/kernel/k_auto_object.h | 2
 src/core/hle/kernel/k_process.h | 2
 src/core/hle/kernel/k_scheduler_lock.h | 3
 src/core/hle/kernel/k_thread.cpp | 4
 src/core/hle/kernel/k_thread.h | 3
 src/core/hle/kernel/kernel.cpp | 38
 src/core/hle/kernel/kernel.h | 16
 src/core/hle/kernel/physical_core.cpp | 1
 src/core/hle/kernel/time_manager.cpp | 4
 12 files changed, 68 insertions(+), 30 deletions(-)
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
index 8027bec00..7765e7848 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -148,9 +148,9 @@ u64 GenerateUniformRange(u64 min, u64 max, F f) {
} // Anonymous namespace
u64 KSystemControl::GenerateRandomU64() {
- static std::random_device device;
- static std::mt19937 gen(device());
- static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
+ std::random_device device;
+ std::mt19937 gen(device());
+ std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
return distribution(gen);
}
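Dropping the static qualifiers means each call builds its own generator, so there is no shared engine state to race on when GenerateRandomU64() is reached from several host threads; the trade-off is reseeding from std::random_device on every call. A hedged alternative sketch (not the committed change) that keeps one engine per thread instead:

#include <cstdint>
#include <limits>
#include <random>

using u64 = std::uint64_t;  // assumption: matches the project's u64 alias

u64 GenerateRandomU64PerThread() {
    // One engine per host thread: no data race, and no per-call reseeding.
    thread_local std::mt19937_64 gen{std::random_device{}()};
    thread_local std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
    return distribution(gen);
}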
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 42d1b0e31..b547a3463 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -24,8 +24,15 @@
namespace Kernel {
-SessionRequestHandler::SessionRequestHandler(KernelCore& kernel_, const char* service_name_)
- : kernel{kernel_}, service_thread{kernel.CreateServiceThread(service_name_)} {}
+SessionRequestHandler::SessionRequestHandler(KernelCore& kernel_, const char* service_name_,
+ ServiceThreadType thread_type)
+ : kernel{kernel_} {
+ if (thread_type == ServiceThreadType::CreateNew) {
+ service_thread = kernel.CreateServiceThread(service_name_);
+ } else {
+ service_thread = kernel.GetDefaultServiceThread();
+ }
+}
SessionRequestHandler::~SessionRequestHandler() {
kernel.ReleaseServiceThread(service_thread);
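The constructor now chooses between a dedicated per-session worker and a shared default one. A self-contained sketch of that selection pattern with stand-in types (Worker, WorkerPool and Handler are illustrative, not yuzu's ServiceThread/KernelCore/SessionRequestHandler):

#include <memory>
#include <string>
#include <vector>

enum class ServiceThreadType { Default, CreateNew };

struct Worker {};

class WorkerPool {
public:
    std::weak_ptr<Worker> CreateWorker(const std::string& /*name*/) {
        return workers.emplace_back(std::make_shared<Worker>());
    }
    std::weak_ptr<Worker> DefaultWorker() const { return default_worker; }

private:
    std::vector<std::shared_ptr<Worker>> workers;
    std::shared_ptr<Worker> default_worker = std::make_shared<Worker>();
};

class Handler {
public:
    // Most handlers pass Default; only handlers that block on the host ask for CreateNew.
    Handler(WorkerPool& pool, const std::string& name, ServiceThreadType thread_type)
        : worker{thread_type == ServiceThreadType::CreateNew ? pool.CreateWorker(name)
                                                             : pool.DefaultWorker()} {}

private:
    std::weak_ptr<Worker> worker;
};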
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index 670cc741c..640146137 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -33,6 +33,11 @@ namespace Service {
class ServiceFrameworkBase;
}
+enum class ServiceThreadType {
+ Default,
+ CreateNew,
+};
+
namespace Kernel {
class Domain;
@@ -57,7 +62,8 @@ enum class ThreadWakeupReason;
*/
class SessionRequestHandler : public std::enable_shared_from_this<SessionRequestHandler> {
public:
- SessionRequestHandler(KernelCore& kernel, const char* service_name_);
+ SessionRequestHandler(KernelCore& kernel_, const char* service_name_,
+ ServiceThreadType thread_type);
virtual ~SessionRequestHandler();
/**
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index 05779f2d5..abdb8ae7c 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -163,7 +163,7 @@ public:
do {
ASSERT(cur_ref_count > 0);
} while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1,
- std::memory_order_relaxed));
+ std::memory_order_acq_rel));
// If ref count hits zero, destroy the object.
if (cur_ref_count - 1 == 0) {
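Strengthening the decrement from relaxed to acq_rel follows the usual shared-ownership release rule: the thread that drops the last reference must observe every write other threads made before dropping theirs, or the destructor can run against stale data. A simplified, self-contained sketch of the same rule (the committed code keeps a CAS loop so it can assert the count stays positive):

#include <atomic>

class RefCounted {
public:
    void Open() {
        // Taking another reference needs no ordering guarantees.
        m_ref_count.fetch_add(1, std::memory_order_relaxed);
    }

    void Close() {
        // Release publishes this thread's writes; acquire makes the final
        // decrementer see everyone else's writes before destruction.
        if (m_ref_count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
            delete this;  // object must be heap-allocated in this sketch
        }
    }

private:
    std::atomic<int> m_ref_count{1};
};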
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 48b17fc74..9f171e3da 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -422,7 +422,7 @@ private:
bool is_64bit_process = true;
/// Total running time for the process in ticks.
- u64 total_process_running_time_ticks = 0;
+ std::atomic<u64> total_process_running_time_ticks = 0;
/// Per-process handle table for storing created object handles in.
KHandleTable handle_table;
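Making the running-time counter a std::atomic<u64> suggests it is bumped and read from different host threads; a field like this only needs atomicity, not ordering, so relaxed accesses would suffice (note that a plain += on a std::atomic is sequentially consistent). A small sketch with illustrative names:

#include <atomic>
#include <cstdint>

std::atomic<std::uint64_t> total_running_time_ticks{0};

void AddRunTicks(std::uint64_t ticks) {
    // A statistics counter: atomic so concurrent updates are not lost,
    // relaxed because nothing else is ordered against it.
    total_running_time_ticks.fetch_add(ticks, std::memory_order_relaxed);
}

std::uint64_t ReadRunTicks() {
    return total_running_time_ticks.load(std::memory_order_relaxed);
}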
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 93c47f1b1..016e0a818 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -4,6 +4,7 @@
#pragma once
+#include <atomic>
#include "common/assert.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/k_thread.h"
@@ -75,7 +76,7 @@ private:
KernelCore& kernel;
KAlignedSpinLock spin_lock{};
s32 lock_count{};
- KThread* owner_thread{};
+ std::atomic<KThread*> owner_thread{};
};
} // namespace Kernel
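Turning owner_thread into an atomic suggests the owner is inspected (an "is this lock held by the current thread?" style check) by threads that do not hold spin_lock, which would otherwise be a data race on a plain pointer. A self-contained sketch of that shape, not yuzu's actual KSchedulerLock API:

#include <atomic>
#include <thread>

class RecursiveSpinLock {
public:
    void Lock() {
        const auto me = std::this_thread::get_id();
        if (owner_thread.load(std::memory_order_acquire) == me) {
            ++lock_count;  // re-entrant acquire by the current owner
            return;
        }
        while (flag.test_and_set(std::memory_order_acquire)) {
            // spin until the previous owner releases
        }
        owner_thread.store(me, std::memory_order_relaxed);
        lock_count = 1;
    }

    void Unlock() {
        if (--lock_count == 0) {
            owner_thread.store(std::thread::id{}, std::memory_order_relaxed);
            flag.clear(std::memory_order_release);
        }
    }

    bool IsLockedByCurrentThread() const {
        // Read without taking the spin lock; this is what the atomic protects.
        return owner_thread.load(std::memory_order_acquire) == std::this_thread::get_id();
    }

private:
    std::atomic_flag flag = ATOMIC_FLAG_INIT;
    std::atomic<std::thread::id> owner_thread{};
    int lock_count{};
};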
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 94c8faf68..d3bb1c871 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -723,7 +723,7 @@ void KThread::UpdateState() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Set our suspend flags in state.
- const auto old_state = thread_state;
+ const ThreadState old_state = thread_state;
const auto new_state =
static_cast<ThreadState>(this->GetSuspendFlags()) | (old_state & ThreadState::Mask);
thread_state = new_state;
@@ -738,7 +738,7 @@ void KThread::Continue() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Clear our suspend flags in state.
- const auto old_state = thread_state;
+ const ThreadState old_state = thread_state;
thread_state = old_state & ThreadState::Mask;
// Note the state change in scheduler.
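Spelling out ThreadState instead of auto is what keeps these lines compiling once thread_state becomes std::atomic<ThreadState>: auto would deduce the atomic type and try to copy it (its copy constructor is deleted), while the explicit type forces a plain load through the atomic's conversion operator. A minimal illustration with made-up enumerator values:

#include <atomic>

enum class ThreadState : unsigned { Initialized = 0, Runnable = 1 };

std::atomic<ThreadState> thread_state{ThreadState::Initialized};

void Example() {
    // const auto old_state = thread_state;      // ill-formed: copies the atomic itself
    const ThreadState old_state = thread_state;  // OK: implicit load() of the enum value
    (void)old_state;
}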
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index f46db7298..d0fd85130 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -5,6 +5,7 @@
#pragma once
#include <array>
+#include <atomic>
#include <span>
#include <string>
#include <utility>
@@ -751,7 +752,7 @@ private:
KAffinityMask original_physical_affinity_mask{};
s32 original_physical_ideal_core_id{};
s32 num_core_migration_disables{};
- ThreadState thread_state{};
+ std::atomic<ThreadState> thread_state{};
std::atomic<bool> termination_requested{};
bool wait_cancelled{};
bool cancellable{};
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 34da7c23b..481a0d7cb 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -61,6 +61,7 @@ struct KernelCore::Impl {
global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel);
global_handle_table->Initialize(KHandleTable::MaxTableSize);
+ default_service_thread = CreateServiceThread(kernel, "DefaultServiceThread");
is_phantom_mode_for_singlecore = false;
@@ -84,7 +85,7 @@ struct KernelCore::Impl {
void InitializeCores() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
- cores[core_id].Initialize(current_process->Is64BitProcess());
+ cores[core_id].Initialize((*current_process).Is64BitProcess());
system.Memory().SetCurrentPageTable(*current_process, core_id);
}
}
@@ -98,7 +99,7 @@ struct KernelCore::Impl {
// Close all open server ports.
std::unordered_set<KServerPort*> server_ports_;
{
- std::lock_guard lk(server_ports_lock);
+ std::scoped_lock lk{server_ports_lock};
server_ports_ = server_ports;
server_ports.clear();
}
@@ -156,7 +157,7 @@ struct KernelCore::Impl {
// Close kernel objects that were not freed on shutdown
{
- std::lock_guard lk(registered_in_use_objects_lock);
+ std::scoped_lock lk{registered_in_use_objects_lock};
if (registered_in_use_objects.size()) {
for (auto& object : registered_in_use_objects) {
object->Close();
@@ -167,17 +168,17 @@ struct KernelCore::Impl {
// Shutdown all processes.
if (current_process) {
- current_process->Finalize();
+ (*current_process).Finalize();
// current_process->Close();
// TODO: The current process should be destroyed based on accurate ref counting after
// calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
- current_process->Destroy();
+ (*current_process).Destroy();
current_process = nullptr;
}
// Track kernel objects that were not freed on shutdown
{
- std::lock_guard lk(registered_objects_lock);
+ std::scoped_lock lk{registered_objects_lock};
if (registered_objects.size()) {
LOG_DEBUG(Kernel, "{} kernel objects were dangling on shutdown!",
registered_objects.size());
@@ -659,7 +660,7 @@ struct KernelCore::Impl {
KClientPort* port = &search->second(system.ServiceManager(), system);
{
- std::lock_guard lk(server_ports_lock);
+ std::scoped_lock lk{server_ports_lock};
server_ports.insert(&port->GetParent()->GetServerPort());
}
return port;
@@ -677,6 +678,12 @@ struct KernelCore::Impl {
void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) {
if (auto strong_ptr = service_thread.lock()) {
+ if (strong_ptr == default_service_thread.lock()) {
+ // Nothing to do here, the service is using default_service_thread, which will be
+ // released on shutdown.
+ return;
+ }
+
service_threads_manager.QueueWork(
[this, strong_ptr{std::move(strong_ptr)}]() { service_threads.erase(strong_ptr); });
}
@@ -697,7 +704,7 @@ struct KernelCore::Impl {
// Lists all processes that exist in the current session.
std::vector<KProcess*> process_list;
- KProcess* current_process{};
+ std::atomic<KProcess*> current_process{};
std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
Kernel::TimeManager time_manager;
@@ -739,7 +746,8 @@ struct KernelCore::Impl {
std::unique_ptr<KMemoryLayout> memory_layout;
// Threads used for services
- std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
+ std::unordered_set<std::shared_ptr<ServiceThread>> service_threads;
+ std::weak_ptr<ServiceThread> default_service_thread;
Common::ThreadWorker service_threads_manager;
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> suspend_threads;
@@ -921,22 +929,22 @@ KClientPort* KernelCore::CreateNamedServicePort(std::string name) {
}
void KernelCore::RegisterKernelObject(KAutoObject* object) {
- std::lock_guard lk(impl->registered_objects_lock);
+ std::scoped_lock lk{impl->registered_objects_lock};
impl->registered_objects.insert(object);
}
void KernelCore::UnregisterKernelObject(KAutoObject* object) {
- std::lock_guard lk(impl->registered_objects_lock);
+ std::scoped_lock lk{impl->registered_objects_lock};
impl->registered_objects.erase(object);
}
void KernelCore::RegisterInUseObject(KAutoObject* object) {
- std::lock_guard lk(impl->registered_in_use_objects_lock);
+ std::scoped_lock lk{impl->registered_in_use_objects_lock};
impl->registered_in_use_objects.insert(object);
}
void KernelCore::UnregisterInUseObject(KAutoObject* object) {
- std::lock_guard lk(impl->registered_in_use_objects_lock);
+ std::scoped_lock lk{impl->registered_in_use_objects_lock};
impl->registered_in_use_objects.erase(object);
}
@@ -1065,6 +1073,10 @@ std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::
return impl->CreateServiceThread(*this, name);
}
+std::weak_ptr<Kernel::ServiceThread> KernelCore::GetDefaultServiceThread() const {
+ return impl->default_service_thread;
+}
+
void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) {
impl->ReleaseServiceThread(service_thread);
}
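The shutdown wrinkle here is that sessions only hold weak_ptrs, so ReleaseServiceThread has to lock the pointer and compare it against the shared default thread before erasing anything; the default thread is owned by the kernel and outlives individual sessions. A stripped-down sketch of that ownership scheme (ThreadRegistry and the stored types are illustrative stand-ins, not yuzu's):

#include <memory>
#include <unordered_set>

struct ServiceThread {};  // stand-in for the real worker-thread type

class ThreadRegistry {
public:
    std::weak_ptr<ServiceThread> Create() {
        auto thread = std::make_shared<ServiceThread>();
        threads.insert(thread);
        return thread;
    }

    std::weak_ptr<ServiceThread> Default() const { return default_thread; }

    void Release(std::weak_ptr<ServiceThread> weak) {
        if (auto strong = weak.lock()) {
            if (strong == default_thread) {
                return;  // the shared default thread is only torn down at shutdown
            }
            threads.erase(strong);  // drops the last owning reference
        }
    }

private:
    std::unordered_set<std::shared_ptr<ServiceThread>> threads;
    std::shared_ptr<ServiceThread> default_thread = std::make_shared<ServiceThread>();
};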
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 4c68e96df..24e26fa44 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -271,9 +271,11 @@ public:
void ExitSVCProfile();
/**
- * Creates an HLE service thread, which are used to execute service routines asynchronously.
- * While these are allocated per ServerSession, these need to be owned and managed outside
- * of ServerSession to avoid a circular dependency.
+ * Creates a host thread to execute HLE service requests, which are used to execute service
+ * routines asynchronously. While these are allocated per ServerSession, these need to be owned
+ * and managed outside of ServerSession to avoid a circular dependency. In general, most
+ * services can just use the default service thread, and not need their own host service thread.
+ * See GetDefaultServiceThread.
* @param name String name for the ServerSession creating this thread, used for debug
* purposes.
* @returns A weak pointer to the newly created service thread.
@@ -281,6 +283,14 @@ public:
std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name);
/**
+ * Gets the default host service thread, which executes HLE service requests. Unless service
+ * requests need to block on the host, the default service thread should be used in favor of
+ * creating a new service thread.
+ * @returns A weak pointer to the default service thread.
+ */
+ std::weak_ptr<Kernel::ServiceThread> GetDefaultServiceThread() const;
+
+ /**
* Releases a HLE service thread, instructing KernelCore to free it. This should be called when
* the ServerSession associated with the thread is destroyed.
* @param service_thread Service thread to release.
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index 7477668e4..18a5f40f8 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -58,6 +58,7 @@ bool PhysicalCore::IsInterrupted() const {
void PhysicalCore::Interrupt() {
guard->lock();
interrupts[core_index].SetInterrupt(true);
+ arm_interface->SignalInterrupt();
guard->unlock();
}
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index aa985d820..5b8fe8eae 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -24,7 +24,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
}
void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
- std::lock_guard lock{mutex};
+ std::scoped_lock lock{mutex};
if (nanoseconds > 0) {
ASSERT(thread);
ASSERT(thread->GetState() != ThreadState::Runnable);
@@ -35,7 +35,7 @@ void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
}
void TimeManager::UnscheduleTimeEvent(KThread* thread) {
- std::lock_guard lock{mutex};
+ std::scoped_lock lock{mutex};
system.CoreTiming().UnscheduleEvent(time_manager_event_type,
reinterpret_cast<uintptr_t>(thread));
}
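The lock_guard to scoped_lock swaps throughout this change are behavior-neutral for a single mutex; std::scoped_lock is simply the C++17 replacement, and it also accepts several mutexes and locks them with deadlock avoidance:

#include <mutex>

std::mutex lock_a;
std::mutex lock_b;

void SingleMutex() {
    std::scoped_lock lock{lock_a};  // same behavior as the std::lock_guard it replaces
}

void TwoMutexes() {
    std::scoped_lock lock{lock_a, lock_b};  // locks both, avoiding lock-order deadlocks
}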