120 files changed, 2174 insertions(+), 2737 deletions(-)
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 75e0c4f38..378e6c023 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -158,6 +158,7 @@ add_library(core STATIC hid/motion_input.h hle/api_version.h hle/ipc.h + hle/kernel/board/nintendo/nx/k_memory_layout.cpp hle/kernel/board/nintendo/nx/k_memory_layout.h hle/kernel/board/nintendo/nx/k_system_control.cpp hle/kernel/board/nintendo/nx/k_system_control.h @@ -211,12 +212,10 @@ add_library(core STATIC hle/kernel/k_light_condition_variable.h hle/kernel/k_light_lock.cpp hle/kernel/k_light_lock.h - hle/kernel/k_linked_list.h hle/kernel/k_memory_block.h hle/kernel/k_memory_block_manager.cpp hle/kernel/k_memory_block_manager.h hle/kernel/k_memory_layout.cpp - hle/kernel/k_memory_layout.board.nintendo_nx.cpp hle/kernel/k_memory_layout.h hle/kernel/k_memory_manager.cpp hle/kernel/k_memory_manager.h diff --git a/src/core/core.cpp b/src/core/core.cpp index bd2082fd6..d2b597068 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp @@ -434,7 +434,7 @@ struct System::Impl { } Service::Glue::ApplicationLaunchProperty launch{}; - launch.title_id = process.GetProgramID(); + launch.title_id = process.GetProgramId(); FileSys::PatchManager pm{launch.title_id, fs_controller, *content_provider}; launch.version = pm.GetGameVersion().value_or(0); @@ -762,7 +762,7 @@ const Core::SpeedLimiter& System::SpeedLimiter() const { } u64 System::GetApplicationProcessProgramID() const { - return impl->kernel.ApplicationProcess()->GetProgramID(); + return impl->kernel.ApplicationProcess()->GetProgramId(); } Loader::ResultStatus System::GetGameName(std::string& out) const { diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp index 18afe97e1..b2fe6bd7d 100644 --- a/src/core/debugger/gdbstub.cpp +++ b/src/core/debugger/gdbstub.cpp @@ -421,7 +421,7 @@ void GDBStub::HandleBreakpointRemove(std::string_view command) { static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory& memory, const Kernel::KThread* thread) { // Read thread type from TLS - const VAddr tls_thread_type{memory.Read32(thread->GetTLSAddress() + 0x1fc)}; + const VAddr tls_thread_type{memory.Read32(thread->GetTlsAddress() + 0x1fc)}; const VAddr argument_thread_type{thread->GetArgument()}; if (argument_thread_type && tls_thread_type != argument_thread_type) { @@ -452,7 +452,7 @@ static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory& static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory& memory, const Kernel::KThread* thread) { // Read thread type from TLS - const VAddr tls_thread_type{memory.Read64(thread->GetTLSAddress() + 0x1f8)}; + const VAddr tls_thread_type{memory.Read64(thread->GetTlsAddress() + 0x1f8)}; const VAddr argument_thread_type{thread->GetArgument()}; if (argument_thread_type && tls_thread_type != argument_thread_type) { @@ -576,7 +576,7 @@ void GDBStub::HandleQuery(std::string_view command) { const auto& threads = system.ApplicationProcess()->GetThreadList(); std::vector<std::string> thread_ids; for (const auto& thread : threads) { - thread_ids.push_back(fmt::format("{:x}", thread->GetThreadID())); + thread_ids.push_back(fmt::format("{:x}", thread->GetThreadId())); } SendReply(fmt::format("m{}", fmt::join(thread_ids, ","))); } else if (command.starts_with("sThreadInfo")) { @@ -591,11 +591,11 @@ void GDBStub::HandleQuery(std::string_view command) { for (const auto* thread : threads) { auto thread_name{GetThreadName(system, thread)}; if (!thread_name) { - thread_name = 
fmt::format("Thread {:d}", thread->GetThreadID()); + thread_name = fmt::format("Thread {:d}", thread->GetThreadId()); } buffer += fmt::format(R"(<thread id="{:x}" core="{:d}" name="{}">{}</thread>)", - thread->GetThreadID(), thread->GetActiveCore(), + thread->GetThreadId(), thread->GetActiveCore(), EscapeXML(*thread_name), GetThreadState(thread)); } @@ -756,7 +756,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) { reply = fmt::format("Process: {:#x} ({})\n" "Program Id: {:#018x}\n", - process->GetProcessID(), process->GetName(), process->GetProgramID()); + process->GetProcessId(), process->GetName(), process->GetProgramId()); reply += fmt::format("Layout:\n" " Alias: {:#012x} - {:#012x}\n" @@ -819,7 +819,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) { Kernel::KThread* GDBStub::GetThreadByID(u64 thread_id) { const auto& threads{system.ApplicationProcess()->GetThreadList()}; for (auto* thread : threads) { - if (thread->GetThreadID() == thread_id) { + if (thread->GetThreadId() == thread_id) { return thread; } } diff --git a/src/core/debugger/gdbstub_arch.cpp b/src/core/debugger/gdbstub_arch.cpp index 831c48513..75c94a91a 100644 --- a/src/core/debugger/gdbstub_arch.cpp +++ b/src/core/debugger/gdbstub_arch.cpp @@ -259,7 +259,7 @@ void GDBStubA64::WriteRegisters(Kernel::KThread* thread, std::string_view regist std::string GDBStubA64::ThreadStatus(const Kernel::KThread* thread, u8 signal) const { return fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", signal, PC_REGISTER, RegRead(thread, PC_REGISTER), SP_REGISTER, RegRead(thread, SP_REGISTER), - LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadID()); + LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadId()); } u32 GDBStubA64::BreakpointInstruction() const { @@ -469,7 +469,7 @@ void GDBStubA32::WriteRegisters(Kernel::KThread* thread, std::string_view regist std::string GDBStubA32::ThreadStatus(const Kernel::KThread* thread, u8 signal) const { return fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", signal, PC_REGISTER, RegRead(thread, PC_REGISTER), SP_REGISTER, RegRead(thread, SP_REGISTER), - LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadID()); + LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadId()); } u32 GDBStubA32::BreakpointInstruction() const { diff --git a/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp index 098ba6eac..098ba6eac 100644 --- a/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp +++ b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp index fd911a3a5..7b090ccb5 100644 --- a/src/core/hle/kernel/global_scheduler_context.cpp +++ b/src/core/hle/kernel/global_scheduler_context.cpp @@ -12,20 +12,19 @@ namespace Kernel { -GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel_) - : kernel{kernel_}, scheduler_lock{kernel_} {} +GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel) + : m_kernel{kernel}, m_scheduler_lock{kernel} {} GlobalSchedulerContext::~GlobalSchedulerContext() = default; void GlobalSchedulerContext::AddThread(KThread* thread) { - std::scoped_lock lock{global_list_guard}; - thread_list.push_back(thread); + std::scoped_lock lock{m_global_list_guard}; + m_thread_list.push_back(thread); } void GlobalSchedulerContext::RemoveThread(KThread* thread) { - std::scoped_lock lock{global_list_guard}; - 
thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), - thread_list.end()); + std::scoped_lock lock{m_global_list_guard}; + std::erase(m_thread_list, thread); } void GlobalSchedulerContext::PreemptThreads() { @@ -38,37 +37,37 @@ void GlobalSchedulerContext::PreemptThreads() { 63, }; - ASSERT(IsLocked()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { const u32 priority = preemption_priorities[core_id]; - KScheduler::RotateScheduledQueue(kernel, core_id, priority); + KScheduler::RotateScheduledQueue(m_kernel, core_id, priority); } } bool GlobalSchedulerContext::IsLocked() const { - return scheduler_lock.IsLockedByCurrentThread(); + return m_scheduler_lock.IsLockedByCurrentThread(); } void GlobalSchedulerContext::RegisterDummyThreadForWakeup(KThread* thread) { - ASSERT(IsLocked()); + ASSERT(this->IsLocked()); - woken_dummy_threads.insert(thread); + m_woken_dummy_threads.insert(thread); } void GlobalSchedulerContext::UnregisterDummyThreadForWakeup(KThread* thread) { - ASSERT(IsLocked()); + ASSERT(this->IsLocked()); - woken_dummy_threads.erase(thread); + m_woken_dummy_threads.erase(thread); } void GlobalSchedulerContext::WakeupWaitingDummyThreads() { - ASSERT(IsLocked()); + ASSERT(this->IsLocked()); - for (auto* thread : woken_dummy_threads) { + for (auto* thread : m_woken_dummy_threads) { thread->DummyThreadEndWait(); } - woken_dummy_threads.clear(); + m_woken_dummy_threads.clear(); } } // namespace Kernel diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h index 220ed6192..c48e8cd12 100644 --- a/src/core/hle/kernel/global_scheduler_context.h +++ b/src/core/hle/kernel/global_scheduler_context.h @@ -33,7 +33,7 @@ class GlobalSchedulerContext final { public: using LockType = KAbstractSchedulerLock<KScheduler>; - explicit GlobalSchedulerContext(KernelCore& kernel_); + explicit GlobalSchedulerContext(KernelCore& kernel); ~GlobalSchedulerContext(); /// Adds a new thread to the scheduler @@ -43,8 +43,9 @@ public: void RemoveThread(KThread* thread); /// Returns a list of all threads managed by the scheduler - [[nodiscard]] const std::vector<KThread*>& GetThreadList() const { - return thread_list; + /// This is only safe to iterate while holding the scheduler lock + const std::vector<KThread*>& GetThreadList() const { + return m_thread_list; } /** @@ -63,30 +64,26 @@ public: void RegisterDummyThreadForWakeup(KThread* thread); void WakeupWaitingDummyThreads(); - [[nodiscard]] LockType& SchedulerLock() { - return scheduler_lock; - } - - [[nodiscard]] const LockType& SchedulerLock() const { - return scheduler_lock; + LockType& SchedulerLock() { + return m_scheduler_lock; } private: friend class KScopedSchedulerLock; friend class KScopedSchedulerLockAndSleep; - KernelCore& kernel; + KernelCore& m_kernel; - std::atomic_bool scheduler_update_needed{}; - KSchedulerPriorityQueue priority_queue; - LockType scheduler_lock; + std::atomic_bool m_scheduler_update_needed{}; + KSchedulerPriorityQueue m_priority_queue; + LockType m_scheduler_lock; /// Lists dummy threads pending wakeup on lock release - std::set<KThread*> woken_dummy_threads; + std::set<KThread*> m_woken_dummy_threads; /// Lists all thread ids that aren't deleted/etc. 
- std::vector<KThread*> thread_list; - std::mutex global_list_guard; + std::vector<KThread*> m_thread_list; + std::mutex m_global_list_guard; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp index a4c16eca9..30a4e6edb 100644 --- a/src/core/hle/kernel/k_address_arbiter.cpp +++ b/src/core/hle/kernel/k_address_arbiter.cpp @@ -14,8 +14,8 @@ namespace Kernel { -KAddressArbiter::KAddressArbiter(Core::System& system_) - : system{system_}, kernel{system.Kernel()} {} +KAddressArbiter::KAddressArbiter(Core::System& system) + : m_system{system}, m_kernel{system.Kernel()} {} KAddressArbiter::~KAddressArbiter() = default; namespace { @@ -90,8 +90,8 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 class ThreadQueueImplForKAddressArbiter final : public KThreadQueue { public: - explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t) - : KThreadQueue(kernel_), m_tree(t) {} + explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel, KAddressArbiter::ThreadTree* t) + : KThreadQueue(kernel), m_tree(t) {} void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override { // If the thread is waiting on an address arbiter, remove it from the tree. @@ -105,7 +105,7 @@ public: } private: - KAddressArbiter::ThreadTree* m_tree; + KAddressArbiter::ThreadTree* m_tree{}; }; } // namespace @@ -114,10 +114,10 @@ Result KAddressArbiter::Signal(VAddr addr, s32 count) { // Perform signaling. s32 num_waiters{}; { - KScopedSchedulerLock sl(kernel); + KScopedSchedulerLock sl(m_kernel); - auto it = thread_tree.nfind_key({addr, -1}); - while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && + auto it = m_tree.nfind_key({addr, -1}); + while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { // End the thread's wait. KThread* target_thread = std::addressof(*it); @@ -126,31 +126,27 @@ Result KAddressArbiter::Signal(VAddr addr, s32 count) { ASSERT(target_thread->IsWaitingForAddressArbiter()); target_thread->ClearAddressArbiter(); - it = thread_tree.erase(it); + it = m_tree.erase(it); ++num_waiters; } } - return ResultSuccess; + R_SUCCEED(); } Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) { // Perform signaling. s32 num_waiters{}; { - KScopedSchedulerLock sl(kernel); + KScopedSchedulerLock sl(m_kernel); // Check the userspace value. s32 user_value{}; - if (!UpdateIfEqual(system, &user_value, addr, value, value + 1)) { - LOG_ERROR(Kernel, "Invalid current memory!"); - return ResultInvalidCurrentMemory; - } - if (user_value != value) { - return ResultInvalidState; - } + R_UNLESS(UpdateIfEqual(m_system, std::addressof(user_value), addr, value, value + 1), + ResultInvalidCurrentMemory); + R_UNLESS(user_value == value, ResultInvalidState); - auto it = thread_tree.nfind_key({addr, -1}); - while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && + auto it = m_tree.nfind_key({addr, -1}); + while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { // End the thread's wait. 
KThread* target_thread = std::addressof(*it); @@ -159,33 +155,33 @@ Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 cou ASSERT(target_thread->IsWaitingForAddressArbiter()); target_thread->ClearAddressArbiter(); - it = thread_tree.erase(it); + it = m_tree.erase(it); ++num_waiters; } } - return ResultSuccess; + R_SUCCEED(); } Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) { // Perform signaling. s32 num_waiters{}; { - [[maybe_unused]] const KScopedSchedulerLock sl(kernel); + KScopedSchedulerLock sl(m_kernel); - auto it = thread_tree.nfind_key({addr, -1}); + auto it = m_tree.nfind_key({addr, -1}); // Determine the updated value. s32 new_value{}; if (count <= 0) { - if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) { + if (it != m_tree.end() && it->GetAddressArbiterKey() == addr) { new_value = value - 2; } else { new_value = value + 1; } } else { - if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) { + if (it != m_tree.end() && it->GetAddressArbiterKey() == addr) { auto tmp_it = it; s32 tmp_num_waiters{}; - while (++tmp_it != thread_tree.end() && tmp_it->GetAddressArbiterKey() == addr) { + while (++tmp_it != m_tree.end() && tmp_it->GetAddressArbiterKey() == addr) { if (tmp_num_waiters++ >= count) { break; } @@ -205,20 +201,15 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 val s32 user_value{}; bool succeeded{}; if (value != new_value) { - succeeded = UpdateIfEqual(system, &user_value, addr, value, new_value); + succeeded = UpdateIfEqual(m_system, std::addressof(user_value), addr, value, new_value); } else { - succeeded = ReadFromUser(system, &user_value, addr); + succeeded = ReadFromUser(m_system, std::addressof(user_value), addr); } - if (!succeeded) { - LOG_ERROR(Kernel, "Invalid current memory!"); - return ResultInvalidCurrentMemory; - } - if (user_value != value) { - return ResultInvalidState; - } + R_UNLESS(succeeded, ResultInvalidCurrentMemory); + R_UNLESS(user_value == value, ResultInvalidState); - while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && + while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { // End the thread's wait. KThread* target_thread = std::addressof(*it); @@ -227,57 +218,57 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 val ASSERT(target_thread->IsWaitingForAddressArbiter()); target_thread->ClearAddressArbiter(); - it = thread_tree.erase(it); + it = m_tree.erase(it); ++num_waiters; } } - return ResultSuccess; + R_SUCCEED(); } Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) { // Prepare to wait. - KThread* cur_thread = GetCurrentThreadPointer(kernel); + KThread* cur_thread = GetCurrentThreadPointer(m_kernel); KHardwareTimer* timer{}; - ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree)); + ThreadQueueImplForKAddressArbiter wait_queue(m_kernel, std::addressof(m_tree)); { - KScopedSchedulerLockAndSleep slp{kernel, std::addressof(timer), cur_thread, timeout}; + KScopedSchedulerLockAndSleep slp{m_kernel, std::addressof(timer), cur_thread, timeout}; // Check that the thread isn't terminating. if (cur_thread->IsTerminationRequested()) { slp.CancelSleep(); - return ResultTerminationRequested; + R_THROW(ResultTerminationRequested); } // Read the value from userspace. 
s32 user_value{}; bool succeeded{}; if (decrement) { - succeeded = DecrementIfLessThan(system, &user_value, addr, value); + succeeded = DecrementIfLessThan(m_system, std::addressof(user_value), addr, value); } else { - succeeded = ReadFromUser(system, &user_value, addr); + succeeded = ReadFromUser(m_system, std::addressof(user_value), addr); } if (!succeeded) { slp.CancelSleep(); - return ResultInvalidCurrentMemory; + R_THROW(ResultInvalidCurrentMemory); } // Check that the value is less than the specified one. if (user_value >= value) { slp.CancelSleep(); - return ResultInvalidState; + R_THROW(ResultInvalidState); } // Check that the timeout is non-zero. if (timeout == 0) { slp.CancelSleep(); - return ResultTimedOut; + R_THROW(ResultTimedOut); } // Set the arbiter. - cur_thread->SetAddressArbiter(&thread_tree, addr); - thread_tree.insert(*cur_thread); + cur_thread->SetAddressArbiter(std::addressof(m_tree), addr); + m_tree.insert(*cur_thread); // Wait for the thread to finish. wait_queue.SetHardwareTimer(timer); @@ -291,41 +282,41 @@ Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s6 Result KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) { // Prepare to wait. - KThread* cur_thread = GetCurrentThreadPointer(kernel); + KThread* cur_thread = GetCurrentThreadPointer(m_kernel); KHardwareTimer* timer{}; - ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree)); + ThreadQueueImplForKAddressArbiter wait_queue(m_kernel, std::addressof(m_tree)); { - KScopedSchedulerLockAndSleep slp{kernel, std::addressof(timer), cur_thread, timeout}; + KScopedSchedulerLockAndSleep slp{m_kernel, std::addressof(timer), cur_thread, timeout}; // Check that the thread isn't terminating. if (cur_thread->IsTerminationRequested()) { slp.CancelSleep(); - return ResultTerminationRequested; + R_THROW(ResultTerminationRequested); } // Read the value from userspace. s32 user_value{}; - if (!ReadFromUser(system, &user_value, addr)) { + if (!ReadFromUser(m_system, std::addressof(user_value), addr)) { slp.CancelSleep(); - return ResultInvalidCurrentMemory; + R_THROW(ResultInvalidCurrentMemory); } // Check that the value is equal. if (value != user_value) { slp.CancelSleep(); - return ResultInvalidState; + R_THROW(ResultInvalidState); } // Check that the timeout is non-zero. if (timeout == 0) { slp.CancelSleep(); - return ResultTimedOut; + R_THROW(ResultTimedOut); } // Set the arbiter. - cur_thread->SetAddressArbiter(&thread_tree, addr); - thread_tree.insert(*cur_thread); + cur_thread->SetAddressArbiter(std::addressof(m_tree), addr); + m_tree.insert(*cur_thread); // Wait for the thread to finish. 
wait_queue.SetHardwareTimer(timer); diff --git a/src/core/hle/kernel/k_address_arbiter.h b/src/core/hle/kernel/k_address_arbiter.h index e4085ae22..9a8c1ae94 100644 --- a/src/core/hle/kernel/k_address_arbiter.h +++ b/src/core/hle/kernel/k_address_arbiter.h @@ -22,47 +22,46 @@ class KAddressArbiter { public: using ThreadTree = KConditionVariable::ThreadTree; - explicit KAddressArbiter(Core::System& system_); + explicit KAddressArbiter(Core::System& system); ~KAddressArbiter(); - [[nodiscard]] Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) { + Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) { switch (type) { case Svc::SignalType::Signal: - return Signal(addr, count); + R_RETURN(this->Signal(addr, count)); case Svc::SignalType::SignalAndIncrementIfEqual: - return SignalAndIncrementIfEqual(addr, value, count); + R_RETURN(this->SignalAndIncrementIfEqual(addr, value, count)); case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual: - return SignalAndModifyByWaitingCountIfEqual(addr, value, count); + R_RETURN(this->SignalAndModifyByWaitingCountIfEqual(addr, value, count)); + default: + UNREACHABLE(); } - ASSERT(false); - return ResultUnknown; } - [[nodiscard]] Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value, - s64 timeout) { + Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value, s64 timeout) { switch (type) { case Svc::ArbitrationType::WaitIfLessThan: - return WaitIfLessThan(addr, value, false, timeout); + R_RETURN(WaitIfLessThan(addr, value, false, timeout)); case Svc::ArbitrationType::DecrementAndWaitIfLessThan: - return WaitIfLessThan(addr, value, true, timeout); + R_RETURN(WaitIfLessThan(addr, value, true, timeout)); case Svc::ArbitrationType::WaitIfEqual: - return WaitIfEqual(addr, value, timeout); + R_RETURN(WaitIfEqual(addr, value, timeout)); + default: + UNREACHABLE(); } - ASSERT(false); - return ResultUnknown; } private: - [[nodiscard]] Result Signal(VAddr addr, s32 count); - [[nodiscard]] Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count); - [[nodiscard]] Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count); - [[nodiscard]] Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout); - [[nodiscard]] Result WaitIfEqual(VAddr addr, s32 value, s64 timeout); + Result Signal(VAddr addr, s32 count); + Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count); + Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count); + Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout); + Result WaitIfEqual(VAddr addr, s32 value, s64 timeout); - ThreadTree thread_tree; - - Core::System& system; - KernelCore& kernel; +private: + ThreadTree m_tree; + Core::System& m_system; + KernelCore& m_kernel; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_affinity_mask.h b/src/core/hle/kernel/k_affinity_mask.h index b58716e90..07a5a822c 100644 --- a/src/core/hle/kernel/k_affinity_mask.h +++ b/src/core/hle/kernel/k_affinity_mask.h @@ -13,40 +13,40 @@ class KAffinityMask { public: constexpr KAffinityMask() = default; - [[nodiscard]] constexpr u64 GetAffinityMask() const { - return this->mask; + constexpr u64 GetAffinityMask() const { + return m_mask; } constexpr void SetAffinityMask(u64 new_mask) { ASSERT((new_mask & ~AllowedAffinityMask) == 0); - this->mask = new_mask; + m_mask = new_mask; } - [[nodiscard]] constexpr bool GetAffinity(s32 core) const { - return (this->mask & GetCoreBit(core)) != 0; 
+ constexpr bool GetAffinity(s32 core) const { + return (m_mask & GetCoreBit(core)) != 0; } constexpr void SetAffinity(s32 core, bool set) { if (set) { - this->mask |= GetCoreBit(core); + m_mask |= GetCoreBit(core); } else { - this->mask &= ~GetCoreBit(core); + m_mask &= ~GetCoreBit(core); } } constexpr void SetAll() { - this->mask = AllowedAffinityMask; + m_mask = AllowedAffinityMask; } private: - [[nodiscard]] static constexpr u64 GetCoreBit(s32 core) { + static constexpr u64 GetCoreBit(s32 core) { ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); return (1ULL << core); } static constexpr u64 AllowedAffinityMask = (1ULL << Core::Hardware::NUM_CPU_CORES) - 1; - u64 mask{}; + u64 m_mask{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_auto_object.cpp b/src/core/hle/kernel/k_auto_object.cpp index 691af8ccb..0ae42c95c 100644 --- a/src/core/hle/kernel/k_auto_object.cpp +++ b/src/core/hle/kernel/k_auto_object.cpp @@ -12,11 +12,11 @@ KAutoObject* KAutoObject::Create(KAutoObject* obj) { } void KAutoObject::RegisterWithKernel() { - kernel.RegisterKernelObject(this); + m_kernel.RegisterKernelObject(this); } void KAutoObject::UnregisterWithKernel() { - kernel.UnregisterKernelObject(this); + m_kernel.UnregisterKernelObject(this); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h index e8118c2b8..9b71fe371 100644 --- a/src/core/hle/kernel/k_auto_object.h +++ b/src/core/hle/kernel/k_auto_object.h @@ -80,7 +80,7 @@ private: KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const); public: - explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) { + explicit KAutoObject(KernelCore& kernel) : m_kernel(kernel) { RegisterWithKernel(); } virtual ~KAutoObject() = default; @@ -164,17 +164,12 @@ public: } } - const std::string& GetName() const { - return name; - } - private: void RegisterWithKernel(); void UnregisterWithKernel(); protected: - KernelCore& kernel; - std::string name; + KernelCore& m_kernel; private: std::atomic<u32> m_ref_count{}; @@ -184,7 +179,7 @@ class KAutoObjectWithListContainer; class KAutoObjectWithList : public KAutoObject, public boost::intrusive::set_base_hook<> { public: - explicit KAutoObjectWithList(KernelCore& kernel_) : KAutoObject(kernel_) {} + explicit KAutoObjectWithList(KernelCore& kernel) : KAutoObject(kernel) {} static int Compare(const KAutoObjectWithList& lhs, const KAutoObjectWithList& rhs) { const u64 lid = lhs.GetId(); @@ -200,7 +195,7 @@ public: } friend bool operator<(const KAutoObjectWithList& left, const KAutoObjectWithList& right) { - return &left < &right; + return KAutoObjectWithList::Compare(left, right) < 0; } public: @@ -208,10 +203,6 @@ public: return reinterpret_cast<u64>(this); } - virtual const std::string& GetName() const { - return name; - } - private: friend class KAutoObjectWithListContainer; }; diff --git a/src/core/hle/kernel/k_capabilities.cpp b/src/core/hle/kernel/k_capabilities.cpp index 2907cc6e3..90e4e8fb0 100644 --- a/src/core/hle/kernel/k_capabilities.cpp +++ b/src/core/hle/kernel/k_capabilities.cpp @@ -11,7 +11,7 @@ namespace Kernel { -Result KCapabilities::InitializeForKIP(std::span<const u32> kern_caps, KPageTable* page_table) { +Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table) { // We're initializing an initial process. 
m_svc_access_flags.reset(); m_irq_access_flags.reset(); diff --git a/src/core/hle/kernel/k_capabilities.h b/src/core/hle/kernel/k_capabilities.h index cd96f8d23..de766c811 100644 --- a/src/core/hle/kernel/k_capabilities.h +++ b/src/core/hle/kernel/k_capabilities.h @@ -22,7 +22,7 @@ class KCapabilities { public: constexpr explicit KCapabilities() = default; - Result InitializeForKIP(std::span<const u32> kern_caps, KPageTable* page_table); + Result InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table); Result InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table); static Result CheckCapabilities(KernelCore& kernel, std::span<const u32> user_caps); diff --git a/src/core/hle/kernel/k_client_port.cpp b/src/core/hle/kernel/k_client_port.cpp index 700ae71e3..40e09e532 100644 --- a/src/core/hle/kernel/k_client_port.cpp +++ b/src/core/hle/kernel/k_client_port.cpp @@ -11,26 +11,21 @@ namespace Kernel { -KClientPort::KClientPort(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} +KClientPort::KClientPort(KernelCore& kernel) : KSynchronizationObject{kernel} {} KClientPort::~KClientPort() = default; -void KClientPort::Initialize(KPort* parent_port_, s32 max_sessions_, std::string&& name_) { +void KClientPort::Initialize(KPort* parent, s32 max_sessions) { // Set member variables. - num_sessions = 0; - peak_sessions = 0; - parent = parent_port_; - max_sessions = max_sessions_; - name = std::move(name_); + m_num_sessions = 0; + m_peak_sessions = 0; + m_parent = parent; + m_max_sessions = max_sessions; } void KClientPort::OnSessionFinalized() { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; - // This might happen if a session was improperly used with this port. - ASSERT_MSG(num_sessions > 0, "num_sessions is invalid"); - - const auto prev = num_sessions--; - if (prev == max_sessions) { + if (const auto prev = m_num_sessions--; prev == m_max_sessions) { this->NotifyAvailable(); } } @@ -47,81 +42,81 @@ bool KClientPort::IsServerClosed() const { void KClientPort::Destroy() { // Note with our parent that we're closed. - parent->OnClientClosed(); + m_parent->OnClientClosed(); // Close our reference to our parent. - parent->Close(); + m_parent->Close(); } bool KClientPort::IsSignaled() const { - return num_sessions < max_sessions; + return m_num_sessions.load() < m_max_sessions; } Result KClientPort::CreateSession(KClientSession** out) { + // Declare the session we're going to allocate. + KSession* session{}; + // Reserve a new session from the resource limit. //! FIXME: we are reserving this from the wrong resource limit! - KScopedResourceReservation session_reservation(kernel.ApplicationProcess()->GetResourceLimit(), - LimitableResource::SessionCountMax); + KScopedResourceReservation session_reservation( + m_kernel.ApplicationProcess()->GetResourceLimit(), LimitableResource::SessionCountMax); R_UNLESS(session_reservation.Succeeded(), ResultLimitReached); + // Allocate a session normally. + session = KSession::Create(m_kernel); + + // Check that we successfully created a session. + R_UNLESS(session != nullptr, ResultOutOfResource); + // Update the session counts. { + ON_RESULT_FAILURE { + session->Close(); + }; + // Atomically increment the number of sessions. 
s32 new_sessions{}; { - const auto max = max_sessions; - auto cur_sessions = num_sessions.load(std::memory_order_acquire); + const auto max = m_max_sessions; + auto cur_sessions = m_num_sessions.load(std::memory_order_acquire); do { R_UNLESS(cur_sessions < max, ResultOutOfSessions); new_sessions = cur_sessions + 1; - } while (!num_sessions.compare_exchange_weak(cur_sessions, new_sessions, - std::memory_order_relaxed)); + } while (!m_num_sessions.compare_exchange_weak(cur_sessions, new_sessions, + std::memory_order_relaxed)); } // Atomically update the peak session tracking. { - auto peak = peak_sessions.load(std::memory_order_acquire); + auto peak = m_peak_sessions.load(std::memory_order_acquire); do { if (peak >= new_sessions) { break; } - } while (!peak_sessions.compare_exchange_weak(peak, new_sessions, - std::memory_order_relaxed)); + } while (!m_peak_sessions.compare_exchange_weak(peak, new_sessions, + std::memory_order_relaxed)); } } - // Create a new session. - KSession* session = KSession::Create(kernel); - if (session == nullptr) { - // Decrement the session count. - const auto prev = num_sessions--; - if (prev == max_sessions) { - this->NotifyAvailable(); - } - - return ResultOutOfResource; - } - // Initialize the session. - session->Initialize(this, parent->GetName()); + session->Initialize(this, m_parent->GetName()); // Commit the session reservation. session_reservation.Commit(); // Register the session. - KSession::Register(kernel, session); - auto session_guard = SCOPE_GUARD({ + KSession::Register(m_kernel, session); + ON_RESULT_FAILURE { session->GetClientSession().Close(); session->GetServerSession().Close(); - }); + }; // Enqueue the session with our parent. - R_TRY(parent->EnqueueSession(std::addressof(session->GetServerSession()))); + R_TRY(m_parent->EnqueueSession(std::addressof(session->GetServerSession()))); // We succeeded, so set the output. 
- session_guard.Cancel(); *out = std::addressof(session->GetClientSession()); - return ResultSuccess; + R_SUCCEED(); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_client_port.h b/src/core/hle/kernel/k_client_port.h index a757cf9cd..23db06ddf 100644 --- a/src/core/hle/kernel/k_client_port.h +++ b/src/core/hle/kernel/k_client_port.h @@ -4,7 +4,6 @@ #pragma once #include <memory> -#include <string> #include "common/common_types.h" #include "core/hle/kernel/k_synchronization_object.h" @@ -20,28 +19,28 @@ class KClientPort final : public KSynchronizationObject { KERNEL_AUTOOBJECT_TRAITS(KClientPort, KSynchronizationObject); public: - explicit KClientPort(KernelCore& kernel_); + explicit KClientPort(KernelCore& kernel); ~KClientPort() override; - void Initialize(KPort* parent_, s32 max_sessions_, std::string&& name_); + void Initialize(KPort* parent, s32 max_sessions); void OnSessionFinalized(); void OnServerClosed(); const KPort* GetParent() const { - return parent; + return m_parent; } KPort* GetParent() { - return parent; + return m_parent; } s32 GetNumSessions() const { - return num_sessions; + return m_num_sessions; } s32 GetPeakSessions() const { - return peak_sessions; + return m_peak_sessions; } s32 GetMaxSessions() const { - return max_sessions; + return m_max_sessions; } bool IsLight() const; @@ -54,10 +53,10 @@ public: Result CreateSession(KClientSession** out); private: - std::atomic<s32> num_sessions{}; - std::atomic<s32> peak_sessions{}; - s32 max_sessions{}; - KPort* parent{}; + std::atomic<s32> m_num_sessions{}; + std::atomic<s32> m_peak_sessions{}; + s32 m_max_sessions{}; + KPort* m_parent{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp index da0c9ac8c..d998b2be2 100644 --- a/src/core/hle/kernel/k_client_session.cpp +++ b/src/core/hle/kernel/k_client_session.cpp @@ -12,28 +12,27 @@ namespace Kernel { static constexpr u32 MessageBufferSize = 0x100; -KClientSession::KClientSession(KernelCore& kernel_) - : KAutoObjectWithSlabHeapAndContainer{kernel_} {} +KClientSession::KClientSession(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {} KClientSession::~KClientSession() = default; void KClientSession::Destroy() { - parent->OnClientClosed(); - parent->Close(); + m_parent->OnClientClosed(); + m_parent->Close(); } void KClientSession::OnServerClosed() {} Result KClientSession::SendSyncRequest() { // Create a session request. - KSessionRequest* request = KSessionRequest::Create(kernel); + KSessionRequest* request = KSessionRequest::Create(m_kernel); R_UNLESS(request != nullptr, ResultOutOfResource); SCOPE_EXIT({ request->Close(); }); // Initialize the request. - request->Initialize(nullptr, GetCurrentThread(kernel).GetTLSAddress(), MessageBufferSize); + request->Initialize(nullptr, GetCurrentThread(m_kernel).GetTlsAddress(), MessageBufferSize); // Send the request. 
- return parent->GetServerSession().OnRequest(request); + R_RETURN(m_parent->GetServerSession().OnRequest(request)); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_client_session.h b/src/core/hle/kernel/k_client_session.h index b4a19c546..9b62e55e4 100644 --- a/src/core/hle/kernel/k_client_session.h +++ b/src/core/hle/kernel/k_client_session.h @@ -30,20 +30,19 @@ class KClientSession final KERNEL_AUTOOBJECT_TRAITS(KClientSession, KAutoObject); public: - explicit KClientSession(KernelCore& kernel_); + explicit KClientSession(KernelCore& kernel); ~KClientSession() override; - void Initialize(KSession* parent_session_, std::string&& name_) { + void Initialize(KSession* parent) { // Set member variables. - parent = parent_session_; - name = std::move(name_); + m_parent = parent; } void Destroy() override; - static void PostDestroy([[maybe_unused]] uintptr_t arg) {} + static void PostDestroy(uintptr_t arg) {} KSession* GetParent() const { - return parent; + return m_parent; } Result SendSyncRequest(); @@ -51,7 +50,7 @@ public: void OnServerClosed(); private: - KSession* parent{}; + KSession* m_parent{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp index 6c44a9e99..89df6b5d8 100644 --- a/src/core/hle/kernel/k_code_memory.cpp +++ b/src/core/hle/kernel/k_code_memory.cpp @@ -16,18 +16,18 @@ namespace Kernel { -KCodeMemory::KCodeMemory(KernelCore& kernel_) - : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_lock(kernel_) {} +KCodeMemory::KCodeMemory(KernelCore& kernel) + : KAutoObjectWithSlabHeapAndContainer{kernel}, m_lock(kernel) {} Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) { // Set members. - m_owner = GetCurrentProcessPointer(kernel); + m_owner = GetCurrentProcessPointer(m_kernel); // Get the owner page table. auto& page_table = m_owner->PageTable(); // Construct the page group. - m_page_group.emplace(kernel, page_table.GetBlockInfoManager()); + m_page_group.emplace(m_kernel, page_table.GetBlockInfoManager()); // Lock the memory. R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size)) @@ -45,7 +45,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si m_is_mapped = false; // We succeeded. - return ResultSuccess; + R_SUCCEED(); } void KCodeMemory::Finalize() { @@ -74,13 +74,13 @@ Result KCodeMemory::Map(VAddr address, size_t size) { R_UNLESS(!m_is_mapped, ResultInvalidState); // Map the memory. - R_TRY(GetCurrentProcess(kernel).PageTable().MapPageGroup( + R_TRY(GetCurrentProcess(m_kernel).PageTable().MapPageGroup( address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite)); // Mark ourselves as mapped. m_is_mapped = true; - return ResultSuccess; + R_SUCCEED(); } Result KCodeMemory::Unmap(VAddr address, size_t size) { @@ -91,13 +91,13 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) { KScopedLightLock lk(m_lock); // Unmap the memory. - R_TRY(GetCurrentProcess(kernel).PageTable().UnmapPageGroup(address, *m_page_group, - KMemoryState::CodeOut)); + R_TRY(GetCurrentProcess(m_kernel).PageTable().UnmapPageGroup(address, *m_page_group, + KMemoryState::CodeOut)); // Mark ourselves as unmapped. m_is_mapped = false; - return ResultSuccess; + R_SUCCEED(); } Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) { @@ -131,7 +131,7 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission // Mark ourselves as mapped. 
m_is_owner_mapped = true; - return ResultSuccess; + R_SUCCEED(); } Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) { @@ -147,7 +147,7 @@ Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) { // Mark ourselves as unmapped. m_is_owner_mapped = false; - return ResultSuccess; + R_SUCCEED(); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h index 5b260b385..23cbb283b 100644 --- a/src/core/hle/kernel/k_code_memory.h +++ b/src/core/hle/kernel/k_code_memory.h @@ -29,7 +29,7 @@ class KCodeMemory final KERNEL_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject); public: - explicit KCodeMemory(KernelCore& kernel_); + explicit KCodeMemory(KernelCore& kernel); Result Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size); void Finalize() override; @@ -42,7 +42,7 @@ public: bool IsInitialized() const override { return m_is_initialized; } - static void PostDestroy([[maybe_unused]] uintptr_t arg) {} + static void PostDestroy(uintptr_t arg) {} KProcess* GetOwner() const override { return m_owner; diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp index 458f4c94e..58b8609d8 100644 --- a/src/core/hle/kernel/k_condition_variable.cpp +++ b/src/core/hle/kernel/k_condition_variable.cpp @@ -4,7 +4,6 @@ #include "core/arm/exclusive_monitor.h" #include "core/core.h" #include "core/hle/kernel/k_condition_variable.h" -#include "core/hle/kernel/k_linked_list.h" #include "core/hle/kernel/k_process.h" #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" @@ -58,8 +57,8 @@ bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue { public: - explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_) - : KThreadQueue(kernel_) {} + explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel) + : KThreadQueue(kernel) {} void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override { // Remove the thread as a waiter from its owner. @@ -76,8 +75,8 @@ private: public: explicit ThreadQueueImplForKConditionVariableWaitConditionVariable( - KernelCore& kernel_, KConditionVariable::ThreadTree* t) - : KThreadQueue(kernel_), m_tree(t) {} + KernelCore& kernel, KConditionVariable::ThreadTree* t) + : KThreadQueue(kernel), m_tree(t) {} void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override { // Remove the thread as a waiter from its owner. @@ -98,17 +97,17 @@ public: } // namespace -KConditionVariable::KConditionVariable(Core::System& system_) - : system{system_}, kernel{system.Kernel()} {} +KConditionVariable::KConditionVariable(Core::System& system) + : m_system{system}, m_kernel{system.Kernel()} {} KConditionVariable::~KConditionVariable() = default; Result KConditionVariable::SignalToAddress(VAddr addr) { - KThread* owner_thread = GetCurrentThreadPointer(kernel); + KThread* owner_thread = GetCurrentThreadPointer(m_kernel); // Signal the address. { - KScopedSchedulerLock sl(kernel); + KScopedSchedulerLock sl(m_kernel); // Remove waiter thread. bool has_waiters{}; @@ -129,7 +128,7 @@ Result KConditionVariable::SignalToAddress(VAddr addr) { // Write the value to userspace. 
Result result{ResultSuccess}; - if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] { + if (WriteToUser(m_system, addr, std::addressof(next_value))) [[likely]] { result = ResultSuccess; } else { result = ResultInvalidCurrentMemory; @@ -145,26 +144,27 @@ Result KConditionVariable::SignalToAddress(VAddr addr) { } Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) { - KThread* cur_thread = GetCurrentThreadPointer(kernel); - ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel); + KThread* cur_thread = GetCurrentThreadPointer(m_kernel); + ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(m_kernel); // Wait for the address. KThread* owner_thread{}; { - KScopedSchedulerLock sl(kernel); + KScopedSchedulerLock sl(m_kernel); // Check if the thread should terminate. R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested); // Read the tag from userspace. u32 test_tag{}; - R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), ResultInvalidCurrentMemory); + R_UNLESS(ReadFromUser(m_system, std::addressof(test_tag), addr), + ResultInvalidCurrentMemory); // If the tag isn't the handle (with wait mask), we're done. R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask)); // Get the lock owner thread. - owner_thread = GetCurrentProcess(kernel) + owner_thread = GetCurrentProcess(m_kernel) .GetHandleTable() .GetObjectWithoutPseudoHandle<KThread>(handle) .ReleasePointerUnsafe(); @@ -177,19 +177,18 @@ Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) // Begin waiting. cur_thread->BeginWait(std::addressof(wait_queue)); cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar); - cur_thread->SetMutexWaitAddressForDebugging(addr); } // Close our reference to the owner thread, now that the wait is over. owner_thread->Close(); // Get the wait result. - return cur_thread->GetWaitResult(); + R_RETURN(cur_thread->GetWaitResult()); } void KConditionVariable::SignalImpl(KThread* thread) { // Check pre-conditions. - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); // Update the tag. VAddr address = thread->GetAddressKey(); @@ -204,7 +203,7 @@ void KConditionVariable::SignalImpl(KThread* thread) { // TODO(bunnei): We should call CanAccessAtomic(..) here. can_access = true; if (can_access) [[likely]] { - UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag, + UpdateLockAtomic(m_system, std::addressof(prev_tag), address, own_tag, Svc::HandleWaitMask); } } @@ -215,7 +214,7 @@ void KConditionVariable::SignalImpl(KThread* thread) { thread->EndWait(ResultSuccess); } else { // Get the previous owner. - KThread* owner_thread = GetCurrentProcess(kernel) + KThread* owner_thread = GetCurrentProcess(m_kernel) .GetHandleTable() .GetObjectWithoutPseudoHandle<KThread>( static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask)) @@ -240,14 +239,14 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) { // Perform signaling. 
s32 num_waiters{}; { - KScopedSchedulerLock sl(kernel); + KScopedSchedulerLock sl(m_kernel); - auto it = thread_tree.nfind_key({cv_key, -1}); - while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && + auto it = m_tree.nfind_key({cv_key, -1}); + while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetConditionVariableKey() == cv_key)) { KThread* target_thread = std::addressof(*it); - it = thread_tree.erase(it); + it = m_tree.erase(it); target_thread->ClearConditionVariable(); this->SignalImpl(target_thread); @@ -256,27 +255,27 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) { } // If we have no waiters, clear the has waiter flag. - if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) { + if (it == m_tree.end() || it->GetConditionVariableKey() != cv_key) { const u32 has_waiter_flag{}; - WriteToUser(system, cv_key, std::addressof(has_waiter_flag)); + WriteToUser(m_system, cv_key, std::addressof(has_waiter_flag)); } } } Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) { // Prepare to wait. - KThread* cur_thread = GetCurrentThreadPointer(kernel); + KThread* cur_thread = GetCurrentThreadPointer(m_kernel); KHardwareTimer* timer{}; - ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue( - kernel, std::addressof(thread_tree)); + ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(m_kernel, + std::addressof(m_tree)); { - KScopedSchedulerLockAndSleep slp(kernel, std::addressof(timer), cur_thread, timeout); + KScopedSchedulerLockAndSleep slp(m_kernel, std::addressof(timer), cur_thread, timeout); // Check that the thread isn't terminating. if (cur_thread->IsTerminationRequested()) { slp.CancelSleep(); - return ResultTerminationRequested; + R_THROW(ResultTerminationRequested); } // Update the value and process for the next owner. @@ -302,14 +301,14 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) { // Write to the cv key. { const u32 has_waiter_flag = 1; - WriteToUser(system, key, std::addressof(has_waiter_flag)); - // TODO(bunnei): We should call DataMemoryBarrier(..) here. + WriteToUser(m_system, key, std::addressof(has_waiter_flag)); + std::atomic_thread_fence(std::memory_order_seq_cst); } // Write the value to userspace. - if (!WriteToUser(system, addr, std::addressof(next_value))) { + if (!WriteToUser(m_system, addr, std::addressof(next_value))) { slp.CancelSleep(); - return ResultInvalidCurrentMemory; + R_THROW(ResultInvalidCurrentMemory); } } @@ -317,18 +316,17 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) { R_UNLESS(timeout != 0, ResultTimedOut); // Update condition variable tracking. - cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value); - thread_tree.insert(*cur_thread); + cur_thread->SetConditionVariable(std::addressof(m_tree), addr, key, value); + m_tree.insert(*cur_thread); // Begin waiting. wait_queue.SetHardwareTimer(timer); cur_thread->BeginWait(std::addressof(wait_queue)); cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar); - cur_thread->SetMutexWaitAddressForDebugging(addr); } // Get the wait result. 
- return cur_thread->GetWaitResult(); + R_RETURN(cur_thread->GetWaitResult()); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h index fad4ed011..fbd2c1fc0 100644 --- a/src/core/hle/kernel/k_condition_variable.h +++ b/src/core/hle/kernel/k_condition_variable.h @@ -21,36 +21,36 @@ class KConditionVariable { public: using ThreadTree = typename KThread::ConditionVariableThreadTreeType; - explicit KConditionVariable(Core::System& system_); + explicit KConditionVariable(Core::System& system); ~KConditionVariable(); // Arbitration - [[nodiscard]] Result SignalToAddress(VAddr addr); - [[nodiscard]] Result WaitForAddress(Handle handle, VAddr addr, u32 value); + Result SignalToAddress(VAddr addr); + Result WaitForAddress(Handle handle, VAddr addr, u32 value); // Condition variable void Signal(u64 cv_key, s32 count); - [[nodiscard]] Result Wait(VAddr addr, u64 key, u32 value, s64 timeout); + Result Wait(VAddr addr, u64 key, u32 value, s64 timeout); private: void SignalImpl(KThread* thread); - ThreadTree thread_tree; - - Core::System& system; - KernelCore& kernel; +private: + Core::System& m_system; + KernelCore& m_kernel; + ThreadTree m_tree{}; }; -inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree, +inline void BeforeUpdatePriority(KernelCore& kernel, KConditionVariable::ThreadTree* tree, KThread* thread) { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); tree->erase(tree->iterator_to(*thread)); } -inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree, +inline void AfterUpdatePriority(KernelCore& kernel, KConditionVariable::ThreadTree* tree, KThread* thread) { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); tree->insert(*thread); } diff --git a/src/core/hle/kernel/k_debug.h b/src/core/hle/kernel/k_debug.h index e3a0689c8..2290e3bca 100644 --- a/src/core/hle/kernel/k_debug.h +++ b/src/core/hle/kernel/k_debug.h @@ -12,9 +12,9 @@ class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KAutoObj KERNEL_AUTOOBJECT_TRAITS(KDebug, KAutoObject); public: - explicit KDebug(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {} + explicit KDebug(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {} - static void PostDestroy([[maybe_unused]] uintptr_t arg) {} + static void PostDestroy(uintptr_t arg) {} }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_device_address_space.cpp b/src/core/hle/kernel/k_device_address_space.cpp index 27659ea3b..a2fc4fe1f 100644 --- a/src/core/hle/kernel/k_device_address_space.cpp +++ b/src/core/hle/kernel/k_device_address_space.cpp @@ -9,8 +9,8 @@ namespace Kernel { -KDeviceAddressSpace::KDeviceAddressSpace(KernelCore& kernel_) - : KAutoObjectWithSlabHeapAndContainer(kernel_), m_lock(kernel_), m_is_initialized(false) {} +KDeviceAddressSpace::KDeviceAddressSpace(KernelCore& kernel) + : KAutoObjectWithSlabHeapAndContainer(kernel), m_lock(kernel), m_is_initialized(false) {} KDeviceAddressSpace::~KDeviceAddressSpace() = default; void KDeviceAddressSpace::Initialize() { diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp index d973853ab..d92b491f8 100644 --- a/src/core/hle/kernel/k_event.cpp +++ b/src/core/hle/kernel/k_event.cpp @@ -7,8 +7,8 @@ namespace Kernel { -KEvent::KEvent(KernelCore& kernel_) - : 
KAutoObjectWithSlabHeapAndContainer{kernel_}, m_readable_event{kernel_} {} +KEvent::KEvent(KernelCore& kernel) + : KAutoObjectWithSlabHeapAndContainer{kernel}, m_readable_event{kernel} {} KEvent::~KEvent() = default; @@ -36,7 +36,7 @@ void KEvent::Finalize() { } Result KEvent::Signal() { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; R_SUCCEED_IF(m_readable_event_destroyed); @@ -44,7 +44,7 @@ Result KEvent::Signal() { } Result KEvent::Clear() { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; R_SUCCEED_IF(m_readable_event_destroyed); diff --git a/src/core/hle/kernel/k_event.h b/src/core/hle/kernel/k_event.h index 48ce7d9a0..f522b0a84 100644 --- a/src/core/hle/kernel/k_event.h +++ b/src/core/hle/kernel/k_event.h @@ -16,7 +16,7 @@ class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObj KERNEL_AUTOOBJECT_TRAITS(KEvent, KAutoObject); public: - explicit KEvent(KernelCore& kernel_); + explicit KEvent(KernelCore& kernel); ~KEvent() override; void Initialize(KProcess* owner); diff --git a/src/core/hle/kernel/k_light_condition_variable.cpp b/src/core/hle/kernel/k_light_condition_variable.cpp index 8fce2bc71..6d5a815aa 100644 --- a/src/core/hle/kernel/k_light_condition_variable.cpp +++ b/src/core/hle/kernel/k_light_condition_variable.cpp @@ -13,9 +13,9 @@ namespace { class ThreadQueueImplForKLightConditionVariable final : public KThreadQueue { public: - ThreadQueueImplForKLightConditionVariable(KernelCore& kernel_, KThread::WaiterList* wl, + ThreadQueueImplForKLightConditionVariable(KernelCore& kernel, KThread::WaiterList* wl, bool term) - : KThreadQueue(kernel_), m_wait_list(wl), m_allow_terminating_thread(term) {} + : KThreadQueue(kernel), m_wait_list(wl), m_allow_terminating_thread(term) {} void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override { // Only process waits if we're allowed to. @@ -39,15 +39,15 @@ private: void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_terminating_thread) { // Create thread queue. - KThread* owner = GetCurrentThreadPointer(kernel); + KThread* owner = GetCurrentThreadPointer(m_kernel); KHardwareTimer* timer{}; - ThreadQueueImplForKLightConditionVariable wait_queue(kernel, std::addressof(wait_list), + ThreadQueueImplForKLightConditionVariable wait_queue(m_kernel, std::addressof(m_wait_list), allow_terminating_thread); // Sleep the thread. { - KScopedSchedulerLockAndSleep lk(kernel, std::addressof(timer), owner, timeout); + KScopedSchedulerLockAndSleep lk(m_kernel, std::addressof(timer), owner, timeout); if (!allow_terminating_thread && owner->IsTerminationRequested()) { lk.CancelSleep(); @@ -57,7 +57,7 @@ void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_ter lock->Unlock(); // Add the thread to the queue. - wait_list.push_back(*owner); + m_wait_list.push_back(*owner); // Begin waiting. wait_queue.SetHardwareTimer(timer); @@ -69,10 +69,10 @@ void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_ter } void KLightConditionVariable::Broadcast() { - KScopedSchedulerLock lk(kernel); + KScopedSchedulerLock lk(m_kernel); // Signal all threads. 
- for (auto it = wait_list.begin(); it != wait_list.end(); it = wait_list.erase(it)) { + for (auto it = m_wait_list.begin(); it != m_wait_list.end(); it = m_wait_list.erase(it)) { it->EndWait(ResultSuccess); } } diff --git a/src/core/hle/kernel/k_light_condition_variable.h b/src/core/hle/kernel/k_light_condition_variable.h index 3cabd6b4f..ab612426d 100644 --- a/src/core/hle/kernel/k_light_condition_variable.h +++ b/src/core/hle/kernel/k_light_condition_variable.h @@ -13,13 +13,13 @@ class KLightLock; class KLightConditionVariable { public: - explicit KLightConditionVariable(KernelCore& kernel_) : kernel{kernel_} {} + explicit KLightConditionVariable(KernelCore& kernel) : m_kernel{kernel} {} void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true); void Broadcast(); private: - KernelCore& kernel; - KThread::WaiterList wait_list{}; + KernelCore& m_kernel; + KThread::WaiterList m_wait_list{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp index 14cb615da..e87ee8b65 100644 --- a/src/core/hle/kernel/k_light_lock.cpp +++ b/src/core/hle/kernel/k_light_lock.cpp @@ -13,7 +13,7 @@ namespace { class ThreadQueueImplForKLightLock final : public KThreadQueue { public: - explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {} + explicit ThreadQueueImplForKLightLock(KernelCore& kernel) : KThreadQueue(kernel) {} void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override { // Remove the thread as a waiter from its owner. @@ -29,13 +29,13 @@ public: } // namespace void KLightLock::Lock() { - const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)); + const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(m_kernel)); while (true) { - uintptr_t old_tag = tag.load(std::memory_order_relaxed); + uintptr_t old_tag = m_tag.load(std::memory_order_relaxed); - while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1), - std::memory_order_acquire)) { + while (!m_tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1), + std::memory_order_acquire)) { } if (old_tag == 0 || this->LockSlowPath(old_tag | 1, cur_thread)) { @@ -45,30 +45,30 @@ void KLightLock::Lock() { } void KLightLock::Unlock() { - const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)); + const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(m_kernel)); uintptr_t expected = cur_thread; - if (!tag.compare_exchange_strong(expected, 0, std::memory_order_release)) { + if (!m_tag.compare_exchange_strong(expected, 0, std::memory_order_release)) { this->UnlockSlowPath(cur_thread); } } bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) { KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread); - ThreadQueueImplForKLightLock wait_queue(kernel); + ThreadQueueImplForKLightLock wait_queue(m_kernel); // Pend the current thread waiting on the owner thread. { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; // Ensure we actually have locking to do. - if (tag.load(std::memory_order_relaxed) != _owner) { + if (m_tag.load(std::memory_order_relaxed) != _owner) { return false; } // Add the current thread as a waiter on the owner. 
KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL); - cur_thread->SetKernelAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag))); + cur_thread->SetKernelAddressKey(reinterpret_cast<uintptr_t>(std::addressof(m_tag))); owner_thread->AddWaiter(cur_thread); // Begin waiting to hold the lock. @@ -87,12 +87,12 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) { // Unlock. { - KScopedSchedulerLock sl(kernel); + KScopedSchedulerLock sl(m_kernel); // Get the next owner. bool has_waiters; KThread* next_owner = owner_thread->RemoveKernelWaiterByKey( - std::addressof(has_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag))); + std::addressof(has_waiters), reinterpret_cast<uintptr_t>(std::addressof(m_tag))); // Pass the lock to the next owner. uintptr_t next_tag = 0; @@ -114,12 +114,13 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) { } // Write the new tag value. - tag.store(next_tag, std::memory_order_release); + m_tag.store(next_tag, std::memory_order_release); } } bool KLightLock::IsLockedByCurrentThread() const { - return (tag | 1ULL) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)) | 1ULL); + return (m_tag.load() | 1ULL) == + (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(m_kernel)) | 1ULL); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_light_lock.h b/src/core/hle/kernel/k_light_lock.h index 7edd950c0..626f57596 100644 --- a/src/core/hle/kernel/k_light_lock.h +++ b/src/core/hle/kernel/k_light_lock.h @@ -13,7 +13,7 @@ class KernelCore; class KLightLock { public: - explicit KLightLock(KernelCore& kernel_) : kernel{kernel_} {} + explicit KLightLock(KernelCore& kernel) : m_kernel{kernel} {} void Lock(); @@ -24,14 +24,14 @@ public: void UnlockSlowPath(uintptr_t cur_thread); bool IsLocked() const { - return tag != 0; + return m_tag.load() != 0; } bool IsLockedByCurrentThread() const; private: - std::atomic<uintptr_t> tag{}; - KernelCore& kernel; + std::atomic<uintptr_t> m_tag{}; + KernelCore& m_kernel; }; using KScopedLightLock = KScopedLock<KLightLock>; diff --git a/src/core/hle/kernel/k_linked_list.h b/src/core/hle/kernel/k_linked_list.h deleted file mode 100644 index 29ebd16b7..000000000 --- a/src/core/hle/kernel/k_linked_list.h +++ /dev/null @@ -1,238 +0,0 @@ -// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later - -#pragma once - -#include <boost/intrusive/list.hpp> - -#include "common/assert.h" -#include "core/hle/kernel/slab_helpers.h" - -namespace Kernel { - -class KernelCore; - -class KLinkedListNode : public boost::intrusive::list_base_hook<>, - public KSlabAllocated<KLinkedListNode> { - -public: - explicit KLinkedListNode(KernelCore&) {} - KLinkedListNode() = default; - - void Initialize(void* it) { - m_item = it; - } - - void* GetItem() const { - return m_item; - } - -private: - void* m_item = nullptr; -}; - -template <typename T> -class KLinkedList : private boost::intrusive::list<KLinkedListNode> { -private: - using BaseList = boost::intrusive::list<KLinkedListNode>; - -public: - template <bool Const> - class Iterator; - - using value_type = T; - using size_type = size_t; - using difference_type = ptrdiff_t; - using pointer = value_type*; - using const_pointer = const value_type*; - using reference = value_type&; - using const_reference = const value_type&; - using iterator = Iterator<false>; - using const_iterator = Iterator<true>; - using reverse_iterator = std::reverse_iterator<iterator>; - using const_reverse_iterator = 
std::reverse_iterator<const_iterator>; - - template <bool Const> - class Iterator { - private: - using BaseIterator = BaseList::iterator; - friend class KLinkedList; - - public: - using iterator_category = std::bidirectional_iterator_tag; - using value_type = typename KLinkedList::value_type; - using difference_type = typename KLinkedList::difference_type; - using pointer = std::conditional_t<Const, KLinkedList::const_pointer, KLinkedList::pointer>; - using reference = - std::conditional_t<Const, KLinkedList::const_reference, KLinkedList::reference>; - - public: - explicit Iterator(BaseIterator it) : m_base_it(it) {} - - pointer GetItem() const { - return static_cast<pointer>(m_base_it->GetItem()); - } - - bool operator==(const Iterator& rhs) const { - return m_base_it == rhs.m_base_it; - } - - bool operator!=(const Iterator& rhs) const { - return !(*this == rhs); - } - - pointer operator->() const { - return this->GetItem(); - } - - reference operator*() const { - return *this->GetItem(); - } - - Iterator& operator++() { - ++m_base_it; - return *this; - } - - Iterator& operator--() { - --m_base_it; - return *this; - } - - Iterator operator++(int) { - const Iterator it{*this}; - ++(*this); - return it; - } - - Iterator operator--(int) { - const Iterator it{*this}; - --(*this); - return it; - } - - operator Iterator<true>() const { - return Iterator<true>(m_base_it); - } - - private: - BaseIterator m_base_it; - }; - -public: - constexpr KLinkedList(KernelCore& kernel_) : BaseList(), kernel{kernel_} {} - - ~KLinkedList() { - // Erase all elements. - for (auto it = begin(); it != end(); it = erase(it)) { - } - - // Ensure we succeeded. - ASSERT(this->empty()); - } - - // Iterator accessors. - iterator begin() { - return iterator(BaseList::begin()); - } - - const_iterator begin() const { - return const_iterator(BaseList::begin()); - } - - iterator end() { - return iterator(BaseList::end()); - } - - const_iterator end() const { - return const_iterator(BaseList::end()); - } - - const_iterator cbegin() const { - return this->begin(); - } - - const_iterator cend() const { - return this->end(); - } - - reverse_iterator rbegin() { - return reverse_iterator(this->end()); - } - - const_reverse_iterator rbegin() const { - return const_reverse_iterator(this->end()); - } - - reverse_iterator rend() { - return reverse_iterator(this->begin()); - } - - const_reverse_iterator rend() const { - return const_reverse_iterator(this->begin()); - } - - const_reverse_iterator crbegin() const { - return this->rbegin(); - } - - const_reverse_iterator crend() const { - return this->rend(); - } - - // Content management. 
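The class being deleted wrapped boost::intrusive::list behind slab-allocated KLinkedListNode shims so stored items did not need an intrusive hook; the content-management methods below show the cost, one node Allocate/Free per insert and erase. The direct intrusive style, where the element itself carries the hook and insertion allocates nothing, looks like this (standalone sketch):

#include <boost/intrusive/list.hpp>

struct Item : boost::intrusive::list_base_hook<> {
    int value{};
};

int Sum() {
    boost::intrusive::list<Item> list;
    Item a{}, b{};      // element storage is owned by the caller
    a.value = 1;
    b.value = 2;
    list.push_back(a);  // links the existing node; no heap allocation
    list.push_back(b);
    int sum = 0;
    for (const Item& item : list) {
        sum += item.value;
    }
    list.clear();       // unlink everything before the Items go away
    return sum;
}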
- using BaseList::empty; - using BaseList::size; - - reference back() { - return *(--this->end()); - } - - const_reference back() const { - return *(--this->end()); - } - - reference front() { - return *this->begin(); - } - - const_reference front() const { - return *this->begin(); - } - - iterator insert(const_iterator pos, reference ref) { - KLinkedListNode* new_node = KLinkedListNode::Allocate(kernel); - ASSERT(new_node != nullptr); - new_node->Initialize(std::addressof(ref)); - return iterator(BaseList::insert(pos.m_base_it, *new_node)); - } - - void push_back(reference ref) { - this->insert(this->end(), ref); - } - - void push_front(reference ref) { - this->insert(this->begin(), ref); - } - - void pop_back() { - this->erase(--this->end()); - } - - void pop_front() { - this->erase(this->begin()); - } - - iterator erase(const iterator pos) { - KLinkedListNode* freed_node = std::addressof(*pos.m_base_it); - iterator ret = iterator(BaseList::erase(pos.m_base_it)); - KLinkedListNode::Free(kernel, freed_node); - - return ret; - } - -private: - KernelCore& kernel; -}; - -} // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h index 87ca65592..e01929da6 100644 --- a/src/core/hle/kernel/k_memory_block.h +++ b/src/core/hle/kernel/k_memory_block.h @@ -471,8 +471,8 @@ public: m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight); } - constexpr void UpdateDeviceDisableMergeStateForShareLeft( - [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { + constexpr void UpdateDeviceDisableMergeStateForShareLeft(KMemoryPermission new_perm, bool left, + bool right) { // New permission/right aren't used. if (left) { m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( @@ -482,8 +482,8 @@ public: } } - constexpr void UpdateDeviceDisableMergeStateForShareRight( - [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { + constexpr void UpdateDeviceDisableMergeStateForShareRight(KMemoryPermission new_perm, bool left, + bool right) { // New permission/left aren't used. if (right) { m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( @@ -499,8 +499,7 @@ public: this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right); } - constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, - bool right) { + constexpr void ShareToDevice(KMemoryPermission new_perm, bool left, bool right) { // New permission isn't used. // We must either be shared or have a zero lock count. @@ -516,8 +515,8 @@ public: this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right); } - constexpr void UpdateDeviceDisableMergeStateForUnshareLeft( - [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { + constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(KMemoryPermission new_perm, + bool left, bool right) { // New permission/right aren't used. if (left) { @@ -536,8 +535,8 @@ public: } } - constexpr void UpdateDeviceDisableMergeStateForUnshareRight( - [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { + constexpr void UpdateDeviceDisableMergeStateForUnshareRight(KMemoryPermission new_perm, + bool left, bool right) { // New permission/left aren't used. 
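The k_memory_block.h hunks in this range drop [[maybe_unused]] from parameters the bodies deliberately ignore, leaving the "aren't used" comments to carry that information; a named-but-unused parameter stays warning-free only if -Wunused-parameter is not enabled for this code. The three common spellings, for comparison (sketch):

void A([[maybe_unused]] int new_perm) {}  // attribute form (the style removed here)
void B(int /*new_perm*/) {}               // unnamed parameter, silent under any flags
void C(int new_perm) {}                   // named but unused (the style adopted here);
                                          // warns only under -Wunused-parameter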
if (right) { @@ -556,8 +555,7 @@ public: this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right); } - constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, - bool right) { + constexpr void UnshareToDevice(KMemoryPermission new_perm, bool left, bool right) { // New permission isn't used. // We must be shared. @@ -575,8 +573,7 @@ public: this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right); } - constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left, - bool right) { + constexpr void UnshareToDeviceRight(KMemoryPermission new_perm, bool left, bool right) { // New permission isn't used. // We must be shared. @@ -594,7 +591,7 @@ public: this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right); } - constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { + constexpr void LockForIpc(KMemoryPermission new_perm, bool left, bool right) { // We must either be locked or have a zero lock count. ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked || m_ipc_lock_count == 0); @@ -626,8 +623,7 @@ public: } } - constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left, - [[maybe_unused]] bool right) { + constexpr void UnlockForIpc(KMemoryPermission new_perm, bool left, bool right) { // New permission isn't used. // We must be locked. diff --git a/src/core/hle/kernel/k_memory_layout.cpp b/src/core/hle/kernel/k_memory_layout.cpp index 72c3ee4b7..9ff751119 100644 --- a/src/core/hle/kernel/k_memory_layout.cpp +++ b/src/core/hle/kernel/k_memory_layout.cpp @@ -18,11 +18,11 @@ KMemoryRegion* AllocateRegion(KMemoryRegionAllocator& memory_region_allocator, A } // namespace -KMemoryRegionTree::KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator_) - : memory_region_allocator{memory_region_allocator_} {} +KMemoryRegionTree::KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator) + : m_memory_region_allocator{memory_region_allocator} {} void KMemoryRegionTree::InsertDirectly(u64 address, u64 last_address, u32 attr, u32 type_id) { - this->insert(*AllocateRegion(memory_region_allocator, address, last_address, attr, type_id)); + this->insert(*AllocateRegion(m_memory_region_allocator, address, last_address, attr, type_id)); } bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) { @@ -69,7 +69,7 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at const u64 new_pair = (old_pair != std::numeric_limits<u64>::max()) ? old_pair + (address - old_address) : old_pair; - this->insert(*AllocateRegion(memory_region_allocator, address, inserted_region_last, + this->insert(*AllocateRegion(m_memory_region_allocator, address, inserted_region_last, new_pair, new_attr, type_id)); } @@ -78,7 +78,7 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at const u64 after_pair = (old_pair != std::numeric_limits<u64>::max()) ? 
old_pair + (inserted_region_end - old_address) : old_pair; - this->insert(*AllocateRegion(memory_region_allocator, inserted_region_end, old_last, + this->insert(*AllocateRegion(m_memory_region_allocator, inserted_region_end, old_last, after_pair, old_attr, old_type)); } @@ -126,14 +126,15 @@ VAddr KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u } KMemoryLayout::KMemoryLayout() - : virtual_tree{memory_region_allocator}, physical_tree{memory_region_allocator}, - virtual_linear_tree{memory_region_allocator}, physical_linear_tree{memory_region_allocator} {} + : m_virtual_tree{m_memory_region_allocator}, m_physical_tree{m_memory_region_allocator}, + m_virtual_linear_tree{m_memory_region_allocator}, m_physical_linear_tree{ + m_memory_region_allocator} {} void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start, VAddr linear_virtual_start) { // Set static differences. - linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start; - linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start; + m_linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start; + m_linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start; // Initialize linear trees. for (auto& region : GetPhysicalMemoryRegionTree()) { diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h index 17fa1a6ed..551b7a0e4 100644 --- a/src/core/hle/kernel/k_memory_layout.h +++ b/src/core/hle/kernel/k_memory_layout.h @@ -80,35 +80,35 @@ public: KMemoryLayout(); KMemoryRegionTree& GetVirtualMemoryRegionTree() { - return virtual_tree; + return m_virtual_tree; } const KMemoryRegionTree& GetVirtualMemoryRegionTree() const { - return virtual_tree; + return m_virtual_tree; } KMemoryRegionTree& GetPhysicalMemoryRegionTree() { - return physical_tree; + return m_physical_tree; } const KMemoryRegionTree& GetPhysicalMemoryRegionTree() const { - return physical_tree; + return m_physical_tree; } KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() { - return virtual_linear_tree; + return m_virtual_linear_tree; } const KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() const { - return virtual_linear_tree; + return m_virtual_linear_tree; } KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() { - return physical_linear_tree; + return m_physical_linear_tree; } const KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() const { - return physical_linear_tree; + return m_physical_linear_tree; } VAddr GetLinearVirtualAddress(PAddr address) const { - return address + linear_phys_to_virt_diff; + return address + m_linear_phys_to_virt_diff; } PAddr GetLinearPhysicalAddress(VAddr address) const { - return address + linear_virt_to_phys_diff; + return address + m_linear_virt_to_phys_diff; } const KMemoryRegion* FindVirtual(VAddr address) const { @@ -391,13 +391,13 @@ private: } private: - u64 linear_phys_to_virt_diff{}; - u64 linear_virt_to_phys_diff{}; - KMemoryRegionAllocator memory_region_allocator; - KMemoryRegionTree virtual_tree; - KMemoryRegionTree physical_tree; - KMemoryRegionTree virtual_linear_tree; - KMemoryRegionTree physical_linear_tree; + u64 m_linear_phys_to_virt_diff{}; + u64 m_linear_virt_to_phys_diff{}; + KMemoryRegionAllocator m_memory_region_allocator; + KMemoryRegionTree m_virtual_tree; + KMemoryRegionTree m_physical_tree; + KMemoryRegionTree m_virtual_linear_tree; + KMemoryRegionTree m_physical_linear_tree; }; namespace Init { diff --git a/src/core/hle/kernel/k_memory_region.h 
b/src/core/hle/kernel/k_memory_region.h index 5037e657f..cfe86fb82 100644 --- a/src/core/hle/kernel/k_memory_region.h +++ b/src/core/hle/kernel/k_memory_region.h @@ -21,15 +21,15 @@ public: YUZU_NON_MOVEABLE(KMemoryRegion); constexpr KMemoryRegion() = default; - constexpr KMemoryRegion(u64 address_, u64 last_address_) - : address{address_}, last_address{last_address_} {} - constexpr KMemoryRegion(u64 address_, u64 last_address_, u64 pair_address_, u32 attributes_, - u32 type_id_) - : address(address_), last_address(last_address_), pair_address(pair_address_), - attributes(attributes_), type_id(type_id_) {} - constexpr KMemoryRegion(u64 address_, u64 last_address_, u32 attributes_, u32 type_id_) - : KMemoryRegion(address_, last_address_, std::numeric_limits<u64>::max(), attributes_, - type_id_) {} + constexpr KMemoryRegion(u64 address, u64 last_address) + : m_address{address}, m_last_address{last_address} {} + constexpr KMemoryRegion(u64 address, u64 last_address, u64 pair_address, u32 attributes, + u32 type_id) + : m_address(address), m_last_address(last_address), m_pair_address(pair_address), + m_attributes(attributes), m_type_id(type_id) {} + constexpr KMemoryRegion(u64 address, u64 last_address, u32 attributes, u32 type_id) + : KMemoryRegion(address, last_address, std::numeric_limits<u64>::max(), attributes, + type_id) {} ~KMemoryRegion() = default; @@ -44,15 +44,15 @@ public: } constexpr u64 GetAddress() const { - return address; + return m_address; } constexpr u64 GetPairAddress() const { - return pair_address; + return m_pair_address; } constexpr u64 GetLastAddress() const { - return last_address; + return m_last_address; } constexpr u64 GetEndAddress() const { @@ -64,16 +64,16 @@ public: } constexpr u32 GetAttributes() const { - return attributes; + return m_attributes; } constexpr u32 GetType() const { - return type_id; + return m_type_id; } constexpr void SetType(u32 type) { ASSERT(this->CanDerive(type)); - type_id = type; + m_type_id = type; } constexpr bool Contains(u64 addr) const { @@ -94,27 +94,27 @@ public: } constexpr void SetPairAddress(u64 a) { - pair_address = a; + m_pair_address = a; } constexpr void SetTypeAttribute(u32 attr) { - type_id |= attr; + m_type_id |= attr; } private: constexpr void Reset(u64 a, u64 la, u64 p, u32 r, u32 t) { - address = a; - pair_address = p; - last_address = la; - attributes = r; - type_id = t; - } - - u64 address{}; - u64 last_address{}; - u64 pair_address{}; - u32 attributes{}; - u32 type_id{}; + m_address = a; + m_pair_address = p; + m_last_address = la; + m_attributes = r; + m_type_id = t; + } + + u64 m_address{}; + u64 m_last_address{}; + u64 m_pair_address{}; + u32 m_attributes{}; + u32 m_type_id{}; }; class KMemoryRegionTree final { @@ -322,7 +322,7 @@ public: private: TreeType m_tree{}; - KMemoryRegionAllocator& memory_region_allocator; + KMemoryRegionAllocator& m_memory_region_allocator; }; class KMemoryRegionAllocator final { @@ -338,18 +338,18 @@ public: template <typename... Args> KMemoryRegion* Allocate(Args&&... args) { // Ensure we stay within the bounds of our heap. - ASSERT(this->num_regions < MaxMemoryRegions); + ASSERT(m_num_regions < MaxMemoryRegions); // Create the new region. 
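The Allocate() body below replaces placement-new with std::construct_at, the <memory> facility added in C++20 that does the same thing but is usable in constant evaluation. A minimal standalone illustration of the equivalence (the types here are made up for the example):

#include <memory>
#include <new>

struct Region {
    unsigned long long address;
    unsigned long long last_address;
};

alignas(Region) unsigned char storage[sizeof(Region)];

Region* MakeRegion() {
    // Pre-C++20 spelling:
    //   Region* r = new (storage) Region{0x80000000, 0x8FFFFFFF};
    // C++20 spelling, constexpr-friendly and type-checked:
    Region* r = std::construct_at(reinterpret_cast<Region*>(storage),
                                  Region{0x80000000, 0x8FFFFFFF});
    return r;
}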
- KMemoryRegion* region = std::addressof(this->region_heap[this->num_regions++]); - new (region) KMemoryRegion(std::forward<Args>(args)...); + KMemoryRegion* region = std::addressof(m_region_heap[m_num_regions++]); + std::construct_at(region, std::forward<Args>(args)...); return region; } private: - std::array<KMemoryRegion, MaxMemoryRegions> region_heap{}; - size_t num_regions{}; + std::array<KMemoryRegion, MaxMemoryRegions> m_region_heap{}; + size_t m_num_regions{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_object_name.h b/src/core/hle/kernel/k_object_name.h index b7f943134..2d97fc777 100644 --- a/src/core/hle/kernel/k_object_name.h +++ b/src/core/hle/kernel/k_object_name.h @@ -41,7 +41,7 @@ public: // Check that the object is closed. R_UNLESS(derived->IsServerClosed(), ResultInvalidState); - return Delete(kernel, obj.GetPointerUnsafe(), name); + R_RETURN(Delete(kernel, obj.GetPointerUnsafe(), name)); } template <typename Derived> diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h index cfedaae61..b7a3ccb4a 100644 --- a/src/core/hle/kernel/k_page_buffer.h +++ b/src/core/hle/kernel/k_page_buffer.h @@ -29,7 +29,7 @@ public: static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr); private: - [[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{}; + alignas(PageSize) std::array<u8, PageSize> m_buffer{}; }; static_assert(sizeof(KPageBuffer) == KPageBufferSlabHeap::BufferSize); diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index 367dab613..5c5356338 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h @@ -484,7 +484,7 @@ private: } PageLinkedList* GetPageList() { - return &m_ll; + return std::addressof(m_ll); } }; diff --git a/src/core/hle/kernel/k_page_table_slab_heap.h b/src/core/hle/kernel/k_page_table_slab_heap.h index a9543cbd0..9a8d77316 100644 --- a/src/core/hle/kernel/k_page_table_slab_heap.h +++ b/src/core/hle/kernel/k_page_table_slab_heap.h @@ -20,7 +20,8 @@ public: PageTablePage() = default; private: - std::array<u8, PageSize> m_buffer{}; + // Initializer intentionally skipped + std::array<u8, PageSize> m_buffer; }; static_assert(sizeof(PageTablePage) == PageSize); diff --git a/src/core/hle/kernel/k_port.cpp b/src/core/hle/kernel/k_port.cpp index 0a45ffd57..1621ca1d3 100644 --- a/src/core/hle/kernel/k_port.cpp +++ b/src/core/hle/kernel/k_port.cpp @@ -7,56 +7,55 @@ namespace Kernel { -KPort::KPort(KernelCore& kernel_) - : KAutoObjectWithSlabHeapAndContainer{kernel_}, server{kernel_}, client{kernel_} {} +KPort::KPort(KernelCore& kernel) + : KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {} KPort::~KPort() = default; -void KPort::Initialize(s32 max_sessions_, bool is_light_, const std::string& name_) { +void KPort::Initialize(s32 max_sessions, bool is_light, uintptr_t name) { // Open a new reference count to the initialized port. - Open(); + this->Open(); // Create and initialize our server/client pair. - KAutoObject::Create(std::addressof(server)); - KAutoObject::Create(std::addressof(client)); - server.Initialize(this, name_ + ":Server"); - client.Initialize(this, max_sessions_, name_ + ":Client"); + KAutoObject::Create(std::addressof(m_server)); + KAutoObject::Create(std::addressof(m_client)); + m_server.Initialize(this); + m_client.Initialize(this, max_sessions); // Set our member variables. 
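Several hunks around here (k_object_name.h above, KPort below) also move bare `return ResultSuccess;` / `return expr;` statements onto the R_SUCCEED / R_RETURN macros. Assuming they expand roughly like their Atmosphère counterparts, the idiom they standardize reads like this; the expansions shown are simplified guesses for illustration, not the project's actual macro bodies:

struct Result {
    int raw;
};
inline constexpr Result ResultSuccess{0};
inline constexpr Result ResultPortClosed{1};

// Hypothetical, simplified expansions:
#define R_SUCCEED() return ResultSuccess
#define R_RETURN(expr) return (expr)
#define R_UNLESS(cond, res) \
    do {                    \
        if (!(cond)) {      \
            return (res);   \
        }                   \
    } while (0)

Result Enqueue(bool port_open) {
    R_UNLESS(port_open, ResultPortClosed);  // guard clauses read uniformly
    R_SUCCEED();                            // as does the success path
}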
- is_light = is_light_; - name = name_; - state = State::Normal; + m_is_light = is_light; + m_name = name; + m_state = State::Normal; } void KPort::OnClientClosed() { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; - if (state == State::Normal) { - state = State::ClientClosed; + if (m_state == State::Normal) { + m_state = State::ClientClosed; } } void KPort::OnServerClosed() { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; - if (state == State::Normal) { - state = State::ServerClosed; + if (m_state == State::Normal) { + m_state = State::ServerClosed; } } bool KPort::IsServerClosed() const { - KScopedSchedulerLock sl{kernel}; - return state == State::ServerClosed; + KScopedSchedulerLock sl{m_kernel}; + return m_state == State::ServerClosed; } Result KPort::EnqueueSession(KServerSession* session) { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; - R_UNLESS(state == State::Normal, ResultPortClosed); + R_UNLESS(m_state == State::Normal, ResultPortClosed); - server.EnqueueSession(session); - - return ResultSuccess; + m_server.EnqueueSession(session); + R_SUCCEED(); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_port.h b/src/core/hle/kernel/k_port.h index 0cfc16dab..991be27ab 100644 --- a/src/core/hle/kernel/k_port.h +++ b/src/core/hle/kernel/k_port.h @@ -19,17 +19,20 @@ class KPort final : public KAutoObjectWithSlabHeapAndContainer<KPort, KAutoObjec KERNEL_AUTOOBJECT_TRAITS(KPort, KAutoObject); public: - explicit KPort(KernelCore& kernel_); + explicit KPort(KernelCore& kernel); ~KPort() override; - static void PostDestroy([[maybe_unused]] uintptr_t arg) {} + static void PostDestroy(uintptr_t arg) {} - void Initialize(s32 max_sessions_, bool is_light_, const std::string& name_); + void Initialize(s32 max_sessions, bool is_light, uintptr_t name); void OnClientClosed(); void OnServerClosed(); + uintptr_t GetName() const { + return m_name; + } bool IsLight() const { - return is_light; + return m_is_light; } bool IsServerClosed() const; @@ -37,16 +40,16 @@ public: Result EnqueueSession(KServerSession* session); KClientPort& GetClientPort() { - return client; + return m_client; } KServerPort& GetServerPort() { - return server; + return m_server; } const KClientPort& GetClientPort() const { - return client; + return m_client; } const KServerPort& GetServerPort() const { - return server; + return m_server; } private: @@ -57,10 +60,11 @@ private: ServerClosed = 3, }; - KServerPort server; - KClientPort client; - State state{State::Invalid}; - bool is_light{}; + KServerPort m_server; + KClientPort m_client; + uintptr_t m_name; + State m_state{State::Invalid}; + bool m_is_light{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h index 645c5b531..26677ec65 100644 --- a/src/core/hle/kernel/k_priority_queue.h +++ b/src/core/hle/kernel/k_priority_queue.h @@ -77,11 +77,11 @@ private: public: class KPerCoreQueue { private: - std::array<Entry, NumCores> root{}; + std::array<Entry, NumCores> m_root{}; public: constexpr KPerCoreQueue() { - for (auto& per_core_root : root) { + for (auto& per_core_root : m_root) { per_core_root.Initialize(); } } @@ -91,15 +91,15 @@ public: Entry& member_entry = member->GetPriorityQueueEntry(core); // Get the entry associated with the end of the queue. - Member* tail = this->root[core].GetPrev(); + Member* tail = m_root[core].GetPrev(); Entry& tail_entry = - (tail != nullptr) ? 
tail->GetPriorityQueueEntry(core) : this->root[core]; + (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : m_root[core]; // Link the entries. member_entry.SetPrev(tail); member_entry.SetNext(nullptr); tail_entry.SetNext(member); - this->root[core].SetPrev(member); + m_root[core].SetPrev(member); return tail == nullptr; } @@ -109,15 +109,15 @@ public: Entry& member_entry = member->GetPriorityQueueEntry(core); // Get the entry associated with the front of the queue. - Member* head = this->root[core].GetNext(); + Member* head = m_root[core].GetNext(); Entry& head_entry = - (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core]; + (head != nullptr) ? head->GetPriorityQueueEntry(core) : m_root[core]; // Link the entries. member_entry.SetPrev(nullptr); member_entry.SetNext(head); head_entry.SetPrev(member); - this->root[core].SetNext(member); + m_root[core].SetNext(member); return (head == nullptr); } @@ -130,9 +130,9 @@ public: Member* prev = member_entry.GetPrev(); Member* next = member_entry.GetNext(); Entry& prev_entry = - (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : this->root[core]; + (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : m_root[core]; Entry& next_entry = - (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core]; + (next != nullptr) ? next->GetPriorityQueueEntry(core) : m_root[core]; // Unlink. prev_entry.SetNext(next); @@ -142,7 +142,7 @@ public: } constexpr Member* GetFront(s32 core) const { - return this->root[core].GetNext(); + return m_root[core].GetNext(); } }; @@ -158,8 +158,8 @@ public: return; } - if (this->queues[priority].PushBack(core, member)) { - this->available_priorities[core].SetBit(priority); + if (m_queues[priority].PushBack(core, member)) { + m_available_priorities[core].SetBit(priority); } } @@ -171,8 +171,8 @@ public: return; } - if (this->queues[priority].PushFront(core, member)) { - this->available_priorities[core].SetBit(priority); + if (m_queues[priority].PushFront(core, member)) { + m_available_priorities[core].SetBit(priority); } } @@ -184,18 +184,17 @@ public: return; } - if (this->queues[priority].Remove(core, member)) { - this->available_priorities[core].ClearBit(priority); + if (m_queues[priority].Remove(core, member)) { + m_available_priorities[core].ClearBit(priority); } } constexpr Member* GetFront(s32 core) const { ASSERT(IsValidCore(core)); - const s32 priority = - static_cast<s32>(this->available_priorities[core].CountLeadingZero()); + const s32 priority = static_cast<s32>(m_available_priorities[core].CountLeadingZero()); if (priority <= LowestPriority) { - return this->queues[priority].GetFront(core); + return m_queues[priority].GetFront(core); } else { return nullptr; } @@ -206,7 +205,7 @@ public: ASSERT(IsValidPriority(priority)); if (priority <= LowestPriority) { - return this->queues[priority].GetFront(core); + return m_queues[priority].GetFront(core); } else { return nullptr; } @@ -218,9 +217,9 @@ public: Member* next = member->GetPriorityQueueEntry(core).GetNext(); if (next == nullptr) { const s32 priority = static_cast<s32>( - this->available_priorities[core].GetNextSet(member->GetPriority())); + m_available_priorities[core].GetNextSet(member->GetPriority())); if (priority <= LowestPriority) { - next = this->queues[priority].GetFront(core); + next = m_queues[priority].GetFront(core); } } return next; @@ -231,8 +230,8 @@ public: ASSERT(IsValidPriority(priority)); if (priority <= LowestPriority) { - this->queues[priority].Remove(core, member); - 
this->queues[priority].PushFront(core, member); + m_queues[priority].Remove(core, member); + m_queues[priority].PushFront(core, member); } } @@ -241,29 +240,29 @@ public: ASSERT(IsValidPriority(priority)); if (priority <= LowestPriority) { - this->queues[priority].Remove(core, member); - this->queues[priority].PushBack(core, member); - return this->queues[priority].GetFront(core); + m_queues[priority].Remove(core, member); + m_queues[priority].PushBack(core, member); + return m_queues[priority].GetFront(core); } else { return nullptr; } } private: - std::array<KPerCoreQueue, NumPriority> queues{}; - std::array<Common::BitSet64<NumPriority>, NumCores> available_priorities{}; + std::array<KPerCoreQueue, NumPriority> m_queues{}; + std::array<Common::BitSet64<NumPriority>, NumCores> m_available_priorities{}; }; private: - KPriorityQueueImpl scheduled_queue; - KPriorityQueueImpl suggested_queue; + KPriorityQueueImpl m_scheduled_queue; + KPriorityQueueImpl m_suggested_queue; private: - constexpr void ClearAffinityBit(u64& affinity, s32 core) { + static constexpr void ClearAffinityBit(u64& affinity, s32 core) { affinity &= ~(UINT64_C(1) << core); } - constexpr s32 GetNextCore(u64& affinity) { + static constexpr s32 GetNextCore(u64& affinity) { const s32 core = std::countr_zero(affinity); ClearAffinityBit(affinity, core); return core; @@ -275,13 +274,13 @@ private: // Push onto the scheduled queue for its core, if we can. u64 affinity = member->GetAffinityMask().GetAffinityMask(); if (const s32 core = member->GetActiveCore(); core >= 0) { - this->scheduled_queue.PushBack(priority, core, member); + m_scheduled_queue.PushBack(priority, core, member); ClearAffinityBit(affinity, core); } // And suggest the thread for all other cores. while (affinity) { - this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); + m_suggested_queue.PushBack(priority, GetNextCore(affinity), member); } } @@ -291,14 +290,14 @@ private: // Push onto the scheduled queue for its core, if we can. u64 affinity = member->GetAffinityMask().GetAffinityMask(); if (const s32 core = member->GetActiveCore(); core >= 0) { - this->scheduled_queue.PushFront(priority, core, member); + m_scheduled_queue.PushFront(priority, core, member); ClearAffinityBit(affinity, core); } // And suggest the thread for all other cores. // Note: Nintendo pushes onto the back of the suggested queue, not the front. while (affinity) { - this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); + m_suggested_queue.PushBack(priority, GetNextCore(affinity), member); } } @@ -308,13 +307,13 @@ private: // Remove from the scheduled queue for its core. u64 affinity = member->GetAffinityMask().GetAffinityMask(); if (const s32 core = member->GetActiveCore(); core >= 0) { - this->scheduled_queue.Remove(priority, core, member); + m_scheduled_queue.Remove(priority, core, member); ClearAffinityBit(affinity, core); } // Remove from the suggested queue for all other cores. while (affinity) { - this->suggested_queue.Remove(priority, GetNextCore(affinity), member); + m_suggested_queue.Remove(priority, GetNextCore(affinity), member); } } @@ -323,27 +322,27 @@ public: // Getters. 
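The GetFront() getters that follow pick the best-priority thread with a single bit-scan over m_available_priorities instead of probing all per-priority queues; PushBack/Remove keep one bit per priority in sync with queue emptiness. The same find-first-set selection in portable C++ (standalone sketch; a lower number is a higher priority, as in the kernel):

#include <bit>
#include <cstdint>

// Bit i set <=> priority queue i is non-empty.
std::uint64_t available = 0;

void MarkNonEmpty(int priority) {
    available |= (std::uint64_t{1} << priority);
}

void MarkEmpty(int priority) {
    available &= ~(std::uint64_t{1} << priority);
}

// Best (lowest-numbered) non-empty priority, or -1 if all queues are empty.
int BestPriority() {
    if (available == 0) {
        return -1;
    }
    // O(1) bit scan; the kernel's BitSet64::CountLeadingZero performs the
    // equivalent scan from the other end of its storage.
    return std::countr_zero(available);
}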
constexpr Member* GetScheduledFront(s32 core) const { - return this->scheduled_queue.GetFront(core); + return m_scheduled_queue.GetFront(core); } constexpr Member* GetScheduledFront(s32 core, s32 priority) const { - return this->scheduled_queue.GetFront(priority, core); + return m_scheduled_queue.GetFront(priority, core); } constexpr Member* GetSuggestedFront(s32 core) const { - return this->suggested_queue.GetFront(core); + return m_suggested_queue.GetFront(core); } constexpr Member* GetSuggestedFront(s32 core, s32 priority) const { - return this->suggested_queue.GetFront(priority, core); + return m_suggested_queue.GetFront(priority, core); } constexpr Member* GetScheduledNext(s32 core, const Member* member) const { - return this->scheduled_queue.GetNext(core, member); + return m_scheduled_queue.GetNext(core, member); } constexpr Member* GetSuggestedNext(s32 core, const Member* member) const { - return this->suggested_queue.GetNext(core, member); + return m_suggested_queue.GetNext(core, member); } constexpr Member* GetSamePriorityNext(s32 core, const Member* member) const { @@ -375,7 +374,7 @@ public: return; } - this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member); + m_scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member); } constexpr KThread* MoveToScheduledBack(Member* member) { @@ -384,8 +383,7 @@ public: return {}; } - return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), - member); + return m_scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), member); } // First class fancy operations. @@ -425,9 +423,9 @@ public: for (s32 core = 0; core < static_cast<s32>(NumCores); core++) { if (prev_affinity.GetAffinity(core)) { if (core == prev_core) { - this->scheduled_queue.Remove(priority, core, member); + m_scheduled_queue.Remove(priority, core, member); } else { - this->suggested_queue.Remove(priority, core, member); + m_suggested_queue.Remove(priority, core, member); } } } @@ -436,9 +434,9 @@ public: for (s32 core = 0; core < static_cast<s32>(NumCores); core++) { if (new_affinity.GetAffinity(core)) { if (core == new_core) { - this->scheduled_queue.PushBack(priority, core, member); + m_scheduled_queue.PushBack(priority, core, member); } else { - this->suggested_queue.PushBack(priority, core, member); + m_suggested_queue.PushBack(priority, core, member); } } } @@ -458,22 +456,22 @@ public: if (prev_core != new_core) { // Remove from the scheduled queue for the previous core. if (prev_core >= 0) { - this->scheduled_queue.Remove(priority, prev_core, member); + m_scheduled_queue.Remove(priority, prev_core, member); } // Remove from the suggested queue and add to the scheduled queue for the new core. if (new_core >= 0) { - this->suggested_queue.Remove(priority, new_core, member); + m_suggested_queue.Remove(priority, new_core, member); if (to_front) { - this->scheduled_queue.PushFront(priority, new_core, member); + m_scheduled_queue.PushFront(priority, new_core, member); } else { - this->scheduled_queue.PushBack(priority, new_core, member); + m_scheduled_queue.PushBack(priority, new_core, member); } } // Add to the suggested queue for the previous core. 
if (prev_core >= 0) { - this->suggested_queue.PushBack(priority, prev_core, member); + m_suggested_queue.PushBack(priority, prev_core, member); } } } diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 74a04aa66..9d18f4049 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp @@ -44,14 +44,14 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority SCOPE_EXIT({ thread->Close(); }); ASSERT(KThread::InitializeUserThread(system, thread, entry_point, 0, stack_top, priority, - owner_process.GetIdealCoreId(), &owner_process) + owner_process.GetIdealCoreId(), + std::addressof(owner_process)) .IsSuccess()); // Register 1 must be a handle to the main thread Handle thread_handle{}; - owner_process.GetHandleTable().Add(&thread_handle, thread); + owner_process.GetHandleTable().Add(std::addressof(thread_handle), thread); - thread->SetName("main"); thread->GetContext32().cpu_registers[0] = 0; thread->GetContext64().cpu_registers[0] = 0; thread->GetContext32().cpu_registers[1] = thread_handle; @@ -71,32 +71,32 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string auto& kernel = system.Kernel(); process->name = std::move(process_name); - process->resource_limit = res_limit; - process->system_resource_address = 0; - process->state = State::Created; - process->program_id = 0; - process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() - : kernel.CreateNewUserProcessID(); - process->capabilities.InitializeForMetadatalessProcess(); - process->is_initialized = true; + process->m_resource_limit = res_limit; + process->m_system_resource_address = 0; + process->m_state = State::Created; + process->m_program_id = 0; + process->m_process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() + : kernel.CreateNewUserProcessID(); + process->m_capabilities.InitializeForMetadatalessProcess(); + process->m_is_initialized = true; std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr))); std::uniform_int_distribution<u64> distribution; - std::generate(process->random_entropy.begin(), process->random_entropy.end(), + std::generate(process->m_random_entropy.begin(), process->m_random_entropy.end(), [&] { return distribution(rng); }); kernel.AppendNewProcess(process); // Clear remaining fields. - process->num_running_threads = 0; - process->is_signaled = false; - process->exception_thread = nullptr; - process->is_suspended = false; - process->schedule_count = 0; - process->is_handle_table_initialized = false; + process->m_num_running_threads = 0; + process->m_is_signaled = false; + process->m_exception_thread = nullptr; + process->m_is_suspended = false; + process->m_schedule_count = 0; + process->m_is_handle_table_initialized = false; // Open a reference to the resource limit. 
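The m_resource_limit->Open() call that follows takes an extra reference on the limit object, paired with the Close() in KProcess::Finalize() further down; kernel objects here are intrusively reference-counted rather than managed through smart pointers. A reduced sketch of the pattern (hypothetical minimal type):

#include <atomic>

class RefCounted {
public:
    void Open() {  // take a reference
        m_refs.fetch_add(1, std::memory_order_relaxed);
    }

    void Close() {  // drop a reference; the last Close() destroys
        if (m_refs.fetch_sub(1, std::memory_order_acq_rel) == 1) {
            this->Destroy();
        }
    }

private:
    void Destroy() {
        // return storage to a slab heap, delete, etc.
    }

    std::atomic<int> m_refs{1};  // the creator holds the first reference
};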
- process->resource_limit->Open(); + process->m_resource_limit->Open(); R_SUCCEED(); } @@ -106,65 +106,65 @@ void KProcess::DoWorkerTaskImpl() { } KResourceLimit* KProcess::GetResourceLimit() const { - return resource_limit; + return m_resource_limit; } void KProcess::IncrementRunningThreadCount() { - ASSERT(num_running_threads.load() >= 0); - ++num_running_threads; + ASSERT(m_num_running_threads.load() >= 0); + ++m_num_running_threads; } void KProcess::DecrementRunningThreadCount() { - ASSERT(num_running_threads.load() > 0); + ASSERT(m_num_running_threads.load() > 0); - if (const auto prev = num_running_threads--; prev == 1) { + if (const auto prev = m_num_running_threads--; prev == 1) { // TODO(bunnei): Process termination to be implemented when multiprocess is supported. } } u64 KProcess::GetTotalPhysicalMemoryAvailable() { - const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) + - page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size + - main_thread_stack_size}; - if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); + const u64 capacity{m_resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) + + m_page_table.GetNormalMemorySize() + GetSystemResourceSize() + m_image_size + + m_main_thread_stack_size}; + if (const auto pool_size = m_kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); capacity != pool_size) { LOG_WARNING(Kernel, "capacity {} != application pool size {}", capacity, pool_size); } - if (capacity < memory_usage_capacity) { + if (capacity < m_memory_usage_capacity) { return capacity; } - return memory_usage_capacity; + return m_memory_usage_capacity; } u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() { - return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize(); + return this->GetTotalPhysicalMemoryAvailable() - this->GetSystemResourceSize(); } u64 KProcess::GetTotalPhysicalMemoryUsed() { - return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() + - GetSystemResourceSize(); + return m_image_size + m_main_thread_stack_size + m_page_table.GetNormalMemorySize() + + this->GetSystemResourceSize(); } u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() { - return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage(); + return this->GetTotalPhysicalMemoryUsed() - this->GetSystemResourceUsage(); } bool KProcess::ReleaseUserException(KThread* thread) { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; - if (exception_thread == thread) { - exception_thread = nullptr; + if (m_exception_thread == thread) { + m_exception_thread = nullptr; // Remove waiter thread. bool has_waiters{}; if (KThread* next = thread->RemoveKernelWaiterByKey( std::addressof(has_waiters), - reinterpret_cast<uintptr_t>(std::addressof(exception_thread))); + reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread))); next != nullptr) { next->EndWait(ResultSuccess); } - KScheduler::SetSchedulerUpdateNeeded(kernel); + KScheduler::SetSchedulerUpdateNeeded(m_kernel); return true; } else { @@ -173,72 +173,72 @@ bool KProcess::ReleaseUserException(KThread* thread) { } void KProcess::PinCurrentThread(s32 core_id) { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); // Get the current thread. 
KThread* cur_thread = - kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread(); + m_kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread(); // If the thread isn't terminated, pin it. if (!cur_thread->IsTerminationRequested()) { // Pin it. - PinThread(core_id, cur_thread); + this->PinThread(core_id, cur_thread); cur_thread->Pin(core_id); // An update is needed. - KScheduler::SetSchedulerUpdateNeeded(kernel); + KScheduler::SetSchedulerUpdateNeeded(m_kernel); } } void KProcess::UnpinCurrentThread(s32 core_id) { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); // Get the current thread. KThread* cur_thread = - kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread(); + m_kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread(); // Unpin it. cur_thread->Unpin(); - UnpinThread(core_id, cur_thread); + this->UnpinThread(core_id, cur_thread); // An update is needed. - KScheduler::SetSchedulerUpdateNeeded(kernel); + KScheduler::SetSchedulerUpdateNeeded(m_kernel); } void KProcess::UnpinThread(KThread* thread) { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); // Get the thread's core id. const auto core_id = thread->GetActiveCore(); // Unpin it. - UnpinThread(core_id, thread); + this->UnpinThread(core_id, thread); thread->Unpin(); // An update is needed. - KScheduler::SetSchedulerUpdateNeeded(kernel); + KScheduler::SetSchedulerUpdateNeeded(m_kernel); } Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, [[maybe_unused]] size_t size) { // Lock ourselves, to prevent concurrent access. - KScopedLightLock lk(state_lock); + KScopedLightLock lk(m_state_lock); // Try to find an existing info for the memory. KSharedMemoryInfo* shemen_info = nullptr; const auto iter = std::find_if( - shared_memory_list.begin(), shared_memory_list.end(), + m_shared_memory_list.begin(), m_shared_memory_list.end(), [shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; }); - if (iter != shared_memory_list.end()) { + if (iter != m_shared_memory_list.end()) { shemen_info = *iter; } if (shemen_info == nullptr) { - shemen_info = KSharedMemoryInfo::Allocate(kernel); + shemen_info = KSharedMemoryInfo::Allocate(m_kernel); R_UNLESS(shemen_info != nullptr, ResultOutOfMemory); shemen_info->Initialize(shmem); - shared_memory_list.push_back(shemen_info); + m_shared_memory_list.push_back(shemen_info); } // Open a reference to the shared memory and its info. @@ -251,21 +251,21 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, [[maybe_unused]] size_t size) { // Lock ourselves, to prevent concurrent access. 
- KScopedLightLock lk(state_lock); + KScopedLightLock lk(m_state_lock); KSharedMemoryInfo* shemen_info = nullptr; const auto iter = std::find_if( - shared_memory_list.begin(), shared_memory_list.end(), + m_shared_memory_list.begin(), m_shared_memory_list.end(), [shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; }); - if (iter != shared_memory_list.end()) { + if (iter != m_shared_memory_list.end()) { shemen_info = *iter; } ASSERT(shemen_info != nullptr); if (shemen_info->Close()) { - shared_memory_list.erase(iter); - KSharedMemoryInfo::Free(kernel, shemen_info); + m_shared_memory_list.erase(iter); + KSharedMemoryInfo::Free(m_kernel, shemen_info); } // Close a reference to the shared memory. @@ -273,22 +273,22 @@ void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr a } void KProcess::RegisterThread(KThread* thread) { - KScopedLightLock lk{list_lock}; + KScopedLightLock lk{m_list_lock}; - thread_list.push_back(thread); + m_thread_list.push_back(thread); } void KProcess::UnregisterThread(KThread* thread) { - KScopedLightLock lk{list_lock}; + KScopedLightLock lk{m_list_lock}; - thread_list.remove(thread); + m_thread_list.remove(thread); } u64 KProcess::GetFreeThreadCount() const { - if (resource_limit != nullptr) { + if (m_resource_limit != nullptr) { const auto current_value = - resource_limit->GetCurrentValue(LimitableResource::ThreadCountMax); - const auto limit_value = resource_limit->GetLimitValue(LimitableResource::ThreadCountMax); + m_resource_limit->GetCurrentValue(LimitableResource::ThreadCountMax); + const auto limit_value = m_resource_limit->GetLimitValue(LimitableResource::ThreadCountMax); return limit_value - current_value; } else { return 0; @@ -297,84 +297,85 @@ u64 KProcess::GetFreeThreadCount() const { Result KProcess::Reset() { // Lock the process and the scheduler. - KScopedLightLock lk(state_lock); - KScopedSchedulerLock sl{kernel}; + KScopedLightLock lk(m_state_lock); + KScopedSchedulerLock sl{m_kernel}; // Validate that we're in a state that we can reset. - R_UNLESS(state != State::Terminated, ResultInvalidState); - R_UNLESS(is_signaled, ResultInvalidState); + R_UNLESS(m_state != State::Terminated, ResultInvalidState); + R_UNLESS(m_is_signaled, ResultInvalidState); // Clear signaled. - is_signaled = false; + m_is_signaled = false; R_SUCCEED(); } Result KProcess::SetActivity(ProcessActivity activity) { // Lock ourselves and the scheduler. - KScopedLightLock lk{state_lock}; - KScopedLightLock list_lk{list_lock}; - KScopedSchedulerLock sl{kernel}; + KScopedLightLock lk{m_state_lock}; + KScopedLightLock list_lk{m_list_lock}; + KScopedSchedulerLock sl{m_kernel}; // Validate our state. - R_UNLESS(state != State::Terminating, ResultInvalidState); - R_UNLESS(state != State::Terminated, ResultInvalidState); + R_UNLESS(m_state != State::Terminating, ResultInvalidState); + R_UNLESS(m_state != State::Terminated, ResultInvalidState); // Either pause or resume. if (activity == ProcessActivity::Paused) { // Verify that we're not suspended. - R_UNLESS(!is_suspended, ResultInvalidState); + R_UNLESS(!m_is_suspended, ResultInvalidState); // Suspend all threads. - for (auto* thread : GetThreadList()) { + for (auto* thread : this->GetThreadList()) { thread->RequestSuspend(SuspendType::Process); } // Set ourselves as suspended. - SetSuspended(true); + this->SetSuspended(true); } else { ASSERT(activity == ProcessActivity::Runnable); // Verify that we're suspended. 
- R_UNLESS(is_suspended, ResultInvalidState); + R_UNLESS(m_is_suspended, ResultInvalidState); // Resume all threads. - for (auto* thread : GetThreadList()) { + for (auto* thread : this->GetThreadList()) { thread->Resume(SuspendType::Process); } // Set ourselves as resumed. - SetSuspended(false); + this->SetSuspended(false); } R_SUCCEED(); } Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) { - program_id = metadata.GetTitleID(); - ideal_core = metadata.GetMainThreadCore(); - is_64bit_process = metadata.Is64BitProgram(); - system_resource_size = metadata.GetSystemResourceSize(); - image_size = code_size; + m_program_id = metadata.GetTitleID(); + m_ideal_core = metadata.GetMainThreadCore(); + m_is_64bit_process = metadata.Is64BitProgram(); + m_system_resource_size = metadata.GetSystemResourceSize(); + m_image_size = code_size; KScopedResourceReservation memory_reservation( - resource_limit, LimitableResource::PhysicalMemoryMax, code_size + system_resource_size); + m_resource_limit, LimitableResource::PhysicalMemoryMax, code_size + m_system_resource_size); if (!memory_reservation.Succeeded()) { LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes", - code_size + system_resource_size); + code_size + m_system_resource_size); R_RETURN(ResultLimitReached); } // Initialize process address space - if (const Result result{page_table.InitializeForProcess( + if (const Result result{m_page_table.InitializeForProcess( metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application, - 0x8000000, code_size, &kernel.GetAppSystemResource(), resource_limit)}; + 0x8000000, code_size, std::addressof(m_kernel.GetAppSystemResource()), + m_resource_limit)}; result.IsError()) { R_RETURN(result); } // Map process code region - if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(), - code_size / PageSize, KMemoryState::Code, - KMemoryPermission::None)}; + if (const Result result{m_page_table.MapProcessCode(m_page_table.GetCodeRegionStart(), + code_size / PageSize, KMemoryState::Code, + KMemoryPermission::None)}; result.IsError()) { R_RETURN(result); } @@ -382,7 +383,7 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: // Initialize process capabilities const auto& caps{metadata.GetKernelCapabilities()}; if (const Result result{ - capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)}; + m_capabilities.InitializeForUserProcess(caps.data(), caps.size(), m_page_table)}; result.IsError()) { R_RETURN(result); } @@ -392,12 +393,14 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: case FileSys::ProgramAddressSpaceType::Is32Bit: case FileSys::ProgramAddressSpaceType::Is36Bit: case FileSys::ProgramAddressSpaceType::Is39Bit: - memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart(); + m_memory_usage_capacity = + m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart(); break; case FileSys::ProgramAddressSpaceType::Is32BitNoMap: - memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() + - page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart(); + m_memory_usage_capacity = + m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart() + + m_page_table.GetAliasRegionEnd() - m_page_table.GetAliasRegionStart(); break; default: @@ -406,33 +409,34 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, 
std: } // Create TLS region - R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address))); + R_TRY(this->CreateThreadLocalRegion(std::addressof(m_plr_address))); memory_reservation.Commit(); - R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize())); + R_RETURN(m_handle_table.Initialize(m_capabilities.GetHandleTableSize())); } void KProcess::Run(s32 main_thread_priority, u64 stack_size) { - ASSERT(AllocateMainThreadStack(stack_size) == ResultSuccess); - resource_limit->Reserve(LimitableResource::ThreadCountMax, 1); + ASSERT(this->AllocateMainThreadStack(stack_size) == ResultSuccess); + m_resource_limit->Reserve(LimitableResource::ThreadCountMax, 1); - const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)}; - ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError()); + const std::size_t heap_capacity{m_memory_usage_capacity - + (m_main_thread_stack_size + m_image_size)}; + ASSERT(!m_page_table.SetMaxHeapSize(heap_capacity).IsError()); - ChangeState(State::Running); + this->ChangeState(State::Running); - SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top); + SetupMainThread(m_kernel.System(), *this, main_thread_priority, m_main_thread_stack_top); } void KProcess::PrepareForTermination() { - ChangeState(State::Terminating); + this->ChangeState(State::Terminating); const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) { for (auto* thread : in_thread_list) { if (thread->GetOwnerProcess() != this) continue; - if (thread == GetCurrentThreadPointer(kernel)) + if (thread == GetCurrentThreadPointer(m_kernel)) continue; // TODO(Subv): When are the other running/ready threads terminated? @@ -443,24 +447,24 @@ void KProcess::PrepareForTermination() { } }; - stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList()); + stop_threads(m_kernel.System().GlobalSchedulerContext().GetThreadList()); - this->DeleteThreadLocalRegion(plr_address); - plr_address = 0; + this->DeleteThreadLocalRegion(m_plr_address); + m_plr_address = 0; - if (resource_limit) { - resource_limit->Release(LimitableResource::PhysicalMemoryMax, - main_thread_stack_size + image_size); + if (m_resource_limit) { + m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, + m_main_thread_stack_size + m_image_size); } - ChangeState(State::Terminated); + this->ChangeState(State::Terminated); } void KProcess::Finalize() { // Free all shared memory infos. { - auto it = shared_memory_list.begin(); - while (it != shared_memory_list.end()) { + auto it = m_shared_memory_list.begin(); + while (it != m_shared_memory_list.end()) { KSharedMemoryInfo* info = *it; KSharedMemory* shmem = info->GetSharedMemory(); @@ -470,22 +474,22 @@ void KProcess::Finalize() { shmem->Close(); - it = shared_memory_list.erase(it); - KSharedMemoryInfo::Free(kernel, info); + it = m_shared_memory_list.erase(it); + KSharedMemoryInfo::Free(m_kernel, info); } } // Release memory to the resource limit. - if (resource_limit != nullptr) { - resource_limit->Close(); - resource_limit = nullptr; + if (m_resource_limit != nullptr) { + m_resource_limit->Close(); + m_resource_limit = nullptr; } // Finalize the page table. - page_table.Finalize(); + m_page_table.Finalize(); // Perform inherited finalization. 
- KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize(); + KSynchronizationObject::Finalize(); } Result KProcess::CreateThreadLocalRegion(VAddr* out) { @@ -494,16 +498,16 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) { // See if we can get a region from a partially used TLP. { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; - if (auto it = partially_used_tlp_tree.begin(); it != partially_used_tlp_tree.end()) { + if (auto it = m_partially_used_tlp_tree.begin(); it != m_partially_used_tlp_tree.end()) { tlr = it->Reserve(); ASSERT(tlr != 0); if (it->IsAllUsed()) { tlp = std::addressof(*it); - partially_used_tlp_tree.erase(it); - fully_used_tlp_tree.insert(*tlp); + m_partially_used_tlp_tree.erase(it); + m_fully_used_tlp_tree.insert(*tlp); } *out = tlr; @@ -512,12 +516,12 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) { } // Allocate a new page. - tlp = KThreadLocalPage::Allocate(kernel); + tlp = KThreadLocalPage::Allocate(m_kernel); R_UNLESS(tlp != nullptr, ResultOutOfMemory); - auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(kernel, tlp); }); + auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(m_kernel, tlp); }); // Initialize the new page. - R_TRY(tlp->Initialize(kernel, this)); + R_TRY(tlp->Initialize(m_kernel, this)); // Reserve a TLR. tlr = tlp->Reserve(); @@ -525,11 +529,11 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) { // Insert into our tree. { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; if (tlp->IsAllUsed()) { - fully_used_tlp_tree.insert(*tlp); + m_fully_used_tlp_tree.insert(*tlp); } else { - partially_used_tlp_tree.insert(*tlp); + m_partially_used_tlp_tree.insert(*tlp); } } @@ -544,25 +548,25 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) { // Release the region. { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; // Try to find the page in the partially used list. - auto it = partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize)); - if (it == partially_used_tlp_tree.end()) { + auto it = m_partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize)); + if (it == m_partially_used_tlp_tree.end()) { // If we don't find it, it has to be in the fully used list. - it = fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize)); - R_UNLESS(it != fully_used_tlp_tree.end(), ResultInvalidAddress); + it = m_fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize)); + R_UNLESS(it != m_fully_used_tlp_tree.end(), ResultInvalidAddress); // Release the region. it->Release(addr); // Move the page out of the fully used list. KThreadLocalPage* tlp = std::addressof(*it); - fully_used_tlp_tree.erase(it); + m_fully_used_tlp_tree.erase(it); if (tlp->IsAllFree()) { page_to_free = tlp; } else { - partially_used_tlp_tree.insert(*tlp); + m_partially_used_tlp_tree.insert(*tlp); } } else { // Release the region. @@ -571,7 +575,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) { // Handle the all-free case. 
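Create/DeleteThreadLocalRegion keep every TLS page in exactly one of two intrusive trees and migrate it as slots are reserved or released; the all-free branch below hands the page back to the slab. The per-page bookkeeping reduces to a small state machine (sketch; the slot count is an assumption for the example):

// partially_used --(last free slot reserved)--> fully_used
// fully_used     --(any slot released)--------> partially_used
// partially_used --(last used slot released)--> freed to the slab
//
// A page is never in both trees, so CreateThreadLocalRegion can take the
// first partially-used page without searching for a free slot.

struct ThreadLocalPage {
    static constexpr unsigned kSlotsPerPage = 8;  // assumed value
    unsigned used_slots = 0;

    bool IsAllUsed() const { return used_slots == kSlotsPerPage; }
    bool IsAllFree() const { return used_slots == 0; }
};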
KThreadLocalPage* tlp = std::addressof(*it); if (tlp->IsAllFree()) { - partially_used_tlp_tree.erase(it); + m_partially_used_tlp_tree.erase(it); page_to_free = tlp; } } @@ -581,7 +585,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) { if (page_to_free != nullptr) { page_to_free->Finalize(); - KThreadLocalPage::Free(kernel, page_to_free); + KThreadLocalPage::Free(m_kernel, page_to_free); } R_SUCCEED(); @@ -589,11 +593,11 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) { bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type) { - const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) { + const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) { return wp.type == DebugWatchpointType::None; })}; - if (watch == watchpoints.end()) { + if (watch == m_watchpoints.end()) { return false; } @@ -602,7 +606,7 @@ bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size, watch->type = type; for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) { - debug_page_refcounts[page]++; + m_debug_page_refcounts[page]++; system.Memory().MarkRegionDebug(page, PageSize, true); } @@ -611,11 +615,11 @@ bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size, bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type) { - const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) { + const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) { return wp.start_address == addr && wp.end_address == addr + size && wp.type == type; })}; - if (watch == watchpoints.end()) { + if (watch == m_watchpoints.end()) { return false; } @@ -624,8 +628,8 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, watch->type = DebugWatchpointType::None; for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) { - debug_page_refcounts[page]--; - if (!debug_page_refcounts[page]) { + m_debug_page_refcounts[page]--; + if (!m_debug_page_refcounts[page]) { system.Memory().MarkRegionDebug(page, PageSize, false); } } @@ -636,11 +640,11 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) { const auto ReprotectSegment = [&](const CodeSet::Segment& segment, Svc::MemoryPermission permission) { - page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission); + m_page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission); }; - kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(), - code_set.memory.size()); + m_kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(), + code_set.memory.size()); ReprotectSegment(code_set.CodeSegment(), Svc::MemoryPermission::ReadExecute); ReprotectSegment(code_set.RODataSegment(), Svc::MemoryPermission::Read); @@ -648,35 +652,35 @@ void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) { } bool KProcess::IsSignaled() const { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); - return is_signaled; + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); + return m_is_signaled; } -KProcess::KProcess(KernelCore& kernel_) - : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()}, - handle_table{kernel_}, 
address_arbiter{kernel_.System()}, condition_var{kernel_.System()}, - state_lock{kernel_}, list_lock{kernel_} {} +KProcess::KProcess(KernelCore& kernel) + : KAutoObjectWithSlabHeapAndContainer{kernel}, m_page_table{m_kernel.System()}, + m_handle_table{m_kernel}, m_address_arbiter{m_kernel.System()}, + m_condition_var{m_kernel.System()}, m_state_lock{m_kernel}, m_list_lock{m_kernel} {} KProcess::~KProcess() = default; void KProcess::ChangeState(State new_state) { - if (state == new_state) { + if (m_state == new_state) { return; } - state = new_state; - is_signaled = true; - NotifyAvailable(); + m_state = new_state; + m_is_signaled = true; + this->NotifyAvailable(); } Result KProcess::AllocateMainThreadStack(std::size_t stack_size) { // Ensure that we haven't already allocated stack. - ASSERT(main_thread_stack_size == 0); + ASSERT(m_main_thread_stack_size == 0); // Ensure that we're allocating a valid stack. stack_size = Common::AlignUp(stack_size, PageSize); // R_UNLESS(stack_size + image_size <= m_max_process_memory, ResultOutOfMemory); - R_UNLESS(stack_size + image_size >= image_size, ResultOutOfMemory); + R_UNLESS(stack_size + m_image_size >= m_image_size, ResultOutOfMemory); // Place a tentative reservation of memory for our new stack. KScopedResourceReservation mem_reservation(this, Svc::LimitableResource::PhysicalMemoryMax, @@ -686,11 +690,11 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) { // Allocate and map our stack. if (stack_size) { KProcessAddress stack_bottom; - R_TRY(page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize, - KMemoryState::Stack, KMemoryPermission::UserReadWrite)); + R_TRY(m_page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize, + KMemoryState::Stack, KMemoryPermission::UserReadWrite)); - main_thread_stack_top = stack_bottom + stack_size; - main_thread_stack_size = stack_size; + m_main_thread_stack_top = stack_bottom + stack_size; + m_main_thread_stack_size = stack_size; } // We succeeded! Commit our memory reservation. diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h index bd9b9f876..7b7a971b8 100644 --- a/src/core/hle/kernel/k_process.h +++ b/src/core/hle/kernel/k_process.h @@ -68,7 +68,7 @@ class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWor KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject); public: - explicit KProcess(KernelCore& kernel_); + explicit KProcess(KernelCore& kernel); ~KProcess() override; enum class State { @@ -107,66 +107,76 @@ public: /// Gets a reference to the process' page table. KPageTable& PageTable() { - return page_table; + return m_page_table; } /// Gets const a reference to the process' page table. const KPageTable& PageTable() const { - return page_table; + return m_page_table; + } + + /// Gets a reference to the process' page table. + KPageTable& GetPageTable() { + return m_page_table; + } + + /// Gets const a reference to the process' page table. + const KPageTable& GetPageTable() const { + return m_page_table; } /// Gets a reference to the process' handle table. KHandleTable& GetHandleTable() { - return handle_table; + return m_handle_table; } /// Gets a const reference to the process' handle table. 
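// The R_UNLESS(stack_size + m_image_size >= m_image_size, ...) guard in
// AllocateMainThreadStack above is a plain unsigned-overflow check: for
// unsigned a and b, a + b wraps exactly when the sum comes out smaller than an
// addend. A self-contained restatement:
//
#include <cstddef>
#include <limits>

constexpr bool AddWouldOverflow(std::size_t a, std::size_t b) {
    return a + b < b; // wrapped around iff the sum dropped below an addend
}

static_assert(!AddWouldOverflow(0x1000, 0x2000));
static_assert(AddWouldOverflow(std::numeric_limits<std::size_t>::max(), 1));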
const KHandleTable& GetHandleTable() const { - return handle_table; + return m_handle_table; } Result SignalToAddress(VAddr address) { - return condition_var.SignalToAddress(address); + return m_condition_var.SignalToAddress(address); } Result WaitForAddress(Handle handle, VAddr address, u32 tag) { - return condition_var.WaitForAddress(handle, address, tag); + return m_condition_var.WaitForAddress(handle, address, tag); } void SignalConditionVariable(u64 cv_key, int32_t count) { - return condition_var.Signal(cv_key, count); + return m_condition_var.Signal(cv_key, count); } Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) { - R_RETURN(condition_var.Wait(address, cv_key, tag, ns)); + R_RETURN(m_condition_var.Wait(address, cv_key, tag, ns)); } Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) { - R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count)); + R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count)); } Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value, s64 timeout) { - R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout)); + R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout)); } VAddr GetProcessLocalRegionAddress() const { - return plr_address; + return m_plr_address; } /// Gets the current status of the process State GetState() const { - return state; + return m_state; } /// Gets the unique ID that identifies this particular process. - u64 GetProcessID() const { - return process_id; + u64 GetProcessId() const { + return m_process_id; } /// Gets the program ID corresponding to this process. - u64 GetProgramID() const { - return program_id; + u64 GetProgramId() const { + return m_program_id; } /// Gets the resource limit descriptor for this process @@ -174,7 +184,7 @@ public: /// Gets the ideal CPU core ID for this process u8 GetIdealCoreId() const { - return ideal_core; + return m_ideal_core; } /// Checks if the specified thread priority is valid. @@ -184,17 +194,17 @@ public: /// Gets the bitmask of allowed cores that this process' threads can run on. u64 GetCoreMask() const { - return capabilities.GetCoreMask(); + return m_capabilities.GetCoreMask(); } /// Gets the bitmask of allowed thread priorities. u64 GetPriorityMask() const { - return capabilities.GetPriorityMask(); + return m_capabilities.GetPriorityMask(); } /// Gets the amount of secure memory to allocate for memory management. u32 GetSystemResourceSize() const { - return system_resource_size; + return m_system_resource_size; } /// Gets the amount of secure memory currently in use for memory management. @@ -214,67 +224,67 @@ public: /// Whether this process is an AArch64 or AArch32 process. bool Is64BitProcess() const { - return is_64bit_process; + return m_is_64bit_process; } - [[nodiscard]] bool IsSuspended() const { - return is_suspended; + bool IsSuspended() const { + return m_is_suspended; } void SetSuspended(bool suspended) { - is_suspended = suspended; + m_is_suspended = suspended; } /// Gets the total running time of the process instance in ticks. u64 GetCPUTimeTicks() const { - return total_process_running_time_ticks; + return m_total_process_running_time_ticks; } /// Updates the total running time, adding the given ticks to it. 
void UpdateCPUTimeTicks(u64 ticks) { - total_process_running_time_ticks += ticks; + m_total_process_running_time_ticks += ticks; } /// Gets the process schedule count, used for thread yielding s64 GetScheduledCount() const { - return schedule_count; + return m_schedule_count; } /// Increments the process schedule count, used for thread yielding. void IncrementScheduledCount() { - ++schedule_count; + ++m_schedule_count; } void IncrementRunningThreadCount(); void DecrementRunningThreadCount(); void SetRunningThread(s32 core, KThread* thread, u64 idle_count) { - running_threads[core] = thread; - running_thread_idle_counts[core] = idle_count; + m_running_threads[core] = thread; + m_running_thread_idle_counts[core] = idle_count; } void ClearRunningThread(KThread* thread) { - for (size_t i = 0; i < running_threads.size(); ++i) { - if (running_threads[i] == thread) { - running_threads[i] = nullptr; + for (size_t i = 0; i < m_running_threads.size(); ++i) { + if (m_running_threads[i] == thread) { + m_running_threads[i] = nullptr; } } } [[nodiscard]] KThread* GetRunningThread(s32 core) const { - return running_threads[core]; + return m_running_threads[core]; } bool ReleaseUserException(KThread* thread); [[nodiscard]] KThread* GetPinnedThread(s32 core_id) const { ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); - return pinned_threads[core_id]; + return m_pinned_threads[core_id]; } /// Gets 8 bytes of random data for svcGetInfo RandomEntropy u64 GetRandomEntropy(std::size_t index) const { - return random_entropy.at(index); + return m_random_entropy.at(index); } /// Retrieves the total physical memory available to this process in bytes. @@ -293,7 +303,7 @@ public: /// Gets the list of all threads created with this process as their owner. 
std::list<KThread*>& GetThreadList() { - return thread_list; + return m_thread_list; } /// Registers a thread as being created under this process, @@ -345,15 +355,15 @@ public: void LoadModule(CodeSet code_set, VAddr base_addr); bool IsInitialized() const override { - return is_initialized; + return m_is_initialized; } - static void PostDestroy([[maybe_unused]] uintptr_t arg) {} + static void PostDestroy(uintptr_t arg) {} void Finalize() override; u64 GetId() const override { - return GetProcessID(); + return GetProcessId(); } bool IsSignaled() const override; @@ -367,7 +377,7 @@ public: void UnpinThread(KThread* thread); KLightLock& GetStateLock() { - return state_lock; + return m_state_lock; } Result AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size); @@ -392,30 +402,34 @@ public: bool RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type); const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const { - return watchpoints; + return m_watchpoints; + } + + const std::string& GetName() { + return name; } private: void PinThread(s32 core_id, KThread* thread) { ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); ASSERT(thread != nullptr); - ASSERT(pinned_threads[core_id] == nullptr); - pinned_threads[core_id] = thread; + ASSERT(m_pinned_threads[core_id] == nullptr); + m_pinned_threads[core_id] = thread; } void UnpinThread(s32 core_id, KThread* thread) { ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); ASSERT(thread != nullptr); - ASSERT(pinned_threads[core_id] == thread); - pinned_threads[core_id] = nullptr; + ASSERT(m_pinned_threads[core_id] == thread); + m_pinned_threads[core_id] = nullptr; } void FinalizeHandleTable() { // Finalize the table. - handle_table.Finalize(); + m_handle_table.Finalize(); // Note that the table is finalized. - is_handle_table_initialized = false; + m_is_handle_table_initialized = false; } void ChangeState(State new_state); @@ -424,105 +438,107 @@ private: Result AllocateMainThreadStack(std::size_t stack_size); /// Memory manager for this process - KPageTable page_table; + KPageTable m_page_table; /// Current status of the process - State state{}; + State m_state{}; /// The ID of this process - u64 process_id = 0; + u64 m_process_id = 0; /// Title ID corresponding to the process - u64 program_id = 0; + u64 m_program_id = 0; /// Specifies additional memory to be reserved for the process's memory management by the /// system. When this is non-zero, secure memory is allocated and used for page table allocation /// instead of using the normal global page tables/memory block management. - u32 system_resource_size = 0; + u32 m_system_resource_size = 0; /// Resource limit descriptor for this process - KResourceLimit* resource_limit{}; + KResourceLimit* m_resource_limit{}; - VAddr system_resource_address{}; + VAddr m_system_resource_address{}; /// The ideal CPU core for this process, threads are scheduled on this core by default. - u8 ideal_core = 0; + u8 m_ideal_core = 0; /// Contains the parsed process capability descriptors. - ProcessCapabilities capabilities; + ProcessCapabilities m_capabilities; /// Whether or not this process is AArch64, or AArch32. /// By default, we currently assume this is true, unless otherwise /// specified by metadata provided to the process during loading. - bool is_64bit_process = true; + bool m_is_64bit_process = true; /// Total running time for the process in ticks. 
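// The watchpoint accessors here pair with the m_debug_page_refcounts map seen
// in Insert/RemoveWatchpoint earlier: several watchpoints may cover the same
// page, so a page only leaves debug mode when its count returns to zero. A
// minimal sketch of that reference counting, slightly tightened so the state
// is only toggled on the first and last reference; MarkPageDebug stands in
// for Memory::MarkRegionDebug:
//
#include <cstdint>
#include <map>

constexpr std::uintptr_t PageSize = 0x1000;

void MarkPageDebug(std::uintptr_t page, bool debug) { /* toggle page protection */ }

std::map<std::uintptr_t, std::uint64_t> page_refcounts;

void AddWatchRange(std::uintptr_t addr, std::uint64_t size) {
    for (auto page = addr & ~(PageSize - 1); page < addr + size; page += PageSize) {
        if (page_refcounts[page]++ == 0) {
            MarkPageDebug(page, true); // first watchpoint touching this page
        }
    }
}

void RemoveWatchRange(std::uintptr_t addr, std::uint64_t size) {
    for (auto page = addr & ~(PageSize - 1); page < addr + size; page += PageSize) {
        if (--page_refcounts[page] == 0) {
            MarkPageDebug(page, false); // last watchpoint on this page is gone
        }
    }
}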
- std::atomic<u64> total_process_running_time_ticks = 0; + std::atomic<u64> m_total_process_running_time_ticks = 0; /// Per-process handle table for storing created object handles in. - KHandleTable handle_table; + KHandleTable m_handle_table; /// Per-process address arbiter. - KAddressArbiter address_arbiter; + KAddressArbiter m_address_arbiter; /// The per-process mutex lock instance used for handling various /// forms of services, such as lock arbitration, and condition /// variable related facilities. - KConditionVariable condition_var; + KConditionVariable m_condition_var; /// Address indicating the location of the process' dedicated TLS region. - VAddr plr_address = 0; + VAddr m_plr_address = 0; /// Random values for svcGetInfo RandomEntropy - std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{}; + std::array<u64, RANDOM_ENTROPY_SIZE> m_random_entropy{}; /// List of threads that are running with this process as their owner. - std::list<KThread*> thread_list; + std::list<KThread*> m_thread_list; /// List of shared memory that are running with this process as their owner. - std::list<KSharedMemoryInfo*> shared_memory_list; + std::list<KSharedMemoryInfo*> m_shared_memory_list; /// Address of the top of the main thread's stack - VAddr main_thread_stack_top{}; + VAddr m_main_thread_stack_top{}; /// Size of the main thread's stack - std::size_t main_thread_stack_size{}; + std::size_t m_main_thread_stack_size{}; /// Memory usage capacity for the process - std::size_t memory_usage_capacity{}; + std::size_t m_memory_usage_capacity{}; /// Process total image size - std::size_t image_size{}; + std::size_t m_image_size{}; /// Schedule count of this process - s64 schedule_count{}; + s64 m_schedule_count{}; + + size_t m_memory_release_hint{}; - size_t memory_release_hint{}; + std::string name{}; - bool is_signaled{}; - bool is_suspended{}; - bool is_immortal{}; - bool is_handle_table_initialized{}; - bool is_initialized{}; + bool m_is_signaled{}; + bool m_is_suspended{}; + bool m_is_immortal{}; + bool m_is_handle_table_initialized{}; + bool m_is_initialized{}; - std::atomic<u16> num_running_threads{}; + std::atomic<u16> m_num_running_threads{}; - std::array<KThread*, Core::Hardware::NUM_CPU_CORES> running_threads{}; - std::array<u64, Core::Hardware::NUM_CPU_CORES> running_thread_idle_counts{}; - std::array<KThread*, Core::Hardware::NUM_CPU_CORES> pinned_threads{}; - std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> watchpoints{}; - std::map<VAddr, u64> debug_page_refcounts; + std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_running_threads{}; + std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{}; + std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{}; + std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{}; + std::map<VAddr, u64> m_debug_page_refcounts; - KThread* exception_thread{}; + KThread* m_exception_thread{}; - KLightLock state_lock; - KLightLock list_lock; + KLightLock m_state_lock; + KLightLock m_list_lock; using TLPTree = Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>; using TLPIterator = TLPTree::iterator; - TLPTree fully_used_tlp_tree; - TLPTree partially_used_tlp_tree; + TLPTree m_fully_used_tlp_tree; + TLPTree m_partially_used_tlp_tree; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_readable_event.cpp b/src/core/hle/kernel/k_readable_event.cpp index 5c942d47c..c30662666 100644 --- a/src/core/hle/kernel/k_readable_event.cpp +++ 
b/src/core/hle/kernel/k_readable_event.cpp @@ -11,7 +11,7 @@ namespace Kernel { -KReadableEvent::KReadableEvent(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} +KReadableEvent::KReadableEvent(KernelCore& kernel) : KSynchronizationObject{kernel} {} KReadableEvent::~KReadableEvent() = default; @@ -25,7 +25,7 @@ void KReadableEvent::Initialize(KEvent* parent) { } bool KReadableEvent::IsSignaled() const { - ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); return m_is_signaled; } @@ -33,7 +33,7 @@ bool KReadableEvent::IsSignaled() const { void KReadableEvent::Destroy() { if (m_parent) { { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; m_parent->OnReadableEventDestroyed(); } m_parent->Close(); @@ -41,31 +41,29 @@ void KReadableEvent::Destroy() { } Result KReadableEvent::Signal() { - KScopedSchedulerLock lk{kernel}; + KScopedSchedulerLock lk{m_kernel}; if (!m_is_signaled) { m_is_signaled = true; this->NotifyAvailable(); } - return ResultSuccess; + R_SUCCEED(); } Result KReadableEvent::Clear() { this->Reset(); - return ResultSuccess; + R_SUCCEED(); } Result KReadableEvent::Reset() { - KScopedSchedulerLock lk{kernel}; + KScopedSchedulerLock lk{m_kernel}; - if (!m_is_signaled) { - return ResultInvalidState; - } + R_UNLESS(m_is_signaled, ResultInvalidState); m_is_signaled = false; - return ResultSuccess; + R_SUCCEED(); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_readable_event.h b/src/core/hle/kernel/k_readable_event.h index 743f96bf5..d2ec36323 100644 --- a/src/core/hle/kernel/k_readable_event.h +++ b/src/core/hle/kernel/k_readable_event.h @@ -17,7 +17,7 @@ class KReadableEvent : public KSynchronizationObject { KERNEL_AUTOOBJECT_TRAITS(KReadableEvent, KSynchronizationObject); public: - explicit KReadableEvent(KernelCore& kernel_); + explicit KReadableEvent(KernelCore& kernel); ~KReadableEvent() override; void Initialize(KEvent* parent); diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp index 626517619..fcee26a29 100644 --- a/src/core/hle/kernel/k_resource_limit.cpp +++ b/src/core/hle/kernel/k_resource_limit.cpp @@ -11,12 +11,12 @@ namespace Kernel { constexpr s64 DefaultTimeout = 10000000000; // 10 seconds -KResourceLimit::KResourceLimit(KernelCore& kernel_) - : KAutoObjectWithSlabHeapAndContainer{kernel_}, lock{kernel_}, cond_var{kernel_} {} +KResourceLimit::KResourceLimit(KernelCore& kernel) + : KAutoObjectWithSlabHeapAndContainer{kernel}, m_lock{m_kernel}, m_cond_var{m_kernel} {} KResourceLimit::~KResourceLimit() = default; -void KResourceLimit::Initialize(const Core::Timing::CoreTiming* core_timing_) { - core_timing = core_timing_; +void KResourceLimit::Initialize(const Core::Timing::CoreTiming* core_timing) { + m_core_timing = core_timing; } void KResourceLimit::Finalize() {} @@ -25,11 +25,11 @@ s64 KResourceLimit::GetLimitValue(LimitableResource which) const { const auto index = static_cast<std::size_t>(which); s64 value{}; { - KScopedLightLock lk{lock}; - value = limit_values[index]; + KScopedLightLock lk{m_lock}; + value = m_limit_values[index]; ASSERT(value >= 0); - ASSERT(current_values[index] <= limit_values[index]); - ASSERT(current_hints[index] <= current_values[index]); + ASSERT(m_current_values[index] <= m_limit_values[index]); + ASSERT(m_current_hints[index] <= m_current_values[index]); } return value; } @@ -38,11 +38,11 @@ s64 KResourceLimit::GetCurrentValue(LimitableResource which) const { const auto index = 
static_cast<std::size_t>(which); s64 value{}; { - KScopedLightLock lk{lock}; - value = current_values[index]; + KScopedLightLock lk{m_lock}; + value = m_current_values[index]; ASSERT(value >= 0); - ASSERT(current_values[index] <= limit_values[index]); - ASSERT(current_hints[index] <= current_values[index]); + ASSERT(m_current_values[index] <= m_limit_values[index]); + ASSERT(m_current_hints[index] <= m_current_values[index]); } return value; } @@ -51,11 +51,11 @@ s64 KResourceLimit::GetPeakValue(LimitableResource which) const { const auto index = static_cast<std::size_t>(which); s64 value{}; { - KScopedLightLock lk{lock}; - value = peak_values[index]; + KScopedLightLock lk{m_lock}; + value = m_peak_values[index]; ASSERT(value >= 0); - ASSERT(current_values[index] <= limit_values[index]); - ASSERT(current_hints[index] <= current_values[index]); + ASSERT(m_current_values[index] <= m_limit_values[index]); + ASSERT(m_current_hints[index] <= m_current_values[index]); } return value; } @@ -64,11 +64,11 @@ s64 KResourceLimit::GetFreeValue(LimitableResource which) const { const auto index = static_cast<std::size_t>(which); s64 value{}; { - KScopedLightLock lk(lock); - ASSERT(current_values[index] >= 0); - ASSERT(current_values[index] <= limit_values[index]); - ASSERT(current_hints[index] <= current_values[index]); - value = limit_values[index] - current_values[index]; + KScopedLightLock lk(m_lock); + ASSERT(m_current_values[index] >= 0); + ASSERT(m_current_values[index] <= m_limit_values[index]); + ASSERT(m_current_hints[index] <= m_current_values[index]); + value = m_limit_values[index] - m_current_values[index]; } return value; @@ -76,51 +76,51 @@ s64 KResourceLimit::GetFreeValue(LimitableResource which) const { Result KResourceLimit::SetLimitValue(LimitableResource which, s64 value) { const auto index = static_cast<std::size_t>(which); - KScopedLightLock lk(lock); - R_UNLESS(current_values[index] <= value, ResultInvalidState); + KScopedLightLock lk(m_lock); + R_UNLESS(m_current_values[index] <= value, ResultInvalidState); - limit_values[index] = value; - peak_values[index] = current_values[index]; + m_limit_values[index] = value; + m_peak_values[index] = m_current_values[index]; - return ResultSuccess; + R_SUCCEED(); } bool KResourceLimit::Reserve(LimitableResource which, s64 value) { - return Reserve(which, value, core_timing->GetGlobalTimeNs().count() + DefaultTimeout); + return Reserve(which, value, m_core_timing->GetGlobalTimeNs().count() + DefaultTimeout); } bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) { ASSERT(value >= 0); const auto index = static_cast<std::size_t>(which); - KScopedLightLock lk(lock); + KScopedLightLock lk(m_lock); - ASSERT(current_hints[index] <= current_values[index]); - if (current_hints[index] >= limit_values[index]) { + ASSERT(m_current_hints[index] <= m_current_values[index]); + if (m_current_hints[index] >= m_limit_values[index]) { return false; } // Loop until we reserve or run out of time. while (true) { - ASSERT(current_values[index] <= limit_values[index]); - ASSERT(current_hints[index] <= current_values[index]); + ASSERT(m_current_values[index] <= m_limit_values[index]); + ASSERT(m_current_hints[index] <= m_current_values[index]); // If we would overflow, don't allow to succeed. 
- if (Common::WrappingAdd(current_values[index], value) <= current_values[index]) { + if (Common::WrappingAdd(m_current_values[index], value) <= m_current_values[index]) { break; } - if (current_values[index] + value <= limit_values[index]) { - current_values[index] += value; - current_hints[index] += value; - peak_values[index] = std::max(peak_values[index], current_values[index]); + if (m_current_values[index] + value <= m_limit_values[index]) { + m_current_values[index] += value; + m_current_hints[index] += value; + m_peak_values[index] = std::max(m_peak_values[index], m_current_values[index]); return true; } - if (current_hints[index] + value <= limit_values[index] && - (timeout < 0 || core_timing->GetGlobalTimeNs().count() < timeout)) { - waiter_count++; - cond_var.Wait(&lock, timeout, false); - waiter_count--; + if (m_current_hints[index] + value <= m_limit_values[index] && + (timeout < 0 || m_core_timing->GetGlobalTimeNs().count() < timeout)) { + m_waiter_count++; + m_cond_var.Wait(std::addressof(m_lock), timeout, false); + m_waiter_count--; } else { break; } @@ -138,23 +138,23 @@ void KResourceLimit::Release(LimitableResource which, s64 value, s64 hint) { ASSERT(hint >= 0); const auto index = static_cast<std::size_t>(which); - KScopedLightLock lk(lock); - ASSERT(current_values[index] <= limit_values[index]); - ASSERT(current_hints[index] <= current_values[index]); - ASSERT(value <= current_values[index]); - ASSERT(hint <= current_hints[index]); + KScopedLightLock lk(m_lock); + ASSERT(m_current_values[index] <= m_limit_values[index]); + ASSERT(m_current_hints[index] <= m_current_values[index]); + ASSERT(value <= m_current_values[index]); + ASSERT(hint <= m_current_hints[index]); - current_values[index] -= value; - current_hints[index] -= hint; + m_current_values[index] -= value; + m_current_hints[index] -= hint; - if (waiter_count != 0) { - cond_var.Broadcast(); + if (m_waiter_count != 0) { + m_cond_var.Broadcast(); } } KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical_memory_size) { auto* resource_limit = KResourceLimit::Create(system.Kernel()); - resource_limit->Initialize(&system.CoreTiming()); + resource_limit->Initialize(std::addressof(system.CoreTiming())); // Initialize default resource limit values. 
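// Reserve() above loops on a condition variable: if the request does not fit
// against current usage but would fit against the hints (reservations that a
// concurrent Release() may return soon), the caller waits for a broadcast or
// the deadline. A simplified single-resource version, with standard-library
// types standing in for KLightLock / KLightConditionVariable:
//
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>

struct ResourceLimit {
    std::mutex lock;
    std::condition_variable cond;
    std::int64_t limit = 0;
    std::int64_t current = 0;
    std::int64_t hint = 0; // current minus releases already hinted as coming

    bool Reserve(std::int64_t value, std::chrono::nanoseconds timeout) {
        std::unique_lock lk{lock};
        const auto deadline = std::chrono::steady_clock::now() + timeout;
        while (true) {
            if (current + value <= limit) {
                current += value;
                hint += value;
                return true;
            }
            // Give up unless pending releases could make this fit in time.
            if (hint + value > limit ||
                cond.wait_until(lk, deadline) == std::cv_status::timeout) {
                return false;
            }
        }
    }

    void Release(std::int64_t value, std::int64_t hint_value) {
        std::lock_guard lk{lock};
        current -= value;
        hint -= hint_value;
        cond.notify_all(); // wake any Reserve() blocked above
    }
};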
// TODO(bunnei): These values are the system defaults, the limits for service processes are diff --git a/src/core/hle/kernel/k_resource_limit.h b/src/core/hle/kernel/k_resource_limit.h index 2573d1b7c..15e69af56 100644 --- a/src/core/hle/kernel/k_resource_limit.h +++ b/src/core/hle/kernel/k_resource_limit.h @@ -28,10 +28,10 @@ class KResourceLimit final KERNEL_AUTOOBJECT_TRAITS(KResourceLimit, KAutoObject); public: - explicit KResourceLimit(KernelCore& kernel_); + explicit KResourceLimit(KernelCore& kernel); ~KResourceLimit() override; - void Initialize(const Core::Timing::CoreTiming* core_timing_); + void Initialize(const Core::Timing::CoreTiming* core_timing); void Finalize() override; s64 GetLimitValue(LimitableResource which) const; @@ -46,18 +46,18 @@ public: void Release(LimitableResource which, s64 value); void Release(LimitableResource which, s64 value, s64 hint); - static void PostDestroy([[maybe_unused]] uintptr_t arg) {} + static void PostDestroy(uintptr_t arg) {} private: using ResourceArray = std::array<s64, static_cast<std::size_t>(LimitableResource::Count)>; - ResourceArray limit_values{}; - ResourceArray current_values{}; - ResourceArray current_hints{}; - ResourceArray peak_values{}; - mutable KLightLock lock; - s32 waiter_count{}; - KLightConditionVariable cond_var; - const Core::Timing::CoreTiming* core_timing{}; + ResourceArray m_limit_values{}; + ResourceArray m_current_values{}; + ResourceArray m_current_hints{}; + ResourceArray m_peak_values{}; + mutable KLightLock m_lock; + s32 m_waiter_count{}; + KLightConditionVariable m_cond_var; + const Core::Timing::CoreTiming* m_core_timing{}; }; KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical_memory_size); diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index d6c214237..ecadf2916 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -27,7 +27,7 @@ static void IncrementScheduledCount(Kernel::KThread* thread) { } } -KScheduler::KScheduler(KernelCore& kernel_) : kernel{kernel_} { +KScheduler::KScheduler(KernelCore& kernel) : m_kernel{kernel} { m_switch_fiber = std::make_shared<Common::Fiber>([this] { while (true) { ScheduleImplFiber(); @@ -47,7 +47,7 @@ void KScheduler::SetInterruptTaskRunnable() { void KScheduler::RequestScheduleOnInterrupt() { m_state.needs_scheduling = true; - if (CanSchedule(kernel)) { + if (CanSchedule(m_kernel)) { ScheduleOnInterrupt(); } } @@ -97,50 +97,50 @@ u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { } void KScheduler::Schedule() { - ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); - ASSERT(m_core_id == GetCurrentCoreId(kernel)); + ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1); + ASSERT(m_core_id == GetCurrentCoreId(m_kernel)); ScheduleImpl(); } void KScheduler::ScheduleOnInterrupt() { - GetCurrentThread(kernel).DisableDispatch(); + GetCurrentThread(m_kernel).DisableDispatch(); Schedule(); - GetCurrentThread(kernel).EnableDispatch(); + GetCurrentThread(m_kernel).EnableDispatch(); } void KScheduler::PreemptSingleCore() { - GetCurrentThread(kernel).DisableDispatch(); + GetCurrentThread(m_kernel).DisableDispatch(); - auto* thread = GetCurrentThreadPointer(kernel); - auto& previous_scheduler = kernel.Scheduler(thread->GetCurrentCore()); + auto* thread = GetCurrentThreadPointer(m_kernel); + auto& previous_scheduler = m_kernel.Scheduler(thread->GetCurrentCore()); previous_scheduler.Unload(thread); 
Common::Fiber::YieldTo(thread->GetHostContext(), *m_switch_fiber); - GetCurrentThread(kernel).EnableDispatch(); + GetCurrentThread(m_kernel).EnableDispatch(); } void KScheduler::RescheduleCurrentCore() { - ASSERT(!kernel.IsPhantomModeForSingleCore()); - ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); + ASSERT(!m_kernel.IsPhantomModeForSingleCore()); + ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1); - GetCurrentThread(kernel).EnableDispatch(); + GetCurrentThread(m_kernel).EnableDispatch(); if (m_state.needs_scheduling.load()) { // Disable interrupts, and then check again if rescheduling is needed. // KScopedInterruptDisable intr_disable; - kernel.CurrentScheduler()->RescheduleCurrentCoreImpl(); + m_kernel.CurrentScheduler()->RescheduleCurrentCoreImpl(); } } void KScheduler::RescheduleCurrentCoreImpl() { // Check that scheduling is needed. if (m_state.needs_scheduling.load()) [[likely]] { - GetCurrentThread(kernel).DisableDispatch(); + GetCurrentThread(m_kernel).DisableDispatch(); Schedule(); - GetCurrentThread(kernel).EnableDispatch(); + GetCurrentThread(m_kernel).EnableDispatch(); } } @@ -149,18 +149,18 @@ void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core m_core_id = core_id; m_idle_thread = idle_thread; // m_state.idle_thread_stack = m_idle_thread->GetStackTop(); - // m_state.interrupt_task_manager = &kernel.GetInterruptTaskManager(); + // m_state.interrupt_task_manager = std::addressof(kernel.GetInterruptTaskManager()); // Insert the main thread into the priority queue. // { - // KScopedSchedulerLock lk{kernel}; - // GetPriorityQueue(kernel).PushBack(GetCurrentThreadPointer(kernel)); - // SetSchedulerUpdateNeeded(kernel); + // KScopedSchedulerLock lk{m_kernel}; + // GetPriorityQueue(m_kernel).PushBack(GetCurrentThreadPointer(m_kernel)); + // SetSchedulerUpdateNeeded(m_kernel); // } // Bind interrupt handler. // kernel.GetInterruptManager().BindHandler( - // GetSchedulerInterruptHandler(kernel), KInterruptName::Scheduler, m_core_id, + // GetSchedulerInterruptHandler(m_kernel), KInterruptName::Scheduler, m_core_id, // KInterruptController::PriorityLevel::Scheduler, false, false); // Set the current thread. 
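// PreemptSingleCore and RescheduleCurrentCore above lean on the
// DisableDispatch/EnableDispatch pairing: while the per-thread disable count
// is nonzero, scheduling requests only set a flag, and the real reschedule
// runs once the count drops back to zero. The shape of that pattern, with
// illustrative stand-in stubs rather than yuzu's actual scheduler entry
// points:
//
#include <atomic>
#include <cstdint>

thread_local std::int32_t disable_dispatch_count = 0;
std::atomic<bool> needs_scheduling{false};

void Schedule() { /* would switch to the highest-priority thread */ }

void DisableDispatch() {
    ++disable_dispatch_count;
}

void RequestSchedule() {
    needs_scheduling.store(true, std::memory_order_seq_cst);
}

void EnableDispatch() {
    if (--disable_dispatch_count == 0 &&
        needs_scheduling.exchange(false, std::memory_order_seq_cst)) {
        Schedule(); // deferred reschedule now that dispatch is re-enabled
    }
}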
@@ -168,7 +168,7 @@ void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core } void KScheduler::Activate() { - ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); + ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1); // m_state.should_count_idle = KTargetSystem::IsDebugMode(); m_is_active = true; @@ -176,7 +176,7 @@ void KScheduler::Activate() { } void KScheduler::OnThreadStart() { - GetCurrentThread(kernel).EnableDispatch(); + GetCurrentThread(m_kernel).EnableDispatch(); } u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) { @@ -184,7 +184,7 @@ u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) { prev_highest_thread != highest_thread) [[likely]] { if (prev_highest_thread != nullptr) [[likely]] { IncrementScheduledCount(prev_highest_thread); - prev_highest_thread->SetLastScheduledTick(kernel.System().CoreTiming().GetCPUTicks()); + prev_highest_thread->SetLastScheduledTick(m_kernel.System().CoreTiming().GetCPUTicks()); } if (m_state.should_count_idle) { if (highest_thread != nullptr) [[likely]] { @@ -328,8 +328,8 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { } void KScheduler::SwitchThread(KThread* next_thread) { - KProcess* const cur_process = GetCurrentProcessPointer(kernel); - KThread* const cur_thread = GetCurrentThreadPointer(kernel); + KProcess* const cur_process = GetCurrentProcessPointer(m_kernel); + KThread* const cur_thread = GetCurrentThreadPointer(m_kernel); // We never want to schedule a null thread, so use the idle thread if we don't have a next. if (next_thread == nullptr) { @@ -351,7 +351,7 @@ void KScheduler::SwitchThread(KThread* next_thread) { // Update the CPU time tracking variables. const s64 prev_tick = m_last_context_switch_time; - const s64 cur_tick = kernel.System().CoreTiming().GetCPUTicks(); + const s64 cur_tick = m_kernel.System().CoreTiming().GetCPUTicks(); const s64 tick_diff = cur_tick - prev_tick; cur_thread->AddCpuTime(m_core_id, tick_diff); if (cur_process != nullptr) { @@ -375,7 +375,7 @@ void KScheduler::SwitchThread(KThread* next_thread) { // } // Set the new thread. - SetCurrentThread(kernel, next_thread); + SetCurrentThread(m_kernel, next_thread); m_current_thread = next_thread; // Set the new Thread Local region. @@ -388,7 +388,7 @@ void KScheduler::ScheduleImpl() { std::atomic_thread_fence(std::memory_order_seq_cst); // Load the appropriate thread pointers for scheduling. - KThread* const cur_thread{GetCurrentThreadPointer(kernel)}; + KThread* const cur_thread{GetCurrentThreadPointer(m_kernel)}; KThread* highest_priority_thread{m_state.highest_priority_thread}; // Check whether there are runnable interrupt tasks. @@ -411,7 +411,7 @@ void KScheduler::ScheduleImpl() { m_switch_cur_thread = cur_thread; m_switch_highest_priority_thread = highest_priority_thread; m_switch_from_schedule = true; - Common::Fiber::YieldTo(cur_thread->host_context, *m_switch_fiber); + Common::Fiber::YieldTo(cur_thread->m_host_context, *m_switch_fiber); // Returning from ScheduleImpl occurs after this thread has been scheduled again. } @@ -450,7 +450,7 @@ void KScheduler::ScheduleImplFiber() { // We want to try to lock the highest priority thread's context. // Try to take it. - while (!highest_priority_thread->context_guard.try_lock()) { + while (!highest_priority_thread->m_context_guard.try_lock()) { // The highest priority thread's context is already locked. // Check if we need scheduling. If we don't, we can retry directly. 
if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { @@ -468,7 +468,7 @@ void KScheduler::ScheduleImplFiber() { if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { // Our switch failed. // We should unlock the thread context, and then retry. - highest_priority_thread->context_guard.unlock(); + highest_priority_thread->m_context_guard.unlock(); goto retry; } else { break; @@ -489,30 +489,30 @@ void KScheduler::ScheduleImplFiber() { Reload(highest_priority_thread); // Reload the host thread. - Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->host_context); + Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->m_host_context); } void KScheduler::Unload(KThread* thread) { - auto& cpu_core = kernel.System().ArmInterface(m_core_id); + auto& cpu_core = m_kernel.System().ArmInterface(m_core_id); cpu_core.SaveContext(thread->GetContext32()); cpu_core.SaveContext(thread->GetContext64()); // Save the TPIDR_EL0 system register in case it was modified. - thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); + thread->SetTpidrEl0(cpu_core.GetTPIDR_EL0()); cpu_core.ClearExclusiveState(); // Check if the thread is terminated by checking the DPC flags. if ((thread->GetStackParameters().dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) { // The thread isn't terminated, so we want to unlock it. - thread->context_guard.unlock(); + thread->m_context_guard.unlock(); } } void KScheduler::Reload(KThread* thread) { - auto& cpu_core = kernel.System().ArmInterface(m_core_id); + auto& cpu_core = m_kernel.System().ArmInterface(m_core_id); cpu_core.LoadContext(thread->GetContext32()); cpu_core.LoadContext(thread->GetContext64()); - cpu_core.SetTlsAddress(thread->GetTLSAddress()); - cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0()); + cpu_core.SetTlsAddress(thread->GetTlsAddress()); + cpu_core.SetTPIDR_EL0(thread->GetTpidrEl0()); cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints()); cpu_core.ClearExclusiveState(); } @@ -891,7 +891,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) { void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) { if (const u64 core_mask = cores_needing_scheduling & ~(1ULL << m_core_id); core_mask != 0) { - RescheduleCores(kernel, core_mask); + RescheduleCores(m_kernel, core_mask); } } diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index 534321d8d..d85a0c040 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h @@ -80,17 +80,17 @@ public: return GetCurrentThread(kernel).GetDisableDispatchCount() == 0; } static bool IsSchedulerLockedByCurrentThread(KernelCore& kernel) { - return kernel.GlobalSchedulerContext().scheduler_lock.IsLockedByCurrentThread(); + return kernel.GlobalSchedulerContext().m_scheduler_lock.IsLockedByCurrentThread(); } static bool IsSchedulerUpdateNeeded(KernelCore& kernel) { - return kernel.GlobalSchedulerContext().scheduler_update_needed; + return kernel.GlobalSchedulerContext().m_scheduler_update_needed; } static void SetSchedulerUpdateNeeded(KernelCore& kernel) { - kernel.GlobalSchedulerContext().scheduler_update_needed = true; + kernel.GlobalSchedulerContext().m_scheduler_update_needed = true; } static void ClearSchedulerUpdateNeeded(KernelCore& kernel) { - kernel.GlobalSchedulerContext().scheduler_update_needed = false; + kernel.GlobalSchedulerContext().m_scheduler_update_needed = false; } static void DisableScheduling(KernelCore& kernel); @@ -115,7 +115,7 @@ public: private: // Static private API. 
static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel) { - return kernel.GlobalSchedulerContext().priority_queue; + return kernel.GlobalSchedulerContext().m_priority_queue; } static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel); @@ -149,7 +149,7 @@ private: KInterruptTaskManager* interrupt_task_manager{nullptr}; }; - KernelCore& kernel; + KernelCore& m_kernel; SchedulingState m_state; bool m_is_active{false}; s32 m_core_id{0}; @@ -166,7 +166,7 @@ private: class KScopedSchedulerLock : public KScopedLock<KScheduler::LockType> { public: explicit KScopedSchedulerLock(KernelCore& kernel) - : KScopedLock(kernel.GlobalSchedulerContext().scheduler_lock) {} + : KScopedLock(kernel.GlobalSchedulerContext().m_scheduler_lock) {} ~KScopedSchedulerLock() = default; }; diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h index 13463717f..caa1404f1 100644 --- a/src/core/hle/kernel/k_scheduler_lock.h +++ b/src/core/hle/kernel/k_scheduler_lock.h @@ -14,74 +14,67 @@ namespace Kernel { class KernelCore; +class GlobalSchedulerContext; template <typename SchedulerType> class KAbstractSchedulerLock { public: - explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {} + explicit KAbstractSchedulerLock(KernelCore& kernel) : m_kernel{kernel} {} bool IsLockedByCurrentThread() const { - return owner_thread == GetCurrentThreadPointer(kernel); + return m_owner_thread == GetCurrentThreadPointer(m_kernel); } void Lock() { - // If we are shutting down the kernel, none of this is relevant anymore. - if (kernel.IsShuttingDown()) { - return; - } - - if (IsLockedByCurrentThread()) { + if (this->IsLockedByCurrentThread()) { // If we already own the lock, the lock count should be > 0. // For debug, ensure this is true. - ASSERT(lock_count > 0); + ASSERT(m_lock_count > 0); } else { // Otherwise, we want to disable scheduling and acquire the spinlock. - SchedulerType::DisableScheduling(kernel); - spin_lock.Lock(); + SchedulerType::DisableScheduling(m_kernel); + m_spin_lock.Lock(); - ASSERT(lock_count == 0); - ASSERT(owner_thread == nullptr); + ASSERT(m_lock_count == 0); + ASSERT(m_owner_thread == nullptr); // Take ownership of the lock. - owner_thread = GetCurrentThreadPointer(kernel); + m_owner_thread = GetCurrentThreadPointer(m_kernel); } // Increment the lock count. - lock_count++; + m_lock_count++; } void Unlock() { - // If we are shutting down the kernel, none of this is relevant anymore. - if (kernel.IsShuttingDown()) { - return; - } - - ASSERT(IsLockedByCurrentThread()); - ASSERT(lock_count > 0); + ASSERT(this->IsLockedByCurrentThread()); + ASSERT(m_lock_count > 0); // Release an instance of the lock. - if ((--lock_count) == 0) { + if ((--m_lock_count) == 0) { // Perform a memory barrier here. std::atomic_thread_fence(std::memory_order_seq_cst); // We're no longer going to hold the lock. Take note of what cores need scheduling. const u64 cores_needing_scheduling = - SchedulerType::UpdateHighestPriorityThreads(kernel); + SchedulerType::UpdateHighestPriorityThreads(m_kernel); // Note that we no longer hold the lock, and unlock the spinlock. - owner_thread = nullptr; - spin_lock.Unlock(); + m_owner_thread = nullptr; + m_spin_lock.Unlock(); // Enable scheduling, and perform a rescheduling operation. 
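// KAbstractSchedulerLock, whose Lock()/Unlock() appear in this hunk, is a
// spinlock made reentrant by tracking the owning thread and a nesting count;
// scheduling stays disabled from the outermost Lock() to the matching final
// Unlock(). A condensed standalone version with stub scheduling hooks (not
// yuzu's types):
//
#include <atomic>
#include <cstdint>

struct ThreadCtx;
thread_local ThreadCtx* g_current_thread = nullptr;
ThreadCtx* CurrentThread() { return g_current_thread; }

class RecursiveSchedulerLock {
public:
    bool IsLockedByCurrentThread() const {
        return m_owner.load() == CurrentThread();
    }
    void Lock() {
        if (IsLockedByCurrentThread()) {
            ++m_count; // nested acquire by the owner: just deepen the nesting
            return;
        }
        DisableScheduling();
        while (m_flag.exchange(true, std::memory_order_acquire)) {
            // spin until the previous owner releases
        }
        m_owner.store(CurrentThread());
        m_count = 1;
    }
    void Unlock() {
        if (--m_count == 0) {
            m_owner.store(nullptr);
            m_flag.store(false, std::memory_order_release);
            EnableScheduling(); // reschedule cores noted while locked
        }
    }

private:
    void DisableScheduling() {}
    void EnableScheduling() {}
    std::atomic<bool> m_flag{false};
    std::atomic<ThreadCtx*> m_owner{nullptr};
    std::int32_t m_count{0};
};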
- SchedulerType::EnableScheduling(kernel, cores_needing_scheduling); + SchedulerType::EnableScheduling(m_kernel, cores_needing_scheduling); } } private: - KernelCore& kernel; - KAlignedSpinLock spin_lock{}; - s32 lock_count{}; - std::atomic<KThread*> owner_thread{}; + friend class GlobalSchedulerContext; + + KernelCore& m_kernel; + KAlignedSpinLock m_spin_lock{}; + s32 m_lock_count{}; + std::atomic<KThread*> m_owner_thread{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_scoped_lock.h b/src/core/hle/kernel/k_scoped_lock.h index a15640fd2..629a7d20d 100644 --- a/src/core/hle/kernel/k_scoped_lock.h +++ b/src/core/hle/kernel/k_scoped_lock.h @@ -18,15 +18,15 @@ std::is_reference_v<T>&& requires(T& t) { template <typename T> requires KLockable<T> -class [[nodiscard]] KScopedLock { +class KScopedLock { public: - explicit KScopedLock(T* l) : lock_ptr(l) { - this->lock_ptr->Lock(); + explicit KScopedLock(T* l) : m_lock(*l) {} + explicit KScopedLock(T& l) : m_lock(l) { + m_lock.Lock(); } - explicit KScopedLock(T& l) : KScopedLock(std::addressof(l)) {} ~KScopedLock() { - this->lock_ptr->Unlock(); + m_lock.Unlock(); } KScopedLock(const KScopedLock&) = delete; @@ -36,7 +36,7 @@ public: KScopedLock& operator=(KScopedLock&&) = delete; private: - T* lock_ptr; + T& m_lock; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_scoped_resource_reservation.h b/src/core/hle/kernel/k_scoped_resource_reservation.h index 436bcf9fe..2cc464612 100644 --- a/src/core/hle/kernel/k_scoped_resource_reservation.h +++ b/src/core/hle/kernel/k_scoped_resource_reservation.h @@ -12,20 +12,20 @@ namespace Kernel { class KScopedResourceReservation { public: explicit KScopedResourceReservation(KResourceLimit* l, LimitableResource r, s64 v, s64 timeout) - : resource_limit(std::move(l)), value(v), resource(r) { - if (resource_limit && value) { - success = resource_limit->Reserve(resource, value, timeout); + : m_limit(l), m_value(v), m_resource(r) { + if (m_limit && m_value) { + m_succeeded = m_limit->Reserve(m_resource, m_value, timeout); } else { - success = true; + m_succeeded = true; } } explicit KScopedResourceReservation(KResourceLimit* l, LimitableResource r, s64 v = 1) - : resource_limit(std::move(l)), value(v), resource(r) { - if (resource_limit && value) { - success = resource_limit->Reserve(resource, value); + : m_limit(l), m_value(v), m_resource(r) { + if (m_limit && m_value) { + m_succeeded = m_limit->Reserve(m_resource, m_value); } else { - success = true; + m_succeeded = true; } } @@ -36,26 +36,26 @@ public: : KScopedResourceReservation(p->GetResourceLimit(), r, v) {} ~KScopedResourceReservation() noexcept { - if (resource_limit && value && success) { - // resource was not committed, release the reservation. - resource_limit->Release(resource, value); + if (m_limit && m_value && m_succeeded) { + // Resource was not committed, release the reservation. 
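// KScopedLock, reworked above to hold a T& instead of a T*, is the classic
// RAII guard: acquire in the constructor, release in the destructor, no copy
// or move, and a null lock is ruled out by construction. One nuance: the
// pointer overload must delegate to the reference constructor (rather than
// only binding the member) or it would never acquire the lock. A minimal
// equivalent:
//
template <typename T>
class ScopedLock {
public:
    explicit ScopedLock(T& l) : m_lock(l) {
        m_lock.Lock();
    }
    explicit ScopedLock(T* l) : ScopedLock(*l) {} // delegates, so it locks too

    ~ScopedLock() {
        m_lock.Unlock();
    }

    ScopedLock(const ScopedLock&) = delete;
    ScopedLock& operator=(const ScopedLock&) = delete;
    ScopedLock(ScopedLock&&) = delete;
    ScopedLock& operator=(ScopedLock&&) = delete;

private:
    T& m_lock;
};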
+ m_limit->Release(m_resource, m_value); } } /// Commit the resource reservation, destruction of this object does not release the resource void Commit() { - resource_limit = nullptr; + m_limit = nullptr; } - [[nodiscard]] bool Succeeded() const { - return success; + bool Succeeded() const { + return m_succeeded; } private: - KResourceLimit* resource_limit{}; - s64 value; - LimitableResource resource; - bool success; + KResourceLimit* m_limit{}; + s64 m_value{}; + LimitableResource m_resource{}; + bool m_succeeded{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h index 14b83a819..c485022f5 100644 --- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h +++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h @@ -11,39 +11,39 @@ namespace Kernel { -class [[nodiscard]] KScopedSchedulerLockAndSleep { +class KScopedSchedulerLockAndSleep { public: - explicit KScopedSchedulerLockAndSleep(KernelCore& kernel_, KHardwareTimer** out_timer, - KThread* t, s64 timeout) - : kernel(kernel_), timeout_tick(timeout), thread(t), timer() { + explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, KHardwareTimer** out_timer, + KThread* thread, s64 timeout_tick) + : m_kernel(kernel), m_timeout_tick(timeout_tick), m_thread(thread), m_timer() { // Lock the scheduler. - kernel.GlobalSchedulerContext().scheduler_lock.Lock(); + kernel.GlobalSchedulerContext().m_scheduler_lock.Lock(); // Set our timer only if the time is positive. - timer = (timeout_tick > 0) ? std::addressof(kernel.HardwareTimer()) : nullptr; + m_timer = (timeout_tick > 0) ? std::addressof(kernel.HardwareTimer()) : nullptr; - *out_timer = timer; + *out_timer = m_timer; } ~KScopedSchedulerLockAndSleep() { // Register the sleep. - if (timeout_tick > 0) { - timer->RegisterTask(thread, timeout_tick); + if (m_timeout_tick > 0) { + m_timer->RegisterTask(m_thread, m_timeout_tick); } // Unlock the scheduler. - kernel.GlobalSchedulerContext().scheduler_lock.Unlock(); + m_kernel.GlobalSchedulerContext().m_scheduler_lock.Unlock(); } void CancelSleep() { - timeout_tick = 0; + m_timeout_tick = 0; } private: - KernelCore& kernel; - s64 timeout_tick{}; - KThread* thread{}; - KHardwareTimer* timer{}; + KernelCore& m_kernel; + s64 m_timeout_tick{}; + KThread* m_thread{}; + KHardwareTimer* m_timer{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_server_port.cpp b/src/core/hle/kernel/k_server_port.cpp index 16968ba97..a29d34bc1 100644 --- a/src/core/hle/kernel/k_server_port.cpp +++ b/src/core/hle/kernel/k_server_port.cpp @@ -12,13 +12,12 @@ namespace Kernel { -KServerPort::KServerPort(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} +KServerPort::KServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {} KServerPort::~KServerPort() = default; -void KServerPort::Initialize(KPort* parent_port_, std::string&& name_) { +void KServerPort::Initialize(KPort* parent) { // Set member variables. 
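// KScopedSchedulerLockAndSleep above bundles "hold the scheduler lock for
// this scope" with "arm a wake-up timer on the way out unless the sleep was
// cancelled": CancelSleep() zeroes the timeout so the destructor skips the
// timer registration. A sketch with stand-in types:
//
#include <cstdint>

struct SchedulerLock {
    void Lock() {}
    void Unlock() {}
};
struct HardwareTimer {
    void RegisterTask(void* thread, std::int64_t tick) { /* queue the wake-up */ }
};

class ScopedLockAndSleep {
public:
    ScopedLockAndSleep(SchedulerLock& lock, HardwareTimer& timer, void* thread,
                       std::int64_t timeout_tick)
        : m_lock{lock}, m_timer{timer}, m_thread{thread}, m_timeout_tick{timeout_tick} {
        m_lock.Lock();
    }
    ~ScopedLockAndSleep() {
        if (m_timeout_tick > 0) {
            m_timer.RegisterTask(m_thread, m_timeout_tick); // still want the wake-up
        }
        m_lock.Unlock();
    }
    void CancelSleep() {
        m_timeout_tick = 0; // destructor will no longer register the task
    }

private:
    SchedulerLock& m_lock;
    HardwareTimer& m_timer;
    void* m_thread;
    std::int64_t m_timeout_tick;
};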
- parent = parent_port_; - name = std::move(name_); + m_parent = parent; } bool KServerPort::IsLight() const { @@ -36,10 +35,10 @@ void KServerPort::CleanupSessions() { // Get the last session in the list KServerSession* session = nullptr; { - KScopedSchedulerLock sl{kernel}; - if (!session_list.empty()) { - session = std::addressof(session_list.front()); - session_list.pop_front(); + KScopedSchedulerLock sl{m_kernel}; + if (!m_session_list.empty()) { + session = std::addressof(m_session_list.front()); + m_session_list.pop_front(); } } @@ -54,13 +53,13 @@ void KServerPort::CleanupSessions() { void KServerPort::Destroy() { // Note with our parent that we're closed. - parent->OnServerClosed(); + m_parent->OnServerClosed(); // Perform necessary cleanup of our session lists. this->CleanupSessions(); // Close our reference to our parent. - parent->Close(); + m_parent->Close(); } bool KServerPort::IsSignaled() const { @@ -68,18 +67,18 @@ bool KServerPort::IsSignaled() const { UNIMPLEMENTED(); return false; } else { - return !session_list.empty(); + return !m_session_list.empty(); } } void KServerPort::EnqueueSession(KServerSession* session) { ASSERT(!this->IsLight()); - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; // Add the session to our queue. - session_list.push_back(*session); - if (session_list.size() == 1) { + m_session_list.push_back(*session); + if (m_session_list.size() == 1) { this->NotifyAvailable(); } } @@ -87,15 +86,15 @@ void KServerPort::EnqueueSession(KServerSession* session) { KServerSession* KServerPort::AcceptSession() { ASSERT(!this->IsLight()); - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; // Return the first session in the list. - if (session_list.empty()) { + if (m_session_list.empty()) { return nullptr; } - KServerSession* session = std::addressof(session_list.front()); - session_list.pop_front(); + KServerSession* session = std::addressof(m_session_list.front()); + m_session_list.pop_front(); return session; } diff --git a/src/core/hle/kernel/k_server_port.h b/src/core/hle/kernel/k_server_port.h index 5fc7ee683..21c040e62 100644 --- a/src/core/hle/kernel/k_server_port.h +++ b/src/core/hle/kernel/k_server_port.h @@ -22,17 +22,17 @@ class KServerPort final : public KSynchronizationObject { KERNEL_AUTOOBJECT_TRAITS(KServerPort, KSynchronizationObject); public: - explicit KServerPort(KernelCore& kernel_); + explicit KServerPort(KernelCore& kernel); ~KServerPort() override; - void Initialize(KPort* parent_port_, std::string&& name_); + void Initialize(KPort* parent); - void EnqueueSession(KServerSession* pending_session); + void EnqueueSession(KServerSession* session); KServerSession* AcceptSession(); const KPort* GetParent() const { - return parent; + return m_parent; } bool IsLight() const; @@ -46,8 +46,8 @@ private: void CleanupSessions(); - SessionList session_list; - KPort* parent{}; + SessionList m_session_list{}; + KPort* m_parent{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp index 01591af5b..2288ee435 100644 --- a/src/core/hle/kernel/k_server_session.cpp +++ b/src/core/hle/kernel/k_server_session.cpp @@ -28,23 +28,17 @@ namespace Kernel { using ThreadQueueImplForKServerSessionRequest = KThreadQueue; -KServerSession::KServerSession(KernelCore& kernel_) - : KSynchronizationObject{kernel_}, m_lock{kernel_} {} +KServerSession::KServerSession(KernelCore& kernel) + : KSynchronizationObject{kernel}, m_lock{m_kernel} {} 
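// The KServerPort logic above reduces to a small queue invariant: the port is
// signaled exactly while its session list is non-empty, so NotifyAvailable()
// only needs to fire on the empty-to-non-empty transition in
// EnqueueSession(). A miniature version, omitting the scheduler lock and with
// Session as a stand-in type:
//
#include <deque>

struct Session;

class ServerPort {
public:
    void Enqueue(Session* s) {
        m_sessions.push_back(s);
        if (m_sessions.size() == 1) {
            NotifyAvailable(); // just became signaled; wake waiters
        }
    }
    Session* Accept() {
        if (m_sessions.empty()) {
            return nullptr;
        }
        Session* s = m_sessions.front();
        m_sessions.pop_front();
        return s;
    }
    bool IsSignaled() const {
        return !m_sessions.empty();
    }

private:
    void NotifyAvailable() { /* would signal waiting threads */ }
    std::deque<Session*> m_sessions;
};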
KServerSession::~KServerSession() = default; -void KServerSession::Initialize(KSession* parent_session_, std::string&& name_) { - // Set member variables. - parent = parent_session_; - name = std::move(name_); -} - void KServerSession::Destroy() { - parent->OnServerClosed(); + m_parent->OnServerClosed(); this->CleanupRequests(); - parent->Close(); + m_parent->Close(); } void KServerSession::OnClientClosed() { @@ -62,7 +56,7 @@ void KServerSession::OnClientClosed() { // Get the next request. { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; if (m_current_request != nullptr && m_current_request != prev_request) { // Set the request, open a reference as we process it. @@ -121,7 +115,7 @@ void KServerSession::OnClientClosed() { // // Get the process and page table. // KProcess *client_process = thread->GetOwnerProcess(); - // auto &client_pt = client_process->GetPageTable(); + // auto& client_pt = client_process->GetPageTable(); // // Reply to the request. // ReplyAsyncError(client_process, request->GetAddress(), request->GetSize(), @@ -141,10 +135,10 @@ void KServerSession::OnClientClosed() { } bool KServerSession::IsSignaled() const { - ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); // If the client is closed, we're always signaled. - if (parent->IsClientClosed()) { + if (m_parent->IsClientClosed()) { return true; } @@ -154,17 +148,17 @@ bool KServerSession::IsSignaled() const { Result KServerSession::OnRequest(KSessionRequest* request) { // Create the wait queue. - ThreadQueueImplForKServerSessionRequest wait_queue{kernel}; + ThreadQueueImplForKServerSessionRequest wait_queue{m_kernel}; { // Lock the scheduler. - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; // Ensure that we can handle new requests. - R_UNLESS(!parent->IsServerClosed(), ResultSessionClosed); + R_UNLESS(!m_parent->IsServerClosed(), ResultSessionClosed); // Check that we're not terminating. - R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested); + R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested); // Get whether we're empty. const bool was_empty = m_request_list.empty(); @@ -182,11 +176,11 @@ Result KServerSession::OnRequest(KSessionRequest* request) { R_SUCCEED_IF(request->GetEvent() != nullptr); // This is a synchronous request, so we should wait for our request to complete. - GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); - GetCurrentThread(kernel).BeginWait(&wait_queue); + GetCurrentThread(m_kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); + GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue)); } - return GetCurrentThread(kernel).GetWaitResult(); + return GetCurrentThread(m_kernel).GetWaitResult(); } Result KServerSession::SendReply(bool is_hle) { @@ -196,7 +190,7 @@ Result KServerSession::SendReply(bool is_hle) { // Get the request. KSessionRequest* request; { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; // Get the current request. request = m_current_request; @@ -219,7 +213,7 @@ Result KServerSession::SendReply(bool is_hle) { KEvent* event = request->GetEvent(); // Check whether we're closed. 
- const bool closed = (client_thread == nullptr || parent->IsClientClosed()); + const bool closed = (client_thread == nullptr || m_parent->IsClientClosed()); Result result = ResultSuccess; if (!closed) { @@ -228,11 +222,11 @@ Result KServerSession::SendReply(bool is_hle) { // HLE servers write directly to a pointer to the thread command buffer. Therefore // the reply has already been written in this case. } else { - Core::Memory::Memory& memory{kernel.System().Memory()}; - KThread* server_thread{GetCurrentThreadPointer(kernel)}; + Core::Memory::Memory& memory{m_kernel.System().Memory()}; + KThread* server_thread{GetCurrentThreadPointer(m_kernel)}; UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); - auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); + auto* src_msg_buffer = memory.GetPointer(server_thread->GetTlsAddress()); auto* dst_msg_buffer = memory.GetPointer(client_message); std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); } @@ -254,7 +248,7 @@ Result KServerSession::SendReply(bool is_hle) { if (event != nullptr) { // // Get the client process/page table. // KProcess *client_process = client_thread->GetOwnerProcess(); - // KPageTable *client_page_table = &client_process->PageTable(); + // KPageTable *client_page_table = std::addressof(client_process->PageTable()); // // If we need to, reply with an async error. // if (R_FAILED(client_result)) { @@ -270,7 +264,7 @@ Result KServerSession::SendReply(bool is_hle) { event->Signal(); } else { // End the client thread's wait. - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; if (!client_thread->IsTerminationRequested()) { client_thread->EndWait(client_result); @@ -278,7 +272,7 @@ Result KServerSession::SendReply(bool is_hle) { } } - return result; + R_RETURN(result); } Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext>* out_context, @@ -291,10 +285,10 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext KThread* client_thread; { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; // Ensure that we can service the request. - R_UNLESS(!parent->IsClientClosed(), ResultSessionClosed); + R_UNLESS(!m_parent->IsClientClosed(), ResultSessionClosed); // Ensure we aren't already servicing a request. R_UNLESS(m_current_request == nullptr, ResultNotFound); @@ -303,7 +297,7 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext R_UNLESS(!m_request_list.empty(), ResultNotFound); // Pop the first request from the list. - request = &m_request_list.front(); + request = std::addressof(m_request_list.front()); m_request_list.pop_front(); // Get the thread for the request. @@ -325,27 +319,27 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext // bool recv_list_broken = false; // Receive the message. - Core::Memory::Memory& memory{kernel.System().Memory()}; + Core::Memory::Memory& memory{m_kernel.System().Memory()}; if (out_context != nullptr) { // HLE request. 
u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(client_message))}; *out_context = - std::make_shared<Service::HLERequestContext>(kernel, memory, this, client_thread); + std::make_shared<Service::HLERequestContext>(m_kernel, memory, this, client_thread); (*out_context)->SetSessionRequestManager(manager); (*out_context) ->PopulateFromIncomingCommandBuffer(client_thread->GetOwnerProcess()->GetHandleTable(), cmd_buf); } else { - KThread* server_thread{GetCurrentThreadPointer(kernel)}; + KThread* server_thread{GetCurrentThreadPointer(m_kernel)}; UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); auto* src_msg_buffer = memory.GetPointer(client_message); - auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); + auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTlsAddress()); std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); } // We succeeded. - return ResultSuccess; + R_SUCCEED(); } void KServerSession::CleanupRequests() { @@ -356,7 +350,7 @@ void KServerSession::CleanupRequests() { // Get the next request. KSessionRequest* request = nullptr; { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; if (m_current_request) { // Choose the current request if we have one. @@ -364,7 +358,7 @@ void KServerSession::CleanupRequests() { m_current_request = nullptr; } else if (!m_request_list.empty()) { // Pop the request from the front of the list. - request = &m_request_list.front(); + request = std::addressof(m_request_list.front()); m_request_list.pop_front(); } } @@ -387,7 +381,8 @@ void KServerSession::CleanupRequests() { // KProcess *client_process = (client_thread != nullptr) ? // client_thread->GetOwnerProcess() : nullptr; // KProcessPageTable *client_page_table = (client_process != nullptr) ? - // &client_process->GetPageTable() : nullptr; + // std::addressof(client_process->GetPageTable()) + // : nullptr; // Cleanup the mappings. // Result result = CleanupMap(request, server_process, client_page_table); @@ -407,7 +402,7 @@ void KServerSession::CleanupRequests() { event->Signal(); } else { // End the client thread's wait. - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; if (!client_thread->IsTerminationRequested()) { client_thread->EndWait(ResultSessionClosed); diff --git a/src/core/hle/kernel/k_server_session.h b/src/core/hle/kernel/k_server_session.h index 33f380352..5ee02f556 100644 --- a/src/core/hle/kernel/k_server_session.h +++ b/src/core/hle/kernel/k_server_session.h @@ -33,19 +33,17 @@ class KServerSession final : public KSynchronizationObject, friend class ServiceThread; public: - explicit KServerSession(KernelCore& kernel_); + explicit KServerSession(KernelCore& kernel); ~KServerSession() override; void Destroy() override; - void Initialize(KSession* parent_session_, std::string&& name_); - - KSession* GetParent() { - return parent; + void Initialize(KSession* p) { + m_parent = p; } const KSession* GetParent() const { - return parent; + return m_parent; } bool IsSignaled() const override; @@ -66,10 +64,10 @@ private: void CleanupRequests(); /// KSession that owns this KServerSession - KSession* parent{}; + KSession* m_parent{}; /// List of threads which are pending a reply. 
- boost::intrusive::list<KSessionRequest> m_request_list; + boost::intrusive::list<KSessionRequest> m_request_list{}; KSessionRequest* m_current_request{}; KLightLock m_lock; diff --git a/src/core/hle/kernel/k_session.cpp b/src/core/hle/kernel/k_session.cpp index 7e677c028..44d7a8f02 100644 --- a/src/core/hle/kernel/k_session.cpp +++ b/src/core/hle/kernel/k_session.cpp @@ -9,69 +9,63 @@ namespace Kernel { -KSession::KSession(KernelCore& kernel_) - : KAutoObjectWithSlabHeapAndContainer{kernel_}, server{kernel_}, client{kernel_} {} +KSession::KSession(KernelCore& kernel) + : KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {} KSession::~KSession() = default; -void KSession::Initialize(KClientPort* port_, const std::string& name_) { +void KSession::Initialize(KClientPort* client_port, uintptr_t name) { // Increment reference count. // Because reference count is one on creation, this will result // in a reference count of two. Thus, when both server and client are closed // this object will be destroyed. - Open(); + this->Open(); // Create our sub sessions. - KAutoObject::Create(std::addressof(server)); - KAutoObject::Create(std::addressof(client)); + KAutoObject::Create(std::addressof(m_server)); + KAutoObject::Create(std::addressof(m_client)); // Initialize our sub sessions. - server.Initialize(this, name_ + ":Server"); - client.Initialize(this, name_ + ":Client"); + m_server.Initialize(this); + m_client.Initialize(this); // Set state and name. - SetState(State::Normal); - name = name_; + this->SetState(State::Normal); + m_name = name; // Set our owner process. //! FIXME: this is the wrong process! - process = kernel.ApplicationProcess(); - process->Open(); + m_process = m_kernel.ApplicationProcess(); + m_process->Open(); // Set our port. - port = port_; - if (port != nullptr) { - port->Open(); + m_port = client_port; + if (m_port != nullptr) { + m_port->Open(); } // Mark initialized. 
- initialized = true; + m_initialized = true; } void KSession::Finalize() { - if (port == nullptr) { - return; + if (m_port != nullptr) { + m_port->OnSessionFinalized(); + m_port->Close(); } - - port->OnSessionFinalized(); - port->Close(); } void KSession::OnServerClosed() { - if (GetState() != State::Normal) { - return; + if (this->GetState() == State::Normal) { + this->SetState(State::ServerClosed); + m_client.OnServerClosed(); } - - SetState(State::ServerClosed); - client.OnServerClosed(); } void KSession::OnClientClosed() { - if (GetState() != State::Normal) { - return; + if (this->GetState() == State::Normal) { + SetState(State::ClientClosed); + m_server.OnClientClosed(); } - - SetState(State::ClientClosed); - server.OnClientClosed(); } void KSession::PostDestroy(uintptr_t arg) { diff --git a/src/core/hle/kernel/k_session.h b/src/core/hle/kernel/k_session.h index 93e5e6f71..f69bab088 100644 --- a/src/core/hle/kernel/k_session.h +++ b/src/core/hle/kernel/k_session.h @@ -18,19 +18,18 @@ class KSession final : public KAutoObjectWithSlabHeapAndContainer<KSession, KAut KERNEL_AUTOOBJECT_TRAITS(KSession, KAutoObject); public: - explicit KSession(KernelCore& kernel_); + explicit KSession(KernelCore& kernel); ~KSession() override; - void Initialize(KClientPort* port_, const std::string& name_); - + void Initialize(KClientPort* port, uintptr_t name); void Finalize() override; bool IsInitialized() const override { - return initialized; + return m_initialized; } uintptr_t GetPostDestroyArgument() const override { - return reinterpret_cast<uintptr_t>(process); + return reinterpret_cast<uintptr_t>(m_process); } static void PostDestroy(uintptr_t arg); @@ -48,27 +47,23 @@ public: } KClientSession& GetClientSession() { - return client; + return m_client; } KServerSession& GetServerSession() { - return server; + return m_server; } const KClientSession& GetClientSession() const { - return client; + return m_client; } const KServerSession& GetServerSession() const { - return server; + return m_server; } const KClientPort* GetParent() const { - return port; - } - - KClientPort* GetParent() { - return port; + return m_port; } private: @@ -80,20 +75,20 @@ private: }; void SetState(State state) { - atomic_state = static_cast<u8>(state); + m_atomic_state = static_cast<u8>(state); } State GetState() const { - return static_cast<State>(atomic_state.load(std::memory_order_relaxed)); + return static_cast<State>(m_atomic_state.load()); } - KServerSession server; - KClientSession client; - std::atomic<std::underlying_type_t<State>> atomic_state{ - static_cast<std::underlying_type_t<State>>(State::Invalid)}; - KClientPort* port{}; - KProcess* process{}; - bool initialized{}; + KServerSession m_server; + KClientSession m_client; + KClientPort* m_port{}; + uintptr_t m_name{}; + KProcess* m_process{}; + std::atomic<u8> m_atomic_state{static_cast<u8>(State::Invalid)}; + bool m_initialized{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_session_request.cpp b/src/core/hle/kernel/k_session_request.cpp index 520da6aa7..a329e5690 100644 --- a/src/core/hle/kernel/k_session_request.cpp +++ b/src/core/hle/kernel/k_session_request.cpp @@ -14,46 +14,46 @@ Result KSessionRequest::SessionMappings::PushMap(VAddr client, VAddr server, siz // Get the mapping. Mapping* mapping; if (index < NumStaticMappings) { - mapping = &m_static_mappings[index]; + mapping = std::addressof(m_static_mappings[index]); } else { // Allocate a page for the extra mappings. 
if (m_mappings == nullptr) { - KPageBuffer* page_buffer = KPageBuffer::Allocate(kernel); + KPageBuffer* page_buffer = KPageBuffer::Allocate(m_kernel); R_UNLESS(page_buffer != nullptr, ResultOutOfMemory); m_mappings = reinterpret_cast<Mapping*>(page_buffer); } - mapping = &m_mappings[index - NumStaticMappings]; + mapping = std::addressof(m_mappings[index - NumStaticMappings]); } // Set the mapping. mapping->Set(client, server, size, state); - return ResultSuccess; + R_SUCCEED(); } Result KSessionRequest::SessionMappings::PushSend(VAddr client, VAddr server, size_t size, KMemoryState state) { ASSERT(m_num_recv == 0); ASSERT(m_num_exch == 0); - return this->PushMap(client, server, size, state, m_num_send++); + R_RETURN(this->PushMap(client, server, size, state, m_num_send++)); } Result KSessionRequest::SessionMappings::PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state) { ASSERT(m_num_exch == 0); - return this->PushMap(client, server, size, state, m_num_send + m_num_recv++); + R_RETURN(this->PushMap(client, server, size, state, m_num_send + m_num_recv++)); } Result KSessionRequest::SessionMappings::PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state) { - return this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++); + R_RETURN(this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++)); } void KSessionRequest::SessionMappings::Finalize() { if (m_mappings) { - KPageBuffer::Free(kernel, reinterpret_cast<KPageBuffer*>(m_mappings)); + KPageBuffer::Free(m_kernel, reinterpret_cast<KPageBuffer*>(m_mappings)); m_mappings = nullptr; } } diff --git a/src/core/hle/kernel/k_session_request.h b/src/core/hle/kernel/k_session_request.h index e5558bc2c..5685048ba 100644 --- a/src/core/hle/kernel/k_session_request.h +++ b/src/core/hle/kernel/k_session_request.h @@ -47,14 +47,14 @@ public: } private: - VAddr m_client_address; - VAddr m_server_address; - size_t m_size; - KMemoryState m_state; + VAddr m_client_address{}; + VAddr m_server_address{}; + size_t m_size{}; + KMemoryState m_state{}; }; public: - explicit SessionMappings(KernelCore& kernel_) : kernel(kernel_) {} + explicit SessionMappings(KernelCore& kernel) : m_kernel(kernel) {} void Initialize() {} void Finalize(); @@ -149,8 +149,8 @@ public: } private: - KernelCore& kernel; - std::array<Mapping, NumStaticMappings> m_static_mappings; + KernelCore& m_kernel; + std::array<Mapping, NumStaticMappings> m_static_mappings{}; Mapping* m_mappings{}; u8 m_num_send{}; u8 m_num_recv{}; @@ -158,7 +158,7 @@ public: }; public: - explicit KSessionRequest(KernelCore& kernel_) : KAutoObject(kernel_), m_mappings(kernel_) {} + explicit KSessionRequest(KernelCore& kernel) : KAutoObject(kernel), m_mappings(kernel) {} static KSessionRequest* Create(KernelCore& kernel) { KSessionRequest* req = KSessionRequest::Allocate(kernel); @@ -170,13 +170,13 @@ public: void Destroy() override { this->Finalize(); - KSessionRequest::Free(kernel, this); + KSessionRequest::Free(m_kernel, this); } void Initialize(KEvent* event, uintptr_t address, size_t size) { m_mappings.Initialize(); - m_thread = GetCurrentThreadPointer(kernel); + m_thread = GetCurrentThreadPointer(m_kernel); m_event = event; m_address = address; m_size = size; diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp index df505edfe..954e5befe 100644 --- a/src/core/hle/kernel/k_shared_memory.cpp +++ b/src/core/hle/kernel/k_shared_memory.cpp @@ -12,29 +12,27 @@ namespace Kernel { 
-KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {} +KSharedMemory::KSharedMemory(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {} KSharedMemory::~KSharedMemory() = default; -Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_, - Svc::MemoryPermission owner_permission_, - Svc::MemoryPermission user_permission_, std::size_t size_, - std::string name_) { +Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory, KProcess* owner_process, + Svc::MemoryPermission owner_permission, + Svc::MemoryPermission user_permission, std::size_t size) { // Set members. - owner_process = owner_process_; - device_memory = &device_memory_; - owner_permission = owner_permission_; - user_permission = user_permission_; - size = Common::AlignUp(size_, PageSize); - name = std::move(name_); + m_owner_process = owner_process; + m_device_memory = std::addressof(device_memory); + m_owner_permission = owner_permission; + m_user_permission = user_permission; + m_size = Common::AlignUp(size, PageSize); const size_t num_pages = Common::DivideUp(size, PageSize); // Get the resource limit. - KResourceLimit* reslimit = kernel.GetSystemResourceLimit(); + KResourceLimit* reslimit = m_kernel.GetSystemResourceLimit(); // Reserve memory for ourselves. KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemoryMax, - size_); + size); R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); // Allocate the memory. @@ -42,67 +40,66 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o //! HACK: Open continuous mapping from sysmodule pool. auto option = KMemoryManager::EncodeOption(KMemoryManager::Pool::Secure, KMemoryManager::Direction::FromBack); - physical_address = kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, option); - R_UNLESS(physical_address != 0, ResultOutOfMemory); + m_physical_address = m_kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, option); + R_UNLESS(m_physical_address != 0, ResultOutOfMemory); //! Insert the result into our page group. - page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager()); - page_group->AddBlock(physical_address, num_pages); + m_page_group.emplace(m_kernel, + std::addressof(m_kernel.GetSystemSystemResource().GetBlockInfoManager())); + m_page_group->AddBlock(m_physical_address, num_pages); // Commit our reservation. memory_reservation.Commit(); // Set our resource limit. - resource_limit = reslimit; - resource_limit->Open(); + m_resource_limit = reslimit; + m_resource_limit->Open(); // Mark initialized. - is_initialized = true; + m_is_initialized = true; // Clear all pages in the memory. - for (const auto& block : *page_group) { - std::memset(device_memory_.GetPointer<void>(block.GetAddress()), 0, block.GetSize()); + for (const auto& block : *m_page_group) { + std::memset(m_device_memory->GetPointer<void>(block.GetAddress()), 0, block.GetSize()); } - return ResultSuccess; + R_SUCCEED(); } void KSharedMemory::Finalize() { // Close and finalize the page group. - page_group->Close(); - page_group->Finalize(); + m_page_group->Close(); + m_page_group->Finalize(); // Release the memory reservation. - resource_limit->Release(LimitableResource::PhysicalMemoryMax, size); - resource_limit->Close(); - - // Perform inherited finalization. 
- KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList>::Finalize(); + m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, m_size); + m_resource_limit->Close(); } Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size, Svc::MemoryPermission map_perm) { // Validate the size. - R_UNLESS(size == map_size, ResultInvalidSize); + R_UNLESS(m_size == map_size, ResultInvalidSize); // Validate the permission. const Svc::MemoryPermission test_perm = - &target_process == owner_process ? owner_permission : user_permission; + std::addressof(target_process) == m_owner_process ? m_owner_permission : m_user_permission; if (test_perm == Svc::MemoryPermission::DontCare) { ASSERT(map_perm == Svc::MemoryPermission::Read || map_perm == Svc::MemoryPermission::Write); } else { R_UNLESS(map_perm == test_perm, ResultInvalidNewMemoryPermission); } - return target_process.PageTable().MapPageGroup(address, *page_group, KMemoryState::Shared, - ConvertToKMemoryPermission(map_perm)); + R_RETURN(target_process.PageTable().MapPageGroup(address, *m_page_group, KMemoryState::Shared, + ConvertToKMemoryPermission(map_perm))); } Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) { // Validate the size. - R_UNLESS(size == unmap_size, ResultInvalidSize); + R_UNLESS(m_size == unmap_size, ResultInvalidSize); - return target_process.PageTable().UnmapPageGroup(address, *page_group, KMemoryState::Shared); + R_RETURN( + target_process.PageTable().UnmapPageGroup(address, *m_page_group, KMemoryState::Shared)); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h index 8b29f0b4a..b4c4125bb 100644 --- a/src/core/hle/kernel/k_shared_memory.h +++ b/src/core/hle/kernel/k_shared_memory.h @@ -23,12 +23,12 @@ class KSharedMemory final KERNEL_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject); public: - explicit KSharedMemory(KernelCore& kernel_); + explicit KSharedMemory(KernelCore& kernel); ~KSharedMemory() override; Result Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_, Svc::MemoryPermission owner_permission_, - Svc::MemoryPermission user_permission_, std::size_t size_, std::string name_); + Svc::MemoryPermission user_permission_, std::size_t size_); /** * Maps a shared memory block to an address in the target process' address space @@ -54,7 +54,7 @@ public: * @return A pointer to the shared memory block from the specified offset */ u8* GetPointer(std::size_t offset = 0) { - return device_memory->GetPointer<u8>(physical_address + offset); + return m_device_memory->GetPointer<u8>(m_physical_address + offset); } /** @@ -63,26 +63,26 @@ public: * @return A pointer to the shared memory block from the specified offset */ const u8* GetPointer(std::size_t offset = 0) const { - return device_memory->GetPointer<u8>(physical_address + offset); + return m_device_memory->GetPointer<u8>(m_physical_address + offset); } void Finalize() override; bool IsInitialized() const override { - return is_initialized; + return m_is_initialized; } - static void PostDestroy([[maybe_unused]] uintptr_t arg) {} + static void PostDestroy(uintptr_t arg) {} private: - Core::DeviceMemory* device_memory{}; - KProcess* owner_process{}; - std::optional<KPageGroup> page_group{}; - Svc::MemoryPermission owner_permission{}; - Svc::MemoryPermission user_permission{}; - PAddr physical_address{}; - std::size_t size{}; - KResourceLimit* resource_limit{}; - bool is_initialized{}; + Core::DeviceMemory* 
m_device_memory{}; + KProcess* m_owner_process{}; + std::optional<KPageGroup> m_page_group{}; + Svc::MemoryPermission m_owner_permission{}; + Svc::MemoryPermission m_user_permission{}; + PAddr m_physical_address{}; + std::size_t m_size{}; + KResourceLimit* m_resource_limit{}; + bool m_is_initialized{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_shared_memory_info.h b/src/core/hle/kernel/k_shared_memory_info.h index 2bb6b6d08..75b73ba39 100644 --- a/src/core/hle/kernel/k_shared_memory_info.h +++ b/src/core/hle/kernel/k_shared_memory_info.h @@ -18,25 +18,28 @@ public: explicit KSharedMemoryInfo(KernelCore&) {} KSharedMemoryInfo() = default; - constexpr void Initialize(KSharedMemory* shmem) { - shared_memory = shmem; + constexpr void Initialize(KSharedMemory* m) { + m_shared_memory = m; + m_reference_count = 0; } constexpr KSharedMemory* GetSharedMemory() const { - return shared_memory; + return m_shared_memory; } constexpr void Open() { - ++reference_count; + ++m_reference_count; + ASSERT(m_reference_count > 0); } constexpr bool Close() { - return (--reference_count) == 0; + ASSERT(m_reference_count > 0); + return (--m_reference_count) == 0; } private: - KSharedMemory* shared_memory{}; - size_t reference_count{}; + KSharedMemory* m_shared_memory{}; + size_t m_reference_count{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h index 68469b041..334afebb7 100644 --- a/src/core/hle/kernel/k_slab_heap.h +++ b/src/core/hle/kernel/k_slab_heap.h @@ -89,7 +89,8 @@ private: if (alloc_peak <= cur_peak) { break; } - } while (!Common::AtomicCompareAndSwap(&m_peak, alloc_peak, cur_peak, cur_peak)); + } while ( + !Common::AtomicCompareAndSwap(std::addressof(m_peak), alloc_peak, cur_peak, cur_peak)); } public: diff --git a/src/core/hle/kernel/k_spin_lock.cpp b/src/core/hle/kernel/k_spin_lock.cpp index 6e16a1849..852532037 100644 --- a/src/core/hle/kernel/k_spin_lock.cpp +++ b/src/core/hle/kernel/k_spin_lock.cpp @@ -6,15 +6,15 @@ namespace Kernel { void KSpinLock::Lock() { - lck.lock(); + m_lock.lock(); } void KSpinLock::Unlock() { - lck.unlock(); + m_lock.unlock(); } bool KSpinLock::TryLock() { - return lck.try_lock(); + return m_lock.try_lock(); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_spin_lock.h b/src/core/hle/kernel/k_spin_lock.h index 397a93d21..094a1e6be 100644 --- a/src/core/hle/kernel/k_spin_lock.h +++ b/src/core/hle/kernel/k_spin_lock.h @@ -5,26 +5,24 @@ #include <mutex> +#include "common/common_funcs.h" #include "core/hle/kernel/k_scoped_lock.h" namespace Kernel { class KSpinLock { public: - KSpinLock() = default; + explicit KSpinLock() = default; - KSpinLock(const KSpinLock&) = delete; - KSpinLock& operator=(const KSpinLock&) = delete; - - KSpinLock(KSpinLock&&) = delete; - KSpinLock& operator=(KSpinLock&&) = delete; + YUZU_NON_COPYABLE(KSpinLock); + YUZU_NON_MOVEABLE(KSpinLock); void Lock(); void Unlock(); - [[nodiscard]] bool TryLock(); + bool TryLock(); private: - std::mutex lck; + std::mutex m_lock; }; // TODO(bunnei): Alias for now, in case we want to implement these accurately in the future. 
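The k_slab_heap hunk above only reroutes the peak-allocation bookkeeping through std::addressof, but the surrounding loop is worth noting: it is a lock-free compute-the-maximum update that retries Common::AtomicCompareAndSwap until either the published peak already covers the new allocation or the swap lands. A minimal sketch of the same idiom on std::atomic (an illustrative substitute for yuzu's Common::AtomicCompareAndSwap wrapper, not the code the patch touches):

    #include <atomic>
    #include <cstdint>

    // Lock-free "store the maximum": retry until the published peak is
    // at least `candidate`, the idiom behind KSlabHeap's peak tracking.
    inline void UpdatePeak(std::atomic<std::uintptr_t>& peak, std::uintptr_t candidate) {
        std::uintptr_t current = peak.load(std::memory_order_relaxed);
        while (candidate > current) {
            // On failure, compare_exchange_weak reloads `current`, so the
            // loop condition re-checks against the latest published value.
            if (peak.compare_exchange_weak(current, candidate, std::memory_order_relaxed)) {
                break;
            }
        }
    }

Just above it, KSharedMemoryInfo takes the simpler route: Open()/Close() adjust a plain reference count (callers hold the appropriate lock), with the new asserts guarding under- and overflow, and Close() returns true exactly when the count drops to zero so the caller knows to release the info block.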
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp index 40fd0c038..b7da3eee7 100644 --- a/src/core/hle/kernel/k_synchronization_object.cpp +++ b/src/core/hle/kernel/k_synchronization_object.cpp @@ -17,9 +17,9 @@ namespace { class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait { public: - ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel_, KSynchronizationObject** o, + ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel, KSynchronizationObject** o, KSynchronizationObject::ThreadListNode* n, s32 c) - : KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) {} + : KThreadQueueWithoutEndWait(kernel), m_objects(o), m_nodes(n), m_count(c) {} void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object, Result wait_result) override { @@ -71,26 +71,26 @@ void KSynchronizationObject::Finalize() { KAutoObject::Finalize(); } -Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index, +Result KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index, KSynchronizationObject** objects, const s32 num_objects, s64 timeout) { // Allocate space on stack for thread nodes. std::vector<ThreadListNode> thread_nodes(num_objects); // Prepare for wait. - KThread* thread = GetCurrentThreadPointer(kernel_ctx); + KThread* thread = GetCurrentThreadPointer(kernel); KHardwareTimer* timer{}; - ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel_ctx, objects, - thread_nodes.data(), num_objects); + ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel, objects, thread_nodes.data(), + num_objects); { // Setup the scheduling lock and sleep. - KScopedSchedulerLockAndSleep slp(kernel_ctx, std::addressof(timer), thread, timeout); + KScopedSchedulerLockAndSleep slp(kernel, std::addressof(timer), thread, timeout); // Check if the thread should terminate. if (thread->IsTerminationRequested()) { slp.CancelSleep(); - return ResultTerminationRequested; + R_THROW(ResultTerminationRequested); } // Check if any of the objects are already signaled. @@ -100,21 +100,21 @@ Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index, if (objects[i]->IsSignaled()) { *out_index = i; slp.CancelSleep(); - return ResultSuccess; + R_THROW(ResultSuccess); } } // Check if the timeout is zero. if (timeout == 0) { slp.CancelSleep(); - return ResultTimedOut; + R_THROW(ResultTimedOut); } // Check if waiting was canceled. if (thread->IsWaitCancelled()) { slp.CancelSleep(); thread->ClearWaitCancelled(); - return ResultCancelled; + R_THROW(ResultCancelled); } // Add the waiters. @@ -141,16 +141,15 @@ Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index, *out_index = thread->GetSyncedIndex(); // Get the wait result. - return thread->GetWaitResult(); + R_RETURN(thread->GetWaitResult()); } -KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_) - : KAutoObjectWithList{kernel_} {} +KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : KAutoObjectWithList{kernel} {} KSynchronizationObject::~KSynchronizationObject() = default; void KSynchronizationObject::NotifyAvailable(Result result) { - KScopedSchedulerLock sl(kernel); + KScopedSchedulerLock sl(m_kernel); // If we're not signaled, we've nothing to notify. if (!this->IsSignaled()) { @@ -158,7 +157,7 @@ void KSynchronizationObject::NotifyAvailable(Result result) { } // Iterate over each thread. 
- for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) { + for (auto* cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) { cur_node->thread->NotifyAvailable(this, result); } } @@ -168,8 +167,8 @@ std::vector<KThread*> KSynchronizationObject::GetWaitingThreadsForDebugging() co // If debugging, dump the list of waiters. { - KScopedSchedulerLock lock(kernel); - for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) { + KScopedSchedulerLock lock(m_kernel); + for (auto* cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) { threads.emplace_back(cur_node->thread); } } diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h index 8d8122ab7..d55a2673d 100644 --- a/src/core/hle/kernel/k_synchronization_object.h +++ b/src/core/hle/kernel/k_synchronization_object.h @@ -24,31 +24,30 @@ public: KThread* thread{}; }; - [[nodiscard]] static Result Wait(KernelCore& kernel, s32* out_index, - KSynchronizationObject** objects, const s32 num_objects, - s64 timeout); + static Result Wait(KernelCore& kernel, s32* out_index, KSynchronizationObject** objects, + const s32 num_objects, s64 timeout); void Finalize() override; - [[nodiscard]] virtual bool IsSignaled() const = 0; + virtual bool IsSignaled() const = 0; - [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const; + std::vector<KThread*> GetWaitingThreadsForDebugging() const; void LinkNode(ThreadListNode* node_) { // Link the node to the list. - if (thread_list_tail == nullptr) { - thread_list_head = node_; + if (m_thread_list_tail == nullptr) { + m_thread_list_head = node_; } else { - thread_list_tail->next = node_; + m_thread_list_tail->next = node_; } - thread_list_tail = node_; + m_thread_list_tail = node_; } void UnlinkNode(ThreadListNode* node_) { // Unlink the node from the list. 
ThreadListNode* prev_ptr = - reinterpret_cast<ThreadListNode*>(std::addressof(thread_list_head)); + reinterpret_cast<ThreadListNode*>(std::addressof(m_thread_list_head)); ThreadListNode* prev_val = nullptr; ThreadListNode *prev, *tail_prev; @@ -59,8 +58,8 @@ public: prev_val = prev_ptr; } while (prev_ptr != node_); - if (thread_list_tail == node_) { - thread_list_tail = tail_prev; + if (m_thread_list_tail == node_) { + m_thread_list_tail = tail_prev; } prev->next = node_->next; @@ -78,8 +77,8 @@ protected: } private: - ThreadListNode* thread_list_head{}; - ThreadListNode* thread_list_tail{}; + ThreadListNode* m_thread_list_head{}; + ThreadListNode* m_thread_list_tail{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_system_resource.cpp b/src/core/hle/kernel/k_system_resource.cpp index 4cc377a6c..e6c8d589a 100644 --- a/src/core/hle/kernel/k_system_resource.cpp +++ b/src/core/hle/kernel/k_system_resource.cpp @@ -5,9 +5,8 @@ namespace Kernel { -Result KSecureSystemResource::Initialize([[maybe_unused]] size_t size, - [[maybe_unused]] KResourceLimit* resource_limit, - [[maybe_unused]] KMemoryManager::Pool pool) { +Result KSecureSystemResource::Initialize(size_t size, KResourceLimit* resource_limit, + KMemoryManager::Pool pool) { // Unimplemented UNREACHABLE(); } @@ -17,8 +16,8 @@ void KSecureSystemResource::Finalize() { UNREACHABLE(); } -size_t KSecureSystemResource::CalculateRequiredSecureMemorySize( - [[maybe_unused]] size_t size, [[maybe_unused]] KMemoryManager::Pool pool) { +size_t KSecureSystemResource::CalculateRequiredSecureMemorySize(size_t size, + KMemoryManager::Pool pool) { // Unimplemented UNREACHABLE(); } diff --git a/src/core/hle/kernel/k_system_resource.h b/src/core/hle/kernel/k_system_resource.h index 9a991f725..d36aaa9bd 100644 --- a/src/core/hle/kernel/k_system_resource.h +++ b/src/core/hle/kernel/k_system_resource.h @@ -21,7 +21,7 @@ class KSystemResource : public KAutoObject { KERNEL_AUTOOBJECT_TRAITS(KSystemResource, KAutoObject); public: - explicit KSystemResource(KernelCore& kernel_) : KAutoObject(kernel_) {} + explicit KSystemResource(KernelCore& kernel) : KAutoObject(kernel) {} protected: void SetSecureResource() { @@ -87,8 +87,8 @@ private: class KSecureSystemResource final : public KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource> { public: - explicit KSecureSystemResource(KernelCore& kernel_) - : KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel_) { + explicit KSecureSystemResource(KernelCore& kernel) + : KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel) { // Mark ourselves as being a secure resource. 
this->SetSecureResource(); } @@ -99,7 +99,7 @@ public: bool IsInitialized() const { return m_is_initialized; } - static void PostDestroy([[maybe_unused]] uintptr_t arg) {} + static void PostDestroy(uintptr_t arg) {} size_t CalculateRequiredSecureMemorySize() const { return CalculateRequiredSecureMemorySize(m_resource_size, m_resource_pool); diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index 26e3700e4..c0e3ecb45 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp @@ -35,15 +35,11 @@ #include "core/hle/result.h" #include "core/memory.h" -#ifdef ARCHITECTURE_x86_64 -#include "core/arm/dynarmic/arm_dynarmic_32.h" -#endif - namespace { constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1; -static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, +static void ResetThreadContext32(Kernel::KThread::ThreadContext32& context, u32 stack_top, u32 entry_point, u32 arg) { context = {}; context.cpu_registers[0] = arg; @@ -52,7 +48,7 @@ static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, context.fpscr = 0; } -static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top, +static void ResetThreadContext64(Kernel::KThread::ThreadContext64& context, VAddr stack_top, VAddr entry_point, u64 arg) { context = {}; context.cpu_registers[0] = arg; @@ -77,14 +73,14 @@ struct ThreadLocalRegion { class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait { public: - explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel_) - : KThreadQueueWithoutEndWait(kernel_) {} + explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel) + : KThreadQueueWithoutEndWait(kernel) {} }; class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue { public: - explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl) - : KThreadQueue(kernel_), m_wait_list(wl) {} + explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel, KThread::WaiterList* wl) + : KThreadQueue(kernel), m_wait_list(wl) {} void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override { // Remove the thread from the wait list. @@ -95,13 +91,13 @@ public: } private: - KThread::WaiterList* m_wait_list; + KThread::WaiterList* m_wait_list{}; }; } // namespace -KThread::KThread(KernelCore& kernel_) - : KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {} +KThread::KThread(KernelCore& kernel) + : KAutoObjectWithSlabHeapAndContainer{kernel}, m_activity_pause_lock{kernel} {} KThread::~KThread() = default; Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, @@ -117,7 +113,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack ASSERT(0 <= phys_core && phys_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); // First, clear the TLS address. - tls_address = {}; + m_tls_address = {}; // Next, assert things based on the type. switch (type) { @@ -141,110 +137,110 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack ASSERT_MSG(false, "KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type)); break; } - thread_type = type; + m_thread_type = type; // Set the ideal core ID and affinity mask. 
- virtual_ideal_core_id = virt_core; - physical_ideal_core_id = phys_core; - virtual_affinity_mask = 1ULL << virt_core; - physical_affinity_mask.SetAffinity(phys_core, true); + m_virtual_ideal_core_id = virt_core; + m_physical_ideal_core_id = phys_core; + m_virtual_affinity_mask = 1ULL << virt_core; + m_physical_affinity_mask.SetAffinity(phys_core, true); // Set the thread state. - thread_state = (type == ThreadType::Main || type == ThreadType::Dummy) - ? ThreadState::Runnable - : ThreadState::Initialized; + m_thread_state = (type == ThreadType::Main || type == ThreadType::Dummy) + ? ThreadState::Runnable + : ThreadState::Initialized; // Set TLS address. - tls_address = 0; + m_tls_address = 0; // Set parent and condvar tree. - parent = nullptr; - condvar_tree = nullptr; + m_parent = nullptr; + m_condvar_tree = nullptr; // Set sync booleans. - signaled = false; - termination_requested = false; - wait_cancelled = false; - cancellable = false; + m_signaled = false; + m_termination_requested = false; + m_wait_cancelled = false; + m_cancellable = false; // Set core ID and wait result. - core_id = phys_core; - wait_result = ResultNoSynchronizationObject; + m_core_id = phys_core; + m_wait_result = ResultNoSynchronizationObject; // Set priorities. - priority = prio; - base_priority = prio; + m_priority = prio; + m_base_priority = prio; // Initialize sleeping queue. - wait_queue = nullptr; + m_wait_queue = nullptr; // Set suspend flags. - suspend_request_flags = 0; - suspend_allowed_flags = static_cast<u32>(ThreadState::SuspendFlagMask); + m_suspend_request_flags = 0; + m_suspend_allowed_flags = static_cast<u32>(ThreadState::SuspendFlagMask); // We're neither debug attached, nor are we nesting our priority inheritance. - debug_attached = false; - priority_inheritance_count = 0; + m_debug_attached = false; + m_priority_inheritance_count = 0; // We haven't been scheduled, and we have done no light IPC. - schedule_count = -1; - last_scheduled_tick = 0; - light_ipc_data = nullptr; + m_schedule_count = -1; + m_last_scheduled_tick = 0; + m_light_ipc_data = nullptr; // We're not waiting for a lock, and we haven't disabled migration. - waiting_lock_info = nullptr; - num_core_migration_disables = 0; + m_waiting_lock_info = nullptr; + m_num_core_migration_disables = 0; // We have no waiters, but we do have an entrypoint. - num_kernel_waiters = 0; + m_num_kernel_waiters = 0; // Set our current core id. - current_core_id = phys_core; + m_current_core_id = phys_core; // We haven't released our resource limit hint, and we've spent no time on the cpu. - resource_limit_release_hint = false; - cpu_time = 0; + m_resource_limit_release_hint = false; + m_cpu_time = 0; // Set debug context. - stack_top = user_stack_top; - argument = arg; + m_stack_top = user_stack_top; + m_argument = arg; // Clear our stack parameters. - std::memset(static_cast<void*>(std::addressof(GetStackParameters())), 0, + std::memset(static_cast<void*>(std::addressof(this->GetStackParameters())), 0, sizeof(StackParameters)); // Set parent, if relevant. if (owner != nullptr) { // Setup the TLS, if needed. if (type == ThreadType::User) { - R_TRY(owner->CreateThreadLocalRegion(std::addressof(tls_address))); + R_TRY(owner->CreateThreadLocalRegion(std::addressof(m_tls_address))); } - parent = owner; - parent->Open(); + m_parent = owner; + m_parent->Open(); } // Initialize thread context. 
- ResetThreadContext64(thread_context_64, user_stack_top, func, arg); - ResetThreadContext32(thread_context_32, static_cast<u32>(user_stack_top), + ResetThreadContext64(m_thread_context_64, user_stack_top, func, arg); + ResetThreadContext32(m_thread_context_32, static_cast<u32>(user_stack_top), static_cast<u32>(func), static_cast<u32>(arg)); // Setup the stack parameters. - StackParameters& sp = GetStackParameters(); + StackParameters& sp = this->GetStackParameters(); sp.cur_thread = this; sp.disable_count = 1; - SetInExceptionHandler(); + this->SetInExceptionHandler(); // Set thread ID. - thread_id = kernel.CreateNewThreadID(); + m_thread_id = m_kernel.CreateNewThreadID(); // We initialized! - initialized = true; + m_initialized = true; // Register ourselves with our parent process. - if (parent != nullptr) { - parent->RegisterThread(this); - if (parent->IsSuspended()) { + if (m_parent != nullptr) { + m_parent->RegisterThread(this); + if (m_parent->IsSuspended()) { RequestSuspend(SuspendType::Process); } } @@ -259,8 +255,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_ R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type)); // Initialize emulation parameters. - thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func)); - thread->is_single_core = !Settings::values.use_multi_core.GetValue(); + thread->m_host_context = std::make_shared<Common::Fiber>(std::move(init_func)); R_SUCCEED(); } @@ -270,7 +265,7 @@ Result KThread::InitializeDummyThread(KThread* thread, KProcess* owner) { R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, owner, ThreadType::Dummy)); // Initialize emulation parameters. - thread->stack_parameters.disable_count = 0; + thread->m_stack_parameters.disable_count = 0; R_SUCCEED(); } @@ -331,25 +326,25 @@ void KThread::PostDestroy(uintptr_t arg) { void KThread::Finalize() { // If the thread has an owner process, unregister it. - if (parent != nullptr) { - parent->UnregisterThread(this); + if (m_parent != nullptr) { + m_parent->UnregisterThread(this); } // If the thread has a local region, delete it. - if (tls_address != 0) { - ASSERT(parent->DeleteThreadLocalRegion(tls_address).IsSuccess()); + if (m_tls_address != 0) { + ASSERT(m_parent->DeleteThreadLocalRegion(m_tls_address).IsSuccess()); } // Release any waiters. { - ASSERT(waiting_lock_info == nullptr); - KScopedSchedulerLock sl{kernel}; + ASSERT(m_waiting_lock_info == nullptr); + KScopedSchedulerLock sl{m_kernel}; // Check that we have no kernel waiters. - ASSERT(num_kernel_waiters == 0); + ASSERT(m_num_kernel_waiters == 0); - auto it = held_lock_info_list.begin(); - while (it != held_lock_info_list.end()) { + auto it = m_held_lock_info_list.begin(); + while (it != m_held_lock_info_list.end()) { // Get the lock info. auto* const lock_info = std::addressof(*it); @@ -371,70 +366,70 @@ void KThread::Finalize() { } // Remove the held lock from our list. - it = held_lock_info_list.erase(it); + it = m_held_lock_info_list.erase(it); // Free the lock info. - LockWithPriorityInheritanceInfo::Free(kernel, lock_info); + LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info); } } // Release host emulation members. - host_context.reset(); + m_host_context.reset(); // Perform inherited finalization. 
KSynchronizationObject::Finalize(); } bool KThread::IsSignaled() const { - return signaled; + return m_signaled; } void KThread::OnTimer() { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); // If we're waiting, cancel the wait. - if (GetState() == ThreadState::Waiting) { - wait_queue->CancelWait(this, ResultTimedOut, false); + if (this->GetState() == ThreadState::Waiting) { + m_wait_queue->CancelWait(this, ResultTimedOut, false); } } void KThread::StartTermination() { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); // Release user exception and unpin, if relevant. - if (parent != nullptr) { - parent->ReleaseUserException(this); - if (parent->GetPinnedThread(GetCurrentCoreId(kernel)) == this) { - parent->UnpinCurrentThread(core_id); + if (m_parent != nullptr) { + m_parent->ReleaseUserException(this); + if (m_parent->GetPinnedThread(GetCurrentCoreId(m_kernel)) == this) { + m_parent->UnpinCurrentThread(m_core_id); } } // Set state to terminated. - SetState(ThreadState::Terminated); + this->SetState(ThreadState::Terminated); // Clear the thread's status as running in parent. - if (parent != nullptr) { - parent->ClearRunningThread(this); + if (m_parent != nullptr) { + m_parent->ClearRunningThread(this); } // Signal. - signaled = true; + m_signaled = true; KSynchronizationObject::NotifyAvailable(); // Clear previous thread in KScheduler. - KScheduler::ClearPreviousThread(kernel, this); + KScheduler::ClearPreviousThread(m_kernel, this); // Register terminated dpc flag. - RegisterDpc(DpcFlag::Terminated); + this->RegisterDpc(DpcFlag::Terminated); } void KThread::FinishTermination() { // Ensure that the thread is not executing on any core. - if (parent != nullptr) { + if (m_parent != nullptr) { for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) { KThread* core_thread{}; do { - core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread(); + core_thread = m_kernel.Scheduler(i).GetSchedulerCurrentThread(); } while (core_thread == this); } } @@ -449,182 +444,183 @@ void KThread::DoWorkerTaskImpl() { } void KThread::Pin(s32 current_core) { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); // Set ourselves as pinned. GetStackParameters().is_pinned = true; // Disable core migration. - ASSERT(num_core_migration_disables == 0); + ASSERT(m_num_core_migration_disables == 0); { - ++num_core_migration_disables; + ++m_num_core_migration_disables; // Save our ideal state to restore when we're unpinned. - original_physical_ideal_core_id = physical_ideal_core_id; - original_physical_affinity_mask = physical_affinity_mask; + m_original_physical_ideal_core_id = m_physical_ideal_core_id; + m_original_physical_affinity_mask = m_physical_affinity_mask; // Bind ourselves to this core. 
- const s32 active_core = GetActiveCore(); + const s32 active_core = this->GetActiveCore(); - SetActiveCore(current_core); - physical_ideal_core_id = current_core; - physical_affinity_mask.SetAffinityMask(1ULL << current_core); + this->SetActiveCore(current_core); + m_physical_ideal_core_id = current_core; + m_physical_affinity_mask.SetAffinityMask(1ULL << current_core); - if (active_core != current_core || physical_affinity_mask.GetAffinityMask() != - original_physical_affinity_mask.GetAffinityMask()) { - KScheduler::OnThreadAffinityMaskChanged(kernel, this, original_physical_affinity_mask, - active_core); + if (active_core != current_core || + m_physical_affinity_mask.GetAffinityMask() != + m_original_physical_affinity_mask.GetAffinityMask()) { + KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, + m_original_physical_affinity_mask, active_core); } } // Disallow performing thread suspension. { // Update our allow flags. - suspend_allowed_flags &= ~(1 << (static_cast<u32>(SuspendType::Thread) + - static_cast<u32>(ThreadState::SuspendShift))); + m_suspend_allowed_flags &= ~(1 << (static_cast<u32>(SuspendType::Thread) + + static_cast<u32>(ThreadState::SuspendShift))); // Update our state. - UpdateState(); + this->UpdateState(); } // TODO(bunnei): Update our SVC access permissions. - ASSERT(parent != nullptr); + ASSERT(m_parent != nullptr); } void KThread::Unpin() { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); // Set ourselves as unpinned. - GetStackParameters().is_pinned = false; + this->GetStackParameters().is_pinned = false; // Enable core migration. - ASSERT(num_core_migration_disables == 1); + ASSERT(m_num_core_migration_disables == 1); { - num_core_migration_disables--; + m_num_core_migration_disables--; // Restore our original state. - const KAffinityMask old_mask = physical_affinity_mask; + const KAffinityMask old_mask = m_physical_affinity_mask; - physical_ideal_core_id = original_physical_ideal_core_id; - physical_affinity_mask = original_physical_affinity_mask; + m_physical_ideal_core_id = m_original_physical_ideal_core_id; + m_physical_affinity_mask = m_original_physical_affinity_mask; - if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { - const s32 active_core = GetActiveCore(); + if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { + const s32 active_core = this->GetActiveCore(); - if (!physical_affinity_mask.GetAffinity(active_core)) { - if (physical_ideal_core_id >= 0) { - SetActiveCore(physical_ideal_core_id); + if (!m_physical_affinity_mask.GetAffinity(active_core)) { + if (m_physical_ideal_core_id >= 0) { + this->SetActiveCore(m_physical_ideal_core_id); } else { - SetActiveCore(static_cast<s32>( + this->SetActiveCore(static_cast<s32>( Common::BitSize<u64>() - 1 - - std::countl_zero(physical_affinity_mask.GetAffinityMask()))); + std::countl_zero(m_physical_affinity_mask.GetAffinityMask()))); } } - KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core); + KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core); } } // Allow performing thread suspension (if termination hasn't been requested). - if (!IsTerminationRequested()) { + if (!this->IsTerminationRequested()) { // Update our allow flags. 
- suspend_allowed_flags |= (1 << (static_cast<u32>(SuspendType::Thread) + - static_cast<u32>(ThreadState::SuspendShift))); + m_suspend_allowed_flags |= (1 << (static_cast<u32>(SuspendType::Thread) + + static_cast<u32>(ThreadState::SuspendShift))); // Update our state. - UpdateState(); + this->UpdateState(); } // TODO(bunnei): Update our SVC access permissions. - ASSERT(parent != nullptr); + ASSERT(m_parent != nullptr); // Resume any threads that began waiting on us while we were pinned. - for (auto it = pinned_waiter_list.begin(); it != pinned_waiter_list.end(); ++it) { + for (auto it = m_pinned_waiter_list.begin(); it != m_pinned_waiter_list.end(); ++it) { it->EndWait(ResultSuccess); } } u16 KThread::GetUserDisableCount() const { - if (!IsUserThread()) { + if (!this->IsUserThread()) { // We only emulate TLS for user threads return {}; } - auto& memory = kernel.System().Memory(); - return memory.Read16(tls_address + offsetof(ThreadLocalRegion, disable_count)); + auto& memory = m_kernel.System().Memory(); + return memory.Read16(m_tls_address + offsetof(ThreadLocalRegion, disable_count)); } void KThread::SetInterruptFlag() { - if (!IsUserThread()) { + if (!this->IsUserThread()) { // We only emulate TLS for user threads return; } - auto& memory = kernel.System().Memory(); - memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1); + auto& memory = m_kernel.System().Memory(); + memory.Write16(m_tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1); } void KThread::ClearInterruptFlag() { - if (!IsUserThread()) { + if (!this->IsUserThread()) { // We only emulate TLS for user threads return; } - auto& memory = kernel.System().Memory(); - memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0); + auto& memory = m_kernel.System().Memory(); + memory.Write16(m_tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0); } Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; // Get the virtual mask. - *out_ideal_core = virtual_ideal_core_id; - *out_affinity_mask = virtual_affinity_mask; + *out_ideal_core = m_virtual_ideal_core_id; + *out_affinity_mask = m_virtual_affinity_mask; R_SUCCEED(); } Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { - KScopedSchedulerLock sl{kernel}; - ASSERT(num_core_migration_disables >= 0); + KScopedSchedulerLock sl{m_kernel}; + ASSERT(m_num_core_migration_disables >= 0); // Select between core mask and original core mask. - if (num_core_migration_disables == 0) { - *out_ideal_core = physical_ideal_core_id; - *out_affinity_mask = physical_affinity_mask.GetAffinityMask(); + if (m_num_core_migration_disables == 0) { + *out_ideal_core = m_physical_ideal_core_id; + *out_affinity_mask = m_physical_affinity_mask.GetAffinityMask(); } else { - *out_ideal_core = original_physical_ideal_core_id; - *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask(); + *out_ideal_core = m_original_physical_ideal_core_id; + *out_affinity_mask = m_original_physical_affinity_mask.GetAffinityMask(); } R_SUCCEED(); } -Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) { - ASSERT(parent != nullptr); +Result KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) { + ASSERT(m_parent != nullptr); ASSERT(v_affinity_mask != 0); - KScopedLightLock lk(activity_pause_lock); + KScopedLightLock lk(m_activity_pause_lock); // Set the core mask. 
u64 p_affinity_mask = 0; { - KScopedSchedulerLock sl(kernel); - ASSERT(num_core_migration_disables >= 0); + KScopedSchedulerLock sl(m_kernel); + ASSERT(m_num_core_migration_disables >= 0); // If we're updating, set our ideal virtual core. - if (core_id_ != Svc::IdealCoreNoUpdate) { - virtual_ideal_core_id = core_id_; + if (core_id != Svc::IdealCoreNoUpdate) { + m_virtual_ideal_core_id = core_id; } else { // Preserve our ideal core id. - core_id_ = virtual_ideal_core_id; - R_UNLESS(((1ULL << core_id_) & v_affinity_mask) != 0, ResultInvalidCombination); + core_id = m_virtual_ideal_core_id; + R_UNLESS(((1ULL << core_id) & v_affinity_mask) != 0, ResultInvalidCombination); } // Set our affinity mask. - virtual_affinity_mask = v_affinity_mask; + m_virtual_affinity_mask = v_affinity_mask; // Translate the virtual core to a physical core. - if (core_id_ >= 0) { - core_id_ = Core::Hardware::VirtualToPhysicalCoreMap[core_id_]; + if (core_id >= 0) { + core_id = Core::Hardware::VirtualToPhysicalCoreMap[core_id]; } // Translate the virtual affinity mask to a physical one. @@ -635,43 +631,43 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) { } // If we haven't disabled migration, perform an affinity change. - if (num_core_migration_disables == 0) { - const KAffinityMask old_mask = physical_affinity_mask; + if (m_num_core_migration_disables == 0) { + const KAffinityMask old_mask = m_physical_affinity_mask; // Set our new ideals. - physical_ideal_core_id = core_id_; - physical_affinity_mask.SetAffinityMask(p_affinity_mask); + m_physical_ideal_core_id = core_id; + m_physical_affinity_mask.SetAffinityMask(p_affinity_mask); - if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { + if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { const s32 active_core = GetActiveCore(); - if (active_core >= 0 && !physical_affinity_mask.GetAffinity(active_core)) { + if (active_core >= 0 && !m_physical_affinity_mask.GetAffinity(active_core)) { const s32 new_core = static_cast<s32>( - physical_ideal_core_id >= 0 - ? physical_ideal_core_id + m_physical_ideal_core_id >= 0 + ? m_physical_ideal_core_id : Common::BitSize<u64>() - 1 - - std::countl_zero(physical_affinity_mask.GetAffinityMask())); + std::countl_zero(m_physical_affinity_mask.GetAffinityMask())); SetActiveCore(new_core); } - KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core); + KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core); } } else { // Otherwise, we edit the original affinity for restoration later. - original_physical_ideal_core_id = core_id_; - original_physical_affinity_mask.SetAffinityMask(p_affinity_mask); + m_original_physical_ideal_core_id = core_id; + m_original_physical_affinity_mask.SetAffinityMask(p_affinity_mask); } } // Update the pinned waiter list. - ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, std::addressof(pinned_waiter_list)); + ThreadQueueImplForKThreadSetProperty wait_queue(m_kernel, std::addressof(m_pinned_waiter_list)); { bool retry_update{}; do { // Lock the scheduler. - KScopedSchedulerLock sl(kernel); + KScopedSchedulerLock sl(m_kernel); // Don't do any further management if our termination has been requested. - R_SUCCEED_IF(IsTerminationRequested()); + R_SUCCEED_IF(this->IsTerminationRequested()); // By default, we won't need to retry. 
retry_update = false; @@ -681,7 +677,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) { s32 thread_core; for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++thread_core) { - if (kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) { + if (m_kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) { thread_is_current = true; break; } @@ -691,14 +687,14 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) { // new mask. if (thread_is_current && ((1ULL << thread_core) & p_affinity_mask) == 0) { // If the thread is pinned, we want to wait until it's not pinned. - if (GetStackParameters().is_pinned) { + if (this->GetStackParameters().is_pinned) { // Verify that the current thread isn't terminating. - R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), + R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested); // Wait until the thread isn't pinned any more. - pinned_waiter_list.push_back(GetCurrentThread(kernel)); - GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_)); + m_pinned_waiter_list.push_back(GetCurrentThread(m_kernel)); + GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue)); } else { // If the thread isn't pinned, release the scheduler lock and retry until it's // not current. @@ -714,124 +710,124 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) { void KThread::SetBasePriority(s32 value) { ASSERT(Svc::HighestThreadPriority <= value && value <= Svc::LowestThreadPriority); - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; // Change our base priority. - base_priority = value; + m_base_priority = value; // Perform a priority restoration. - RestorePriority(kernel, this); + RestorePriority(m_kernel, this); } KThread* KThread::GetLockOwner() const { - return waiting_lock_info != nullptr ? waiting_lock_info->GetOwner() : nullptr; + return m_waiting_lock_info != nullptr ? m_waiting_lock_info->GetOwner() : nullptr; } -void KThread::IncreaseBasePriority(s32 priority_) { - ASSERT(Svc::HighestThreadPriority <= priority_ && priority_ <= Svc::LowestThreadPriority); - ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); +void KThread::IncreaseBasePriority(s32 priority) { + ASSERT(Svc::HighestThreadPriority <= priority && priority <= Svc::LowestThreadPriority); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); ASSERT(!this->GetStackParameters().is_pinned); // Set our base priority. - if (base_priority > priority_) { - base_priority = priority_; + if (m_base_priority > priority) { + m_base_priority = priority; // Perform a priority restoration. - RestorePriority(kernel, this); + RestorePriority(m_kernel, this); } } void KThread::RequestSuspend(SuspendType type) { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; // Note the request in our flags. - suspend_request_flags |= - (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type))); + m_suspend_request_flags |= + (1U << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type))); // Try to perform the suspend. - TrySuspend(); + this->TrySuspend(); } void KThread::Resume(SuspendType type) { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; // Clear the request in our flags. 
- suspend_request_flags &= - ~(1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type))); + m_suspend_request_flags &= + ~(1U << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type))); // Update our state. this->UpdateState(); } void KThread::WaitCancel() { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; // Check if we're waiting and cancellable. - if (this->GetState() == ThreadState::Waiting && cancellable) { - wait_cancelled = false; - wait_queue->CancelWait(this, ResultCancelled, true); + if (this->GetState() == ThreadState::Waiting && m_cancellable) { + m_wait_cancelled = false; + m_wait_queue->CancelWait(this, ResultCancelled, true); } else { // Otherwise, note that we cancelled a wait. - wait_cancelled = true; + m_wait_cancelled = true; } } void KThread::TrySuspend() { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); - ASSERT(IsSuspendRequested()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); + ASSERT(this->IsSuspendRequested()); // Ensure that we have no waiters. - if (GetNumKernelWaiters() > 0) { + if (this->GetNumKernelWaiters() > 0) { return; } - ASSERT(GetNumKernelWaiters() == 0); + ASSERT(this->GetNumKernelWaiters() == 0); // Perform the suspend. this->UpdateState(); } void KThread::UpdateState() { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); // Set our suspend flags in state. - const ThreadState old_state = thread_state.load(std::memory_order_relaxed); + const ThreadState old_state = m_thread_state.load(std::memory_order_relaxed); const auto new_state = static_cast<ThreadState>(this->GetSuspendFlags()) | (old_state & ThreadState::Mask); - thread_state.store(new_state, std::memory_order_relaxed); + m_thread_state.store(new_state, std::memory_order_relaxed); // Note the state change in scheduler. if (new_state != old_state) { - KScheduler::OnThreadStateChanged(kernel, this, old_state); + KScheduler::OnThreadStateChanged(m_kernel, this, old_state); } } void KThread::Continue() { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); // Clear our suspend flags in state. - const ThreadState old_state = thread_state.load(std::memory_order_relaxed); - thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed); + const ThreadState old_state = m_thread_state.load(std::memory_order_relaxed); + m_thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed); // Note the state change in scheduler. - KScheduler::OnThreadStateChanged(kernel, this, old_state); + KScheduler::OnThreadStateChanged(m_kernel, this, old_state); } void KThread::CloneFpuStatus() { // We shouldn't reach here when starting kernel threads. ASSERT(this->GetOwnerProcess() != nullptr); - ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(kernel)); + ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(m_kernel)); if (this->GetOwnerProcess()->Is64BitProcess()) { // Clone FPSR and FPCR. ThreadContext64 cur_ctx{}; - kernel.System().CurrentArmInterface().SaveContext(cur_ctx); + m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx); this->GetContext64().fpcr = cur_ctx.fpcr; this->GetContext64().fpsr = cur_ctx.fpsr; } else { // Clone FPSCR. 
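// Aside: a simplified model (assumed bit layout, not yuzu source) of the suspend
// bookkeeping used by RequestSuspend/Resume/UpdateState above. Each SuspendType owns
// one bit above the suspend shift; the thread is suspended while a requested bit is
// also allowed, and UpdateState merges those bits back into the state word.
#include <cstdint>

enum class SuspendType : uint32_t { Process = 0, Thread = 1, Debug = 2 };

constexpr uint32_t SuspendShift = 4; // assumed shift for the example
constexpr uint32_t StateMask = 0xF;  // assumed low state bits for the example

constexpr uint32_t SuspendBit(SuspendType type) {
    return 1U << (SuspendShift + static_cast<uint32_t>(type));
}

constexpr bool IsSuspended(uint32_t allowed_flags, uint32_t request_flags) {
    return (allowed_flags & request_flags) != 0;
}

constexpr uint32_t MergeState(uint32_t old_state, uint32_t suspend_flags) {
    // Mirrors UpdateState: keep the low state bits, replace the suspend bits.
    return suspend_flags | (old_state & StateMask);
}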
ThreadContext32 cur_ctx{}; - kernel.System().CurrentArmInterface().SaveContext(cur_ctx); + m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx); this->GetContext32().fpscr = cur_ctx.fpscr; } @@ -839,12 +835,12 @@ void KThread::CloneFpuStatus() { Result KThread::SetActivity(Svc::ThreadActivity activity) { // Lock ourselves. - KScopedLightLock lk(activity_pause_lock); + KScopedLightLock lk(m_activity_pause_lock); // Set the activity. { // Lock the scheduler. - KScopedSchedulerLock sl(kernel); + KScopedSchedulerLock sl(m_kernel); // Verify our state. const auto cur_state = this->GetState(); @@ -871,13 +867,13 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) { // If the thread is now paused, update the pinned waiter list. if (activity == Svc::ThreadActivity::Paused) { - ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, - std::addressof(pinned_waiter_list)); + ThreadQueueImplForKThreadSetProperty wait_queue(m_kernel, + std::addressof(m_pinned_waiter_list)); - bool thread_is_current; + bool thread_is_current{}; do { // Lock the scheduler. - KScopedSchedulerLock sl(kernel); + KScopedSchedulerLock sl(m_kernel); // Don't do any further management if our termination has been requested. R_SUCCEED_IF(this->IsTerminationRequested()); @@ -888,17 +884,17 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) { // Check whether the thread is pinned. if (this->GetStackParameters().is_pinned) { // Verify that the current thread isn't terminating. - R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), + R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested); // Wait until the thread isn't pinned any more. - pinned_waiter_list.push_back(GetCurrentThread(kernel)); - GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_)); + m_pinned_waiter_list.push_back(GetCurrentThread(m_kernel)); + GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue)); } else { // Check if the thread is currently running. // If it is, we'll need to retry. for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) { - if (kernel.Scheduler(i).GetSchedulerCurrentThread() == this) { + if (m_kernel.Scheduler(i).GetSchedulerCurrentThread() == this) { thread_is_current = true; break; } @@ -912,32 +908,32 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) { Result KThread::GetThreadContext3(std::vector<u8>& out) { // Lock ourselves. - KScopedLightLock lk{activity_pause_lock}; + KScopedLightLock lk{m_activity_pause_lock}; // Get the context. { // Lock the scheduler. - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; // Verify that we're suspended. - R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState); + R_UNLESS(this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState); // If we're not terminating, get the thread's user context. - if (!IsTerminationRequested()) { - if (parent->Is64BitProcess()) { + if (!this->IsTerminationRequested()) { + if (m_parent->Is64BitProcess()) { // Mask away mode bits, interrupt bits, IL bit, and other reserved bits. auto context = GetContext64(); context.pstate &= 0xFF0FFE20; out.resize(sizeof(context)); - std::memcpy(out.data(), &context, sizeof(context)); + std::memcpy(out.data(), std::addressof(context), sizeof(context)); } else { // Mask away mode bits, interrupt bits, IL bit, and other reserved bits. 
auto context = GetContext32(); context.cpsr &= 0xFF0FFE20; out.resize(sizeof(context)); - std::memcpy(out.data(), &context, sizeof(context)); + std::memcpy(out.data(), std::addressof(context), sizeof(context)); } } } @@ -946,23 +942,23 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) { } void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) { - ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); // Set ourselves as the lock's owner. lock_info->SetOwner(this); // Add the lock to our held list. - held_lock_info_list.push_front(*lock_info); + m_held_lock_info_list.push_front(*lock_info); } -KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key_, - bool is_kernel_address_key_) { - ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); +KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key, + bool is_kernel_address_key) { + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); // Try to find an existing held lock. - for (auto& held_lock : held_lock_info_list) { - if (held_lock.GetAddressKey() == address_key_ && - held_lock.GetIsKernelAddressKey() == is_kernel_address_key_) { + for (auto& held_lock : m_held_lock_info_list) { + if (held_lock.GetAddressKey() == address_key && + held_lock.GetIsKernelAddressKey() == is_kernel_address_key) { return std::addressof(held_lock); } } @@ -971,25 +967,25 @@ KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_ke } void KThread::AddWaiterImpl(KThread* thread) { - ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); ASSERT(thread->GetConditionVariableTree() == nullptr); // Get the thread's address key. - const auto address_key_ = thread->GetAddressKey(); - const auto is_kernel_address_key_ = thread->GetIsKernelAddressKey(); + const auto address_key = thread->GetAddressKey(); + const auto is_kernel_address_key = thread->GetIsKernelAddressKey(); // Keep track of how many kernel waiters we have. - if (is_kernel_address_key_) { - ASSERT((num_kernel_waiters++) >= 0); - KScheduler::SetSchedulerUpdateNeeded(kernel); + if (is_kernel_address_key) { + ASSERT((m_num_kernel_waiters++) >= 0); + KScheduler::SetSchedulerUpdateNeeded(m_kernel); } // Get the relevant lock info. - auto* lock_info = this->FindHeldLock(address_key_, is_kernel_address_key_); + auto* lock_info = this->FindHeldLock(address_key, is_kernel_address_key); if (lock_info == nullptr) { // Create a new lock for the address key. lock_info = - LockWithPriorityInheritanceInfo::Create(kernel, address_key_, is_kernel_address_key_); + LockWithPriorityInheritanceInfo::Create(m_kernel, address_key, is_kernel_address_key); // Add the new lock to our list. this->AddHeldLock(lock_info); @@ -1000,12 +996,12 @@ void KThread::AddWaiterImpl(KThread* thread) { } void KThread::RemoveWaiterImpl(KThread* thread) { - ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); // Keep track of how many kernel waiters we have. if (thread->GetIsKernelAddressKey()) { - ASSERT((num_kernel_waiters--) > 0); - KScheduler::SetSchedulerUpdateNeeded(kernel); + ASSERT((m_num_kernel_waiters--) > 0); + KScheduler::SetSchedulerUpdateNeeded(m_kernel); } // Get the info for the lock the thread is waiting on. @@ -1014,8 +1010,8 @@ void KThread::RemoveWaiterImpl(KThread* thread) { // Remove the waiter. 
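// Aside: a minimal sketch (not yuzu source) of the priority-inheritance rule that
// RestorePriority applies just below: a thread's effective priority is the minimum
// (numerically lower = higher priority) of its base priority and the priority of the
// best waiter on each lock in its held-lock list.
#include <algorithm>
#include <vector>

int EffectivePriority(int base_priority, const std::vector<int>& best_waiter_priorities) {
    int priority = base_priority;
    for (const int waiter_priority : best_waiter_priorities) {
        priority = std::min(priority, waiter_priority);
    }
    return priority;
}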
if (lock_info->RemoveWaiter(thread)) { - held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info)); - LockWithPriorityInheritanceInfo::Free(kernel, lock_info); + m_held_lock_info_list.erase(m_held_lock_info_list.iterator_to(*lock_info)); + LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info); } } @@ -1025,7 +1021,7 @@ void KThread::RestorePriority(KernelCore& kernel, KThread* thread) { while (thread != nullptr) { // We want to inherit priority where possible. s32 new_priority = thread->GetBasePriority(); - for (const auto& held_lock : thread->held_lock_info_list) { + for (const auto& held_lock : thread->m_held_lock_info_list) { new_priority = std::min(new_priority, held_lock.GetHighestPriorityWaiter()->GetPriority()); } @@ -1076,7 +1072,7 @@ void KThread::AddWaiter(KThread* thread) { // If the thread has a higher priority than us, we should inherit. if (thread->GetPriority() < this->GetPriority()) { - RestorePriority(kernel, this); + RestorePriority(m_kernel, this); } } @@ -1087,12 +1083,12 @@ void KThread::RemoveWaiter(KThread* thread) { // lower priority. if (this->GetPriority() == thread->GetPriority() && this->GetPriority() < this->GetBasePriority()) { - RestorePriority(kernel, this); + RestorePriority(m_kernel, this); } } KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key_) { - ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); // Get the relevant lock info. auto* lock_info = this->FindHeldLock(key, is_kernel_address_key_); @@ -1102,13 +1098,13 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke } // Remove the lock info from our held list. - held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info)); + m_held_lock_info_list.erase(m_held_lock_info_list.iterator_to(*lock_info)); // Keep track of how many kernel waiters we have. if (lock_info->GetIsKernelAddressKey()) { - num_kernel_waiters -= lock_info->GetWaiterCount(); - ASSERT(num_kernel_waiters >= 0); - KScheduler::SetSchedulerUpdateNeeded(kernel); + m_num_kernel_waiters -= lock_info->GetWaiterCount(); + ASSERT(m_num_kernel_waiters >= 0); + KScheduler::SetSchedulerUpdateNeeded(m_kernel); } ASSERT(lock_info->GetWaiterCount() > 0); @@ -1120,7 +1116,7 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke *out_has_waiters = false; // Free the lock info, since it has no waiters. - LockWithPriorityInheritanceInfo::Free(kernel, lock_info); + LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info); } else { // There are additional waiters on the lock. *out_has_waiters = true; @@ -1130,8 +1126,8 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke // Keep track of any kernel waiters for the new owner. if (lock_info->GetIsKernelAddressKey()) { - next_lock_owner->num_kernel_waiters += lock_info->GetWaiterCount(); - ASSERT(next_lock_owner->num_kernel_waiters > 0); + next_lock_owner->m_num_kernel_waiters += lock_info->GetWaiterCount(); + ASSERT(next_lock_owner->m_num_kernel_waiters > 0); // NOTE: No need to set scheduler update needed, because we will have already done so // when removing earlier. @@ -1142,7 +1138,7 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke // to lower priority. 
if (this->GetPriority() == next_lock_owner->GetPriority() && this->GetPriority() < this->GetBasePriority()) { - RestorePriority(kernel, this); + RestorePriority(m_kernel, this); // NOTE: No need to restore priority on the next lock owner, because it was already the // highest priority waiter on the lock. } @@ -1153,76 +1149,76 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke Result KThread::Run() { while (true) { - KScopedSchedulerLock lk{kernel}; + KScopedSchedulerLock lk{m_kernel}; // If either this thread or the current thread are requesting termination, note it. - R_UNLESS(!IsTerminationRequested(), ResultTerminationRequested); - R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested); + R_UNLESS(!this->IsTerminationRequested(), ResultTerminationRequested); + R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested); // Ensure our thread state is correct. - R_UNLESS(GetState() == ThreadState::Initialized, ResultInvalidState); + R_UNLESS(this->GetState() == ThreadState::Initialized, ResultInvalidState); // If the current thread has been asked to suspend, suspend it and retry. - if (GetCurrentThread(kernel).IsSuspended()) { - GetCurrentThread(kernel).UpdateState(); + if (GetCurrentThread(m_kernel).IsSuspended()) { + GetCurrentThread(m_kernel).UpdateState(); continue; } // If we're not a kernel thread and we've been asked to suspend, suspend ourselves. if (KProcess* owner = this->GetOwnerProcess(); owner != nullptr) { - if (IsUserThread() && IsSuspended()) { + if (this->IsUserThread() && this->IsSuspended()) { this->UpdateState(); } owner->IncrementRunningThreadCount(); } // Set our state and finish. - SetState(ThreadState::Runnable); + this->SetState(ThreadState::Runnable); R_SUCCEED(); } } void KThread::Exit() { - ASSERT(this == GetCurrentThreadPointer(kernel)); + ASSERT(this == GetCurrentThreadPointer(m_kernel)); // Release the thread resource hint, running thread count from parent. - if (parent != nullptr) { - parent->GetResourceLimit()->Release(Kernel::LimitableResource::ThreadCountMax, 0, 1); - resource_limit_release_hint = true; - parent->DecrementRunningThreadCount(); + if (m_parent != nullptr) { + m_parent->GetResourceLimit()->Release(Kernel::LimitableResource::ThreadCountMax, 0, 1); + m_resource_limit_release_hint = true; + m_parent->DecrementRunningThreadCount(); } // Perform termination. { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; // Disallow all suspension. - suspend_allowed_flags = 0; + m_suspend_allowed_flags = 0; this->UpdateState(); // Disallow all suspension. - suspend_allowed_flags = 0; + m_suspend_allowed_flags = 0; // Start termination. - StartTermination(); + this->StartTermination(); // Register the thread as a work task. - KWorkerTaskManager::AddTask(kernel, KWorkerTaskManager::WorkerType::Exit, this); + KWorkerTaskManager::AddTask(m_kernel, KWorkerTaskManager::WorkerType::Exit, this); } UNREACHABLE_MSG("KThread::Exit() would return"); } Result KThread::Terminate() { - ASSERT(this != GetCurrentThreadPointer(kernel)); + ASSERT(this != GetCurrentThreadPointer(m_kernel)); // Request the thread terminate if it hasn't already. if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) { // If the thread isn't terminated, wait for it to terminate. 
s32 index; KSynchronizationObject* objects[] = {this}; - R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1, + R_TRY(KSynchronizationObject::Wait(m_kernel, std::addressof(index), objects, 1, Svc::WaitInfinite)); } @@ -1230,22 +1226,22 @@ Result KThread::Terminate() { } ThreadState KThread::RequestTerminate() { - ASSERT(this != GetCurrentThreadPointer(kernel)); + ASSERT(this != GetCurrentThreadPointer(m_kernel)); - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; // Determine if this is the first termination request. const bool first_request = [&]() -> bool { // Perform an atomic compare-and-swap from false to true. bool expected = false; - return termination_requested.compare_exchange_strong(expected, true); + return m_termination_requested.compare_exchange_strong(expected, true); }(); // If this is the first request, start termination procedure. if (first_request) { // If the thread is in initialized state, just change state to terminated. if (this->GetState() == ThreadState::Initialized) { - thread_state = ThreadState::Terminated; + m_thread_state = ThreadState::Terminated; return ThreadState::Terminated; } @@ -1259,7 +1255,7 @@ ThreadState KThread::RequestTerminate() { // If the thread is suspended, continue it. if (this->IsSuspended()) { - suspend_allowed_flags = 0; + m_suspend_allowed_flags = 0; this->UpdateState(); } @@ -1268,16 +1264,16 @@ ThreadState KThread::RequestTerminate() { // If the thread is runnable, send a termination interrupt to other cores. if (this->GetState() == ThreadState::Runnable) { - if (const u64 core_mask = - physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel)); + if (const u64 core_mask = m_physical_affinity_mask.GetAffinityMask() & + ~(1ULL << GetCurrentCoreId(m_kernel)); core_mask != 0) { - Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask); + Kernel::KInterruptManager::SendInterProcessorInterrupt(m_kernel, core_mask); } } // Wake up the thread. if (this->GetState() == ThreadState::Waiting) { - wait_queue->CancelWait(this, ResultTerminationRequested, true); + m_wait_queue->CancelWait(this, ResultTerminationRequested, true); } } @@ -1285,15 +1281,15 @@ ThreadState KThread::RequestTerminate() { } Result KThread::Sleep(s64 timeout) { - ASSERT(!kernel.GlobalSchedulerContext().IsLocked()); - ASSERT(this == GetCurrentThreadPointer(kernel)); + ASSERT(!KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); + ASSERT(this == GetCurrentThreadPointer(m_kernel)); ASSERT(timeout > 0); - ThreadQueueImplForKThreadSleep wait_queue_(kernel); + ThreadQueueImplForKThreadSleep wait_queue(m_kernel); KHardwareTimer* timer{}; { // Setup the scheduling lock and sleep. - KScopedSchedulerLockAndSleep slp(kernel, std::addressof(timer), this, timeout); + KScopedSchedulerLockAndSleep slp(m_kernel, std::addressof(timer), this, timeout); // Check if the thread should terminate. if (this->IsTerminationRequested()) { @@ -1302,103 +1298,102 @@ Result KThread::Sleep(s64 timeout) { } // Wait for the sleep to end. 
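// Aside: the "first requester wins" idiom RequestTerminate uses above, shown in
// isolation (a sketch, not yuzu source). compare_exchange_strong flips false -> true
// exactly once, so only the first caller observes 'true' and runs the termination path.
#include <atomic>

inline std::atomic<bool> termination_requested{false};

bool IsFirstTerminationRequest() {
    bool expected = false;
    return termination_requested.compare_exchange_strong(expected, true);
}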
- wait_queue_.SetHardwareTimer(timer); - this->BeginWait(std::addressof(wait_queue_)); - SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep); + wait_queue.SetHardwareTimer(timer); + this->BeginWait(std::addressof(wait_queue)); + this->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep); } R_SUCCEED(); } void KThread::RequestDummyThreadWait() { - ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); ASSERT(this->IsDummyThread()); // We will block when the scheduler lock is released. - dummy_thread_runnable.store(false); + m_dummy_thread_runnable.store(false); } void KThread::DummyThreadBeginWait() { - if (!this->IsDummyThread() || kernel.IsPhantomModeForSingleCore()) { + if (!this->IsDummyThread() || m_kernel.IsPhantomModeForSingleCore()) { // Occurs in single core mode. return; } // Block until runnable is no longer false. - dummy_thread_runnable.wait(false); + m_dummy_thread_runnable.wait(false); } void KThread::DummyThreadEndWait() { - ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); ASSERT(this->IsDummyThread()); // Wake up the waiting thread. - dummy_thread_runnable.store(true); - dummy_thread_runnable.notify_one(); + m_dummy_thread_runnable.store(true); + m_dummy_thread_runnable.notify_one(); } void KThread::BeginWait(KThreadQueue* queue) { // Set our state as waiting. - SetState(ThreadState::Waiting); + this->SetState(ThreadState::Waiting); // Set our wait queue. - wait_queue = queue; + m_wait_queue = queue; } -void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_) { +void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result) { // Lock the scheduler. - KScopedSchedulerLock sl(kernel); + KScopedSchedulerLock sl(m_kernel); // If we're waiting, notify our queue that we're available. - if (GetState() == ThreadState::Waiting) { - wait_queue->NotifyAvailable(this, signaled_object, wait_result_); + if (this->GetState() == ThreadState::Waiting) { + m_wait_queue->NotifyAvailable(this, signaled_object, wait_result); } } -void KThread::EndWait(Result wait_result_) { +void KThread::EndWait(Result wait_result) { // Lock the scheduler. - KScopedSchedulerLock sl(kernel); + KScopedSchedulerLock sl(m_kernel); // If we're waiting, notify our queue that we're available. - if (GetState() == ThreadState::Waiting) { - if (wait_queue == nullptr) { + if (this->GetState() == ThreadState::Waiting) { + if (m_wait_queue == nullptr) { // This should never happen, but avoid a hard crash below to get this logged. ASSERT_MSG(false, "wait_queue is nullptr!"); return; } - wait_queue->EndWait(this, wait_result_); + m_wait_queue->EndWait(this, wait_result); } } -void KThread::CancelWait(Result wait_result_, bool cancel_timer_task) { +void KThread::CancelWait(Result wait_result, bool cancel_timer_task) { // Lock the scheduler. - KScopedSchedulerLock sl(kernel); + KScopedSchedulerLock sl(m_kernel); // If we're waiting, notify our queue that we're available. 
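// Aside: a condensed sketch (not yuzu source) of the C++20 std::atomic wait/notify
// protocol the dummy-thread helpers above rely on: the waiter parks while the flag
// holds 'false', and the releaser stores 'true' and notifies.
#include <atomic>

inline std::atomic<bool> runnable{true};

void RequestWait() {
    runnable.store(false); // arm the wait; the blocking happens in BeginWait()
}

void BeginWait() {
    runnable.wait(false); // blocks while the stored value is still 'false'
}

void EndWait() {
    runnable.store(true);
    runnable.notify_one(); // wakes the thread parked in BeginWait()
}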
- if (GetState() == ThreadState::Waiting) { - wait_queue->CancelWait(this, wait_result_, cancel_timer_task); + if (this->GetState() == ThreadState::Waiting) { + m_wait_queue->CancelWait(this, wait_result, cancel_timer_task); } } void KThread::SetState(ThreadState state) { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl{m_kernel}; // Clear debugging state - SetMutexWaitAddressForDebugging({}); - SetWaitReasonForDebugging({}); + this->SetWaitReasonForDebugging({}); - const ThreadState old_state = thread_state.load(std::memory_order_relaxed); - thread_state.store( + const ThreadState old_state = m_thread_state.load(std::memory_order_relaxed); + m_thread_state.store( static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)), std::memory_order_relaxed); - if (thread_state.load(std::memory_order_relaxed) != old_state) { - KScheduler::OnThreadStateChanged(kernel, this, old_state); + if (m_thread_state.load(std::memory_order_relaxed) != old_state) { + KScheduler::OnThreadStateChanged(m_kernel, this, old_state); } } std::shared_ptr<Common::Fiber>& KThread::GetHostContext() { - return host_context; + return m_host_context; } void SetCurrentThread(KernelCore& kernel, KThread* thread) { @@ -1427,20 +1422,20 @@ s32 GetCurrentCoreId(KernelCore& kernel) { KScopedDisableDispatch::~KScopedDisableDispatch() { // If we are shutting down the kernel, none of this is relevant anymore. - if (kernel.IsShuttingDown()) { + if (m_kernel.IsShuttingDown()) { return; } - if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) { - auto* scheduler = kernel.CurrentScheduler(); + if (GetCurrentThread(m_kernel).GetDisableDispatchCount() <= 1) { + auto* scheduler = m_kernel.CurrentScheduler(); - if (scheduler && !kernel.IsPhantomModeForSingleCore()) { + if (scheduler && !m_kernel.IsPhantomModeForSingleCore()) { scheduler->RescheduleCurrentCore(); } else { - KScheduler::RescheduleCurrentHLEThread(kernel); + KScheduler::RescheduleCurrentHLEThread(m_kernel); } } else { - GetCurrentThread(kernel).EnableDispatch(); + GetCurrentThread(m_kernel).EnableDispatch(); } } diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index 9423f08ca..53fa64369 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h @@ -108,11 +108,11 @@ enum class StepState : u32 { }; void SetCurrentThread(KernelCore& kernel, KThread* thread); -[[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel); -[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel); -[[nodiscard]] KProcess* GetCurrentProcessPointer(KernelCore& kernel); -[[nodiscard]] KProcess& GetCurrentProcess(KernelCore& kernel); -[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel); +KThread* GetCurrentThreadPointer(KernelCore& kernel); +KThread& GetCurrentThread(KernelCore& kernel); +KProcess* GetCurrentProcessPointer(KernelCore& kernel); +KProcess& GetCurrentProcess(KernelCore& kernel); +s32 GetCurrentCoreId(KernelCore& kernel); class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KWorkerTask>, public boost::intrusive::list_base_hook<>, @@ -128,7 +128,7 @@ public: static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1; static constexpr s32 DummyThreadPriority = Svc::LowestThreadPriority + 2; - explicit KThread(KernelCore& kernel_); + explicit KThread(KernelCore& kernel); ~KThread() override; public: @@ -136,16 +136,12 @@ public: using ThreadContext64 = Core::ARM_Interface::ThreadContext64; using WaiterList = boost::intrusive::list<KThread>; - void 
SetName(std::string new_name) { - name = std::move(new_name); - } - /** * Gets the thread's current priority * @return The current thread's priority */ - [[nodiscard]] s32 GetPriority() const { - return priority; + s32 GetPriority() const { + return m_priority; } /** @@ -153,23 +149,23 @@ public: * @param priority The new priority. */ void SetPriority(s32 value) { - priority = value; + m_priority = value; } /** * Gets the thread's nominal priority. * @return The current thread's nominal priority. */ - [[nodiscard]] s32 GetBasePriority() const { - return base_priority; + s32 GetBasePriority() const { + return m_base_priority; } /** * Gets the thread's thread ID * @return The thread's ID */ - [[nodiscard]] u64 GetThreadID() const { - return thread_id; + u64 GetThreadId() const { + return m_thread_id; } void ContinueIfHasKernelWaiters() { @@ -180,7 +176,7 @@ public: void SetBasePriority(s32 value); - [[nodiscard]] Result Run(); + Result Run(); void Exit(); @@ -188,22 +184,22 @@ public: ThreadState RequestTerminate(); - [[nodiscard]] u32 GetSuspendFlags() const { - return suspend_allowed_flags & suspend_request_flags; + u32 GetSuspendFlags() const { + return m_suspend_allowed_flags & m_suspend_request_flags; } - [[nodiscard]] bool IsSuspended() const { + bool IsSuspended() const { return GetSuspendFlags() != 0; } - [[nodiscard]] bool IsSuspendRequested(SuspendType type) const { - return (suspend_request_flags & - (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)))) != + bool IsSuspendRequested(SuspendType type) const { + return (m_suspend_request_flags & + (1U << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)))) != 0; } - [[nodiscard]] bool IsSuspendRequested() const { - return suspend_request_flags != 0; + bool IsSuspendRequested() const { + return m_suspend_request_flags != 0; } void RequestSuspend(SuspendType type); @@ -217,124 +213,124 @@ public: void Continue(); constexpr void SetSyncedIndex(s32 index) { - synced_index = index; + m_synced_index = index; } - [[nodiscard]] constexpr s32 GetSyncedIndex() const { - return synced_index; + constexpr s32 GetSyncedIndex() const { + return m_synced_index; } constexpr void SetWaitResult(Result wait_res) { - wait_result = wait_res; + m_wait_result = wait_res; } - [[nodiscard]] constexpr Result GetWaitResult() const { - return wait_result; + constexpr Result GetWaitResult() const { + return m_wait_result; } /* * Returns the Thread Local Storage address of the current thread * @returns VAddr of the thread's TLS */ - [[nodiscard]] VAddr GetTLSAddress() const { - return tls_address; + VAddr GetTlsAddress() const { + return m_tls_address; } /* * Returns the value of the TPIDR_EL0 Read/Write system register for this thread. * @returns The value of the TPIDR_EL0 register. */ - [[nodiscard]] u64 GetTPIDR_EL0() const { - return thread_context_64.tpidr; + u64 GetTpidrEl0() const { + return m_thread_context_64.tpidr; } /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread. 
- void SetTPIDR_EL0(u64 value) { - thread_context_64.tpidr = value; - thread_context_32.tpidr = static_cast<u32>(value); + void SetTpidrEl0(u64 value) { + m_thread_context_64.tpidr = value; + m_thread_context_32.tpidr = static_cast<u32>(value); } void CloneFpuStatus(); - [[nodiscard]] ThreadContext32& GetContext32() { - return thread_context_32; + ThreadContext32& GetContext32() { + return m_thread_context_32; } - [[nodiscard]] const ThreadContext32& GetContext32() const { - return thread_context_32; + const ThreadContext32& GetContext32() const { + return m_thread_context_32; } - [[nodiscard]] ThreadContext64& GetContext64() { - return thread_context_64; + ThreadContext64& GetContext64() { + return m_thread_context_64; } - [[nodiscard]] const ThreadContext64& GetContext64() const { - return thread_context_64; + const ThreadContext64& GetContext64() const { + return m_thread_context_64; } - [[nodiscard]] std::shared_ptr<Common::Fiber>& GetHostContext(); + std::shared_ptr<Common::Fiber>& GetHostContext(); - [[nodiscard]] ThreadState GetState() const { - return thread_state.load(std::memory_order_relaxed) & ThreadState::Mask; + ThreadState GetState() const { + return m_thread_state.load(std::memory_order_relaxed) & ThreadState::Mask; } - [[nodiscard]] ThreadState GetRawState() const { - return thread_state.load(std::memory_order_relaxed); + ThreadState GetRawState() const { + return m_thread_state.load(std::memory_order_relaxed); } void SetState(ThreadState state); - [[nodiscard]] StepState GetStepState() const { - return step_state; + StepState GetStepState() const { + return m_step_state; } void SetStepState(StepState state) { - step_state = state; + m_step_state = state; } - [[nodiscard]] s64 GetLastScheduledTick() const { - return last_scheduled_tick; + s64 GetLastScheduledTick() const { + return m_last_scheduled_tick; } void SetLastScheduledTick(s64 tick) { - last_scheduled_tick = tick; + m_last_scheduled_tick = tick; } - void AddCpuTime([[maybe_unused]] s32 core_id_, s64 amount) { - cpu_time += amount; + void AddCpuTime(s32 core_id, s64 amount) { + m_cpu_time += amount; // TODO(bunnei): Debug kernels track per-core tick counts. Should we? 
} - [[nodiscard]] s64 GetCpuTime() const { - return cpu_time; + s64 GetCpuTime() const { + return m_cpu_time; } - [[nodiscard]] s32 GetActiveCore() const { - return core_id; + s32 GetActiveCore() const { + return m_core_id; } void SetActiveCore(s32 core) { - core_id = core; + m_core_id = core; } - [[nodiscard]] s32 GetCurrentCore() const { - return current_core_id; + s32 GetCurrentCore() const { + return m_current_core_id; } void SetCurrentCore(s32 core) { - current_core_id = core; + m_current_core_id = core; } - [[nodiscard]] KProcess* GetOwnerProcess() { - return parent; + KProcess* GetOwnerProcess() { + return m_parent; } - [[nodiscard]] const KProcess* GetOwnerProcess() const { - return parent; + const KProcess* GetOwnerProcess() const { + return m_parent; } - [[nodiscard]] bool IsUserThread() const { - return parent != nullptr; + bool IsUserThread() const { + return m_parent != nullptr; } u16 GetUserDisableCount() const; @@ -343,69 +339,69 @@ public: KThread* GetLockOwner() const; - [[nodiscard]] const KAffinityMask& GetAffinityMask() const { - return physical_affinity_mask; + const KAffinityMask& GetAffinityMask() const { + return m_physical_affinity_mask; } - [[nodiscard]] Result GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask); + Result GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask); - [[nodiscard]] Result GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask); + Result GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask); - [[nodiscard]] Result SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask); + Result SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask); - [[nodiscard]] Result SetActivity(Svc::ThreadActivity activity); + Result SetActivity(Svc::ThreadActivity activity); - [[nodiscard]] Result Sleep(s64 timeout); + Result Sleep(s64 timeout); - [[nodiscard]] s64 GetYieldScheduleCount() const { - return schedule_count; + s64 GetYieldScheduleCount() const { + return m_schedule_count; } void SetYieldScheduleCount(s64 count) { - schedule_count = count; + m_schedule_count = count; } void WaitCancel(); - [[nodiscard]] bool IsWaitCancelled() const { - return wait_cancelled; + bool IsWaitCancelled() const { + return m_wait_cancelled; } void ClearWaitCancelled() { - wait_cancelled = false; + m_wait_cancelled = false; } - [[nodiscard]] bool IsCancellable() const { - return cancellable; + bool IsCancellable() const { + return m_cancellable; } void SetCancellable() { - cancellable = true; + m_cancellable = true; } void ClearCancellable() { - cancellable = false; + m_cancellable = false; } - [[nodiscard]] bool IsTerminationRequested() const { - return termination_requested || GetRawState() == ThreadState::Terminated; + bool IsTerminationRequested() const { + return m_termination_requested || GetRawState() == ThreadState::Terminated; } - [[nodiscard]] u64 GetId() const override { - return this->GetThreadID(); + u64 GetId() const override { + return this->GetThreadId(); } - [[nodiscard]] bool IsInitialized() const override { - return initialized; + bool IsInitialized() const override { + return m_initialized; } - [[nodiscard]] uintptr_t GetPostDestroyArgument() const override { - return reinterpret_cast<uintptr_t>(parent) | (resource_limit_release_hint ? 1 : 0); + uintptr_t GetPostDestroyArgument() const override { + return reinterpret_cast<uintptr_t>(m_parent) | (m_resource_limit_release_hint ? 
1 : 0); } void Finalize() override; - [[nodiscard]] bool IsSignaled() const override; + bool IsSignaled() const override; void OnTimer(); @@ -413,26 +409,22 @@ public: static void PostDestroy(uintptr_t arg); - [[nodiscard]] static Result InitializeDummyThread(KThread* thread, KProcess* owner); + static Result InitializeDummyThread(KThread* thread, KProcess* owner); - [[nodiscard]] static Result InitializeMainThread(Core::System& system, KThread* thread, - s32 virt_core); + static Result InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core); - [[nodiscard]] static Result InitializeIdleThread(Core::System& system, KThread* thread, - s32 virt_core); + static Result InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core); - [[nodiscard]] static Result InitializeHighPriorityThread(Core::System& system, KThread* thread, - KThreadFunction func, uintptr_t arg, - s32 virt_core); + static Result InitializeHighPriorityThread(Core::System& system, KThread* thread, + KThreadFunction func, uintptr_t arg, s32 virt_core); - [[nodiscard]] static Result InitializeUserThread(Core::System& system, KThread* thread, - KThreadFunction func, uintptr_t arg, - VAddr user_stack_top, s32 prio, s32 virt_core, - KProcess* owner); + static Result InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func, + uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core, + KProcess* owner); - [[nodiscard]] static Result InitializeServiceThread(Core::System& system, KThread* thread, - std::function<void()>&& thread_func, - s32 prio, s32 virt_core, KProcess* owner); + static Result InitializeServiceThread(Core::System& system, KThread* thread, + std::function<void()>&& thread_func, s32 prio, + s32 virt_core, KProcess* owner); public: struct StackParameters { @@ -446,12 +438,12 @@ public: KThread* cur_thread; }; - [[nodiscard]] StackParameters& GetStackParameters() { - return stack_parameters; + StackParameters& GetStackParameters() { + return m_stack_parameters; } - [[nodiscard]] const StackParameters& GetStackParameters() const { - return stack_parameters; + const StackParameters& GetStackParameters() const { + return m_stack_parameters; } class QueueEntry { @@ -459,47 +451,47 @@ public: constexpr QueueEntry() = default; constexpr void Initialize() { - prev = nullptr; - next = nullptr; + m_prev = nullptr; + m_next = nullptr; } constexpr KThread* GetPrev() const { - return prev; + return m_prev; } constexpr KThread* GetNext() const { - return next; + return m_next; } constexpr void SetPrev(KThread* thread) { - prev = thread; + m_prev = thread; } constexpr void SetNext(KThread* thread) { - next = thread; + m_next = thread; } private: - KThread* prev{}; - KThread* next{}; + KThread* m_prev{}; + KThread* m_next{}; }; - [[nodiscard]] QueueEntry& GetPriorityQueueEntry(s32 core) { - return per_core_priority_queue_entry[core]; + QueueEntry& GetPriorityQueueEntry(s32 core) { + return m_per_core_priority_queue_entry[core]; } - [[nodiscard]] const QueueEntry& GetPriorityQueueEntry(s32 core) const { - return per_core_priority_queue_entry[core]; + const QueueEntry& GetPriorityQueueEntry(s32 core) const { + return m_per_core_priority_queue_entry[core]; } - [[nodiscard]] s32 GetDisableDispatchCount() const { + s32 GetDisableDispatchCount() const { return this->GetStackParameters().disable_count; } void DisableDispatch() { - ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); + ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() >= 0); 
this->GetStackParameters().disable_count++; } void EnableDispatch() { - ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0); + ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() > 0); this->GetStackParameters().disable_count--; } @@ -515,7 +507,7 @@ public: this->GetStackParameters().is_in_exception_handler = false; } - [[nodiscard]] bool IsInExceptionHandler() const { + bool IsInExceptionHandler() const { return this->GetStackParameters().is_in_exception_handler; } @@ -527,11 +519,11 @@ public: this->GetStackParameters().is_calling_svc = false; } - [[nodiscard]] bool IsCallingSvc() const { + bool IsCallingSvc() const { return this->GetStackParameters().is_calling_svc; } - [[nodiscard]] u8 GetSvcId() const { + u8 GetSvcId() const { return this->GetStackParameters().current_svc_id; } @@ -543,78 +535,54 @@ public: this->GetStackParameters().dpc_flags &= ~static_cast<u8>(flag); } - [[nodiscard]] u8 GetDpc() const { + u8 GetDpc() const { return this->GetStackParameters().dpc_flags; } - [[nodiscard]] bool HasDpc() const { + bool HasDpc() const { return this->GetDpc() != 0; } void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) { - wait_reason_for_debugging = reason; - } - - [[nodiscard]] ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const { - return wait_reason_for_debugging; - } - - [[nodiscard]] ThreadType GetThreadType() const { - return thread_type; - } - - [[nodiscard]] bool IsDummyThread() const { - return GetThreadType() == ThreadType::Dummy; - } - - void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) { - wait_objects_for_debugging.clear(); - wait_objects_for_debugging.reserve(objects.size()); - for (const auto& object : objects) { - wait_objects_for_debugging.emplace_back(object); - } + m_wait_reason_for_debugging = reason; } - [[nodiscard]] const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const { - return wait_objects_for_debugging; + ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const { + return m_wait_reason_for_debugging; } - void SetMutexWaitAddressForDebugging(VAddr address) { - mutex_wait_address_for_debugging = address; + ThreadType GetThreadType() const { + return m_thread_type; } - [[nodiscard]] VAddr GetMutexWaitAddressForDebugging() const { - return mutex_wait_address_for_debugging; - } - - [[nodiscard]] s32 GetIdealCoreForDebugging() const { - return virtual_ideal_core_id; + bool IsDummyThread() const { + return this->GetThreadType() == ThreadType::Dummy; } void AddWaiter(KThread* thread); void RemoveWaiter(KThread* thread); - [[nodiscard]] Result GetThreadContext3(std::vector<u8>& out); + Result GetThreadContext3(std::vector<u8>& out); - [[nodiscard]] KThread* RemoveUserWaiterByKey(bool* out_has_waiters, VAddr key) { + KThread* RemoveUserWaiterByKey(bool* out_has_waiters, VAddr key) { return this->RemoveWaiterByKey(out_has_waiters, key, false); } - [[nodiscard]] KThread* RemoveKernelWaiterByKey(bool* out_has_waiters, VAddr key) { + KThread* RemoveKernelWaiterByKey(bool* out_has_waiters, VAddr key) { return this->RemoveWaiterByKey(out_has_waiters, key, true); } - [[nodiscard]] VAddr GetAddressKey() const { - return address_key; + VAddr GetAddressKey() const { + return m_address_key; } - [[nodiscard]] u32 GetAddressKeyValue() const { - return address_key_value; + u32 GetAddressKeyValue() const { + return m_address_key_value; } - [[nodiscard]] bool GetIsKernelAddressKey() const { - return is_kernel_address_key; + bool GetIsKernelAddressKey() const { + return 
m_is_kernel_address_key; } //! NB: intentional deviation from official kernel. @@ -624,37 +592,37 @@ public: // into things. void SetUserAddressKey(VAddr key, u32 val) { - ASSERT(waiting_lock_info == nullptr); - address_key = key; - address_key_value = val; - is_kernel_address_key = false; + ASSERT(m_waiting_lock_info == nullptr); + m_address_key = key; + m_address_key_value = val; + m_is_kernel_address_key = false; } void SetKernelAddressKey(VAddr key) { - ASSERT(waiting_lock_info == nullptr); - address_key = key; - is_kernel_address_key = true; + ASSERT(m_waiting_lock_info == nullptr); + m_address_key = key; + m_is_kernel_address_key = true; } void ClearWaitQueue() { - wait_queue = nullptr; + m_wait_queue = nullptr; } void BeginWait(KThreadQueue* queue); - void NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_); - void EndWait(Result wait_result_); - void CancelWait(Result wait_result_, bool cancel_timer_task); + void NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result); + void EndWait(Result wait_result); + void CancelWait(Result wait_result, bool cancel_timer_task); - [[nodiscard]] s32 GetNumKernelWaiters() const { - return num_kernel_waiters; + s32 GetNumKernelWaiters() const { + return m_num_kernel_waiters; } - [[nodiscard]] u64 GetConditionVariableKey() const { - return condvar_key; + u64 GetConditionVariableKey() const { + return m_condvar_key; } - [[nodiscard]] u64 GetAddressArbiterKey() const { - return condvar_key; + u64 GetAddressArbiterKey() const { + return m_condvar_key; } // Dummy threads (used for HLE host threads) cannot wait based on the guest scheduler, and @@ -665,17 +633,16 @@ public: void DummyThreadBeginWait(); void DummyThreadEndWait(); - [[nodiscard]] uintptr_t GetArgument() const { - return argument; + uintptr_t GetArgument() const { + return m_argument; } - [[nodiscard]] VAddr GetUserStackTop() const { - return stack_top; + VAddr GetUserStackTop() const { + return m_stack_top; } private: - [[nodiscard]] KThread* RemoveWaiterByKey(bool* out_has_waiters, VAddr key, - bool is_kernel_address_key); + KThread* RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key); static constexpr size_t PriorityInheritanceCountMax = 10; union SyncObjectBuffer { @@ -692,11 +659,11 @@ private: u64 cv_key{}; s32 priority{}; - [[nodiscard]] constexpr u64 GetConditionVariableKey() const { + constexpr u64 GetConditionVariableKey() const { return cv_key; } - [[nodiscard]] constexpr s32 GetPriority() const { + constexpr s32 GetPriority() const { return priority; } }; @@ -728,22 +695,21 @@ private: void IncreaseBasePriority(s32 priority); - [[nodiscard]] Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, - s32 prio, s32 virt_core, KProcess* owner, ThreadType type); + Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, + s32 virt_core, KProcess* owner, ThreadType type); - [[nodiscard]] static Result InitializeThread(KThread* thread, KThreadFunction func, - uintptr_t arg, VAddr user_stack_top, s32 prio, - s32 core, KProcess* owner, ThreadType type, - std::function<void()>&& init_func); + static Result InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg, + VAddr user_stack_top, s32 prio, s32 core, KProcess* owner, + ThreadType type, std::function<void()>&& init_func); // For core KThread implementation - ThreadContext32 thread_context_32{}; - ThreadContext64 thread_context_64{}; - Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{}; 
- s32 priority{}; + ThreadContext32 m_thread_context_32{}; + ThreadContext64 m_thread_context_64{}; + Common::IntrusiveRedBlackTreeNode m_condvar_arbiter_tree_node{}; + s32 m_priority{}; using ConditionVariableThreadTreeTraits = Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert< - &KThread::condvar_arbiter_tree_node>; + &KThread::m_condvar_arbiter_tree_node>; using ConditionVariableThreadTree = ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>; @@ -773,7 +739,7 @@ private: using LockWithPriorityInheritanceThreadTreeTraits = Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert< - &KThread::condvar_arbiter_tree_node>; + &KThread::m_condvar_arbiter_tree_node>; using LockWithPriorityInheritanceThreadTree = ConditionVariableThreadTreeTraits::TreeType<LockWithPriorityInheritanceComparator>; @@ -809,7 +775,7 @@ public: waiter->SetWaitingLockInfo(this); } - [[nodiscard]] bool RemoveWaiter(KThread* waiter) { + bool RemoveWaiter(KThread* waiter) { m_tree.erase(m_tree.iterator_to(*waiter)); waiter->SetWaitingLockInfo(nullptr); @@ -853,11 +819,11 @@ public: }; void SetWaitingLockInfo(LockWithPriorityInheritanceInfo* lock) { - waiting_lock_info = lock; + m_waiting_lock_info = lock; } LockWithPriorityInheritanceInfo* GetWaitingLockInfo() { - return waiting_lock_info; + return m_waiting_lock_info; } void AddHeldLock(LockWithPriorityInheritanceInfo* lock_info); @@ -867,111 +833,110 @@ private: using LockWithPriorityInheritanceInfoList = boost::intrusive::list<LockWithPriorityInheritanceInfo>; - ConditionVariableThreadTree* condvar_tree{}; - u64 condvar_key{}; - u64 virtual_affinity_mask{}; - KAffinityMask physical_affinity_mask{}; - u64 thread_id{}; - std::atomic<s64> cpu_time{}; - VAddr address_key{}; - KProcess* parent{}; - VAddr kernel_stack_top{}; - u32* light_ipc_data{}; - VAddr tls_address{}; - KLightLock activity_pause_lock; - s64 schedule_count{}; - s64 last_scheduled_tick{}; - std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{}; - KThreadQueue* wait_queue{}; - LockWithPriorityInheritanceInfoList held_lock_info_list{}; - LockWithPriorityInheritanceInfo* waiting_lock_info{}; - WaiterList pinned_waiter_list{}; - u32 address_key_value{}; - u32 suspend_request_flags{}; - u32 suspend_allowed_flags{}; - s32 synced_index{}; - Result wait_result{ResultSuccess}; - s32 base_priority{}; - s32 physical_ideal_core_id{}; - s32 virtual_ideal_core_id{}; - s32 num_kernel_waiters{}; - s32 current_core_id{}; - s32 core_id{}; - KAffinityMask original_physical_affinity_mask{}; - s32 original_physical_ideal_core_id{}; - s32 num_core_migration_disables{}; - std::atomic<ThreadState> thread_state{}; - std::atomic<bool> termination_requested{}; - bool wait_cancelled{}; - bool cancellable{}; - bool signaled{}; - bool initialized{}; - bool debug_attached{}; - s8 priority_inheritance_count{}; - bool resource_limit_release_hint{}; - bool is_kernel_address_key{}; - StackParameters stack_parameters{}; - Common::SpinLock context_guard{}; + ConditionVariableThreadTree* m_condvar_tree{}; + u64 m_condvar_key{}; + u64 m_virtual_affinity_mask{}; + KAffinityMask m_physical_affinity_mask{}; + u64 m_thread_id{}; + std::atomic<s64> m_cpu_time{}; + VAddr m_address_key{}; + KProcess* m_parent{}; + VAddr m_kernel_stack_top{}; + u32* m_light_ipc_data{}; + VAddr m_tls_address{}; + KLightLock m_activity_pause_lock; + s64 m_schedule_count{}; + s64 m_last_scheduled_tick{}; + std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> m_per_core_priority_queue_entry{}; + KThreadQueue* 
m_wait_queue{}; + LockWithPriorityInheritanceInfoList m_held_lock_info_list{}; + LockWithPriorityInheritanceInfo* m_waiting_lock_info{}; + WaiterList m_pinned_waiter_list{}; + u32 m_address_key_value{}; + u32 m_suspend_request_flags{}; + u32 m_suspend_allowed_flags{}; + s32 m_synced_index{}; + Result m_wait_result{ResultSuccess}; + s32 m_base_priority{}; + s32 m_physical_ideal_core_id{}; + s32 m_virtual_ideal_core_id{}; + s32 m_num_kernel_waiters{}; + s32 m_current_core_id{}; + s32 m_core_id{}; + KAffinityMask m_original_physical_affinity_mask{}; + s32 m_original_physical_ideal_core_id{}; + s32 m_num_core_migration_disables{}; + std::atomic<ThreadState> m_thread_state{}; + std::atomic<bool> m_termination_requested{}; + bool m_wait_cancelled{}; + bool m_cancellable{}; + bool m_signaled{}; + bool m_initialized{}; + bool m_debug_attached{}; + s8 m_priority_inheritance_count{}; + bool m_resource_limit_release_hint{}; + bool m_is_kernel_address_key{}; + StackParameters m_stack_parameters{}; + Common::SpinLock m_context_guard{}; // For emulation - std::shared_ptr<Common::Fiber> host_context{}; - bool is_single_core{}; - ThreadType thread_type{}; - StepState step_state{}; - std::atomic<bool> dummy_thread_runnable{true}; + std::shared_ptr<Common::Fiber> m_host_context{}; + ThreadType m_thread_type{}; + StepState m_step_state{}; + std::atomic<bool> m_dummy_thread_runnable{true}; // For debugging - std::vector<KSynchronizationObject*> wait_objects_for_debugging; - VAddr mutex_wait_address_for_debugging{}; - ThreadWaitReasonForDebugging wait_reason_for_debugging{}; - uintptr_t argument{}; - VAddr stack_top{}; + std::vector<KSynchronizationObject*> m_wait_objects_for_debugging{}; + VAddr m_mutex_wait_address_for_debugging{}; + ThreadWaitReasonForDebugging m_wait_reason_for_debugging{}; + uintptr_t m_argument{}; + VAddr m_stack_top{}; public: using ConditionVariableThreadTreeType = ConditionVariableThreadTree; void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key, u32 value) { - ASSERT(waiting_lock_info == nullptr); - condvar_tree = tree; - condvar_key = cv_key; - address_key = address; - address_key_value = value; - is_kernel_address_key = false; + ASSERT(m_waiting_lock_info == nullptr); + m_condvar_tree = tree; + m_condvar_key = cv_key; + m_address_key = address; + m_address_key_value = value; + m_is_kernel_address_key = false; } void ClearConditionVariable() { - condvar_tree = nullptr; + m_condvar_tree = nullptr; } - [[nodiscard]] bool IsWaitingForConditionVariable() const { - return condvar_tree != nullptr; + bool IsWaitingForConditionVariable() const { + return m_condvar_tree != nullptr; } void SetAddressArbiter(ConditionVariableThreadTree* tree, u64 address) { - ASSERT(waiting_lock_info == nullptr); - condvar_tree = tree; - condvar_key = address; + ASSERT(m_waiting_lock_info == nullptr); + m_condvar_tree = tree; + m_condvar_key = address; } void ClearAddressArbiter() { - condvar_tree = nullptr; + m_condvar_tree = nullptr; } - [[nodiscard]] bool IsWaitingForAddressArbiter() const { - return condvar_tree != nullptr; + bool IsWaitingForAddressArbiter() const { + return m_condvar_tree != nullptr; } - [[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const { - return condvar_tree; + ConditionVariableThreadTree* GetConditionVariableTree() const { + return m_condvar_tree; } }; class KScopedDisableDispatch { public: - [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} { + explicit KScopedDisableDispatch(KernelCore& 
kernel) : m_kernel{kernel} { // If we are shutting down the kernel, none of this is relevant anymore. - if (kernel.IsShuttingDown()) { + if (m_kernel.IsShuttingDown()) { return; } GetCurrentThread(kernel).DisableDispatch(); @@ -980,7 +945,7 @@ public: ~KScopedDisableDispatch(); private: - KernelCore& kernel; + KernelCore& m_kernel; }; inline void KTimerTask::OnTimer() { diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp index 563560114..c2af6898a 100644 --- a/src/core/hle/kernel/k_thread_local_page.cpp +++ b/src/core/hle/kernel/k_thread_local_page.cpp @@ -16,7 +16,7 @@ namespace Kernel { Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) { // Set that this process owns us. m_owner = process; - m_kernel = &kernel; + m_kernel = std::addressof(kernel); // Allocate a new page. KPageBuffer* page_buf = KPageBuffer::Allocate(kernel); diff --git a/src/core/hle/kernel/k_thread_queue.cpp b/src/core/hle/kernel/k_thread_queue.cpp index fe648447b..61488f4ce 100644 --- a/src/core/hle/kernel/k_thread_queue.cpp +++ b/src/core/hle/kernel/k_thread_queue.cpp @@ -7,9 +7,10 @@ namespace Kernel { -void KThreadQueue::NotifyAvailable([[maybe_unused]] KThread* waiting_thread, - [[maybe_unused]] KSynchronizationObject* signaled_object, - [[maybe_unused]] Result wait_result) {} +void KThreadQueue::NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object, + Result wait_result) { + UNREACHABLE(); +} void KThreadQueue::EndWait(KThread* waiting_thread, Result wait_result) { // Set the thread's wait result. @@ -43,7 +44,8 @@ void KThreadQueue::CancelWait(KThread* waiting_thread, Result wait_result, bool } } -void KThreadQueueWithoutEndWait::EndWait([[maybe_unused]] KThread* waiting_thread, - [[maybe_unused]] Result wait_result) {} +void KThreadQueueWithoutEndWait::EndWait(KThread* waiting_thread, Result wait_result) { + UNREACHABLE(); +} } // namespace Kernel diff --git a/src/core/hle/kernel/k_thread_queue.h b/src/core/hle/kernel/k_thread_queue.h index 01e330e2e..117af0919 100644 --- a/src/core/hle/kernel/k_thread_queue.h +++ b/src/core/hle/kernel/k_thread_queue.h @@ -12,7 +12,7 @@ class KHardwareTimer; class KThreadQueue { public: - explicit KThreadQueue(KernelCore& kernel_) : kernel{kernel_}, m_hardware_timer{} {} + explicit KThreadQueue(KernelCore& kernel) : m_kernel{kernel}, m_hardware_timer{} {} virtual ~KThreadQueue() = default; void SetHardwareTimer(KHardwareTimer* timer) { @@ -25,13 +25,13 @@ public: virtual void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task); private: - KernelCore& kernel; + KernelCore& m_kernel; KHardwareTimer* m_hardware_timer{}; }; class KThreadQueueWithoutEndWait : public KThreadQueue { public: - explicit KThreadQueueWithoutEndWait(KernelCore& kernel_) : KThreadQueue(kernel_) {} + explicit KThreadQueueWithoutEndWait(KernelCore& kernel) : KThreadQueue(kernel) {} void EndWait(KThread* waiting_thread, Result wait_result) override final; }; diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp index faa5c73b5..471349282 100644 --- a/src/core/hle/kernel/k_transfer_memory.cpp +++ b/src/core/hle/kernel/k_transfer_memory.cpp @@ -8,32 +8,29 @@ namespace Kernel { -KTransferMemory::KTransferMemory(KernelCore& kernel_) - : KAutoObjectWithSlabHeapAndContainer{kernel_} {} +KTransferMemory::KTransferMemory(KernelCore& kernel) + : KAutoObjectWithSlabHeapAndContainer{kernel} {} KTransferMemory::~KTransferMemory() = default; -Result 
KTransferMemory::Initialize(VAddr address_, std::size_t size_, - Svc::MemoryPermission owner_perm_) { +Result KTransferMemory::Initialize(VAddr address, std::size_t size, + Svc::MemoryPermission owner_perm) { // Set members. - owner = GetCurrentProcessPointer(kernel); + m_owner = GetCurrentProcessPointer(m_kernel); // TODO(bunnei): Lock for transfer memory // Set remaining tracking members. - owner->Open(); - owner_perm = owner_perm_; - address = address_; - size = size_; - is_initialized = true; + m_owner->Open(); + m_owner_perm = owner_perm; + m_address = address; + m_size = size; + m_is_initialized = true; - return ResultSuccess; + R_SUCCEED(); } -void KTransferMemory::Finalize() { - // Perform inherited finalization. - KAutoObjectWithSlabHeapAndContainer<KTransferMemory, KAutoObjectWithList>::Finalize(); -} +void KTransferMemory::Finalize() {} void KTransferMemory::PostDestroy(uintptr_t arg) { KProcess* owner = reinterpret_cast<KProcess*>(arg); diff --git a/src/core/hle/kernel/k_transfer_memory.h b/src/core/hle/kernel/k_transfer_memory.h index 85d508ee7..3d4d795a5 100644 --- a/src/core/hle/kernel/k_transfer_memory.h +++ b/src/core/hle/kernel/k_transfer_memory.h @@ -23,41 +23,41 @@ class KTransferMemory final KERNEL_AUTOOBJECT_TRAITS(KTransferMemory, KAutoObject); public: - explicit KTransferMemory(KernelCore& kernel_); + explicit KTransferMemory(KernelCore& kernel); ~KTransferMemory() override; - Result Initialize(VAddr address_, std::size_t size_, Svc::MemoryPermission owner_perm_); + Result Initialize(VAddr address, std::size_t size, Svc::MemoryPermission owner_perm); void Finalize() override; bool IsInitialized() const override { - return is_initialized; + return m_is_initialized; } uintptr_t GetPostDestroyArgument() const override { - return reinterpret_cast<uintptr_t>(owner); + return reinterpret_cast<uintptr_t>(m_owner); } static void PostDestroy(uintptr_t arg); KProcess* GetOwner() const override { - return owner; + return m_owner; } VAddr GetSourceAddress() const { - return address; + return m_address; } size_t GetSize() const { - return is_initialized ? size : 0; + return m_is_initialized ? 
m_size : 0; } private: - KProcess* owner{}; - VAddr address{}; - Svc::MemoryPermission owner_perm{}; - size_t size{}; - bool is_initialized{}; + KProcess* m_owner{}; + VAddr m_address{}; + Svc::MemoryPermission m_owner_perm{}; + size_t m_size{}; + bool m_is_initialized{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_worker_task.h b/src/core/hle/kernel/k_worker_task.h index ef591d831..9a230c03c 100644 --- a/src/core/hle/kernel/k_worker_task.h +++ b/src/core/hle/kernel/k_worker_task.h @@ -9,7 +9,7 @@ namespace Kernel { class KWorkerTask : public KSynchronizationObject { public: - explicit KWorkerTask(KernelCore& kernel_); + explicit KWorkerTask(KernelCore& kernel); void DoWorkerTask(); }; diff --git a/src/core/hle/kernel/k_worker_task_manager.cpp b/src/core/hle/kernel/k_worker_task_manager.cpp index 04042bf8f..8ead39591 100644 --- a/src/core/hle/kernel/k_worker_task_manager.cpp +++ b/src/core/hle/kernel/k_worker_task_manager.cpp @@ -10,7 +10,7 @@ namespace Kernel { -KWorkerTask::KWorkerTask(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} +KWorkerTask::KWorkerTask(KernelCore& kernel) : KSynchronizationObject{kernel} {} void KWorkerTask::DoWorkerTask() { if (auto* const thread = this->DynamicCast<KThread*>(); thread != nullptr) { diff --git a/src/core/hle/kernel/k_worker_task_manager.h b/src/core/hle/kernel/k_worker_task_manager.h index f6618883e..8745a4ce2 100644 --- a/src/core/hle/kernel/k_worker_task_manager.h +++ b/src/core/hle/kernel/k_worker_task_manager.h @@ -20,7 +20,7 @@ public: KWorkerTaskManager(); - static void AddTask(KernelCore& kernel_, WorkerType type, KWorkerTask* task); + static void AddTask(KernelCore& kernel, WorkerType type, KWorkerTask* task); private: void AddTask(KernelCore& kernel, KWorkerTask* task); diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index ef7057ff7..98ecaf12f 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -214,7 +214,6 @@ struct KernelCore::Impl { cores[i] = std::make_unique<Kernel::PhysicalCore>(i, system, *schedulers[i]); auto* main_thread{Kernel::KThread::Create(system.Kernel())}; - main_thread->SetName(fmt::format("MainThread:{}", core)); main_thread->SetCurrentCore(core); ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess()); @@ -356,7 +355,6 @@ struct KernelCore::Impl { ASSERT(KThread::InitializeHighPriorityThread(system, shutdown_threads[core_id], {}, {}, core_id) .IsSuccess()); - shutdown_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id)); } } @@ -388,11 +386,10 @@ struct KernelCore::Impl { // Gets the dummy KThread for the caller, allocating a new one if this is the first time KThread* GetHostDummyThread(KThread* existing_thread) { - auto initialize = [this](KThread* thread) { + const auto initialize{[](KThread* thread) { ASSERT(KThread::InitializeDummyThread(thread, nullptr).IsSuccess()); - thread->SetName(fmt::format("DummyThread:{}", next_host_thread_id++)); return thread; - }; + }}; thread_local KThread raw_thread{system.Kernel()}; thread_local KThread* thread = existing_thread ? 
existing_thread : initialize(&raw_thread); @@ -742,16 +739,15 @@ struct KernelCore::Impl { hidbus_shared_mem = KSharedMemory::Create(system.Kernel()); hid_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None, - Svc::MemoryPermission::Read, hid_size, "HID:SharedMemory"); + Svc::MemoryPermission::Read, hid_size); font_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None, - Svc::MemoryPermission::Read, font_size, "Font:SharedMemory"); + Svc::MemoryPermission::Read, font_size); irs_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None, - Svc::MemoryPermission::Read, irs_size, "IRS:SharedMemory"); + Svc::MemoryPermission::Read, irs_size); time_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None, - Svc::MemoryPermission::Read, time_size, "Time:SharedMemory"); + Svc::MemoryPermission::Read, time_size); hidbus_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None, - Svc::MemoryPermission::Read, hidbus_size, - "HidBus:SharedMemory"); + Svc::MemoryPermission::Read, hidbus_size); } std::mutex registered_objects_lock; @@ -1321,7 +1317,6 @@ const Core::System& KernelCore::System() const { struct KernelCore::SlabHeapContainer { KSlabHeap<KClientSession> client_session; KSlabHeap<KEvent> event; - KSlabHeap<KLinkedListNode> linked_list_node; KSlabHeap<KPort> port; KSlabHeap<KProcess> process; KSlabHeap<KResourceLimit> resource_limit; @@ -1348,8 +1343,6 @@ KSlabHeap<T>& KernelCore::SlabHeap() { return slab_heap_container->client_session; } else if constexpr (std::is_same_v<T, KEvent>) { return slab_heap_container->event; - } else if constexpr (std::is_same_v<T, KLinkedListNode>) { - return slab_heap_container->linked_list_node; } else if constexpr (std::is_same_v<T, KPort>) { return slab_heap_container->port; } else if constexpr (std::is_same_v<T, KProcess>) { @@ -1391,7 +1384,6 @@ KSlabHeap<T>& KernelCore::SlabHeap() { template KSlabHeap<KClientSession>& KernelCore::SlabHeap(); template KSlabHeap<KEvent>& KernelCore::SlabHeap(); -template KSlabHeap<KLinkedListNode>& KernelCore::SlabHeap(); template KSlabHeap<KPort>& KernelCore::SlabHeap(); template KSlabHeap<KProcess>& KernelCore::SlabHeap(); template KSlabHeap<KResourceLimit>& KernelCore::SlabHeap(); diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 1b380a07b..183a4d227 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -47,7 +47,6 @@ class KEvent; class KEventInfo; class KHandleTable; class KHardwareTimer; -class KLinkedListNode; class KMemoryLayout; class KMemoryManager; class KObjectName; diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp index 3044922ac..2e0c36129 100644 --- a/src/core/hle/kernel/physical_core.cpp +++ b/src/core/hle/kernel/physical_core.cpp @@ -10,14 +10,14 @@ namespace Kernel { -PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_) - : core_index{core_index_}, system{system_}, scheduler{scheduler_} { +PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system, KScheduler& scheduler) + : m_core_index{core_index}, m_system{system}, m_scheduler{scheduler} { #if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64) // TODO(bunnei): Initialization relies on a core being available. We may later replace this with // a 32-bit instance of Dynarmic. This should be abstracted out to a CPU manager. 
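
// Aside: the GetHostDummyThread change above keeps the function-local
// thread_local lazy-init pattern while dropping the per-thread name. A
// minimal, self-contained sketch of that pattern (illustrative names, not
// yuzu's): the initializer lambda runs once per host thread, and later
// calls reuse the cached pointer.
#include <cassert>

struct Thread {
    bool initialized = false;
};

inline Thread* GetPerThreadObject() {
    const auto initialize{[](Thread* thread) {
        thread->initialized = true;  // one-time setup for this host thread
        return thread;
    }};
    // Constructed on first use in each host thread, then reused.
    thread_local Thread raw_thread{};
    thread_local Thread* thread = initialize(&raw_thread);
    return thread;
}

int main() {
    // Same pointer on every call from the same thread.
    assert(GetPerThreadObject() == GetPerThreadObject());
    assert(GetPerThreadObject()->initialized);
}
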
auto& kernel = system.Kernel(); - arm_interface = std::make_unique<Core::ARM_Dynarmic_64>( - system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index); + m_arm_interface = std::make_unique<Core::ARM_Dynarmic_64>( + system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), m_core_index); #else #error Platform not supported yet. #endif @@ -25,13 +25,13 @@ PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KSche PhysicalCore::~PhysicalCore() = default; -void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) { +void PhysicalCore::Initialize(bool is_64_bit) { #if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64) - auto& kernel = system.Kernel(); + auto& kernel = m_system.Kernel(); if (!is_64_bit) { // We already initialized a 64-bit core, replace with a 32-bit one. - arm_interface = std::make_unique<Core::ARM_Dynarmic_32>( - system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index); + m_arm_interface = std::make_unique<Core::ARM_Dynarmic_32>( + m_system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), m_core_index); } #else #error Platform not supported yet. @@ -39,31 +39,30 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) { } void PhysicalCore::Run() { - arm_interface->Run(); - arm_interface->ClearExclusiveState(); + m_arm_interface->Run(); + m_arm_interface->ClearExclusiveState(); } void PhysicalCore::Idle() { - std::unique_lock lk{guard}; - on_interrupt.wait(lk, [this] { return is_interrupted; }); + std::unique_lock lk{m_guard}; + m_on_interrupt.wait(lk, [this] { return m_is_interrupted; }); } bool PhysicalCore::IsInterrupted() const { - return is_interrupted; + return m_is_interrupted; } void PhysicalCore::Interrupt() { - std::unique_lock lk{guard}; - is_interrupted = true; - arm_interface->SignalInterrupt(); - on_interrupt.notify_all(); + std::unique_lock lk{m_guard}; + m_is_interrupted = true; + m_arm_interface->SignalInterrupt(); + m_on_interrupt.notify_all(); } void PhysicalCore::ClearInterrupt() { - std::unique_lock lk{guard}; - is_interrupted = false; - arm_interface->ClearInterrupt(); - on_interrupt.notify_all(); + std::unique_lock lk{m_guard}; + m_is_interrupted = false; + m_arm_interface->ClearInterrupt(); } } // namespace Kernel diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h index fb8e7933e..5cb398fdc 100644 --- a/src/core/hle/kernel/physical_core.h +++ b/src/core/hle/kernel/physical_core.h @@ -47,46 +47,38 @@ public: bool IsInterrupted() const; bool IsInitialized() const { - return arm_interface != nullptr; + return m_arm_interface != nullptr; } Core::ARM_Interface& ArmInterface() { - return *arm_interface; + return *m_arm_interface; } const Core::ARM_Interface& ArmInterface() const { - return *arm_interface; - } - - bool IsMainCore() const { - return core_index == 0; - } - - bool IsSystemCore() const { - return core_index == 3; + return *m_arm_interface; } std::size_t CoreIndex() const { - return core_index; + return m_core_index; } Kernel::KScheduler& Scheduler() { - return scheduler; + return m_scheduler; } const Kernel::KScheduler& Scheduler() const { - return scheduler; + return m_scheduler; } private: - const std::size_t core_index; - Core::System& system; - Kernel::KScheduler& scheduler; - - std::mutex guard; - std::condition_variable on_interrupt; - std::unique_ptr<Core::ARM_Interface> arm_interface; - bool is_interrupted{}; + const std::size_t m_core_index; + Core::System& m_system; + Kernel::KScheduler& m_scheduler; + + std::mutex m_guard; 
+ std::condition_variable m_on_interrupt; + std::unique_ptr<Core::ARM_Interface> m_arm_interface; + bool m_is_interrupted{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h index 0228ce188..d1bbc7670 100644 --- a/src/core/hle/kernel/slab_helpers.h +++ b/src/core/hle/kernel/slab_helpers.h @@ -66,7 +66,7 @@ private: } public: - explicit KAutoObjectWithSlabHeap(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {} + explicit KAutoObjectWithSlabHeap(KernelCore& kernel) : Base(kernel) {} virtual ~KAutoObjectWithSlabHeap() = default; virtual void Destroy() override { @@ -76,7 +76,7 @@ public: arg = this->GetPostDestroyArgument(); this->Finalize(); } - Free(kernel, static_cast<Derived*>(this)); + Free(Base::m_kernel, static_cast<Derived*>(this)); if (is_initialized) { Derived::PostDestroy(arg); } @@ -90,7 +90,7 @@ public: } size_t GetSlabIndex() const { - return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this)); + return SlabHeap<Derived>(Base::m_kernel).GetObjectIndex(static_cast<const Derived*>(this)); } public: @@ -125,14 +125,11 @@ public: static size_t GetNumRemaining(KernelCore& kernel) { return kernel.SlabHeap<Derived>().GetNumRemaining(); } - -protected: - KernelCore& kernel; }; template <typename Derived, typename Base> class KAutoObjectWithSlabHeapAndContainer : public Base { - static_assert(std::is_base_of<KAutoObjectWithList, Base>::value); + static_assert(std::is_base_of_v<KAutoObjectWithList, Base>); private: static Derived* Allocate(KernelCore& kernel) { @@ -144,18 +141,18 @@ private: } public: - KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {} + KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel) : Base(kernel) {} virtual ~KAutoObjectWithSlabHeapAndContainer() {} virtual void Destroy() override { const bool is_initialized = this->IsInitialized(); uintptr_t arg = 0; if (is_initialized) { - kernel.ObjectListContainer().Unregister(this); + Base::m_kernel.ObjectListContainer().Unregister(this); arg = this->GetPostDestroyArgument(); this->Finalize(); } - Free(kernel, static_cast<Derived*>(this)); + Free(Base::m_kernel, static_cast<Derived*>(this)); if (is_initialized) { Derived::PostDestroy(arg); } @@ -169,7 +166,7 @@ public: } size_t GetSlabIndex() const { - return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this)); + return SlabHeap<Derived>(Base::m_kernel).GetObjectIndex(static_cast<const Derived*>(this)); } public: @@ -209,9 +206,6 @@ public: static size_t GetNumRemaining(KernelCore& kernel) { return kernel.SlabHeap<Derived>().GetNumRemaining(); } - -protected: - KernelCore& kernel; }; } // namespace Kernel diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index a0bfd6bbc..871d541d4 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -36,9 +36,9 @@ static To Convert(const From& from) { To to{}; if constexpr (sizeof(To) >= sizeof(From)) { - std::memcpy(&to, &from, sizeof(From)); + std::memcpy(std::addressof(to), std::addressof(from), sizeof(From)); } else { - std::memcpy(&to, &from, sizeof(To)); + std::memcpy(std::addressof(to), std::addressof(from), sizeof(To)); } return to; @@ -87,7 +87,7 @@ static void SvcWrap_SetHeapSize64From32(Core::System& system) { size = Convert<uint32_t>(GetReg32(system, 1)); - ret = SetHeapSize64From32(system, &out_address, size); + ret = SetHeapSize64From32(system, std::addressof(out_address), size); SetReg32(system, 0, Convert<uint32_t>(ret)); 
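
// Aside: the Convert<To> helper rewritten above bit-casts between
// register-sized types by copying only the smaller of the two sizes, so both
// narrowing and zero-filled widening are well-defined. Standalone sketch of
// the same helper:
#include <cstdint>
#include <cstring>
#include <memory>

template <typename To, typename From>
static To Convert(const From& from) {
    To to{};
    if constexpr (sizeof(To) >= sizeof(From)) {
        std::memcpy(std::addressof(to), std::addressof(from), sizeof(From));
    } else {
        std::memcpy(std::addressof(to), std::addressof(from), sizeof(To));
    }
    return to;
}

int main() {
    const uint64_t reg = 0x123456789abcdef0ULL;
    // On a little-endian host this keeps the low 32 bits, matching how the
    // 64From32 wrappers narrow guest register values.
    return Convert<uint32_t>(reg) == 0x9abcdef0u ? 0 : 1;
}
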
SetReg32(system, 1, Convert<uint32_t>(out_address)); @@ -169,7 +169,7 @@ static void SvcWrap_QueryMemory64From32(Core::System& system) { out_memory_info = Convert<uint32_t>(GetReg32(system, 0)); address = Convert<uint32_t>(GetReg32(system, 2)); - ret = QueryMemory64From32(system, out_memory_info, &out_page_info, address); + ret = QueryMemory64From32(system, out_memory_info, std::addressof(out_page_info), address); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_page_info)); @@ -195,7 +195,7 @@ static void SvcWrap_CreateThread64From32(Core::System& system) { priority = Convert<int32_t>(GetReg32(system, 0)); core_id = Convert<int32_t>(GetReg32(system, 4)); - ret = CreateThread64From32(system, &out_handle, func, arg, stack_bottom, priority, core_id); + ret = CreateThread64From32(system, std::addressof(out_handle), func, arg, stack_bottom, priority, core_id); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_handle)); @@ -236,7 +236,7 @@ static void SvcWrap_GetThreadPriority64From32(Core::System& system) { thread_handle = Convert<Handle>(GetReg32(system, 1)); - ret = GetThreadPriority64From32(system, &out_priority, thread_handle); + ret = GetThreadPriority64From32(system, std::addressof(out_priority), thread_handle); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_priority)); @@ -265,7 +265,7 @@ static void SvcWrap_GetThreadCoreMask64From32(Core::System& system) { thread_handle = Convert<Handle>(GetReg32(system, 2)); - ret = GetThreadCoreMask64From32(system, &out_core_id, &out_affinity_mask, thread_handle); + ret = GetThreadCoreMask64From32(system, std::addressof(out_core_id), std::addressof(out_affinity_mask), thread_handle); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_core_id)); @@ -371,7 +371,7 @@ static void SvcWrap_CreateTransferMemory64From32(Core::System& system) { size = Convert<uint32_t>(GetReg32(system, 2)); map_perm = Convert<MemoryPermission>(GetReg32(system, 3)); - ret = CreateTransferMemory64From32(system, &out_handle, address, size, map_perm); + ret = CreateTransferMemory64From32(system, std::addressof(out_handle), address, size, map_perm); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_handle)); @@ -416,7 +416,7 @@ static void SvcWrap_WaitSynchronization64From32(Core::System& system) { timeout_ns_gather[1] = GetReg32(system, 3); timeout_ns = Convert<int64_t>(timeout_ns_gather); - ret = WaitSynchronization64From32(system, &out_index, handles, num_handles, timeout_ns); + ret = WaitSynchronization64From32(system, std::addressof(out_index), handles, num_handles, timeout_ns); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_index)); @@ -511,7 +511,7 @@ static void SvcWrap_ConnectToNamedPort64From32(Core::System& system) { name = Convert<uint32_t>(GetReg32(system, 1)); - ret = ConnectToNamedPort64From32(system, &out_handle, name); + ret = ConnectToNamedPort64From32(system, std::addressof(out_handle), name); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_handle)); @@ -557,7 +557,7 @@ static void SvcWrap_SendAsyncRequestWithUserBuffer64From32(Core::System& system) message_buffer_size = Convert<uint32_t>(GetReg32(system, 2)); session_handle = Convert<Handle>(GetReg32(system, 3)); - ret = SendAsyncRequestWithUserBuffer64From32(system, &out_event_handle, message_buffer, message_buffer_size, session_handle); + ret = 
SendAsyncRequestWithUserBuffer64From32(system, std::addressof(out_event_handle), message_buffer, message_buffer_size, session_handle); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_event_handle)); @@ -571,7 +571,7 @@ static void SvcWrap_GetProcessId64From32(Core::System& system) { process_handle = Convert<Handle>(GetReg32(system, 1)); - ret = GetProcessId64From32(system, &out_process_id, process_handle); + ret = GetProcessId64From32(system, std::addressof(out_process_id), process_handle); SetReg32(system, 0, Convert<uint32_t>(ret)); auto out_process_id_scatter = Convert<std::array<uint32_t, 2>>(out_process_id); @@ -587,7 +587,7 @@ static void SvcWrap_GetThreadId64From32(Core::System& system) { thread_handle = Convert<Handle>(GetReg32(system, 1)); - ret = GetThreadId64From32(system, &out_thread_id, thread_handle); + ret = GetThreadId64From32(system, std::addressof(out_thread_id), thread_handle); SetReg32(system, 0, Convert<uint32_t>(ret)); auto out_thread_id_scatter = Convert<std::array<uint32_t, 2>>(out_thread_id); @@ -644,7 +644,7 @@ static void SvcWrap_GetInfo64From32(Core::System& system) { info_subtype_gather[1] = GetReg32(system, 3); info_subtype = Convert<uint64_t>(info_subtype_gather); - ret = GetInfo64From32(system, &out, info_type, handle, info_subtype); + ret = GetInfo64From32(system, std::addressof(out), info_type, handle, info_subtype); SetReg32(system, 0, Convert<uint32_t>(ret)); auto out_scatter = Convert<std::array<uint32_t, 2>>(out); @@ -712,7 +712,7 @@ static void SvcWrap_GetDebugFutureThreadInfo64From32(Core::System& system) { ns_gather[1] = GetReg32(system, 1); ns = Convert<int64_t>(ns_gather); - ret = GetDebugFutureThreadInfo64From32(system, &out_context, &out_thread_id, debug_handle, ns); + ret = GetDebugFutureThreadInfo64From32(system, std::addressof(out_context), std::addressof(out_thread_id), debug_handle, ns); SetReg32(system, 0, Convert<uint32_t>(ret)); auto out_context_scatter = Convert<std::array<uint32_t, 4>>(out_context); @@ -732,7 +732,7 @@ static void SvcWrap_GetLastThreadInfo64From32(Core::System& system) { uint64_t out_tls_address{}; uint32_t out_flags{}; - ret = GetLastThreadInfo64From32(system, &out_context, &out_tls_address, &out_flags); + ret = GetLastThreadInfo64From32(system, std::addressof(out_context), std::addressof(out_tls_address), std::addressof(out_flags)); SetReg32(system, 0, Convert<uint32_t>(ret)); auto out_context_scatter = Convert<std::array<uint32_t, 4>>(out_context); @@ -754,7 +754,7 @@ static void SvcWrap_GetResourceLimitLimitValue64From32(Core::System& system) { resource_limit_handle = Convert<Handle>(GetReg32(system, 1)); which = Convert<LimitableResource>(GetReg32(system, 2)); - ret = GetResourceLimitLimitValue64From32(system, &out_limit_value, resource_limit_handle, which); + ret = GetResourceLimitLimitValue64From32(system, std::addressof(out_limit_value), resource_limit_handle, which); SetReg32(system, 0, Convert<uint32_t>(ret)); auto out_limit_value_scatter = Convert<std::array<uint32_t, 2>>(out_limit_value); @@ -772,7 +772,7 @@ static void SvcWrap_GetResourceLimitCurrentValue64From32(Core::System& system) { resource_limit_handle = Convert<Handle>(GetReg32(system, 1)); which = Convert<LimitableResource>(GetReg32(system, 2)); - ret = GetResourceLimitCurrentValue64From32(system, &out_current_value, resource_limit_handle, which); + ret = GetResourceLimitCurrentValue64From32(system, std::addressof(out_current_value), resource_limit_handle, which); SetReg32(system, 0, Convert<uint32_t>(ret)); 
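
// Aside: several 64From32 wrappers above gather a 64-bit argument from two
// 32-bit registers (x_gather[0]/x_gather[1]) and scatter 64-bit results back
// the same way, all through Convert. Sketch with a hypothetical register
// file standing in for GetReg32/SetReg32:
#include <array>
#include <cstdint>
#include <cstring>
#include <memory>

template <typename To, typename From>
static To Convert(const From& from) {
    To to{};
    std::memcpy(std::addressof(to), std::addressof(from),
                sizeof(To) < sizeof(From) ? sizeof(To) : sizeof(From));
    return to;
}

static std::array<uint32_t, 8> regs{};  // stand-in for the guest registers
static uint32_t GetReg32(int i) { return regs[i]; }
static void SetReg32(int i, uint32_t v) { regs[i] = v; }

int main() {
    regs[2] = 0x9abcdef0u;  // low word
    regs[3] = 0x12345678u;  // high word

    // Gather: two u32 registers -> one s64 argument (little-endian layout).
    std::array<uint32_t, 2> timeout_ns_gather{GetReg32(2), GetReg32(3)};
    const auto timeout_ns = Convert<int64_t>(timeout_ns_gather);

    // Scatter: one 64-bit result -> two u32 registers.
    const auto scatter = Convert<std::array<uint32_t, 2>>(timeout_ns);
    SetReg32(0, scatter[0]);
    SetReg32(1, scatter[1]);
    return timeout_ns == 0x123456789abcdef0LL && GetReg32(0) == 0x9abcdef0u ? 0 : 1;
}
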
auto out_current_value_scatter = Convert<std::array<uint32_t, 2>>(out_current_value); @@ -861,7 +861,7 @@ static void SvcWrap_GetResourceLimitPeakValue64From32(Core::System& system) { resource_limit_handle = Convert<Handle>(GetReg32(system, 1)); which = Convert<LimitableResource>(GetReg32(system, 2)); - ret = GetResourceLimitPeakValue64From32(system, &out_peak_value, resource_limit_handle, which); + ret = GetResourceLimitPeakValue64From32(system, std::addressof(out_peak_value), resource_limit_handle, which); SetReg32(system, 0, Convert<uint32_t>(ret)); auto out_peak_value_scatter = Convert<std::array<uint32_t, 2>>(out_peak_value); @@ -877,7 +877,7 @@ static void SvcWrap_CreateIoPool64From32(Core::System& system) { which = Convert<IoPoolType>(GetReg32(system, 1)); - ret = CreateIoPool64From32(system, &out_handle, which); + ret = CreateIoPool64From32(system, std::addressof(out_handle), which); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_handle)); @@ -902,7 +902,7 @@ static void SvcWrap_CreateIoRegion64From32(Core::System& system) { mapping = Convert<MemoryMapping>(GetReg32(system, 4)); perm = Convert<MemoryPermission>(GetReg32(system, 5)); - ret = CreateIoRegion64From32(system, &out_handle, io_pool, physical_address, size, mapping, perm); + ret = CreateIoRegion64From32(system, std::addressof(out_handle), io_pool, physical_address, size, mapping, perm); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_handle)); @@ -950,7 +950,7 @@ static void SvcWrap_CreateSession64From32(Core::System& system) { is_light = Convert<bool>(GetReg32(system, 2)); name = Convert<uint32_t>(GetReg32(system, 3)); - ret = CreateSession64From32(system, &out_server_session_handle, &out_client_session_handle, is_light, name); + ret = CreateSession64From32(system, std::addressof(out_server_session_handle), std::addressof(out_client_session_handle), is_light, name); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_server_session_handle)); @@ -965,7 +965,7 @@ static void SvcWrap_AcceptSession64From32(Core::System& system) { port = Convert<Handle>(GetReg32(system, 1)); - ret = AcceptSession64From32(system, &out_handle, port); + ret = AcceptSession64From32(system, std::addressof(out_handle), port); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_handle)); @@ -988,7 +988,7 @@ static void SvcWrap_ReplyAndReceive64From32(Core::System& system) { timeout_ns_gather[1] = GetReg32(system, 4); timeout_ns = Convert<int64_t>(timeout_ns_gather); - ret = ReplyAndReceive64From32(system, &out_index, handles, num_handles, reply_target, timeout_ns); + ret = ReplyAndReceive64From32(system, std::addressof(out_index), handles, num_handles, reply_target, timeout_ns); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_index)); @@ -1015,7 +1015,7 @@ static void SvcWrap_ReplyAndReceiveWithUserBuffer64From32(Core::System& system) timeout_ns_gather[1] = GetReg32(system, 6); timeout_ns = Convert<int64_t>(timeout_ns_gather); - ret = ReplyAndReceiveWithUserBuffer64From32(system, &out_index, message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns); + ret = ReplyAndReceiveWithUserBuffer64From32(system, std::addressof(out_index), message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_index)); @@ -1027,7 +1027,7 @@ static 
void SvcWrap_CreateEvent64From32(Core::System& system) { Handle out_write_handle{}; Handle out_read_handle{}; - ret = CreateEvent64From32(system, &out_write_handle, &out_read_handle); + ret = CreateEvent64From32(system, std::addressof(out_write_handle), std::addressof(out_read_handle)); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_write_handle)); @@ -1118,7 +1118,7 @@ static void SvcWrap_CreateCodeMemory64From32(Core::System& system) { address = Convert<uint32_t>(GetReg32(system, 1)); size = Convert<uint32_t>(GetReg32(system, 2)); - ret = CreateCodeMemory64From32(system, &out_handle, address, size); + ret = CreateCodeMemory64From32(system, std::addressof(out_handle), address, size); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_handle)); @@ -1169,7 +1169,7 @@ static void SvcWrap_ReadWriteRegister64From32(Core::System& system) { mask = Convert<uint32_t>(GetReg32(system, 0)); value = Convert<uint32_t>(GetReg32(system, 1)); - ret = ReadWriteRegister64From32(system, &out_value, address, mask, value); + ret = ReadWriteRegister64From32(system, std::addressof(out_value), address, mask, value); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_value)); @@ -1201,7 +1201,7 @@ static void SvcWrap_CreateSharedMemory64From32(Core::System& system) { owner_perm = Convert<MemoryPermission>(GetReg32(system, 2)); remote_perm = Convert<MemoryPermission>(GetReg32(system, 3)); - ret = CreateSharedMemory64From32(system, &out_handle, size, owner_perm, remote_perm); + ret = CreateSharedMemory64From32(system, std::addressof(out_handle), size, owner_perm, remote_perm); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_handle)); @@ -1251,7 +1251,7 @@ static void SvcWrap_CreateInterruptEvent64From32(Core::System& system) { interrupt_id = Convert<int32_t>(GetReg32(system, 1)); interrupt_type = Convert<InterruptType>(GetReg32(system, 2)); - ret = CreateInterruptEvent64From32(system, &out_read_handle, interrupt_id, interrupt_type); + ret = CreateInterruptEvent64From32(system, std::addressof(out_read_handle), interrupt_id, interrupt_type); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_read_handle)); @@ -1265,7 +1265,7 @@ static void SvcWrap_QueryPhysicalAddress64From32(Core::System& system) { address = Convert<uint32_t>(GetReg32(system, 1)); - ret = QueryPhysicalAddress64From32(system, &out_info, address); + ret = QueryPhysicalAddress64From32(system, std::addressof(out_info), address); SetReg32(system, 0, Convert<uint32_t>(ret)); auto out_info_scatter = Convert<std::array<uint32_t, 4>>(out_info); @@ -1289,7 +1289,7 @@ static void SvcWrap_QueryIoMapping64From32(Core::System& system) { physical_address = Convert<uint64_t>(physical_address_gather); size = Convert<uint32_t>(GetReg32(system, 0)); - ret = QueryIoMapping64From32(system, &out_address, &out_size, physical_address, size); + ret = QueryIoMapping64From32(system, std::addressof(out_address), std::addressof(out_size), physical_address, size); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_address)); @@ -1312,7 +1312,7 @@ static void SvcWrap_CreateDeviceAddressSpace64From32(Core::System& system) { das_size_gather[1] = GetReg32(system, 1); das_size = Convert<uint64_t>(das_size_gather); - ret = CreateDeviceAddressSpace64From32(system, &out_handle, das_address, das_size); + ret = CreateDeviceAddressSpace64From32(system, 
std::addressof(out_handle), das_address, das_size); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_handle)); @@ -1505,7 +1505,7 @@ static void SvcWrap_DebugActiveProcess64From32(Core::System& system) { process_id_gather[1] = GetReg32(system, 3); process_id = Convert<uint64_t>(process_id_gather); - ret = DebugActiveProcess64From32(system, &out_handle, process_id); + ret = DebugActiveProcess64From32(system, std::addressof(out_handle), process_id); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_handle)); @@ -1577,7 +1577,7 @@ static void SvcWrap_GetProcessList64From32(Core::System& system) { out_process_ids = Convert<uint32_t>(GetReg32(system, 1)); max_out_count = Convert<int32_t>(GetReg32(system, 2)); - ret = GetProcessList64From32(system, &out_num_processes, out_process_ids, max_out_count); + ret = GetProcessList64From32(system, std::addressof(out_num_processes), out_process_ids, max_out_count); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_num_processes)); @@ -1595,7 +1595,7 @@ static void SvcWrap_GetThreadList64From32(Core::System& system) { max_out_count = Convert<int32_t>(GetReg32(system, 2)); debug_handle = Convert<Handle>(GetReg32(system, 3)); - ret = GetThreadList64From32(system, &out_num_threads, out_thread_ids, max_out_count, debug_handle); + ret = GetThreadList64From32(system, std::addressof(out_num_threads), out_thread_ids, max_out_count, debug_handle); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_num_threads)); @@ -1655,7 +1655,7 @@ static void SvcWrap_QueryDebugProcessMemory64From32(Core::System& system) { process_handle = Convert<Handle>(GetReg32(system, 2)); address = Convert<uint32_t>(GetReg32(system, 3)); - ret = QueryDebugProcessMemory64From32(system, out_memory_info, &out_page_info, process_handle, address); + ret = QueryDebugProcessMemory64From32(system, out_memory_info, std::addressof(out_page_info), process_handle, address); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_page_info)); @@ -1735,7 +1735,7 @@ static void SvcWrap_GetDebugThreadParam64From32(Core::System& system) { thread_id = Convert<uint64_t>(thread_id_gather); param = Convert<DebugThreadParam>(GetReg32(system, 3)); - ret = GetDebugThreadParam64From32(system, &out_64, &out_32, debug_handle, thread_id, param); + ret = GetDebugThreadParam64From32(system, std::addressof(out_64), std::addressof(out_32), debug_handle, thread_id, param); SetReg32(system, 0, Convert<uint32_t>(ret)); auto out_64_scatter = Convert<std::array<uint32_t, 2>>(out_64); @@ -1759,7 +1759,7 @@ static void SvcWrap_GetSystemInfo64From32(Core::System& system) { info_subtype_gather[1] = GetReg32(system, 3); info_subtype = Convert<uint64_t>(info_subtype_gather); - ret = GetSystemInfo64From32(system, &out, info_type, handle, info_subtype); + ret = GetSystemInfo64From32(system, std::addressof(out), info_type, handle, info_subtype); SetReg32(system, 0, Convert<uint32_t>(ret)); auto out_scatter = Convert<std::array<uint32_t, 2>>(out); @@ -1780,7 +1780,7 @@ static void SvcWrap_CreatePort64From32(Core::System& system) { is_light = Convert<bool>(GetReg32(system, 3)); name = Convert<uint32_t>(GetReg32(system, 0)); - ret = CreatePort64From32(system, &out_server_handle, &out_client_handle, max_sessions, is_light, name); + ret = CreatePort64From32(system, std::addressof(out_server_handle), std::addressof(out_client_handle), max_sessions, is_light, name); 
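
// Aside: the sweep from &x to std::addressof(x) throughout these wrappers is
// the usual hardening: operator& can be overloaded, while std::addressof
// always yields the real address. Contrived demonstration:
#include <memory>

struct Evil {
    int value = 0;
    Evil* operator&() { return nullptr; }  // hostile overload
};

int main() {
    Evil e;
    Evil* a = &e;                 // calls the overload: nullptr
    Evil* b = std::addressof(e);  // the actual object address
    return (a == nullptr && b != nullptr) ? 0 : 1;
}
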
SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_server_handle)); @@ -1797,7 +1797,7 @@ static void SvcWrap_ManageNamedPort64From32(Core::System& system) { name = Convert<uint32_t>(GetReg32(system, 1)); max_sessions = Convert<int32_t>(GetReg32(system, 2)); - ret = ManageNamedPort64From32(system, &out_server_handle, name, max_sessions); + ret = ManageNamedPort64From32(system, std::addressof(out_server_handle), name, max_sessions); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_server_handle)); @@ -1811,7 +1811,7 @@ static void SvcWrap_ConnectToPort64From32(Core::System& system) { port = Convert<Handle>(GetReg32(system, 1)); - ret = ConnectToPort64From32(system, &out_handle, port); + ret = ConnectToPort64From32(system, std::addressof(out_handle), port); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_handle)); @@ -1898,7 +1898,7 @@ static void SvcWrap_QueryProcessMemory64From32(Core::System& system) { address_gather[1] = GetReg32(system, 3); address = Convert<uint64_t>(address_gather); - ret = QueryProcessMemory64From32(system, out_memory_info, &out_page_info, process_handle, address); + ret = QueryProcessMemory64From32(system, out_memory_info, std::addressof(out_page_info), process_handle, address); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_page_info)); @@ -1970,7 +1970,7 @@ static void SvcWrap_CreateProcess64From32(Core::System& system) { caps = Convert<uint32_t>(GetReg32(system, 2)); num_caps = Convert<int32_t>(GetReg32(system, 3)); - ret = CreateProcess64From32(system, &out_handle, parameters, caps, num_caps); + ret = CreateProcess64From32(system, std::addressof(out_handle), parameters, caps, num_caps); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_handle)); @@ -2019,7 +2019,7 @@ static void SvcWrap_GetProcessInfo64From32(Core::System& system) { process_handle = Convert<Handle>(GetReg32(system, 1)); info_type = Convert<ProcessInfoType>(GetReg32(system, 2)); - ret = GetProcessInfo64From32(system, &out_info, process_handle, info_type); + ret = GetProcessInfo64From32(system, std::addressof(out_info), process_handle, info_type); SetReg32(system, 0, Convert<uint32_t>(ret)); auto out_info_scatter = Convert<std::array<uint32_t, 2>>(out_info); @@ -2032,7 +2032,7 @@ static void SvcWrap_CreateResourceLimit64From32(Core::System& system) { Handle out_handle{}; - ret = CreateResourceLimit64From32(system, &out_handle); + ret = CreateResourceLimit64From32(system, std::addressof(out_handle)); SetReg32(system, 0, Convert<uint32_t>(ret)); SetReg32(system, 1, Convert<uint32_t>(out_handle)); @@ -2093,7 +2093,7 @@ static void SvcWrap_SetHeapSize64(Core::System& system) { size = Convert<uint64_t>(GetReg64(system, 1)); - ret = SetHeapSize64(system, &out_address, size); + ret = SetHeapSize64(system, std::addressof(out_address), size); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_address)); @@ -2175,7 +2175,7 @@ static void SvcWrap_QueryMemory64(Core::System& system) { out_memory_info = Convert<uint64_t>(GetReg64(system, 0)); address = Convert<uint64_t>(GetReg64(system, 2)); - ret = QueryMemory64(system, out_memory_info, &out_page_info, address); + ret = QueryMemory64(system, out_memory_info, std::addressof(out_page_info), address); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_page_info)); @@ -2201,7 +2201,7 @@ static void 
SvcWrap_CreateThread64(Core::System& system) { priority = Convert<int32_t>(GetReg64(system, 4)); core_id = Convert<int32_t>(GetReg64(system, 5)); - ret = CreateThread64(system, &out_handle, func, arg, stack_bottom, priority, core_id); + ret = CreateThread64(system, std::addressof(out_handle), func, arg, stack_bottom, priority, core_id); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_handle)); @@ -2239,7 +2239,7 @@ static void SvcWrap_GetThreadPriority64(Core::System& system) { thread_handle = Convert<Handle>(GetReg64(system, 1)); - ret = GetThreadPriority64(system, &out_priority, thread_handle); + ret = GetThreadPriority64(system, std::addressof(out_priority), thread_handle); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_priority)); @@ -2268,7 +2268,7 @@ static void SvcWrap_GetThreadCoreMask64(Core::System& system) { thread_handle = Convert<Handle>(GetReg64(system, 2)); - ret = GetThreadCoreMask64(system, &out_core_id, &out_affinity_mask, thread_handle); + ret = GetThreadCoreMask64(system, std::addressof(out_core_id), std::addressof(out_affinity_mask), thread_handle); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_core_id)); @@ -2369,7 +2369,7 @@ static void SvcWrap_CreateTransferMemory64(Core::System& system) { size = Convert<uint64_t>(GetReg64(system, 2)); map_perm = Convert<MemoryPermission>(GetReg64(system, 3)); - ret = CreateTransferMemory64(system, &out_handle, address, size, map_perm); + ret = CreateTransferMemory64(system, std::addressof(out_handle), address, size, map_perm); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_handle)); @@ -2411,7 +2411,7 @@ static void SvcWrap_WaitSynchronization64(Core::System& system) { num_handles = Convert<int32_t>(GetReg64(system, 2)); timeout_ns = Convert<int64_t>(GetReg64(system, 3)); - ret = WaitSynchronization64(system, &out_index, handles, num_handles, timeout_ns); + ret = WaitSynchronization64(system, std::addressof(out_index), handles, num_handles, timeout_ns); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_index)); @@ -2501,7 +2501,7 @@ static void SvcWrap_ConnectToNamedPort64(Core::System& system) { name = Convert<uint64_t>(GetReg64(system, 1)); - ret = ConnectToNamedPort64(system, &out_handle, name); + ret = ConnectToNamedPort64(system, std::addressof(out_handle), name); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_handle)); @@ -2547,7 +2547,7 @@ static void SvcWrap_SendAsyncRequestWithUserBuffer64(Core::System& system) { message_buffer_size = Convert<uint64_t>(GetReg64(system, 2)); session_handle = Convert<Handle>(GetReg64(system, 3)); - ret = SendAsyncRequestWithUserBuffer64(system, &out_event_handle, message_buffer, message_buffer_size, session_handle); + ret = SendAsyncRequestWithUserBuffer64(system, std::addressof(out_event_handle), message_buffer, message_buffer_size, session_handle); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_event_handle)); @@ -2561,7 +2561,7 @@ static void SvcWrap_GetProcessId64(Core::System& system) { process_handle = Convert<Handle>(GetReg64(system, 1)); - ret = GetProcessId64(system, &out_process_id, process_handle); + ret = GetProcessId64(system, std::addressof(out_process_id), process_handle); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_process_id)); @@ -2575,7 +2575,7 @@ static void 
SvcWrap_GetThreadId64(Core::System& system) { thread_handle = Convert<Handle>(GetReg64(system, 1)); - ret = GetThreadId64(system, &out_thread_id, thread_handle); + ret = GetThreadId64(system, std::addressof(out_thread_id), thread_handle); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_thread_id)); @@ -2627,7 +2627,7 @@ static void SvcWrap_GetInfo64(Core::System& system) { handle = Convert<Handle>(GetReg64(system, 2)); info_subtype = Convert<uint64_t>(GetReg64(system, 3)); - ret = GetInfo64(system, &out, info_type, handle, info_subtype); + ret = GetInfo64(system, std::addressof(out), info_type, handle, info_subtype); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out)); @@ -2690,7 +2690,7 @@ static void SvcWrap_GetDebugFutureThreadInfo64(Core::System& system) { debug_handle = Convert<Handle>(GetReg64(system, 2)); ns = Convert<int64_t>(GetReg64(system, 3)); - ret = GetDebugFutureThreadInfo64(system, &out_context, &out_thread_id, debug_handle, ns); + ret = GetDebugFutureThreadInfo64(system, std::addressof(out_context), std::addressof(out_thread_id), debug_handle, ns); SetReg64(system, 0, Convert<uint64_t>(ret)); auto out_context_scatter = Convert<std::array<uint64_t, 4>>(out_context); @@ -2708,7 +2708,7 @@ static void SvcWrap_GetLastThreadInfo64(Core::System& system) { uint64_t out_tls_address{}; uint32_t out_flags{}; - ret = GetLastThreadInfo64(system, &out_context, &out_tls_address, &out_flags); + ret = GetLastThreadInfo64(system, std::addressof(out_context), std::addressof(out_tls_address), std::addressof(out_flags)); SetReg64(system, 0, Convert<uint64_t>(ret)); auto out_context_scatter = Convert<std::array<uint64_t, 4>>(out_context); @@ -2730,7 +2730,7 @@ static void SvcWrap_GetResourceLimitLimitValue64(Core::System& system) { resource_limit_handle = Convert<Handle>(GetReg64(system, 1)); which = Convert<LimitableResource>(GetReg64(system, 2)); - ret = GetResourceLimitLimitValue64(system, &out_limit_value, resource_limit_handle, which); + ret = GetResourceLimitLimitValue64(system, std::addressof(out_limit_value), resource_limit_handle, which); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_limit_value)); @@ -2746,7 +2746,7 @@ static void SvcWrap_GetResourceLimitCurrentValue64(Core::System& system) { resource_limit_handle = Convert<Handle>(GetReg64(system, 1)); which = Convert<LimitableResource>(GetReg64(system, 2)); - ret = GetResourceLimitCurrentValue64(system, &out_current_value, resource_limit_handle, which); + ret = GetResourceLimitCurrentValue64(system, std::addressof(out_current_value), resource_limit_handle, which); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_current_value)); @@ -2830,7 +2830,7 @@ static void SvcWrap_GetResourceLimitPeakValue64(Core::System& system) { resource_limit_handle = Convert<Handle>(GetReg64(system, 1)); which = Convert<LimitableResource>(GetReg64(system, 2)); - ret = GetResourceLimitPeakValue64(system, &out_peak_value, resource_limit_handle, which); + ret = GetResourceLimitPeakValue64(system, std::addressof(out_peak_value), resource_limit_handle, which); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_peak_value)); @@ -2844,7 +2844,7 @@ static void SvcWrap_CreateIoPool64(Core::System& system) { which = Convert<IoPoolType>(GetReg64(system, 1)); - ret = CreateIoPool64(system, &out_handle, which); + ret = CreateIoPool64(system, std::addressof(out_handle), which); 
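
// Aside: every SvcWrap_* above follows one shape: unpack typed arguments
// from registers, call the typed implementation with std::addressof for the
// outputs, then write the Result and outputs back to registers. Condensed
// sketch with stand-in types (System, GetReg64/SetReg64 are hypothetical):
#include <cstdint>
#include <memory>

using Result = uint32_t;
using Handle = uint32_t;

struct System {
    uint64_t x[8]{};
};
static uint64_t GetReg64(System& s, int i) { return s.x[i]; }
static void SetReg64(System& s, int i, uint64_t v) { s.x[i] = v; }

// Stand-in for a typed SVC implementation such as CreateIoPool64.
static Result CreateIoPool64(System&, Handle* out_handle, uint32_t which) {
    *out_handle = 0x1000u + which;
    return 0;  // ResultSuccess
}

static void SvcWrap_CreateIoPool64(System& system) {
    Handle out_handle{};
    const auto which = static_cast<uint32_t>(GetReg64(system, 1));
    const Result ret = CreateIoPool64(system, std::addressof(out_handle), which);
    SetReg64(system, 0, ret);
    SetReg64(system, 1, out_handle);
}

int main() {
    System s{};
    s.x[1] = 2;
    SvcWrap_CreateIoPool64(s);
    return (s.x[0] == 0 && s.x[1] == 0x1002u) ? 0 : 1;
}
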
SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_handle)); @@ -2866,7 +2866,7 @@ static void SvcWrap_CreateIoRegion64(Core::System& system) { mapping = Convert<MemoryMapping>(GetReg64(system, 4)); perm = Convert<MemoryPermission>(GetReg64(system, 5)); - ret = CreateIoRegion64(system, &out_handle, io_pool, physical_address, size, mapping, perm); + ret = CreateIoRegion64(system, std::addressof(out_handle), io_pool, physical_address, size, mapping, perm); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_handle)); @@ -2905,7 +2905,7 @@ static void SvcWrap_CreateSession64(Core::System& system) { is_light = Convert<bool>(GetReg64(system, 2)); name = Convert<uint64_t>(GetReg64(system, 3)); - ret = CreateSession64(system, &out_server_session_handle, &out_client_session_handle, is_light, name); + ret = CreateSession64(system, std::addressof(out_server_session_handle), std::addressof(out_client_session_handle), is_light, name); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_server_session_handle)); @@ -2920,7 +2920,7 @@ static void SvcWrap_AcceptSession64(Core::System& system) { port = Convert<Handle>(GetReg64(system, 1)); - ret = AcceptSession64(system, &out_handle, port); + ret = AcceptSession64(system, std::addressof(out_handle), port); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_handle)); @@ -2940,7 +2940,7 @@ static void SvcWrap_ReplyAndReceive64(Core::System& system) { reply_target = Convert<Handle>(GetReg64(system, 3)); timeout_ns = Convert<int64_t>(GetReg64(system, 4)); - ret = ReplyAndReceive64(system, &out_index, handles, num_handles, reply_target, timeout_ns); + ret = ReplyAndReceive64(system, std::addressof(out_index), handles, num_handles, reply_target, timeout_ns); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_index)); @@ -2964,7 +2964,7 @@ static void SvcWrap_ReplyAndReceiveWithUserBuffer64(Core::System& system) { reply_target = Convert<Handle>(GetReg64(system, 5)); timeout_ns = Convert<int64_t>(GetReg64(system, 6)); - ret = ReplyAndReceiveWithUserBuffer64(system, &out_index, message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns); + ret = ReplyAndReceiveWithUserBuffer64(system, std::addressof(out_index), message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_index)); @@ -2976,7 +2976,7 @@ static void SvcWrap_CreateEvent64(Core::System& system) { Handle out_write_handle{}; Handle out_read_handle{}; - ret = CreateEvent64(system, &out_write_handle, &out_read_handle); + ret = CreateEvent64(system, std::addressof(out_write_handle), std::addressof(out_read_handle)); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_write_handle)); @@ -3067,7 +3067,7 @@ static void SvcWrap_CreateCodeMemory64(Core::System& system) { address = Convert<uint64_t>(GetReg64(system, 1)); size = Convert<uint64_t>(GetReg64(system, 2)); - ret = CreateCodeMemory64(system, &out_handle, address, size); + ret = CreateCodeMemory64(system, std::addressof(out_handle), address, size); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_handle)); @@ -3109,7 +3109,7 @@ static void SvcWrap_ReadWriteRegister64(Core::System& system) { mask = Convert<uint32_t>(GetReg64(system, 2)); value = 
Convert<uint32_t>(GetReg64(system, 3)); - ret = ReadWriteRegister64(system, &out_value, address, mask, value); + ret = ReadWriteRegister64(system, std::addressof(out_value), address, mask, value); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_value)); @@ -3141,7 +3141,7 @@ static void SvcWrap_CreateSharedMemory64(Core::System& system) { owner_perm = Convert<MemoryPermission>(GetReg64(system, 2)); remote_perm = Convert<MemoryPermission>(GetReg64(system, 3)); - ret = CreateSharedMemory64(system, &out_handle, size, owner_perm, remote_perm); + ret = CreateSharedMemory64(system, std::addressof(out_handle), size, owner_perm, remote_perm); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_handle)); @@ -3191,7 +3191,7 @@ static void SvcWrap_CreateInterruptEvent64(Core::System& system) { interrupt_id = Convert<int32_t>(GetReg64(system, 1)); interrupt_type = Convert<InterruptType>(GetReg64(system, 2)); - ret = CreateInterruptEvent64(system, &out_read_handle, interrupt_id, interrupt_type); + ret = CreateInterruptEvent64(system, std::addressof(out_read_handle), interrupt_id, interrupt_type); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_read_handle)); @@ -3205,7 +3205,7 @@ static void SvcWrap_QueryPhysicalAddress64(Core::System& system) { address = Convert<uint64_t>(GetReg64(system, 1)); - ret = QueryPhysicalAddress64(system, &out_info, address); + ret = QueryPhysicalAddress64(system, std::addressof(out_info), address); SetReg64(system, 0, Convert<uint64_t>(ret)); auto out_info_scatter = Convert<std::array<uint64_t, 3>>(out_info); @@ -3225,7 +3225,7 @@ static void SvcWrap_QueryIoMapping64(Core::System& system) { physical_address = Convert<uint64_t>(GetReg64(system, 2)); size = Convert<uint64_t>(GetReg64(system, 3)); - ret = QueryIoMapping64(system, &out_address, &out_size, physical_address, size); + ret = QueryIoMapping64(system, std::addressof(out_address), std::addressof(out_size), physical_address, size); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_address)); @@ -3242,7 +3242,7 @@ static void SvcWrap_CreateDeviceAddressSpace64(Core::System& system) { das_address = Convert<uint64_t>(GetReg64(system, 1)); das_size = Convert<uint64_t>(GetReg64(system, 2)); - ret = CreateDeviceAddressSpace64(system, &out_handle, das_address, das_size); + ret = CreateDeviceAddressSpace64(system, std::addressof(out_handle), das_address, das_size); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_handle)); @@ -3396,7 +3396,7 @@ static void SvcWrap_DebugActiveProcess64(Core::System& system) { process_id = Convert<uint64_t>(GetReg64(system, 1)); - ret = DebugActiveProcess64(system, &out_handle, process_id); + ret = DebugActiveProcess64(system, std::addressof(out_handle), process_id); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_handle)); @@ -3468,7 +3468,7 @@ static void SvcWrap_GetProcessList64(Core::System& system) { out_process_ids = Convert<uint64_t>(GetReg64(system, 1)); max_out_count = Convert<int32_t>(GetReg64(system, 2)); - ret = GetProcessList64(system, &out_num_processes, out_process_ids, max_out_count); + ret = GetProcessList64(system, std::addressof(out_num_processes), out_process_ids, max_out_count); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_num_processes)); @@ -3486,7 +3486,7 @@ static void SvcWrap_GetThreadList64(Core::System& 
system) { max_out_count = Convert<int32_t>(GetReg64(system, 2)); debug_handle = Convert<Handle>(GetReg64(system, 3)); - ret = GetThreadList64(system, &out_num_threads, out_thread_ids, max_out_count, debug_handle); + ret = GetThreadList64(system, std::addressof(out_num_threads), out_thread_ids, max_out_count, debug_handle); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_num_threads)); @@ -3540,7 +3540,7 @@ static void SvcWrap_QueryDebugProcessMemory64(Core::System& system) { process_handle = Convert<Handle>(GetReg64(system, 2)); address = Convert<uint64_t>(GetReg64(system, 3)); - ret = QueryDebugProcessMemory64(system, out_memory_info, &out_page_info, process_handle, address); + ret = QueryDebugProcessMemory64(system, out_memory_info, std::addressof(out_page_info), process_handle, address); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_page_info)); @@ -3611,7 +3611,7 @@ static void SvcWrap_GetDebugThreadParam64(Core::System& system) { thread_id = Convert<uint64_t>(GetReg64(system, 3)); param = Convert<DebugThreadParam>(GetReg64(system, 4)); - ret = GetDebugThreadParam64(system, &out_64, &out_32, debug_handle, thread_id, param); + ret = GetDebugThreadParam64(system, std::addressof(out_64), std::addressof(out_32), debug_handle, thread_id, param); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_64)); @@ -3630,7 +3630,7 @@ static void SvcWrap_GetSystemInfo64(Core::System& system) { handle = Convert<Handle>(GetReg64(system, 2)); info_subtype = Convert<uint64_t>(GetReg64(system, 3)); - ret = GetSystemInfo64(system, &out, info_type, handle, info_subtype); + ret = GetSystemInfo64(system, std::addressof(out), info_type, handle, info_subtype); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out)); @@ -3649,7 +3649,7 @@ static void SvcWrap_CreatePort64(Core::System& system) { is_light = Convert<bool>(GetReg64(system, 3)); name = Convert<uint64_t>(GetReg64(system, 4)); - ret = CreatePort64(system, &out_server_handle, &out_client_handle, max_sessions, is_light, name); + ret = CreatePort64(system, std::addressof(out_server_handle), std::addressof(out_client_handle), max_sessions, is_light, name); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_server_handle)); @@ -3666,7 +3666,7 @@ static void SvcWrap_ManageNamedPort64(Core::System& system) { name = Convert<uint64_t>(GetReg64(system, 1)); max_sessions = Convert<int32_t>(GetReg64(system, 2)); - ret = ManageNamedPort64(system, &out_server_handle, name, max_sessions); + ret = ManageNamedPort64(system, std::addressof(out_server_handle), name, max_sessions); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_server_handle)); @@ -3680,7 +3680,7 @@ static void SvcWrap_ConnectToPort64(Core::System& system) { port = Convert<Handle>(GetReg64(system, 1)); - ret = ConnectToPort64(system, &out_handle, port); + ret = ConnectToPort64(system, std::addressof(out_handle), port); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_handle)); @@ -3752,7 +3752,7 @@ static void SvcWrap_QueryProcessMemory64(Core::System& system) { process_handle = Convert<Handle>(GetReg64(system, 2)); address = Convert<uint64_t>(GetReg64(system, 3)); - ret = QueryProcessMemory64(system, out_memory_info, &out_page_info, process_handle, address); + ret = QueryProcessMemory64(system, out_memory_info, std::addressof(out_page_info), 
process_handle, address); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_page_info)); @@ -3806,7 +3806,7 @@ static void SvcWrap_CreateProcess64(Core::System& system) { caps = Convert<uint64_t>(GetReg64(system, 2)); num_caps = Convert<int32_t>(GetReg64(system, 3)); - ret = CreateProcess64(system, &out_handle, parameters, caps, num_caps); + ret = CreateProcess64(system, std::addressof(out_handle), parameters, caps, num_caps); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_handle)); @@ -3852,7 +3852,7 @@ static void SvcWrap_GetProcessInfo64(Core::System& system) { process_handle = Convert<Handle>(GetReg64(system, 1)); info_type = Convert<ProcessInfoType>(GetReg64(system, 2)); - ret = GetProcessInfo64(system, &out_info, process_handle, info_type); + ret = GetProcessInfo64(system, std::addressof(out_info), process_handle, info_type); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_info)); @@ -3863,7 +3863,7 @@ static void SvcWrap_CreateResourceLimit64(Core::System& system) { Handle out_handle{}; - ret = CreateResourceLimit64(system, &out_handle); + ret = CreateResourceLimit64(system, std::addressof(out_handle)); SetReg64(system, 0, Convert<uint64_t>(ret)); SetReg64(system, 1, Convert<uint64_t>(out_handle)); diff --git a/src/core/hle/kernel/svc/svc_address_arbiter.cpp b/src/core/hle/kernel/svc/svc_address_arbiter.cpp index 998bd3f22..22071731b 100644 --- a/src/core/hle/kernel/svc/svc_address_arbiter.cpp +++ b/src/core/hle/kernel/svc/svc_address_arbiter.cpp @@ -43,18 +43,9 @@ Result WaitForAddress(Core::System& system, VAddr address, ArbitrationType arb_t address, arb_type, value, timeout_ns); // Validate input. - if (IsKernelAddress(address)) { - LOG_ERROR(Kernel_SVC, "Attempting to wait on kernel address (address={:08X})", address); - return ResultInvalidCurrentMemory; - } - if (!Common::IsAligned(address, sizeof(s32))) { - LOG_ERROR(Kernel_SVC, "Wait address must be 4 byte aligned (address={:08X})", address); - return ResultInvalidAddress; - } - if (!IsValidArbitrationType(arb_type)) { - LOG_ERROR(Kernel_SVC, "Invalid arbitration type specified (type={})", arb_type); - return ResultInvalidEnumValue; - } + R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory); + R_UNLESS(Common::IsAligned(address, sizeof(s32)), ResultInvalidAddress); + R_UNLESS(IsValidArbitrationType(arb_type), ResultInvalidEnumValue); // Convert timeout from nanoseconds to ticks. s64 timeout{}; @@ -72,7 +63,8 @@ Result WaitForAddress(Core::System& system, VAddr address, ArbitrationType arb_t timeout = timeout_ns; } - return GetCurrentProcess(system.Kernel()).WaitAddressArbiter(address, arb_type, value, timeout); + R_RETURN( + GetCurrentProcess(system.Kernel()).WaitAddressArbiter(address, arb_type, value, timeout)); } // Signals to an address (via Address Arbiter) @@ -82,41 +74,32 @@ Result SignalToAddress(Core::System& system, VAddr address, SignalType signal_ty address, signal_type, value, count); // Validate input. 
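
// Aside: the validation rewrite here trades if/LOG_ERROR/return chains for
// R_UNLESS early-outs. A minimal sketch of this result-macro style; these
// definitions are illustrative, not yuzu's actual macros:
#include <cstdint>

using Result = uint32_t;
constexpr Result ResultSuccess = 0;
constexpr Result ResultInvalidAddress = 0x10;

#define R_SUCCEED() return ResultSuccess
#define R_THROW(res) return (res)
#define R_RETURN(expr) return (expr)
#define R_UNLESS(cond, res) \
    do {                    \
        if (!(cond)) {      \
            R_THROW(res);   \
        }                   \
    } while (0)

static Result SignalToAddress(uint64_t address) {
    // Validate input, then succeed, mirroring the rewritten SVC above.
    R_UNLESS((address & 3) == 0, ResultInvalidAddress);
    R_SUCCEED();
}

int main() {
    return SignalToAddress(0x1000) == ResultSuccess &&
                   SignalToAddress(0x1001) == ResultInvalidAddress
               ? 0
               : 1;
}
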
- if (IsKernelAddress(address)) { - LOG_ERROR(Kernel_SVC, "Attempting to signal to a kernel address (address={:08X})", address); - return ResultInvalidCurrentMemory; - } - if (!Common::IsAligned(address, sizeof(s32))) { - LOG_ERROR(Kernel_SVC, "Signaled address must be 4 byte aligned (address={:08X})", address); - return ResultInvalidAddress; - } - if (!IsValidSignalType(signal_type)) { - LOG_ERROR(Kernel_SVC, "Invalid signal type specified (type={})", signal_type); - return ResultInvalidEnumValue; - } + R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory); + R_UNLESS(Common::IsAligned(address, sizeof(s32)), ResultInvalidAddress); + R_UNLESS(IsValidSignalType(signal_type), ResultInvalidEnumValue); - return GetCurrentProcess(system.Kernel()) - .SignalAddressArbiter(address, signal_type, value, count); + R_RETURN(GetCurrentProcess(system.Kernel()) + .SignalAddressArbiter(address, signal_type, value, count)); } Result WaitForAddress64(Core::System& system, VAddr address, ArbitrationType arb_type, s32 value, s64 timeout_ns) { - return WaitForAddress(system, address, arb_type, value, timeout_ns); + R_RETURN(WaitForAddress(system, address, arb_type, value, timeout_ns)); } Result SignalToAddress64(Core::System& system, VAddr address, SignalType signal_type, s32 value, s32 count) { - return SignalToAddress(system, address, signal_type, value, count); + R_RETURN(SignalToAddress(system, address, signal_type, value, count)); } Result WaitForAddress64From32(Core::System& system, u32 address, ArbitrationType arb_type, s32 value, s64 timeout_ns) { - return WaitForAddress(system, address, arb_type, value, timeout_ns); + R_RETURN(WaitForAddress(system, address, arb_type, value, timeout_ns)); } Result SignalToAddress64From32(Core::System& system, u32 address, SignalType signal_type, s32 value, s32 count) { - return SignalToAddress(system, address, signal_type, value, count); + R_RETURN(SignalToAddress(system, address, signal_type, value, count)); } } // namespace Kernel::Svc diff --git a/src/core/hle/kernel/svc/svc_code_memory.cpp b/src/core/hle/kernel/svc/svc_code_memory.cpp index 8bed747af..43feab986 100644 --- a/src/core/hle/kernel/svc/svc_code_memory.cpp +++ b/src/core/hle/kernel/svc/svc_code_memory.cpp @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project // SPDX-License-Identifier: GPL-2.0-or-later +#include "common/scope_exit.h" #include "core/core.h" #include "core/hle/kernel/k_code_memory.h" #include "core/hle/kernel/k_process.h" @@ -44,6 +45,7 @@ Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, uint64 KCodeMemory* code_mem = KCodeMemory::Create(kernel); R_UNLESS(code_mem != nullptr, ResultOutOfResource); + SCOPE_EXIT({ code_mem->Close(); }); // Verify that the region is in range. R_UNLESS(GetCurrentProcess(system.Kernel()).PageTable().Contains(address, size), @@ -58,9 +60,7 @@ Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, uint64 // Add the code memory to the handle table. 
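
// Aside: SCOPE_EXIT({ code_mem->Close(); }) above replaces a trailing
// Close() call because R_TRY can return early, and the close must run on
// every path. Minimal scope-exit sketch (yuzu's real helper lives in
// common/scope_exit.h; this hand-rolled version is only illustrative):
#include <cstdio>
#include <utility>

template <typename F>
struct ScopeExit {
    F f;
    ~ScopeExit() { f(); }
};
template <typename F>
ScopeExit<F> MakeScopeExit(F&& f) {
    return {std::forward<F>(f)};
}
#define SCOPE_EXIT(body) auto scope_exit_ = MakeScopeExit([&]() body)

static bool AddToHandleTable(bool fail) { return !fail; }

static int CreateThing(bool fail) {
    std::puts("open");
    SCOPE_EXIT({ std::puts("close"); });  // runs on success and failure alike
    if (!AddToHandleTable(fail)) {
        return 1;  // early return: "close" is still printed
    }
    return 0;
}

int main() { return CreateThing(false) + CreateThing(true) == 1 ? 0 : 1; }
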
R_TRY(GetCurrentProcess(system.Kernel()).GetHandleTable().Add(out, code_mem)); - code_mem->Close(); - - return ResultSuccess; + R_SUCCEED(); } Result ControlCodeMemory(Core::System& system, Handle code_memory_handle, @@ -140,10 +140,10 @@ Result ControlCodeMemory(Core::System& system, Handle code_memory_handle, R_TRY(code_mem->UnmapFromOwner(address, size)); } break; default: - return ResultInvalidEnumValue; + R_THROW(ResultInvalidEnumValue); } - return ResultSuccess; + R_SUCCEED(); } Result CreateCodeMemory64(Core::System& system, Handle* out_handle, uint64_t address, diff --git a/src/core/hle/kernel/svc/svc_condition_variable.cpp b/src/core/hle/kernel/svc/svc_condition_variable.cpp index 8ad1a0b8f..648ed23d0 100644 --- a/src/core/hle/kernel/svc/svc_condition_variable.cpp +++ b/src/core/hle/kernel/svc/svc_condition_variable.cpp @@ -17,14 +17,8 @@ Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_ke cv_key, tag, timeout_ns); // Validate input. - if (IsKernelAddress(address)) { - LOG_ERROR(Kernel_SVC, "Attempted to wait on kernel address (address={:08X})", address); - return ResultInvalidCurrentMemory; - } - if (!Common::IsAligned(address, sizeof(s32))) { - LOG_ERROR(Kernel_SVC, "Address must be 4 byte aligned (address={:08X})", address); - return ResultInvalidAddress; - } + R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory); + R_UNLESS(Common::IsAligned(address, sizeof(s32)), ResultInvalidAddress); // Convert timeout from nanoseconds to ticks. s64 timeout{}; @@ -43,8 +37,9 @@ Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_ke } // Wait on the condition variable. - return GetCurrentProcess(system.Kernel()) - .WaitConditionVariable(address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout); + R_RETURN( + GetCurrentProcess(system.Kernel()) + .WaitConditionVariable(address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout)); } /// Signal process wide key diff --git a/src/core/hle/kernel/svc/svc_event.cpp b/src/core/hle/kernel/svc/svc_event.cpp index 8692b00f2..901202e6a 100644 --- a/src/core/hle/kernel/svc/svc_event.cpp +++ b/src/core/hle/kernel/svc/svc_event.cpp @@ -21,7 +21,7 @@ Result SignalEvent(Core::System& system, Handle event_handle) { KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle); R_UNLESS(event.IsNotNull(), ResultInvalidHandle); - return event->Signal(); + R_RETURN(event->Signal()); } Result ClearEvent(Core::System& system, Handle event_handle) { @@ -34,7 +34,7 @@ Result ClearEvent(Core::System& system, Handle event_handle) { { KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle); if (event.IsNotNull()) { - return event->Clear(); + R_RETURN(event->Clear()); } } @@ -42,13 +42,11 @@ Result ClearEvent(Core::System& system, Handle event_handle) { { KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(event_handle); if (readable_event.IsNotNull()) { - return readable_event->Clear(); + R_RETURN(readable_event->Clear()); } } - LOG_ERROR(Kernel_SVC, "Event handle does not exist, event_handle=0x{:08X}", event_handle); - - return ResultInvalidHandle; + R_THROW(ResultInvalidHandle); } Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) { @@ -86,14 +84,12 @@ Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) { R_TRY(handle_table.Add(out_write, event)); // Ensure that we maintain a clean handle state on exit. 
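(Note the ordering fix in CreateCodeMemory above: previously code_mem->Close() ran only after a successful Add(), so any early R_UNLESS/R_TRY failure between Create() and Add() leaked the creation reference. Moving the Close() into a SCOPE_EXIT immediately after creation drops it on every path. A sketch of the general pattern, assuming reference-counted kernel objects where the handle table opens its own reference in Add():)

    KCodeMemory* code_mem = KCodeMemory::Create(kernel);
    R_UNLESS(code_mem != nullptr, ResultOutOfResource);
    SCOPE_EXIT({ code_mem->Close(); });  // drop the creation reference on every path

    R_TRY(code_mem->Initialize(device_memory, address, size));  // assumed initializer;
                                                                // the exact call is elided
                                                                // from the hunk above
    KCodeMemory::Register(kernel, code_mem);

    // Add() takes its own reference on success; once SCOPE_EXIT runs, the
    // handle table is the sole owner.
    R_RETURN(handle_table.Add(out, code_mem));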
- auto handle_guard = SCOPE_GUARD({ handle_table.Remove(*out_write); }); + ON_RESULT_FAILURE { + handle_table.Remove(*out_write); + }; // Add the readable event to the handle table. - R_TRY(handle_table.Add(out_read, std::addressof(event->GetReadableEvent()))); - - // We succeeded. - handle_guard.Cancel(); - return ResultSuccess; + R_RETURN(handle_table.Add(out_read, std::addressof(event->GetReadableEvent()))); } Result SignalEvent64(Core::System& system, Handle event_handle) { diff --git a/src/core/hle/kernel/svc/svc_info.cpp b/src/core/hle/kernel/svc/svc_info.cpp index cbed4dc8c..04b6d6964 100644 --- a/src/core/hle/kernel/svc/svc_info.cpp +++ b/src/core/hle/kernel/svc/svc_info.cpp @@ -38,126 +38,110 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle case InfoType::UsedNonSystemMemorySize: case InfoType::IsApplication: case InfoType::FreeThreadCount: { - if (info_sub_id != 0) { - LOG_ERROR(Kernel_SVC, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id, - info_sub_id); - return ResultInvalidEnumValue; - } + R_UNLESS(info_sub_id == 0, ResultInvalidEnumValue); const auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable(); KScopedAutoObject process = handle_table.GetObject<KProcess>(handle); - if (process.IsNull()) { - LOG_ERROR(Kernel_SVC, "Process is not valid! info_id={}, info_sub_id={}, handle={:08X}", - info_id, info_sub_id, handle); - return ResultInvalidHandle; - } + R_UNLESS(process.IsNotNull(), ResultInvalidHandle); switch (info_id_type) { case InfoType::CoreMask: *result = process->GetCoreMask(); - return ResultSuccess; + R_SUCCEED(); case InfoType::PriorityMask: *result = process->GetPriorityMask(); - return ResultSuccess; + R_SUCCEED(); case InfoType::AliasRegionAddress: *result = process->PageTable().GetAliasRegionStart(); - return ResultSuccess; + R_SUCCEED(); case InfoType::AliasRegionSize: *result = process->PageTable().GetAliasRegionSize(); - return ResultSuccess; + R_SUCCEED(); case InfoType::HeapRegionAddress: *result = process->PageTable().GetHeapRegionStart(); - return ResultSuccess; + R_SUCCEED(); case InfoType::HeapRegionSize: *result = process->PageTable().GetHeapRegionSize(); - return ResultSuccess; + R_SUCCEED(); case InfoType::AslrRegionAddress: *result = process->PageTable().GetAliasCodeRegionStart(); - return ResultSuccess; + R_SUCCEED(); case InfoType::AslrRegionSize: *result = process->PageTable().GetAliasCodeRegionSize(); - return ResultSuccess; + R_SUCCEED(); case InfoType::StackRegionAddress: *result = process->PageTable().GetStackRegionStart(); - return ResultSuccess; + R_SUCCEED(); case InfoType::StackRegionSize: *result = process->PageTable().GetStackRegionSize(); - return ResultSuccess; + R_SUCCEED(); case InfoType::TotalMemorySize: *result = process->GetTotalPhysicalMemoryAvailable(); - return ResultSuccess; + R_SUCCEED(); case InfoType::UsedMemorySize: *result = process->GetTotalPhysicalMemoryUsed(); - return ResultSuccess; + R_SUCCEED(); case InfoType::SystemResourceSizeTotal: *result = process->GetSystemResourceSize(); - return ResultSuccess; + R_SUCCEED(); case InfoType::SystemResourceSizeUsed: LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage"); *result = process->GetSystemResourceUsage(); - return ResultSuccess; + R_SUCCEED(); case InfoType::ProgramId: - *result = process->GetProgramID(); - return ResultSuccess; + *result = process->GetProgramId(); + R_SUCCEED(); case InfoType::UserExceptionContextAddress: *result = process->GetProcessLocalRegionAddress(); - 
return ResultSuccess; + R_SUCCEED(); case InfoType::TotalNonSystemMemorySize: *result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource(); - return ResultSuccess; + R_SUCCEED(); case InfoType::UsedNonSystemMemorySize: *result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource(); - return ResultSuccess; + R_SUCCEED(); case InfoType::IsApplication: LOG_WARNING(Kernel_SVC, "(STUBBED) Assuming process is application"); *result = true; - return ResultSuccess; + R_SUCCEED(); case InfoType::FreeThreadCount: *result = process->GetFreeThreadCount(); - return ResultSuccess; + R_SUCCEED(); default: break; } LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id); - return ResultInvalidEnumValue; + R_THROW(ResultInvalidEnumValue); } case InfoType::DebuggerAttached: *result = 0; - return ResultSuccess; + R_SUCCEED(); case InfoType::ResourceLimit: { - if (handle != 0) { - LOG_ERROR(Kernel, "Handle is non zero! handle={:08X}", handle); - return ResultInvalidHandle; - } - - if (info_sub_id != 0) { - LOG_ERROR(Kernel, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id, - info_sub_id); - return ResultInvalidCombination; - } + R_UNLESS(handle == 0, ResultInvalidHandle); + R_UNLESS(info_sub_id == 0, ResultInvalidCombination); KProcess* const current_process = GetCurrentProcessPointer(system.Kernel()); KHandleTable& handle_table = current_process->GetHandleTable(); @@ -165,44 +149,35 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle if (!resource_limit) { *result = Svc::InvalidHandle; // Yes, the kernel considers this a successful operation. - return ResultSuccess; + R_SUCCEED(); } Handle resource_handle{}; - R_TRY(handle_table.Add(&resource_handle, resource_limit)); + R_TRY(handle_table.Add(std::addressof(resource_handle), resource_limit)); *result = resource_handle; - return ResultSuccess; + R_SUCCEED(); } case InfoType::RandomEntropy: - if (handle != 0) { - LOG_ERROR(Kernel_SVC, "Process Handle is non zero, expected 0 result but got {:016X}", - handle); - return ResultInvalidHandle; - } - - if (info_sub_id >= KProcess::RANDOM_ENTROPY_SIZE) { - LOG_ERROR(Kernel_SVC, "Entropy size is out of range, expected {} but got {}", - KProcess::RANDOM_ENTROPY_SIZE, info_sub_id); - return ResultInvalidCombination; - } + R_UNLESS(handle == 0, ResultInvalidHandle); + R_UNLESS(info_sub_id < KProcess::RANDOM_ENTROPY_SIZE, ResultInvalidCombination); *result = GetCurrentProcess(system.Kernel()).GetRandomEntropy(info_sub_id); - return ResultSuccess; + R_SUCCEED(); case InfoType::InitialProcessIdRange: LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query privileged process id bounds, returned 0"); *result = 0; - return ResultSuccess; + R_SUCCEED(); case InfoType::ThreadTickCount: { constexpr u64 num_cpus = 4; if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id >= num_cpus) { LOG_ERROR(Kernel_SVC, "Core count is out of range, expected {} but got {}", num_cpus, info_sub_id); - return ResultInvalidCombination; + R_THROW(ResultInvalidCombination); } KScopedAutoObject thread = GetCurrentProcess(system.Kernel()) @@ -211,7 +186,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle if (thread.IsNull()) { LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", static_cast<Handle>(handle)); - return ResultInvalidHandle; + R_THROW(ResultInvalidHandle); } const auto& core_timing = system.CoreTiming(); @@ -230,7 +205,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle } *result = 
out_ticks; - return ResultSuccess; + R_SUCCEED(); } case InfoType::IdleTickCount: { // Verify the input handle is invalid. @@ -244,7 +219,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle // Get the idle tick count. *result = system.Kernel().CurrentScheduler()->GetIdleThread()->GetCpuTime(); - return ResultSuccess; + R_SUCCEED(); } case InfoType::MesosphereCurrentProcess: { // Verify the input handle is invalid. @@ -259,17 +234,17 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle // Get a new handle for the current process. Handle tmp; - R_TRY(handle_table.Add(&tmp, current_process)); + R_TRY(handle_table.Add(std::addressof(tmp), current_process)); // Set the output. *result = tmp; // We succeeded. - return ResultSuccess; + R_SUCCEED(); } default: LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id); - return ResultInvalidEnumValue; + R_THROW(ResultInvalidEnumValue); } } diff --git a/src/core/hle/kernel/svc/svc_ipc.cpp b/src/core/hle/kernel/svc/svc_ipc.cpp index a7a2c3b92..46fd0f2ea 100644 --- a/src/core/hle/kernel/svc/svc_ipc.cpp +++ b/src/core/hle/kernel/svc/svc_ipc.cpp @@ -19,7 +19,7 @@ Result SendSyncRequest(Core::System& system, Handle handle) { LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName()); - return session->SendSyncRequest(); + R_RETURN(session->SendSyncRequest()); } Result SendSyncRequestWithUserBuffer(Core::System& system, uint64_t message_buffer, @@ -79,10 +79,10 @@ Result ReplyAndReceive(Core::System& system, s32* out_index, uint64_t handles_ad while (true) { // Wait for an object. s32 index; - Result result = KSynchronizationObject::Wait(kernel, &index, objs.data(), + Result result = KSynchronizationObject::Wait(kernel, std::addressof(index), objs.data(), static_cast<s32>(objs.size()), timeout_ns); if (result == ResultTimedOut) { - return result; + R_RETURN(result); } // Receive the request. @@ -97,7 +97,7 @@ Result ReplyAndReceive(Core::System& system, s32* out_index, uint64_t handles_ad } *out_index = index; - return result; + R_RETURN(result); } } diff --git a/src/core/hle/kernel/svc/svc_lock.cpp b/src/core/hle/kernel/svc/svc_lock.cpp index f3d3e140b..3681279d6 100644 --- a/src/core/hle/kernel/svc/svc_lock.cpp +++ b/src/core/hle/kernel/svc/svc_lock.cpp @@ -14,17 +14,10 @@ Result ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address, thread_handle, address, tag); // Validate the input address. - if (IsKernelAddress(address)) { - LOG_ERROR(Kernel_SVC, "Attempting to arbitrate a lock on a kernel address (address={:08X})", - address); - return ResultInvalidCurrentMemory; - } - if (!Common::IsAligned(address, sizeof(u32))) { - LOG_ERROR(Kernel_SVC, "Input address must be 4 byte aligned (address: {:08X})", address); - return ResultInvalidAddress; - } + R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory); + R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress); - return GetCurrentProcess(system.Kernel()).WaitForAddress(thread_handle, address, tag); + R_RETURN(GetCurrentProcess(system.Kernel()).WaitForAddress(thread_handle, address, tag)); } /// Unlock a mutex @@ -32,18 +25,10 @@ Result ArbitrateUnlock(Core::System& system, VAddr address) { LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address); // Validate the input address. 
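(On the recurring &x → std::addressof(x) substitutions throughout this change: std::addressof always yields the object's true address even when operator& is overloaded, and it signals raw address-of intent. A self-contained illustration with a hypothetical type, not from this codebase:)

    #include <memory>

    struct Evil {
        int* operator&() { return nullptr; }  // pathological overload
    };

    void Demo() {
        Evil e;
        int* overloaded = &e;              // invokes Evil::operator&, yields nullptr
        Evil* actual = std::addressof(e);  // bypasses the overload; real address
        (void)overloaded;
        (void)actual;
    }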
- if (IsKernelAddress(address)) { - LOG_ERROR(Kernel_SVC, - "Attempting to arbitrate an unlock on a kernel address (address={:08X})", - address); - return ResultInvalidCurrentMemory; - } - if (!Common::IsAligned(address, sizeof(u32))) { - LOG_ERROR(Kernel_SVC, "Input address must be 4 byte aligned (address: {:08X})", address); - return ResultInvalidAddress; - } + R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory); + R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress); - return GetCurrentProcess(system.Kernel()).SignalToAddress(address); + R_RETURN(GetCurrentProcess(system.Kernel()).SignalToAddress(address)); } Result ArbitrateLock64(Core::System& system, Handle thread_handle, uint64_t address, uint32_t tag) { diff --git a/src/core/hle/kernel/svc/svc_memory.cpp b/src/core/hle/kernel/svc/svc_memory.cpp index 214bcd073..4db25a3b7 100644 --- a/src/core/hle/kernel/svc/svc_memory.cpp +++ b/src/core/hle/kernel/svc/svc_memory.cpp @@ -33,49 +33,49 @@ Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAd u64 size) { if (!Common::Is4KBAligned(dst_addr)) { LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr); - return ResultInvalidAddress; + R_THROW(ResultInvalidAddress); } if (!Common::Is4KBAligned(src_addr)) { LOG_ERROR(Kernel_SVC, "Source address is not aligned to 4KB, 0x{:016X}", src_addr); - return ResultInvalidSize; + R_THROW(ResultInvalidSize); } if (size == 0) { LOG_ERROR(Kernel_SVC, "Size is 0"); - return ResultInvalidSize; + R_THROW(ResultInvalidSize); } if (!Common::Is4KBAligned(size)) { LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:016X}", size); - return ResultInvalidSize; + R_THROW(ResultInvalidSize); } if (!IsValidAddressRange(dst_addr, size)) { LOG_ERROR(Kernel_SVC, "Destination is not a valid address range, addr=0x{:016X}, size=0x{:016X}", dst_addr, size); - return ResultInvalidCurrentMemory; + R_THROW(ResultInvalidCurrentMemory); } if (!IsValidAddressRange(src_addr, size)) { LOG_ERROR(Kernel_SVC, "Source is not a valid address range, addr=0x{:016X}, size=0x{:016X}", src_addr, size); - return ResultInvalidCurrentMemory; + R_THROW(ResultInvalidCurrentMemory); } if (!manager.IsInsideAddressSpace(src_addr, size)) { LOG_ERROR(Kernel_SVC, "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}", src_addr, size); - return ResultInvalidCurrentMemory; + R_THROW(ResultInvalidCurrentMemory); } if (manager.IsOutsideStackRegion(dst_addr, size)) { LOG_ERROR(Kernel_SVC, "Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}", dst_addr, size); - return ResultInvalidMemoryRegion; + R_THROW(ResultInvalidMemoryRegion); } if (manager.IsInsideHeapRegion(dst_addr, size)) { @@ -83,7 +83,7 @@ Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAd "Destination does not fit within the heap region, addr=0x{:016X}, " "size=0x{:016X}", dst_addr, size); - return ResultInvalidMemoryRegion; + R_THROW(ResultInvalidMemoryRegion); } if (manager.IsInsideAliasRegion(dst_addr, size)) { @@ -91,10 +91,10 @@ Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAd "Destination does not fit within the map region, addr=0x{:016X}, " "size=0x{:016X}", dst_addr, size); - return ResultInvalidMemoryRegion; + R_THROW(ResultInvalidMemoryRegion); } - return ResultSuccess; + R_SUCCEED(); } } // namespace @@ -117,7 +117,7 @@ Result SetMemoryPermission(Core::System& system, VAddr address, u64 size, Memory R_UNLESS(page_table.Contains(address, size), 
ResultInvalidCurrentMemory); // Set the memory permission. - return page_table.SetMemoryPermission(address, size, perm); + R_RETURN(page_table.SetMemoryPermission(address, size, perm)); } Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask, u32 attr) { @@ -141,7 +141,7 @@ Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mas R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory); // Set the memory attribute. - return page_table.SetMemoryAttribute(address, size, mask, attr); + R_RETURN(page_table.SetMemoryAttribute(address, size, mask, attr)); } /// Maps a memory range into a different range. @@ -156,7 +156,7 @@ Result MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) return result; } - return page_table.MapMemory(dst_addr, src_addr, size); + R_RETURN(page_table.MapMemory(dst_addr, src_addr, size)); } /// Unmaps a region that was previously mapped with svcMapMemory @@ -171,7 +171,7 @@ Result UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 siz return result; } - return page_table.UnmapMemory(dst_addr, src_addr, size); + R_RETURN(page_table.UnmapMemory(dst_addr, src_addr, size)); } Result SetMemoryPermission64(Core::System& system, uint64_t address, uint64_t size, diff --git a/src/core/hle/kernel/svc/svc_physical_memory.cpp b/src/core/hle/kernel/svc/svc_physical_memory.cpp index ed6a624ac..63196e1ed 100644 --- a/src/core/hle/kernel/svc/svc_physical_memory.cpp +++ b/src/core/hle/kernel/svc/svc_physical_memory.cpp @@ -16,9 +16,7 @@ Result SetHeapSize(Core::System& system, VAddr* out_address, u64 size) { R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize); // Set the heap size. - R_TRY(GetCurrentProcess(system.Kernel()).PageTable().SetHeapSize(out_address, size)); - - return ResultSuccess; + R_RETURN(GetCurrentProcess(system.Kernel()).PageTable().SetHeapSize(out_address, size)); } /// Maps memory at a desired address @@ -27,22 +25,22 @@ Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) { if (!Common::Is4KBAligned(addr)) { LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr); - return ResultInvalidAddress; + R_THROW(ResultInvalidAddress); } if (!Common::Is4KBAligned(size)) { LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size); - return ResultInvalidSize; + R_THROW(ResultInvalidSize); } if (size == 0) { LOG_ERROR(Kernel_SVC, "Size is zero"); - return ResultInvalidSize; + R_THROW(ResultInvalidSize); } if (!(addr < addr + size)) { LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address"); - return ResultInvalidMemoryRegion; + R_THROW(ResultInvalidMemoryRegion); } KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())}; @@ -50,24 +48,24 @@ Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) { if (current_process->GetSystemResourceSize() == 0) { LOG_ERROR(Kernel_SVC, "System Resource Size is zero"); - return ResultInvalidState; + R_THROW(ResultInvalidState); } if (!page_table.IsInsideAddressSpace(addr, size)) { LOG_ERROR(Kernel_SVC, "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr, size); - return ResultInvalidMemoryRegion; + R_THROW(ResultInvalidMemoryRegion); } if (page_table.IsOutsideAliasRegion(addr, size)) { LOG_ERROR(Kernel_SVC, "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr, size); - return ResultInvalidMemoryRegion; + R_THROW(ResultInvalidMemoryRegion); } - return page_table.MapPhysicalMemory(addr, size); + 
R_RETURN(page_table.MapPhysicalMemory(addr, size)); } /// Unmaps memory previously mapped via MapPhysicalMemory @@ -76,22 +74,22 @@ Result UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) { if (!Common::Is4KBAligned(addr)) { LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr); - return ResultInvalidAddress; + R_THROW(ResultInvalidAddress); } if (!Common::Is4KBAligned(size)) { LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size); - return ResultInvalidSize; + R_THROW(ResultInvalidSize); } if (size == 0) { LOG_ERROR(Kernel_SVC, "Size is zero"); - return ResultInvalidSize; + R_THROW(ResultInvalidSize); } if (!(addr < addr + size)) { LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address"); - return ResultInvalidMemoryRegion; + R_THROW(ResultInvalidMemoryRegion); } KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())}; @@ -99,24 +97,24 @@ Result UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) { if (current_process->GetSystemResourceSize() == 0) { LOG_ERROR(Kernel_SVC, "System Resource Size is zero"); - return ResultInvalidState; + R_THROW(ResultInvalidState); } if (!page_table.IsInsideAddressSpace(addr, size)) { LOG_ERROR(Kernel_SVC, "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr, size); - return ResultInvalidMemoryRegion; + R_THROW(ResultInvalidMemoryRegion); } if (page_table.IsOutsideAliasRegion(addr, size)) { LOG_ERROR(Kernel_SVC, "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr, size); - return ResultInvalidMemoryRegion; + R_THROW(ResultInvalidMemoryRegion); } - return page_table.UnmapPhysicalMemory(addr, size); + R_RETURN(page_table.UnmapPhysicalMemory(addr, size)); } Result MapPhysicalMemoryUnsafe(Core::System& system, uint64_t address, uint64_t size) { diff --git a/src/core/hle/kernel/svc/svc_port.cpp b/src/core/hle/kernel/svc/svc_port.cpp index 78c2a8d17..0b5556bc4 100644 --- a/src/core/hle/kernel/svc/svc_port.cpp +++ b/src/core/hle/kernel/svc/svc_port.cpp @@ -81,7 +81,7 @@ Result ManageNamedPort(Core::System& system, Handle* out_server_handle, uint64_t R_UNLESS(port != nullptr, ResultOutOfResource); // Initialize the new port. - port->Initialize(max_sessions, false, ""); + port->Initialize(max_sessions, false, 0); // Register the port. KPort::Register(system.Kernel(), port); diff --git a/src/core/hle/kernel/svc/svc_process.cpp b/src/core/hle/kernel/svc/svc_process.cpp index c35d2be76..b538c37e7 100644 --- a/src/core/hle/kernel/svc/svc_process.cpp +++ b/src/core/hle/kernel/svc/svc_process.cpp @@ -11,7 +11,7 @@ namespace Kernel::Svc { void ExitProcess(Core::System& system) { auto* current_process = GetCurrentProcessPointer(system.Kernel()); - LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID()); + LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessId()); ASSERT_MSG(current_process->GetState() == KProcess::State::Running, "Process has already exited"); @@ -47,7 +47,7 @@ Result GetProcessId(Core::System& system, u64* out_process_id, Handle handle) { // Get the process id. *out_process_id = process->GetId(); - return ResultSuccess; + R_SUCCEED(); } Result GetProcessList(Core::System& system, s32* out_num_processes, VAddr out_process_ids, @@ -60,7 +60,7 @@ Result GetProcessList(Core::System& system, s32* out_num_processes, VAddr out_pr LOG_ERROR(Kernel_SVC, "Supplied size outside [0, 0x0FFFFFFF] range. 
out_process_ids_size={}", out_process_ids_size); - return ResultOutOfRange; + R_THROW(ResultOutOfRange); } auto& kernel = system.Kernel(); @@ -70,7 +70,7 @@ Result GetProcessList(Core::System& system, s32* out_num_processes, VAddr out_pr out_process_ids, total_copy_size)) { LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}", out_process_ids, out_process_ids + total_copy_size); - return ResultInvalidCurrentMemory; + R_THROW(ResultInvalidCurrentMemory); } auto& memory = system.Memory(); @@ -80,12 +80,12 @@ Result GetProcessList(Core::System& system, s32* out_num_processes, VAddr out_pr std::min(static_cast<std::size_t>(out_process_ids_size), num_processes); for (std::size_t i = 0; i < copy_amount; ++i) { - memory.Write64(out_process_ids, process_list[i]->GetProcessID()); + memory.Write64(out_process_ids, process_list[i]->GetProcessId()); out_process_ids += sizeof(u64); } *out_num_processes = static_cast<u32>(num_processes); - return ResultSuccess; + R_SUCCEED(); } Result GetProcessInfo(Core::System& system, s64* out, Handle process_handle, @@ -97,17 +97,17 @@ Result GetProcessInfo(Core::System& system, s64* out, Handle process_handle, if (process.IsNull()) { LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}", process_handle); - return ResultInvalidHandle; + R_THROW(ResultInvalidHandle); } if (info_type != ProcessInfoType::ProcessState) { LOG_ERROR(Kernel_SVC, "Expected info_type to be ProcessState but got {} instead", info_type); - return ResultInvalidEnumValue; + R_THROW(ResultInvalidEnumValue); } *out = static_cast<s64>(process->GetState()); - return ResultSuccess; + R_SUCCEED(); } Result CreateProcess(Core::System& system, Handle* out_handle, uint64_t parameters, uint64_t caps, diff --git a/src/core/hle/kernel/svc/svc_process_memory.cpp b/src/core/hle/kernel/svc/svc_process_memory.cpp index 8e2fb4092..f9210ca1e 100644 --- a/src/core/hle/kernel/svc/svc_process_memory.cpp +++ b/src/core/hle/kernel/svc/svc_process_memory.cpp @@ -53,7 +53,7 @@ Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, V R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory); // Set the memory permission. - return page_table.SetProcessMemoryPermission(address, size, perm); + R_RETURN(page_table.SetProcessMemoryPermission(address, size, perm)); } Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle, @@ -93,10 +93,8 @@ Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_ KMemoryAttribute::All, KMemoryAttribute::None)); // Map the group. - R_TRY(dst_pt.MapPageGroup(dst_address, pg, KMemoryState::SharedCode, - KMemoryPermission::UserReadWrite)); - - return ResultSuccess; + R_RETURN(dst_pt.MapPageGroup(dst_address, pg, KMemoryState::SharedCode, + KMemoryPermission::UserReadWrite)); } Result UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle, @@ -129,9 +127,7 @@ Result UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle proces ResultInvalidMemoryRegion); // Unmap the memory. 
- R_TRY(dst_pt.UnmapProcessMemory(dst_address, size, src_pt, src_address)); - - return ResultSuccess; + R_RETURN(dst_pt.UnmapProcessMemory(dst_address, size, src_pt, src_address)); } Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address, @@ -144,18 +140,18 @@ Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst if (!Common::Is4KBAligned(src_address)) { LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).", src_address); - return ResultInvalidAddress; + R_THROW(ResultInvalidAddress); } if (!Common::Is4KBAligned(dst_address)) { LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).", dst_address); - return ResultInvalidAddress; + R_THROW(ResultInvalidAddress); } if (size == 0 || !Common::Is4KBAligned(size)) { LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X})", size); - return ResultInvalidSize; + R_THROW(ResultInvalidSize); } if (!IsValidAddressRange(dst_address, size)) { @@ -163,7 +159,7 @@ Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst "Destination address range overflows the address space (dst_address=0x{:016X}, " "size=0x{:016X}).", dst_address, size); - return ResultInvalidCurrentMemory; + R_THROW(ResultInvalidCurrentMemory); } if (!IsValidAddressRange(src_address, size)) { @@ -171,7 +167,7 @@ Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst "Source address range overflows the address space (src_address=0x{:016X}, " "size=0x{:016X}).", src_address, size); - return ResultInvalidCurrentMemory; + R_THROW(ResultInvalidCurrentMemory); } const auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable(); @@ -179,7 +175,7 @@ Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst if (process.IsNull()) { LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).", process_handle); - return ResultInvalidHandle; + R_THROW(ResultInvalidHandle); } auto& page_table = process->PageTable(); @@ -188,7 +184,7 @@ Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst "Source address range is not within the address space (src_address=0x{:016X}, " "size=0x{:016X}).", src_address, size); - return ResultInvalidCurrentMemory; + R_THROW(ResultInvalidCurrentMemory); } if (!page_table.IsInsideASLRRegion(dst_address, size)) { @@ -196,10 +192,10 @@ Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst "Destination address range is not within the ASLR region (dst_address=0x{:016X}, " "size=0x{:016X}).", dst_address, size); - return ResultInvalidMemoryRegion; + R_THROW(ResultInvalidMemoryRegion); } - return page_table.MapCodeMemory(dst_address, src_address, size); + R_RETURN(page_table.MapCodeMemory(dst_address, src_address, size)); } Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address, @@ -212,18 +208,18 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d if (!Common::Is4KBAligned(dst_address)) { LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).", dst_address); - return ResultInvalidAddress; + R_THROW(ResultInvalidAddress); } if (!Common::Is4KBAligned(src_address)) { LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).", src_address); - return ResultInvalidAddress; + R_THROW(ResultInvalidAddress); } if (size == 0 || !Common::Is4KBAligned(size)) { LOG_ERROR(Kernel_SVC, "Size is zero or 
not page-aligned (size=0x{:016X}).", size); - return ResultInvalidSize; + R_THROW(ResultInvalidSize); } if (!IsValidAddressRange(dst_address, size)) { @@ -231,7 +227,7 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d "Destination address range overflows the address space (dst_address=0x{:016X}, " "size=0x{:016X}).", dst_address, size); - return ResultInvalidCurrentMemory; + R_THROW(ResultInvalidCurrentMemory); } if (!IsValidAddressRange(src_address, size)) { @@ -239,7 +235,7 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d "Source address range overflows the address space (src_address=0x{:016X}, " "size=0x{:016X}).", src_address, size); - return ResultInvalidCurrentMemory; + R_THROW(ResultInvalidCurrentMemory); } const auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable(); @@ -247,7 +243,7 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d if (process.IsNull()) { LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).", process_handle); - return ResultInvalidHandle; + R_THROW(ResultInvalidHandle); } auto& page_table = process->PageTable(); @@ -256,7 +252,7 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d "Source address range is not within the address space (src_address=0x{:016X}, " "size=0x{:016X}).", src_address, size); - return ResultInvalidCurrentMemory; + R_THROW(ResultInvalidCurrentMemory); } if (!page_table.IsInsideASLRRegion(dst_address, size)) { @@ -264,11 +260,11 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d "Destination address range is not within the ASLR region (dst_address=0x{:016X}, " "size=0x{:016X}).", dst_address, size); - return ResultInvalidMemoryRegion; + R_THROW(ResultInvalidMemoryRegion); } - return page_table.UnmapCodeMemory(dst_address, src_address, size, - KPageTable::ICacheInvalidationStrategy::InvalidateAll); + R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size, + KPageTable::ICacheInvalidationStrategy::InvalidateAll)); } Result SetProcessMemoryPermission64(Core::System& system, Handle process_handle, uint64_t address, diff --git a/src/core/hle/kernel/svc/svc_query_memory.cpp b/src/core/hle/kernel/svc/svc_query_memory.cpp index ee75ad370..457ebf950 100644 --- a/src/core/hle/kernel/svc/svc_query_memory.cpp +++ b/src/core/hle/kernel/svc/svc_query_memory.cpp @@ -15,8 +15,8 @@ Result QueryMemory(Core::System& system, uint64_t out_memory_info, PageInfo* out out_memory_info, query_address); // Query memory is just QueryProcessMemory on the current process. - return QueryProcessMemory(system, out_memory_info, out_page_info, CurrentProcess, - query_address); + R_RETURN( + QueryProcessMemory(system, out_memory_info, out_page_info, CurrentProcess, query_address)); } Result QueryProcessMemory(Core::System& system, uint64_t out_memory_info, PageInfo* out_page_info, @@ -27,13 +27,13 @@ Result QueryProcessMemory(Core::System& system, uint64_t out_memory_info, PageIn if (process.IsNull()) { LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}", process_handle); - return ResultInvalidHandle; + R_THROW(ResultInvalidHandle); } auto& memory{system.Memory()}; const auto memory_info{process->PageTable().QueryInfo(address).GetSvcMemoryInfo()}; - memory.WriteBlock(out_memory_info, &memory_info, sizeof(memory_info)); + memory.WriteBlock(out_memory_info, std::addressof(memory_info), sizeof(memory_info)); //! 
This is supposed to be part of the QueryInfo call. *out_page_info = {}; diff --git a/src/core/hle/kernel/svc/svc_resource_limit.cpp b/src/core/hle/kernel/svc/svc_resource_limit.cpp index 88166299e..732bc017e 100644 --- a/src/core/hle/kernel/svc/svc_resource_limit.cpp +++ b/src/core/hle/kernel/svc/svc_resource_limit.cpp @@ -21,15 +21,13 @@ Result CreateResourceLimit(Core::System& system, Handle* out_handle) { SCOPE_EXIT({ resource_limit->Close(); }); // Initialize the resource limit. - resource_limit->Initialize(&system.CoreTiming()); + resource_limit->Initialize(std::addressof(system.CoreTiming())); // Register the limit. KResourceLimit::Register(kernel, resource_limit); // Add the limit to the handle table. - R_TRY(GetCurrentProcess(kernel).GetHandleTable().Add(out_handle, resource_limit)); - - return ResultSuccess; + R_RETURN(GetCurrentProcess(kernel).GetHandleTable().Add(out_handle, resource_limit)); } Result GetResourceLimitLimitValue(Core::System& system, s64* out_limit_value, @@ -49,7 +47,7 @@ Result GetResourceLimitLimitValue(Core::System& system, s64* out_limit_value, // Get the limit value. *out_limit_value = resource_limit->GetLimitValue(which); - return ResultSuccess; + R_SUCCEED(); } Result GetResourceLimitCurrentValue(Core::System& system, s64* out_current_value, @@ -69,7 +67,7 @@ Result GetResourceLimitCurrentValue(Core::System& system, s64* out_current_value // Get the current value. *out_current_value = resource_limit->GetCurrentValue(which); - return ResultSuccess; + R_SUCCEED(); } Result SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle, @@ -87,9 +85,7 @@ Result SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_ha R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle); // Set the limit value. 
- R_TRY(resource_limit->SetLimitValue(which, limit_value)); - - return ResultSuccess; + R_RETURN(resource_limit->SetLimitValue(which, limit_value)); } Result GetResourceLimitPeakValue(Core::System& system, int64_t* out_peak_value, diff --git a/src/core/hle/kernel/svc/svc_secure_monitor_call.cpp b/src/core/hle/kernel/svc/svc_secure_monitor_call.cpp index 20f6ec643..62c781551 100644 --- a/src/core/hle/kernel/svc/svc_secure_monitor_call.cpp +++ b/src/core/hle/kernel/svc/svc_secure_monitor_call.cpp @@ -29,7 +29,7 @@ void SvcWrap_CallSecureMonitor64(Core::System& system) { args.r[i] = core.GetReg(i); } - CallSecureMonitor64(system, &args); + CallSecureMonitor64(system, std::addressof(args)); for (int i = 0; i < 8; i++) { core.SetReg(i, args.r[i]); @@ -43,7 +43,7 @@ void SvcWrap_CallSecureMonitor64From32(Core::System& system) { args.r[i] = static_cast<u32>(core.GetReg(i)); } - CallSecureMonitor64From32(system, &args); + CallSecureMonitor64From32(system, std::addressof(args)); for (int i = 0; i < 8; i++) { core.SetReg(i, args.r[i]); diff --git a/src/core/hle/kernel/svc/svc_session.cpp b/src/core/hle/kernel/svc/svc_session.cpp index 6dd242dcf..01b8a52ad 100644 --- a/src/core/hle/kernel/svc/svc_session.cpp +++ b/src/core/hle/kernel/svc/svc_session.cpp @@ -12,7 +12,7 @@ namespace Kernel::Svc { namespace { template <typename T> -Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client, u64 name) { +Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client, uint64_t name) { auto& process = GetCurrentProcess(system.Kernel()); auto& handle_table = process.GetHandleTable(); @@ -21,16 +21,17 @@ Result CreateSession(Core::System& system, Handle* out_server, Handle* out_clien // Reserve a new session from the process resource limit. // FIXME: LimitableResource_SessionCountMax - KScopedResourceReservation session_reservation(&process, LimitableResource::SessionCountMax); + KScopedResourceReservation session_reservation(std::addressof(process), + LimitableResource::SessionCountMax); if (session_reservation.Succeeded()) { session = T::Create(system.Kernel()); } else { - return ResultLimitReached; + R_THROW(ResultLimitReached); // // We couldn't reserve a session. Check that we support dynamically expanding the // // resource limit. // R_UNLESS(process.GetResourceLimit() == - // &system.Kernel().GetSystemResourceLimit(), ResultLimitReached); + // std::addressof(system.Kernel().GetSystemResourceLimit()), ResultLimitReached); // R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), ResultLimitReached()); // // Try to allocate a session from unused slab memory. @@ -59,7 +60,7 @@ Result CreateSession(Core::System& system, Handle* out_server, Handle* out_clien R_UNLESS(session != nullptr, ResultOutOfResource); // Initialize the session. - session->Initialize(nullptr, fmt::format("{}", name)); + session->Initialize(nullptr, name); // Commit the session reservation. session_reservation.Commit(); @@ -75,17 +76,15 @@ Result CreateSession(Core::System& system, Handle* out_server, Handle* out_clien T::Register(system.Kernel(), session); // Add the server session to the handle table. - R_TRY(handle_table.Add(out_server, &session->GetServerSession())); + R_TRY(handle_table.Add(out_server, std::addressof(session->GetServerSession()))); - // Add the client session to the handle table. - const auto result = handle_table.Add(out_client, &session->GetClientSession()); - - if (!R_SUCCEEDED(result)) { - // Ensure that we maintain a clean handle state on exit. 
+ // Ensure that we maintain a clean handle state on exit. + ON_RESULT_FAILURE { handle_table.Remove(*out_server); - } + }; - return result; + // Add the client session to the handle table. + R_RETURN(handle_table.Add(out_client, std::addressof(session->GetClientSession()))); } } // namespace @@ -94,9 +93,9 @@ Result CreateSession(Core::System& system, Handle* out_server, Handle* out_clien u64 name) { if (is_light) { // return CreateSession<KLightSession>(system, out_server, out_client, name); - return ResultNotImplemented; + R_THROW(ResultNotImplemented); } else { - return CreateSession<KSession>(system, out_server, out_client, name); + R_RETURN(CreateSession<KSession>(system, out_server, out_client, name)); } } diff --git a/src/core/hle/kernel/svc/svc_shared_memory.cpp b/src/core/hle/kernel/svc/svc_shared_memory.cpp index 18e0dc904..40d878f17 100644 --- a/src/core/hle/kernel/svc/svc_shared_memory.cpp +++ b/src/core/hle/kernel/svc/svc_shared_memory.cpp @@ -56,15 +56,12 @@ Result MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, R_TRY(process.AddSharedMemory(shmem.GetPointerUnsafe(), address, size)); // Ensure that we clean up the shared memory if we fail to map it. - auto guard = - SCOPE_GUARD({ process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size); }); + ON_RESULT_FAILURE { + process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size); + }; // Map the shared memory. - R_TRY(shmem->Map(process, address, size, map_perm)); - - // We succeeded. - guard.Cancel(); - return ResultSuccess; + R_RETURN(shmem->Map(process, address, size, map_perm)); } Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size) { @@ -91,7 +88,7 @@ Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr addres // Remove the shared memory from the process. process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size); - return ResultSuccess; + R_SUCCEED(); } Result CreateSharedMemory(Core::System& system, Handle* out_handle, uint64_t size, diff --git a/src/core/hle/kernel/svc/svc_synchronization.cpp b/src/core/hle/kernel/svc/svc_synchronization.cpp index 9e7bf9530..660b45c23 100644 --- a/src/core/hle/kernel/svc/svc_synchronization.cpp +++ b/src/core/hle/kernel/svc/svc_synchronization.cpp @@ -17,7 +17,7 @@ Result CloseHandle(Core::System& system, Handle handle) { R_UNLESS(GetCurrentProcess(system.Kernel()).GetHandleTable().Remove(handle), ResultInvalidHandle); - return ResultSuccess; + R_SUCCEED(); } /// Clears the signaled state of an event or process. @@ -31,7 +31,7 @@ Result ResetSignal(Core::System& system, Handle handle) { { KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(handle); if (readable_event.IsNotNull()) { - return readable_event->Reset(); + R_RETURN(readable_event->Reset()); } } @@ -39,13 +39,11 @@ Result ResetSignal(Core::System& system, Handle handle) { { KScopedAutoObject process = handle_table.GetObject<KProcess>(handle); if (process.IsNotNull()) { - return process->Reset(); + R_RETURN(process->Reset()); } } - LOG_ERROR(Kernel_SVC, "invalid handle (0x{:08X})", handle); - - return ResultInvalidHandle; + R_THROW(ResultInvalidHandle); } static Result WaitSynchronization(Core::System& system, int32_t* out_index, const Handle* handles, @@ -109,7 +107,7 @@ Result CancelSynchronization(Core::System& system, Handle handle) { // Cancel the thread's wait. 
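(The CreateSession rewrite above is the intended use of ON_RESULT_FAILURE: when two handles must be registered, a failure adding the second has to roll back the first. Assumed behavior, consistent with how the hunks use it: the block runs only if a later R_TRY/R_THROW/R_RETURN in the same scope produces a failing Result.)

    // Register the first handle; on a later failure, remove it again.
    R_TRY(handle_table.Add(out_server, std::addressof(session->GetServerSession())));

    ON_RESULT_FAILURE {
        handle_table.Remove(*out_server);  // roll back the first handle
    };

    // If this Add() fails, the failure block removes out_server before the
    // error propagates; if it succeeds, the block never runs.
    R_RETURN(handle_table.Add(out_client, std::addressof(session->GetClientSession())));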
thread->WaitCancel(); - return ResultSuccess; + R_SUCCEED(); } void SynchronizePreemptionState(Core::System& system) { diff --git a/src/core/hle/kernel/svc/svc_thread.cpp b/src/core/hle/kernel/svc/svc_thread.cpp index 9bc1ebe74..50991fb62 100644 --- a/src/core/hle/kernel/svc/svc_thread.cpp +++ b/src/core/hle/kernel/svc/svc_thread.cpp @@ -34,51 +34,31 @@ Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, } // Validate arguments. - if (!IsValidVirtualCoreId(core_id)) { - LOG_ERROR(Kernel_SVC, "Invalid Core ID specified (id={})", core_id); - return ResultInvalidCoreId; - } - if (((1ULL << core_id) & process.GetCoreMask()) == 0) { - LOG_ERROR(Kernel_SVC, "Core ID doesn't fall within allowable cores (id={})", core_id); - return ResultInvalidCoreId; - } + R_UNLESS(IsValidVirtualCoreId(core_id), ResultInvalidCoreId); + R_UNLESS(((1ull << core_id) & process.GetCoreMask()) != 0, ResultInvalidCoreId); - if (HighestThreadPriority > priority || priority > LowestThreadPriority) { - LOG_ERROR(Kernel_SVC, "Invalid priority specified (priority={})", priority); - return ResultInvalidPriority; - } - if (!process.CheckThreadPriority(priority)) { - LOG_ERROR(Kernel_SVC, "Invalid allowable thread priority (priority={})", priority); - return ResultInvalidPriority; - } + R_UNLESS(HighestThreadPriority <= priority && priority <= LowestThreadPriority, + ResultInvalidPriority); + R_UNLESS(process.CheckThreadPriority(priority), ResultInvalidPriority); // Reserve a new thread from the process resource limit (waiting up to 100ms). - KScopedResourceReservation thread_reservation(&process, LimitableResource::ThreadCountMax, 1, - system.CoreTiming().GetGlobalTimeNs().count() + - 100000000); - if (!thread_reservation.Succeeded()) { - LOG_ERROR(Kernel_SVC, "Could not reserve a new thread"); - return ResultLimitReached; - } + KScopedResourceReservation thread_reservation( + std::addressof(process), LimitableResource::ThreadCountMax, 1, + system.CoreTiming().GetGlobalTimeNs().count() + 100000000); + R_UNLESS(thread_reservation.Succeeded(), ResultLimitReached); // Create the thread. KThread* thread = KThread::Create(kernel); - if (!thread) { - LOG_ERROR(Kernel_SVC, "Unable to create new threads. Thread creation limit reached."); - return ResultOutOfResource; - } + R_UNLESS(thread != nullptr, ResultOutOfResource); SCOPE_EXIT({ thread->Close(); }); // Initialize the thread. { KScopedLightLock lk{process.GetStateLock()}; R_TRY(KThread::InitializeUserThread(system, thread, entry_point, arg, stack_bottom, - priority, core_id, &process)); + priority, core_id, std::addressof(process))); } - // Set the thread name for debugging purposes. - thread->SetName(fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *out_handle)); - // Commit the thread reservation. thread_reservation.Commit(); @@ -89,9 +69,7 @@ Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, KThread::Register(kernel, thread); // Add the thread to the handle table. 
- R_TRY(process.GetHandleTable().Add(out_handle, thread)); - - return ResultSuccess; + R_RETURN(process.GetHandleTable().Add(out_handle, thread)); } /// Starts the thread for the provided handle @@ -110,7 +88,7 @@ Result StartThread(Core::System& system, Handle thread_handle) { thread->Open(); system.Kernel().RegisterInUseObject(thread.GetPointerUnsafe()); - return ResultSuccess; + R_SUCCEED(); } /// Called when a thread exits @@ -202,10 +180,8 @@ Result GetThreadContext3(Core::System& system, VAddr out_context, Handle thread_ // Copy the thread context to user space. system.Memory().WriteBlock(out_context, context.data(), context.size()); - return ResultSuccess; + R_SUCCEED(); } - - return ResultSuccess; } /// Gets the priority for the specified thread @@ -219,7 +195,7 @@ Result GetThreadPriority(Core::System& system, s32* out_priority, Handle handle) // Get the thread's priority. *out_priority = thread->GetPriority(); - return ResultSuccess; + R_SUCCEED(); } /// Sets the priority for the specified thread @@ -238,7 +214,7 @@ Result SetThreadPriority(Core::System& system, Handle thread_handle, s32 priorit // Set the thread priority. thread->SetBasePriority(priority); - return ResultSuccess; + R_SUCCEED(); } Result GetThreadList(Core::System& system, s32* out_num_threads, VAddr out_thread_ids, @@ -253,7 +229,7 @@ Result GetThreadList(Core::System& system, s32* out_num_threads, VAddr out_threa if ((out_thread_ids_size & 0xF0000000) != 0) { LOG_ERROR(Kernel_SVC, "Supplied size outside [0, 0x0FFFFFFF] range. size={}", out_thread_ids_size); - return ResultOutOfRange; + R_THROW(ResultOutOfRange); } auto* const current_process = GetCurrentProcessPointer(system.Kernel()); @@ -263,7 +239,7 @@ Result GetThreadList(Core::System& system, s32* out_num_threads, VAddr out_threa !current_process->PageTable().IsInsideAddressSpace(out_thread_ids, total_copy_size)) { LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}", out_thread_ids, out_thread_ids + total_copy_size); - return ResultInvalidCurrentMemory; + R_THROW(ResultInvalidCurrentMemory); } auto& memory = system.Memory(); @@ -273,12 +249,12 @@ Result GetThreadList(Core::System& system, s32* out_num_threads, VAddr out_threa auto list_iter = thread_list.cbegin(); for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) { - memory.Write64(out_thread_ids, (*list_iter)->GetThreadID()); + memory.Write64(out_thread_ids, (*list_iter)->GetThreadId()); out_thread_ids += sizeof(u64); } *out_num_threads = static_cast<u32>(num_threads); - return ResultSuccess; + R_SUCCEED(); } Result GetThreadCoreMask(Core::System& system, s32* out_core_id, u64* out_affinity_mask, @@ -291,9 +267,7 @@ Result GetThreadCoreMask(Core::System& system, s32* out_core_id, u64* out_affini R_UNLESS(thread.IsNotNull(), ResultInvalidHandle); // Get the core mask. - R_TRY(thread->GetCoreMask(out_core_id, out_affinity_mask)); - - return ResultSuccess; + R_RETURN(thread->GetCoreMask(out_core_id, out_affinity_mask)); } Result SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id, @@ -323,9 +297,7 @@ Result SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id R_UNLESS(thread.IsNotNull(), ResultInvalidHandle); // Set the core mask. - R_TRY(thread->SetCoreMask(core_id, affinity_mask)); - - return ResultSuccess; + R_RETURN(thread->SetCoreMask(core_id, affinity_mask)); } /// Get the ID for the specified thread. 
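(CreateThread above keeps the reserve → create → commit discipline. Assumed contract of KScopedResourceReservation, sketched: its destructor releases the reserved slot unless Commit() was called, so every failure path between reservation and commit returns the slot automatically.)

    // deadline_ns is a hypothetical name for the absolute timeout shown above.
    KScopedResourceReservation thread_reservation(
        std::addressof(process), LimitableResource::ThreadCountMax, 1, deadline_ns);
    R_UNLESS(thread_reservation.Succeeded(), ResultLimitReached);

    KThread* thread = KThread::Create(kernel);
    R_UNLESS(thread != nullptr, ResultOutOfResource);
    SCOPE_EXIT({ thread->Close(); });  // drop the creation reference on every path

    R_TRY(KThread::InitializeUserThread(system, thread, entry_point, arg, stack_bottom,
                                        priority, core_id, std::addressof(process)));

    thread_reservation.Commit();  // success: the slot now belongs to the thread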
@@ -337,7 +309,7 @@ Result GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handl // Get the thread's id. *out_thread_id = thread->GetId(); - return ResultSuccess; + R_SUCCEED(); } Result CreateThread64(Core::System& system, Handle* out_handle, uint64_t func, uint64_t arg, diff --git a/src/core/hle/kernel/svc/svc_transfer_memory.cpp b/src/core/hle/kernel/svc/svc_transfer_memory.cpp index 7ffc24adf..394f06728 100644 --- a/src/core/hle/kernel/svc/svc_transfer_memory.cpp +++ b/src/core/hle/kernel/svc/svc_transfer_memory.cpp @@ -43,7 +43,7 @@ Result CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u6 auto& handle_table = process.GetHandleTable(); // Reserve a new transfer memory from the process resource limit. - KScopedResourceReservation trmem_reservation(&process, + KScopedResourceReservation trmem_reservation(std::addressof(process), LimitableResource::TransferMemoryCountMax); R_UNLESS(trmem_reservation.Succeeded(), ResultLimitReached); @@ -67,9 +67,7 @@ Result CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u6 KTransferMemory::Register(kernel, trmem); // Add the transfer memory to the handle table. - R_TRY(handle_table.Add(out, trmem)); - - return ResultSuccess; + R_RETURN(handle_table.Add(out, trmem)); } Result MapTransferMemory(Core::System& system, Handle trmem_handle, uint64_t address, uint64_t size, diff --git a/src/core/hle/kernel/svc_generator.py b/src/core/hle/kernel/svc_generator.py index 0cce69e85..7fcbb1ba1 100644 --- a/src/core/hle/kernel/svc_generator.py +++ b/src/core/hle/kernel/svc_generator.py @@ -460,7 +460,7 @@ def emit_wrapper(wrapped_fn, suffix, register_info, arguments, byte_size): call_arguments = ["system"] for arg in arguments: if arg.is_output and not arg.is_outptr: - call_arguments.append(f"&{arg.var_name}") + call_arguments.append(f"std::addressof({arg.var_name})") else: call_arguments.append(arg.var_name) @@ -574,9 +574,9 @@ static To Convert(const From& from) { To to{}; if constexpr (sizeof(To) >= sizeof(From)) { - std::memcpy(&to, &from, sizeof(From)); + std::memcpy(std::addressof(to), std::addressof(from), sizeof(From)); } else { - std::memcpy(&to, &from, sizeof(To)); + std::memcpy(std::addressof(to), std::addressof(from), sizeof(To)); } return to; diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp index f17df5124..deeca925d 100644 --- a/src/core/hle/service/am/am.cpp +++ b/src/core/hle/service/am/am.cpp @@ -79,7 +79,7 @@ IWindowController::IWindowController(Core::System& system_) IWindowController::~IWindowController() = default; void IWindowController::GetAppletResourceUserId(HLERequestContext& ctx) { - const u64 process_id = system.ApplicationProcess()->GetProcessID(); + const u64 process_id = system.ApplicationProcess()->GetProcessId(); LOG_DEBUG(Service_AM, "called. 
Process ID=0x{:016X}", process_id); diff --git a/src/core/hle/service/glue/arp.cpp b/src/core/hle/service/glue/arp.cpp index 929dcca0d..ed6fcb5f6 100644 --- a/src/core/hle/service/glue/arp.cpp +++ b/src/core/hle/service/glue/arp.cpp @@ -18,14 +18,14 @@ namespace { std::optional<u64> GetTitleIDForProcessID(const Core::System& system, u64 process_id) { const auto& list = system.Kernel().GetProcessList(); const auto iter = std::find_if(list.begin(), list.end(), [&process_id](const auto& process) { - return process->GetProcessID() == process_id; + return process->GetProcessId() == process_id; }); if (iter == list.end()) { return std::nullopt; } - return (*iter)->GetProgramID(); + return (*iter)->GetProgramId(); } } // Anonymous namespace diff --git a/src/core/hle/service/hle_ipc.cpp b/src/core/hle/service/hle_ipc.cpp index c221ffe11..cca697c64 100644 --- a/src/core/hle/service/hle_ipc.cpp +++ b/src/core/hle/service/hle_ipc.cpp @@ -303,7 +303,7 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(Kernel::KThread& requesti } // Copy the translated command buffer back into the thread's command buffer area. - memory.WriteBlock(owner_process, requesting_thread.GetTLSAddress(), cmd_buf.data(), + memory.WriteBlock(owner_process, requesting_thread.GetTlsAddress(), cmd_buf.data(), write_size * sizeof(u32)); return ResultSuccess; diff --git a/src/core/hle/service/ipc_helpers.h b/src/core/hle/service/ipc_helpers.h index 8703b57ca..e4cb4e1f2 100644 --- a/src/core/hle/service/ipc_helpers.h +++ b/src/core/hle/service/ipc_helpers.h @@ -155,7 +155,7 @@ public: Kernel::LimitableResource::SessionCountMax, 1); auto* session = Kernel::KSession::Create(kernel); - session->Initialize(nullptr, iface->GetServiceName()); + session->Initialize(nullptr, 0); auto next_manager = std::make_shared<Service::SessionRequestManager>( kernel, manager->GetServerManager()); diff --git a/src/core/hle/service/pm/pm.cpp b/src/core/hle/service/pm/pm.cpp index ea249c26f..f9cf2dda3 100644 --- a/src/core/hle/service/pm/pm.cpp +++ b/src/core/hle/service/pm/pm.cpp @@ -37,12 +37,12 @@ std::optional<Kernel::KProcess*> SearchProcessList( void GetApplicationPidGeneric(HLERequestContext& ctx, const std::vector<Kernel::KProcess*>& process_list) { const auto process = SearchProcessList(process_list, [](const auto& proc) { - return proc->GetProcessID() == Kernel::KProcess::ProcessIDMin; + return proc->GetProcessId() == Kernel::KProcess::ProcessIDMin; }); IPC::ResponseBuilder rb{ctx, 4}; rb.Push(ResultSuccess); - rb.Push(process.has_value() ? (*process)->GetProcessID() : NO_PROCESS_FOUND_PID); + rb.Push(process.has_value() ? 
(*process)->GetProcessId() : NO_PROCESS_FOUND_PID); } } // Anonymous namespace @@ -108,7 +108,7 @@ private: const auto process = SearchProcessList(kernel.GetProcessList(), [program_id](const auto& proc) { - return proc->GetProgramID() == program_id; + return proc->GetProgramId() == program_id; }); if (!process.has_value()) { @@ -119,7 +119,7 @@ private: IPC::ResponseBuilder rb{ctx, 4}; rb.Push(ResultSuccess); - rb.Push((*process)->GetProcessID()); + rb.Push((*process)->GetProcessId()); } void GetApplicationProcessId(HLERequestContext& ctx) { @@ -136,7 +136,7 @@ private: LOG_WARNING(Service_PM, "(Partial Implementation) called, pid={:016X}", pid); const auto process = SearchProcessList(kernel.GetProcessList(), [pid](const auto& proc) { - return proc->GetProcessID() == pid; + return proc->GetProcessId() == pid; }); if (!process.has_value()) { @@ -159,7 +159,7 @@ private: OverrideStatus override_status{}; ProgramLocation program_location{ - .program_id = (*process)->GetProgramID(), + .program_id = (*process)->GetProgramId(), .storage_id = 0, }; @@ -194,7 +194,7 @@ private: LOG_DEBUG(Service_PM, "called, process_id={:016X}", process_id); const auto process = SearchProcessList(process_list, [process_id](const auto& proc) { - return proc->GetProcessID() == process_id; + return proc->GetProcessId() == process_id; }); if (!process.has_value()) { @@ -205,7 +205,7 @@ private: IPC::ResponseBuilder rb{ctx, 4}; rb.Push(ResultSuccess); - rb.Push((*process)->GetProgramID()); + rb.Push((*process)->GetProgramId()); } void AtmosphereGetProcessId(HLERequestContext& ctx) { @@ -215,7 +215,7 @@ private: LOG_DEBUG(Service_PM, "called, program_id={:016X}", program_id); const auto process = SearchProcessList(process_list, [program_id](const auto& proc) { - return proc->GetProgramID() == program_id; + return proc->GetProgramId() == program_id; }); if (!process.has_value()) { @@ -226,7 +226,7 @@ private: IPC::ResponseBuilder rb{ctx, 4}; rb.Push(ResultSuccess); - rb.Push((*process)->GetProcessID()); + rb.Push((*process)->GetProcessId()); } const std::vector<Kernel::KProcess*>& process_list; diff --git a/src/core/hle/service/server_manager.cpp b/src/core/hle/service/server_manager.cpp index bd04cd023..6b4a1291e 100644 --- a/src/core/hle/service/server_manager.cpp +++ b/src/core/hle/service/server_manager.cpp @@ -124,7 +124,7 @@ Result ServerManager::ManageNamedPort(const std::string& service_name, // Create a new port. auto* port = Kernel::KPort::Create(m_system.Kernel()); - port->Initialize(max_sessions, false, service_name); + port->Initialize(max_sessions, false, 0); // Register the port. 
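(The `Initialize(..., "")` → `Initialize(..., 0)` changes in svc_port, server_manager, sm, sm_controller, and ipc_helpers all follow from the port/session name parameter becoming an integer rather than a string, matching the raw u64 name that the SVC interface itself carries. Assumed shape of the new signature, for illustration only; the exact integer type may differ in the real headers:)

    // Assumed post-change signature:
    void Initialize(s32 max_sessions, bool is_light, uintptr_t name);

    // Unnamed ports and sessions simply pass 0:
    port->Initialize(max_sessions, false, 0);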
     Kernel::KPort::Register(m_system.Kernel(), port);
diff --git a/src/core/hle/service/sm/sm.cpp b/src/core/hle/service/sm/sm.cpp
index b4046d3ce..c45be5726 100644
--- a/src/core/hle/service/sm/sm.cpp
+++ b/src/core/hle/service/sm/sm.cpp
@@ -62,7 +62,7 @@ Result ServiceManager::RegisterService(std::string name, u32 max_sessions,
     }
 
     auto* port = Kernel::KPort::Create(kernel);
-    port->Initialize(ServerSessionCountMax, false, name);
+    port->Initialize(ServerSessionCountMax, false, 0);
 
     service_ports.emplace(name, port);
     registered_services.emplace(name, handler);
@@ -211,7 +211,7 @@ void SM::RegisterService(HLERequestContext& ctx) {
     }
 
     auto* port = Kernel::KPort::Create(kernel);
-    port->Initialize(ServerSessionCountMax, is_light, name);
+    port->Initialize(ServerSessionCountMax, is_light, 0);
     SCOPE_EXIT({ port->GetClientPort().Close(); });
 
     IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles};
diff --git a/src/core/hle/service/sm/sm_controller.cpp b/src/core/hle/service/sm/sm_controller.cpp
index 0111c8d7f..419c1df2b 100644
--- a/src/core/hle/service/sm/sm_controller.cpp
+++ b/src/core/hle/service/sm/sm_controller.cpp
@@ -44,7 +44,7 @@ void Controller::CloneCurrentObject(HLERequestContext& ctx) {
     ASSERT(session != nullptr);
 
     // Initialize the session.
-    session->Initialize(nullptr, "");
+    session->Initialize(nullptr, 0);
 
     // Commit the session reservation.
     session_reservation.Commit();
diff --git a/src/core/memory/cheat_engine.cpp b/src/core/memory/cheat_engine.cpp
index c2d96bbec..de729955f 100644
--- a/src/core/memory/cheat_engine.cpp
+++ b/src/core/memory/cheat_engine.cpp
@@ -196,7 +196,7 @@ void CheatEngine::Initialize() {
     });
     core_timing.ScheduleLoopingEvent(CHEAT_ENGINE_NS, CHEAT_ENGINE_NS, event);
 
-    metadata.process_id = system.ApplicationProcess()->GetProcessID();
+    metadata.process_id = system.ApplicationProcess()->GetProcessId();
     metadata.title_id = system.GetApplicationProcessProgramID();
 
     const auto& page_table = system.ApplicationProcess()->PageTable();
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index 7f7c5fc42..0783a2430 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -112,33 +112,6 @@ QString WaitTreeText::GetText() const {
     return text;
 }
 
-WaitTreeMutexInfo::WaitTreeMutexInfo(VAddr mutex_address_, const Kernel::KHandleTable& handle_table,
-                                     Core::System& system_)
-    : mutex_address{mutex_address_}, system{system_} {
-    mutex_value = system.Memory().Read32(mutex_address);
-    owner_handle = static_cast<Kernel::Handle>(mutex_value & Kernel::Svc::HandleWaitMask);
-    owner = handle_table.GetObject<Kernel::KThread>(owner_handle).GetPointerUnsafe();
-}
-
-WaitTreeMutexInfo::~WaitTreeMutexInfo() = default;
-
-QString WaitTreeMutexInfo::GetText() const {
-    return tr("waiting for mutex 0x%1").arg(mutex_address, 16, 16, QLatin1Char{'0'});
-}
-
-std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutexInfo::GetChildren() const {
-    const bool has_waiters = (mutex_value & Kernel::Svc::HandleWaitMask) != 0;
-
-    std::vector<std::unique_ptr<WaitTreeItem>> list;
-    list.push_back(std::make_unique<WaitTreeText>(tr("has waiters: %1").arg(has_waiters)));
-    list.push_back(std::make_unique<WaitTreeText>(
-        tr("owner handle: 0x%1").arg(owner_handle, 8, 16, QLatin1Char{'0'})));
-    if (owner != nullptr) {
-        list.push_back(std::make_unique<WaitTreeThread>(*owner, system));
-    }
-    return list;
-}
-
 WaitTreeCallstack::WaitTreeCallstack(const Kernel::KThread& thread_, Core::System& system_)
     : thread{thread_}, system{system_} {}
 
 WaitTreeCallstack::~WaitTreeCallstack() = default;
@@ -182,10 +155,9 @@ bool WaitTreeExpandableItem::IsExpandable() const {
 }
 
 QString WaitTreeSynchronizationObject::GetText() const {
-    return tr("[%1] %2 %3")
+    return tr("[%1] %2")
         .arg(object.GetId())
-        .arg(QString::fromStdString(object.GetTypeObj().GetName()),
-             QString::fromStdString(object.GetName()));
+        .arg(QString::fromStdString(object.GetTypeObj().GetName()));
 }
 
 std::unique_ptr<WaitTreeSynchronizationObject> WaitTreeSynchronizationObject::make(
@@ -217,26 +189,6 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeSynchronizationObject::GetChi
     return list;
 }
 
-WaitTreeObjectList::WaitTreeObjectList(const std::vector<Kernel::KSynchronizationObject*>& list,
-                                       bool w_all, Core::System& system_)
-    : object_list(list), wait_all(w_all), system{system_} {}
-
-WaitTreeObjectList::~WaitTreeObjectList() = default;
-
-QString WaitTreeObjectList::GetText() const {
-    if (wait_all)
-        return tr("waiting for all objects");
-    return tr("waiting for one of the following objects");
-}
-
-std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeObjectList::GetChildren() const {
-    std::vector<std::unique_ptr<WaitTreeItem>> list(object_list.size());
-    std::transform(object_list.begin(), object_list.end(), list.begin(), [this](const auto& t) {
-        return WaitTreeSynchronizationObject::make(*t, system);
-    });
-    return list;
-}
-
 WaitTreeThread::WaitTreeThread(const Kernel::KThread& thread, Core::System& system_)
     : WaitTreeSynchronizationObject(thread, system_), system{system_} {}
 
 WaitTreeThread::~WaitTreeThread() = default;
@@ -348,32 +300,14 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
     list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor)));
     list.push_back(std::make_unique<WaitTreeText>(
-        tr("ideal core = %1").arg(thread.GetIdealCoreForDebugging())));
-    list.push_back(std::make_unique<WaitTreeText>(
         tr("affinity mask = %1").arg(thread.GetAffinityMask().GetAffinityMask())));
-    list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID())));
+    list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadId())));
     list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)")
                                                       .arg(thread.GetPriority())
                                                       .arg(thread.GetBasePriority())));
     list.push_back(std::make_unique<WaitTreeText>(
         tr("last running ticks = %1").arg(thread.GetLastScheduledTick())));
 
-    const VAddr mutex_wait_address = thread.GetMutexWaitAddressForDebugging();
-    if (mutex_wait_address != 0) {
-        const auto& handle_table = thread.GetOwnerProcess()->GetHandleTable();
-        list.push_back(
-            std::make_unique<WaitTreeMutexInfo>(mutex_wait_address, handle_table, system));
-    } else {
-        list.push_back(std::make_unique<WaitTreeText>(tr("not waiting for mutex")));
-    }
-
-    if (thread.GetState() == Kernel::ThreadState::Waiting &&
-        thread.GetWaitReasonForDebugging() ==
-            Kernel::ThreadWaitReasonForDebugging::Synchronization) {
-        list.push_back(std::make_unique<WaitTreeObjectList>(thread.GetWaitObjectsForDebugging(),
-                                                            thread.IsCancellable(), system));
-    }
-
     list.push_back(std::make_unique<WaitTreeCallstack>(thread, system));
 
     return list;
diff --git a/src/yuzu/debugger/wait_tree.h b/src/yuzu/debugger/wait_tree.h
index 7e528b592..23c329fbe 100644
--- a/src/yuzu/debugger/wait_tree.h
+++ b/src/yuzu/debugger/wait_tree.h
@@ -74,25 +74,6 @@ public:
     bool IsExpandable() const override;
 };
 
-class WaitTreeMutexInfo : public WaitTreeExpandableItem {
-    Q_OBJECT
-public:
-    explicit WaitTreeMutexInfo(VAddr mutex_address_, const Kernel::KHandleTable& handle_table,
-                               Core::System& system_);
-    ~WaitTreeMutexInfo() override;
-
-    QString GetText() const override;
-    std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
-
-private:
-    VAddr mutex_address{};
-    u32 mutex_value{};
-    Kernel::Handle owner_handle{};
-    Kernel::KThread* owner{};
-
-    Core::System& system;
-};
-
 class WaitTreeCallstack : public WaitTreeExpandableItem {
     Q_OBJECT
 public:
@@ -127,23 +108,6 @@ private:
     Core::System& system;
 };
 
-class WaitTreeObjectList : public WaitTreeExpandableItem {
-    Q_OBJECT
-public:
-    WaitTreeObjectList(const std::vector<Kernel::KSynchronizationObject*>& list, bool wait_all,
-                       Core::System& system_);
-    ~WaitTreeObjectList() override;
-
-    QString GetText() const override;
-    std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
-
-private:
-    const std::vector<Kernel::KSynchronizationObject*>& object_list;
-    bool wait_all;
-
-    Core::System& system;
-};
-
 class WaitTreeThread : public WaitTreeSynchronizationObject {
     Q_OBJECT
 public: