Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/hle_ipc.h                 |   2
-rw-r--r--  src/core/hle/kernel/init/init_slab_setup.cpp  |   2
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.h       |   4
-rw-r--r--  src/core/hle/kernel/k_address_space_info.cpp  |   4
-rw-r--r--  src/core/hle/kernel/k_auto_object.h           |  11
-rw-r--r--  src/core/hle/kernel/k_class_token.h           |   1
-rw-r--r--  src/core/hle/kernel/k_code_memory.cpp         |  28
-rw-r--r--  src/core/hle/kernel/k_memory_manager.cpp      |   2
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp          | 127
-rw-r--r--  src/core/hle/kernel/k_page_table.h            |   5
-rw-r--r--  src/core/hle/kernel/k_port.cpp                |   2
-rw-r--r--  src/core/hle/kernel/k_process.cpp             |   6
-rw-r--r--  src/core/hle/kernel/k_server_session.cpp      |   4
-rw-r--r--  src/core/hle/kernel/k_thread.cpp              |   6
-rw-r--r--  src/core/hle/kernel/k_thread.h                |  25
-rw-r--r--  src/core/hle/kernel/kernel.cpp                |   8
-rw-r--r--  src/core/hle/kernel/svc.cpp                   |  12
17 files changed, 189 insertions, 60 deletions
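
A recurring pattern in the hunks below swaps UNREACHABLE()/UNREACHABLE_MSG(...) for ASSERT(false)/ASSERT_MSG(false, ...) wherever the surrounding code provides a fallback (for example "return ResultUnknown;" or "return {};"), while UNREACHABLE() is added only where a path truly cannot be taken (the k_class_token.h lambda and the already-validated default case in k_code_memory.cpp). The macros below are hypothetical stand-ins, not yuzu's actual common/assert.h (which is fmt-based); they only illustrate the recoverable-versus-fatal distinction.

// Hypothetical stand-ins for illustration; not yuzu's real assert macros.
#include <cstdio>
#include <cstdlib>

// Recoverable check: report the failure and keep executing, so the
// following error-return path still runs when asserts are disabled.
#define ASSERT_MSG(cond, ...)                              \
    do {                                                   \
        if (!(cond)) {                                     \
            std::fprintf(stderr, "Assert failed: ");       \
            std::fprintf(stderr, __VA_ARGS__);             \
            std::fputc('\n', stderr);                      \
        }                                                  \
    } while (0)

#define ASSERT(cond) ASSERT_MSG((cond), "%s", #cond)

// Fatal marker: reserved for states the code can never legitimately reach.
#define UNREACHABLE()                                      \
    do {                                                   \
        std::fprintf(stderr, "Unreachable code!\n");       \
        std::abort();                                      \
    } while (0)
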
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index a427cbc93..0ddc8df9e 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -141,7 +141,7 @@ public:
         if (index < DomainHandlerCount()) {
             domain_handlers[index] = nullptr;
         } else {
-            UNREACHABLE_MSG("Unexpected handler index {}", index);
+            ASSERT_MSG(false, "Unexpected handler index {}", index);
         }
     }
 
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 34a8be052..9b6b284d0 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -244,7 +244,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
             FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP)
         // If we somehow get an invalid type, abort.
         default:
-            UNREACHABLE_MSG("Unknown slab type: {}", slab_types[i]);
+            ASSERT_MSG(false, "Unknown slab type: {}", slab_types[i]);
         }
 
         // If we've hit the end of a gap, free it.
diff --git a/src/core/hle/kernel/k_address_arbiter.h b/src/core/hle/kernel/k_address_arbiter.h
index e46e0d848..5fa19d386 100644
--- a/src/core/hle/kernel/k_address_arbiter.h
+++ b/src/core/hle/kernel/k_address_arbiter.h
@@ -35,7 +35,7 @@ public:
         case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
             return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
         }
-        UNREACHABLE();
+        ASSERT(false);
         return ResultUnknown;
     }
 
@@ -49,7 +49,7 @@ public:
         case Svc::ArbitrationType::WaitIfEqual:
             return WaitIfEqual(addr, value, timeout);
         }
-        UNREACHABLE();
+        ASSERT(false);
         return ResultUnknown;
     }
 
diff --git a/src/core/hle/kernel/k_address_space_info.cpp b/src/core/hle/kernel/k_address_space_info.cpp
index bc37cadda..3e612a207 100644
--- a/src/core/hle/kernel/k_address_space_info.cpp
+++ b/src/core/hle/kernel/k_address_space_info.cpp
@@ -84,7 +84,7 @@ u64 KAddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
         ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[index]));
         return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
     }
-    UNREACHABLE();
+    ASSERT(false);
     return 0;
 }
 
@@ -101,7 +101,7 @@ std::size_t KAddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
         ASSERT(IsAllowed39BitType(type));
         return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
     }
-    UNREACHABLE();
+    ASSERT(false);
     return 0;
 }
 
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index ea47fc600..2827763d5 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -18,7 +18,7 @@ namespace Kernel {
 class KernelCore;
 class KProcess;
 
-#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
+#define KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, ATTRIBUTE) \
     \
 private: \
     friend class ::Kernel::KClassTokenGenerator; \
@@ -40,16 +40,19 @@ public:
     static constexpr const char* GetStaticTypeName() { \
         return TypeName; \
     } \
-    virtual TypeObj GetTypeObj() const { \
+    virtual TypeObj GetTypeObj() ATTRIBUTE { \
         return GetStaticTypeObj(); \
     } \
-    virtual const char* GetTypeName() const { \
+    virtual const char* GetTypeName() ATTRIBUTE { \
         return GetStaticTypeName(); \
     } \
     \
 private: \
     constexpr bool operator!=(const TypeObj& rhs)
 
+#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
+    KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, const override)
+
 class KAutoObject {
 protected:
     class TypeObj {
@@ -82,7 +85,7 @@ protected:
     };
 
 private:
-    KERNEL_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject);
+    KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const);
 
 public:
     explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {
diff --git a/src/core/hle/kernel/k_class_token.h b/src/core/hle/kernel/k_class_token.h
index be9e3c357..c9001ae3d 100644
--- a/src/core/hle/kernel/k_class_token.h
+++ b/src/core/hle/kernel/k_class_token.h
@@ -49,6 +49,7 @@ private:
                 }
             }
         }
+        UNREACHABLE();
     }();
 
     template <typename T>
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index fd3cbfd94..4ae40ec8e 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -27,23 +27,18 @@ ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr
     auto& page_table = m_owner->PageTable();
 
     // Construct the page group.
-    m_page_group =
-        KPageLinkedList(page_table.GetPhysicalAddr(addr), Common::DivideUp(size, PageSize));
+    m_page_group = {};
 
     // Lock the memory.
-    R_TRY(page_table.LockForCodeMemory(addr, size))
+    R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size))
 
     // Clear the memory.
-    //
-    // FIXME: this ends up clobbering address ranges outside the scope of the mapping within
-    // guest memory, and is not specifically required if the guest program is correctly
-    // written, so disable until this is further investigated.
-    //
-    // for (const auto& block : m_page_group.Nodes()) {
-    //     std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
-    // }
+    for (const auto& block : m_page_group.Nodes()) {
+        std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
+    }
 
     // Set remaining tracking members.
+    m_owner->Open();
     m_address = addr;
     m_is_initialized = true;
     m_is_owner_mapped = false;
@@ -57,8 +52,14 @@ void KCodeMemory::Finalize() {
     // Unlock.
     if (!m_is_mapped && !m_is_owner_mapped) {
         const size_t size = m_page_group.GetNumPages() * PageSize;
-        m_owner->PageTable().UnlockForCodeMemory(m_address, size);
+        m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group);
     }
+
+    // Close the page group.
+    m_page_group = {};
+
+    // Close our reference to our owner.
+    m_owner->Close();
 }
 
 ResultCode KCodeMemory::Map(VAddr address, size_t size) {
@@ -118,7 +119,8 @@ ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermis
         k_perm = KMemoryPermission::UserReadExecute;
         break;
     default:
-        break;
+        // Already validated by ControlCodeMemory svc
+        UNREACHABLE();
     }
 
     // Map the memory.
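
The k_code_memory.cpp hunks above change the locking contract: KPageTable::LockForCodeMemory now fills in the page group it locked, KCodeMemory clears exactly those pages and keeps the group, and UnlockForCodeMemory takes the same group back so it can be validated. A minimal sketch of that contract follows; Block, PageGroup, and PageTableModel are hypothetical stand-ins with error handling reduced to bool, not yuzu's KPageLinkedList/KPageTable.

#include <cstddef>
#include <cstdint>
#include <vector>

struct Block {
    std::uintptr_t phys_addr;
    std::size_t num_pages;
    friend bool operator==(const Block& a, const Block& b) {
        return a.phys_addr == b.phys_addr && a.num_pages == b.num_pages;
    }
};
using PageGroup = std::vector<Block>;

class PageTableModel {
public:
    // Lock returns the backing page group so the caller can clear those
    // pages and hold on to the group for the matching unlock.
    bool LockForCodeMemory(PageGroup* out, std::uintptr_t addr, std::size_t num_pages) {
        *out = Translate(addr, num_pages);
        return true;
    }

    // Unlock takes the group back and rejects it if it no longer matches
    // the current mapping, mirroring the new IsValidPageGroup() check.
    bool UnlockForCodeMemory(std::uintptr_t addr, std::size_t num_pages, const PageGroup& pg) {
        return !pg.empty() && Translate(addr, num_pages) == pg;
    }

private:
    // Placeholder translation: a single identity-mapped block. A real page
    // table walks its mapping structures and may return several blocks.
    PageGroup Translate(std::uintptr_t addr, std::size_t num_pages) const {
        return {Block{addr, num_pages}};
    }
};
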
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index a55db3088..58e540f31 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -29,7 +29,7 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
     } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
         return KMemoryManager::Pool::SystemNonSecure;
     } else {
-        UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool");
+        ASSERT_MSG(false, "InvalidMemoryRegionType for conversion to Pool");
         return {};
     }
 }
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index b38ef333b..504e22cb9 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -35,7 +35,7 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT
     case FileSys::ProgramAddressSpaceType::Is39Bit:
         return 39;
     default:
-        UNREACHABLE();
+        ASSERT(false);
         return {};
     }
 }
@@ -128,7 +128,7 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
     const std::size_t needed_size{
         (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
     if (alloc_size < needed_size) {
-        UNREACHABLE();
+        ASSERT(false);
         return ResultOutOfMemory;
     }
 
@@ -542,6 +542,95 @@ ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num
     return ResultSuccess;
 }
 
+bool KPageTable::IsValidPageGroup(const KPageLinkedList& pg_ll, VAddr addr, size_t num_pages) {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    const size_t size = num_pages * PageSize;
+    const auto& pg = pg_ll.Nodes();
+    const auto& memory_layout = system.Kernel().MemoryLayout();
+
+    // Empty groups are necessarily invalid.
+    if (pg.empty()) {
+        return false;
+    }
+
+    // We're going to validate that the group we'd expect is the group we see.
+    auto cur_it = pg.begin();
+    PAddr cur_block_address = cur_it->GetAddress();
+    size_t cur_block_pages = cur_it->GetNumPages();
+
+    auto UpdateCurrentIterator = [&]() {
+        if (cur_block_pages == 0) {
+            if ((++cur_it) == pg.end()) {
+                return false;
+            }
+
+            cur_block_address = cur_it->GetAddress();
+            cur_block_pages = cur_it->GetNumPages();
+        }
+        return true;
+    };
+
+    // Begin traversal.
+    Common::PageTable::TraversalContext context;
+    Common::PageTable::TraversalEntry next_entry;
+    if (!page_table_impl.BeginTraversal(next_entry, context, addr)) {
+        return false;
+    }
+
+    // Prepare tracking variables.
+    PAddr cur_addr = next_entry.phys_addr;
+    size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
+    size_t tot_size = cur_size;
+
+    // Iterate, comparing expected to actual.
+    while (tot_size < size) {
+        if (!page_table_impl.ContinueTraversal(next_entry, context)) {
+            return false;
+        }
+
+        if (next_entry.phys_addr != (cur_addr + cur_size)) {
+            const size_t cur_pages = cur_size / PageSize;
+
+            if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
+                return false;
+            }
+
+            if (!UpdateCurrentIterator()) {
+                return false;
+            }
+
+            if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
+                return false;
+            }
+
+            cur_block_address += cur_size;
+            cur_block_pages -= cur_pages;
+            cur_addr = next_entry.phys_addr;
+            cur_size = next_entry.block_size;
+        } else {
+            cur_size += next_entry.block_size;
+        }
+
+        tot_size += next_entry.block_size;
+    }
+
+    // Ensure we compare the right amount for the last block.
+    if (tot_size > size) {
+        cur_size -= (tot_size - size);
+    }
+
+    if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
+        return false;
+    }
+
+    if (!UpdateCurrentIterator()) {
+        return false;
+    }
+
+    return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
+}
+
 ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
                                           KPageTable& src_page_table, VAddr src_addr) {
     KScopedLightLock lk(general_lock);
@@ -1341,7 +1430,7 @@ ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
             new_state = KMemoryState::AliasCodeData;
             break;
         default:
-            UNREACHABLE();
+            ASSERT(false);
         }
     }
 
@@ -1687,22 +1776,22 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
     return ResultSuccess;
 }
 
-ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
+ResultCode KPageTable::LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::size_t size) {
     return this->LockMemoryAndOpen(
-        nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
-        KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
-        KMemoryAttribute::All, KMemoryAttribute::None,
+        out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
+        KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+        KMemoryAttribute::None,
         static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
                                        KMemoryPermission::KernelReadWrite),
         KMemoryAttribute::Locked);
 }
 
-ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) {
-    return this->UnlockMemory(addr, size, KMemoryState::FlagCanCodeMemory,
-                              KMemoryState::FlagCanCodeMemory, KMemoryPermission::None,
-                              KMemoryPermission::None, KMemoryAttribute::All,
-                              KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
-                              KMemoryAttribute::Locked, nullptr);
+ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size,
+                                           const KPageLinkedList& pg) {
+    return this->UnlockMemory(
+        addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
+        KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
+        KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg);
 }
 
 ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
@@ -1734,9 +1823,7 @@ void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
     VAddr addr{start};
     while (addr < start + (num_pages * PageSize)) {
         const PAddr paddr{GetPhysicalAddr(addr)};
-        if (!paddr) {
-            UNREACHABLE();
-        }
+        ASSERT(paddr != 0);
         page_linked_list.AddBlock(paddr, 1);
         addr += PageSize;
     }
@@ -1767,7 +1854,7 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLin
             system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress());
             break;
         default:
-            UNREACHABLE();
+            ASSERT(false);
         }
 
         addr += size;
@@ -1798,7 +1885,7 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermiss
     case OperationType::ChangePermissionsAndRefresh:
         break;
     default:
-        UNREACHABLE();
+        ASSERT(false);
     }
     return ResultSuccess;
 }
@@ -1835,7 +1922,6 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
         return code_region_start;
     default:
         UNREACHABLE();
-        return {};
     }
 }
 
@@ -1871,7 +1957,6 @@ std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
         return code_region_end - code_region_start;
     default:
         UNREACHABLE();
-        return {};
     }
 }
 
@@ -2125,7 +2210,7 @@ ResultCode KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_
 
     // Check the page group.
     if (pg != nullptr) {
-        UNIMPLEMENTED_MSG("PageGroup support is unimplemented!");
+        R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion);
     }
 
     // Decide on new perm and attr.
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 52a93ce86..6312eb682 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -72,8 +72,8 @@ public:
                                     KMemoryPermission perm, PAddr map_addr = 0);
     ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
     ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
-    ResultCode LockForCodeMemory(VAddr addr, std::size_t size);
-    ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size);
+    ResultCode LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::size_t size);
+    ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageLinkedList& pg);
     ResultCode MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
                                     KMemoryState state_mask, KMemoryState state,
                                     KMemoryPermission perm_mask, KMemoryPermission perm,
@@ -178,6 +178,7 @@ private:
                               const KPageLinkedList* pg);
 
     ResultCode MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages);
+    bool IsValidPageGroup(const KPageLinkedList& pg, VAddr addr, size_t num_pages);
 
     bool IsLockedByCurrentThread() const {
         return general_lock.IsLockedByCurrentThread();
diff --git a/src/core/hle/kernel/k_port.cpp b/src/core/hle/kernel/k_port.cpp
index a31861cdb..51c2cd1ef 100644
--- a/src/core/hle/kernel/k_port.cpp
+++ b/src/core/hle/kernel/k_port.cpp
@@ -60,7 +60,7 @@ ResultCode KPort::EnqueueSession(KServerSession* session) {
     if (auto session_ptr = server.GetSessionRequestHandler().lock()) {
         session_ptr->ClientConnected(server.AcceptSession());
     } else {
-        UNREACHABLE();
+        ASSERT(false);
     }
 
     return ResultSuccess;
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 490e31fc7..8c79b4f0f 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -64,6 +64,10 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
     {
         KScopedSchedulerLock lock{kernel};
         thread->SetState(ThreadState::Runnable);
+
+        if (system.DebuggerEnabled()) {
+            thread->RequestSuspend(SuspendType::Debug);
+        }
     }
 }
 } // Anonymous namespace
@@ -346,7 +350,7 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
         break;
 
     default:
-        UNREACHABLE();
+        ASSERT(false);
     }
 
     // Create TLS region
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index 7e39f6d50..60f8ed470 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -97,13 +97,13 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co
                          "object_id {} is too big! This probably means a recent service call "
                          "to {} needed to return a new interface!",
                          object_id, name);
-            UNREACHABLE();
+            ASSERT(false);
             return ResultSuccess; // Ignore error if asserts are off
         }
 
         if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) {
             return strong_ptr->HandleSyncRequest(*this, context);
         } else {
-            UNREACHABLE();
+            ASSERT(false);
             return ResultSuccess;
         }
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index ab9ce6a86..ea2160099 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -133,7 +133,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
         UNIMPLEMENTED();
         break;
     default:
-        UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
+        ASSERT_MSG(false, "KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
         break;
     }
     thread_type = type;
@@ -198,6 +198,10 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
     resource_limit_release_hint = false;
     cpu_time = 0;
 
+    // Set debug context.
+    stack_top = user_stack_top;
+    argument = arg;
+
     // Clear our stack parameters.
     std::memset(static_cast<void*>(std::addressof(GetStackParameters())), 0,
                 sizeof(StackParameters));
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index b55a922ab..f4d83f99a 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -100,6 +100,12 @@ enum class ThreadWaitReasonForDebugging : u32 {
     Suspended,     ///< Thread is waiting due to process suspension
 };
 
+enum class StepState : u32 {
+    NotStepping,   ///< Thread is not currently stepping
+    StepPending,   ///< Thread will step when next scheduled
+    StepPerformed, ///< Thread has stepped, waiting to be scheduled again
+};
+
 [[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel);
 [[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel);
 [[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel);
@@ -267,6 +273,14 @@ public:
 
     void SetState(ThreadState state);
 
+    [[nodiscard]] StepState GetStepState() const {
+        return step_state;
+    }
+
+    void SetStepState(StepState state) {
+        step_state = state;
+    }
+
     [[nodiscard]] s64 GetLastScheduledTick() const {
         return last_scheduled_tick;
     }
@@ -646,6 +660,14 @@ public:
     void IfDummyThreadTryWait();
     void IfDummyThreadEndWait();
 
+    [[nodiscard]] uintptr_t GetArgument() const {
+        return argument;
+    }
+
+    [[nodiscard]] VAddr GetUserStackTop() const {
+        return stack_top;
+    }
+
 private:
     static constexpr size_t PriorityInheritanceCountMax = 10;
     union SyncObjectBuffer {
@@ -769,6 +791,7 @@ private:
     std::shared_ptr<Common::Fiber> host_context{};
     bool is_single_core{};
     ThreadType thread_type{};
+    StepState step_state{};
     std::mutex dummy_wait_lock;
     std::condition_variable dummy_wait_cv;
 
@@ -776,6 +799,8 @@ private:
     std::vector<KSynchronizationObject*> wait_objects_for_debugging;
     VAddr mutex_wait_address_for_debugging{};
     ThreadWaitReasonForDebugging wait_reason_for_debugging{};
+    uintptr_t argument;
+    VAddr stack_top;
 
 public:
     using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 92f6d8c49..b2c4f12b4 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -212,7 +212,9 @@ struct KernelCore::Impl {
         system_resource_limit = KResourceLimit::Create(system.Kernel());
         system_resource_limit->Initialize(&core_timing);
 
-        const auto [total_size, kernel_size] = memory_layout->GetTotalAndKernelMemorySizes();
+        const auto sizes{memory_layout->GetTotalAndKernelMemorySizes()};
+        const auto total_size{sizes.first};
+        const auto kernel_size{sizes.second};
 
         // If setting the default system values fails, then something seriously wrong has occurred.
         ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, total_size)
@@ -252,6 +254,7 @@ struct KernelCore::Impl {
                                                          core_id)
                        .IsSuccess());
             suspend_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
+            suspend_threads[core_id]->DisableDispatch();
         }
     }
 
@@ -1073,9 +1076,6 @@ void KernelCore::Suspend(bool in_suspention) {
             impl->suspend_threads[core_id]->SetState(state);
             impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
                 ThreadWaitReasonForDebugging::Suspended);
-            if (!should_suspend) {
-                impl->suspend_threads[core_id]->DisableDispatch();
-            }
         }
     }
 }
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 66e0ce2d0..4f0a44363 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -15,6 +15,7 @@
 #include "common/scope_exit.h"
 #include "core/core.h"
 #include "core/core_timing.h"
+#include "core/debugger/debugger.h"
 #include "core/hle/kernel/k_client_port.h"
 #include "core/hle/kernel/k_client_session.h"
 #include "core/hle/kernel/k_code_memory.h"
@@ -627,6 +628,12 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
         const auto thread_processor_id = current_thread->GetActiveCore();
         system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
     }
+
+    if (system.DebuggerEnabled()) {
+        auto* thread = system.Kernel().GetCurrentEmuThread();
+        system.GetDebugger().NotifyThreadStopped(thread);
+        thread->RequestSuspend(Kernel::SuspendType::Debug);
+    }
 }
 
 static void Break32(Core::System& system, u32 reason, u32 info1, u32 info2) {
@@ -1876,7 +1883,7 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
             KScheduler::YieldToAnyThread(kernel);
         } else {
             // Nintendo does nothing at all if an otherwise invalid value is passed.
-            UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
+            ASSERT_MSG(false, "Unimplemented sleep yield type '{:016X}'!", nanoseconds);
         }
     }
 
@@ -2982,7 +2989,6 @@ static const FunctionDef* GetSVCInfo64(u32 func_num) {
 }
 
 void Call(Core::System& system, u32 immediate) {
-    system.ExitDynarmicProfile();
     auto& kernel = system.Kernel();
     kernel.EnterSVCProfile();
 
@@ -3007,8 +3013,6 @@ void Call(Core::System& system, u32 immediate) {
         auto* host_context = thread->GetHostContext().get();
         host_context->Rewind();
     }
-
-    system.EnterDynarmicProfile();
 }
 
 } // namespace Kernel::Svc
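
The largest addition above, KPageTable::IsValidPageGroup, traverses the page table over [addr, addr + num_pages * PageSize) and checks that the physically contiguous runs it finds are exactly the blocks recorded in the saved page group (and that each block is heap-backed). The self-contained model below shows the core same-pages check by expanding both descriptions to per-page physical addresses; it assumes 4 KiB pages, omits the heap-region check, and ExpandToPages/IsValidPageGroupModel are illustrative names, not yuzu code.

#include <cstddef>
#include <cstdint>
#include <vector>

constexpr std::size_t kPageSize = 0x1000; // assumed 4 KiB pages

struct Block {
    std::uintptr_t phys_addr;
    std::size_t num_pages;
};

// Expand a block list into the ordered sequence of page-sized physical
// addresses it covers.
static std::vector<std::uintptr_t> ExpandToPages(const std::vector<Block>& blocks) {
    std::vector<std::uintptr_t> pages;
    for (const auto& b : blocks) {
        for (std::size_t i = 0; i < b.num_pages; ++i) {
            pages.push_back(b.phys_addr + i * kPageSize);
        }
    }
    return pages;
}

// `group` is the saved page group; `mapped` stands in for the extents found by
// traversing the page table. The real implementation streams this comparison
// block-by-block and adds further checks; this model only captures the basic
// same-physical-pages idea.
static bool IsValidPageGroupModel(const std::vector<Block>& group, const std::vector<Block>& mapped,
                                  std::size_t num_pages) {
    if (group.empty()) {
        return false; // empty groups are rejected, as in the diff
    }
    const auto expected = ExpandToPages(group);
    const auto actual = ExpandToPages(mapped);
    return expected.size() == num_pages && expected == actual;
}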