author     fearlessTobi <thm.frey@gmail.com>  2018-09-15 15:21:06 +0200
committer  fearlessTobi <thm.frey@gmail.com>  2018-09-15 15:21:06 +0200
commit     63c2e32e207d31ecadd9022e1d7cd705c9febac8
tree       8a90e8ef2804f147dff7225a543a8740ecf7160c /src/core/hle/kernel
parent     Merge pull request #1310 from lioncash/kernel-ns
Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp  25
-rw-r--r--  src/core/hle/kernel/handle_table.cpp       2
-rw-r--r--  src/core/hle/kernel/handle_table.h         2
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp           27
-rw-r--r--  src/core/hle/kernel/hle_ipc.h             20
-rw-r--r--  src/core/hle/kernel/process.cpp            6
-rw-r--r--  src/core/hle/kernel/process.h              4
-rw-r--r--  src/core/hle/kernel/shared_memory.h        2
-rw-r--r--  src/core/hle/kernel/svc.cpp               27
-rw-r--r--  src/core/hle/kernel/thread.cpp             2
-rw-r--r--  src/core/hle/kernel/thread.h               2
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp         2
-rw-r--r--  src/core/hle/kernel/vm_manager.h           4
-rw-r--r--  src/core/hle/kernel/wait_object.cpp        2
14 files changed, 65 insertions, 62 deletions
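
The entire patch is a mechanical rename: every unqualified size_t in these files becomes std::size_t. The unqualified name is only guaranteed by the C headers (e.g. <stddef.h>), while the C++ header <cstddef> only promises the std::-qualified spelling, so qualifying it keeps the code correct regardless of which headers are pulled in transitively. A minimal, self-contained sketch of the preferred spelling (illustrative code, not taken from the patch):

    #include <cstddef>   // guarantees std::size_t
    #include <iostream>
    #include <vector>

    // Count elements equal to 'target'; std::size_t is the natural index/count type here.
    std::size_t CountMatches(const std::vector<int>& values, int target) {
        std::size_t count = 0;
        for (std::size_t i = 0; i < values.size(); ++i) {
            if (values[i] == target)
                ++count;
        }
        return count;
    }

    int main() {
        const std::vector<int> values{1, 2, 2, 3};
        std::cout << CountMatches(values, 2) << '\n'; // prints 2
    }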
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 6657accd5..93577591f 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -35,16 +35,17 @@ static ResultCode WaitForAddress(VAddr address, s64 timeout) {
// Gets the threads waiting on an address.
static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address) {
- const auto RetrieveWaitingThreads =
- [](size_t core_index, std::vector<SharedPtr<Thread>>& waiting_threads, VAddr arb_addr) {
- const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
- auto& thread_list = scheduler->GetThreadList();
-
- for (auto& thread : thread_list) {
- if (thread->arb_wait_address == arb_addr)
- waiting_threads.push_back(thread);
- }
- };
+ const auto RetrieveWaitingThreads = [](std::size_t core_index,
+ std::vector<SharedPtr<Thread>>& waiting_threads,
+ VAddr arb_addr) {
+ const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
+ auto& thread_list = scheduler->GetThreadList();
+
+ for (auto& thread : thread_list) {
+ if (thread->arb_wait_address == arb_addr)
+ waiting_threads.push_back(thread);
+ }
+ };
// Retrieve all threads that are waiting for this address.
std::vector<SharedPtr<Thread>> threads;
@@ -66,12 +67,12 @@ static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address)
static void WakeThreads(std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) {
// Only process up to 'target' threads, unless 'target' is <= 0, in which case process
// them all.
- size_t last = waiting_threads.size();
+ std::size_t last = waiting_threads.size();
if (num_to_wake > 0)
last = num_to_wake;
// Signal the waiting threads.
- for (size_t i = 0; i < last; i++) {
+ for (std::size_t i = 0; i < last; i++) {
ASSERT(waiting_threads[i]->status == ThreadStatus::WaitArb);
waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS);
waiting_threads[i]->arb_wait_address = 0;
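
The address-arbiter hunks only reflow the RetrieveWaitingThreads lambda and switch the loop counters to std::size_t; the collect-then-wake logic itself is unchanged. A rough standalone sketch of that shape, using a stand-in thread type (names here are illustrative, not yuzu's API):

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <vector>

    struct FakeThread {
        std::uint64_t arb_wait_address = 0;
        void Wake() { arb_wait_address = 0; } // stand-in for the real resume logic
    };

    // Wake up to num_to_wake threads waiting on 'address'; num_to_wake <= 0 wakes them all.
    void WakeThreadsWaitingOn(std::vector<std::shared_ptr<FakeThread>>& threads,
                              std::uint64_t address, std::int32_t num_to_wake) {
        // Collect the waiters first, then signal up to the requested count.
        std::vector<std::shared_ptr<FakeThread>> waiting;
        for (auto& thread : threads) {
            if (thread->arb_wait_address == address)
                waiting.push_back(thread);
        }

        std::size_t last = waiting.size();
        if (num_to_wake > 0)
            last = static_cast<std::size_t>(num_to_wake);

        for (std::size_t i = 0; i < last && i < waiting.size(); ++i)
            waiting[i]->Wake();
    }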
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index 3a079b9a9..5ee5c05e3 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -65,7 +65,7 @@ ResultCode HandleTable::Close(Handle handle) {
}
bool HandleTable::IsValid(Handle handle) const {
- size_t slot = GetSlot(handle);
+ std::size_t slot = GetSlot(handle);
u16 generation = GetGeneration(handle);
return slot < MAX_COUNT && objects[slot] != nullptr && generations[slot] == generation;
diff --git a/src/core/hle/kernel/handle_table.h b/src/core/hle/kernel/handle_table.h
index cac928adb..9e2f33e8a 100644
--- a/src/core/hle/kernel/handle_table.h
+++ b/src/core/hle/kernel/handle_table.h
@@ -93,7 +93,7 @@ private:
* This is the maximum limit of handles allowed per process in CTR-OS. It can be further
* reduced by ExHeader values, but this is not emulated here.
*/
- static const size_t MAX_COUNT = 4096;
+ static const std::size_t MAX_COUNT = 4096;
static u16 GetSlot(Handle handle) {
return handle >> 15;
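
The handle-table hunks only change the types of slot and MAX_COUNT, but they sit on the slot/generation encoding the table relies on: the upper bits of a handle select a table slot and the lower bits carry a generation counter that invalidates stale handles. A toy version of that validity check (the exact bit layout here is assumed for illustration):

    #include <array>
    #include <cstddef>
    #include <cstdint>

    using Handle = std::uint32_t;

    // Toy handle table: bits above 15 pick a slot, the low 15 bits are a generation counter.
    class ToyHandleTable {
    public:
        static constexpr std::size_t MAX_COUNT = 4096;

        bool IsValid(Handle handle) const {
            const std::size_t slot = handle >> 15;
            const std::uint16_t generation = handle & 0x7FFF;
            return slot < MAX_COUNT && in_use[slot] && generations[slot] == generation;
        }

    private:
        std::array<bool, MAX_COUNT> in_use{};
        std::array<std::uint16_t, MAX_COUNT> generations{};
    };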
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 7264be906..72fb9d250 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -42,9 +42,9 @@ SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread,
Kernel::SharedPtr<Kernel::Event> event) {
// Put the client thread to sleep until the wait event is signaled or the timeout expires.
- thread->wakeup_callback =
- [context = *this, callback](ThreadWakeupReason reason, SharedPtr<Thread> thread,
- SharedPtr<WaitObject> object, size_t index) mutable -> bool {
+ thread->wakeup_callback = [context = *this, callback](
+ ThreadWakeupReason reason, SharedPtr<Thread> thread,
+ SharedPtr<WaitObject> object, std::size_t index) mutable -> bool {
ASSERT(thread->status == ThreadStatus::WaitHLEEvent);
callback(thread, context, reason);
context.WriteToOutgoingCommandBuffer(*thread);
@@ -199,8 +199,8 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(u32_le* src_cmdb
}
// The data_size already includes the payload header, the padding and the domain header.
- size_t size = data_payload_offset + command_header->data_size -
- sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
+ std::size_t size = data_payload_offset + command_header->data_size -
+ sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
if (domain_message_header)
size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32);
std::copy_n(src_cmdbuf, size, cmd_buf.begin());
@@ -217,8 +217,8 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
ParseCommandBuffer(cmd_buf.data(), false);
// The data_size already includes the payload header, the padding and the domain header.
- size_t size = data_payload_offset + command_header->data_size -
- sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
+ std::size_t size = data_payload_offset + command_header->data_size -
+ sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
if (domain_message_header)
size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32);
@@ -229,7 +229,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
"Handle descriptor bit set but no handles to translate");
// We write the translated handles at a specific offset in the command buffer, this space
// was already reserved when writing the header.
- size_t current_offset =
+ std::size_t current_offset =
(sizeof(IPC::CommandHeader) + sizeof(IPC::HandleDescriptorHeader)) / sizeof(u32);
ASSERT_MSG(!handle_descriptor_header->send_current_pid, "Sending PID is not implemented");
@@ -258,7 +258,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
ASSERT(domain_message_header->num_objects == domain_objects.size());
// Write the domain objects to the command buffer, these go after the raw untranslated data.
// TODO(Subv): This completely ignores C buffers.
- size_t domain_offset = size - domain_message_header->num_objects;
+ std::size_t domain_offset = size - domain_message_header->num_objects;
auto& request_handlers = server_session->domain_request_handlers;
for (auto& object : domain_objects) {
@@ -291,14 +291,15 @@ std::vector<u8> HLERequestContext::ReadBuffer(int buffer_index) const {
return buffer;
}
-size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size, int buffer_index) const {
+std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
+ int buffer_index) const {
if (size == 0) {
LOG_WARNING(Core, "skip empty buffer write");
return 0;
}
const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[buffer_index].Size()};
- const size_t buffer_size{GetWriteBufferSize(buffer_index)};
+ const std::size_t buffer_size{GetWriteBufferSize(buffer_index)};
if (size > buffer_size) {
LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
buffer_size);
@@ -314,13 +315,13 @@ size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size, int buffe
return size;
}
-size_t HLERequestContext::GetReadBufferSize(int buffer_index) const {
+std::size_t HLERequestContext::GetReadBufferSize(int buffer_index) const {
const bool is_buffer_a{BufferDescriptorA().size() && BufferDescriptorA()[buffer_index].Size()};
return is_buffer_a ? BufferDescriptorA()[buffer_index].Size()
: BufferDescriptorX()[buffer_index].Size();
}
-size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const {
+std::size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const {
const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[buffer_index].Size()};
return is_buffer_b ? BufferDescriptorB()[buffer_index].Size()
: BufferDescriptorC()[buffer_index].Size();
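
In hle_ipc.cpp the WriteBuffer hunk only rewraps the declaration to fit the longer return type; the size check against the descriptor-reported capacity stays the same. A rough stand-in for that bounds-checked write (hypothetical helper, not the real WriteBuffer):

    #include <cstddef>
    #include <cstring>
    #include <vector>

    // Copy at most dest.size() bytes from src; returns the number of bytes actually written.
    std::size_t WriteBounded(std::vector<unsigned char>& dest, const void* src, std::size_t size) {
        if (size == 0)
            return 0;              // skip empty writes, as the real helper does
        if (size > dest.size())
            size = dest.size();    // the real code logs and truncates oversized writes
        std::memcpy(dest.data(), src, size);
        return size;
    }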
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index f0d07f1b6..894479ee0 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -170,7 +170,7 @@ public:
std::vector<u8> ReadBuffer(int buffer_index = 0) const;
/// Helper function to write a buffer using the appropriate buffer descriptor
- size_t WriteBuffer(const void* buffer, size_t size, int buffer_index = 0) const;
+ std::size_t WriteBuffer(const void* buffer, std::size_t size, int buffer_index = 0) const;
/* Helper function to write a buffer using the appropriate buffer descriptor
*
@@ -182,7 +182,7 @@ public:
*/
template <typename ContiguousContainer,
typename = std::enable_if_t<!std::is_pointer_v<ContiguousContainer>>>
- size_t WriteBuffer(const ContiguousContainer& container, int buffer_index = 0) const {
+ std::size_t WriteBuffer(const ContiguousContainer& container, int buffer_index = 0) const {
using ContiguousType = typename ContiguousContainer::value_type;
static_assert(std::is_trivially_copyable_v<ContiguousType>,
@@ -193,19 +193,19 @@ public:
}
/// Helper function to get the size of the input buffer
- size_t GetReadBufferSize(int buffer_index = 0) const;
+ std::size_t GetReadBufferSize(int buffer_index = 0) const;
/// Helper function to get the size of the output buffer
- size_t GetWriteBufferSize(int buffer_index = 0) const;
+ std::size_t GetWriteBufferSize(int buffer_index = 0) const;
template <typename T>
- SharedPtr<T> GetCopyObject(size_t index) {
+ SharedPtr<T> GetCopyObject(std::size_t index) {
ASSERT(index < copy_objects.size());
return DynamicObjectCast<T>(copy_objects[index]);
}
template <typename T>
- SharedPtr<T> GetMoveObject(size_t index) {
+ SharedPtr<T> GetMoveObject(std::size_t index) {
ASSERT(index < move_objects.size());
return DynamicObjectCast<T>(move_objects[index]);
}
@@ -223,7 +223,7 @@ public:
}
template <typename T>
- std::shared_ptr<T> GetDomainRequestHandler(size_t index) const {
+ std::shared_ptr<T> GetDomainRequestHandler(std::size_t index) const {
return std::static_pointer_cast<T>(domain_request_handlers[index]);
}
@@ -240,15 +240,15 @@ public:
domain_objects.clear();
}
- size_t NumMoveObjects() const {
+ std::size_t NumMoveObjects() const {
return move_objects.size();
}
- size_t NumCopyObjects() const {
+ std::size_t NumCopyObjects() const {
return copy_objects.size();
}
- size_t NumDomainObjects() const {
+ std::size_t NumDomainObjects() const {
return domain_objects.size();
}
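
The hle_ipc.h hunks are the matching signature updates; the only non-trivial one is the container overload of WriteBuffer, which is constrained so a raw pointer cannot bind to it and whose element type must be trivially copyable so a byte-wise copy is valid. The constraint pattern looks roughly like this (a sketch with a renamed helper, not the exact yuzu declaration):

    #include <cstddef>
    #include <cstdio>
    #include <type_traits>

    // Pointer+size overload (stand-in body; the real one copies into an IPC buffer).
    std::size_t WriteBytes(const void* data, std::size_t size) {
        std::printf("writing %zu bytes\n", size);
        return size;
    }

    // Container overload: disabled for raw pointers at overload-resolution time.
    template <typename ContiguousContainer,
              typename = std::enable_if_t<!std::is_pointer_v<ContiguousContainer>>>
    std::size_t WriteBytes(const ContiguousContainer& container) {
        using ContiguousType = typename ContiguousContainer::value_type;
        static_assert(std::is_trivially_copyable_v<ContiguousType>,
                      "Container must contain trivially copyable objects");
        return WriteBytes(container.data(), container.size() * sizeof(ContiguousType));
    }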
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index b025e323f..7a272d031 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -40,8 +40,8 @@ SharedPtr<Process> Process::Create(KernelCore& kernel, std::string&& name) {
return process;
}
-void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
- for (size_t i = 0; i < len; ++i) {
+void Process::ParseKernelCaps(const u32* kernel_caps, std::size_t len) {
+ for (std::size_t i = 0; i < len; ++i) {
u32 descriptor = kernel_caps[i];
u32 type = descriptor >> 20;
@@ -211,7 +211,7 @@ ResultCode Process::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
"Shared memory exceeds bounds of mapped block");
const std::shared_ptr<std::vector<u8>>& backing_block = vma->second.backing_block;
- size_t backing_block_offset = vma->second.offset + vma_offset;
+ std::size_t backing_block_offset = vma->second.offset + vma_offset;
CASCADE_RESULT(auto new_vma,
vm_manager.MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size,
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 1587d40c1..81538f70c 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -59,7 +59,7 @@ class ResourceLimit;
struct CodeSet final : public Object {
struct Segment {
- size_t offset = 0;
+ std::size_t offset = 0;
VAddr addr = 0;
u32 size = 0;
};
@@ -164,7 +164,7 @@ public:
* Parses a list of kernel capability descriptors (as found in the ExHeader) and applies them
* to this process.
*/
- void ParseKernelCaps(const u32* kernel_caps, size_t len);
+ void ParseKernelCaps(const u32* kernel_caps, std::size_t len);
/**
* Applies address space changes and launches the process main thread.
diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h
index 2c729afe3..2c06bb7ce 100644
--- a/src/core/hle/kernel/shared_memory.h
+++ b/src/core/hle/kernel/shared_memory.h
@@ -119,7 +119,7 @@ public:
/// Backing memory for this shared memory block.
std::shared_ptr<std::vector<u8>> backing_block;
/// Offset into the backing block for this shared memory.
- size_t backing_block_offset;
+ std::size_t backing_block_offset;
/// Size of the memory block. Page-aligned.
u64 size;
/// Permission restrictions applied to the process which created the block.
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index f500fd2e7..a5aaa089d 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -146,7 +146,7 @@ static ResultCode GetProcessId(u32* process_id, Handle process_handle) {
/// Default thread wakeup callback for WaitSynchronization
static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
- SharedPtr<WaitObject> object, size_t index) {
+ SharedPtr<WaitObject> object, std::size_t index) {
ASSERT(thread->status == ThreadStatus::WaitSynchAny);
if (reason == ThreadWakeupReason::Timeout) {
@@ -647,16 +647,17 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
condition_variable_addr, target);
- auto RetrieveWaitingThreads =
- [](size_t core_index, std::vector<SharedPtr<Thread>>& waiting_threads, VAddr condvar_addr) {
- const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
- auto& thread_list = scheduler->GetThreadList();
+ auto RetrieveWaitingThreads = [](std::size_t core_index,
+ std::vector<SharedPtr<Thread>>& waiting_threads,
+ VAddr condvar_addr) {
+ const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
+ auto& thread_list = scheduler->GetThreadList();
- for (auto& thread : thread_list) {
- if (thread->condvar_wait_address == condvar_addr)
- waiting_threads.push_back(thread);
- }
- };
+ for (auto& thread : thread_list) {
+ if (thread->condvar_wait_address == condvar_addr)
+ waiting_threads.push_back(thread);
+ }
+ };
// Retrieve a list of all threads that are waiting for this condition variable.
std::vector<SharedPtr<Thread>> waiting_threads;
@@ -672,7 +673,7 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
// Only process up to 'target' threads, unless 'target' is -1, in which case process
// them all.
- size_t last = waiting_threads.size();
+ std::size_t last = waiting_threads.size();
if (target != -1)
last = target;
@@ -680,12 +681,12 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
if (last > waiting_threads.size())
return RESULT_SUCCESS;
- for (size_t index = 0; index < last; ++index) {
+ for (std::size_t index = 0; index < last; ++index) {
auto& thread = waiting_threads[index];
ASSERT(thread->condvar_wait_address == condition_variable_addr);
- size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
+ std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
auto& monitor = Core::System::GetInstance().Monitor();
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 3f12a84dc..89cd5f401 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -275,7 +275,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
available_slot = 0; // Use the first slot in the new page
// Allocate some memory from the end of the linear heap for this region.
- const size_t offset = thread->tls_memory->size();
+ const std::size_t offset = thread->tls_memory->size();
thread->tls_memory->insert(thread->tls_memory->end(), Memory::PAGE_SIZE, 0);
auto& vm_manager = owner_process->vm_manager;
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index cb57ee78a..df4748942 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -254,7 +254,7 @@ public:
Handle callback_handle;
using WakeupCallback = bool(ThreadWakeupReason reason, SharedPtr<Thread> thread,
- SharedPtr<WaitObject> object, size_t index);
+ SharedPtr<WaitObject> object, std::size_t index);
// Callback that will be invoked when the thread is resumed from a waiting state. If the thread
// was waiting via WaitSynchronizationN then the object will be the last object that became
// available. In case of a timeout, the object will be nullptr.
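
In thread.h the change lands inside the WakeupCallback alias, which is a plain function type; the member that stores it is presumably a std::function wrapper, since the hle_ipc.cpp hunk above assigns a capturing lambda to thread->wakeup_callback. A reduced sketch of that pattern (illustrative types only):

    #include <cstddef>
    #include <functional>
    #include <iostream>

    enum class WakeReason { Signal, Timeout };

    // A function *type* alias; std::function<WakeupCallback> can hold any callable
    // with this signature, including lambdas that capture state.
    using WakeupCallback = bool(WakeReason reason, std::size_t index);

    struct FakeThread {
        std::function<WakeupCallback> wakeup_callback;
    };

    int main() {
        FakeThread thread;
        thread.wakeup_callback = [](WakeReason reason, std::size_t index) {
            std::cout << "woken, index=" << index
                      << (reason == WakeReason::Timeout ? " (timeout)\n" : "\n");
            return reason != WakeReason::Timeout; // e.g. only resume on a real signal
        };
        return thread.wakeup_callback(WakeReason::Signal, 0) ? 0 : 1;
    }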
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 479cacb62..608cbd57b 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -86,7 +86,7 @@ VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
std::shared_ptr<std::vector<u8>> block,
- size_t offset, u64 size,
+ std::size_t offset, u64 size,
MemoryState state) {
ASSERT(block != nullptr);
ASSERT(offset + size <= block->size());
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 98bd04bea..de75036c0 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -81,7 +81,7 @@ struct VirtualMemoryArea {
/// Memory block backing this VMA.
std::shared_ptr<std::vector<u8>> backing_block = nullptr;
/// Offset into the backing_memory the mapping starts from.
- size_t offset = 0;
+ std::size_t offset = 0;
// Settings for type = BackingMemory
/// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed.
@@ -147,7 +147,7 @@ public:
* @param state MemoryState tag to attach to the VMA.
*/
ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block,
- size_t offset, u64 size, MemoryState state);
+ std::size_t offset, u64 size, MemoryState state);
/**
* Maps an unmanaged host memory pointer at a given address.
diff --git a/src/core/hle/kernel/wait_object.cpp b/src/core/hle/kernel/wait_object.cpp
index eef00b729..b190ceb98 100644
--- a/src/core/hle/kernel/wait_object.cpp
+++ b/src/core/hle/kernel/wait_object.cpp
@@ -81,7 +81,7 @@ void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) {
}
}
- size_t index = thread->GetWaitObjectIndex(this);
+ std::size_t index = thread->GetWaitObjectIndex(this);
for (auto& object : thread->wait_objects)
object->RemoveWaitingThread(thread.get());