Diffstat (limited to 'src/common')
-rw-r--r--  src/common/CMakeLists.txt                  1
-rw-r--r--  src/common/assert.cpp                      7
-rw-r--r--  src/common/assert.h                       55
-rw-r--r--  src/common/bounded_threadsafe_queue.h    180
-rw-r--r--  src/common/detached_tasks.cpp              4
-rw-r--r--  src/common/elf.h                         333
-rw-r--r--  src/common/fs/path_util.cpp                4
-rw-r--r--  src/common/input.h                         1
-rw-r--r--  src/common/settings.cpp                    3
-rw-r--r--  src/common/settings.h                      5
-rw-r--r--  src/common/string_util.cpp                 4
-rw-r--r--  src/common/string_util.h                   2
12 files changed, 562 insertions, 37 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index adf70eb8b..73bf626d4 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -58,6 +58,7 @@ add_library(common STATIC
div_ceil.h
dynamic_library.cpp
dynamic_library.h
+ elf.h
error.cpp
error.h
expected.h
diff --git a/src/common/assert.cpp b/src/common/assert.cpp
index b44570528..6026b7dc2 100644
--- a/src/common/assert.cpp
+++ b/src/common/assert.cpp
@@ -6,8 +6,13 @@
#include "common/settings.h"
-void assert_handle_failure() {
+void assert_fail_impl() {
if (Settings::values.use_debug_asserts) {
Crash();
}
}
+
+[[noreturn]] void unreachable_impl() {
+ Crash();
+ throw std::runtime_error("Unreachable code");
+}
diff --git a/src/common/assert.h b/src/common/assert.h
index dbfd8abaf..8c927fcc0 100644
--- a/src/common/assert.h
+++ b/src/common/assert.h
@@ -9,44 +9,43 @@
// Sometimes we want to try to continue even after hitting an assert.
// However touching this file yields a global recompilation as this header is included almost
// everywhere. So let's just move the handling of the failed assert to a single cpp file.
-void assert_handle_failure();
-// For asserts we'd like to keep all the junk executed when an assert happens away from the
-// important code in the function. One way of doing this is to put all the relevant code inside a
-// lambda and force the compiler to not inline it. Unfortunately, MSVC seems to have no syntax to
-// specify __declspec on lambda functions, so what we do instead is define a noinline wrapper
-// template that calls the lambda. This seems to generate an extra instruction at the call-site
-// compared to the ideal implementation (which wouldn't support ASSERT_MSG parameters), but is good
-// enough for our purposes.
-template <typename Fn>
-#if defined(_MSC_VER)
-[[msvc::noinline]]
-#elif defined(__GNUC__)
-[[gnu::cold, gnu::noinline]]
+void assert_fail_impl();
+[[noreturn]] void unreachable_impl();
+
+#ifdef _MSC_VER
+#define YUZU_NO_INLINE __declspec(noinline)
+#else
+#define YUZU_NO_INLINE __attribute__((noinline))
#endif
-static void
-assert_noinline_call(const Fn& fn) {
- fn();
- assert_handle_failure();
-}
#define ASSERT(_a_) \
- do \
- if (!(_a_)) { \
- assert_noinline_call([] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
+ ([&]() YUZU_NO_INLINE { \
+ if (!(_a_)) [[unlikely]] { \
+ LOG_CRITICAL(Debug, "Assertion Failed!"); \
+ assert_fail_impl(); \
} \
- while (0)
+ }())
#define ASSERT_MSG(_a_, ...) \
- do \
- if (!(_a_)) { \
- assert_noinline_call([&] { LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); }); \
+ ([&]() YUZU_NO_INLINE { \
+ if (!(_a_)) [[unlikely]] { \
+ LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); \
+ assert_fail_impl(); \
} \
- while (0)
+ }())
+
+#define UNREACHABLE() \
+ do { \
+ LOG_CRITICAL(Debug, "Unreachable code!"); \
+ unreachable_impl(); \
+ } while (0)
-#define UNREACHABLE() assert_noinline_call([] { LOG_CRITICAL(Debug, "Unreachable code!"); })
#define UNREACHABLE_MSG(...) \
- assert_noinline_call([&] { LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); })
+ do { \
+ LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); \
+ unreachable_impl(); \
+ } while (0)
#ifdef _DEBUG
#define DEBUG_ASSERT(_a_) ASSERT(_a_)
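
A minimal sketch of how the reworked macros read at a call site; the function and values below are hypothetical and not part of this diff. ASSERT and ASSERT_MSG now expand to an immediately invoked non-inlined lambda so the failure path stays out of the caller's hot code, while UNREACHABLE()/UNREACHABLE_MSG() always terminate through unreachable_impl().

// Hypothetical caller, assuming common/assert.h (and its logging dependency) is included.
void ApplyScale(u32 scale) {
    ASSERT(scale != 0);                                        // logs, then assert_fail_impl()
    ASSERT_MSG(scale <= 4, "unsupported scale factor {}", scale); // message uses fmt-style placeholders
    switch (scale) {
    case 1:
    case 2:
    case 3:
    case 4:
        break;
    default:
        UNREACHABLE(); // logs "Unreachable code!" and never returns
    }
}
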
diff --git a/src/common/bounded_threadsafe_queue.h b/src/common/bounded_threadsafe_queue.h
new file mode 100644
index 000000000..e83064c7f
--- /dev/null
+++ b/src/common/bounded_threadsafe_queue.h
@@ -0,0 +1,180 @@
+// SPDX-FileCopyrightText: Copyright (c) 2020 Erik Rigtorp <erik@rigtorp.se>
+// SPDX-License-Identifier: MIT
+#pragma once
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4324)
+#endif
+
+#include <atomic>
+#include <bit>
+#include <condition_variable>
+#include <memory>
+#include <mutex>
+#include <new>
+#include <stdexcept>
+#include <stop_token>
+#include <type_traits>
+#include <utility>
+
+namespace Common {
+namespace mpsc {
+#if defined(__cpp_lib_hardware_interference_size)
+constexpr size_t hardware_interference_size = std::hardware_destructive_interference_size;
+#else
+constexpr size_t hardware_interference_size = 64;
+#endif
+
+template <typename T>
+using AlignedAllocator = std::allocator<T>;
+
+template <typename T>
+struct Slot {
+ ~Slot() noexcept {
+ if (turn.test()) {
+ destroy();
+ }
+ }
+
+ template <typename... Args>
+ void construct(Args&&... args) noexcept {
+ static_assert(std::is_nothrow_constructible_v<T, Args&&...>,
+ "T must be nothrow constructible with Args&&...");
+ std::construct_at(reinterpret_cast<T*>(&storage), std::forward<Args>(args)...);
+ }
+
+ void destroy() noexcept {
+ static_assert(std::is_nothrow_destructible_v<T>, "T must be nothrow destructible");
+ std::destroy_at(reinterpret_cast<T*>(&storage));
+ }
+
+ T&& move() noexcept {
+ return reinterpret_cast<T&&>(storage);
+ }
+
+ // Align to avoid false sharing between adjacent slots
+ alignas(hardware_interference_size) std::atomic_flag turn{};
+ struct aligned_store {
+ struct type {
+ alignas(T) unsigned char data[sizeof(T)];
+ };
+ };
+ typename aligned_store::type storage;
+};
+
+template <typename T, typename Allocator = AlignedAllocator<Slot<T>>>
+class Queue {
+public:
+ explicit Queue(const size_t capacity, const Allocator& allocator = Allocator())
+ : allocator_(allocator) {
+ if (capacity < 1) {
+ throw std::invalid_argument("capacity < 1");
+ }
+ // Ensure that the queue length is an integer power of 2
+ // This is so that idx(i) can be a simple i & mask_ instead of i % capacity
+ // https://github.com/rigtorp/MPMCQueue/pull/36
+ if (!std::has_single_bit(capacity)) {
+ throw std::invalid_argument("capacity must be an integer power of 2");
+ }
+
+ mask_ = capacity - 1;
+
+ // Allocate one extra slot to prevent false sharing on the last slot
+ slots_ = allocator_.allocate(mask_ + 2);
+ // Allocators are not required to honor alignment for over-aligned types
+ // (see http://eel.is/c++draft/allocator.requirements#10) so we verify
+ // alignment here
+ if (reinterpret_cast<uintptr_t>(slots_) % alignof(Slot<T>) != 0) {
+ allocator_.deallocate(slots_, mask_ + 2);
+ throw std::bad_alloc();
+ }
+ for (size_t i = 0; i < mask_ + 1; ++i) {
+ std::construct_at(&slots_[i]);
+ }
+ static_assert(alignof(Slot<T>) == hardware_interference_size,
+ "Slot must be aligned to cache line boundary to prevent false sharing");
+ static_assert(sizeof(Slot<T>) % hardware_interference_size == 0,
+ "Slot size must be a multiple of cache line size to prevent "
+ "false sharing between adjacent slots");
+ static_assert(sizeof(Queue) % hardware_interference_size == 0,
+ "Queue size must be a multiple of cache line size to "
+ "prevent false sharing between adjacent queues");
+ }
+
+ ~Queue() noexcept {
+ for (size_t i = 0; i < mask_ + 1; ++i) {
+ slots_[i].~Slot();
+ }
+ allocator_.deallocate(slots_, mask_ + 2);
+ }
+
+ // non-copyable and non-movable
+ Queue(const Queue&) = delete;
+ Queue& operator=(const Queue&) = delete;
+
+ void Push(const T& v) noexcept {
+ static_assert(std::is_nothrow_copy_constructible_v<T>,
+ "T must be nothrow copy constructible");
+ emplace(v);
+ }
+
+ template <typename P, typename = std::enable_if_t<std::is_nothrow_constructible_v<T, P&&>>>
+ void Push(P&& v) noexcept {
+ emplace(std::forward<P>(v));
+ }
+
+ void Pop(T& v, std::stop_token stop) noexcept {
+ auto const tail = tail_.fetch_add(1);
+ auto& slot = slots_[idx(tail)];
+ if (false == slot.turn.test()) {
+ std::unique_lock lock{cv_mutex};
+ cv.wait(lock, stop, [&slot] { return slot.turn.test(); });
+ }
+ v = slot.move();
+ slot.destroy();
+ slot.turn.clear();
+ slot.turn.notify_one();
+ }
+
+private:
+ template <typename... Args>
+ void emplace(Args&&... args) noexcept {
+ static_assert(std::is_nothrow_constructible_v<T, Args&&...>,
+ "T must be nothrow constructible with Args&&...");
+ auto const head = head_.fetch_add(1);
+ auto& slot = slots_[idx(head)];
+ slot.turn.wait(true);
+ slot.construct(std::forward<Args>(args)...);
+ slot.turn.test_and_set();
+ cv.notify_one();
+ }
+
+ constexpr size_t idx(size_t i) const noexcept {
+ return i & mask_;
+ }
+
+ std::conditional_t<true, std::condition_variable_any, std::condition_variable> cv;
+ std::mutex cv_mutex;
+ size_t mask_;
+ Slot<T>* slots_;
+ [[no_unique_address]] Allocator allocator_;
+
+ // Align to avoid false sharing between head_ and tail_
+ alignas(hardware_interference_size) std::atomic<size_t> head_{0};
+ alignas(hardware_interference_size) std::atomic<size_t> tail_{0};
+
+ static_assert(std::is_nothrow_copy_assignable_v<T> || std::is_nothrow_move_assignable_v<T>,
+ "T must be nothrow copy or move assignable");
+
+ static_assert(std::is_nothrow_destructible_v<T>, "T must be nothrow destructible");
+};
+} // namespace mpsc
+
+template <typename T, typename Allocator = mpsc::AlignedAllocator<mpsc::Slot<T>>>
+using MPSCQueue = mpsc::Queue<T, Allocator>;
+
+} // namespace Common
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
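
A rough usage sketch for the new bounded MPSC queue, assuming a single consumer thread driven by a std::stop_token; the item type, function, and thread setup are illustrative assumptions, not taken from this diff.

#include <thread>

#include "common/bounded_threadsafe_queue.h"

void Example() {
    Common::MPSCQueue<int> queue{128}; // capacity must be a power of two, or the constructor throws

    // Single consumer: Pop() blocks on the slot's flag until a producer fills it
    // or the stop token is signalled.
    std::jthread consumer{[&queue](std::stop_token stop) {
        int item{};
        while (!stop.stop_requested()) {
            queue.Pop(item, stop);
            // ... handle item ...
        }
    }};

    // Any number of producer threads may call Push() concurrently.
    queue.Push(42);
}
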
diff --git a/src/common/detached_tasks.cpp b/src/common/detached_tasks.cpp
index c1362631e..ec31d0b88 100644
--- a/src/common/detached_tasks.cpp
+++ b/src/common/detached_tasks.cpp
@@ -33,9 +33,9 @@ void DetachedTasks::AddTask(std::function<void()> task) {
++instance->count;
std::thread([task{std::move(task)}]() {
task();
- std::unique_lock lock{instance->mutex};
+ std::unique_lock thread_lock{instance->mutex};
--instance->count;
- std::notify_all_at_thread_exit(instance->cv, std::move(lock));
+ std::notify_all_at_thread_exit(instance->cv, std::move(thread_lock));
}).detach();
}
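
For reference, a sketch of a call site this helper serves; the task body is hypothetical.

Common::DetachedTasks::AddTask([] {
    // Long-running background work; the counter/notify logic above lets the
    // owner wait for all detached tasks to finish before shutdown.
});
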
diff --git a/src/common/elf.h b/src/common/elf.h
new file mode 100644
index 000000000..14a5e9597
--- /dev/null
+++ b/src/common/elf.h
@@ -0,0 +1,333 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <array>
+#include <cstddef>
+
+#include "common_types.h"
+
+namespace Common {
+namespace ELF {
+
+/* Type for a 16-bit quantity. */
+using Elf32_Half = u16;
+using Elf64_Half = u16;
+
+/* Types for signed and unsigned 32-bit quantities. */
+using Elf32_Word = u32;
+using Elf32_Sword = s32;
+using Elf64_Word = u32;
+using Elf64_Sword = s32;
+
+/* Types for signed and unsigned 64-bit quantities. */
+using Elf32_Xword = u64;
+using Elf32_Sxword = s64;
+using Elf64_Xword = u64;
+using Elf64_Sxword = s64;
+
+/* Type of addresses. */
+using Elf32_Addr = u32;
+using Elf64_Addr = u64;
+
+/* Type of file offsets. */
+using Elf32_Off = u32;
+using Elf64_Off = u64;
+
+/* Type for section indices, which are 16-bit quantities. */
+using Elf32_Section = u16;
+using Elf64_Section = u16;
+
+/* Type for version symbol information. */
+using Elf32_Versym = Elf32_Half;
+using Elf64_Versym = Elf64_Half;
+
+constexpr size_t ElfIdentSize = 16;
+
+/* The ELF file header. This appears at the start of every ELF file. */
+
+struct Elf32_Ehdr {
+ std::array<u8, ElfIdentSize> e_ident; /* Magic number and other info */
+ Elf32_Half e_type; /* Object file type */
+ Elf32_Half e_machine; /* Architecture */
+ Elf32_Word e_version; /* Object file version */
+ Elf32_Addr e_entry; /* Entry point virtual address */
+ Elf32_Off e_phoff; /* Program header table file offset */
+ Elf32_Off e_shoff; /* Section header table file offset */
+ Elf32_Word e_flags; /* Processor-specific flags */
+ Elf32_Half e_ehsize; /* ELF header size in bytes */
+ Elf32_Half e_phentsize; /* Program header table entry size */
+ Elf32_Half e_phnum; /* Program header table entry count */
+ Elf32_Half e_shentsize; /* Section header table entry size */
+ Elf32_Half e_shnum; /* Section header table entry count */
+ Elf32_Half e_shstrndx; /* Section header string table index */
+};
+
+struct Elf64_Ehdr {
+ std::array<u8, ElfIdentSize> e_ident; /* Magic number and other info */
+ Elf64_Half e_type; /* Object file type */
+ Elf64_Half e_machine; /* Architecture */
+ Elf64_Word e_version; /* Object file version */
+ Elf64_Addr e_entry; /* Entry point virtual address */
+ Elf64_Off e_phoff; /* Program header table file offset */
+ Elf64_Off e_shoff; /* Section header table file offset */
+ Elf64_Word e_flags; /* Processor-specific flags */
+ Elf64_Half e_ehsize; /* ELF header size in bytes */
+ Elf64_Half e_phentsize; /* Program header table entry size */
+ Elf64_Half e_phnum; /* Program header table entry count */
+ Elf64_Half e_shentsize; /* Section header table entry size */
+ Elf64_Half e_shnum; /* Section header table entry count */
+ Elf64_Half e_shstrndx; /* Section header string table index */
+};
+
+constexpr u8 ElfClass32 = 1; /* 32-bit objects */
+constexpr u8 ElfClass64 = 2; /* 64-bit objects */
+constexpr u8 ElfData2Lsb = 1; /* 2's complement, little endian */
+constexpr u8 ElfVersionCurrent = 1; /* EV_CURRENT */
+constexpr u8 ElfOsAbiNone = 0; /* System V ABI */
+
+constexpr u16 ElfTypeNone = 0; /* No file type */
+constexpr u16 ElfTypeRel = 1; /* Relocatable file */
+constexpr u16 ElfTypeExec = 2; /* Executable file */
+constexpr u16 ElfTypeDyn = 3; /* Shared object file */
+
+constexpr u16 ElfMachineArm = 40; /* ARM */
+constexpr u16 ElfMachineAArch64 = 183; /* ARM AARCH64 */
+
+constexpr std::array<u8, ElfIdentSize> Elf32Ident{
+ 0x7f, 'E', 'L', 'F', ElfClass32, ElfData2Lsb, ElfVersionCurrent, ElfOsAbiNone};
+
+constexpr std::array<u8, ElfIdentSize> Elf64Ident{
+ 0x7f, 'E', 'L', 'F', ElfClass64, ElfData2Lsb, ElfVersionCurrent, ElfOsAbiNone};
+
+/* Section header. */
+
+struct Elf32_Shdr {
+ Elf32_Word sh_name; /* Section name (string tbl index) */
+ Elf32_Word sh_type; /* Section type */
+ Elf32_Word sh_flags; /* Section flags */
+ Elf32_Addr sh_addr; /* Section virtual addr at execution */
+ Elf32_Off sh_offset; /* Section file offset */
+ Elf32_Word sh_size; /* Section size in bytes */
+ Elf32_Word sh_link; /* Link to another section */
+ Elf32_Word sh_info; /* Additional section information */
+ Elf32_Word sh_addralign; /* Section alignment */
+ Elf32_Word sh_entsize; /* Entry size if section holds table */
+};
+
+struct Elf64_Shdr {
+ Elf64_Word sh_name; /* Section name (string tbl index) */
+ Elf64_Word sh_type; /* Section type */
+ Elf64_Xword sh_flags; /* Section flags */
+ Elf64_Addr sh_addr; /* Section virtual addr at execution */
+ Elf64_Off sh_offset; /* Section file offset */
+ Elf64_Xword sh_size; /* Section size in bytes */
+ Elf64_Word sh_link; /* Link to another section */
+ Elf64_Word sh_info; /* Additional section information */
+ Elf64_Xword sh_addralign; /* Section alignment */
+ Elf64_Xword sh_entsize; /* Entry size if section holds table */
+};
+
+constexpr u32 ElfShnUndef = 0; /* Undefined section */
+
+constexpr u32 ElfShtNull = 0; /* Section header table entry unused */
+constexpr u32 ElfShtProgBits = 1; /* Program data */
+constexpr u32 ElfShtSymtab = 2; /* Symbol table */
+constexpr u32 ElfShtStrtab = 3; /* String table */
+constexpr u32 ElfShtRela = 4; /* Relocation entries with addends */
+constexpr u32 ElfShtDynamic = 6; /* Dynamic linking information */
+constexpr u32 ElfShtNobits = 8; /* Program space with no data (bss) */
+constexpr u32 ElfShtRel = 9; /* Relocation entries, no addends */
+constexpr u32 ElfShtDynsym = 11; /* Dynamic linker symbol table */
+
+/* Symbol table entry. */
+
+struct Elf32_Sym {
+ Elf32_Word st_name; /* Symbol name (string tbl index) */
+ Elf32_Addr st_value; /* Symbol value */
+ Elf32_Word st_size; /* Symbol size */
+ u8 st_info; /* Symbol type and binding */
+ u8 st_other; /* Symbol visibility */
+ Elf32_Section st_shndx; /* Section index */
+};
+
+struct Elf64_Sym {
+ Elf64_Word st_name; /* Symbol name (string tbl index) */
+ u8 st_info; /* Symbol type and binding */
+ u8 st_other; /* Symbol visibility */
+ Elf64_Section st_shndx; /* Section index */
+ Elf64_Addr st_value; /* Symbol value */
+ Elf64_Xword st_size; /* Symbol size */
+};
+
+/* How to extract and insert information held in the st_info field. */
+
+static inline u8 ElfStBind(u8 st_info) {
+ return st_info >> 4;
+}
+static inline u8 ElfStType(u8 st_info) {
+ return st_info & 0xf;
+}
+static inline u8 ElfStInfo(u8 st_bind, u8 st_type) {
+ return static_cast<u8>((st_bind << 4) + (st_type & 0xf));
+}
+
+constexpr u8 ElfBindLocal = 0; /* Local symbol */
+constexpr u8 ElfBindGlobal = 1; /* Global symbol */
+constexpr u8 ElfBindWeak = 2; /* Weak symbol */
+
+constexpr u8 ElfTypeUnspec = 0; /* Symbol type is unspecified */
+constexpr u8 ElfTypeObject = 1; /* Symbol is a data object */
+constexpr u8 ElfTypeFunc = 2; /* Symbol is a code object */
+
+static inline u8 ElfStVisibility(u8 st_other) {
+ return static_cast<u8>(st_other & 0x3);
+}
+
+constexpr u8 ElfVisibilityDefault = 0; /* Default symbol visibility rules */
+constexpr u8 ElfVisibilityInternal = 1; /* Processor specific hidden class */
+constexpr u8 ElfVisibilityHidden = 2; /* Sym unavailable in other modules */
+constexpr u8 ElfVisibilityProtected = 3; /* Not preemptible, not exported */
+
+/* Relocation table entry without addend (in section of type ShtRel). */
+
+struct Elf32_Rel {
+ Elf32_Addr r_offset; /* Address */
+ Elf32_Word r_info; /* Relocation type and symbol index */
+};
+
+/* Relocation table entry with addend (in section of type ShtRela). */
+
+struct Elf32_Rela {
+ Elf32_Addr r_offset; /* Address */
+ Elf32_Word r_info; /* Relocation type and symbol index */
+ Elf32_Sword r_addend; /* Addend */
+};
+
+struct Elf64_Rela {
+ Elf64_Addr r_offset; /* Address */
+ Elf64_Xword r_info; /* Relocation type and symbol index */
+ Elf64_Sxword r_addend; /* Addend */
+};
+
+/* How to extract and insert information held in the r_info field. */
+
+static inline u32 Elf32RelSymIndex(Elf32_Word r_info) {
+ return r_info >> 8;
+}
+static inline u8 Elf32RelType(Elf32_Word r_info) {
+ return static_cast<u8>(r_info & 0xff);
+}
+static inline Elf32_Word Elf32RelInfo(u32 sym_index, u8 type) {
+ return (sym_index << 8) + type;
+}
+static inline u32 Elf64RelSymIndex(Elf64_Xword r_info) {
+ return static_cast<u32>(r_info >> 32);
+}
+static inline u32 Elf64RelType(Elf64_Xword r_info) {
+ return r_info & 0xffffffff;
+}
+static inline Elf64_Xword Elf64RelInfo(u32 sym_index, u32 type) {
+ return (static_cast<Elf64_Xword>(sym_index) << 32) + type;
+}
+
+constexpr u32 ElfArmCopy = 20; /* Copy symbol at runtime */
+constexpr u32 ElfArmGlobDat = 21; /* Create GOT entry */
+constexpr u32 ElfArmJumpSlot = 22; /* Create PLT entry */
+constexpr u32 ElfArmRelative = 23; /* Adjust by program base */
+
+constexpr u32 ElfAArch64Copy = 1024; /* Copy symbol at runtime */
+constexpr u32 ElfAArch64GlobDat = 1025; /* Create GOT entry */
+constexpr u32 ElfAArch64JumpSlot = 1026; /* Create PLT entry */
+constexpr u32 ElfAArch64Relative = 1027; /* Adjust by program base */
+
+/* Program segment header. */
+
+struct Elf32_Phdr {
+ Elf32_Word p_type; /* Segment type */
+ Elf32_Off p_offset; /* Segment file offset */
+ Elf32_Addr p_vaddr; /* Segment virtual address */
+ Elf32_Addr p_paddr; /* Segment physical address */
+ Elf32_Word p_filesz; /* Segment size in file */
+ Elf32_Word p_memsz; /* Segment size in memory */
+ Elf32_Word p_flags; /* Segment flags */
+ Elf32_Word p_align; /* Segment alignment */
+};
+
+struct Elf64_Phdr {
+ Elf64_Word p_type; /* Segment type */
+ Elf64_Word p_flags; /* Segment flags */
+ Elf64_Off p_offset; /* Segment file offset */
+ Elf64_Addr p_vaddr; /* Segment virtual address */
+ Elf64_Addr p_paddr; /* Segment physical address */
+ Elf64_Xword p_filesz; /* Segment size in file */
+ Elf64_Xword p_memsz; /* Segment size in memory */
+ Elf64_Xword p_align; /* Segment alignment */
+};
+
+/* Legal values for p_type (segment type). */
+
+constexpr u32 ElfPtNull = 0; /* Program header table entry unused */
+constexpr u32 ElfPtLoad = 1; /* Loadable program segment */
+constexpr u32 ElfPtDynamic = 2; /* Dynamic linking information */
+constexpr u32 ElfPtInterp = 3; /* Program interpreter */
+constexpr u32 ElfPtNote = 4; /* Auxiliary information */
+constexpr u32 ElfPtPhdr = 6; /* Entry for header table itself */
+constexpr u32 ElfPtTls = 7; /* Thread-local storage segment */
+
+/* Legal values for p_flags (segment flags). */
+
+constexpr u32 ElfPfExec = 1; /* Segment is executable */
+constexpr u32 ElfPfWrite = 2; /* Segment is writable */
+constexpr u32 ElfPfRead = 4; /* Segment is readable */
+
+/* Dynamic section entry. */
+
+struct Elf32_Dyn {
+ Elf32_Sword d_tag; /* Dynamic entry type */
+ union {
+ Elf32_Word d_val; /* Integer value */
+ Elf32_Addr d_ptr; /* Address value */
+ } d_un;
+};
+
+struct Elf64_Dyn {
+ Elf64_Sxword d_tag; /* Dynamic entry type */
+ union {
+ Elf64_Xword d_val; /* Integer value */
+ Elf64_Addr d_ptr; /* Address value */
+ } d_un;
+};
+
+/* Legal values for d_tag (dynamic entry type). */
+
+constexpr u32 ElfDtNull = 0; /* Marks end of dynamic section */
+constexpr u32 ElfDtNeeded = 1; /* Name of needed library */
+constexpr u32 ElfDtPltRelSz = 2; /* Size in bytes of PLT relocs */
+constexpr u32 ElfDtPltGot = 3; /* Processor defined value */
+constexpr u32 ElfDtHash = 4; /* Address of symbol hash table */
+constexpr u32 ElfDtStrtab = 5; /* Address of string table */
+constexpr u32 ElfDtSymtab = 6; /* Address of symbol table */
+constexpr u32 ElfDtRela = 7; /* Address of Rela relocs */
+constexpr u32 ElfDtRelasz = 8; /* Total size of Rela relocs */
+constexpr u32 ElfDtRelaent = 9; /* Size of one Rela reloc */
+constexpr u32 ElfDtStrsz = 10; /* Size of string table */
+constexpr u32 ElfDtSyment = 11; /* Size of one symbol table entry */
+constexpr u32 ElfDtInit = 12; /* Address of init function */
+constexpr u32 ElfDtFini = 13; /* Address of termination function */
+constexpr u32 ElfDtRel = 17; /* Address of Rel relocs */
+constexpr u32 ElfDtRelsz = 18; /* Total size of Rel relocs */
+constexpr u32 ElfDtRelent = 19; /* Size of one Rel reloc */
+constexpr u32 ElfDtPltRel = 20; /* Type of reloc in PLT */
+constexpr u32 ElfDtTextRel = 22; /* Reloc might modify .text */
+constexpr u32 ElfDtJmpRel = 23; /* Address of PLT relocs */
+constexpr u32 ElfDtBindNow = 24; /* Process relocations of object */
+constexpr u32 ElfDtInitArray = 25; /* Array with addresses of init fct */
+constexpr u32 ElfDtFiniArray = 26; /* Array with addresses of fini fct */
+constexpr u32 ElfDtInitArraySz = 27; /* Size in bytes of DT_INIT_ARRAY */
+constexpr u32 ElfDtFiniArraySz = 28; /* Size in bytes of DT_FINI_ARRAY */
+constexpr u32 ElfDtSymtabShndx = 34; /* Address of SYMTAB_SHNDX section */
+
+} // namespace ELF
+} // namespace Common
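
A small, hedged example of how these declarations might be consumed when probing a 64-bit image; the buffer handling and function name are illustrative assumptions, not part of this diff.

#include <cstring>
#include <span>

#include "common/common_types.h"
#include "common/elf.h"

// Returns true if the buffer starts with a little-endian AArch64 ELF header.
bool IsAArch64Elf(std::span<const u8> data) {
    using namespace Common::ELF;
    if (data.size() < sizeof(Elf64_Ehdr)) {
        return false;
    }
    Elf64_Ehdr ehdr{};
    std::memcpy(&ehdr, data.data(), sizeof(ehdr));
    // Elf64Ident populates the first eight identification bytes
    // (magic, class, data encoding, version, OS ABI).
    return std::memcmp(ehdr.e_ident.data(), Elf64Ident.data(), 8) == 0 &&
           ehdr.e_machine == ElfMachineAArch64;
}
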
diff --git a/src/common/fs/path_util.cpp b/src/common/fs/path_util.cpp
index 62318e70c..1074f2421 100644
--- a/src/common/fs/path_util.cpp
+++ b/src/common/fs/path_util.cpp
@@ -232,9 +232,7 @@ void SetYuzuPath(YuzuPath yuzu_path, const fs::path& new_path) {
fs::path GetExeDirectory() {
wchar_t exe_path[MAX_PATH];
- GetModuleFileNameW(nullptr, exe_path, MAX_PATH);
-
- if (!exe_path) {
+ if (GetModuleFileNameW(nullptr, exe_path, MAX_PATH) == 0) {
LOG_ERROR(Common_Filesystem,
"Failed to get the path to the executable of the current process");
}
diff --git a/src/common/input.h b/src/common/input.h
index 54fcb24b0..bb42aaacc 100644
--- a/src/common/input.h
+++ b/src/common/input.h
@@ -72,6 +72,7 @@ enum class PollingError {
enum class VibrationAmplificationType {
Linear,
Exponential,
+ Test,
};
// Analog properties for calibration
diff --git a/src/common/settings.cpp b/src/common/settings.cpp
index 9a9c74a70..751549583 100644
--- a/src/common/settings.cpp
+++ b/src/common/settings.cpp
@@ -70,6 +70,7 @@ void LogSettings() {
log_path("DataStorage_NANDDir", Common::FS::GetYuzuPath(Common::FS::YuzuPath::NANDDir));
log_path("DataStorage_SDMCDir", Common::FS::GetYuzuPath(Common::FS::YuzuPath::SDMCDir));
log_setting("Debugging_ProgramArgs", values.program_args.GetValue());
+ log_setting("Debugging_GDBStub", values.use_gdbstub.GetValue());
log_setting("Input_EnableMotion", values.motion_enabled.GetValue());
log_setting("Input_EnableVibration", values.vibration_enabled.GetValue());
log_setting("Input_EnableRawInput", values.enable_raw_input.GetValue());
@@ -146,7 +147,7 @@ void UpdateRescalingInfo() {
info.down_shift = 0;
break;
default:
- UNREACHABLE();
+ ASSERT(false);
info.up_scale = 1;
info.down_shift = 0;
}
diff --git a/src/common/settings.h b/src/common/settings.h
index 5b34169a8..a507744a2 100644
--- a/src/common/settings.h
+++ b/src/common/settings.h
@@ -496,7 +496,7 @@ struct Values {
// Renderer
RangedSetting<RendererBackend> renderer_backend{
- RendererBackend::OpenGL, RendererBackend::OpenGL, RendererBackend::Vulkan, "backend"};
+ RendererBackend::Vulkan, RendererBackend::OpenGL, RendererBackend::Vulkan, "backend"};
BasicSetting<bool> renderer_debug{false, "debug"};
BasicSetting<bool> renderer_shader_feedback{false, "shader_feedback"};
BasicSetting<bool> enable_nsight_aftermath{false, "nsight_aftermath"};
@@ -601,11 +601,12 @@ struct Values {
// Debugging
bool record_frame_times;
BasicSetting<bool> use_gdbstub{false, "use_gdbstub"};
- BasicSetting<u16> gdbstub_port{0, "gdbstub_port"};
+ BasicSetting<u16> gdbstub_port{6543, "gdbstub_port"};
BasicSetting<std::string> program_args{std::string(), "program_args"};
BasicSetting<bool> dump_exefs{false, "dump_exefs"};
BasicSetting<bool> dump_nso{false, "dump_nso"};
BasicSetting<bool> dump_shaders{false, "dump_shaders"};
+ BasicSetting<bool> dump_macros{false, "dump_macros"};
BasicSetting<bool> enable_fs_access_log{false, "enable_fs_access_log"};
BasicSetting<bool> reporting_services{false, "reporting_services"};
BasicSetting<bool> quest_flag{false, "quest_flag"};
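
As a usage note, the new defaults are read like any other BasicSetting; a hedged sketch (the surrounding code is not from this diff):

if (Settings::values.use_gdbstub.GetValue()) {
    const u16 port = Settings::values.gdbstub_port.GetValue(); // now defaults to 6543
    // ... start the GDB stub on `port` ...
}
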
diff --git a/src/common/string_util.cpp b/src/common/string_util.cpp
index 703aa5db8..7a495bc79 100644
--- a/src/common/string_util.cpp
+++ b/src/common/string_util.cpp
@@ -178,6 +178,10 @@ std::wstring UTF8ToUTF16W(const std::string& input) {
#endif
+std::u16string U16StringFromBuffer(const u16* input, std::size_t length) {
+ return std::u16string(reinterpret_cast<const char16_t*>(input), length);
+}
+
std::string StringFromFixedZeroTerminatedBuffer(std::string_view buffer, std::size_t max_len) {
std::size_t len = 0;
while (len < buffer.length() && len < max_len && buffer[len] != '\0') {
diff --git a/src/common/string_util.h b/src/common/string_util.h
index a33830aec..ce18a33cf 100644
--- a/src/common/string_util.h
+++ b/src/common/string_util.h
@@ -44,6 +44,8 @@ bool SplitPath(const std::string& full_path, std::string* _pPath, std::string* _
#endif
+[[nodiscard]] std::u16string U16StringFromBuffer(const u16* input, std::size_t length);
+
/**
* Compares the string defined by the range [`begin`, `end`) to the null-terminated C-string
* `other` for equality.
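
A quick, hypothetical example of the new helper, which reinterprets a u16 buffer as UTF-16 without assuming null termination:

const u16 raw[4] = {u'y', u'u', u'z', u'u'};
const std::u16string name = Common::U16StringFromBuffer(raw, 4);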