Diffstat (limited to 'src')
86 files changed, 6876 insertions, 4572 deletions
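The first hunk below (src/audio_core/stream.cpp) adjusts the 60 dB volume slider. A volume v in [0, 1] maps to a scale factor of exp(6.90775 * v) * 0.001, which is exp(ln(1000) * v) / 1000 and runs from 0.001 (-60 dB) up to 1.0 (0 dB); the change special-cases v == 0 so the bottom of the slider produces true silence instead of a residual -60 dB signal. A minimal sketch of that mapping, assuming free-standing helpers rather than the actual stream.cpp member code:

#include <cmath>
#include <cstdint>
#include <vector>

// Illustrative sketch only: the free-function names are assumptions, not the
// exact code in audio_core/stream.cpp. The mapping matches the hunk below.
static float VolumeScale(float volume) {
    // 60 dB dynamic-range slider: volume in [0, 1] maps to
    // exp(ln(1000) * volume) / 1000, i.e. 0.001 (-60 dB) .. 1.0 (0 dB).
    // Special-case 0 so the bottom of the slider is true silence.
    if (volume == 0.0f) {
        return 0.0f;
    }
    return std::exp(6.90775f * volume) * 0.001f;
}

static void VolumeAdjustSamples(std::vector<std::int16_t>& samples, float volume) {
    const float scale = VolumeScale(volume);
    for (auto& sample : samples) {
        sample = static_cast<std::int16_t>(sample * scale);
    }
}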
diff --git a/src/audio_core/stream.cpp b/src/audio_core/stream.cpp index 874673c4e..4ce2d374e 100644 --- a/src/audio_core/stream.cpp +++ b/src/audio_core/stream.cpp @@ -68,7 +68,7 @@ static void VolumeAdjustSamples(std::vector<s16>& samples) { } // Implementation of a volume slider with a dynamic range of 60 dB - const float volume_scale_factor{std::exp(6.90775f * volume) * 0.001f}; + const float volume_scale_factor = volume == 0 ? 0 : std::exp(6.90775f * volume) * 0.001f; for (auto& sample : samples) { sample = static_cast<s16>(sample * volume_scale_factor); } diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 965c28787..f61bcd40d 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -140,8 +140,6 @@ add_library(core STATIC hle/kernel/svc_wrap.h hle/kernel/thread.cpp hle/kernel/thread.h - hle/kernel/timer.cpp - hle/kernel/timer.h hle/kernel/vm_manager.cpp hle/kernel/vm_manager.h hle/kernel/wait_object.cpp diff --git a/src/core/hle/ipc_helpers.h b/src/core/hle/ipc_helpers.h index 0d6c85aed..90f276ee8 100644 --- a/src/core/hle/ipc_helpers.h +++ b/src/core/hle/ipc_helpers.h @@ -217,6 +217,11 @@ private: /// Push /// template <> +inline void ResponseBuilder::Push(s32 value) { + cmdbuf[index++] = static_cast<u32>(value); +} + +template <> inline void ResponseBuilder::Push(u32 value) { cmdbuf[index++] = value; } @@ -235,6 +240,22 @@ inline void ResponseBuilder::Push(ResultCode value) { } template <> +inline void ResponseBuilder::Push(s8 value) { + PushRaw(value); +} + +template <> +inline void ResponseBuilder::Push(s16 value) { + PushRaw(value); +} + +template <> +inline void ResponseBuilder::Push(s64 value) { + Push(static_cast<u32>(value)); + Push(static_cast<u32>(value >> 32)); +} + +template <> inline void ResponseBuilder::Push(u8 value) { PushRaw(value); } diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 67674cd47..7a524ce5a 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -18,7 +18,6 @@ #include "core/hle/kernel/process.h" #include "core/hle/kernel/resource_limit.h" #include "core/hle/kernel/thread.h" -#include "core/hle/kernel/timer.h" #include "core/hle/lock.h" #include "core/hle/result.h" @@ -86,27 +85,12 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_ } } -/// The timer callback event, called when a timer is fired -static void TimerCallback(u64 timer_handle, int cycles_late) { - const auto proper_handle = static_cast<Handle>(timer_handle); - const auto& system = Core::System::GetInstance(); - SharedPtr<Timer> timer = system.Kernel().RetrieveTimerFromCallbackHandleTable(proper_handle); - - if (timer == nullptr) { - LOG_CRITICAL(Kernel, "Callback fired for invalid timer {:016X}", timer_handle); - return; - } - - timer->Signal(cycles_late); -} - struct KernelCore::Impl { void Initialize(KernelCore& kernel) { Shutdown(); InitializeSystemResourceLimit(kernel); InitializeThreads(); - InitializeTimers(); } void Shutdown() { @@ -122,9 +106,6 @@ struct KernelCore::Impl { thread_wakeup_callback_handle_table.Clear(); thread_wakeup_event_type = nullptr; - timer_callback_handle_table.Clear(); - timer_callback_event_type = nullptr; - named_ports.clear(); } @@ -146,11 +127,6 @@ struct KernelCore::Impl { CoreTiming::RegisterEvent("ThreadWakeupCallback", ThreadWakeupCallback); } - void InitializeTimers() { - timer_callback_handle_table.Clear(); - timer_callback_event_type = CoreTiming::RegisterEvent("TimerCallback", TimerCallback); - } - std::atomic<u32> 
next_object_id{0}; std::atomic<u64> next_process_id{Process::ProcessIDMin}; std::atomic<u64> next_thread_id{1}; @@ -161,12 +137,6 @@ struct KernelCore::Impl { SharedPtr<ResourceLimit> system_resource_limit; - /// The event type of the generic timer callback event - CoreTiming::EventType* timer_callback_event_type = nullptr; - // TODO(yuriks): This can be removed if Timer objects are explicitly pooled in the future, - // allowing us to simply use a pool index or similar. - Kernel::HandleTable timer_callback_handle_table; - CoreTiming::EventType* thread_wakeup_event_type = nullptr; // TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future, // allowing us to simply use a pool index or similar. @@ -198,10 +168,6 @@ SharedPtr<Thread> KernelCore::RetrieveThreadFromWakeupCallbackHandleTable(Handle return impl->thread_wakeup_callback_handle_table.Get<Thread>(handle); } -SharedPtr<Timer> KernelCore::RetrieveTimerFromCallbackHandleTable(Handle handle) const { - return impl->timer_callback_handle_table.Get<Timer>(handle); -} - void KernelCore::AppendNewProcess(SharedPtr<Process> process) { impl->process_list.push_back(std::move(process)); } @@ -247,18 +213,10 @@ u64 KernelCore::CreateNewProcessID() { return impl->next_process_id++; } -ResultVal<Handle> KernelCore::CreateTimerCallbackHandle(const SharedPtr<Timer>& timer) { - return impl->timer_callback_handle_table.Create(timer); -} - CoreTiming::EventType* KernelCore::ThreadWakeupCallbackEventType() const { return impl->thread_wakeup_event_type; } -CoreTiming::EventType* KernelCore::TimerCallbackEventType() const { - return impl->timer_callback_event_type; -} - Kernel::HandleTable& KernelCore::ThreadWakeupCallbackHandleTable() { return impl->thread_wakeup_callback_handle_table; } diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 58c9d108b..c643a6401 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -22,7 +22,6 @@ class HandleTable; class Process; class ResourceLimit; class Thread; -class Timer; /// Represents a single instance of the kernel. class KernelCore { @@ -51,9 +50,6 @@ public: /// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table. SharedPtr<Thread> RetrieveThreadFromWakeupCallbackHandleTable(Handle handle) const; - /// Retrieves a shared pointer to a Timer instance within the timer callback handle table. - SharedPtr<Timer> RetrieveTimerFromCallbackHandleTable(Handle handle) const; - /// Adds the given shared pointer to an internal list of active processes. void AppendNewProcess(SharedPtr<Process> process); @@ -82,7 +78,6 @@ private: friend class Object; friend class Process; friend class Thread; - friend class Timer; /// Creates a new object ID, incrementing the internal object ID counter. u32 CreateNewObjectID(); @@ -93,15 +88,9 @@ private: /// Creates a new thread ID, incrementing the internal thread ID counter. u64 CreateNewThreadID(); - /// Creates a timer callback handle for the given timer. - ResultVal<Handle> CreateTimerCallbackHandle(const SharedPtr<Timer>& timer); - /// Retrieves the event type used for thread wakeup callbacks. CoreTiming::EventType* ThreadWakeupCallbackEventType() const; - /// Retrieves the event type used for timer callbacks. - CoreTiming::EventType* TimerCallbackEventType() const; - /// Provides a reference to the thread wakeup callback handle table. 
Kernel::HandleTable& ThreadWakeupCallbackHandleTable(); diff --git a/src/core/hle/kernel/object.cpp b/src/core/hle/kernel/object.cpp index 806078638..8870463d0 100644 --- a/src/core/hle/kernel/object.cpp +++ b/src/core/hle/kernel/object.cpp @@ -16,7 +16,6 @@ bool Object::IsWaitable() const { case HandleType::ReadableEvent: case HandleType::Thread: case HandleType::Process: - case HandleType::Timer: case HandleType::ServerPort: case HandleType::ServerSession: return true; diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h index 1541b6e3c..4c2505908 100644 --- a/src/core/hle/kernel/object.h +++ b/src/core/hle/kernel/object.h @@ -25,7 +25,6 @@ enum class HandleType : u32 { Thread, Process, AddressArbiter, - Timer, ResourceLimit, ClientPort, ServerPort, diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp index 6973e580c..0e5083f70 100644 --- a/src/core/hle/kernel/readable_event.cpp +++ b/src/core/hle/kernel/readable_event.cpp @@ -44,8 +44,4 @@ ResultCode ReadableEvent::Reset() { return RESULT_SUCCESS; } -void ReadableEvent::WakeupAllWaitingThreads() { - WaitObject::WakeupAllWaitingThreads(); -} - } // namespace Kernel diff --git a/src/core/hle/kernel/readable_event.h b/src/core/hle/kernel/readable_event.h index 80b3b0aba..77a9c362c 100644 --- a/src/core/hle/kernel/readable_event.h +++ b/src/core/hle/kernel/readable_event.h @@ -39,8 +39,6 @@ public: bool ShouldWait(Thread* thread) const override; void Acquire(Thread* thread) override; - void WakeupAllWaitingThreads() override; - /// Unconditionally clears the readable event's state. void Clear(); diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 6588bd3b8..7cfecb68c 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -597,6 +597,7 @@ enum class BreakType : u32 { PostNROLoad = 4, PreNROUnload = 5, PostNROUnload = 6, + CppException = 7, }; struct BreakReason { @@ -669,6 +670,9 @@ static void Break(u32 reason, u64 info1, u64 info2) { "Signalling debugger, Unloaded an NRO at 0x{:016X} with size 0x{:016X}", info1, info2); break; + case BreakType::CppException: + LOG_CRITICAL(Debug_Emulated, "Signalling debugger. Uncaught C++ exception encountered."); + break; default: LOG_WARNING( Debug_Emulated, diff --git a/src/core/hle/kernel/timer.cpp b/src/core/hle/kernel/timer.cpp deleted file mode 100644 index 2c4f50e2b..000000000 --- a/src/core/hle/kernel/timer.cpp +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2015 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. 
- -#include "common/assert.h" -#include "common/logging/log.h" -#include "core/core.h" -#include "core/core_timing.h" -#include "core/core_timing_util.h" -#include "core/hle/kernel/handle_table.h" -#include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/object.h" -#include "core/hle/kernel/thread.h" -#include "core/hle/kernel/timer.h" - -namespace Kernel { - -Timer::Timer(KernelCore& kernel) : WaitObject{kernel} {} -Timer::~Timer() = default; - -SharedPtr<Timer> Timer::Create(KernelCore& kernel, ResetType reset_type, std::string name) { - SharedPtr<Timer> timer(new Timer(kernel)); - - timer->reset_type = reset_type; - timer->signaled = false; - timer->name = std::move(name); - timer->initial_delay = 0; - timer->interval_delay = 0; - timer->callback_handle = kernel.CreateTimerCallbackHandle(timer).Unwrap(); - - return timer; -} - -bool Timer::ShouldWait(Thread* thread) const { - return !signaled; -} - -void Timer::Acquire(Thread* thread) { - ASSERT_MSG(!ShouldWait(thread), "object unavailable!"); - - if (reset_type == ResetType::OneShot) - signaled = false; -} - -void Timer::Set(s64 initial, s64 interval) { - // Ensure we get rid of any previous scheduled event - Cancel(); - - initial_delay = initial; - interval_delay = interval; - - if (initial == 0) { - // Immediately invoke the callback - Signal(0); - } else { - CoreTiming::ScheduleEvent(CoreTiming::nsToCycles(initial), kernel.TimerCallbackEventType(), - callback_handle); - } -} - -void Timer::Cancel() { - CoreTiming::UnscheduleEvent(kernel.TimerCallbackEventType(), callback_handle); -} - -void Timer::Clear() { - signaled = false; -} - -void Timer::WakeupAllWaitingThreads() { - WaitObject::WakeupAllWaitingThreads(); -} - -void Timer::Signal(int cycles_late) { - LOG_TRACE(Kernel, "Timer {} fired", GetObjectId()); - - signaled = true; - - // Resume all waiting threads - WakeupAllWaitingThreads(); - - if (interval_delay != 0) { - // Reschedule the timer with the interval delay - CoreTiming::ScheduleEvent(CoreTiming::nsToCycles(interval_delay) - cycles_late, - kernel.TimerCallbackEventType(), callback_handle); - } -} - -} // namespace Kernel diff --git a/src/core/hle/kernel/timer.h b/src/core/hle/kernel/timer.h deleted file mode 100644 index 12915c1b1..000000000 --- a/src/core/hle/kernel/timer.h +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2015 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include "common/common_types.h" -#include "core/hle/kernel/object.h" -#include "core/hle/kernel/wait_object.h" - -namespace Kernel { - -class KernelCore; - -class Timer final : public WaitObject { -public: - /** - * Creates a timer - * @param kernel The kernel instance to create the timer callback handle for. 
- * @param reset_type ResetType describing how to create the timer - * @param name Optional name of timer - * @return The created Timer - */ - static SharedPtr<Timer> Create(KernelCore& kernel, ResetType reset_type, - std::string name = "Unknown"); - - std::string GetTypeName() const override { - return "Timer"; - } - std::string GetName() const override { - return name; - } - - static const HandleType HANDLE_TYPE = HandleType::Timer; - HandleType GetHandleType() const override { - return HANDLE_TYPE; - } - - ResetType GetResetType() const { - return reset_type; - } - - u64 GetInitialDelay() const { - return initial_delay; - } - - u64 GetIntervalDelay() const { - return interval_delay; - } - - bool ShouldWait(Thread* thread) const override; - void Acquire(Thread* thread) override; - - void WakeupAllWaitingThreads() override; - - /** - * Starts the timer, with the specified initial delay and interval. - * @param initial Delay until the timer is first fired - * @param interval Delay until the timer is fired after the first time - */ - void Set(s64 initial, s64 interval); - - void Cancel(); - void Clear(); - - /** - * Signals the timer, waking up any waiting threads and rescheduling it - * for the next interval. - * This method should not be called from outside the timer callback handler, - * lest multiple callback events get scheduled. - */ - void Signal(int cycles_late); - -private: - explicit Timer(KernelCore& kernel); - ~Timer() override; - - ResetType reset_type; ///< The ResetType of this timer - - u64 initial_delay; ///< The delay until the timer fires for the first time - u64 interval_delay; ///< The delay until the timer fires after the first time - - bool signaled; ///< Whether the timer has been signaled or not - std::string name; ///< Name of timer (optional) - - /// Handle used as userdata to reference this object when inserting into the CoreTiming queue. - Handle callback_handle; -}; - -} // namespace Kernel diff --git a/src/core/hle/kernel/wait_object.h b/src/core/hle/kernel/wait_object.h index d70b67893..5987fb971 100644 --- a/src/core/hle/kernel/wait_object.h +++ b/src/core/hle/kernel/wait_object.h @@ -33,19 +33,19 @@ public: * Add a thread to wait on this object * @param thread Pointer to thread to add */ - virtual void AddWaitingThread(SharedPtr<Thread> thread); + void AddWaitingThread(SharedPtr<Thread> thread); /** * Removes a thread from waiting on this object (e.g. if it was resumed already) * @param thread Pointer to thread to remove */ - virtual void RemoveWaitingThread(Thread* thread); + void RemoveWaitingThread(Thread* thread); /** * Wake up all threads waiting on this object that can be awoken, in priority order, * and set the synchronization result and output of the thread. */ - virtual void WakeupAllWaitingThreads(); + void WakeupAllWaitingThreads(); /** * Wakes up a single thread waiting on this object. 
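The ipc_helpers.h hunk above adds Push overloads for signed integers, with the 64-bit variant written as two 32-bit command-buffer words, low word first. A minimal sketch of that decomposition, using a plain struct and fixed-size array as stand-ins for the real ResponseBuilder and command buffer:

#include <cstddef>
#include <cstdint>

// Minimal sketch, not the real IPC::ResponseBuilder: shows how the new
// Push(s64) overload writes a 64-bit value into the command buffer as two
// 32-bit words, low word first, mirroring the ipc_helpers.h hunk above.
struct ResponseBuilderSketch {
    std::uint32_t cmdbuf[16]{};
    std::size_t index = 0;

    void Push(std::uint32_t value) {
        cmdbuf[index++] = value;
    }

    void Push(std::int64_t value) {
        Push(static_cast<std::uint32_t>(value));        // low 32 bits
        Push(static_cast<std::uint32_t>(value >> 32));  // high 32 bits
    }
};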
diff --git a/src/core/hle/service/am/applet_ae.cpp b/src/core/hle/service/am/applet_ae.cpp index 41a573a91..b888f861d 100644 --- a/src/core/hle/service/am/applet_ae.cpp +++ b/src/core/hle/service/am/applet_ae.cpp @@ -249,7 +249,8 @@ AppletAE::AppletAE(std::shared_ptr<NVFlinger::NVFlinger> nvflinger, {300, nullptr, "OpenOverlayAppletProxy"}, {350, nullptr, "OpenSystemApplicationProxy"}, {400, nullptr, "CreateSelfLibraryAppletCreatorForDevelop"}, - {401, nullptr, "GetSystemAppletControllerForDebug"}, + {410, nullptr, "GetSystemAppletControllerForDebug"}, + {1000, nullptr, "GetDebugFunctions"}, }; // clang-format on diff --git a/src/core/hle/service/audio/audin_u.cpp b/src/core/hle/service/audio/audin_u.cpp index 657010312..088410564 100644 --- a/src/core/hle/service/audio/audin_u.cpp +++ b/src/core/hle/service/audio/audin_u.cpp @@ -12,6 +12,7 @@ namespace Service::Audio { class IAudioIn final : public ServiceFramework<IAudioIn> { public: IAudioIn() : ServiceFramework("IAudioIn") { + // clang-format off static const FunctionInfo functions[] = { {0, nullptr, "GetAudioInState"}, {1, nullptr, "StartAudioIn"}, @@ -28,16 +29,24 @@ public: {12, nullptr, "SetAudioInDeviceGain"}, {13, nullptr, "GetAudioInDeviceGain"}, }; + // clang-format on + RegisterHandlers(functions); } ~IAudioIn() = default; }; AudInU::AudInU() : ServiceFramework("audin:u") { + // clang-format off static const FunctionInfo functions[] = { - {0, nullptr, "ListAudioIns"}, {1, nullptr, "OpenAudioIn"}, {2, nullptr, "Unknown"}, - {3, nullptr, "OpenAudioInAuto"}, {4, nullptr, "ListAudioInsAuto"}, + {0, nullptr, "ListAudioIns"}, + {1, nullptr, "OpenAudioIn"}, + {2, nullptr, "Unknown"}, + {3, nullptr, "OpenAudioInAuto"}, + {4, nullptr, "ListAudioInsAuto"}, }; + // clang-format on + RegisterHandlers(functions); } diff --git a/src/core/hle/service/audio/audrec_u.cpp b/src/core/hle/service/audio/audrec_u.cpp index 34974afa9..6956a2e64 100644 --- a/src/core/hle/service/audio/audrec_u.cpp +++ b/src/core/hle/service/audio/audrec_u.cpp @@ -12,6 +12,7 @@ namespace Service::Audio { class IFinalOutputRecorder final : public ServiceFramework<IFinalOutputRecorder> { public: IFinalOutputRecorder() : ServiceFramework("IFinalOutputRecorder") { + // clang-format off static const FunctionInfo functions[] = { {0, nullptr, "GetFinalOutputRecorderState"}, {1, nullptr, "StartFinalOutputRecorder"}, @@ -20,10 +21,13 @@ public: {4, nullptr, "RegisterBufferEvent"}, {5, nullptr, "GetReleasedFinalOutputRecorderBuffer"}, {6, nullptr, "ContainsFinalOutputRecorderBuffer"}, - {7, nullptr, "Unknown"}, + {7, nullptr, "GetFinalOutputRecorderBufferEndTime"}, {8, nullptr, "AppendFinalOutputRecorderBufferAuto"}, {9, nullptr, "GetReleasedFinalOutputRecorderBufferAuto"}, + {10, nullptr, "FlushFinalOutputRecorderBuffers"}, }; + // clang-format on + RegisterHandlers(functions); } ~IFinalOutputRecorder() = default; diff --git a/src/core/hle/service/audio/audren_u.cpp b/src/core/hle/service/audio/audren_u.cpp index 945259c7d..76cc48254 100644 --- a/src/core/hle/service/audio/audren_u.cpp +++ b/src/core/hle/service/audio/audren_u.cpp @@ -229,14 +229,16 @@ private: }; // namespace Audio AudRenU::AudRenU() : ServiceFramework("audren:u") { + // clang-format off static const FunctionInfo functions[] = { {0, &AudRenU::OpenAudioRenderer, "OpenAudioRenderer"}, {1, &AudRenU::GetAudioRendererWorkBufferSize, "GetAudioRendererWorkBufferSize"}, - {2, &AudRenU::GetAudioDevice, "GetAudioDevice"}, + {2, &AudRenU::GetAudioDeviceService, "GetAudioDeviceService"}, {3, nullptr, 
"OpenAudioRendererAuto"}, - {4, &AudRenU::GetAudioDeviceServiceWithRevisionInfo, - "GetAudioDeviceServiceWithRevisionInfo"}, + {4, &AudRenU::GetAudioDeviceServiceWithRevisionInfo, "GetAudioDeviceServiceWithRevisionInfo"}, }; + // clang-format on + RegisterHandlers(functions); } @@ -313,7 +315,7 @@ void AudRenU::GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_Audio, "buffer_size=0x{:X}", output_sz); } -void AudRenU::GetAudioDevice(Kernel::HLERequestContext& ctx) { +void AudRenU::GetAudioDeviceService(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_Audio, "called"); IPC::ResponseBuilder rb{ctx, 2, 0, 1}; diff --git a/src/core/hle/service/audio/audren_u.h b/src/core/hle/service/audio/audren_u.h index c6bc3a90a..3d63388fb 100644 --- a/src/core/hle/service/audio/audren_u.h +++ b/src/core/hle/service/audio/audren_u.h @@ -20,7 +20,7 @@ public: private: void OpenAudioRenderer(Kernel::HLERequestContext& ctx); void GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx); - void GetAudioDevice(Kernel::HLERequestContext& ctx); + void GetAudioDeviceService(Kernel::HLERequestContext& ctx); void GetAudioDeviceServiceWithRevisionInfo(Kernel::HLERequestContext& ctx); enum class AudioFeatures : u32 { diff --git a/src/core/hle/service/audio/hwopus.cpp b/src/core/hle/service/audio/hwopus.cpp index a850cadc8..11eba4a12 100644 --- a/src/core/hle/service/audio/hwopus.cpp +++ b/src/core/hle/service/audio/hwopus.cpp @@ -5,7 +5,6 @@ #include <chrono> #include <cstring> #include <memory> -#include <optional> #include <vector> #include <opus.h> @@ -30,48 +29,66 @@ public: u32 channel_count) : ServiceFramework("IHardwareOpusDecoderManager"), decoder(std::move(decoder)), sample_rate(sample_rate), channel_count(channel_count) { + // clang-format off static const FunctionInfo functions[] = { - {0, &IHardwareOpusDecoderManager::DecodeInterleaved, "DecodeInterleaved"}, + {0, &IHardwareOpusDecoderManager::DecodeInterleavedOld, "DecodeInterleavedOld"}, {1, nullptr, "SetContext"}, - {2, nullptr, "DecodeInterleavedForMultiStream"}, + {2, nullptr, "DecodeInterleavedForMultiStreamOld"}, {3, nullptr, "SetContextForMultiStream"}, - {4, &IHardwareOpusDecoderManager::DecodeInterleavedWithPerformance, - "DecodeInterleavedWithPerformance"}, - {5, nullptr, "Unknown5"}, - {6, nullptr, "Unknown6"}, - {7, nullptr, "Unknown7"}, + {4, &IHardwareOpusDecoderManager::DecodeInterleavedWithPerfOld, "DecodeInterleavedWithPerfOld"}, + {5, nullptr, "DecodeInterleavedForMultiStreamWithPerfOld"}, + {6, &IHardwareOpusDecoderManager::DecodeInterleaved, "DecodeInterleaved"}, + {7, nullptr, "DecodeInterleavedForMultiStream"}, }; + // clang-format on + RegisterHandlers(functions); } private: - void DecodeInterleaved(Kernel::HLERequestContext& ctx) { + /// Describes extra behavior that may be asked of the decoding context. + enum class ExtraBehavior { + /// No extra behavior. + None, + + /// Resets the decoder context back to a freshly initialized state. 
+ ResetContext, + }; + + void DecodeInterleavedOld(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Audio, "called"); - u32 consumed = 0; - u32 sample_count = 0; - std::vector<opus_int16> samples(ctx.GetWriteBufferSize() / sizeof(opus_int16)); - if (!Decoder_DecodeInterleaved(consumed, sample_count, ctx.ReadBuffer(), samples)) { - LOG_ERROR(Audio, "Failed to decode opus data"); - IPC::ResponseBuilder rb{ctx, 2}; - // TODO(ogniK): Use correct error code - rb.Push(ResultCode(-1)); - return; - } - IPC::ResponseBuilder rb{ctx, 4}; - rb.Push(RESULT_SUCCESS); - rb.Push<u32>(consumed); - rb.Push<u32>(sample_count); - ctx.WriteBuffer(samples.data(), samples.size() * sizeof(s16)); + DecodeInterleavedHelper(ctx, nullptr, ExtraBehavior::None); } - void DecodeInterleavedWithPerformance(Kernel::HLERequestContext& ctx) { + void DecodeInterleavedWithPerfOld(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Audio, "called"); + + u64 performance = 0; + DecodeInterleavedHelper(ctx, &performance, ExtraBehavior::None); + } + + void DecodeInterleaved(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Audio, "called"); + IPC::RequestParser rp{ctx}; + const auto extra_behavior = + rp.Pop<bool>() ? ExtraBehavior::ResetContext : ExtraBehavior::None; + + u64 performance = 0; + DecodeInterleavedHelper(ctx, &performance, extra_behavior); + } + + void DecodeInterleavedHelper(Kernel::HLERequestContext& ctx, u64* performance, + ExtraBehavior extra_behavior) { u32 consumed = 0; u32 sample_count = 0; - u64 performance = 0; std::vector<opus_int16> samples(ctx.GetWriteBufferSize() / sizeof(opus_int16)); + + if (extra_behavior == ExtraBehavior::ResetContext) { + ResetDecoderContext(); + } + if (!Decoder_DecodeInterleaved(consumed, sample_count, ctx.ReadBuffer(), samples, performance)) { LOG_ERROR(Audio, "Failed to decode opus data"); @@ -80,25 +97,28 @@ private: rb.Push(ResultCode(-1)); return; } - IPC::ResponseBuilder rb{ctx, 6}; + + const u32 param_size = performance != nullptr ? 
6 : 4; + IPC::ResponseBuilder rb{ctx, param_size}; rb.Push(RESULT_SUCCESS); rb.Push<u32>(consumed); rb.Push<u32>(sample_count); - rb.Push<u64>(performance); + if (performance) { + rb.Push<u64>(*performance); + } ctx.WriteBuffer(samples.data(), samples.size() * sizeof(s16)); } - bool Decoder_DecodeInterleaved( - u32& consumed, u32& sample_count, const std::vector<u8>& input, - std::vector<opus_int16>& output, - std::optional<std::reference_wrapper<u64>> performance_time = std::nullopt) { + bool Decoder_DecodeInterleaved(u32& consumed, u32& sample_count, const std::vector<u8>& input, + std::vector<opus_int16>& output, u64* out_performance_time) { const auto start_time = std::chrono::high_resolution_clock::now(); - std::size_t raw_output_sz = output.size() * sizeof(opus_int16); + const std::size_t raw_output_sz = output.size() * sizeof(opus_int16); if (sizeof(OpusHeader) > input.size()) { LOG_ERROR(Audio, "Input is smaller than the header size, header_sz={}, input_sz={}", sizeof(OpusHeader), input.size()); return false; } + OpusHeader hdr{}; std::memcpy(&hdr, input.data(), sizeof(OpusHeader)); if (sizeof(OpusHeader) + static_cast<u32>(hdr.sz) > input.size()) { @@ -106,8 +126,9 @@ private: sizeof(OpusHeader) + static_cast<u32>(hdr.sz), input.size()); return false; } - auto frame = input.data() + sizeof(OpusHeader); - auto decoded_sample_count = opus_packet_get_nb_samples( + + const auto frame = input.data() + sizeof(OpusHeader); + const auto decoded_sample_count = opus_packet_get_nb_samples( frame, static_cast<opus_int32>(input.size() - sizeof(OpusHeader)), static_cast<opus_int32>(sample_rate)); if (decoded_sample_count * channel_count * sizeof(u16) > raw_output_sz) { @@ -117,8 +138,9 @@ private: decoded_sample_count * channel_count * sizeof(u16), raw_output_sz); return false; } + const int frame_size = (static_cast<int>(raw_output_sz / sizeof(s16) / channel_count)); - auto out_sample_count = + const auto out_sample_count = opus_decode(decoder.get(), frame, hdr.sz, output.data(), frame_size, 0); if (out_sample_count < 0) { LOG_ERROR(Audio, @@ -127,16 +149,24 @@ private: out_sample_count, frame_size, static_cast<u32>(hdr.sz)); return false; } + const auto end_time = std::chrono::high_resolution_clock::now() - start_time; sample_count = out_sample_count; consumed = static_cast<u32>(sizeof(OpusHeader) + hdr.sz); - if (performance_time.has_value()) { - performance_time->get() = + if (out_performance_time != nullptr) { + *out_performance_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time).count(); } + return true; } + void ResetDecoderContext() { + ASSERT(decoder != nullptr); + + opus_decoder_ctl(decoder.get(), OPUS_RESET_STATE); + } + struct OpusHeader { u32_be sz; // Needs to be BE for some odd reason INSERT_PADDING_WORDS(1); @@ -157,6 +187,7 @@ void HwOpus::GetWorkBufferSize(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; const auto sample_rate = rp.Pop<u32>(); const auto channel_count = rp.Pop<u32>(); + LOG_DEBUG(Audio, "called with sample_rate={}, channel_count={}", sample_rate, channel_count); ASSERT_MSG(sample_rate == 48000 || sample_rate == 24000 || sample_rate == 16000 || @@ -174,9 +205,10 @@ void HwOpus::GetWorkBufferSize(Kernel::HLERequestContext& ctx) { void HwOpus::OpenOpusDecoder(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; - auto sample_rate = rp.Pop<u32>(); - auto channel_count = rp.Pop<u32>(); - auto buffer_sz = rp.Pop<u32>(); + const auto sample_rate = rp.Pop<u32>(); + const auto channel_count = rp.Pop<u32>(); + const auto 
buffer_sz = rp.Pop<u32>(); + LOG_DEBUG(Audio, "called sample_rate={}, channel_count={}, buffer_size={}", sample_rate, channel_count, buffer_sz); @@ -185,8 +217,9 @@ void HwOpus::OpenOpusDecoder(Kernel::HLERequestContext& ctx) { "Invalid sample rate"); ASSERT_MSG(channel_count == 1 || channel_count == 2, "Invalid channel count"); - std::size_t worker_sz = WorkerBufferSize(channel_count); + const std::size_t worker_sz = WorkerBufferSize(channel_count); ASSERT_MSG(buffer_sz >= worker_sz, "Worker buffer too large"); + std::unique_ptr<OpusDecoder, OpusDeleter> decoder{ static_cast<OpusDecoder*>(operator new(worker_sz))}; if (const int err = opus_decoder_init(decoder.get(), sample_rate, channel_count)) { diff --git a/src/core/hle/service/btdrv/btdrv.cpp b/src/core/hle/service/btdrv/btdrv.cpp index 5704ca0ab..59ef603e1 100644 --- a/src/core/hle/service/btdrv/btdrv.cpp +++ b/src/core/hle/service/btdrv/btdrv.cpp @@ -19,16 +19,16 @@ public: explicit Bt() : ServiceFramework{"bt"} { // clang-format off static const FunctionInfo functions[] = { - {0, nullptr, "Unknown0"}, - {1, nullptr, "Unknown1"}, - {2, nullptr, "Unknown2"}, - {3, nullptr, "Unknown3"}, - {4, nullptr, "Unknown4"}, - {5, nullptr, "Unknown5"}, - {6, nullptr, "Unknown6"}, - {7, nullptr, "Unknown7"}, - {8, nullptr, "Unknown8"}, - {9, &Bt::RegisterEvent, "RegisterEvent"}, + {0, nullptr, "LeClientReadCharacteristic"}, + {1, nullptr, "LeClientReadDescriptor"}, + {2, nullptr, "LeClientWriteCharacteristic"}, + {3, nullptr, "LeClientWriteDescriptor"}, + {4, nullptr, "LeClientRegisterNotification"}, + {5, nullptr, "LeClientDeregisterNotification"}, + {6, nullptr, "SetLeResponse"}, + {7, nullptr, "LeSendIndication"}, + {8, nullptr, "GetLeEventInfo"}, + {9, &Bt::RegisterBleEvent, "RegisterBleEvent"}, }; // clang-format on RegisterHandlers(functions); @@ -39,7 +39,7 @@ public: } private: - void RegisterEvent(Kernel::HLERequestContext& ctx) { + void RegisterBleEvent(Kernel::HLERequestContext& ctx) { LOG_WARNING(Service_BTM, "(STUBBED) called"); IPC::ResponseBuilder rb{ctx, 2, 1}; @@ -55,11 +55,11 @@ public: explicit BtDrv() : ServiceFramework{"btdrv"} { // clang-format off static const FunctionInfo functions[] = { - {0, nullptr, "Unknown"}, - {1, nullptr, "Init"}, - {2, nullptr, "Enable"}, - {3, nullptr, "Disable"}, - {4, nullptr, "CleanupAndShutdown"}, + {0, nullptr, "InitializeBluetoothDriver"}, + {1, nullptr, "InitializeBluetooth"}, + {2, nullptr, "EnableBluetooth"}, + {3, nullptr, "DisableBluetooth"}, + {4, nullptr, "CleanupBluetooth"}, {5, nullptr, "GetAdapterProperties"}, {6, nullptr, "GetAdapterProperty"}, {7, nullptr, "SetAdapterProperty"}, @@ -70,36 +70,91 @@ public: {12, nullptr, "CancelBond"}, {13, nullptr, "PinReply"}, {14, nullptr, "SspReply"}, - {15, nullptr, "Unknown2"}, - {16, nullptr, "InitInterfaces"}, - {17, nullptr, "HidHostInterface_Connect"}, - {18, nullptr, "HidHostInterface_Disconnect"}, - {19, nullptr, "HidHostInterface_SendData"}, - {20, nullptr, "HidHostInterface_SendData2"}, - {21, nullptr, "HidHostInterface_SetReport"}, - {22, nullptr, "HidHostInterface_GetReport"}, - {23, nullptr, "HidHostInterface_WakeController"}, - {24, nullptr, "HidHostInterface_AddPairedDevice"}, - {25, nullptr, "HidHostInterface_GetPairedDevice"}, - {26, nullptr, "HidHostInterface_CleanupAndShutdown"}, - {27, nullptr, "Unknown3"}, - {28, nullptr, "ExtInterface_SetTSI"}, - {29, nullptr, "ExtInterface_SetBurstMode"}, - {30, nullptr, "ExtInterface_SetZeroRetran"}, - {31, nullptr, "ExtInterface_SetMcMode"}, - {32, nullptr, 
"ExtInterface_StartLlrMode"}, - {33, nullptr, "ExtInterface_ExitLlrMode"}, - {34, nullptr, "ExtInterface_SetRadio"}, - {35, nullptr, "ExtInterface_SetVisibility"}, - {36, nullptr, "Unknown4"}, - {37, nullptr, "Unknown5"}, - {38, nullptr, "HidHostInterface_GetLatestPlr"}, - {39, nullptr, "ExtInterface_GetPendingConnections"}, - {40, nullptr, "HidHostInterface_GetChannelMap"}, - {41, nullptr, "SetIsBluetoothBoostEnabled"}, - {42, nullptr, "GetIsBluetoothBoostEnabled"}, - {43, nullptr, "SetIsBluetoothAfhEnabled"}, - {44, nullptr, "GetIsBluetoothAfhEnabled"}, + {15, nullptr, "GetEventInfo"}, + {16, nullptr, "InitializeHid"}, + {17, nullptr, "HidConnect"}, + {18, nullptr, "HidDisconnect"}, + {19, nullptr, "HidSendData"}, + {20, nullptr, "HidSendData2"}, + {21, nullptr, "HidSetReport"}, + {22, nullptr, "HidGetReport"}, + {23, nullptr, "HidWakeController"}, + {24, nullptr, "HidAddPairedDevice"}, + {25, nullptr, "HidGetPairedDevice"}, + {26, nullptr, "CleanupHid"}, + {27, nullptr, "HidGetEventInfo"}, + {28, nullptr, "ExtSetTsi"}, + {29, nullptr, "ExtSetBurstMode"}, + {30, nullptr, "ExtSetZeroRetran"}, + {31, nullptr, "ExtSetMcMode"}, + {32, nullptr, "ExtStartLlrMode"}, + {33, nullptr, "ExtExitLlrMode"}, + {34, nullptr, "ExtSetRadio"}, + {35, nullptr, "ExtSetVisibility"}, + {36, nullptr, "ExtSetTbfcScan"}, + {37, nullptr, "RegisterHidReportEvent"}, + {38, nullptr, "HidGetReportEventInfo"}, + {39, nullptr, "GetLatestPlr"}, + {40, nullptr, "ExtGetPendingConnections"}, + {41, nullptr, "GetChannelMap"}, + {42, nullptr, "EnableBluetoothBoostSetting"}, + {43, nullptr, "IsBluetoothBoostSettingEnabled"}, + {44, nullptr, "EnableBluetoothAfhSetting"}, + {45, nullptr, "IsBluetoothAfhSettingEnabled"}, + {46, nullptr, "InitializeBluetoothLe"}, + {47, nullptr, "EnableBluetoothLe"}, + {48, nullptr, "DisableBluetoothLe"}, + {49, nullptr, "CleanupBluetoothLe"}, + {50, nullptr, "SetLeVisibility"}, + {51, nullptr, "SetLeConnectionParameter"}, + {52, nullptr, "SetLeDefaultConnectionParameter"}, + {53, nullptr, "SetLeAdvertiseData"}, + {54, nullptr, "SetLeAdvertiseParameter"}, + {55, nullptr, "StartLeScan"}, + {56, nullptr, "StopLeScan"}, + {57, nullptr, "AddLeScanFilterCondition"}, + {58, nullptr, "DeleteLeScanFilterCondition"}, + {59, nullptr, "DeleteLeScanFilter"}, + {60, nullptr, "ClearLeScanFilters"}, + {61, nullptr, "EnableLeScanFilter"}, + {62, nullptr, "RegisterLeClient"}, + {63, nullptr, "UnregisterLeClient"}, + {64, nullptr, "UnregisterLeClientAll"}, + {65, nullptr, "LeClientConnect"}, + {66, nullptr, "LeClientCancelConnection"}, + {67, nullptr, "LeClientDisconnect"}, + {68, nullptr, "LeClientGetAttributes"}, + {69, nullptr, "LeClientDiscoverService"}, + {70, nullptr, "LeClientConfigureMtu"}, + {71, nullptr, "RegisterLeServer"}, + {72, nullptr, "UnregisterLeServer"}, + {73, nullptr, "LeServerConnect"}, + {74, nullptr, "LeServerDisconnect"}, + {75, nullptr, "CreateLeService"}, + {76, nullptr, "StartLeService"}, + {77, nullptr, "AddLeCharacteristic"}, + {78, nullptr, "AddLeDescriptor"}, + {79, nullptr, "GetLeCoreEventInfo"}, + {80, nullptr, "LeGetFirstCharacteristic"}, + {81, nullptr, "LeGetNextCharacteristic"}, + {82, nullptr, "LeGetFirstDescriptor"}, + {83, nullptr, "LeGetNextDescriptor"}, + {84, nullptr, "RegisterLeCoreDataPath"}, + {85, nullptr, "UnregisterLeCoreDataPath"}, + {86, nullptr, "RegisterLeHidDataPath"}, + {87, nullptr, "UnregisterLeHidDataPath"}, + {88, nullptr, "RegisterLeDataPath"}, + {89, nullptr, "UnregisterLeDataPath"}, + {90, nullptr, "LeClientReadCharacteristic"}, + {91, nullptr, 
"LeClientReadDescriptor"}, + {92, nullptr, "LeClientWriteCharacteristic"}, + {93, nullptr, "LeClientWriteDescriptor"}, + {94, nullptr, "LeClientRegisterNotification"}, + {95, nullptr, "LeClientDeregisterNotification"}, + {96, nullptr, "GetLeHidEventInfo"}, + {97, nullptr, "RegisterBleHidEvent"}, + {98, nullptr, "SetLeScanParameter"}, + {256, nullptr, "GetIsManufacturingMode"} }; // clang-format on diff --git a/src/core/hle/service/btm/btm.cpp b/src/core/hle/service/btm/btm.cpp index ef7398a23..4f15c3f19 100644 --- a/src/core/hle/service/btm/btm.cpp +++ b/src/core/hle/service/btm/btm.cpp @@ -20,38 +20,38 @@ public: explicit IBtmUserCore() : ServiceFramework{"IBtmUserCore"} { // clang-format off static const FunctionInfo functions[] = { - {0, &IBtmUserCore::GetScanEvent, "GetScanEvent"}, - {1, nullptr, "Unknown1"}, - {2, nullptr, "Unknown2"}, - {3, nullptr, "Unknown3"}, - {4, nullptr, "Unknown4"}, - {5, nullptr, "Unknown5"}, - {6, nullptr, "Unknown6"}, - {7, nullptr, "Unknown7"}, - {8, nullptr, "Unknown8"}, - {9, nullptr, "Unknown9"}, - {10, nullptr, "Unknown10"}, - {17, &IBtmUserCore::GetConnectionEvent, "GetConnectionEvent"}, - {18, nullptr, "Unknown18"}, - {19, nullptr, "Unknown19"}, - {20, nullptr, "Unknown20"}, - {21, nullptr, "Unknown21"}, - {22, nullptr, "Unknown22"}, - {23, nullptr, "Unknown23"}, - {24, nullptr, "Unknown24"}, - {25, nullptr, "Unknown25"}, - {26, &IBtmUserCore::GetDiscoveryEvent, "AcquireBleServiceDiscoveryEventImpl"}, - {27, nullptr, "Unknown27"}, - {28, nullptr, "Unknown28"}, - {29, nullptr, "Unknown29"}, - {30, nullptr, "Unknown30"}, - {31, nullptr, "Unknown31"}, - {32, nullptr, "Unknown32"}, - {33, &IBtmUserCore::GetConfigEvent, "GetConfigEvent"}, - {34, nullptr, "Unknown34"}, - {35, nullptr, "Unknown35"}, - {36, nullptr, "Unknown36"}, - {37, nullptr, "Unknown37"}, + {0, &IBtmUserCore::AcquireBleScanEvent, "AcquireBleScanEvent"}, + {1, nullptr, "GetBleScanFilterParameter"}, + {2, nullptr, "GetBleScanFilterParameter2"}, + {3, nullptr, "StartBleScanForGeneral"}, + {4, nullptr, "StopBleScanForGeneral"}, + {5, nullptr, "GetBleScanResultsForGeneral"}, + {6, nullptr, "StartBleScanForPaired"}, + {7, nullptr, "StopBleScanForPaired"}, + {8, nullptr, "StartBleScanForSmartDevice"}, + {9, nullptr, "StopBleScanForSmartDevice"}, + {10, nullptr, "GetBleScanResultsForSmartDevice"}, + {17, &IBtmUserCore::AcquireBleConnectionEvent, "AcquireBleConnectionEvent"}, + {18, nullptr, "BleConnect"}, + {19, nullptr, "BleDisconnect"}, + {20, nullptr, "BleGetConnectionState"}, + {21, nullptr, "AcquireBlePairingEvent"}, + {22, nullptr, "BlePairDevice"}, + {23, nullptr, "BleUnPairDevice"}, + {24, nullptr, "BleUnPairDevice2"}, + {25, nullptr, "BleGetPairedDevices"}, + {26, &IBtmUserCore::AcquireBleServiceDiscoveryEvent, "AcquireBleServiceDiscoveryEvent"}, + {27, nullptr, "GetGattServices"}, + {28, nullptr, "GetGattService"}, + {29, nullptr, "GetGattIncludedServices"}, + {30, nullptr, "GetBelongingGattService"}, + {31, nullptr, "GetGattCharacteristics"}, + {32, nullptr, "GetGattDescriptors"}, + {33, &IBtmUserCore::AcquireBleMtuConfigEvent, "AcquireBleMtuConfigEvent"}, + {34, nullptr, "ConfigureBleMtu"}, + {35, nullptr, "GetBleMtu"}, + {36, nullptr, "RegisterBleGattDataPath"}, + {37, nullptr, "UnregisterBleGattDataPath"}, }; // clang-format on RegisterHandlers(functions); @@ -68,7 +68,7 @@ public: } private: - void GetScanEvent(Kernel::HLERequestContext& ctx) { + void AcquireBleScanEvent(Kernel::HLERequestContext& ctx) { LOG_WARNING(Service_BTM, "(STUBBED) called"); IPC::ResponseBuilder rb{ctx, 2, 
1}; @@ -76,7 +76,7 @@ private: rb.PushCopyObjects(scan_event.readable); } - void GetConnectionEvent(Kernel::HLERequestContext& ctx) { + void AcquireBleConnectionEvent(Kernel::HLERequestContext& ctx) { LOG_WARNING(Service_BTM, "(STUBBED) called"); IPC::ResponseBuilder rb{ctx, 2, 1}; @@ -84,7 +84,7 @@ private: rb.PushCopyObjects(connection_event.readable); } - void GetDiscoveryEvent(Kernel::HLERequestContext& ctx) { + void AcquireBleServiceDiscoveryEvent(Kernel::HLERequestContext& ctx) { LOG_WARNING(Service_BTM, "(STUBBED) called"); IPC::ResponseBuilder rb{ctx, 2, 1}; @@ -92,7 +92,7 @@ private: rb.PushCopyObjects(service_discovery.readable); } - void GetConfigEvent(Kernel::HLERequestContext& ctx) { + void AcquireBleMtuConfigEvent(Kernel::HLERequestContext& ctx) { LOG_WARNING(Service_BTM, "(STUBBED) called"); IPC::ResponseBuilder rb{ctx, 2, 1}; @@ -111,14 +111,14 @@ public: explicit BTM_USR() : ServiceFramework{"btm:u"} { // clang-format off static const FunctionInfo functions[] = { - {0, &BTM_USR::GetCoreImpl, "GetCoreImpl"}, + {0, &BTM_USR::GetCore, "GetCore"}, }; // clang-format on RegisterHandlers(functions); } private: - void GetCoreImpl(Kernel::HLERequestContext& ctx) { + void GetCore(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_BTM, "called"); IPC::ResponseBuilder rb{ctx, 2, 0, 1}; @@ -134,26 +134,64 @@ public: static const FunctionInfo functions[] = { {0, nullptr, "Unknown1"}, {1, nullptr, "Unknown2"}, - {2, nullptr, "RegisterSystemEventForConnectedDeviceConditionImpl"}, + {2, nullptr, "RegisterSystemEventForConnectedDeviceCondition"}, {3, nullptr, "Unknown3"}, {4, nullptr, "Unknown4"}, {5, nullptr, "Unknown5"}, {6, nullptr, "Unknown6"}, {7, nullptr, "Unknown7"}, - {8, nullptr, "RegisterSystemEventForRegisteredDeviceInfoImpl"}, + {8, nullptr, "RegisterSystemEventForRegisteredDeviceInfo"}, {9, nullptr, "Unknown8"}, {10, nullptr, "Unknown9"}, {11, nullptr, "Unknown10"}, {12, nullptr, "Unknown11"}, {13, nullptr, "Unknown12"}, - {14, nullptr, "EnableRadioImpl"}, - {15, nullptr, "DisableRadioImpl"}, + {14, nullptr, "EnableRadio"}, + {15, nullptr, "DisableRadio"}, {16, nullptr, "Unknown13"}, {17, nullptr, "Unknown14"}, {18, nullptr, "Unknown15"}, {19, nullptr, "Unknown16"}, {20, nullptr, "Unknown17"}, {21, nullptr, "Unknown18"}, + {22, nullptr, "Unknown19"}, + {23, nullptr, "Unknown20"}, + {24, nullptr, "Unknown21"}, + {25, nullptr, "Unknown22"}, + {26, nullptr, "Unknown23"}, + {27, nullptr, "Unknown24"}, + {28, nullptr, "Unknown25"}, + {29, nullptr, "Unknown26"}, + {30, nullptr, "Unknown27"}, + {31, nullptr, "Unknown28"}, + {32, nullptr, "Unknown29"}, + {33, nullptr, "Unknown30"}, + {34, nullptr, "Unknown31"}, + {35, nullptr, "Unknown32"}, + {36, nullptr, "Unknown33"}, + {37, nullptr, "Unknown34"}, + {38, nullptr, "Unknown35"}, + {39, nullptr, "Unknown36"}, + {40, nullptr, "Unknown37"}, + {41, nullptr, "Unknown38"}, + {42, nullptr, "Unknown39"}, + {43, nullptr, "Unknown40"}, + {44, nullptr, "Unknown41"}, + {45, nullptr, "Unknown42"}, + {46, nullptr, "Unknown43"}, + {47, nullptr, "Unknown44"}, + {48, nullptr, "Unknown45"}, + {49, nullptr, "Unknown46"}, + {50, nullptr, "Unknown47"}, + {51, nullptr, "Unknown48"}, + {52, nullptr, "Unknown49"}, + {53, nullptr, "Unknown50"}, + {54, nullptr, "Unknown51"}, + {55, nullptr, "Unknown52"}, + {56, nullptr, "Unknown53"}, + {57, nullptr, "Unknown54"}, + {58, nullptr, "Unknown55"}, + {59, nullptr, "Unknown56"}, }; // clang-format on @@ -166,7 +204,7 @@ public: explicit BTM_DBG() : ServiceFramework{"btm:dbg"} { // clang-format off static const 
FunctionInfo functions[] = { - {0, nullptr, "RegisterSystemEventForDiscoveryImpl"}, + {0, nullptr, "RegisterSystemEventForDiscovery"}, {1, nullptr, "Unknown1"}, {2, nullptr, "Unknown2"}, {3, nullptr, "Unknown3"}, @@ -175,6 +213,10 @@ public: {6, nullptr, "Unknown6"}, {7, nullptr, "Unknown7"}, {8, nullptr, "Unknown8"}, + {9, nullptr, "Unknown9"}, + {10, nullptr, "Unknown10"}, + {11, nullptr, "Unknown11"}, + {12, nullptr, "Unknown11"}, }; // clang-format on @@ -187,16 +229,16 @@ public: explicit IBtmSystemCore() : ServiceFramework{"IBtmSystemCore"} { // clang-format off static const FunctionInfo functions[] = { - {0, nullptr, "StartGamepadPairingImpl"}, - {1, nullptr, "CancelGamepadPairingImpl"}, - {2, nullptr, "ClearGamepadPairingDatabaseImpl"}, - {3, nullptr, "GetPairedGamepadCountImpl"}, - {4, nullptr, "EnableRadioImpl"}, - {5, nullptr, "DisableRadioImpl"}, - {6, nullptr, "GetRadioOnOffImpl"}, - {7, nullptr, "AcquireRadioEventImpl"}, - {8, nullptr, "AcquireGamepadPairingEventImpl"}, - {9, nullptr, "IsGamepadPairingStartedImpl"}, + {0, nullptr, "StartGamepadPairing"}, + {1, nullptr, "CancelGamepadPairing"}, + {2, nullptr, "ClearGamepadPairingDatabase"}, + {3, nullptr, "GetPairedGamepadCount"}, + {4, nullptr, "EnableRadio"}, + {5, nullptr, "DisableRadio"}, + {6, nullptr, "GetRadioOnOff"}, + {7, nullptr, "AcquireRadioEvent"}, + {8, nullptr, "AcquireGamepadPairingEvent"}, + {9, nullptr, "IsGamepadPairingStarted"}, }; // clang-format on @@ -209,7 +251,7 @@ public: explicit BTM_SYS() : ServiceFramework{"btm:sys"} { // clang-format off static const FunctionInfo functions[] = { - {0, &BTM_SYS::GetCoreImpl, "GetCoreImpl"}, + {0, &BTM_SYS::GetCore, "GetCore"}, }; // clang-format on @@ -217,7 +259,7 @@ public: } private: - void GetCoreImpl(Kernel::HLERequestContext& ctx) { + void GetCore(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_BTM, "called"); IPC::ResponseBuilder rb{ctx, 2, 0, 1}; diff --git a/src/core/hle/service/filesystem/fsp_srv.cpp b/src/core/hle/service/filesystem/fsp_srv.cpp index 74c4e583b..54959edd8 100644 --- a/src/core/hle/service/filesystem/fsp_srv.cpp +++ b/src/core/hle/service/filesystem/fsp_srv.cpp @@ -627,8 +627,8 @@ private: FSP_SRV::FSP_SRV() : ServiceFramework("fsp-srv") { // clang-format off static const FunctionInfo functions[] = { - {0, nullptr, "MountContent"}, - {1, &FSP_SRV::Initialize, "Initialize"}, + {0, nullptr, "OpenFileSystem"}, + {1, &FSP_SRV::SetCurrentProcess, "SetCurrentProcess"}, {2, nullptr, "OpenDataFileSystemByCurrentProcess"}, {7, &FSP_SRV::OpenFileSystemWithPatch, "OpenFileSystemWithPatch"}, {8, nullptr, "OpenFileSystemWithId"}, @@ -637,10 +637,10 @@ FSP_SRV::FSP_SRV() : ServiceFramework("fsp-srv") { {12, nullptr, "OpenBisStorage"}, {13, nullptr, "InvalidateBisCache"}, {17, nullptr, "OpenHostFileSystem"}, - {18, &FSP_SRV::MountSdCard, "MountSdCard"}, + {18, &FSP_SRV::OpenSdCardFileSystem, "OpenSdCardFileSystem"}, {19, nullptr, "FormatSdCardFileSystem"}, {21, nullptr, "DeleteSaveDataFileSystem"}, - {22, &FSP_SRV::CreateSaveData, "CreateSaveData"}, + {22, &FSP_SRV::CreateSaveDataFileSystem, "CreateSaveDataFileSystem"}, {23, nullptr, "CreateSaveDataFileSystemBySystemSaveDataId"}, {24, nullptr, "RegisterSaveDataFileSystemAtomicDeletion"}, {25, nullptr, "DeleteSaveDataFileSystemBySaveDataSpaceId"}, @@ -652,7 +652,8 @@ FSP_SRV::FSP_SRV() : ServiceFramework("fsp-srv") { {32, nullptr, "ExtendSaveDataFileSystem"}, {33, nullptr, "DeleteCacheStorage"}, {34, nullptr, "GetCacheStorageSize"}, - {51, &FSP_SRV::MountSaveData, "MountSaveData"}, + {35, nullptr, 
"CreateSaveDataFileSystemByHashSalt"}, + {51, &FSP_SRV::OpenSaveDataFileSystem, "OpenSaveDataFileSystem"}, {52, nullptr, "OpenSaveDataFileSystemBySystemSaveDataId"}, {53, &FSP_SRV::OpenReadOnlySaveDataFileSystem, "OpenReadOnlySaveDataFileSystem"}, {57, nullptr, "ReadSaveDataFileSystemExtraDataBySaveDataSpaceId"}, @@ -664,21 +665,26 @@ FSP_SRV::FSP_SRV() : ServiceFramework("fsp-srv") { {64, nullptr, "OpenSaveDataInternalStorageFileSystem"}, {65, nullptr, "UpdateSaveDataMacForDebug"}, {66, nullptr, "WriteSaveDataFileSystemExtraData2"}, + {67, nullptr, "FindSaveDataWithFilter"}, + {68, nullptr, "OpenSaveDataInfoReaderBySaveDataFilter"}, {80, nullptr, "OpenSaveDataMetaFile"}, {81, nullptr, "OpenSaveDataTransferManager"}, {82, nullptr, "OpenSaveDataTransferManagerVersion2"}, {83, nullptr, "OpenSaveDataTransferProhibiterForCloudBackUp"}, + {84, nullptr, "ListApplicationAccessibleSaveDataOwnerId"}, {100, nullptr, "OpenImageDirectoryFileSystem"}, {110, nullptr, "OpenContentStorageFileSystem"}, + {120, nullptr, "OpenCloudBackupWorkStorageFileSystem"}, {200, &FSP_SRV::OpenDataStorageByCurrentProcess, "OpenDataStorageByCurrentProcess"}, {201, nullptr, "OpenDataStorageByProgramId"}, {202, &FSP_SRV::OpenDataStorageByDataId, "OpenDataStorageByDataId"}, - {203, &FSP_SRV::OpenRomStorage, "OpenRomStorage"}, + {203, &FSP_SRV::OpenPatchDataStorageByCurrentProcess, "OpenPatchDataStorageByCurrentProcess"}, {400, nullptr, "OpenDeviceOperator"}, {500, nullptr, "OpenSdCardDetectionEventNotifier"}, {501, nullptr, "OpenGameCardDetectionEventNotifier"}, {510, nullptr, "OpenSystemDataUpdateEventNotifier"}, {511, nullptr, "NotifySystemDataUpdateEvent"}, + {520, nullptr, "SimulateGameCardDetectionEvent"}, {600, nullptr, "SetCurrentPosixTime"}, {601, nullptr, "QuerySaveDataTotalSize"}, {602, nullptr, "VerifySaveDataFileSystem"}, @@ -717,6 +723,8 @@ FSP_SRV::FSP_SRV() : ServiceFramework("fsp-srv") { {1008, nullptr, "OpenRegisteredUpdatePartition"}, {1009, nullptr, "GetAndClearMemoryReportInfo"}, {1100, nullptr, "OverrideSaveDataTransferTokenSignVerificationKey"}, + {1110, nullptr, "CorruptSaveDataFileSystemBySaveDataSpaceId2"}, + {1200, nullptr, "OpenMultiCommitManager"}, }; // clang-format on RegisterHandlers(functions); @@ -724,7 +732,7 @@ FSP_SRV::FSP_SRV() : ServiceFramework("fsp-srv") { FSP_SRV::~FSP_SRV() = default; -void FSP_SRV::Initialize(Kernel::HLERequestContext& ctx) { +void FSP_SRV::SetCurrentProcess(Kernel::HLERequestContext& ctx) { LOG_WARNING(Service_FS, "(STUBBED) called"); IPC::ResponseBuilder rb{ctx, 2}; @@ -743,7 +751,7 @@ void FSP_SRV::OpenFileSystemWithPatch(Kernel::HLERequestContext& ctx) { rb.Push(ResultCode(-1)); } -void FSP_SRV::MountSdCard(Kernel::HLERequestContext& ctx) { +void FSP_SRV::OpenSdCardFileSystem(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_FS, "called"); IFileSystem filesystem(OpenSDMC().Unwrap()); @@ -753,7 +761,7 @@ void FSP_SRV::MountSdCard(Kernel::HLERequestContext& ctx) { rb.PushIpcInterface<IFileSystem>(std::move(filesystem)); } -void FSP_SRV::CreateSaveData(Kernel::HLERequestContext& ctx) { +void FSP_SRV::CreateSaveDataFileSystem(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; auto save_struct = rp.PopRaw<FileSys::SaveDataDescriptor>(); @@ -767,7 +775,7 @@ void FSP_SRV::CreateSaveData(Kernel::HLERequestContext& ctx) { rb.Push(RESULT_SUCCESS); } -void FSP_SRV::MountSaveData(Kernel::HLERequestContext& ctx) { +void FSP_SRV::OpenSaveDataFileSystem(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; auto space_id = 
rp.PopRaw<FileSys::SaveDataSpaceId>(); @@ -793,7 +801,7 @@ void FSP_SRV::MountSaveData(Kernel::HLERequestContext& ctx) { void FSP_SRV::OpenReadOnlySaveDataFileSystem(Kernel::HLERequestContext& ctx) { LOG_WARNING(Service_FS, "(STUBBED) called, delegating to 51 OpenSaveDataFilesystem"); - MountSaveData(ctx); + OpenSaveDataFileSystem(ctx); } void FSP_SRV::OpenSaveDataInfoReaderBySaveDataSpaceId(Kernel::HLERequestContext& ctx) { @@ -881,7 +889,7 @@ void FSP_SRV::OpenDataStorageByDataId(Kernel::HLERequestContext& ctx) { rb.PushIpcInterface<IStorage>(std::move(storage)); } -void FSP_SRV::OpenRomStorage(Kernel::HLERequestContext& ctx) { +void FSP_SRV::OpenPatchDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; auto storage_id = rp.PopRaw<FileSys::StorageId>(); diff --git a/src/core/hle/service/filesystem/fsp_srv.h b/src/core/hle/service/filesystem/fsp_srv.h index e7abec0a3..3a5f4e200 100644 --- a/src/core/hle/service/filesystem/fsp_srv.h +++ b/src/core/hle/service/filesystem/fsp_srv.h @@ -19,17 +19,17 @@ public: ~FSP_SRV() override; private: - void Initialize(Kernel::HLERequestContext& ctx); + void SetCurrentProcess(Kernel::HLERequestContext& ctx); void OpenFileSystemWithPatch(Kernel::HLERequestContext& ctx); - void MountSdCard(Kernel::HLERequestContext& ctx); - void CreateSaveData(Kernel::HLERequestContext& ctx); - void MountSaveData(Kernel::HLERequestContext& ctx); + void OpenSdCardFileSystem(Kernel::HLERequestContext& ctx); + void CreateSaveDataFileSystem(Kernel::HLERequestContext& ctx); + void OpenSaveDataFileSystem(Kernel::HLERequestContext& ctx); void OpenReadOnlySaveDataFileSystem(Kernel::HLERequestContext& ctx); void OpenSaveDataInfoReaderBySaveDataSpaceId(Kernel::HLERequestContext& ctx); void GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx); void OpenDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx); void OpenDataStorageByDataId(Kernel::HLERequestContext& ctx); - void OpenRomStorage(Kernel::HLERequestContext& ctx); + void OpenPatchDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx); FileSys::VirtualFile romfs; }; diff --git a/src/core/hle/service/ncm/ncm.cpp b/src/core/hle/service/ncm/ncm.cpp index 0297edca0..5d31f638f 100644 --- a/src/core/hle/service/ncm/ncm.cpp +++ b/src/core/hle/service/ncm/ncm.cpp @@ -40,10 +40,10 @@ public: {6, nullptr, "CloseContentStorageForcibly"}, {7, nullptr, "CloseContentMetaDatabaseForcibly"}, {8, nullptr, "CleanupContentMetaDatabase"}, - {9, nullptr, "OpenContentStorage2"}, - {10, nullptr, "CloseContentStorage"}, - {11, nullptr, "OpenContentMetaDatabase2"}, - {12, nullptr, "CloseContentMetaDatabase"}, + {9, nullptr, "ActivateContentStorage"}, + {10, nullptr, "InactivateContentStorage"}, + {11, nullptr, "ActivateContentMetaDatabase"}, + {12, nullptr, "InactivateContentMetaDatabase"}, }; // clang-format on diff --git a/src/core/hle/service/ns/ns.cpp b/src/core/hle/service/ns/ns.cpp index 2663f56b1..0eb04037a 100644 --- a/src/core/hle/service/ns/ns.cpp +++ b/src/core/hle/service/ns/ns.cpp @@ -43,7 +43,7 @@ public: {11, nullptr, "CalculateApplicationOccupiedSize"}, {16, nullptr, "PushApplicationRecord"}, {17, nullptr, "ListApplicationRecordContentMeta"}, - {19, nullptr, "LaunchApplication"}, + {19, nullptr, "LaunchApplicationOld"}, {21, nullptr, "GetApplicationContentPath"}, {22, nullptr, "TerminateApplication"}, {23, nullptr, "ResolveApplicationContentPath"}, @@ -96,10 +96,10 @@ public: {86, nullptr, "EnableApplicationCrashReport"}, {87, nullptr, "IsApplicationCrashReportEnabled"}, {90, nullptr, 
"BoostSystemMemoryResourceLimit"}, - {91, nullptr, "Unknown1"}, - {92, nullptr, "Unknown2"}, + {91, nullptr, "DeprecatedLaunchApplication"}, + {92, nullptr, "GetRunningApplicationProgramId"}, {93, nullptr, "GetMainApplicationProgramIndex"}, - {94, nullptr, "LaunchApplication2"}, + {94, nullptr, "LaunchApplication"}, {95, nullptr, "GetApplicationLaunchInfo"}, {96, nullptr, "AcquireApplicationLaunchInfo"}, {97, nullptr, "GetMainApplicationProgramIndex2"}, @@ -163,7 +163,7 @@ public: {907, nullptr, "WithdrawApplicationUpdateRequest"}, {908, nullptr, "ListApplicationRecordInstalledContentMeta"}, {909, nullptr, "WithdrawCleanupAddOnContentsWithNoRightsRecommendation"}, - {910, nullptr, "Unknown3"}, + {910, nullptr, "HasApplicationRecord"}, {911, nullptr, "SetPreInstalledApplication"}, {912, nullptr, "ClearPreInstalledApplicationFlag"}, {1000, nullptr, "RequestVerifyApplicationDeprecated"}, @@ -219,10 +219,10 @@ public: {2015, nullptr, "CompareSystemDeliveryInfo"}, {2016, nullptr, "ListNotCommittedContentMeta"}, {2017, nullptr, "CreateDownloadTask"}, - {2018, nullptr, "Unknown4"}, - {2050, nullptr, "Unknown5"}, - {2100, nullptr, "Unknown6"}, - {2101, nullptr, "Unknown7"}, + {2018, nullptr, "GetApplicationDeliveryInfoHash"}, + {2050, nullptr, "GetApplicationRightsOnClient"}, + {2100, nullptr, "GetApplicationTerminateResult"}, + {2101, nullptr, "GetRawApplicationTerminateResult"}, {2150, nullptr, "CreateRightsEnvironment"}, {2151, nullptr, "DestroyRightsEnvironment"}, {2152, nullptr, "ActivateRightsEnvironment"}, @@ -237,10 +237,10 @@ public: {2182, nullptr, "SetActiveRightsContextUsingStateToRightsEnvironment"}, {2190, nullptr, "GetRightsEnvironmentHandleForApplication"}, {2199, nullptr, "GetRightsEnvironmentCountForDebug"}, - {2200, nullptr, "Unknown8"}, - {2201, nullptr, "Unknown9"}, - {2250, nullptr, "Unknown10"}, - {2300, nullptr, "Unknown11"}, + {2200, nullptr, "GetGameCardApplicationCopyIdentifier"}, + {2201, nullptr, "GetInstalledApplicationCopyIdentifier"}, + {2250, nullptr, "RequestReportActiveELicence"}, + {2300, nullptr, "ListEventLog"}, }; // clang-format on @@ -355,6 +355,7 @@ public: static const FunctionInfo functions[] = { {21, nullptr, "GetApplicationContentPath"}, {23, nullptr, "ResolveApplicationContentPath"}, + {93, nullptr, "GetRunningApplicationProgramId"}, }; // clang-format on @@ -389,6 +390,11 @@ public: // clang-format off static const FunctionInfo functions[] = { {0, nullptr, "RequestLinkDevice"}, + {1, nullptr, "RequestCleanupAllPreInstalledApplications"}, + {2, nullptr, "RequestCleanupPreInstalledApplication"}, + {3, nullptr, "RequestSyncRights"}, + {4, nullptr, "RequestUnlinkDevice"}, + {5, nullptr, "RequestRevokeAllELicense"}, }; // clang-format on @@ -403,7 +409,7 @@ public: static const FunctionInfo functions[] = { {100, nullptr, "ResetToFactorySettings"}, {101, nullptr, "ResetToFactorySettingsWithoutUserSaveData"}, - {102, nullptr, "ResetToFactorySettingsForRefurbishment "}, + {102, nullptr, "ResetToFactorySettingsForRefurbishment"}, }; // clang-format on diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp index 6a613aeab..8dfc0df03 100644 --- a/src/core/hle/service/nvflinger/nvflinger.cpp +++ b/src/core/hle/service/nvflinger/nvflinger.cpp @@ -5,7 +5,6 @@ #include <algorithm> #include <optional> -#include "common/alignment.h" #include "common/assert.h" #include "common/logging/log.h" #include "common/microprofile.h" @@ -22,7 +21,6 @@ #include "core/hle/service/nvflinger/nvflinger.h" #include "core/perf_stats.h" 
#include "video_core/renderer_base.h" -#include "video_core/video_core.h" namespace Service::NVFlinger { @@ -30,12 +28,6 @@ constexpr std::size_t SCREEN_REFRESH_RATE = 60; constexpr u64 frame_ticks = static_cast<u64>(CoreTiming::BASE_CLOCK_RATE / SCREEN_REFRESH_RATE); NVFlinger::NVFlinger() { - // Add the different displays to the list of displays. - displays.emplace_back(0, "Default"); - displays.emplace_back(1, "External"); - displays.emplace_back(2, "Edid"); - displays.emplace_back(3, "Internal"); - // Schedule the screen composition events composition_event = CoreTiming::RegisterEvent("ScreenComposition", [this](u64 userdata, int cycles_late) { @@ -55,13 +47,13 @@ void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) { } u64 NVFlinger::OpenDisplay(std::string_view name) { - LOG_WARNING(Service, "Opening display {}", name); + LOG_DEBUG(Service, "Opening \"{}\" display", name); // TODO(Subv): Currently we only support the Default display. ASSERT(name == "Default"); - auto itr = std::find_if(displays.begin(), displays.end(), - [&](const Display& display) { return display.name == name; }); + const auto itr = std::find_if(displays.begin(), displays.end(), + [&](const Display& display) { return display.name == name; }); ASSERT(itr != displays.end()); @@ -69,48 +61,66 @@ u64 NVFlinger::OpenDisplay(std::string_view name) { } u64 NVFlinger::CreateLayer(u64 display_id) { - auto& display = GetDisplay(display_id); + auto& display = FindDisplay(display_id); ASSERT_MSG(display.layers.empty(), "Only one layer is supported per display at the moment"); - u64 layer_id = next_layer_id++; - u32 buffer_queue_id = next_buffer_queue_id++; + const u64 layer_id = next_layer_id++; + const u32 buffer_queue_id = next_buffer_queue_id++; auto buffer_queue = std::make_shared<BufferQueue>(buffer_queue_id, layer_id); display.layers.emplace_back(layer_id, buffer_queue); buffer_queues.emplace_back(std::move(buffer_queue)); return layer_id; } -u32 NVFlinger::GetBufferQueueId(u64 display_id, u64 layer_id) { - const auto& layer = GetLayer(display_id, layer_id); +u32 NVFlinger::FindBufferQueueId(u64 display_id, u64 layer_id) const { + const auto& layer = FindLayer(display_id, layer_id); return layer.buffer_queue->GetId(); } Kernel::SharedPtr<Kernel::ReadableEvent> NVFlinger::GetVsyncEvent(u64 display_id) { - return GetDisplay(display_id).vsync_event.readable; + return FindDisplay(display_id).vsync_event.readable; } -std::shared_ptr<BufferQueue> NVFlinger::GetBufferQueue(u32 id) const { - auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(), - [&](const auto& queue) { return queue->GetId() == id; }); +std::shared_ptr<BufferQueue> NVFlinger::FindBufferQueue(u32 id) const { + const auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(), + [&](const auto& queue) { return queue->GetId() == id; }); ASSERT(itr != buffer_queues.end()); return *itr; } -Display& NVFlinger::GetDisplay(u64 display_id) { - auto itr = std::find_if(displays.begin(), displays.end(), - [&](const Display& display) { return display.id == display_id; }); +Display& NVFlinger::FindDisplay(u64 display_id) { + const auto itr = std::find_if(displays.begin(), displays.end(), + [&](const Display& display) { return display.id == display_id; }); + + ASSERT(itr != displays.end()); + return *itr; +} + +const Display& NVFlinger::FindDisplay(u64 display_id) const { + const auto itr = std::find_if(displays.begin(), displays.end(), + [&](const Display& display) { return display.id == display_id; }); ASSERT(itr != 
displays.end()); return *itr; } -Layer& NVFlinger::GetLayer(u64 display_id, u64 layer_id) { - auto& display = GetDisplay(display_id); +Layer& NVFlinger::FindLayer(u64 display_id, u64 layer_id) { + auto& display = FindDisplay(display_id); + + const auto itr = std::find_if(display.layers.begin(), display.layers.end(), + [&](const Layer& layer) { return layer.id == layer_id; }); + + ASSERT(itr != display.layers.end()); + return *itr; +} + +const Layer& NVFlinger::FindLayer(u64 display_id, u64 layer_id) const { + const auto& display = FindDisplay(display_id); - auto itr = std::find_if(display.layers.begin(), display.layers.end(), - [&](const Layer& layer) { return layer.id == layer_id; }); + const auto itr = std::find_if(display.layers.begin(), display.layers.end(), + [&](const Layer& layer) { return layer.id == layer_id; }); ASSERT(itr != display.layers.end()); return *itr; @@ -145,7 +155,7 @@ void NVFlinger::Compose() { continue; } - auto& igbp_buffer = buffer->get().igbp_buffer; + const auto& igbp_buffer = buffer->get().igbp_buffer; // Now send the buffer to the GPU for drawing. // TODO(Subv): Support more than just disp0. The display device selection is probably based diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h index 9abba555b..83e974ed3 100644 --- a/src/core/hle/service/nvflinger/nvflinger.h +++ b/src/core/hle/service/nvflinger/nvflinger.h @@ -4,6 +4,7 @@ #pragma once +#include <array> #include <memory> #include <string> #include <string_view> @@ -56,35 +57,47 @@ public: /// Sets the NVDrv module instance to use to send buffers to the GPU. void SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance); - /// Opens the specified display and returns the id. + /// Opens the specified display and returns the ID. u64 OpenDisplay(std::string_view name); - /// Creates a layer on the specified display and returns the layer id. + /// Creates a layer on the specified display and returns the layer ID. u64 CreateLayer(u64 display_id); - /// Gets the buffer queue id of the specified layer in the specified display. - u32 GetBufferQueueId(u64 display_id, u64 layer_id); + /// Finds the buffer queue ID of the specified layer in the specified display. + u32 FindBufferQueueId(u64 display_id, u64 layer_id) const; /// Gets the vsync event for the specified display. Kernel::SharedPtr<Kernel::ReadableEvent> GetVsyncEvent(u64 display_id); - /// Obtains a buffer queue identified by the id. - std::shared_ptr<BufferQueue> GetBufferQueue(u32 id) const; + /// Obtains a buffer queue identified by the ID. + std::shared_ptr<BufferQueue> FindBufferQueue(u32 id) const; /// Performs a composition request to the emulated nvidia GPU and triggers the vsync events when /// finished. void Compose(); private: - /// Returns the display identified by the specified id. - Display& GetDisplay(u64 display_id); + /// Finds the display identified by the specified ID. + Display& FindDisplay(u64 display_id); - /// Returns the layer identified by the specified id in the desired display. - Layer& GetLayer(u64 display_id, u64 layer_id); + /// Finds the display identified by the specified ID. + const Display& FindDisplay(u64 display_id) const; + + /// Finds the layer identified by the specified ID in the desired display. + Layer& FindLayer(u64 display_id, u64 layer_id); + + /// Finds the layer identified by the specified ID in the desired display. 
+ const Layer& FindLayer(u64 display_id, u64 layer_id) const; std::shared_ptr<Nvidia::Module> nvdrv; - std::vector<Display> displays; + std::array<Display, 5> displays{{ + {0, "Default"}, + {1, "External"}, + {2, "Edid"}, + {3, "Internal"}, + {4, "Null"}, + }}; std::vector<std::shared_ptr<BufferQueue>> buffer_queues; /// Id to use for the next layer that is created, this counter is shared among all displays. diff --git a/src/core/hle/service/pm/pm.cpp b/src/core/hle/service/pm/pm.cpp index 53e7da9c3..6b27dc4a3 100644 --- a/src/core/hle/service/pm/pm.cpp +++ b/src/core/hle/service/pm/pm.cpp @@ -13,7 +13,7 @@ public: explicit BootMode() : ServiceFramework{"pm:bm"} { static const FunctionInfo functions[] = { {0, &BootMode::GetBootMode, "GetBootMode"}, - {1, nullptr, "SetMaintenanceBoot"}, + {1, &BootMode::SetMaintenanceBoot, "SetMaintenanceBoot"}, }; RegisterHandlers(functions); } @@ -24,8 +24,19 @@ private: IPC::ResponseBuilder rb{ctx, 3}; rb.Push(RESULT_SUCCESS); - rb.Push<u32>(static_cast<u32>(SystemBootMode::Normal)); // Normal boot mode + rb.PushEnum(boot_mode); } + + void SetMaintenanceBoot(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_PM, "called"); + + boot_mode = SystemBootMode::Maintenance; + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(RESULT_SUCCESS); + } + + SystemBootMode boot_mode = SystemBootMode::Normal; }; class DebugMonitor final : public ServiceFramework<DebugMonitor> { diff --git a/src/core/hle/service/pm/pm.h b/src/core/hle/service/pm/pm.h index 370f2ed72..cc8d3f215 100644 --- a/src/core/hle/service/pm/pm.h +++ b/src/core/hle/service/pm/pm.h @@ -9,7 +9,12 @@ class ServiceManager; } namespace Service::PM { -enum class SystemBootMode : u32 { Normal = 0, Maintenance = 1 }; + +enum class SystemBootMode { + Normal, + Maintenance, +}; + /// Registers all PM services with the specified service manager. void InstallInterfaces(SM::ServiceManager& service_manager); diff --git a/src/core/hle/service/psc/psc.cpp b/src/core/hle/service/psc/psc.cpp index 0ba0a4076..53ec6b031 100644 --- a/src/core/hle/service/psc/psc.cpp +++ b/src/core/hle/service/psc/psc.cpp @@ -17,13 +17,13 @@ public: explicit PSC_C() : ServiceFramework{"psc:c"} { // clang-format off static const FunctionInfo functions[] = { - {0, nullptr, "Unknown1"}, - {1, nullptr, "Unknown2"}, - {2, nullptr, "Unknown3"}, - {3, nullptr, "Unknown4"}, - {4, nullptr, "Unknown5"}, - {5, nullptr, "Unknown6"}, - {6, nullptr, "Unknown7"}, + {0, nullptr, "Initialize"}, + {1, nullptr, "DispatchRequest"}, + {2, nullptr, "GetResult"}, + {3, nullptr, "GetState"}, + {4, nullptr, "Cancel"}, + {5, nullptr, "PrintModuleInformation"}, + {6, nullptr, "GetModuleInformation"}, }; // clang-format on @@ -39,7 +39,8 @@ public: {0, nullptr, "Initialize"}, {1, nullptr, "GetRequest"}, {2, nullptr, "Acknowledge"}, - {3, nullptr, "Unknown1"}, + {3, nullptr, "Finalize"}, + {4, nullptr, "AcknowledgeEx"}, }; // clang-format on diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp index 70c933934..fe08c38f2 100644 --- a/src/core/hle/service/vi/vi.cpp +++ b/src/core/hle/service/vi/vi.cpp @@ -524,7 +524,7 @@ private: LOG_DEBUG(Service_VI, "called. 
id=0x{:08X} transaction={:X}, flags=0x{:08X}", id, static_cast<u32>(transaction), flags); - auto buffer_queue = nv_flinger->GetBufferQueue(id); + auto buffer_queue = nv_flinger->FindBufferQueue(id); if (transaction == TransactionId::Connect) { IGBPConnectRequestParcel request{ctx.ReadBuffer()}; @@ -558,7 +558,7 @@ private: [=](Kernel::SharedPtr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx, Kernel::ThreadWakeupReason reason) { // Repeat TransactParcel DequeueBuffer when a buffer is available - auto buffer_queue = nv_flinger->GetBufferQueue(id); + auto buffer_queue = nv_flinger->FindBufferQueue(id); std::optional<u32> slot = buffer_queue->DequeueBuffer(width, height); ASSERT_MSG(slot != std::nullopt, "Could not dequeue buffer."); @@ -628,7 +628,7 @@ private: LOG_WARNING(Service_VI, "(STUBBED) called id={}, unknown={:08X}", id, unknown); - const auto buffer_queue = nv_flinger->GetBufferQueue(id); + const auto buffer_queue = nv_flinger->FindBufferQueue(id); // TODO(Subv): Find out what this actually is. IPC::ResponseBuilder rb{ctx, 2, 1}; @@ -704,13 +704,14 @@ private: rb.Push(RESULT_SUCCESS); } + // This function currently does nothing but return a success error code in + // the vi library itself, so do the same thing, but log out the passed in values. void SetLayerVisibility(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; const u64 layer_id = rp.Pop<u64>(); const bool visibility = rp.Pop<bool>(); - LOG_WARNING(Service_VI, "(STUBBED) called, layer_id=0x{:08X}, visibility={}", layer_id, - visibility); + LOG_DEBUG(Service_VI, "called, layer_id=0x{:08X}, visibility={}", layer_id, visibility); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(RESULT_SUCCESS); @@ -1043,7 +1044,7 @@ private: LOG_DEBUG(Service_VI, "called. layer_id=0x{:016X}, aruid=0x{:016X}", layer_id, aruid); const u64 display_id = nv_flinger->OpenDisplay(display_name); - const u32 buffer_queue_id = nv_flinger->GetBufferQueueId(display_id, layer_id); + const u32 buffer_queue_id = nv_flinger->FindBufferQueueId(display_id, layer_id); NativeWindow native_window{buffer_queue_id}; IPC::ResponseBuilder rb{ctx, 4}; @@ -1062,7 +1063,7 @@ private: // TODO(Subv): What's the difference between a Stray and a Managed layer? 
const u64 layer_id = nv_flinger->CreateLayer(display_id); - const u32 buffer_queue_id = nv_flinger->GetBufferQueueId(display_id, layer_id); + const u32 buffer_queue_id = nv_flinger->FindBufferQueueId(display_id, layer_id); NativeWindow native_window{buffer_queue_id}; IPC::ResponseBuilder rb{ctx, 6}; diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index 327db68a5..6113e17ff 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -59,6 +59,35 @@ add_library(video_core STATIC renderer_opengl/renderer_opengl.h renderer_opengl/utils.cpp renderer_opengl/utils.h + shader/decode/arithmetic.cpp + shader/decode/arithmetic_immediate.cpp + shader/decode/bfe.cpp + shader/decode/bfi.cpp + shader/decode/shift.cpp + shader/decode/arithmetic_integer.cpp + shader/decode/arithmetic_integer_immediate.cpp + shader/decode/arithmetic_half.cpp + shader/decode/arithmetic_half_immediate.cpp + shader/decode/ffma.cpp + shader/decode/hfma2.cpp + shader/decode/conversion.cpp + shader/decode/memory.cpp + shader/decode/float_set_predicate.cpp + shader/decode/integer_set_predicate.cpp + shader/decode/half_set_predicate.cpp + shader/decode/predicate_set_register.cpp + shader/decode/predicate_set_predicate.cpp + shader/decode/register_set_predicate.cpp + shader/decode/float_set.cpp + shader/decode/integer_set.cpp + shader/decode/half_set.cpp + shader/decode/video.cpp + shader/decode/xmad.cpp + shader/decode/other.cpp + shader/decode.cpp + shader/shader_ir.cpp + shader/shader_ir.h + shader/track.cpp surface.cpp surface.h textures/astc.cpp diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h index 16e0697c4..1097e5c49 100644 --- a/src/video_core/dma_pusher.h +++ b/src/video_core/dma_pusher.h @@ -83,7 +83,7 @@ private: u32 subchannel; ///< Current subchannel u32 method_count; ///< Current method count u32 length_pending; ///< Large NI command length pending - bool non_incrementing; ///< Current command’s NI flag + bool non_incrementing; ///< Current command's NI flag }; DmaState dma_state{}; diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h index e53c77f2b..269df9437 100644 --- a/src/video_core/engines/shader_bytecode.h +++ b/src/video_core/engines/shader_bytecode.h @@ -208,6 +208,8 @@ enum class UniformType : u64 { SignedShort = 3, Single = 4, Double = 5, + Quad = 6, + UnsignedQuad = 7, }; enum class StoreType : u64 { @@ -215,9 +217,9 @@ enum class StoreType : u64 { Signed8 = 1, Unsigned16 = 2, Signed16 = 3, - Bytes32 = 4, - Bytes64 = 5, - Bytes128 = 6, + Bits32 = 4, + Bits64 = 5, + Bits128 = 6, }; enum class IMinMaxExchange : u64 { @@ -397,6 +399,10 @@ struct IpaMode { bool operator!=(const IpaMode& a) const { return !operator==(a); } + bool operator<(const IpaMode& a) const { + return std::tie(interpolation_mode, sampling_mode) < + std::tie(a.interpolation_mode, a.sampling_mode); + } }; enum class SystemVariable : u64 { @@ -644,6 +650,7 @@ union Instruction { BitField<37, 2, HalfPrecision> precision; BitField<32, 1, u64> saturate; + BitField<31, 1, u64> negate_b; BitField<30, 1, u64> negate_c; BitField<35, 2, HalfType> type_c; } rr; @@ -780,6 +787,12 @@ union Instruction { } st_l; union { + BitField<48, 3, UniformType> type; + BitField<46, 2, u64> cache_mode; + BitField<20, 24, s64> immediate_offset; + } ldg; + + union { BitField<0, 3, u64> pred0; BitField<3, 3, u64> pred3; BitField<7, 1, u64> abs_a; @@ -968,6 +981,10 @@ union Instruction { } return false; } + + bool IsComponentEnabled(std::size_t component) 
const { + return ((1ULL << component) & component_mask) != 0; + } } txq; union { @@ -1235,11 +1252,19 @@ union Instruction { union { BitField<20, 14, u64> offset; BitField<34, 5, u64> index; + + u64 GetOffset() const { + return offset * 4; + } } cbuf34; union { BitField<20, 16, s64> offset; BitField<36, 5, u64> index; + + s64 GetOffset() const { + return offset; + } } cbuf36; // Unsure about the size of this one. @@ -1431,6 +1456,7 @@ public: PredicateSetRegister, RegisterSetPredicate, Conversion, + Video, Xmad, Unknown, }; @@ -1562,8 +1588,8 @@ private: INST("11100000--------", Id::IPA, Type::Trivial, "IPA"), INST("1111101111100---", Id::OUT_R, Type::Trivial, "OUT_R"), INST("1110111111010---", Id::ISBERD, Type::Trivial, "ISBERD"), - INST("01011111--------", Id::VMAD, Type::Trivial, "VMAD"), - INST("0101000011110---", Id::VSETP, Type::Trivial, "VSETP"), + INST("01011111--------", Id::VMAD, Type::Video, "VMAD"), + INST("0101000011110---", Id::VSETP, Type::Video, "VSETP"), INST("0011001-1-------", Id::FFMA_IMM, Type::Ffma, "FFMA_IMM"), INST("010010011-------", Id::FFMA_CR, Type::Ffma, "FFMA_CR"), INST("010100011-------", Id::FFMA_RC, Type::Ffma, "FFMA_RC"), diff --git a/src/video_core/engines/shader_header.h b/src/video_core/engines/shader_header.h index 99c34649f..cf2b76ff6 100644 --- a/src/video_core/engines/shader_header.h +++ b/src/video_core/engines/shader_header.h @@ -106,7 +106,7 @@ struct Header { } ps; }; - u64 GetLocalMemorySize() { + u64 GetLocalMemorySize() const { return (common1.shader_local_memory_low_size | (common2.shader_local_memory_high_size << 24)); } diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 08cf6268f..d3d32a359 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -3,6 +3,8 @@ // Refer to the license.txt file included. #include "common/assert.h" +#include "core/core_timing.h" +#include "core/memory.h" #include "video_core/engines/fermi_2d.h" #include "video_core/engines/kepler_memory.h" #include "video_core/engines/maxwell_3d.h" @@ -124,9 +126,36 @@ u32 DepthFormatBytesPerPixel(DepthFormat format) { } } +// Note that, traditionally, methods are treated as 4-byte addressable locations, and hence +// their numbers are written down multiplied by 4 in docs. Here we do not multiply by 4, so +// the values seen in docs may be four times the values used here. enum class BufferMethods { - BindObject = 0, - CountBufferMethods = 0x40, + BindObject = 0x0, + Nop = 0x2, + SemaphoreAddressHigh = 0x4, + SemaphoreAddressLow = 0x5, + SemaphoreSequence = 0x6, + SemaphoreTrigger = 0x7, + NotifyIntr = 0x8, + WrcacheFlush = 0x9, + Unk28 = 0xA, + Unk2c = 0xB, + RefCnt = 0x14, + SemaphoreAcquire = 0x1A, + SemaphoreRelease = 0x1B, + Unk70 = 0x1C, + Unk74 = 0x1D, + Unk78 = 0x1E, + Unk7c = 0x1F, + Yield = 0x20, + NonPullerMethods = 0x40, +}; + +enum class GpuSemaphoreOperation { + AcquireEqual = 0x1, + WriteLong = 0x2, + AcquireGequal = 0x4, + AcquireMask = 0x8, +}; void GPU::CallMethod(const MethodCall& method_call) { @@ -135,20 +164,78 @@ void GPU::CallMethod(const MethodCall& method_call) { ASSERT(method_call.subchannel < bound_engines.size()); - if (method_call.method == static_cast<u32>(BufferMethods::BindObject)) { - // Bind the current subchannel to the desired engine id.
- LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel, - method_call.argument); - bound_engines[method_call.subchannel] = static_cast<EngineID>(method_call.argument); - return; + if (ExecuteMethodOnEngine(method_call)) { + CallEngineMethod(method_call); + } else { + CallPullerMethod(method_call); } +} + +bool GPU::ExecuteMethodOnEngine(const MethodCall& method_call) { + const auto method = static_cast<BufferMethods>(method_call.method); + return method >= BufferMethods::NonPullerMethods; +} - if (method_call.method < static_cast<u32>(BufferMethods::CountBufferMethods)) { - // TODO(Subv): Research and implement these methods. - LOG_ERROR(HW_GPU, "Special buffer methods other than Bind are not implemented"); - return; +void GPU::CallPullerMethod(const MethodCall& method_call) { + regs.reg_array[method_call.method] = method_call.argument; + const auto method = static_cast<BufferMethods>(method_call.method); + + switch (method) { + case BufferMethods::BindObject: { + ProcessBindMethod(method_call); + break; + } + case BufferMethods::Nop: + case BufferMethods::SemaphoreAddressHigh: + case BufferMethods::SemaphoreAddressLow: + case BufferMethods::SemaphoreSequence: + case BufferMethods::RefCnt: + break; + case BufferMethods::SemaphoreTrigger: { + ProcessSemaphoreTriggerMethod(); + break; + } + case BufferMethods::NotifyIntr: { + // TODO(Kmather73): Research and implement this method. + LOG_ERROR(HW_GPU, "Special puller engine method NotifyIntr not implemented"); + break; + } + case BufferMethods::WrcacheFlush: { + // TODO(Kmather73): Research and implement this method. + LOG_ERROR(HW_GPU, "Special puller engine method WrcacheFlush not implemented"); + break; + } + case BufferMethods::Unk28: { + // TODO(Kmather73): Research and implement this method. + LOG_ERROR(HW_GPU, "Special puller engine method Unk28 not implemented"); + break; + } + case BufferMethods::Unk2c: { + // TODO(Kmather73): Research and implement this method. + LOG_ERROR(HW_GPU, "Special puller engine method Unk2c not implemented"); + break; + } + case BufferMethods::SemaphoreAcquire: { + ProcessSemaphoreAcquire(); + break; } + case BufferMethods::SemaphoreRelease: { + ProcessSemaphoreRelease(); + break; + } + case BufferMethods::Yield: { + // TODO(Kmather73): Research and implement this method. + LOG_ERROR(HW_GPU, "Special puller engine method Yield not implemented"); + break; + } + default: + LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented", + static_cast<u32>(method)); + break; + } +} +void GPU::CallEngineMethod(const MethodCall& method_call) { const EngineID engine = bound_engines[method_call.subchannel]; switch (engine) { @@ -172,4 +259,76 @@ void GPU::CallMethod(const MethodCall& method_call) { } } +void GPU::ProcessBindMethod(const MethodCall& method_call) { + // Bind the current subchannel to the desired engine id. 
+ LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel, + method_call.argument); + bound_engines[method_call.subchannel] = static_cast<EngineID>(method_call.argument); +} + +void GPU::ProcessSemaphoreTriggerMethod() { + const auto semaphoreOperationMask = 0xF; + const auto op = + static_cast<GpuSemaphoreOperation>(regs.semaphore_trigger & semaphoreOperationMask); + if (op == GpuSemaphoreOperation::WriteLong) { + auto address = memory_manager->GpuToCpuAddress(regs.smaphore_address.SmaphoreAddress()); + struct Block { + u32 sequence; + u32 zeros = 0; + u64 timestamp; + }; + + Block block{}; + block.sequence = regs.semaphore_sequence; + // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of + // CoreTiming + block.timestamp = CoreTiming::GetTicks(); + Memory::WriteBlock(*address, &block, sizeof(block)); + } else { + const auto address = + memory_manager->GpuToCpuAddress(regs.smaphore_address.SmaphoreAddress()); + const u32 word = Memory::Read32(*address); + if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) || + (op == GpuSemaphoreOperation::AcquireGequal && + static_cast<s32>(word - regs.semaphore_sequence) > 0) || + (op == GpuSemaphoreOperation::AcquireMask && (word & regs.semaphore_sequence))) { + // Nothing to do in this case + } else { + regs.acquire_source = true; + regs.acquire_value = regs.semaphore_sequence; + if (op == GpuSemaphoreOperation::AcquireEqual) { + regs.acquire_active = true; + regs.acquire_mode = false; + } else if (op == GpuSemaphoreOperation::AcquireGequal) { + regs.acquire_active = true; + regs.acquire_mode = true; + } else if (op == GpuSemaphoreOperation::AcquireMask) { + // TODO(kemathe) The acquire mask operation waits for a value that, ANDed with + // semaphore_sequence, gives a non-0 result + LOG_ERROR(HW_GPU, "Invalid semaphore operation AcquireMask not implemented"); + } else { + LOG_ERROR(HW_GPU, "Invalid semaphore operation"); + } + } + } +} + +void GPU::ProcessSemaphoreRelease() { + const auto address = memory_manager->GpuToCpuAddress(regs.smaphore_address.SmaphoreAddress()); + Memory::Write32(*address, regs.semaphore_release); +} + +void GPU::ProcessSemaphoreAcquire() { + const auto address = memory_manager->GpuToCpuAddress(regs.smaphore_address.SmaphoreAddress()); + const u32 word = Memory::Read32(*address); + const auto value = regs.semaphore_acquire; + if (word != value) { + regs.acquire_active = true; + regs.acquire_value = value; + // TODO(kemathe73) figure out how to do the acquire_timeout + regs.acquire_mode = false; + regs.acquire_source = false; + } +} + } // namespace Tegra diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index af5ccd1e9..fb8975811 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h @@ -156,6 +156,46 @@ public: /// Returns a const reference to the GPU DMA pusher. 
const Tegra::DmaPusher& DmaPusher() const; + struct Regs { + static constexpr size_t NUM_REGS = 0x100; + + union { + struct { + INSERT_PADDING_WORDS(0x4); + struct { + u32 address_high; + u32 address_low; + + GPUVAddr SmaphoreAddress() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } + } smaphore_address; + + u32 semaphore_sequence; + u32 semaphore_trigger; + INSERT_PADDING_WORDS(0xC); + + // The puser and the puller share the reference counter, the pusher only has read + // access + u32 reference_count; + INSERT_PADDING_WORDS(0x5); + + u32 semaphore_acquire; + u32 semaphore_release; + INSERT_PADDING_WORDS(0xE4); + + // Puller state + u32 acquire_mode; + u32 acquire_source; + u32 acquire_active; + u32 acquire_timeout; + u32 acquire_value; + }; + std::array<u32, NUM_REGS> reg_array; + }; + } regs{}; + private: std::unique_ptr<Tegra::DmaPusher> dma_pusher; std::unique_ptr<Tegra::MemoryManager> memory_manager; @@ -173,6 +213,37 @@ private: std::unique_ptr<Engines::MaxwellDMA> maxwell_dma; /// Inline memory engine std::unique_ptr<Engines::KeplerMemory> kepler_memory; + + void ProcessBindMethod(const MethodCall& method_call); + void ProcessSemaphoreTriggerMethod(); + void ProcessSemaphoreRelease(); + void ProcessSemaphoreAcquire(); + + // Calls a GPU puller method. + void CallPullerMethod(const MethodCall& method_call); + // Calls a GPU engine method. + void CallEngineMethod(const MethodCall& method_call); + // Determines where the method should be executed. + bool ExecuteMethodOnEngine(const MethodCall& method_call); }; +#define ASSERT_REG_POSITION(field_name, position) \ + static_assert(offsetof(GPU::Regs, field_name) == position * 4, \ + "Field " #field_name " has invalid position") + +ASSERT_REG_POSITION(smaphore_address, 0x4); +ASSERT_REG_POSITION(semaphore_sequence, 0x6); +ASSERT_REG_POSITION(semaphore_trigger, 0x7); +ASSERT_REG_POSITION(reference_count, 0x14); +ASSERT_REG_POSITION(semaphore_acquire, 0x1A); +ASSERT_REG_POSITION(semaphore_release, 0x1B); + +ASSERT_REG_POSITION(acquire_mode, 0x100); +ASSERT_REG_POSITION(acquire_source, 0x101); +ASSERT_REG_POSITION(acquire_active, 0x102); +ASSERT_REG_POSITION(acquire_timeout, 0x103); +ASSERT_REG_POSITION(acquire_value, 0x104); + +#undef ASSERT_REG_POSITION + } // namespace Tegra diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h index ff5310848..4c08bb148 100644 --- a/src/video_core/rasterizer_interface.h +++ b/src/video_core/rasterizer_interface.h @@ -49,11 +49,6 @@ public: return false; } - /// Attempt to use a faster method to fill a region - virtual bool AccelerateFill(const void* config) { - return false; - } - /// Attempt to use a faster method to display the framebuffer to screen virtual bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, u32 pixel_stride) { diff --git a/src/video_core/renderer_opengl/gl_global_cache.cpp b/src/video_core/renderer_opengl/gl_global_cache.cpp index 7992b82c4..c7f32feaa 100644 --- a/src/video_core/renderer_opengl/gl_global_cache.cpp +++ b/src/video_core/renderer_opengl/gl_global_cache.cpp @@ -4,8 +4,13 @@ #include <glad/glad.h> +#include "common/assert.h" +#include "common/logging/log.h" +#include "core/core.h" +#include "core/memory.h" #include "video_core/renderer_opengl/gl_global_cache.h" #include "video_core/renderer_opengl/gl_rasterizer.h" +#include "video_core/renderer_opengl/gl_shader_decompiler.h" #include "video_core/renderer_opengl/utils.h" namespace OpenGL { @@ 
-18,7 +23,72 @@ CachedGlobalRegion::CachedGlobalRegion(VAddr addr, u32 size) : addr{addr}, size{ LabelGLObject(GL_BUFFER, buffer.handle, addr, "GlobalMemory"); } +void CachedGlobalRegion::Reload(u32 size_) { + constexpr auto max_size = static_cast<u32>(RasterizerOpenGL::MaxGlobalMemorySize); + + size = size_; + if (size > max_size) { + size = max_size; + LOG_CRITICAL(HW_GPU, "Global region size {} exceeded the expected size {}!", size_, + max_size); + } + + // TODO(Rodrigo): Get rid of Memory::GetPointer with a staging buffer + glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer.handle); + glBufferData(GL_SHADER_STORAGE_BUFFER, size, Memory::GetPointer(addr), GL_DYNAMIC_DRAW); +} + +GlobalRegion GlobalRegionCacheOpenGL::TryGetReservedGlobalRegion(VAddr addr, u32 size) const { + const auto search{reserve.find(addr)}; + if (search == reserve.end()) { + return {}; + } + return search->second; +} + +GlobalRegion GlobalRegionCacheOpenGL::GetUncachedGlobalRegion(VAddr addr, u32 size) { + GlobalRegion region{TryGetReservedGlobalRegion(addr, size)}; + if (!region) { + // No reserved surface available, create a new one and reserve it + region = std::make_shared<CachedGlobalRegion>(addr, size); + ReserveGlobalRegion(region); + } + region->Reload(size); + return region; +} + +void GlobalRegionCacheOpenGL::ReserveGlobalRegion(const GlobalRegion& region) { + reserve[region->GetAddr()] = region; +} + GlobalRegionCacheOpenGL::GlobalRegionCacheOpenGL(RasterizerOpenGL& rasterizer) : RasterizerCache{rasterizer} {} +GlobalRegion GlobalRegionCacheOpenGL::GetGlobalRegion( + const GLShader::GlobalMemoryEntry& global_region, + Tegra::Engines::Maxwell3D::Regs::ShaderStage stage) { + + auto& gpu{Core::System::GetInstance().GPU()}; + const auto cbufs = gpu.Maxwell3D().state.shader_stages[static_cast<u64>(stage)]; + const auto cbuf_addr = gpu.MemoryManager().GpuToCpuAddress( + cbufs.const_buffers[global_region.GetCbufIndex()].address + global_region.GetCbufOffset()); + ASSERT(cbuf_addr); + + const auto actual_addr_gpu = Memory::Read64(*cbuf_addr); + const auto size = Memory::Read32(*cbuf_addr + 8); + const auto actual_addr = gpu.MemoryManager().GpuToCpuAddress(actual_addr_gpu); + ASSERT(actual_addr); + + // Look up global region in the cache based on address + GlobalRegion region = TryGet(*actual_addr); + + if (!region) { + // No global region found - create a new one + region = GetUncachedGlobalRegion(*actual_addr, size); + Register(region); + } + + return region; +} + } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_global_cache.h b/src/video_core/renderer_opengl/gl_global_cache.h index 406a735bc..37830bb7c 100644 --- a/src/video_core/renderer_opengl/gl_global_cache.h +++ b/src/video_core/renderer_opengl/gl_global_cache.h @@ -5,9 +5,13 @@ #pragma once #include <memory> +#include <unordered_map> + #include <glad/glad.h> +#include "common/assert.h" #include "common/common_types.h" +#include "video_core/engines/maxwell_3d.h" #include "video_core/rasterizer_cache.h" #include "video_core/renderer_opengl/gl_resource_manager.h" @@ -40,6 +44,9 @@ public: return buffer.handle; } + /// Reloads the global region from guest memory + void Reload(u32 size_); + // TODO(Rodrigo): When global memory is written (STG), implement flushing void Flush() override { UNIMPLEMENTED(); @@ -55,6 +62,17 @@ private: class GlobalRegionCacheOpenGL final : public RasterizerCache<GlobalRegion> { public: explicit GlobalRegionCacheOpenGL(RasterizerOpenGL& rasterizer); + + /// Gets the current specified shader stage program + 
GlobalRegion GetGlobalRegion(const GLShader::GlobalMemoryEntry& descriptor, + Tegra::Engines::Maxwell3D::Regs::ShaderStage stage); + +private: + GlobalRegion TryGetReservedGlobalRegion(VAddr addr, u32 size) const; + GlobalRegion GetUncachedGlobalRegion(VAddr addr, u32 size); + void ReserveGlobalRegion(const GlobalRegion& region); + + std::unordered_map<VAddr, GlobalRegion> reserve; }; } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 6600ad528..9f7c837d6 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -297,10 +297,7 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) { MICROPROFILE_SCOPE(OpenGL_Shader); auto& gpu = Core::System::GetInstance().GPU().Maxwell3D(); - // Next available bindpoints to use when uploading the const buffers and textures to the GLSL - // shaders. The constbuffer bindpoint starts after the shader stage configuration bind points. - u32 current_constbuffer_bindpoint = Tegra::Engines::Maxwell3D::Regs::MaxShaderStage; - u32 current_texture_bindpoint = 0; + BaseBindings base_bindings; std::array<bool, Maxwell::NumClipDistances> clip_distances{}; for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) { @@ -324,43 +321,35 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) { const GLintptr offset = buffer_cache.UploadHostMemory( &ubo, sizeof(ubo), static_cast<std::size_t>(uniform_buffer_alignment)); - // Bind the buffer - glBindBufferRange(GL_UNIFORM_BUFFER, static_cast<GLuint>(stage), buffer_cache.GetHandle(), - offset, static_cast<GLsizeiptr>(sizeof(ubo))); + // Bind the emulation info buffer + glBindBufferRange(GL_UNIFORM_BUFFER, base_bindings.cbuf, buffer_cache.GetHandle(), offset, + static_cast<GLsizeiptr>(sizeof(ubo))); Shader shader{shader_cache.GetStageProgram(program)}; + const auto [program_handle, next_bindings] = + shader->GetProgramHandle(primitive_mode, base_bindings); switch (program) { case Maxwell::ShaderProgram::VertexA: - case Maxwell::ShaderProgram::VertexB: { - shader_program_manager->UseProgrammableVertexShader( - shader->GetProgramHandle(primitive_mode)); + case Maxwell::ShaderProgram::VertexB: + shader_program_manager->UseProgrammableVertexShader(program_handle); break; - } - case Maxwell::ShaderProgram::Geometry: { - shader_program_manager->UseProgrammableGeometryShader( - shader->GetProgramHandle(primitive_mode)); + case Maxwell::ShaderProgram::Geometry: + shader_program_manager->UseProgrammableGeometryShader(program_handle); break; - } - case Maxwell::ShaderProgram::Fragment: { - shader_program_manager->UseProgrammableFragmentShader( - shader->GetProgramHandle(primitive_mode)); + case Maxwell::ShaderProgram::Fragment: + shader_program_manager->UseProgrammableFragmentShader(program_handle); break; - } default: LOG_CRITICAL(HW_GPU, "Unimplemented shader index={}, enable={}, offset=0x{:08X}", index, shader_config.enable.Value(), shader_config.offset); UNREACHABLE(); } - // Configure the const buffers for this shader stage. - current_constbuffer_bindpoint = - SetupConstBuffers(static_cast<Maxwell::ShaderStage>(stage), shader, primitive_mode, - current_constbuffer_bindpoint); - - // Configure the textures for this shader stage. 
- current_texture_bindpoint = SetupTextures(static_cast<Maxwell::ShaderStage>(stage), shader, - primitive_mode, current_texture_bindpoint); + const auto stage_enum = static_cast<Maxwell::ShaderStage>(stage); + SetupConstBuffers(stage_enum, shader, program_handle, base_bindings); + SetupGlobalRegions(stage_enum, shader, program_handle, base_bindings); + SetupTextures(stage_enum, shader, program_handle, base_bindings); // Workaround for Intel drivers. // When a clip distance is enabled but not set in the shader it crops parts of the screen @@ -375,6 +364,8 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) { // VertexB was combined with VertexA, so we skip the VertexB iteration index++; } + + base_bindings = next_bindings; } SyncClipEnabled(clip_distances); @@ -486,9 +477,9 @@ void RasterizerOpenGL::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) { cached_pages.add({pages_interval, delta}); } -void RasterizerOpenGL::ConfigureFramebuffers(OpenGLState& current_state, bool using_color_fb, - bool using_depth_fb, bool preserve_contents, - std::optional<std::size_t> single_color_target) { +std::pair<bool, bool> RasterizerOpenGL::ConfigureFramebuffers( + OpenGLState& current_state, bool using_color_fb, bool using_depth_fb, bool preserve_contents, + std::optional<std::size_t> single_color_target) { MICROPROFILE_SCOPE(OpenGL_Framebuffer); const auto& gpu = Core::System::GetInstance().GPU().Maxwell3D(); const auto& regs = gpu.regs; @@ -500,7 +491,7 @@ void RasterizerOpenGL::ConfigureFramebuffers(OpenGLState& current_state, bool us // Only skip if the previous ConfigureFramebuffers call was from the same kind (multiple or // single color targets). This is done because the guest registers may not change but the // host framebuffer may contain different attachments - return; + return current_depth_stencil_usage; } current_framebuffer_config_state = fb_config_state; @@ -570,12 +561,14 @@ void RasterizerOpenGL::ConfigureFramebuffers(OpenGLState& current_state, bool us depth_surface->MarkAsModified(true, res_cache); fbkey.zeta = depth_surface->Texture().handle; - fbkey.stencil_enable = regs.stencil_enable; + fbkey.stencil_enable = regs.stencil_enable && + depth_surface->GetSurfaceParams().type == SurfaceType::DepthStencil; } SetupCachedFramebuffer(fbkey, current_state); - SyncViewport(current_state); + + return current_depth_stencil_usage = {static_cast<bool>(depth_surface), fbkey.stencil_enable}; } void RasterizerOpenGL::Clear() { @@ -643,8 +636,8 @@ void RasterizerOpenGL::Clear() { return; } - ConfigureFramebuffers(clear_state, use_color, use_depth || use_stencil, false, - regs.clear_buffers.RT.Value()); + const auto [clear_depth, clear_stencil] = ConfigureFramebuffers( + clear_state, use_color, use_depth || use_stencil, false, regs.clear_buffers.RT.Value()); if (regs.clear_flags.scissor) { SyncScissorTest(clear_state); } @@ -659,11 +652,11 @@ void RasterizerOpenGL::Clear() { glClearBufferfv(GL_COLOR, regs.clear_buffers.RT, regs.clear_color); } - if (use_depth && use_stencil) { + if (clear_depth && clear_stencil) { glClearBufferfi(GL_DEPTH_STENCIL, 0, regs.clear_depth, regs.clear_stencil); - } else if (use_depth) { + } else if (clear_depth) { glClearBufferfv(GL_DEPTH, 0, ®s.clear_depth); - } else if (use_stencil) { + } else if (clear_stencil) { glClearBufferiv(GL_STENCIL, 0, ®s.clear_stencil); } } @@ -790,11 +783,6 @@ bool RasterizerOpenGL::AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs return true; } -bool RasterizerOpenGL::AccelerateFill(const void* config) { - 
UNREACHABLE(); - return true; -} - bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, u32 pixel_stride) { if (!framebuffer_addr) { @@ -924,13 +912,14 @@ void RasterizerOpenGL::SamplerInfo::SyncWithConfig(const Tegra::Texture::TSCEntr } } -u32 RasterizerOpenGL::SetupConstBuffers(Maxwell::ShaderStage stage, Shader& shader, - GLenum primitive_mode, u32 current_bindpoint) { +void RasterizerOpenGL::SetupConstBuffers(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, + const Shader& shader, GLuint program_handle, + BaseBindings base_bindings) { MICROPROFILE_SCOPE(OpenGL_UBO); const auto& gpu = Core::System::GetInstance().GPU(); const auto& maxwell3d = gpu.Maxwell3D(); const auto& shader_stage = maxwell3d.state.shader_stages[static_cast<std::size_t>(stage)]; - const auto& entries = shader->GetShaderEntries().const_buffer_entries; + const auto& entries = shader->GetShaderEntries().const_buffers; constexpr u64 max_binds = Tegra::Engines::Maxwell3D::Regs::MaxConstBuffers; std::array<GLuint, max_binds> bind_buffers; @@ -965,7 +954,7 @@ u32 RasterizerOpenGL::SetupConstBuffers(Maxwell::ShaderStage stage, Shader& shad } } else { // Buffer is accessed directly, upload just what we use - size = used_buffer.GetSize() * sizeof(float); + size = used_buffer.GetSize(); } // Align the actual size so it ends up being a multiple of vec4 to meet the OpenGL std140 @@ -973,75 +962,73 @@ u32 RasterizerOpenGL::SetupConstBuffers(Maxwell::ShaderStage stage, Shader& shad size = Common::AlignUp(size, sizeof(GLvec4)); ASSERT_MSG(size <= MaxConstbufferSize, "Constbuffer too big"); - GLintptr const_buffer_offset = buffer_cache.UploadMemory( + const GLintptr const_buffer_offset = buffer_cache.UploadMemory( buffer.address, size, static_cast<std::size_t>(uniform_buffer_alignment)); - // Now configure the bindpoint of the buffer inside the shader - glUniformBlockBinding(shader->GetProgramHandle(primitive_mode), - shader->GetProgramResourceIndex(used_buffer), - current_bindpoint + bindpoint); - // Prepare values for multibind bind_buffers[bindpoint] = buffer_cache.GetHandle(); bind_offsets[bindpoint] = const_buffer_offset; bind_sizes[bindpoint] = size; } - glBindBuffersRange(GL_UNIFORM_BUFFER, current_bindpoint, static_cast<GLsizei>(entries.size()), + // The first binding is reserved for emulation values + const GLuint ubo_base_binding = base_bindings.cbuf + 1; + glBindBuffersRange(GL_UNIFORM_BUFFER, ubo_base_binding, static_cast<GLsizei>(entries.size()), bind_buffers.data(), bind_offsets.data(), bind_sizes.data()); +} + +void RasterizerOpenGL::SetupGlobalRegions(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, + const Shader& shader, GLenum primitive_mode, + BaseBindings base_bindings) { + // TODO(Rodrigo): Use ARB_multi_bind here + const auto& entries = shader->GetShaderEntries().global_memory_entries; - return current_bindpoint + static_cast<u32>(entries.size()); + for (u32 bindpoint = 0; bindpoint < static_cast<u32>(entries.size()); ++bindpoint) { + const auto& entry = entries[bindpoint]; + const u32 current_bindpoint = base_bindings.gmem + bindpoint; + const auto& region = global_cache.GetGlobalRegion(entry, stage); + + glBindBufferBase(GL_SHADER_STORAGE_BUFFER, current_bindpoint, region->GetBufferHandle()); + } } -u32 RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, Shader& shader, - GLenum primitive_mode, u32 current_unit) { +void RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& shader, + GLuint program_handle, BaseBindings 
base_bindings) { MICROPROFILE_SCOPE(OpenGL_Texture); const auto& gpu = Core::System::GetInstance().GPU(); const auto& maxwell3d = gpu.Maxwell3D(); - const auto& entries = shader->GetShaderEntries().texture_samplers; + const auto& entries = shader->GetShaderEntries().samplers; - ASSERT_MSG(current_unit + entries.size() <= std::size(state.texture_units), + ASSERT_MSG(base_bindings.sampler + entries.size() <= std::size(state.texture_units), "Exceeded the number of active textures."); for (u32 bindpoint = 0; bindpoint < entries.size(); ++bindpoint) { const auto& entry = entries[bindpoint]; - const u32 current_bindpoint = current_unit + bindpoint; - - // Bind the uniform to the sampler. - - glProgramUniform1i(shader->GetProgramHandle(primitive_mode), - shader->GetUniformLocation(entry), current_bindpoint); + const u32 current_bindpoint = base_bindings.sampler + bindpoint; + auto& unit = state.texture_units[current_bindpoint]; const auto texture = maxwell3d.GetStageTexture(entry.GetStage(), entry.GetOffset()); - if (!texture.enabled) { - state.texture_units[current_bindpoint].texture = 0; + unit.texture = 0; continue; } texture_samplers[current_bindpoint].SyncWithConfig(texture.tsc); + Surface surface = res_cache.GetTextureSurface(texture, entry); if (surface != nullptr) { - const GLuint handle = + unit.texture = entry.IsArray() ? surface->TextureLayer().handle : surface->Texture().handle; - const GLenum target = entry.IsArray() ? surface->TargetLayer() : surface->Target(); - state.texture_units[current_bindpoint].texture = handle; - state.texture_units[current_bindpoint].target = target; - state.texture_units[current_bindpoint].swizzle.r = - MaxwellToGL::SwizzleSource(texture.tic.x_source); - state.texture_units[current_bindpoint].swizzle.g = - MaxwellToGL::SwizzleSource(texture.tic.y_source); - state.texture_units[current_bindpoint].swizzle.b = - MaxwellToGL::SwizzleSource(texture.tic.z_source); - state.texture_units[current_bindpoint].swizzle.a = - MaxwellToGL::SwizzleSource(texture.tic.w_source); + unit.target = entry.IsArray() ? surface->TargetLayer() : surface->Target(); + unit.swizzle.r = MaxwellToGL::SwizzleSource(texture.tic.x_source); + unit.swizzle.g = MaxwellToGL::SwizzleSource(texture.tic.y_source); + unit.swizzle.b = MaxwellToGL::SwizzleSource(texture.tic.z_source); + unit.swizzle.a = MaxwellToGL::SwizzleSource(texture.tic.w_source); } else { // Can occur when texture addr is null or its memory is unmapped/invalid - state.texture_units[current_bindpoint].texture = 0; + unit.texture = 0; } } - - return current_unit + static_cast<u32>(entries.size()); } void RasterizerOpenGL::SyncViewport(OpenGLState& current_state) { diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index 21c51f874..7f2bf0f8b 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -56,7 +56,6 @@ public: void FlushAndInvalidateRegion(VAddr addr, u64 size) override; bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src, const Tegra::Engines::Fermi2D::Regs::Surface& dst) override; - bool AccelerateFill(const void* config) override; bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, u32 pixel_stride) override; bool AccelerateDrawBatch(bool is_indexed) override; @@ -122,30 +121,25 @@ private: * @param using_depth_fb If true, configure the depth/stencil framebuffer. 
* @param preserve_contents If true, tries to preserve data from a previously used framebuffer. * @param single_color_target Specifies if a single color buffer target should be used. + * @returns If depth (first) or stencil (second) are being stored in the bound zeta texture + * (requires using_depth_fb to be true) */ - void ConfigureFramebuffers(OpenGLState& current_state, bool use_color_fb = true, - bool using_depth_fb = true, bool preserve_contents = true, - std::optional<std::size_t> single_color_target = {}); + std::pair<bool, bool> ConfigureFramebuffers( + OpenGLState& current_state, bool use_color_fb = true, bool using_depth_fb = true, + bool preserve_contents = true, std::optional<std::size_t> single_color_target = {}); - /** - * Configures the current constbuffers to use for the draw command. - * @param stage The shader stage to configure buffers for. - * @param shader The shader object that contains the specified stage. - * @param current_bindpoint The offset at which to start counting new buffer bindpoints. - * @returns The next available bindpoint for use in the next shader stage. - */ - u32 SetupConstBuffers(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, Shader& shader, - GLenum primitive_mode, u32 current_bindpoint); + /// Configures the current constbuffers to use for the draw command. + void SetupConstBuffers(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, const Shader& shader, + GLuint program_handle, BaseBindings base_bindings); - /** - * Configures the current textures to use for the draw command. - * @param stage The shader stage to configure textures for. - * @param shader The shader object that contains the specified stage. - * @param current_unit The offset at which to start counting unused texture units. - * @returns The next available bindpoint for use in the next shader stage. - */ - u32 SetupTextures(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, Shader& shader, - GLenum primitive_mode, u32 current_unit); + /// Configures the current global memory entries to use for the draw command. + void SetupGlobalRegions(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, + const Shader& shader, GLenum primitive_mode, + BaseBindings base_bindings); + + /// Configures the current textures to use for the draw command. 
+ void SetupTextures(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, const Shader& shader, + GLuint program_handle, BaseBindings base_bindings); /// Syncs the viewport and depth range to match the guest state void SyncViewport(OpenGLState& current_state); @@ -221,6 +215,7 @@ private: std::map<FramebufferCacheKey, OGLFramebuffer> framebuffer_cache; FramebufferConfigState current_framebuffer_config_state; + std::pair<bool, bool> current_depth_stencil_usage{}; std::array<SamplerInfo, Tegra::Engines::Maxwell3D::Regs::NumTextureSamplers> texture_samplers; diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index a05b8b936..50286432d 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp @@ -128,6 +128,7 @@ std::size_t SurfaceParams::InnerMemorySize(bool force_gl, bool layer_only, params.height = Common::AlignUp(config.tic.Height(), GetCompressionFactor(params.pixel_format)); params.unaligned_height = config.tic.Height(); params.target = SurfaceTargetFromTextureType(config.tic.texture_type); + params.identity = SurfaceClass::Uploaded; switch (params.target) { case SurfaceTarget::Texture1D: @@ -167,6 +168,7 @@ std::size_t SurfaceParams::InnerMemorySize(bool force_gl, bool layer_only, } params.is_layered = SurfaceTargetIsLayered(params.target); + params.is_array = SurfaceTargetIsArray(params.target); params.max_mip_level = config.tic.max_mip_level + 1; params.rt = {}; @@ -194,6 +196,7 @@ std::size_t SurfaceParams::InnerMemorySize(bool force_gl, bool layer_only, params.height = config.height; params.unaligned_height = config.height; params.target = SurfaceTarget::Texture2D; + params.identity = SurfaceClass::RenderTarget; params.depth = 1; params.max_mip_level = 1; params.is_layered = false; @@ -229,6 +232,7 @@ std::size_t SurfaceParams::InnerMemorySize(bool force_gl, bool layer_only, params.height = zeta_height; params.unaligned_height = zeta_height; params.target = SurfaceTarget::Texture2D; + params.identity = SurfaceClass::DepthBuffer; params.depth = 1; params.max_mip_level = 1; params.is_layered = false; @@ -257,6 +261,7 @@ std::size_t SurfaceParams::InnerMemorySize(bool force_gl, bool layer_only, params.height = config.height; params.unaligned_height = config.height; params.target = SurfaceTarget::Texture2D; + params.identity = SurfaceClass::Copy; params.depth = 1; params.max_mip_level = 1; params.rt = {}; @@ -574,8 +579,7 @@ CachedSurface::CachedSurface(const SurfaceParams& params) ApplyTextureDefaults(SurfaceTargetToGL(params.target), params.max_mip_level); - LabelGLObject(GL_TEXTURE, texture.handle, params.addr, - SurfaceParams::SurfaceTargetName(params.target)); + OpenGL::LabelGLObject(GL_TEXTURE, texture.handle, params.addr, params.IdentityString()); // Clamp size to mapped GPU memory region // TODO(bunnei): Super Mario Odyssey maps a 0x40000 byte region and then uses it for a 0x80000 @@ -730,7 +734,6 @@ void CachedSurface::FlushGLBuffer() { glPixelStorei(GL_PACK_ROW_LENGTH, 0); ConvertFormatAsNeeded_FlushGLBuffer(gl_buffer[0], params.pixel_format, params.width, params.height); - ASSERT(params.type != SurfaceType::Fill); const u8* const texture_src_data = Memory::GetPointer(params.addr); ASSERT(texture_src_data); if (params.is_tiled) { @@ -877,10 +880,13 @@ void CachedSurface::EnsureTextureView() { UNIMPLEMENTED_IF(gl_is_compressed); const GLenum target{TargetLayer()}; + const GLuint num_layers{target == GL_TEXTURE_CUBE_MAP_ARRAY ? 
6u : 1u}; + constexpr GLuint min_layer = 0; + constexpr GLuint min_level = 0; texture_view.Create(); - glTextureView(texture_view.handle, target, texture.handle, gl_internal_format, 0, - params.max_mip_level, 0, 1); + glTextureView(texture_view.handle, target, texture.handle, gl_internal_format, min_level, + params.max_mip_level, min_layer, num_layers); OpenGLState cur_state = OpenGLState::GetCurState(); const auto& old_tex = cur_state.texture_units[0]; @@ -897,9 +903,6 @@ void CachedSurface::EnsureTextureView() { MICROPROFILE_DEFINE(OpenGL_TextureUL, "OpenGL", "Texture Upload", MP_RGB(128, 192, 64)); void CachedSurface::UploadGLTexture(GLuint read_fb_handle, GLuint draw_fb_handle) { - if (params.type == SurfaceType::Fill) - return; - MICROPROFILE_SCOPE(OpenGL_TextureUL); for (u32 i = 0; i < params.max_mip_level; i++) diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h index 37611c4fc..8d7d6722c 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h @@ -35,6 +35,14 @@ using PixelFormat = VideoCore::Surface::PixelFormat; using ComponentType = VideoCore::Surface::ComponentType; struct SurfaceParams { + + enum class SurfaceClass { + Uploaded, + RenderTarget, + DepthBuffer, + Copy, + }; + static std::string SurfaceTargetName(SurfaceTarget target) { switch (target) { case SurfaceTarget::Texture1D: @@ -210,6 +218,48 @@ struct SurfaceParams { /// Initializes parameters for caching, should be called after everything has been initialized void InitCacheParameters(Tegra::GPUVAddr gpu_addr); + std::string TargetName() const { + switch (target) { + case SurfaceTarget::Texture1D: + return "1D"; + case SurfaceTarget::Texture2D: + return "2D"; + case SurfaceTarget::Texture3D: + return "3D"; + case SurfaceTarget::Texture1DArray: + return "1DArray"; + case SurfaceTarget::Texture2DArray: + return "2DArray"; + case SurfaceTarget::TextureCubemap: + return "Cube"; + default: + LOG_CRITICAL(HW_GPU, "Unimplemented surface_target={}", static_cast<u32>(target)); + UNREACHABLE(); + return fmt::format("TUK({})", static_cast<u32>(target)); + } + } + + std::string ClassName() const { + switch (identity) { + case SurfaceClass::Uploaded: + return "UP"; + case SurfaceClass::RenderTarget: + return "RT"; + case SurfaceClass::DepthBuffer: + return "DB"; + case SurfaceClass::Copy: + return "CP"; + default: + LOG_CRITICAL(HW_GPU, "Unimplemented surface_class={}", static_cast<u32>(identity)); + UNREACHABLE(); + return fmt::format("CUK({})", static_cast<u32>(identity)); + } + } + + std::string IdentityString() const { + return ClassName() + '_' + TargetName() + '_' + (is_tiled ? 
'T' : 'L'); + } + bool is_tiled; u32 block_width; u32 block_height; @@ -223,8 +273,10 @@ struct SurfaceParams { u32 depth; u32 unaligned_height; SurfaceTarget target; + SurfaceClass identity; u32 max_mip_level; bool is_layered; + bool is_array; bool srgb_conversion; // Parameters used for caching VAddr addr; @@ -255,6 +307,7 @@ struct SurfaceReserveKey : Common::HashableStruct<OpenGL::SurfaceParams> { static SurfaceReserveKey Create(const OpenGL::SurfaceParams& params) { SurfaceReserveKey res; res.state = params; + res.state.identity = {}; // Ignore the origin of the texture res.state.gpu_addr = {}; // Ignore GPU vaddr in caching res.state.rt = {}; // Ignore rt config in caching return res; @@ -294,7 +347,7 @@ public: } const OGLTexture& TextureLayer() { - if (params.is_layered) { + if (params.is_array) { return Texture(); } EnsureTextureView(); diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index c785fffa3..90eda7814 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp @@ -10,11 +10,15 @@ #include "video_core/engines/maxwell_3d.h" #include "video_core/renderer_opengl/gl_rasterizer.h" #include "video_core/renderer_opengl/gl_shader_cache.h" +#include "video_core/renderer_opengl/gl_shader_decompiler.h" #include "video_core/renderer_opengl/gl_shader_manager.h" #include "video_core/renderer_opengl/utils.h" +#include "video_core/shader/shader_ir.h" namespace OpenGL { +using VideoCommon::Shader::ProgramCode; + /// Gets the address for the specified shader stage program static VAddr GetShaderAddress(Maxwell::ShaderProgram program) { const auto& gpu = Core::System::GetInstance().GPU().Maxwell3D(); @@ -24,42 +28,31 @@ static VAddr GetShaderAddress(Maxwell::ShaderProgram program) { } /// Gets the shader program code from memory for the specified address -static GLShader::ProgramCode GetShaderCode(VAddr addr) { - GLShader::ProgramCode program_code(GLShader::MAX_PROGRAM_CODE_LENGTH); +static ProgramCode GetShaderCode(VAddr addr) { + ProgramCode program_code(VideoCommon::Shader::MAX_PROGRAM_LENGTH); Memory::ReadBlock(addr, program_code.data(), program_code.size() * sizeof(u64)); return program_code; } -/// Helper function to set shader uniform block bindings for a single shader stage -static void SetShaderUniformBlockBinding(GLuint shader, const char* name, - Maxwell::ShaderStage binding, std::size_t expected_size) { - const GLuint ub_index = glGetUniformBlockIndex(shader, name); - if (ub_index == GL_INVALID_INDEX) { - return; +/// Gets the shader type from a Maxwell program type +constexpr GLenum GetShaderType(Maxwell::ShaderProgram program_type) { + switch (program_type) { + case Maxwell::ShaderProgram::VertexA: + case Maxwell::ShaderProgram::VertexB: + return GL_VERTEX_SHADER; + case Maxwell::ShaderProgram::Geometry: + return GL_GEOMETRY_SHADER; + case Maxwell::ShaderProgram::Fragment: + return GL_FRAGMENT_SHADER; + default: + return GL_NONE; } - - GLint ub_size = 0; - glGetActiveUniformBlockiv(shader, ub_index, GL_UNIFORM_BLOCK_DATA_SIZE, &ub_size); - ASSERT_MSG(static_cast<std::size_t>(ub_size) == expected_size, - "Uniform block size did not match! 
Got {}, expected {}", ub_size, expected_size); - glUniformBlockBinding(shader, ub_index, static_cast<GLuint>(binding)); -} - -/// Sets shader uniform block bindings for an entire shader program -static void SetShaderUniformBlockBindings(GLuint shader) { - SetShaderUniformBlockBinding(shader, "vs_config", Maxwell::ShaderStage::Vertex, - sizeof(GLShader::MaxwellUniformData)); - SetShaderUniformBlockBinding(shader, "gs_config", Maxwell::ShaderStage::Geometry, - sizeof(GLShader::MaxwellUniformData)); - SetShaderUniformBlockBinding(shader, "fs_config", Maxwell::ShaderStage::Fragment, - sizeof(GLShader::MaxwellUniformData)); } CachedShader::CachedShader(VAddr addr, Maxwell::ShaderProgram program_type) : addr{addr}, program_type{program_type}, setup{GetShaderCode(addr)} { GLShader::ProgramResult program_result; - GLenum gl_type{}; switch (program_type) { case Maxwell::ShaderProgram::VertexA: @@ -70,17 +63,14 @@ CachedShader::CachedShader(VAddr addr, Maxwell::ShaderProgram program_type) case Maxwell::ShaderProgram::VertexB: CalculateProperties(); program_result = GLShader::GenerateVertexShader(setup); - gl_type = GL_VERTEX_SHADER; break; case Maxwell::ShaderProgram::Geometry: CalculateProperties(); program_result = GLShader::GenerateGeometryShader(setup); - gl_type = GL_GEOMETRY_SHADER; break; case Maxwell::ShaderProgram::Fragment: CalculateProperties(); program_result = GLShader::GenerateFragmentShader(setup); - gl_type = GL_FRAGMENT_SHADER; break; default: LOG_CRITICAL(HW_GPU, "Unimplemented program_type={}", static_cast<u32>(program_type)); @@ -88,59 +78,105 @@ CachedShader::CachedShader(VAddr addr, Maxwell::ShaderProgram program_type) return; } + code = program_result.first; entries = program_result.second; shader_length = entries.shader_length; +} - if (program_type != Maxwell::ShaderProgram::Geometry) { - OGLShader shader; - shader.Create(program_result.first.c_str(), gl_type); - program.Create(true, shader.handle); - SetShaderUniformBlockBindings(program.handle); - LabelGLObject(GL_PROGRAM, program.handle, addr); +std::tuple<GLuint, BaseBindings> CachedShader::GetProgramHandle(GLenum primitive_mode, + BaseBindings base_bindings) { + GLuint handle{}; + if (program_type == Maxwell::ShaderProgram::Geometry) { + handle = GetGeometryShader(primitive_mode, base_bindings); } else { - // Store shader's code to lazily build it on draw - geometry_programs.code = program_result.first; + const auto [entry, is_cache_miss] = programs.try_emplace(base_bindings); + auto& program = entry->second; + if (is_cache_miss) { + std::string source = AllocateBindings(base_bindings); + source += code; + + OGLShader shader; + shader.Create(source.c_str(), GetShaderType(program_type)); + program.Create(true, shader.handle); + LabelGLObject(GL_PROGRAM, program.handle, addr); + } + + handle = program.handle; } + + // Add const buffer and samplers offset reserved by this shader. 
One UBO binding is reserved for + // emulation values + base_bindings.cbuf += static_cast<u32>(entries.const_buffers.size()) + 1; + base_bindings.gmem += static_cast<u32>(entries.global_memory_entries.size()); + base_bindings.sampler += static_cast<u32>(entries.samplers.size()); + + return {handle, base_bindings}; } -GLuint CachedShader::GetProgramResourceIndex(const GLShader::ConstBufferEntry& buffer) { - const auto search{resource_cache.find(buffer.GetHash())}; - if (search == resource_cache.end()) { - const GLuint index{ - glGetProgramResourceIndex(program.handle, GL_UNIFORM_BLOCK, buffer.GetName().c_str())}; - resource_cache[buffer.GetHash()] = index; - return index; +std::string CachedShader::AllocateBindings(BaseBindings base_bindings) { + std::string code = "#version 430 core\n"; + code += fmt::format("#define EMULATION_UBO_BINDING {}\n", base_bindings.cbuf++); + + for (const auto& cbuf : entries.const_buffers) { + code += fmt::format("#define CBUF_BINDING_{} {}\n", cbuf.GetIndex(), base_bindings.cbuf++); } - return search->second; -} + for (const auto& gmem : entries.global_memory_entries) { + code += fmt::format("#define GMEM_BINDING_{}_{} {}\n", gmem.GetCbufIndex(), + gmem.GetCbufOffset(), base_bindings.gmem++); + } -GLint CachedShader::GetUniformLocation(const GLShader::SamplerEntry& sampler) { - const auto search{uniform_cache.find(sampler.GetHash())}; - if (search == uniform_cache.end()) { - const GLint index{glGetUniformLocation(program.handle, sampler.GetName().c_str())}; - uniform_cache[sampler.GetHash()] = index; - return index; + for (const auto& sampler : entries.samplers) { + code += fmt::format("#define SAMPLER_BINDING_{} {}\n", sampler.GetIndex(), + base_bindings.sampler++); } - return search->second; + return code; +} + +GLuint CachedShader::GetGeometryShader(GLenum primitive_mode, BaseBindings base_bindings) { + const auto [entry, is_cache_miss] = geometry_programs.try_emplace(base_bindings); + auto& programs = entry->second; + + switch (primitive_mode) { + case GL_POINTS: + return LazyGeometryProgram(programs.points, base_bindings, "points", 1, "ShaderPoints"); + case GL_LINES: + case GL_LINE_STRIP: + return LazyGeometryProgram(programs.lines, base_bindings, "lines", 2, "ShaderLines"); + case GL_LINES_ADJACENCY: + case GL_LINE_STRIP_ADJACENCY: + return LazyGeometryProgram(programs.lines_adjacency, base_bindings, "lines_adjacency", 4, + "ShaderLinesAdjacency"); + case GL_TRIANGLES: + case GL_TRIANGLE_STRIP: + case GL_TRIANGLE_FAN: + return LazyGeometryProgram(programs.triangles, base_bindings, "triangles", 3, + "ShaderTriangles"); + case GL_TRIANGLES_ADJACENCY: + case GL_TRIANGLE_STRIP_ADJACENCY: + return LazyGeometryProgram(programs.triangles_adjacency, base_bindings, + "triangles_adjacency", 6, "ShaderTrianglesAdjacency"); + default: + UNREACHABLE_MSG("Unknown primitive mode."); + return LazyGeometryProgram(programs.points, base_bindings, "points", 1, "ShaderPoints"); + } } -GLuint CachedShader::LazyGeometryProgram(OGLProgram& target_program, +GLuint CachedShader::LazyGeometryProgram(OGLProgram& target_program, BaseBindings base_bindings, const std::string& glsl_topology, u32 max_vertices, const std::string& debug_name) { if (target_program.handle != 0) { return target_program.handle; } - std::string source = "#version 430 core\n"; + std::string source = AllocateBindings(base_bindings); source += "layout (" + glsl_topology + ") in;\n"; source += "#define MAX_VERTEX_INPUT " + std::to_string(max_vertices) + '\n'; - source += geometry_programs.code; + source += code; 
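At this point source holds the complete text handed to the GL compiler: the binding prelude from AllocateBindings(), the topology lines added just above for the geometry variants, and the decompiled body. A rough sketch of that prelude follows; the binding numbers and the cbuf/gmem/sampler indices are made up here and depend on the incoming BaseBindings and on what earlier stages reserved:

#version 430 core
#define EMULATION_UBO_BINDING 0   // UBO slot reserved for the emulation uniform data
#define CBUF_BINDING_1 1          // one define per const buffer the stage reads
#define GMEM_BINDING_0_64 0       // one define per global memory region (cbuf index/offset pair)
#define SAMPLER_BINDING_0 0       // one define per sampler
layout (points) in;               // only added for the lazily built geometry variants
#define MAX_VERTEX_INPUT 1        // idem, vertex count of the input topology

Because the decompiled GLSL refers to these macros rather than hardcoded binding points, the cache keeps a single decompiled body per guest shader and only re-prefixes and recompiles it when the shader is used under a different set of base bindings.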
OGLShader shader; shader.Create(source.c_str(), GL_GEOMETRY_SHADER); target_program.Create(true, shader.handle); - SetShaderUniformBlockBindings(target_program.handle); LabelGLObject(GL_PROGRAM, target_program.handle, addr, debug_name); return target_program.handle; }; diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h index 768747968..904d15dd0 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_cache.h @@ -7,11 +7,15 @@ #include <array> #include <map> #include <memory> +#include <tuple> + +#include <glad/glad.h> #include "common/assert.h" #include "common/common_types.h" #include "video_core/rasterizer_cache.h" #include "video_core/renderer_opengl/gl_resource_manager.h" +#include "video_core/renderer_opengl/gl_shader_decompiler.h" #include "video_core/renderer_opengl/gl_shader_gen.h" namespace OpenGL { @@ -22,6 +26,16 @@ class RasterizerOpenGL; using Shader = std::shared_ptr<CachedShader>; using Maxwell = Tegra::Engines::Maxwell3D::Regs; +struct BaseBindings { + u32 cbuf{}; + u32 gmem{}; + u32 sampler{}; + + bool operator<(const BaseBindings& rhs) const { + return std::tie(cbuf, gmem, sampler) < std::tie(rhs.cbuf, rhs.gmem, rhs.sampler); + } +}; + class CachedShader final : public RasterizerCacheObject { public: CachedShader(VAddr addr, Maxwell::ShaderProgram program_type); @@ -43,70 +57,45 @@ public: } /// Gets the GL program handle for the shader - GLuint GetProgramHandle(GLenum primitive_mode) { - if (program_type != Maxwell::ShaderProgram::Geometry) { - return program.handle; - } - switch (primitive_mode) { - case GL_POINTS: - return LazyGeometryProgram(geometry_programs.points, "points", 1, "ShaderPoints"); - case GL_LINES: - case GL_LINE_STRIP: - return LazyGeometryProgram(geometry_programs.lines, "lines", 2, "ShaderLines"); - case GL_LINES_ADJACENCY: - case GL_LINE_STRIP_ADJACENCY: - return LazyGeometryProgram(geometry_programs.lines_adjacency, "lines_adjacency", 4, - "ShaderLinesAdjacency"); - case GL_TRIANGLES: - case GL_TRIANGLE_STRIP: - case GL_TRIANGLE_FAN: - return LazyGeometryProgram(geometry_programs.triangles, "triangles", 3, - "ShaderTriangles"); - case GL_TRIANGLES_ADJACENCY: - case GL_TRIANGLE_STRIP_ADJACENCY: - return LazyGeometryProgram(geometry_programs.triangles_adjacency, "triangles_adjacency", - 6, "ShaderTrianglesAdjacency"); - default: - UNREACHABLE_MSG("Unknown primitive mode."); - return LazyGeometryProgram(geometry_programs.points, "points", 1, "ShaderPoints"); - } - } + std::tuple<GLuint, BaseBindings> GetProgramHandle(GLenum primitive_mode, + BaseBindings base_bindings); - /// Gets the GL program resource location for the specified resource, caching as needed - GLuint GetProgramResourceIndex(const GLShader::ConstBufferEntry& buffer); +private: + // Geometry programs. These are needed because GLSL needs an input topology but it's not + // declared by the hardware. Workaround this issue by generating a different shader per input + // topology class. 
+ struct GeometryPrograms { + OGLProgram points; + OGLProgram lines; + OGLProgram lines_adjacency; + OGLProgram triangles; + OGLProgram triangles_adjacency; + }; - /// Gets the GL uniform location for the specified resource, caching as needed - GLint GetUniformLocation(const GLShader::SamplerEntry& sampler); + std::string AllocateBindings(BaseBindings base_bindings); + + GLuint GetGeometryShader(GLenum primitive_mode, BaseBindings base_bindings); -private: /// Generates a geometry shader or returns one that already exists. - GLuint LazyGeometryProgram(OGLProgram& target_program, const std::string& glsl_topology, - u32 max_vertices, const std::string& debug_name); + GLuint LazyGeometryProgram(OGLProgram& target_program, BaseBindings base_bindings, + const std::string& glsl_topology, u32 max_vertices, + const std::string& debug_name); void CalculateProperties(); - VAddr addr; - std::size_t shader_length; - Maxwell::ShaderProgram program_type; + VAddr addr{}; + std::size_t shader_length{}; + Maxwell::ShaderProgram program_type{}; GLShader::ShaderSetup setup; GLShader::ShaderEntries entries; - // Non-geometry program. - OGLProgram program; + std::string code; - // Geometry programs. These are needed because GLSL needs an input topology but it's not - // declared by the hardware. Workaround this issue by generating a different shader per input - // topology class. - struct { - std::string code; - OGLProgram points; - OGLProgram lines; - OGLProgram lines_adjacency; - OGLProgram triangles; - OGLProgram triangles_adjacency; - } geometry_programs; + std::map<BaseBindings, OGLProgram> programs; + std::map<BaseBindings, GeometryPrograms> geometry_programs; - std::map<u32, GLuint> resource_cache; + std::map<u32, GLuint> cbuf_resource_cache; + std::map<u32, GLuint> gmem_resource_cache; std::map<u32, GLint> uniform_cache; }; diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp index 1bb09e61b..36035d0d2 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp @@ -2,247 +2,42 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
-#include <map> -#include <optional> -#include <set> +#include <array> #include <string> #include <string_view> -#include <unordered_set> +#include <variant> #include <fmt/format.h> +#include "common/alignment.h" #include "common/assert.h" #include "common/common_types.h" -#include "video_core/engines/shader_bytecode.h" -#include "video_core/engines/shader_header.h" +#include "video_core/engines/maxwell_3d.h" #include "video_core/renderer_opengl/gl_rasterizer.h" #include "video_core/renderer_opengl/gl_shader_decompiler.h" +#include "video_core/shader/shader_ir.h" -namespace OpenGL::GLShader::Decompiler { +namespace OpenGL::GLShader { using Tegra::Shader::Attribute; -using Tegra::Shader::Instruction; -using Tegra::Shader::LogicOperation; -using Tegra::Shader::OpCode; +using Tegra::Shader::Header; +using Tegra::Shader::IpaInterpMode; +using Tegra::Shader::IpaMode; +using Tegra::Shader::IpaSampleMode; using Tegra::Shader::Register; -using Tegra::Shader::Sampler; -using Tegra::Shader::SubOp; +using namespace VideoCommon::Shader; -constexpr u32 PROGRAM_END = MAX_PROGRAM_CODE_LENGTH; -constexpr u32 PROGRAM_HEADER_SIZE = sizeof(Tegra::Shader::Header); +using Maxwell = Tegra::Engines::Maxwell3D::Regs; +using ShaderStage = Tegra::Engines::Maxwell3D::Regs::ShaderStage; +using Operation = const OperationNode&; -constexpr u32 MAX_GEOMETRY_BUFFERS = 6; -constexpr u32 MAX_ATTRIBUTES = 0x100; // Size in vec4s, this value is untested +enum : u32 { POSITION_VARYING_LOCATION = 0, GENERIC_VARYING_START_LOCATION = 1 }; +constexpr u32 MAX_CONSTBUFFER_ELEMENTS = + static_cast<u32>(RasterizerOpenGL::MaxConstbufferSize) / (4 * sizeof(float)); +constexpr u32 MAX_GLOBALMEMORY_ELEMENTS = + static_cast<u32>(RasterizerOpenGL::MaxGlobalMemorySize) / sizeof(float); -static const char* INTERNAL_FLAG_NAMES[] = {"zero_flag", "sign_flag", "carry_flag", - "overflow_flag"}; - -enum class InternalFlag : u64 { - ZeroFlag = 0, - SignFlag = 1, - CarryFlag = 2, - OverflowFlag = 3, - Amount -}; - -class DecompileFail : public std::runtime_error { -public: - using std::runtime_error::runtime_error; -}; - -/// Generates code to use for a swizzle operation. -static std::string GetSwizzle(u64 elem) { - ASSERT(elem <= 3); - std::string swizzle = "."; - swizzle += "xyzw"[elem]; - return swizzle; -} - -/// Translate topology -static std::string GetTopologyName(Tegra::Shader::OutputTopology topology) { - switch (topology) { - case Tegra::Shader::OutputTopology::PointList: - return "points"; - case Tegra::Shader::OutputTopology::LineStrip: - return "line_strip"; - case Tegra::Shader::OutputTopology::TriangleStrip: - return "triangle_strip"; - default: - UNIMPLEMENTED_MSG("Unknown output topology: {}", static_cast<u32>(topology)); - return "points"; - } -} - -/// Describes the behaviour of code path of a given entry point and a return point. -enum class ExitMethod { - Undetermined, ///< Internal value. Only occur when analyzing JMP loop. - AlwaysReturn, ///< All code paths reach the return point. - Conditional, ///< Code path reaches the return point or an END instruction conditionally. - AlwaysEnd, ///< All code paths reach a END instruction. -}; - -/// A subroutine is a range of code refereced by a CALL, IF or LOOP instruction. -struct Subroutine { - /// Generates a name suitable for GLSL source code. - std::string GetName() const { - return "sub_" + std::to_string(begin) + '_' + std::to_string(end) + '_' + suffix; - } - - u32 begin; ///< Entry point of the subroutine. - u32 end; ///< Return point of the subroutine. 
- const std::string& suffix; ///< Suffix of the shader, used to make a unique subroutine name - ExitMethod exit_method; ///< Exit method of the subroutine. - std::set<u32> labels; ///< Addresses refereced by JMP instructions. - - bool operator<(const Subroutine& rhs) const { - return std::tie(begin, end) < std::tie(rhs.begin, rhs.end); - } -}; - -/// Analyzes shader code and produces a set of subroutines. -class ControlFlowAnalyzer { -public: - ControlFlowAnalyzer(const ProgramCode& program_code, u32 main_offset, const std::string& suffix) - : program_code(program_code), shader_coverage_begin(main_offset), - shader_coverage_end(main_offset + 1) { - - // Recursively finds all subroutines. - const Subroutine& program_main = AddSubroutine(main_offset, PROGRAM_END, suffix); - if (program_main.exit_method != ExitMethod::AlwaysEnd) - throw DecompileFail("Program does not always end"); - } - - std::set<Subroutine> GetSubroutines() { - return std::move(subroutines); - } - - std::size_t GetShaderLength() const { - return shader_coverage_end * sizeof(u64); - } - -private: - const ProgramCode& program_code; - std::set<Subroutine> subroutines; - std::map<std::pair<u32, u32>, ExitMethod> exit_method_map; - u32 shader_coverage_begin; - u32 shader_coverage_end; - - /// Adds and analyzes a new subroutine if it is not added yet. - const Subroutine& AddSubroutine(u32 begin, u32 end, const std::string& suffix) { - Subroutine subroutine{begin, end, suffix, ExitMethod::Undetermined, {}}; - - const auto iter = subroutines.find(subroutine); - if (iter != subroutines.end()) { - return *iter; - } - - subroutine.exit_method = Scan(begin, end, subroutine.labels); - if (subroutine.exit_method == ExitMethod::Undetermined) { - throw DecompileFail("Recursive function detected"); - } - - return *subroutines.insert(std::move(subroutine)).first; - } - - /// Merges exit method of two parallel branches. - static ExitMethod ParallelExit(ExitMethod a, ExitMethod b) { - if (a == ExitMethod::Undetermined) { - return b; - } - if (b == ExitMethod::Undetermined) { - return a; - } - if (a == b) { - return a; - } - return ExitMethod::Conditional; - } - - /// Scans a range of code for labels and determines the exit method. - ExitMethod Scan(u32 begin, u32 end, std::set<u32>& labels) { - const auto [iter, inserted] = - exit_method_map.emplace(std::make_pair(begin, end), ExitMethod::Undetermined); - ExitMethod& exit_method = iter->second; - if (!inserted) - return exit_method; - - for (u32 offset = begin; offset != end && offset != PROGRAM_END; ++offset) { - shader_coverage_begin = std::min(shader_coverage_begin, offset); - shader_coverage_end = std::max(shader_coverage_end, offset + 1); - - const Instruction instr = {program_code[offset]}; - if (const auto opcode = OpCode::Decode(instr)) { - switch (opcode->get().GetId()) { - case OpCode::Id::EXIT: { - // The EXIT instruction can be predicated, which means that the shader can - // conditionally end on this instruction. We have to consider the case where the - // condition is not met and check the exit method of that other basic block. 
- using Tegra::Shader::Pred; - if (instr.pred.pred_index == static_cast<u64>(Pred::UnusedIndex)) { - return exit_method = ExitMethod::AlwaysEnd; - } else { - const ExitMethod not_met = Scan(offset + 1, end, labels); - return exit_method = ParallelExit(ExitMethod::AlwaysEnd, not_met); - } - } - case OpCode::Id::BRA: { - const u32 target = offset + instr.bra.GetBranchTarget(); - labels.insert(target); - const ExitMethod no_jmp = Scan(offset + 1, end, labels); - const ExitMethod jmp = Scan(target, end, labels); - return exit_method = ParallelExit(no_jmp, jmp); - } - case OpCode::Id::SSY: - case OpCode::Id::PBK: { - // The SSY and PBK use a similar encoding as the BRA instruction. - UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, - "Constant buffer branching is not supported"); - const u32 target = offset + instr.bra.GetBranchTarget(); - labels.insert(target); - // Continue scanning for an exit method. - break; - } - } - } - } - return exit_method = ExitMethod::AlwaysReturn; - } -}; - -template <typename T> -class ShaderScopedScope { -public: - explicit ShaderScopedScope(T& writer, std::string_view begin_expr, std::string end_expr) - : writer(writer), end_expr(std::move(end_expr)) { - - if (begin_expr.empty()) { - writer.AddLine('{'); - } else { - writer.AddExpression(begin_expr); - writer.AddLine(" {"); - } - ++writer.scope; - } - - ShaderScopedScope(const ShaderScopedScope&) = delete; - - ~ShaderScopedScope() { - --writer.scope; - if (end_expr.empty()) { - writer.AddLine('}'); - } else { - writer.AddExpression("} "); - writer.AddExpression(end_expr); - writer.AddLine(';'); - } - } - - ShaderScopedScope& operator=(const ShaderScopedScope&) = delete; - -private: - T& writer; - std::string end_expr; -}; +enum class Type { Bool, Bool2, Float, Int, Uint, HalfFloat }; class ShaderWriter { public: @@ -271,16 +66,17 @@ public: shader_source += '\n'; } - std::string GetResult() { - return std::move(shader_source); + std::string GenerateTemporal() { + std::string temporal = "tmp"; + temporal += std::to_string(temporal_index++); + return temporal; } - ShaderScopedScope<ShaderWriter> Scope(std::string_view begin_expr = {}, - std::string end_expr = {}) { - return ShaderScopedScope(*this, begin_expr, end_expr); + std::string GetResult() { + return std::move(shader_source); } - int scope = 0; + s32 scope = 0; private: void AppendIndentation() { @@ -288,3663 +84,1483 @@ private: } std::string shader_source; + u32 temporal_index = 1; }; -/** - * Represents an emulated shader register, used to track the state of that register for emulation - * with GLSL. At this time, a register can be used as a float or an integer. This class is used for - * bookkeeping within the GLSL program. 
- */ -class GLSLRegister { -public: - enum class Type { - Float, - Integer, - UnsignedInteger, - }; - - GLSLRegister(std::size_t index, const std::string& suffix) : index{index}, suffix{suffix} {} - - /// Gets the GLSL type string for a register - static std::string GetTypeString() { - return "float"; - } - - /// Gets the GLSL register prefix string, used for declarations and referencing - static std::string GetPrefixString() { - return "reg_"; - } - - /// Returns a GLSL string representing the current state of the register - std::string GetString() const { - return GetPrefixString() + std::to_string(index) + '_' + suffix; - } - - /// Returns the index of the register - std::size_t GetIndex() const { - return index; - } - -private: - const std::size_t index; - const std::string& suffix; -}; - -/** - * Used to manage shader registers that are emulated with GLSL. This class keeps track of the state - * of all registers (e.g. whether they are currently being used as Floats or Integers), and - * generates the necessary GLSL code to perform conversions as needed. This class is used for - * bookkeeping within the GLSL program. - */ -class GLSLRegisterManager { -public: - GLSLRegisterManager(ShaderWriter& shader, ShaderWriter& declarations, - const Maxwell3D::Regs::ShaderStage& stage, const std::string& suffix, - const Tegra::Shader::Header& header) - : shader{shader}, declarations{declarations}, stage{stage}, suffix{suffix}, header{header}, - fixed_pipeline_output_attributes_used{}, local_memory_size{0} { - BuildRegisterList(); - BuildInputList(); - } - - void SetConditionalCodesFromExpression(const std::string& expresion) { - SetInternalFlag(InternalFlag::ZeroFlag, "(" + expresion + ") == 0"); - LOG_WARNING(HW_GPU, "Condition codes implementation is incomplete."); - } - - void SetConditionalCodesFromRegister(const Register& reg, u64 dest_elem = 0) { - SetConditionalCodesFromExpression(GetRegister(reg, static_cast<u32>(dest_elem))); - } - - /** - * Returns code that does an integer size conversion for the specified size. - * @param value Value to perform integer size conversion on. - * @param size Register size to use for conversion instructions. - * @returns GLSL string corresponding to the value converted to the specified size. - */ - static std::string ConvertIntegerSize(const std::string& value, Register::Size size) { - switch (size) { - case Register::Size::Byte: - return "((" + value + " << 24) >> 24)"; - case Register::Size::Short: - return "((" + value + " << 16) >> 16)"; - case Register::Size::Word: - // Default - do nothing - return value; - default: - UNREACHABLE_MSG("Unimplemented conversion size: {}", static_cast<u32>(size)); - return value; - } - } - - /** - * Gets a register as an float. - * @param reg The register to get. - * @param elem The element to use for the operation. - * @returns GLSL string corresponding to the register as a float. - */ - std::string GetRegisterAsFloat(const Register& reg, unsigned elem = 0) { - return GetRegister(reg, elem); - } - - /** - * Gets a register as an integer. - * @param reg The register to get. - * @param elem The element to use for the operation. - * @param is_signed Whether to get the register as a signed (or unsigned) integer. - * @param size Register size to use for conversion instructions. - * @returns GLSL string corresponding to the register as an integer. 
- */ - std::string GetRegisterAsInteger(const Register& reg, unsigned elem = 0, bool is_signed = true, - Register::Size size = Register::Size::Word) { - const std::string func{is_signed ? "floatBitsToInt" : "floatBitsToUint"}; - const std::string value{func + '(' + GetRegister(reg, elem) + ')'}; - return ConvertIntegerSize(value, size); - } - - /** - * Writes code that does a register assignment to float value operation. - * @param reg The destination register to use. - * @param elem The element to use for the operation. - * @param value The code representing the value to assign. - * @param dest_num_components Number of components in the destination. - * @param value_num_components Number of components in the value. - * @param is_saturated Optional, when True, saturates the provided value. - * @param sets_cc Optional, when True, sets the corresponding values to the implemented - * condition flags. - * @param dest_elem Optional, the destination element to use for the operation. - */ - void SetRegisterToFloat(const Register& reg, u64 elem, const std::string& value, - u64 dest_num_components, u64 value_num_components, - bool is_saturated = false, bool sets_cc = false, u64 dest_elem = 0, - bool precise = false) { - const std::string clamped_value = is_saturated ? "clamp(" + value + ", 0.0, 1.0)" : value; - SetRegister(reg, elem, clamped_value, dest_num_components, value_num_components, dest_elem, - precise); - if (sets_cc) { - if (reg == Register::ZeroIndex) { - SetConditionalCodesFromExpression(clamped_value); - } else { - SetConditionalCodesFromRegister(reg, dest_elem); - } - } - } - - /** - * Writes code that does a register assignment to integer value operation. - * @param reg The destination register to use. - * @param elem The element to use for the operation. - * @param value The code representing the value to assign. - * @param dest_num_components Number of components in the destination. - * @param value_num_components Number of components in the value. - * @param is_saturated Optional, when True, saturates the provided value. - * @param sets_cc Optional, when True, sets the corresponding values to the implemented - * condition flags. - * @param dest_elem Optional, the destination element to use for the operation. - * @param size Register size to use for conversion instructions. - */ - void SetRegisterToInteger(const Register& reg, bool is_signed, u64 elem, - const std::string& value, u64 dest_num_components, - u64 value_num_components, bool is_saturated = false, - bool sets_cc = false, u64 dest_elem = 0, - Register::Size size = Register::Size::Word) { - UNIMPLEMENTED_IF(is_saturated); - const std::string final_value = ConvertIntegerSize(value, size); - const std::string func{is_signed ? "intBitsToFloat" : "uintBitsToFloat"}; - - SetRegister(reg, elem, func + '(' + final_value + ')', dest_num_components, - value_num_components, dest_elem, false); - - if (sets_cc) { - if (reg == Register::ZeroIndex) { - SetConditionalCodesFromExpression(final_value); - } else { - SetConditionalCodesFromRegister(reg, dest_elem); - } - } - } - - /** - * Writes code that does a register assignment to a half float value operation. - * @param reg The destination register to use. - * @param elem The element to use for the operation. - * @param value The code representing the value to assign. Type has to be half float. - * @param merge Half float kind of assignment. - * @param dest_num_components Number of components in the destination. - * @param value_num_components Number of components in the value. 
- * @param is_saturated Optional, when True, saturates the provided value. - * @param dest_elem Optional, the destination element to use for the operation. - */ - void SetRegisterToHalfFloat(const Register& reg, u64 elem, const std::string& value, - Tegra::Shader::HalfMerge merge, u64 dest_num_components, - u64 value_num_components, bool is_saturated = false, - u64 dest_elem = 0) { - UNIMPLEMENTED_IF(is_saturated); - - const std::string result = [&]() { - switch (merge) { - case Tegra::Shader::HalfMerge::H0_H1: - return "uintBitsToFloat(packHalf2x16(" + value + "))"; - case Tegra::Shader::HalfMerge::F32: - // Half float instructions take the first component when doing a float cast. - return "float(" + value + ".x)"; - case Tegra::Shader::HalfMerge::Mrg_H0: - // TODO(Rodrigo): I guess Mrg_H0 and Mrg_H1 take their respective component from the - // pack. I couldn't test this on hardware but it shouldn't really matter since most - // of the time when a Mrg_* flag is used both components will be mirrored. That - // being said, it deserves a test. - return "uintBitsToFloat((" + GetRegisterAsInteger(reg, 0, false) + - " & 0xffff0000) | (packHalf2x16(" + value + ") & 0x0000ffff))"; - case Tegra::Shader::HalfMerge::Mrg_H1: - return "uintBitsToFloat((" + GetRegisterAsInteger(reg, 0, false) + - " & 0x0000ffff) | (packHalf2x16(" + value + ") & 0xffff0000))"; - default: - UNREACHABLE(); - return std::string("0"); - } - }(); - - SetRegister(reg, elem, result, dest_num_components, value_num_components, dest_elem, false); - } - - /** - * Writes code that does a register assignment to input attribute operation. Input attributes - * are stored as floats, so this may require conversion. - * @param reg The destination register to use. - * @param elem The element to use for the operation. - * @param attribute The input attribute to use as the source value. - * @param input_mode The input mode. - * @param vertex The register that decides which vertex to read from (used in GS). - */ - void SetRegisterToInputAttibute(const Register& reg, u64 elem, Attribute::Index attribute, - const Tegra::Shader::IpaMode& input_mode, - std::optional<Register> vertex = {}) { - const std::string dest = GetRegisterAsFloat(reg); - const std::string src = GetInputAttribute(attribute, input_mode, vertex) + GetSwizzle(elem); - shader.AddLine(dest + " = " + src + ';'); - } - - std::string GetLocalMemoryAsFloat(const std::string& index) { - return "lmem[" + index + ']'; - } +/// Generates code to use for a swizzle operation. +static std::string GetSwizzle(u32 elem) { + ASSERT(elem <= 3); + std::string swizzle = "."; + swizzle += "xyzw"[elem]; + return swizzle; +} - std::string GetLocalMemoryAsInteger(const std::string& index, bool is_signed = false) { - const std::string func{is_signed ? 
"floatToIntBits" : "floatBitsToUint"}; - return func + "(lmem[" + index + "])"; +/// Translate topology +static std::string GetTopologyName(Tegra::Shader::OutputTopology topology) { + switch (topology) { + case Tegra::Shader::OutputTopology::PointList: + return "points"; + case Tegra::Shader::OutputTopology::LineStrip: + return "line_strip"; + case Tegra::Shader::OutputTopology::TriangleStrip: + return "triangle_strip"; + default: + UNIMPLEMENTED_MSG("Unknown output topology: {}", static_cast<u32>(topology)); + return "points"; } +} - void SetLocalMemoryAsFloat(const std::string& index, const std::string& value) { - shader.AddLine("lmem[" + index + "] = " + value + ';'); - } +/// Returns true if an object has to be treated as precise +static bool IsPrecise(Operation operand) { + const auto& meta = operand.GetMeta(); - void SetLocalMemoryAsInteger(const std::string& index, const std::string& value, - bool is_signed = false) { - const std::string func{is_signed ? "intBitsToFloat" : "uintBitsToFloat"}; - shader.AddLine("lmem[" + index + "] = " + func + '(' + value + ");"); + if (const auto arithmetic = std::get_if<MetaArithmetic>(&meta)) { + return arithmetic->precise; } - - std::string GetConditionCode(const Tegra::Shader::ConditionCode cc) const { - switch (cc) { - case Tegra::Shader::ConditionCode::NEU: - return "!(" + GetInternalFlag(InternalFlag::ZeroFlag) + ')'; - default: - UNIMPLEMENTED_MSG("Unimplemented condition code: {}", static_cast<u32>(cc)); - return "false"; - } + if (const auto half_arithmetic = std::get_if<MetaHalfArithmetic>(&meta)) { + return half_arithmetic->precise; } + return false; +} - std::string GetInternalFlag(const InternalFlag flag) const { - const auto index = static_cast<u32>(flag); - ASSERT(index < static_cast<u32>(InternalFlag::Amount)); - - return std::string(INTERNAL_FLAG_NAMES[index]) + '_' + suffix; +static bool IsPrecise(Node node) { + if (const auto operation = std::get_if<OperationNode>(node)) { + return IsPrecise(*operation); } + return false; +} - void SetInternalFlag(const InternalFlag flag, const std::string& value) const { - shader.AddLine(GetInternalFlag(flag) + " = " + value + ';'); - } +class GLSLDecompiler final { +public: + explicit GLSLDecompiler(const ShaderIR& ir, ShaderStage stage, std::string suffix) + : ir{ir}, stage{stage}, suffix{suffix}, header{ir.GetHeader()} {} - /** - * Writes code that does a output attribute assignment to register operation. Output attributes - * are stored as floats, so this may require conversion. - * @param attribute The destination output attribute. - * @param elem The element to use for the operation. - * @param val_reg The register to use as the source value. - * @param buf_reg The register that tells which buffer to write to (used in geometry shaders). - */ - void SetOutputAttributeToRegister(Attribute::Index attribute, u64 elem, const Register& val_reg, - const Register& buf_reg) { - const std::string dest = GetOutputAttribute(attribute); - const std::string src = GetRegisterAsFloat(val_reg); - if (dest.empty()) - return; + void Decompile() { + DeclareVertex(); + DeclareGeometry(); + DeclareRegisters(); + DeclarePredicates(); + DeclareLocalMemory(); + DeclareInternalFlags(); + DeclareInputAttributes(); + DeclareOutputAttributes(); + DeclareConstantBuffers(); + DeclareGlobalMemory(); + DeclareSamplers(); - // Can happen with unknown/unimplemented output attributes, in which case we ignore the - // instruction for now. 
- if (stage == Maxwell3D::Regs::ShaderStage::Geometry) { - // TODO(Rodrigo): nouveau sets some attributes after setting emitting a geometry - // shader. These instructions use a dirty register as buffer index, to avoid some - // drivers from complaining about out of boundary writes, guard them. - const std::string buf_index{"((" + GetRegisterAsInteger(buf_reg) + ") % " + - std::to_string(MAX_GEOMETRY_BUFFERS) + ')'}; - shader.AddLine("amem[" + buf_index + "][" + - std::to_string(static_cast<u32>(attribute)) + ']' + GetSwizzle(elem) + - " = " + src + ';'); - return; - } + code.AddLine("void execute_" + suffix + "() {"); + ++code.scope; - switch (attribute) { - case Attribute::Index::ClipDistances0123: - case Attribute::Index::ClipDistances4567: { - const u64 index = (attribute == Attribute::Index::ClipDistances4567 ? 4 : 0) + elem; - UNIMPLEMENTED_IF_MSG( - ((header.vtg.clip_distances >> index) & 1) == 0, - "Shader is setting gl_ClipDistance{} without enabling it in the header", index); - - clip_distances[index] = true; - fixed_pipeline_output_attributes_used.insert(attribute); - shader.AddLine(dest + '[' + std::to_string(index) + "] = " + src + ';'); - break; - } - case Attribute::Index::PointSize: - fixed_pipeline_output_attributes_used.insert(attribute); - shader.AddLine(dest + " = " + src + ';'); - break; - default: - shader.AddLine(dest + GetSwizzle(elem) + " = " + src + ';'); - break; - } - } + // VM's program counter + const auto first_address = ir.GetBasicBlocks().begin()->first; + code.AddLine("uint jmp_to = " + std::to_string(first_address) + "u;"); - /// Generates code representing a uniform (C buffer) register, interpreted as the input type. - std::string GetUniform(u64 index, u64 offset, GLSLRegister::Type type, - Register::Size size = Register::Size::Word) { - declr_const_buffers[index].MarkAsUsed(index, offset, stage); - std::string value = 'c' + std::to_string(index) + '[' + std::to_string(offset / 4) + "][" + - std::to_string(offset % 4) + ']'; + // TODO(Subv): Figure out the actual depth of the flow stack, for now it seems + // unlikely that shaders will use 20 nested SSYs and PBKs. 
+ constexpr u32 FLOW_STACK_SIZE = 20; + code.AddLine(fmt::format("uint flow_stack[{}];", FLOW_STACK_SIZE)); + code.AddLine("uint flow_stack_top = 0u;"); - if (type == GLSLRegister::Type::Float) { - // Do nothing, default - } else if (type == GLSLRegister::Type::Integer) { - value = "floatBitsToInt(" + value + ')'; - } else if (type == GLSLRegister::Type::UnsignedInteger) { - value = "floatBitsToUint(" + value + ')'; - } else { - UNREACHABLE(); - } + code.AddLine("while (true) {"); + ++code.scope; - return ConvertIntegerSize(value, size); - } + code.AddLine("switch (jmp_to) {"); - std::string GetUniformIndirect(u64 cbuf_index, s64 offset, const std::string& index_str, - GLSLRegister::Type type) { - declr_const_buffers[cbuf_index].MarkAsUsedIndirect(cbuf_index, stage); + for (const auto& pair : ir.GetBasicBlocks()) { + const auto [address, bb] = pair; + code.AddLine(fmt::format("case 0x{:x}u: {{", address)); + ++code.scope; - const std::string final_offset = fmt::format("({} + {})", index_str, offset / 4); - const std::string value = 'c' + std::to_string(cbuf_index) + '[' + final_offset + " / 4][" + - final_offset + " % 4]"; + VisitBasicBlock(bb); - if (type == GLSLRegister::Type::Float) { - return value; - } else if (type == GLSLRegister::Type::Integer) { - return "floatBitsToInt(" + value + ')'; - } else { - UNREACHABLE(); - return value; + --code.scope; + code.AddLine('}'); } - } - - /// Add declarations. - void GenerateDeclarations(const std::string& suffix) { - GenerateVertex(); - GenerateRegisters(suffix); - GenerateLocalMemory(); - GenerateInternalFlags(); - GenerateInputAttrs(); - GenerateOutputAttrs(); - GenerateConstBuffers(); - GenerateSamplers(); - GenerateGeometry(); - } - /// Returns a list of constant buffer declarations. - std::vector<ConstBufferEntry> GetConstBuffersDeclarations() const { - std::vector<ConstBufferEntry> result; - std::copy_if(declr_const_buffers.begin(), declr_const_buffers.end(), - std::back_inserter(result), [](const auto& entry) { return entry.IsUsed(); }); - return result; - } + code.AddLine("default: return;"); + code.AddLine('}'); - /// Returns a list of samplers used in the shader. - const std::vector<SamplerEntry>& GetSamplers() const { - return used_samplers; - } - - /// Returns an array of the used clip distances. - const std::array<bool, Maxwell::NumClipDistances>& GetClipDistances() const { - return clip_distances; - } - - /// Returns the GLSL sampler used for the input shader sampler, and creates a new one if - /// necessary. - std::string AccessSampler(const Sampler& sampler, Tegra::Shader::TextureType type, - bool is_array, bool is_shadow) { - const auto offset = static_cast<std::size_t>(sampler.index.Value()); - - // If this sampler has already been used, return the existing mapping. 
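Taken together, Decompile() wraps each stage in a single function driven by a software program counter: every ShaderIR basic block becomes a case of a switch inside an endless loop, branches re-enter the switch with a new jmp_to value, and the flow stack gives SSY/PBK-style control flow somewhere to push return targets. A hedged sketch of the emitted skeleton, with the suffix, the entry address and the block contents purely illustrative:

void execute_vertex() {
    uint jmp_to = 10u;            // entry point, written in decimal (10 == 0xa)
    uint flow_stack[20];
    uint flow_stack_top = 0u;
    while (true) {
        switch (jmp_to) {
        case 0xau: {
            // ...decompiled code for the basic block starting at 0xa...
        }
        default: return;
        }
    }
}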
- const auto itr = - std::find_if(used_samplers.begin(), used_samplers.end(), - [&](const SamplerEntry& entry) { return entry.GetOffset() == offset; }); - - if (itr != used_samplers.end()) { - ASSERT(itr->GetType() == type && itr->IsArray() == is_array && - itr->IsShadow() == is_shadow); - return itr->GetName(); + for (std::size_t i = 0; i < 2; ++i) { + --code.scope; + code.AddLine('}'); } - - // Otherwise create a new mapping for this sampler - const std::size_t next_index = used_samplers.size(); - const SamplerEntry entry{stage, offset, next_index, type, is_array, is_shadow}; - used_samplers.emplace_back(entry); - return entry.GetName(); } - void SetLocalMemory(u64 lmem) { - local_memory_size = lmem; + std::string GetResult() { + return code.GetResult(); } -private: - /// Generates declarations for registers. - void GenerateRegisters(const std::string& suffix) { - for (const auto& reg : regs) { - declarations.AddLine(GLSLRegister::GetTypeString() + ' ' + reg.GetPrefixString() + - std::to_string(reg.GetIndex()) + '_' + suffix + " = 0;"); + ShaderEntries GetShaderEntries() const { + ShaderEntries entries; + for (const auto& cbuf : ir.GetConstantBuffers()) { + entries.const_buffers.emplace_back(cbuf.second, stage, GetConstBufferBlock(cbuf.first), + cbuf.first); } - declarations.AddNewLine(); - } - - /// Generates declarations for local memory. - void GenerateLocalMemory() { - if (local_memory_size > 0) { - declarations.AddLine("float lmem[" + std::to_string((local_memory_size - 1 + 4) / 4) + - "];"); - declarations.AddNewLine(); + for (const auto& sampler : ir.GetSamplers()) { + entries.samplers.emplace_back(sampler, stage, GetSampler(sampler)); } - } - - /// Generates declarations for internal flags. - void GenerateInternalFlags() { - for (u32 flag = 0; flag < static_cast<u32>(InternalFlag::Amount); flag++) { - const InternalFlag code = static_cast<InternalFlag>(flag); - declarations.AddLine("bool " + GetInternalFlag(code) + " = false;"); + for (const auto& gmem : ir.GetGlobalMemoryBases()) { + entries.global_memory_entries.emplace_back(gmem.cbuf_index, gmem.cbuf_offset, stage, + GetGlobalMemoryBlock(gmem)); } - declarations.AddNewLine(); + entries.clip_distances = ir.GetClipDistances(); + entries.shader_length = ir.GetLength(); + return entries; } - /// Generates declarations for input attributes. - void GenerateInputAttrs() { - for (const auto element : declr_input_attribute) { - // TODO(bunnei): Use proper number of elements for these - u32 idx = - static_cast<u32>(element.first) - static_cast<u32>(Attribute::Index::Attribute_0); - if (stage != Maxwell3D::Regs::ShaderStage::Vertex) { - // If inputs are varyings, add an offset - idx += GENERIC_VARYING_START_LOCATION; - } - - std::string attr{GetInputAttribute(element.first, element.second)}; - if (stage == Maxwell3D::Regs::ShaderStage::Geometry) { - attr = "gs_" + attr + "[]"; - } - declarations.AddLine("layout (location = " + std::to_string(idx) + ") " + - GetInputFlags(element.first) + "in vec4 " + attr + ';'); - } - - declarations.AddNewLine(); - } +private: + using OperationDecompilerFn = std::string (GLSLDecompiler::*)(Operation); + using OperationDecompilersArray = + std::array<OperationDecompilerFn, static_cast<std::size_t>(OperationCode::Amount)>; - /// Generates declarations for output attributes. 
- void GenerateOutputAttrs() { - for (const auto& index : declr_output_attribute) { - // TODO(bunnei): Use proper number of elements for these - const u32 idx = static_cast<u32>(index) - - static_cast<u32>(Attribute::Index::Attribute_0) + - GENERIC_VARYING_START_LOCATION; - declarations.AddLine("layout (location = " + std::to_string(idx) + ") out vec4 " + - GetOutputAttribute(index) + ';'); - } - declarations.AddNewLine(); - } - - /// Generates declarations for constant buffers. - void GenerateConstBuffers() { - for (const auto& entry : GetConstBuffersDeclarations()) { - declarations.AddLine("layout (std140) uniform " + entry.GetName()); - declarations.AddLine('{'); - declarations.AddLine(" vec4 c" + std::to_string(entry.GetIndex()) + - "[MAX_CONSTBUFFER_ELEMENTS];"); - declarations.AddLine("};"); - declarations.AddNewLine(); - } - declarations.AddNewLine(); - } + void DeclareVertex() { + if (stage != ShaderStage::Vertex) + return; - /// Generates declarations for samplers. - void GenerateSamplers() { - const auto& samplers = GetSamplers(); - for (const auto& sampler : samplers) { - declarations.AddLine("uniform " + sampler.GetTypeString() + ' ' + sampler.GetName() + - ';'); - } - declarations.AddNewLine(); + DeclareVertexRedeclarations(); } - /// Generates declarations used for geometry shaders. - void GenerateGeometry() { - if (stage != Maxwell3D::Regs::ShaderStage::Geometry) + void DeclareGeometry() { + if (stage != ShaderStage::Geometry) return; - declarations.AddLine( - "layout (" + GetTopologyName(header.common3.output_topology) + - ", max_vertices = " + std::to_string(header.common4.max_output_vertices) + ") out;"); - declarations.AddNewLine(); - - declarations.AddLine("vec4 amem[" + std::to_string(MAX_GEOMETRY_BUFFERS) + "][" + - std::to_string(MAX_ATTRIBUTES) + "];"); - declarations.AddNewLine(); - - constexpr char buffer[] = "amem[output_buffer]"; - declarations.AddLine("void emit_vertex(uint output_buffer) {"); - ++declarations.scope; - for (const auto element : declr_output_attribute) { - declarations.AddLine(GetOutputAttribute(element) + " = " + buffer + '[' + - std::to_string(static_cast<u32>(element)) + "];"); - } - - declarations.AddLine("position = " + std::string(buffer) + '[' + - std::to_string(static_cast<u32>(Attribute::Index::Position)) + "];"); + const auto topology = GetTopologyName(header.common3.output_topology); + const auto max_vertices = std::to_string(header.common4.max_output_vertices); + code.AddLine("layout (" + topology + ", max_vertices = " + max_vertices + ") out;"); + code.AddNewLine(); - // If a geometry shader is attached, it will always flip (it's the last stage before - // fragment). For more info about flipping, refer to gl_shader_gen.cpp. 
- declarations.AddLine("position.xy *= viewport_flip.xy;"); - declarations.AddLine("gl_Position = position;"); - declarations.AddLine("position.w = 1.0;"); - declarations.AddLine("EmitVertex();"); - --declarations.scope; - declarations.AddLine('}'); - declarations.AddNewLine(); + DeclareVertexRedeclarations(); } - void GenerateVertex() { - if (stage != Maxwell3D::Regs::ShaderStage::Vertex) - return; + void DeclareVertexRedeclarations() { bool clip_distances_declared = false; - declarations.AddLine("out gl_PerVertex {"); - ++declarations.scope; - declarations.AddLine("vec4 gl_Position;"); - for (auto& o : fixed_pipeline_output_attributes_used) { + code.AddLine("out gl_PerVertex {"); + ++code.scope; + + code.AddLine("vec4 gl_Position;"); + + for (const auto o : ir.GetOutputAttributes()) { if (o == Attribute::Index::PointSize) - declarations.AddLine("float gl_PointSize;"); + code.AddLine("float gl_PointSize;"); if (!clip_distances_declared && (o == Attribute::Index::ClipDistances0123 || o == Attribute::Index::ClipDistances4567)) { - declarations.AddLine("float gl_ClipDistance[];"); + code.AddLine("float gl_ClipDistance[];"); clip_distances_declared = true; } } - --declarations.scope; - declarations.AddLine("};"); - } - - /// Generates code representing a temporary (GPR) register. - std::string GetRegister(const Register& reg, unsigned elem) { - if (reg == Register::ZeroIndex) { - return "0"; - } - return regs[reg.GetSwizzledIndex(elem)].GetString(); - } - - /** - * Writes code that does a register assignment to value operation. - * @param reg The destination register to use. - * @param elem The element to use for the operation. - * @param value The code representing the value to assign. - * @param dest_num_components Number of components in the destination. - * @param value_num_components Number of components in the value. - * @param dest_elem Optional, the destination element to use for the operation. - */ - void SetRegister(const Register& reg, u64 elem, const std::string& value, - u64 dest_num_components, u64 value_num_components, u64 dest_elem, - bool precise) { - if (reg == Register::ZeroIndex) { - // Setting RZ is a nop in hardware. - return; - } - - std::string dest = GetRegister(reg, static_cast<u32>(dest_elem)); - if (dest_num_components > 1) { - dest += GetSwizzle(elem); - } - - std::string src = '(' + value + ')'; - if (value_num_components > 1) { - src += GetSwizzle(elem); - } - - if (precise && stage != Maxwell3D::Regs::ShaderStage::Fragment) { - const auto scope = shader.Scope(); + --code.scope; + code.AddLine("};"); + code.AddNewLine(); + } - // This avoids optimizations of constant propagation and keeps the code as the original - // Sadly using the precise keyword causes "linking" errors on fragment shaders. - shader.AddLine("precise float tmp = " + src + ';'); - shader.AddLine(dest + " = tmp;"); - } else { - shader.AddLine(dest + " = " + src + ';'); + void DeclareRegisters() { + const auto& registers = ir.GetRegisters(); + for (const u32 gpr : registers) { + code.AddLine("float " + GetRegister(gpr) + " = 0;"); } + if (!registers.empty()) + code.AddNewLine(); } - /// Build the GLSL register list. 
- void BuildRegisterList() { - regs.reserve(Register::NumRegisters); - - for (std::size_t index = 0; index < Register::NumRegisters; ++index) { - regs.emplace_back(index, suffix); + void DeclarePredicates() { + const auto& predicates = ir.GetPredicates(); + for (const auto pred : predicates) { + code.AddLine("bool " + GetPredicate(pred) + " = false;"); } + if (!predicates.empty()) + code.AddNewLine(); } - void BuildInputList() { - const u32 size = static_cast<u32>(Attribute::Index::Attribute_31) - - static_cast<u32>(Attribute::Index::Attribute_0) + 1; - declr_input_attribute.reserve(size); + void DeclareLocalMemory() { + if (const u64 local_memory_size = header.GetLocalMemorySize(); local_memory_size > 0) { + const auto element_count = Common::AlignUp(local_memory_size, 4) / 4; + code.AddLine("float " + GetLocalMemory() + '[' + std::to_string(element_count) + "];"); + code.AddNewLine(); + } } - /// Generates code representing an input attribute register. - std::string GetInputAttribute(Attribute::Index attribute, - const Tegra::Shader::IpaMode& input_mode, - std::optional<Register> vertex = {}) { - auto GeometryPass = [&](const std::string& name) { - if (stage == Maxwell3D::Regs::ShaderStage::Geometry && vertex) { - // TODO(Rodrigo): Guard geometry inputs against out of bound reads. Some games set - // an 0x80000000 index for those and the shader fails to build. Find out why this - // happens and what's its intent. - return "gs_" + name + '[' + GetRegisterAsInteger(*vertex, 0, false) + - " % MAX_VERTEX_INPUT]"; - } - return name; - }; - - switch (attribute) { - case Attribute::Index::Position: - if (stage != Maxwell3D::Regs::ShaderStage::Fragment) { - return GeometryPass("position"); - } else { - return "vec4(gl_FragCoord.x, gl_FragCoord.y, gl_FragCoord.z, 1.0)"; - } - case Attribute::Index::PointCoord: - return "vec4(gl_PointCoord.x, gl_PointCoord.y, 0, 0)"; - case Attribute::Index::TessCoordInstanceIDVertexID: - // TODO(Subv): Find out what the values are for the first two elements when inside a - // vertex shader, and what's the value of the fourth element when inside a Tess Eval - // shader. - ASSERT(stage == Maxwell3D::Regs::ShaderStage::Vertex); - // Config pack's first value is instance_id. - return "vec4(0, 0, uintBitsToFloat(config_pack[0]), uintBitsToFloat(gl_VertexID))"; - case Attribute::Index::FrontFacing: - // TODO(Subv): Find out what the values are for the other elements. - ASSERT(stage == Maxwell3D::Regs::ShaderStage::Fragment); - return "vec4(0, 0, 0, intBitsToFloat(gl_FrontFacing ? 
-1 : 0))"; - default: - const u32 index{static_cast<u32>(attribute) - - static_cast<u32>(Attribute::Index::Attribute_0)}; - if (attribute >= Attribute::Index::Attribute_0 && - attribute <= Attribute::Index::Attribute_31) { - if (declr_input_attribute.count(attribute) == 0) { - declr_input_attribute[attribute] = input_mode; - } else { - UNIMPLEMENTED_IF_MSG(declr_input_attribute[attribute] != input_mode, - "Multiple input modes for the same attribute"); - } - return GeometryPass("input_attribute_" + std::to_string(index)); - } - - UNIMPLEMENTED_MSG("Unhandled input attribute: {}", static_cast<u32>(attribute)); + void DeclareInternalFlags() { + for (u32 flag = 0; flag < static_cast<u32>(InternalFlag::Amount); flag++) { + const InternalFlag flag_code = static_cast<InternalFlag>(flag); + code.AddLine("bool " + GetInternalFlag(flag_code) + " = false;"); } - - return "vec4(0, 0, 0, 0)"; + code.AddNewLine(); } - std::string GetInputFlags(const Attribute::Index attribute) { - const Tegra::Shader::IpaSampleMode sample_mode = - declr_input_attribute[attribute].sampling_mode; - const Tegra::Shader::IpaInterpMode interp_mode = - declr_input_attribute[attribute].interpolation_mode; + std::string GetInputFlags(const IpaMode& input_mode) { + const IpaSampleMode sample_mode = input_mode.sampling_mode; + const IpaInterpMode interp_mode = input_mode.interpolation_mode; std::string out; + switch (interp_mode) { - case Tegra::Shader::IpaInterpMode::Flat: { + case IpaInterpMode::Flat: out += "flat "; break; - } - case Tegra::Shader::IpaInterpMode::Linear: { + case IpaInterpMode::Linear: out += "noperspective "; break; - } - case Tegra::Shader::IpaInterpMode::Perspective: { + case IpaInterpMode::Perspective: // Default, Smooth break; - } - default: { + default: UNIMPLEMENTED_MSG("Unhandled IPA interp mode: {}", static_cast<u32>(interp_mode)); } - } switch (sample_mode) { - case Tegra::Shader::IpaSampleMode::Centroid: - // It can be implemented with the "centroid " keyword in glsl + case IpaSampleMode::Centroid: + // It can be implemented with the "centroid " keyword in GLSL UNIMPLEMENTED_MSG("Unimplemented IPA sampler mode centroid"); break; - case Tegra::Shader::IpaSampleMode::Default: + case IpaSampleMode::Default: // Default, n/a break; - default: { + default: UNIMPLEMENTED_MSG("Unimplemented IPA sampler mode: {}", static_cast<u32>(sample_mode)); - break; - } } return out; } - /// Generates code representing the declaration name of an output attribute register. 
- std::string GetOutputAttribute(Attribute::Index attribute) { - switch (attribute) { - case Attribute::Index::PointSize: - return "gl_PointSize"; - case Attribute::Index::Position: - return "position"; - case Attribute::Index::ClipDistances0123: - case Attribute::Index::ClipDistances4567: { - return "gl_ClipDistance"; - } - default: - const u32 index{static_cast<u32>(attribute) - - static_cast<u32>(Attribute::Index::Attribute_0)}; - if (attribute >= Attribute::Index::Attribute_0) { - declr_output_attribute.insert(attribute); - return "output_attribute_" + std::to_string(index); + void DeclareInputAttributes() { + const auto& attributes = ir.GetInputAttributes(); + for (const auto element : attributes) { + const Attribute::Index index = element.first; + const IpaMode& input_mode = *element.second.begin(); + if (index < Attribute::Index::Attribute_0 || index > Attribute::Index::Attribute_31) { + // Skip when it's not a generic attribute + continue; } - UNIMPLEMENTED_MSG("Unhandled output attribute={}", index); - return {}; - } - } - - ShaderWriter& shader; - ShaderWriter& declarations; - std::vector<GLSLRegister> regs; - std::unordered_map<Attribute::Index, Tegra::Shader::IpaMode> declr_input_attribute; - std::set<Attribute::Index> declr_output_attribute; - std::array<ConstBufferEntry, Maxwell3D::Regs::MaxConstBuffers> declr_const_buffers; - std::vector<SamplerEntry> used_samplers; - const Maxwell3D::Regs::ShaderStage& stage; - const std::string& suffix; - const Tegra::Shader::Header& header; - std::unordered_set<Attribute::Index> fixed_pipeline_output_attributes_used; - std::array<bool, Maxwell::NumClipDistances> clip_distances{}; - u64 local_memory_size; -}; - -class GLSLGenerator { -public: - GLSLGenerator(const std::set<Subroutine>& subroutines, const ProgramCode& program_code, - u32 main_offset, Maxwell3D::Regs::ShaderStage stage, const std::string& suffix, - std::size_t shader_length) - : subroutines(subroutines), program_code(program_code), main_offset(main_offset), - stage(stage), suffix(suffix), shader_length(shader_length) { - std::memcpy(&header, program_code.data(), sizeof(Tegra::Shader::Header)); - local_memory_size = header.GetLocalMemorySize(); - regs.SetLocalMemory(local_memory_size); - Generate(suffix); - } - - std::string GetShaderCode() { - return declarations.GetResult() + shader.GetResult(); - } - - /// Returns entries in the shader that are useful for external functions - ShaderEntries GetEntries() const { - return {regs.GetConstBuffersDeclarations(), regs.GetSamplers(), regs.GetClipDistances(), - shader_length}; - } - -private: - /// Gets the Subroutine object corresponding to the specified address. 
- const Subroutine& GetSubroutine(u32 begin, u32 end) const { - const auto iter = subroutines.find(Subroutine{begin, end, suffix}); - ASSERT(iter != subroutines.end()); - return *iter; - } - - /// Generates code representing a 19-bit immediate value - static std::string GetImmediate19(const Instruction& instr) { - return fmt::format("uintBitsToFloat({})", instr.alu.GetImm20_19()); - } + ASSERT(element.second.size() > 0); + UNIMPLEMENTED_IF_MSG(element.second.size() > 1, + "Multiple input flag modes are not supported in GLSL"); - /// Generates code representing a 32-bit immediate value - static std::string GetImmediate32(const Instruction& instr) { - return fmt::format("uintBitsToFloat({})", instr.alu.GetImm20_32()); - } + // TODO(bunnei): Use proper number of elements for these + u32 idx = static_cast<u32>(index) - static_cast<u32>(Attribute::Index::Attribute_0); + if (stage != ShaderStage::Vertex) { + // If inputs are varyings, add an offset + idx += GENERIC_VARYING_START_LOCATION; + } - /// Generates code representing a vec2 pair unpacked from a half float immediate - static std::string UnpackHalfImmediate(const Instruction& instr, bool negate) { - const std::string immediate = GetHalfFloat(std::to_string(instr.half_imm.PackImmediates())); - if (!negate) { - return immediate; + std::string attr = GetInputAttribute(index); + if (stage == ShaderStage::Geometry) { + attr = "gs_" + attr + "[]"; + } + code.AddLine("layout (location = " + std::to_string(idx) + ") " + + GetInputFlags(input_mode) + "in vec4 " + attr + ';'); } - const std::string negate_first = instr.half_imm.first_negate != 0 ? "-" : ""; - const std::string negate_second = instr.half_imm.second_negate != 0 ? "-" : ""; - const std::string negate_vec = "vec2(" + negate_first + "1, " + negate_second + "1)"; - - return '(' + immediate + " * " + negate_vec + ')'; - } - - /// Generates code representing a texture sampler. - std::string GetSampler(const Sampler& sampler, Tegra::Shader::TextureType type, bool is_array, - bool is_shadow) { - return regs.AccessSampler(sampler, type, is_array, is_shadow); + if (!attributes.empty()) + code.AddNewLine(); } - /** - * Adds code that calls a subroutine. - * @param subroutine the subroutine to call. - */ - void CallSubroutine(const Subroutine& subroutine) { - if (subroutine.exit_method == ExitMethod::AlwaysEnd) { - shader.AddLine(subroutine.GetName() + "();"); - shader.AddLine("return true;"); - } else if (subroutine.exit_method == ExitMethod::Conditional) { - shader.AddLine("if (" + subroutine.GetName() + "()) { return true; }"); - } else { - shader.AddLine(subroutine.GetName() + "();"); + void DeclareOutputAttributes() { + const auto& attributes = ir.GetOutputAttributes(); + for (const auto index : attributes) { + if (index < Attribute::Index::Attribute_0 || index > Attribute::Index::Attribute_31) { + // Skip when it's not a generic attribute + continue; + } + // TODO(bunnei): Use proper number of elements for these + const auto idx = static_cast<u32>(index) - + static_cast<u32>(Attribute::Index::Attribute_0) + + GENERIC_VARYING_START_LOCATION; + code.AddLine("layout (location = " + std::to_string(idx) + ") out vec4 " + + GetOutputAttribute(index) + ';'); } + if (!attributes.empty()) + code.AddNewLine(); } - /* - * Writes code that assigns a predicate boolean variable. - * @param pred The id of the predicate to write to. - * @param value The expression value to assign to the predicate. 
- */ - void SetPredicate(u64 pred, const std::string& value) { - using Tegra::Shader::Pred; - // Can't assign to the constant predicate. - ASSERT(pred != static_cast<u64>(Pred::UnusedIndex)); - - std::string variable = 'p' + std::to_string(pred) + '_' + suffix; - shader.AddLine(variable + " = " + value + ';'); - declr_predicates.insert(std::move(variable)); - } - - /* - * Returns the condition to use in the 'if' for a predicated instruction. - * @param instr Instruction to generate the if condition for. - * @returns string containing the predicate condition. - */ - std::string GetPredicateCondition(u64 index, bool negate) { - using Tegra::Shader::Pred; - std::string variable; - - // Index 7 is used as an 'Always True' condition. - if (index == static_cast<u64>(Pred::UnusedIndex)) { - variable = "true"; - } else { - variable = 'p' + std::to_string(index) + '_' + suffix; - declr_predicates.insert(variable); - } - if (negate) { - return "!(" + variable + ')'; - } - - return variable; - } - - /** - * Returns the comparison string to use to compare two values in the 'set' family of - * instructions. - * @param condition The condition used in the 'set'-family instruction. - * @param op_a First operand to use for the comparison. - * @param op_b Second operand to use for the comparison. - * @returns String corresponding to the GLSL operator that matches the desired comparison. - */ - std::string GetPredicateComparison(Tegra::Shader::PredCondition condition, - const std::string& op_a, const std::string& op_b) const { - using Tegra::Shader::PredCondition; - static const std::unordered_map<PredCondition, const char*> PredicateComparisonStrings = { - {PredCondition::LessThan, "<"}, - {PredCondition::Equal, "=="}, - {PredCondition::LessEqual, "<="}, - {PredCondition::GreaterThan, ">"}, - {PredCondition::NotEqual, "!="}, - {PredCondition::GreaterEqual, ">="}, - {PredCondition::LessThanWithNan, "<"}, - {PredCondition::NotEqualWithNan, "!="}, - {PredCondition::LessEqualWithNan, "<="}, - {PredCondition::GreaterThanWithNan, ">"}, - {PredCondition::GreaterEqualWithNan, ">="}}; - - const auto& comparison{PredicateComparisonStrings.find(condition)}; - UNIMPLEMENTED_IF_MSG(comparison == PredicateComparisonStrings.end(), - "Unknown predicate comparison operation"); - - std::string predicate{'(' + op_a + ") " + comparison->second + " (" + op_b + ')'}; - if (condition == PredCondition::LessThanWithNan || - condition == PredCondition::NotEqualWithNan || - condition == PredCondition::LessEqualWithNan || - condition == PredCondition::GreaterThanWithNan || - condition == PredCondition::GreaterEqualWithNan) { - predicate += " || isnan(" + op_a + ") || isnan(" + op_b + ')'; - } - - return predicate; - } - - /** - * Returns the operator string to use to combine two predicates in the 'setp' family of - * instructions. - * @params operation The operator used in the 'setp'-family instruction. - * @returns String corresponding to the GLSL operator that matches the desired operator. 
- */ - std::string GetPredicateCombiner(Tegra::Shader::PredOperation operation) const { - using Tegra::Shader::PredOperation; - static const std::unordered_map<PredOperation, const char*> PredicateOperationStrings = { - {PredOperation::And, "&&"}, - {PredOperation::Or, "||"}, - {PredOperation::Xor, "^^"}, - }; - - auto op = PredicateOperationStrings.find(operation); - UNIMPLEMENTED_IF_MSG(op == PredicateOperationStrings.end(), "Unknown predicate operation"); - return op->second; - } - - /** - * Transforms the input string GLSL operand into one that applies the abs() function and negates - * the output if necessary. When both abs and neg are true, the negation will be applied after - * taking the absolute value. - * @param operand The input operand to take the abs() of, negate, or both. - * @param abs Whether to apply the abs() function to the input operand. - * @param neg Whether to negate the input operand. - * @returns String corresponding to the operand after being transformed by the abs() and - * negation operations. - */ - static std::string GetOperandAbsNeg(const std::string& operand, bool abs, bool neg) { - std::string result = operand; - - if (abs) { - result = "abs(" + result + ')'; + void DeclareConstantBuffers() { + for (const auto& entry : ir.GetConstantBuffers()) { + const auto [index, size] = entry; + code.AddLine("layout (std140, binding = CBUF_BINDING_" + std::to_string(index) + + ") uniform " + GetConstBufferBlock(index) + " {"); + code.AddLine(" vec4 " + GetConstBuffer(index) + "[MAX_CONSTBUFFER_ELEMENTS];"); + code.AddLine("};"); + code.AddNewLine(); } - - if (neg) { - result = "-(" + result + ')'; - } - - return result; - } - - /* - * Transforms the input string GLSL operand into an unpacked half float pair. - * @note This function returns a float type pair instead of a half float pair. This is because - * real half floats are not standardized in GLSL but unpackHalf2x16 (which returns a vec2) is. - * @param operand Input operand. It has to be an unsigned integer. - * @param type How to unpack the unsigned integer to a half float pair. - * @param abs Get the absolute value of unpacked half floats. - * @param neg Get the negative value of unpacked half floats. - * @returns String corresponding to a half float pair. - */ - static std::string GetHalfFloat(const std::string& operand, - Tegra::Shader::HalfType type = Tegra::Shader::HalfType::H0_H1, - bool abs = false, bool neg = false) { - // "vec2" calls emitted in this function are intended to alias components. - const std::string value = [&]() { - switch (type) { - case Tegra::Shader::HalfType::H0_H1: - return "unpackHalf2x16(" + operand + ')'; - case Tegra::Shader::HalfType::F32: - return "vec2(uintBitsToFloat(" + operand + "))"; - case Tegra::Shader::HalfType::H0_H0: - case Tegra::Shader::HalfType::H1_H1: { - const bool high = type == Tegra::Shader::HalfType::H1_H1; - const char unpack_index = "xy"[high ? 1 : 0]; - return "vec2(unpackHalf2x16(" + operand + ")." + unpack_index + ')'; - } - default: - UNREACHABLE(); - return std::string("vec2(0)"); - } - }(); - - return GetOperandAbsNeg(value, abs, neg); - } - - /* - * Returns whether the instruction at the specified offset is a 'sched' instruction. - * Sched instructions always appear before a sequence of 3 instructions. - */ - bool IsSchedInstruction(u32 offset) const { - // sched instructions appear once every 4 instructions. 
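Editor's note: the removed IsSchedInstruction helper, which continues just below, encodes Maxwell's instruction layout, where one scheduling control word precedes every group of three executable instructions, so one slot out of every four (counted from main_offset) carries no opcode. A minimal standalone sketch of the same stride check, with illustrative names, not code from this patch:

    #include <cstdint>

    constexpr std::uint32_t SchedPeriod = 4;

    // True when the slot at 'offset' holds a scheduling control word rather than an opcode.
    bool IsSchedSlot(std::uint32_t offset, std::uint32_t main_offset) {
        return (offset - main_offset) % SchedPeriod == 0;
    }

    // The decompiler simply skips such slots and resumes decoding at the next one.
    std::uint32_t SkipSchedSlot(std::uint32_t offset, std::uint32_t main_offset) {
        return IsSchedSlot(offset, main_offset) ? offset + 1 : offset;
    }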
- static constexpr std::size_t SchedPeriod = 4; - u32 absolute_offset = offset - main_offset; - - return (absolute_offset % SchedPeriod) == 0; } - void WriteLogicOperation(Register dest, LogicOperation logic_op, const std::string& op_a, - const std::string& op_b, - Tegra::Shader::PredicateResultMode predicate_mode, - Tegra::Shader::Pred predicate, const bool set_cc) { - std::string result{}; - switch (logic_op) { - case LogicOperation::And: { - result = '(' + op_a + " & " + op_b + ')'; - break; - } - case LogicOperation::Or: { - result = '(' + op_a + " | " + op_b + ')'; - break; - } - case LogicOperation::Xor: { - result = '(' + op_a + " ^ " + op_b + ')'; - break; - } - case LogicOperation::PassB: { - result = op_b; - break; - } - default: - UNIMPLEMENTED_MSG("Unimplemented logic operation={}", static_cast<u32>(logic_op)); + void DeclareGlobalMemory() { + for (const auto& entry : ir.GetGlobalMemoryBases()) { + const std::string binding = + fmt::format("GMEM_BINDING_{}_{}", entry.cbuf_index, entry.cbuf_offset); + code.AddLine("layout (std430, binding = " + binding + ") buffer " + + GetGlobalMemoryBlock(entry) + " {"); + code.AddLine(" float " + GetGlobalMemory(entry) + "[MAX_GLOBALMEMORY_ELEMENTS];"); + code.AddLine("};"); + code.AddNewLine(); } + } - if (dest != Tegra::Shader::Register::ZeroIndex) { - regs.SetRegisterToInteger(dest, true, 0, result, 1, 1, false, set_cc); - } + void DeclareSamplers() { + const auto& samplers = ir.GetSamplers(); + for (const auto& sampler : samplers) { + std::string sampler_type = [&]() { + switch (sampler.GetType()) { + case Tegra::Shader::TextureType::Texture1D: + return "sampler1D"; + case Tegra::Shader::TextureType::Texture2D: + return "sampler2D"; + case Tegra::Shader::TextureType::Texture3D: + return "sampler3D"; + case Tegra::Shader::TextureType::TextureCube: + return "samplerCube"; + default: + UNREACHABLE(); + return "sampler2D"; + } + }(); + if (sampler.IsArray()) + sampler_type += "Array"; + if (sampler.IsShadow()) + sampler_type += "Shadow"; - using Tegra::Shader::PredicateResultMode; - // Write the predicate value depending on the predicate mode. - switch (predicate_mode) { - case PredicateResultMode::None: - // Do nothing. - return; - case PredicateResultMode::NotZero: - // Set the predicate to true if the result is not zero. 
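Editor's note: WriteLogicOperation above handles the two-input LOP family; its three-input sibling WriteLop3Instruction, removed a few lines below, expands an 8-bit lookup table one bit at a time into a very long GLSL expression. A host-side sketch of the same evaluation, added here purely as an illustration of what that expression computes (not code from this patch):

    #include <cstdint>

    // Bit i of the result is the LUT entry selected by the three input bits, with op_a as the
    // most significant selector bit, matching the index the removed code builds:
    //   ((c >> i) & 1) | ((b >> i) & 1) << 1 | ((a >> i) & 1) << 2
    std::uint32_t Lop3(std::uint32_t a, std::uint32_t b, std::uint32_t c, std::uint32_t lut) {
        std::uint32_t result = 0;
        for (std::uint32_t i = 0; i < 32; ++i) {
            const std::uint32_t index =
                (((a >> i) & 1) << 2) | (((b >> i) & 1) << 1) | ((c >> i) & 1);
            result |= ((lut >> index) & 1) << i;
        }
        return result;
    }

With that bit order, a LUT of 0xF0 reproduces a, 0xCC reproduces b, and 0x96 computes a ^ b ^ c.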
- SetPredicate(static_cast<u64>(predicate), '(' + result + ") != 0"); - break; - default: - UNIMPLEMENTED_MSG("Unimplemented predicate result mode: {}", - static_cast<u32>(predicate_mode)); + code.AddLine("layout (binding = SAMPLER_BINDING_" + std::to_string(sampler.GetIndex()) + + ") uniform " + sampler_type + ' ' + GetSampler(sampler) + ';'); } + if (!samplers.empty()) + code.AddNewLine(); } - void WriteLop3Instruction(Register dest, const std::string& op_a, const std::string& op_b, - const std::string& op_c, const std::string& imm_lut, - const bool set_cc) { - if (dest == Tegra::Shader::Register::ZeroIndex) { - return; + void VisitBasicBlock(const BasicBlock& bb) { + for (const Node node : bb) { + if (const std::string expr = Visit(node); !expr.empty()) { + code.AddLine(expr); + } } + } - static constexpr std::array<const char*, 32> shift_amounts = { - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", - "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", - "22", "23", "24", "25", "26", "27", "28", "29", "30", "31"}; - - std::string result; - result += '('; - - for (std::size_t i = 0; i < shift_amounts.size(); ++i) { - if (i) - result += '|'; - result += "(((" + imm_lut + " >> (((" + op_c + " >> " + shift_amounts[i] + - ") & 1) | ((" + op_b + " >> " + shift_amounts[i] + ") & 1) << 1 | ((" + op_a + - " >> " + shift_amounts[i] + ") & 1) << 2)) & 1) << " + shift_amounts[i] + ")"; - } + std::string Visit(Node node) { + if (const auto operation = std::get_if<OperationNode>(node)) { + const auto operation_index = static_cast<std::size_t>(operation->GetCode()); + const auto decompiler = operation_decompilers[operation_index]; + if (decompiler == nullptr) { + UNREACHABLE_MSG("Operation decompiler {} not defined", operation_index); + } + return (this->*decompiler)(*operation); - result += ')'; + } else if (const auto gpr = std::get_if<GprNode>(node)) { + const u32 index = gpr->GetIndex(); + if (index == Register::ZeroIndex) { + return "0"; + } + return GetRegister(index); - regs.SetRegisterToInteger(dest, true, 0, result, 1, 1, false, set_cc); - } + } else if (const auto immediate = std::get_if<ImmediateNode>(node)) { + const u32 value = immediate->GetValue(); + if (value < 10) { + // For eyecandy avoid using hex numbers on single digits + return fmt::format("utof({}u)", immediate->GetValue()); + } + return fmt::format("utof(0x{:x}u)", immediate->GetValue()); - void WriteTexsInstructionFloat(const Instruction& instr, const std::string& texture) { - // TEXS has two destination registers and a swizzle. 
The first two elements in the swizzle - // go into gpr0+0 and gpr0+1, and the rest goes into gpr28+0 and gpr28+1 + } else if (const auto predicate = std::get_if<PredicateNode>(node)) { + const auto value = [&]() -> std::string { + switch (const auto index = predicate->GetIndex(); index) { + case Tegra::Shader::Pred::UnusedIndex: + return "true"; + case Tegra::Shader::Pred::NeverExecute: + return "false"; + default: + return GetPredicate(index); + } + }(); + if (predicate->IsNegated()) { + return "!(" + value + ')'; + } + return value; - std::size_t written_components = 0; - for (u32 component = 0; component < 4; ++component) { - if (!instr.texs.IsComponentEnabled(component)) { - continue; + } else if (const auto abuf = std::get_if<AbufNode>(node)) { + const auto attribute = abuf->GetIndex(); + const auto element = abuf->GetElement(); + + const auto GeometryPass = [&](const std::string& name) { + if (stage == ShaderStage::Geometry && abuf->GetBuffer()) { + // TODO(Rodrigo): Guard geometry inputs against out of bound reads. Some games + // set an 0x80000000 index for those and the shader fails to build. Find out why + // this happens and what's its intent. + return "gs_" + name + "[ftou(" + Visit(abuf->GetBuffer()) + + ") % MAX_VERTEX_INPUT]"; + } + return name; + }; + + switch (attribute) { + case Attribute::Index::Position: + if (stage != ShaderStage::Fragment) { + return GeometryPass("position") + GetSwizzle(element); + } else { + return element == 3 ? "1.0f" : "gl_FragCoord" + GetSwizzle(element); + } + case Attribute::Index::PointCoord: + switch (element) { + case 0: + return "gl_PointCoord.x"; + case 1: + return "gl_PointCoord.y"; + case 2: + case 3: + return "0"; + } + UNREACHABLE(); + return "0"; + case Attribute::Index::TessCoordInstanceIDVertexID: + // TODO(Subv): Find out what the values are for the first two elements when inside a + // vertex shader, and what's the value of the fourth element when inside a Tess Eval + // shader. + ASSERT(stage == ShaderStage::Vertex); + switch (element) { + case 2: + // Config pack's first value is instance_id. + return "uintBitsToFloat(config_pack[0])"; + case 3: + return "uintBitsToFloat(gl_VertexID)"; + } + UNIMPLEMENTED_MSG("Unmanaged TessCoordInstanceIDVertexID element={}", element); + return "0"; + case Attribute::Index::FrontFacing: + // TODO(Subv): Find out what the values are for the other elements. + ASSERT(stage == ShaderStage::Fragment); + switch (element) { + case 3: + return "itof(gl_FrontFacing ? 
-1 : 0)"; + } + UNIMPLEMENTED_MSG("Unmanaged FrontFacing element={}", element); + return "0"; + default: + if (attribute >= Attribute::Index::Attribute_0 && + attribute <= Attribute::Index::Attribute_31) { + return GeometryPass(GetInputAttribute(attribute)) + GetSwizzle(element); + } + break; } + UNIMPLEMENTED_MSG("Unhandled input attribute: {}", static_cast<u32>(attribute)); + + } else if (const auto cbuf = std::get_if<CbufNode>(node)) { + const Node offset = cbuf->GetOffset(); + if (const auto immediate = std::get_if<ImmediateNode>(offset)) { + // Direct access + const u32 offset_imm = immediate->GetValue(); + ASSERT_MSG(offset_imm % 4 == 0, "Unaligned cbuf direct access"); + return fmt::format("{}[{}][{}]", GetConstBuffer(cbuf->GetIndex()), + offset_imm / (4 * 4), (offset_imm / 4) % 4); + + } else if (std::holds_alternative<OperationNode>(*offset)) { + // Indirect access + const std::string final_offset = code.GenerateTemporal(); + code.AddLine("uint " + final_offset + " = (ftou(" + Visit(offset) + ") / 4) & " + + std::to_string(MAX_CONSTBUFFER_ELEMENTS - 1) + ';'); + return fmt::format("{}[{} / 4][{} % 4]", GetConstBuffer(cbuf->GetIndex()), + final_offset, final_offset); - if (written_components < 2) { - // Write the first two swizzle components to gpr0 and gpr0+1 - regs.SetRegisterToFloat(instr.gpr0, component, texture, 1, 4, false, false, - written_components % 2); } else { - ASSERT(instr.texs.HasTwoDestinations()); - // Write the rest of the swizzle components to gpr28 and gpr28+1 - regs.SetRegisterToFloat(instr.gpr28, component, texture, 1, 4, false, false, - written_components % 2); + UNREACHABLE_MSG("Unmanaged offset node type"); } - ++written_components; - } - } + } else if (const auto gmem = std::get_if<GmemNode>(node)) { + const std::string real = Visit(gmem->GetRealAddress()); + const std::string base = Visit(gmem->GetBaseAddress()); + const std::string final_offset = "(ftou(" + real + ") - ftou(" + base + ")) / 4"; + return fmt::format("{}[{}]", GetGlobalMemory(gmem->GetDescriptor()), final_offset); - void WriteTexsInstructionHalfFloat(const Instruction& instr, const std::string& texture) { - // TEXS.F16 destionation registers are packed in two registers in pairs (just like any half - // float instruction). + } else if (const auto lmem = std::get_if<LmemNode>(node)) { + return fmt::format("{}[ftou({}) / 4]", GetLocalMemory(), Visit(lmem->GetAddress())); - std::array<std::string, 4> components; - u32 written_components = 0; + } else if (const auto internal_flag = std::get_if<InternalFlagNode>(node)) { + return GetInternalFlag(internal_flag->GetFlag()); - for (u32 component = 0; component < 4; ++component) { - if (!instr.texs.IsComponentEnabled(component)) - continue; - components[written_components++] = texture + GetSwizzle(component); - } - if (written_components == 0) - return; + } else if (const auto conditional = std::get_if<ConditionalNode>(node)) { + // It's invalid to call conditional on nested nodes, use an operation instead + code.AddLine("if (" + Visit(conditional->GetCondition()) + ") {"); + ++code.scope; - const auto BuildComponent = [&](std::string low, std::string high, bool high_enabled) { - return "vec2(" + low + ", " + (high_enabled ? 
high : "0") + ')'; - }; + VisitBasicBlock(conditional->GetCode()); - regs.SetRegisterToHalfFloat( - instr.gpr0, 0, BuildComponent(components[0], components[1], written_components > 1), - Tegra::Shader::HalfMerge::H0_H1, 1, 1); + --code.scope; + code.AddLine('}'); + return {}; - if (written_components > 2) { - ASSERT(instr.texs.HasTwoDestinations()); - regs.SetRegisterToHalfFloat( - instr.gpr28, 0, - BuildComponent(components[2], components[3], written_components > 3), - Tegra::Shader::HalfMerge::H0_H1, 1, 1); + } else if (const auto comment = std::get_if<CommentNode>(node)) { + return "// " + comment->GetText(); } + UNREACHABLE(); + return {}; } - static u32 TextureCoordinates(Tegra::Shader::TextureType texture_type) { - switch (texture_type) { - case Tegra::Shader::TextureType::Texture1D: - return 1; - case Tegra::Shader::TextureType::Texture2D: - return 2; - case Tegra::Shader::TextureType::Texture3D: - case Tegra::Shader::TextureType::TextureCube: - return 3; - default: - UNIMPLEMENTED_MSG("Unhandled texture type: {}", static_cast<u32>(texture_type)); - return 0; + std::string ApplyPrecise(Operation operation, const std::string& value) { + if (!IsPrecise(operation)) { + return value; } - } + // There's a bug in NVidia's proprietary drivers that makes precise fail on fragment shaders + const std::string precise = stage != ShaderStage::Fragment ? "precise " : ""; - /* - * Emits code to push the input target address to the flow address stack, incrementing the stack - * top. - */ - void EmitPushToFlowStack(u32 target) { - const auto scope = shader.Scope(); - - shader.AddLine("flow_stack[flow_stack_top] = " + std::to_string(target) + "u;"); - shader.AddLine("flow_stack_top++;"); + const std::string temporal = code.GenerateTemporal(); + code.AddLine(precise + "float " + temporal + " = " + value + ';'); + return temporal; } - /* - * Emits code to pop an address from the flow address stack, setting the jump address to the - * popped address and decrementing the stack top. - */ - void EmitPopFromFlowStack() { - const auto scope = shader.Scope(); + std::string VisitOperand(Operation operation, std::size_t operand_index) { + const auto& operand = operation[operand_index]; + const bool parent_precise = IsPrecise(operation); + const bool child_precise = IsPrecise(operand); + const bool child_trivial = !std::holds_alternative<OperationNode>(*operand); + if (!parent_precise || child_precise || child_trivial) { + return Visit(operand); + } - shader.AddLine("flow_stack_top--;"); - shader.AddLine("jmp_to = flow_stack[flow_stack_top];"); - shader.AddLine("break;"); + const std::string temporal = code.GenerateTemporal(); + code.AddLine("float " + temporal + " = " + Visit(operand) + ';'); + return temporal; } - /// Writes the output values from a fragment shader to the corresponding GLSL output variables. - void EmitFragmentOutputsWrite() { - ASSERT(stage == Maxwell3D::Regs::ShaderStage::Fragment); + std::string VisitOperand(Operation operation, std::size_t operand_index, Type type) { + std::string value = VisitOperand(operation, operand_index); - UNIMPLEMENTED_IF_MSG(header.ps.omap.sample_mask != 0, "Samplemask write is unimplemented"); - - shader.AddLine("if (alpha_test[0] != 0) {"); - ++shader.scope; - // We start on the register containing the alpha value in the first RT. 
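Editor's note: the removed EmitFragmentOutputsWrite, which continues below, walks the output registers in two passes: the alpha-test source starts at r3 and advances by four for each render target that writes anything, colour components are packed consecutively from r0 while skipping disabled components, and depth lands two registers past the last colour register. A small runnable sketch of that walk for the fully enabled two-render-target case, included only as an editorial illustration of the register numbering:

    #include <cstdint>
    #include <iostream>

    int main() {
        constexpr std::uint32_t num_render_targets = 2; // illustrative count
        std::uint32_t alpha_reg = 3;                     // alpha of the first render target
        std::uint32_t color_reg = 0;
        for (std::uint32_t rt = 0; rt < num_render_targets; ++rt) {
            std::cout << "alpha test for RT" << rt << " reads r" << alpha_reg << '\n';
            alpha_reg += 4;
            for (std::uint32_t component = 0; component < 4; ++component) {
                std::cout << "FragColor" << rt << '[' << component << "] = r" << color_reg++ << '\n';
            }
        }
        std::cout << "gl_FragDepth = r" << color_reg + 1 << '\n'; // r9 for two full targets
    }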
- u32 current_reg = 3; - for (u32 render_target = 0; render_target < Maxwell3D::Regs::NumRenderTargets; - ++render_target) { - // TODO(Blinkhawk): verify the behavior of alpha testing on hardware when - // multiple render targets are used. - if (header.ps.IsColorComponentOutputEnabled(render_target, 0) || - header.ps.IsColorComponentOutputEnabled(render_target, 1) || - header.ps.IsColorComponentOutputEnabled(render_target, 2) || - header.ps.IsColorComponentOutputEnabled(render_target, 3)) { - shader.AddLine(fmt::format("if (!AlphaFunc({})) discard;", - regs.GetRegisterAsFloat(current_reg))); - current_reg += 4; + switch (type) { + case Type::Bool: + case Type::Bool2: + case Type::Float: + return value; + case Type::Int: + return "ftoi(" + value + ')'; + case Type::Uint: + return "ftou(" + value + ')'; + case Type::HalfFloat: + const auto half_meta = std::get_if<MetaHalfArithmetic>(&operation.GetMeta()); + if (!half_meta) { + value = "toHalf2(" + value + ')'; } - } - --shader.scope; - shader.AddLine('}'); - // Write the color outputs using the data in the shader registers, disabled - // rendertargets/components are skipped in the register assignment. - current_reg = 0; - for (u32 render_target = 0; render_target < Maxwell3D::Regs::NumRenderTargets; - ++render_target) { - // TODO(Subv): Figure out how dual-source blending is configured in the Switch. - for (u32 component = 0; component < 4; ++component) { - if (header.ps.IsColorComponentOutputEnabled(render_target, component)) { - shader.AddLine(fmt::format("FragColor{}[{}] = {};", render_target, component, - regs.GetRegisterAsFloat(current_reg))); - ++current_reg; - } + switch (half_meta->types.at(operand_index)) { + case Tegra::Shader::HalfType::H0_H1: + return "toHalf2(" + value + ')'; + case Tegra::Shader::HalfType::F32: + return "vec2(" + value + ')'; + case Tegra::Shader::HalfType::H0_H0: + return "vec2(toHalf2(" + value + ")[0])"; + case Tegra::Shader::HalfType::H1_H1: + return "vec2(toHalf2(" + value + ")[1])"; } } - - if (header.ps.omap.depth) { - // The depth output is always 2 registers after the last color output, and current_reg - // already contains one past the last color register. - - shader.AddLine( - "gl_FragDepth = " + - regs.GetRegisterAsFloat(static_cast<Tegra::Shader::Register>(current_reg) + 1) + - ';'); - } + UNREACHABLE(); + return value; } - /// Unpacks a video instruction operand (e.g. VMAD). - std::string GetVideoOperand(const std::string& op, bool is_chunk, bool is_signed, - Tegra::Shader::VideoType type, u64 byte_height) { - const std::string value = [&]() { - if (!is_chunk) { - const auto offset = static_cast<u32>(byte_height * 8); - return "((" + op + " >> " + std::to_string(offset) + ") & 0xff)"; - } - const std::string zero = "0"; - - switch (type) { - case Tegra::Shader::VideoType::Size16_Low: - return '(' + op + " & 0xffff)"; - case Tegra::Shader::VideoType::Size16_High: - return '(' + op + " >> 16)"; - case Tegra::Shader::VideoType::Size32: - // TODO(Rodrigo): From my hardware tests it becomes a bit "mad" when - // this type is used (1 * 1 + 0 == 0x5b800000). Until a better - // explanation is found: abort. 
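Editor's note: the typed VisitOperand overload above wraps operands in ftoi/ftou because the new decompiler stores every value as a float, and BitwiseCastResult just below undoes it with itof/utof/fromHalf2. Assuming, as the call sites suggest, that the GLSL helpers are plain bit reinterpretations (floatBitsToUint and friends), their host-side equivalents would look like this (editorial sketch, not patch code):

    #include <cstdint>
    #include <cstring>

    // Equivalent of the generated ftou() helper: copy the bit pattern, no rounding or clamping.
    std::uint32_t ftou(float value) {
        std::uint32_t result;
        std::memcpy(&result, &value, sizeof(result));
        return result;
    }

    // Equivalent of the generated utof() helper: the exact inverse reinterpretation.
    float utof(std::uint32_t value) {
        float result;
        std::memcpy(&result, &value, sizeof(result));
        return result;
    }

In the emitted GLSL these casts end up nested around the infix expressions the Generate* helpers build, so an unsigned addition of two register operands comes out roughly as utof((ftou(a) + ftou(b))), where a and b stand for whatever the operand visits produce.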
- UNIMPLEMENTED(); - return zero; - case Tegra::Shader::VideoType::Invalid: - UNREACHABLE_MSG("Invalid instruction encoding"); - return zero; - default: - UNREACHABLE(); - return zero; + std::string BitwiseCastResult(std::string value, Type type, bool needs_parenthesis = false) { + switch (type) { + case Type::Bool: + case Type::Float: + if (needs_parenthesis) { + return '(' + value + ')'; } - }(); - - if (is_signed) { - return "int(" + value + ')'; - } + return value; + case Type::Int: + return "itof(" + value + ')'; + case Type::Uint: + return "utof(" + value + ')'; + case Type::HalfFloat: + return "fromHalf2(" + value + ')'; + } + UNREACHABLE(); return value; - }; - - /// Gets the A operand for a video instruction. - std::string GetVideoOperandA(Instruction instr) { - return GetVideoOperand(regs.GetRegisterAsInteger(instr.gpr8, 0, false), - instr.video.is_byte_chunk_a != 0, instr.video.signed_a, - instr.video.type_a, instr.video.byte_height_a); } - /// Gets the B operand for a video instruction. - std::string GetVideoOperandB(Instruction instr) { - if (instr.video.use_register_b) { - return GetVideoOperand(regs.GetRegisterAsInteger(instr.gpr20, 0, false), - instr.video.is_byte_chunk_b != 0, instr.video.signed_b, - instr.video.type_b, instr.video.byte_height_b); - } else { - return '(' + - std::to_string(instr.video.signed_b ? static_cast<s16>(instr.alu.GetImm20_16()) - : instr.alu.GetImm20_16()) + - ')'; - } + std::string GenerateUnary(Operation operation, const std::string& func, Type result_type, + Type type_a, bool needs_parenthesis = true) { + return ApplyPrecise(operation, + BitwiseCastResult(func + '(' + VisitOperand(operation, 0, type_a) + ')', + result_type, needs_parenthesis)); } - std::pair<size_t, std::string> ValidateAndGetCoordinateElement( - const Tegra::Shader::TextureType texture_type, const bool depth_compare, - const bool is_array, const bool lod_bias_enabled, size_t max_coords, size_t max_inputs) { - const size_t coord_count = TextureCoordinates(texture_type); - - size_t total_coord_count = coord_count + (is_array ? 1 : 0) + (depth_compare ? 1 : 0); - const size_t total_reg_count = total_coord_count + (lod_bias_enabled ? 1 : 0); - if (total_coord_count > max_coords || total_reg_count > max_inputs) { - UNIMPLEMENTED_MSG("Unsupported Texture operation"); - total_coord_count = std::min(total_coord_count, max_coords); - } - // 1D.DC opengl is using a vec3 but 2nd component is ignored later. - total_coord_count += - (depth_compare && !is_array && texture_type == Tegra::Shader::TextureType::Texture1D) - ? 
1 - : 0; - - constexpr std::array<const char*, 5> coord_container{ - {"", "float coord = (", "vec2 coord = vec2(", "vec3 coord = vec3(", - "vec4 coord = vec4("}}; - - return std::pair<size_t, std::string>(coord_count, coord_container[total_coord_count]); - } - - std::string GetTextureCode(const Tegra::Shader::Instruction& instr, - const Tegra::Shader::TextureType texture_type, - const Tegra::Shader::TextureProcessMode process_mode, - const bool depth_compare, const bool is_array, - const size_t bias_offset) { - - if ((texture_type == Tegra::Shader::TextureType::Texture3D && - (is_array || depth_compare)) || - (texture_type == Tegra::Shader::TextureType::TextureCube && is_array && - depth_compare)) { - UNIMPLEMENTED_MSG("This method is not supported."); - } - - const std::string sampler = - GetSampler(instr.sampler, texture_type, is_array, depth_compare); - - const bool lod_needed = process_mode == Tegra::Shader::TextureProcessMode::LZ || - process_mode == Tegra::Shader::TextureProcessMode::LL || - process_mode == Tegra::Shader::TextureProcessMode::LLA; + std::string GenerateBinaryInfix(Operation operation, const std::string& func, Type result_type, + Type type_a, Type type_b) { + const std::string op_a = VisitOperand(operation, 0, type_a); + const std::string op_b = VisitOperand(operation, 1, type_b); - // LOD selection (either via bias or explicit textureLod) not supported in GL for - // sampler2DArrayShadow and samplerCubeArrayShadow. - const bool gl_lod_supported = !( - (texture_type == Tegra::Shader::TextureType::Texture2D && is_array && depth_compare) || - (texture_type == Tegra::Shader::TextureType::TextureCube && is_array && depth_compare)); - - const std::string read_method = lod_needed && gl_lod_supported ? "textureLod(" : "texture("; - std::string texture = read_method + sampler + ", coord"; - - UNIMPLEMENTED_IF(process_mode != Tegra::Shader::TextureProcessMode::None && - !gl_lod_supported); - - if (process_mode != Tegra::Shader::TextureProcessMode::None && gl_lod_supported) { - if (process_mode == Tegra::Shader::TextureProcessMode::LZ) { - texture += ", 0.0"; - } else { - // If present, lod or bias are always stored in the register indexed by the - // gpr20 - // field with an offset depending on the usage of the other registers - texture += ',' + regs.GetRegisterAsFloat(instr.gpr20.Value() + bias_offset); - } - } - texture += ")"; - return texture; - } - - std::pair<std::string, std::string> GetTEXCode( - const Instruction& instr, const Tegra::Shader::TextureType texture_type, - const Tegra::Shader::TextureProcessMode process_mode, const bool depth_compare, - const bool is_array) { - const bool lod_bias_enabled = (process_mode != Tegra::Shader::TextureProcessMode::None && - process_mode != Tegra::Shader::TextureProcessMode::LZ); - - const auto [coord_count, coord_dcl] = ValidateAndGetCoordinateElement( - texture_type, depth_compare, is_array, lod_bias_enabled, 4, 5); - // If enabled arrays index is always stored in the gpr8 field - const u64 array_register = instr.gpr8.Value(); - // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used - const u64 coord_register = array_register + (is_array ? 1 : 0); - - std::string coord = coord_dcl; - for (size_t i = 0; i < coord_count;) { - coord += regs.GetRegisterAsFloat(coord_register + i); - ++i; - if (i != coord_count) { - coord += ','; - } - } - // 1D.DC in opengl the 2nd component is ignored. 
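Editor's note: the removed GetTEXCode above reads its operands from a fixed register layout: the array layer, when present, always sits in gpr8, the coordinates follow it, and the reference depth lives in gpr20, shifted by one register when a lod or bias is also passed. A standalone sketch of that layout computation, with an invented struct and names, added only to summarise the removed logic:

    #include <cstdint>

    struct TexOperands {
        std::uint64_t array_register; // layer index, only meaningful for array textures
        std::uint64_t first_coord;    // first coordinate register
        std::uint64_t depth_register; // compare reference, only meaningful with depth compare
    };

    TexOperands LayoutTexOperands(std::uint64_t gpr8, std::uint64_t gpr20, bool is_array,
                                  bool lod_bias_enabled) {
        TexOperands operands{};
        operands.array_register = gpr8;
        operands.first_coord = gpr8 + (is_array ? 1 : 0);
        // Depth follows in gpr20, one register later when a lod or bias occupies gpr20 itself.
        operands.depth_register = gpr20 + (lod_bias_enabled ? 1 : 0);
        return operands;
    }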
- if (depth_compare && !is_array && texture_type == Tegra::Shader::TextureType::Texture1D) { - coord += ",0.0"; - } - if (is_array) { - coord += ',' + regs.GetRegisterAsInteger(array_register); - } - if (depth_compare) { - // Depth is always stored in the register signaled by gpr20 - // or in the next register if lod or bias are used - const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0); - coord += ',' + regs.GetRegisterAsFloat(depth_register); - } - coord += ");"; - return std::make_pair( - coord, GetTextureCode(instr, texture_type, process_mode, depth_compare, is_array, 0)); - } - - std::pair<std::string, std::string> GetTEXSCode( - const Instruction& instr, const Tegra::Shader::TextureType texture_type, - const Tegra::Shader::TextureProcessMode process_mode, const bool depth_compare, - const bool is_array) { - const bool lod_bias_enabled = (process_mode != Tegra::Shader::TextureProcessMode::None && - process_mode != Tegra::Shader::TextureProcessMode::LZ); - - const auto [coord_count, coord_dcl] = ValidateAndGetCoordinateElement( - texture_type, depth_compare, is_array, lod_bias_enabled, 4, 4); - // If enabled arrays index is always stored in the gpr8 field - const u64 array_register = instr.gpr8.Value(); - // First coordinate index is stored in gpr8 field or (gpr8 + 1) when arrays are used - const u64 coord_register = array_register + (is_array ? 1 : 0); - const u64 last_coord_register = - (is_array || !(lod_bias_enabled || depth_compare) || (coord_count > 2)) - ? static_cast<u64>(instr.gpr20.Value()) - : coord_register + 1; - - std::string coord = coord_dcl; - for (size_t i = 0; i < coord_count; ++i) { - const bool last = (i == (coord_count - 1)) && (coord_count > 1); - coord += regs.GetRegisterAsFloat(last ? last_coord_register : coord_register + i); - if (i < coord_count - 1) { - coord += ','; - } - } - - if (is_array) { - coord += ',' + regs.GetRegisterAsInteger(array_register); - } - if (depth_compare) { - // Depth is always stored in the register signaled by gpr20 - // or in the next register if lod or bias are used - const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0); - coord += ',' + regs.GetRegisterAsFloat(depth_register); - } - coord += ");"; - - return std::make_pair(coord, - GetTextureCode(instr, texture_type, process_mode, depth_compare, - is_array, (coord_count > 2 ? 1 : 0))); + return ApplyPrecise( + operation, BitwiseCastResult('(' + op_a + ' ' + func + ' ' + op_b + ')', result_type)); } - std::pair<std::string, std::string> GetTLD4Code(const Instruction& instr, - const Tegra::Shader::TextureType texture_type, - const bool depth_compare, const bool is_array) { - - const size_t coord_count = TextureCoordinates(texture_type); - const size_t total_coord_count = coord_count + (is_array ? 1 : 0); - const size_t total_reg_count = total_coord_count + (depth_compare ? 1 : 0); - - constexpr std::array<const char*, 5> coord_container{ - {"", "", "vec2 coord = vec2(", "vec3 coord = vec3(", "vec4 coord = vec4("}}; - - // If enabled arrays index is always stored in the gpr8 field - const u64 array_register = instr.gpr8.Value(); - // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used - const u64 coord_register = array_register + (is_array ? 
1 : 0); + std::string GenerateBinaryCall(Operation operation, const std::string& func, Type result_type, + Type type_a, Type type_b) { + const std::string op_a = VisitOperand(operation, 0, type_a); + const std::string op_b = VisitOperand(operation, 1, type_b); - std::string coord = coord_container[total_coord_count]; - for (size_t i = 0; i < coord_count;) { - coord += regs.GetRegisterAsFloat(coord_register + i); - ++i; - if (i != coord_count) { - coord += ','; - } - } - - if (is_array) { - coord += ',' + regs.GetRegisterAsInteger(array_register); - } - coord += ");"; - - const std::string sampler = - GetSampler(instr.sampler, texture_type, is_array, depth_compare); - - std::string texture = "textureGather(" + sampler + ", coord, "; - if (depth_compare) { - // Depth is always stored in the register signaled by gpr20 - texture += regs.GetRegisterAsFloat(instr.gpr20.Value()) + ')'; - } else { - texture += std::to_string(instr.tld4.component) + ')'; - } - return std::make_pair(coord, texture); + return ApplyPrecise(operation, + BitwiseCastResult(func + '(' + op_a + ", " + op_b + ')', result_type)); } - std::pair<std::string, std::string> GetTLDSCode(const Instruction& instr, - const Tegra::Shader::TextureType texture_type, - const bool is_array) { + std::string GenerateTernary(Operation operation, const std::string& func, Type result_type, + Type type_a, Type type_b, Type type_c) { + const std::string op_a = VisitOperand(operation, 0, type_a); + const std::string op_b = VisitOperand(operation, 1, type_b); + const std::string op_c = VisitOperand(operation, 2, type_c); - const size_t coord_count = TextureCoordinates(texture_type); - const size_t total_coord_count = coord_count + (is_array ? 1 : 0); - const bool lod_enabled = - instr.tlds.GetTextureProcessMode() == Tegra::Shader::TextureProcessMode::LL; - - constexpr std::array<const char*, 4> coord_container{ - {"", "int coords = (", "ivec2 coords = ivec2(", "ivec3 coords = ivec3("}}; - - std::string coord = coord_container[total_coord_count]; - - // If enabled arrays index is always stored in the gpr8 field - const u64 array_register = instr.gpr8.Value(); - - // if is array gpr20 is used - const u64 coord_register = is_array ? instr.gpr20.Value() : instr.gpr8.Value(); - - const u64 last_coord_register = - ((coord_count > 2) || (coord_count == 2 && !lod_enabled)) && !is_array - ? static_cast<u64>(instr.gpr20.Value()) - : coord_register + 1; - - for (size_t i = 0; i < coord_count; ++i) { - const bool last = (i == (coord_count - 1)) && (coord_count > 1); - coord += regs.GetRegisterAsInteger(last ? last_coord_register : coord_register + i); - if (i < coord_count - 1) { - coord += ','; - } - } - if (is_array) { - coord += ',' + regs.GetRegisterAsInteger(array_register); - } - coord += ");"; - - const std::string sampler = GetSampler(instr.sampler, texture_type, is_array, false); - - std::string texture = "texelFetch(" + sampler + ", coords"; - - if (lod_enabled) { - // When lod is used always is in grp20 - texture += ", " + regs.GetRegisterAsInteger(instr.gpr20) + ')'; - } else { - texture += ", 0)"; - } - return std::make_pair(coord, texture); - } - - /** - * Compiles a single instruction from Tegra to GLSL. - * @param offset the offset of the Tegra shader instruction. - * @return the offset of the next instruction to execute. Usually it is the current offset - * + 1. If the current instruction always terminates the program, returns PROGRAM_END. - */ - u32 CompileInstr(u32 offset) { - // Ignore sched instructions when generating code. 
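Editor's note: the removed CompileInstr, which begins just above, decoded raw Tegra words and switched on the opcode type; in the rewritten decompiler that role falls to the Visit method earlier in this hunk, which indexes a table of member-function pointers by IR operation code. A minimal standalone sketch of that table-driven pattern (the class and handler names are invented for illustration):

    #include <array>
    #include <cstddef>
    #include <string>

    class MiniDecompiler {
    public:
        std::string Visit(std::size_t operation_index) {
            using Handler = std::string (MiniDecompiler::*)();
            static constexpr std::array<Handler, 2> handlers{&MiniDecompiler::Add,
                                                             &MiniDecompiler::Mul};
            if (operation_index >= handlers.size() || handlers[operation_index] == nullptr) {
                return "// unimplemented operation";
            }
            return (this->*handlers[operation_index])();
        }

    private:
        std::string Add() { return "(a + b)"; }
        std::string Mul() { return "(a * b)"; }
    };

The real table maps every IR operation to helpers such as Add<Type::Float> or Min<Type::Int>, and a missing entry trips UNREACHABLE_MSG rather than returning a comment.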
- if (IsSchedInstruction(offset)) { - return offset + 1; - } - - const Instruction instr = {program_code[offset]}; - const auto opcode = OpCode::Decode(instr); - - // Decoding failure - if (!opcode) { - UNIMPLEMENTED_MSG("Unhandled instruction: {0:x}", instr.value); - return offset + 1; - } - - shader.AddLine( - fmt::format("// {}: {} (0x{:016x})", offset, opcode->get().GetName(), instr.value)); - - using Tegra::Shader::Pred; - UNIMPLEMENTED_IF_MSG(instr.pred.full_pred == Pred::NeverExecute, - "NeverExecute predicate not implemented"); - - // Some instructions (like SSY) don't have a predicate field, they are always - // unconditionally executed. - bool can_be_predicated = OpCode::IsPredicatedInstruction(opcode->get().GetId()); + return ApplyPrecise( + operation, + BitwiseCastResult(func + '(' + op_a + ", " + op_b + ", " + op_c + ')', result_type)); + } - if (can_be_predicated && instr.pred.pred_index != static_cast<u64>(Pred::UnusedIndex)) { - shader.AddLine("if (" + - GetPredicateCondition(instr.pred.pred_index, instr.negate_pred != 0) + - ')'); - shader.AddLine('{'); - ++shader.scope; - } + std::string GenerateQuaternary(Operation operation, const std::string& func, Type result_type, + Type type_a, Type type_b, Type type_c, Type type_d) { + const std::string op_a = VisitOperand(operation, 0, type_a); + const std::string op_b = VisitOperand(operation, 1, type_b); + const std::string op_c = VisitOperand(operation, 2, type_c); + const std::string op_d = VisitOperand(operation, 3, type_d); - switch (opcode->get().GetType()) { - case OpCode::Type::Arithmetic: { - std::string op_a = regs.GetRegisterAsFloat(instr.gpr8); + return ApplyPrecise(operation, BitwiseCastResult(func + '(' + op_a + ", " + op_b + ", " + + op_c + ", " + op_d + ')', + result_type)); + } - std::string op_b; + std::string GenerateTexture(Operation operation, const std::string& func, + bool is_extra_int = false) { + constexpr std::array<const char*, 4> coord_constructors = {"float", "vec2", "vec3", "vec4"}; - if (instr.is_b_imm) { - op_b = GetImmediate19(instr); - } else { - if (instr.is_b_gpr) { - op_b = regs.GetRegisterAsFloat(instr.gpr20); - } else { - op_b = regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Float); - } - } + const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); + const auto count = static_cast<u32>(operation.GetOperandsCount()); + ASSERT(meta); - switch (opcode->get().GetId()) { - case OpCode::Id::MOV_C: - case OpCode::Id::MOV_R: { - // MOV does not have neither 'abs' nor 'neg' bits. - regs.SetRegisterToFloat(instr.gpr0, 0, op_b, 1, 1); - break; - } + std::string expr = func; + expr += '('; + expr += GetSampler(meta->sampler); + expr += ", "; - case OpCode::Id::FMUL_C: - case OpCode::Id::FMUL_R: - case OpCode::Id::FMUL_IMM: { - // FMUL does not have 'abs' bits and only the second operand has a 'neg' bit. - UNIMPLEMENTED_IF_MSG(instr.fmul.tab5cb8_2 != 0, - "FMUL tab5cb8_2({}) is not implemented", - instr.fmul.tab5cb8_2.Value()); - UNIMPLEMENTED_IF_MSG( - instr.fmul.tab5c68_0 != 1, "FMUL tab5cb8_0({}) is not implemented", - instr.fmul.tab5c68_0 - .Value()); // SMO typical sends 1 here which seems to be the default - - op_b = GetOperandAbsNeg(op_b, false, instr.fmul.negate_b); - - std::string postfactor_op; - if (instr.fmul.postfactor != 0) { - s8 postfactor = static_cast<s8>(instr.fmul.postfactor); - - // postfactor encoded as 3-bit 1's complement in instruction, - // interpreted with below logic. 
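Editor's note: the comment just above describes the FMUL postfactor, a 3-bit field stored in one's complement that scales the product by a power of two, downwards for raw values 1 to 3 and upwards for 4 to 6. A standalone decode mirroring the removed logic that follows (the helper name is invented; this is an editorial sketch, not patch code):

    #include <cstdint>

    float DecodePostfactorScale(std::uint32_t raw /* 3-bit field */) {
        int shift = static_cast<int>(raw);
        shift = shift >= 4 ? 7 - shift : -shift;
        if (shift == 0) {
            return 1.0f; // raw 0 (and raw 7) leave the product untouched
        }
        return shift > 0 ? static_cast<float>(1 << shift)          // raw 4..6 -> x8, x4, x2
                         : 1.0f / static_cast<float>(1 << -shift); // raw 1..3 -> /2, /4, /8
    }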
- if (postfactor >= 4) { - postfactor = 7 - postfactor; - } else { - postfactor = 0 - postfactor; - } + expr += coord_constructors[meta->coords_count - 1]; + expr += '('; + for (u32 i = 0; i < count; ++i) { + const bool is_extra = i >= meta->coords_count; + const bool is_array = i == meta->array_index; - if (postfactor > 0) { - postfactor_op = " * " + std::to_string(1 << postfactor); + std::string operand = [&]() { + if (is_extra && is_extra_int) { + if (const auto immediate = std::get_if<ImmediateNode>(operation[i])) { + return std::to_string(static_cast<s32>(immediate->GetValue())); } else { - postfactor_op = " / " + std::to_string(1 << -postfactor); + return "ftoi(" + Visit(operation[i]) + ')'; } - } - - regs.SetRegisterToFloat(instr.gpr0, 0, op_a + " * " + op_b + postfactor_op, 1, 1, - instr.alu.saturate_d, instr.generates_cc, 0, true); - break; - } - case OpCode::Id::FADD_C: - case OpCode::Id::FADD_R: - case OpCode::Id::FADD_IMM: { - op_a = GetOperandAbsNeg(op_a, instr.alu.abs_a, instr.alu.negate_a); - op_b = GetOperandAbsNeg(op_b, instr.alu.abs_b, instr.alu.negate_b); - - regs.SetRegisterToFloat(instr.gpr0, 0, op_a + " + " + op_b, 1, 1, - instr.alu.saturate_d, instr.generates_cc, 0, true); - break; - } - case OpCode::Id::MUFU: { - op_a = GetOperandAbsNeg(op_a, instr.alu.abs_a, instr.alu.negate_a); - switch (instr.sub_op) { - case SubOp::Cos: - regs.SetRegisterToFloat(instr.gpr0, 0, "cos(" + op_a + ')', 1, 1, - instr.alu.saturate_d, false, 0, true); - break; - case SubOp::Sin: - regs.SetRegisterToFloat(instr.gpr0, 0, "sin(" + op_a + ')', 1, 1, - instr.alu.saturate_d, false, 0, true); - break; - case SubOp::Ex2: - regs.SetRegisterToFloat(instr.gpr0, 0, "exp2(" + op_a + ')', 1, 1, - instr.alu.saturate_d, false, 0, true); - break; - case SubOp::Lg2: - regs.SetRegisterToFloat(instr.gpr0, 0, "log2(" + op_a + ')', 1, 1, - instr.alu.saturate_d, false, 0, true); - break; - case SubOp::Rcp: - regs.SetRegisterToFloat(instr.gpr0, 0, "1.0 / " + op_a, 1, 1, - instr.alu.saturate_d, false, 0, true); - break; - case SubOp::Rsq: - regs.SetRegisterToFloat(instr.gpr0, 0, "inversesqrt(" + op_a + ')', 1, 1, - instr.alu.saturate_d, false, 0, true); - break; - case SubOp::Sqrt: - regs.SetRegisterToFloat(instr.gpr0, 0, "sqrt(" + op_a + ')', 1, 1, - instr.alu.saturate_d, false, 0, true); - break; - default: - UNIMPLEMENTED_MSG("Unhandled MUFU sub op={0:x}", - static_cast<unsigned>(instr.sub_op.Value())); - } - break; - } - case OpCode::Id::FMNMX_C: - case OpCode::Id::FMNMX_R: - case OpCode::Id::FMNMX_IMM: { - UNIMPLEMENTED_IF_MSG( - instr.generates_cc, - "Condition codes generation in FMNMX is partially implemented"); - - op_a = GetOperandAbsNeg(op_a, instr.alu.abs_a, instr.alu.negate_a); - op_b = GetOperandAbsNeg(op_b, instr.alu.abs_b, instr.alu.negate_b); - - std::string condition = - GetPredicateCondition(instr.alu.fmnmx.pred, instr.alu.fmnmx.negate_pred != 0); - std::string parameters = op_a + ',' + op_b; - regs.SetRegisterToFloat(instr.gpr0, 0, - '(' + condition + ") ? min(" + parameters + ") : max(" + - parameters + ')', - 1, 1, false, instr.generates_cc, 0, true); - break; - } - case OpCode::Id::RRO_C: - case OpCode::Id::RRO_R: - case OpCode::Id::RRO_IMM: { - // Currently RRO is only implemented as a register move. 
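Editor's note: a few lines above, the removed FMNMX lowering shows how one opcode provides both a minimum and a maximum, selected at run time by a predicate, and with the always-true predicate it degenerates to a plain min. A tiny standalone equivalent, with an invented name, purely as an editorial illustration:

    #include <algorithm>

    float Fmnmx(float op_a, float op_b, bool predicate) {
        // predicate == true (for instance Pred 7, the always-true index) selects min, otherwise max.
        return predicate ? std::min(op_a, op_b) : std::max(op_a, op_b);
    }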
- op_b = GetOperandAbsNeg(op_b, instr.alu.abs_b, instr.alu.negate_b); - regs.SetRegisterToFloat(instr.gpr0, 0, op_b, 1, 1); - LOG_WARNING(HW_GPU, "RRO instruction is incomplete"); - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled arithmetic instruction: {}", opcode->get().GetName()); - } - } - break; - } - case OpCode::Type::ArithmeticImmediate: { - switch (opcode->get().GetId()) { - case OpCode::Id::MOV32_IMM: { - regs.SetRegisterToFloat(instr.gpr0, 0, GetImmediate32(instr), 1, 1); - break; - } - case OpCode::Id::FMUL32_IMM: { - regs.SetRegisterToFloat( - instr.gpr0, 0, - regs.GetRegisterAsFloat(instr.gpr8) + " * " + GetImmediate32(instr), 1, 1, - instr.fmul32.saturate, instr.op_32.generates_cc, 0, true); - break; - } - case OpCode::Id::FADD32I: { - UNIMPLEMENTED_IF_MSG( - instr.op_32.generates_cc, - "Condition codes generation in FADD32I is partially implemented"); - - std::string op_a = regs.GetRegisterAsFloat(instr.gpr8); - std::string op_b = GetImmediate32(instr); - - if (instr.fadd32i.abs_a) { - op_a = "abs(" + op_a + ')'; - } - - if (instr.fadd32i.negate_a) { - op_a = "-(" + op_a + ')'; - } - - if (instr.fadd32i.abs_b) { - op_b = "abs(" + op_b + ')'; - } - - if (instr.fadd32i.negate_b) { - op_b = "-(" + op_b + ')'; - } - - regs.SetRegisterToFloat(instr.gpr0, 0, op_a + " + " + op_b, 1, 1, false, - instr.op_32.generates_cc, 0, true); - break; - } - } - break; - } - case OpCode::Type::Bfe: { - UNIMPLEMENTED_IF(instr.bfe.negate_b); - - std::string op_a = instr.bfe.negate_a ? "-" : ""; - op_a += regs.GetRegisterAsInteger(instr.gpr8); - - switch (opcode->get().GetId()) { - case OpCode::Id::BFE_IMM: { - std::string inner_shift = - '(' + op_a + " << " + std::to_string(instr.bfe.GetLeftShiftValue()) + ')'; - std::string outer_shift = - '(' + inner_shift + " >> " + - std::to_string(instr.bfe.GetLeftShiftValue() + instr.bfe.shift_position) + ')'; - - regs.SetRegisterToInteger(instr.gpr0, true, 0, outer_shift, 1, 1, false, - instr.generates_cc); - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled BFE instruction: {}", opcode->get().GetName()); - } - } - - break; - } - case OpCode::Type::Bfi: { - const auto [base, packed_shift] = [&]() -> std::tuple<std::string, std::string> { - switch (opcode->get().GetId()) { - case OpCode::Id::BFI_IMM_R: - return {regs.GetRegisterAsInteger(instr.gpr39, 0, false), - std::to_string(instr.alu.GetSignedImm20_20())}; - default: - UNREACHABLE(); - return {regs.GetRegisterAsInteger(instr.gpr39, 0, false), - std::to_string(instr.alu.GetSignedImm20_20())}; - } - }(); - const std::string offset = '(' + packed_shift + " & 0xff)"; - const std::string bits = "((" + packed_shift + " >> 8) & 0xff)"; - const std::string insert = regs.GetRegisterAsInteger(instr.gpr8, 0, false); - regs.SetRegisterToInteger(instr.gpr0, false, 0, - "bitfieldInsert(" + base + ", " + insert + ", " + offset + - ", " + bits + ')', - 1, 1, false, instr.generates_cc); - break; - } - case OpCode::Type::Shift: { - std::string op_a = regs.GetRegisterAsInteger(instr.gpr8, 0, true); - std::string op_b; - - if (instr.is_b_imm) { - op_b += '(' + std::to_string(instr.alu.GetSignedImm20_20()) + ')'; - } else { - if (instr.is_b_gpr) { - op_b += regs.GetRegisterAsInteger(instr.gpr20); } else { - op_b += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Integer); + return Visit(operation[i]); } + }(); + if (is_array) { + ASSERT(!is_extra); + operand = "float(ftoi(" + operand + "))"; } - switch (opcode->get().GetId()) { - case OpCode::Id::SHR_C: - case OpCode::Id::SHR_R: - 
case OpCode::Id::SHR_IMM: { - if (!instr.shift.is_signed) { - // Logical shift right - op_a = "uint(" + op_a + ')'; - } + expr += operand; - // Cast to int is superfluous for arithmetic shift, it's only for a logical shift - regs.SetRegisterToInteger(instr.gpr0, true, 0, "int(" + op_a + " >> " + op_b + ')', - 1, 1, false, instr.generates_cc); - break; - } - case OpCode::Id::SHL_C: - case OpCode::Id::SHL_R: - case OpCode::Id::SHL_IMM: - UNIMPLEMENTED_IF_MSG(instr.generates_cc, - "Condition codes generation in SHL is not implemented"); - regs.SetRegisterToInteger(instr.gpr0, true, 0, op_a + " << " + op_b, 1, 1, false, - instr.generates_cc); - break; - default: { - UNIMPLEMENTED_MSG("Unhandled shift instruction: {}", opcode->get().GetName()); + if (i + 1 == meta->coords_count) { + expr += ')'; } + if (i + 1 < count) { + expr += ", "; } - break; } - case OpCode::Type::ArithmeticIntegerImmediate: { - std::string op_a = regs.GetRegisterAsInteger(instr.gpr8); - std::string op_b = std::to_string(instr.alu.imm20_32.Value()); - - switch (opcode->get().GetId()) { - case OpCode::Id::IADD32I: - UNIMPLEMENTED_IF_MSG( - instr.op_32.generates_cc, - "Condition codes generation in IADD32I is partially implemented"); - - if (instr.iadd32i.negate_a) - op_a = "-(" + op_a + ')'; - - regs.SetRegisterToInteger(instr.gpr0, true, 0, op_a + " + " + op_b, 1, 1, - instr.iadd32i.saturate, instr.op_32.generates_cc); - break; - case OpCode::Id::LOP32I: { - - if (instr.alu.lop32i.invert_a) - op_a = "~(" + op_a + ')'; - - if (instr.alu.lop32i.invert_b) - op_b = "~(" + op_b + ')'; - - WriteLogicOperation(instr.gpr0, instr.alu.lop32i.operation, op_a, op_b, - Tegra::Shader::PredicateResultMode::None, - Tegra::Shader::Pred::UnusedIndex, instr.op_32.generates_cc); - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled ArithmeticIntegerImmediate instruction: {}", - opcode->get().GetName()); - } - } - break; - } - case OpCode::Type::ArithmeticInteger: { - std::string op_a = regs.GetRegisterAsInteger(instr.gpr8); - std::string op_b; - if (instr.is_b_imm) { - op_b += '(' + std::to_string(instr.alu.GetSignedImm20_20()) + ')'; - } else { - if (instr.is_b_gpr) { - op_b += regs.GetRegisterAsInteger(instr.gpr20); - } else { - op_b += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Integer); - } - } - - switch (opcode->get().GetId()) { - case OpCode::Id::IADD_C: - case OpCode::Id::IADD_R: - case OpCode::Id::IADD_IMM: { - UNIMPLEMENTED_IF_MSG(instr.generates_cc, - "Condition codes generation in IADD is partially implemented"); - - if (instr.alu_integer.negate_a) - op_a = "-(" + op_a + ')'; - - if (instr.alu_integer.negate_b) - op_b = "-(" + op_b + ')'; - - regs.SetRegisterToInteger(instr.gpr0, true, 0, op_a + " + " + op_b, 1, 1, - instr.alu.saturate_d, instr.generates_cc); - break; - } - case OpCode::Id::IADD3_C: - case OpCode::Id::IADD3_R: - case OpCode::Id::IADD3_IMM: { - UNIMPLEMENTED_IF_MSG( - instr.generates_cc, - "Condition codes generation in IADD3 is partially implemented"); - - std::string op_c = regs.GetRegisterAsInteger(instr.gpr39); - - auto apply_height = [](auto height, auto& oprand) { - switch (height) { - case Tegra::Shader::IAdd3Height::None: - break; - case Tegra::Shader::IAdd3Height::LowerHalfWord: - oprand = "((" + oprand + ") & 0xFFFF)"; - break; - case Tegra::Shader::IAdd3Height::UpperHalfWord: - oprand = "((" + oprand + ") >> 16)"; - break; - default: - UNIMPLEMENTED_MSG("Unhandled IADD3 height: {}", - static_cast<u32>(height.Value())); - } - }; - - if (opcode->get().GetId() == 
OpCode::Id::IADD3_R) { - apply_height(instr.iadd3.height_a, op_a); - apply_height(instr.iadd3.height_b, op_b); - apply_height(instr.iadd3.height_c, op_c); - } - - if (instr.iadd3.neg_a) - op_a = "-(" + op_a + ')'; - - if (instr.iadd3.neg_b) - op_b = "-(" + op_b + ')'; - - if (instr.iadd3.neg_c) - op_c = "-(" + op_c + ')'; - - std::string result; - if (opcode->get().GetId() == OpCode::Id::IADD3_R) { - switch (instr.iadd3.mode) { - case Tegra::Shader::IAdd3Mode::RightShift: - // TODO(tech4me): According to - // https://envytools.readthedocs.io/en/latest/hw/graph/maxwell/cuda/int.html?highlight=iadd3 - // The addition between op_a and op_b should be done in uint33, more - // investigation required - result = "(((" + op_a + " + " + op_b + ") >> 16) + " + op_c + ')'; - break; - case Tegra::Shader::IAdd3Mode::LeftShift: - result = "(((" + op_a + " + " + op_b + ") << 16) + " + op_c + ')'; - break; - default: - result = '(' + op_a + " + " + op_b + " + " + op_c + ')'; - break; - } - } else { - result = '(' + op_a + " + " + op_b + " + " + op_c + ')'; - } - - regs.SetRegisterToInteger(instr.gpr0, true, 0, result, 1, 1, false, - instr.generates_cc); - break; - } - case OpCode::Id::ISCADD_C: - case OpCode::Id::ISCADD_R: - case OpCode::Id::ISCADD_IMM: { - UNIMPLEMENTED_IF_MSG( - instr.generates_cc, - "Condition codes generation in ISCADD is partially implemented"); - - if (instr.alu_integer.negate_a) - op_a = "-(" + op_a + ')'; - - if (instr.alu_integer.negate_b) - op_b = "-(" + op_b + ')'; - - const std::string shift = std::to_string(instr.alu_integer.shift_amount.Value()); - - regs.SetRegisterToInteger(instr.gpr0, true, 0, - "((" + op_a + " << " + shift + ") + " + op_b + ')', 1, 1, - false, instr.generates_cc); - break; - } - case OpCode::Id::POPC_C: - case OpCode::Id::POPC_R: - case OpCode::Id::POPC_IMM: { - if (instr.popc.invert) { - op_b = "~(" + op_b + ')'; - } - regs.SetRegisterToInteger(instr.gpr0, true, 0, "bitCount(" + op_b + ')', 1, 1); - break; - } - case OpCode::Id::SEL_C: - case OpCode::Id::SEL_R: - case OpCode::Id::SEL_IMM: { - const std::string condition = - GetPredicateCondition(instr.sel.pred, instr.sel.neg_pred != 0); - regs.SetRegisterToInteger(instr.gpr0, true, 0, - '(' + condition + ") ? 
" + op_a + " : " + op_b, 1, 1); - break; - } - case OpCode::Id::LOP_C: - case OpCode::Id::LOP_R: - case OpCode::Id::LOP_IMM: { - - if (instr.alu.lop.invert_a) - op_a = "~(" + op_a + ')'; - - if (instr.alu.lop.invert_b) - op_b = "~(" + op_b + ')'; - - WriteLogicOperation(instr.gpr0, instr.alu.lop.operation, op_a, op_b, - instr.alu.lop.pred_result_mode, instr.alu.lop.pred48, - instr.generates_cc); - break; - } - case OpCode::Id::LOP3_C: - case OpCode::Id::LOP3_R: - case OpCode::Id::LOP3_IMM: { - const std::string op_c = regs.GetRegisterAsInteger(instr.gpr39); - std::string lut; - - if (opcode->get().GetId() == OpCode::Id::LOP3_R) { - lut = '(' + std::to_string(instr.alu.lop3.GetImmLut28()) + ')'; - } else { - lut = '(' + std::to_string(instr.alu.lop3.GetImmLut48()) + ')'; - } - - WriteLop3Instruction(instr.gpr0, op_a, op_b, op_c, lut, instr.generates_cc); - break; - } - case OpCode::Id::IMNMX_C: - case OpCode::Id::IMNMX_R: - case OpCode::Id::IMNMX_IMM: { - UNIMPLEMENTED_IF(instr.imnmx.exchange != Tegra::Shader::IMinMaxExchange::None); - UNIMPLEMENTED_IF_MSG( - instr.generates_cc, - "Condition codes generation in IMNMX is partially implemented"); - - const std::string condition = - GetPredicateCondition(instr.imnmx.pred, instr.imnmx.negate_pred != 0); - const std::string parameters = op_a + ',' + op_b; - regs.SetRegisterToInteger(instr.gpr0, instr.imnmx.is_signed, 0, - '(' + condition + ") ? min(" + parameters + ") : max(" + - parameters + ')', - 1, 1, false, instr.generates_cc); - break; - } - case OpCode::Id::LEA_R2: - case OpCode::Id::LEA_R1: - case OpCode::Id::LEA_IMM: - case OpCode::Id::LEA_RZ: - case OpCode::Id::LEA_HI: { - std::string op_c; - - switch (opcode->get().GetId()) { - case OpCode::Id::LEA_R2: { - op_a = regs.GetRegisterAsInteger(instr.gpr20); - op_b = regs.GetRegisterAsInteger(instr.gpr39); - op_c = std::to_string(instr.lea.r2.entry_a); - break; - } - - case OpCode::Id::LEA_R1: { - const bool neg = instr.lea.r1.neg != 0; - op_a = regs.GetRegisterAsInteger(instr.gpr8); - if (neg) - op_a = "-(" + op_a + ')'; - op_b = regs.GetRegisterAsInteger(instr.gpr20); - op_c = std::to_string(instr.lea.r1.entry_a); - break; - } - - case OpCode::Id::LEA_IMM: { - const bool neg = instr.lea.imm.neg != 0; - op_b = regs.GetRegisterAsInteger(instr.gpr8); - if (neg) - op_b = "-(" + op_b + ')'; - op_a = std::to_string(instr.lea.imm.entry_a); - op_c = std::to_string(instr.lea.imm.entry_b); - break; - } - - case OpCode::Id::LEA_RZ: { - const bool neg = instr.lea.rz.neg != 0; - op_b = regs.GetRegisterAsInteger(instr.gpr8); - if (neg) - op_b = "-(" + op_b + ')'; - op_a = regs.GetUniform(instr.lea.rz.cb_index, instr.lea.rz.cb_offset, - GLSLRegister::Type::Integer); - op_c = std::to_string(instr.lea.rz.entry_a); - - break; - } + expr += ')'; + return expr; + } - case OpCode::Id::LEA_HI: - default: { - op_b = regs.GetRegisterAsInteger(instr.gpr8); - op_a = std::to_string(instr.lea.imm.entry_a); - op_c = std::to_string(instr.lea.imm.entry_b); - UNIMPLEMENTED_MSG("Unhandled LEA subinstruction: {}", opcode->get().GetName()); - } - } - UNIMPLEMENTED_IF_MSG(instr.lea.pred48 != static_cast<u64>(Pred::UnusedIndex), - "Unhandled LEA Predicate"); - const std::string value = '(' + op_a + " + (" + op_b + "*(1 << " + op_c + ")))"; - regs.SetRegisterToInteger(instr.gpr0, true, 0, value, 1, 1, false, - instr.generates_cc); + std::string Assign(Operation operation) { + const Node dest = operation[0]; + const Node src = operation[1]; - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled ArithmeticInteger instruction: 
{}", - opcode->get().GetName()); - } + std::string target; + if (const auto gpr = std::get_if<GprNode>(dest)) { + if (gpr->GetIndex() == Register::ZeroIndex) { + // Writing to Register::ZeroIndex is a no op + return {}; } + target = GetRegister(gpr->GetIndex()); - break; - } - case OpCode::Type::ArithmeticHalf: { - if (opcode->get().GetId() == OpCode::Id::HADD2_C || - opcode->get().GetId() == OpCode::Id::HADD2_R) { - UNIMPLEMENTED_IF(instr.alu_half.ftz != 0); - } - const bool negate_a = - opcode->get().GetId() != OpCode::Id::HMUL2_R && instr.alu_half.negate_a != 0; - const bool negate_b = - opcode->get().GetId() != OpCode::Id::HMUL2_C && instr.alu_half.negate_b != 0; - - const std::string op_a = - GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.alu_half.type_a, - instr.alu_half.abs_a != 0, negate_a); - - std::string op_b; - switch (opcode->get().GetId()) { - case OpCode::Id::HADD2_C: - case OpCode::Id::HMUL2_C: - op_b = regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::UnsignedInteger); - break; - case OpCode::Id::HADD2_R: - case OpCode::Id::HMUL2_R: - op_b = regs.GetRegisterAsInteger(instr.gpr20, 0, false); - break; - default: - UNREACHABLE(); - op_b = "0"; - break; - } - op_b = GetHalfFloat(op_b, instr.alu_half.type_b, instr.alu_half.abs_b != 0, negate_b); - - const std::string result = [&]() { - switch (opcode->get().GetId()) { - case OpCode::Id::HADD2_C: - case OpCode::Id::HADD2_R: - return '(' + op_a + " + " + op_b + ')'; - case OpCode::Id::HMUL2_C: - case OpCode::Id::HMUL2_R: - return '(' + op_a + " * " + op_b + ')'; - default: - UNIMPLEMENTED_MSG("Unhandled half float instruction: {}", - opcode->get().GetName()); - return std::string("0"); - } - }(); - - regs.SetRegisterToHalfFloat(instr.gpr0, 0, result, instr.alu_half.merge, 1, 1, - instr.alu_half.saturate != 0); - break; - } - case OpCode::Type::ArithmeticHalfImmediate: { - if (opcode->get().GetId() == OpCode::Id::HADD2_IMM) { - UNIMPLEMENTED_IF(instr.alu_half_imm.ftz != 0); - } else { - UNIMPLEMENTED_IF(instr.alu_half_imm.precision != - Tegra::Shader::HalfPrecision::None); - } - - const std::string op_a = GetHalfFloat( - regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.alu_half_imm.type_a, - instr.alu_half_imm.abs_a != 0, instr.alu_half_imm.negate_a != 0); - - const std::string op_b = UnpackHalfImmediate(instr, true); - - const std::string result = [&]() { - switch (opcode->get().GetId()) { - case OpCode::Id::HADD2_IMM: - return op_a + " + " + op_b; - case OpCode::Id::HMUL2_IMM: - return op_a + " * " + op_b; + } else if (const auto abuf = std::get_if<AbufNode>(dest)) { + target = [&]() -> std::string { + switch (const auto attribute = abuf->GetIndex(); abuf->GetIndex()) { + case Attribute::Index::Position: + return "position" + GetSwizzle(abuf->GetElement()); + case Attribute::Index::PointSize: + return "gl_PointSize"; + case Attribute::Index::ClipDistances0123: + return "gl_ClipDistance[" + std::to_string(abuf->GetElement()) + ']'; + case Attribute::Index::ClipDistances4567: + return "gl_ClipDistance[" + std::to_string(abuf->GetElement() + 4) + ']'; default: - UNREACHABLE(); - return std::string("0"); + if (attribute >= Attribute::Index::Attribute_0 && + attribute <= Attribute::Index::Attribute_31) { + return GetOutputAttribute(attribute) + GetSwizzle(abuf->GetElement()); + } + UNIMPLEMENTED_MSG("Unhandled output attribute: {}", + static_cast<u32>(attribute)); + return "0"; } }(); - regs.SetRegisterToHalfFloat(instr.gpr0, 0, result, instr.alu_half_imm.merge, 1, 1, - 
instr.alu_half_imm.saturate != 0); - break; - } - case OpCode::Type::Ffma: { - const std::string op_a = regs.GetRegisterAsFloat(instr.gpr8); - std::string op_b = instr.ffma.negate_b ? "-" : ""; - std::string op_c = instr.ffma.negate_c ? "-" : ""; - - UNIMPLEMENTED_IF_MSG(instr.ffma.cc != 0, "FFMA cc not implemented"); - UNIMPLEMENTED_IF_MSG( - instr.ffma.tab5980_0 != 1, "FFMA tab5980_0({}) not implemented", - instr.ffma.tab5980_0.Value()); // Seems to be 1 by default based on SMO - UNIMPLEMENTED_IF_MSG(instr.ffma.tab5980_1 != 0, "FFMA tab5980_1({}) not implemented", - instr.ffma.tab5980_1.Value()); - UNIMPLEMENTED_IF_MSG(instr.generates_cc, - "Condition codes generation in FFMA is partially implemented"); - - switch (opcode->get().GetId()) { - case OpCode::Id::FFMA_CR: { - op_b += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Float); - op_c += regs.GetRegisterAsFloat(instr.gpr39); - break; - } - case OpCode::Id::FFMA_RR: { - op_b += regs.GetRegisterAsFloat(instr.gpr20); - op_c += regs.GetRegisterAsFloat(instr.gpr39); - break; - } - case OpCode::Id::FFMA_RC: { - op_b += regs.GetRegisterAsFloat(instr.gpr39); - op_c += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Float); - break; - } - case OpCode::Id::FFMA_IMM: { - op_b += GetImmediate19(instr); - op_c += regs.GetRegisterAsFloat(instr.gpr39); - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled FFMA instruction: {}", opcode->get().GetName()); - } - } + } else if (const auto lmem = std::get_if<LmemNode>(dest)) { + target = GetLocalMemory() + "[ftou(" + Visit(lmem->GetAddress()) + ") / 4]"; - regs.SetRegisterToFloat(instr.gpr0, 0, "fma(" + op_a + ", " + op_b + ", " + op_c + ')', - 1, 1, instr.alu.saturate_d, instr.generates_cc, 0, true); - break; + } else { + UNREACHABLE_MSG("Assign called without a proper target"); } - case OpCode::Type::Hfma2: { - if (opcode->get().GetId() == OpCode::Id::HFMA2_RR) { - UNIMPLEMENTED_IF(instr.hfma2.rr.precision != Tegra::Shader::HalfPrecision::None); - } else { - UNIMPLEMENTED_IF(instr.hfma2.precision != Tegra::Shader::HalfPrecision::None); - } - const bool saturate = opcode->get().GetId() == OpCode::Id::HFMA2_RR - ? 
instr.hfma2.rr.saturate != 0 - : instr.hfma2.saturate != 0; - - const std::string op_a = - GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.hfma2.type_a); - std::string op_b, op_c; - - switch (opcode->get().GetId()) { - case OpCode::Id::HFMA2_CR: - op_b = GetHalfFloat(regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::UnsignedInteger), - instr.hfma2.type_b, false, instr.hfma2.negate_b); - op_c = GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr39, 0, false), - instr.hfma2.type_reg39, false, instr.hfma2.negate_c); - break; - case OpCode::Id::HFMA2_RC: - op_b = GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr39, 0, false), - instr.hfma2.type_reg39, false, instr.hfma2.negate_b); - op_c = GetHalfFloat(regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::UnsignedInteger), - instr.hfma2.type_b, false, instr.hfma2.negate_c); - break; - case OpCode::Id::HFMA2_RR: - op_b = GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr20, 0, false), - instr.hfma2.type_b, false, instr.hfma2.negate_b); - op_c = GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr39, 0, false), - instr.hfma2.rr.type_c, false, instr.hfma2.rr.negate_c); - break; - case OpCode::Id::HFMA2_IMM_R: - op_b = UnpackHalfImmediate(instr, true); - op_c = GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr39, 0, false), - instr.hfma2.type_reg39, false, instr.hfma2.negate_c); - break; - default: - UNREACHABLE(); - op_c = op_b = "vec2(0)"; - break; - } - const std::string result = '(' + op_a + " * " + op_b + " + " + op_c + ')'; + code.AddLine(target + " = " + Visit(src) + ';'); + return {}; + } - regs.SetRegisterToHalfFloat(instr.gpr0, 0, result, instr.hfma2.merge, 1, 1, saturate); - break; + std::string Composite(Operation operation) { + std::string value = "vec4("; + for (std::size_t i = 0; i < 4; ++i) { + value += Visit(operation[i]); + if (i < 3) + value += ", "; } - case OpCode::Type::Conversion: { - switch (opcode->get().GetId()) { - case OpCode::Id::I2I_R: { - UNIMPLEMENTED_IF(instr.conversion.selector); - - std::string op_a = regs.GetRegisterAsInteger( - instr.gpr20, 0, instr.conversion.is_input_signed, instr.conversion.src_size); + value += ')'; + return value; + } - if (instr.conversion.abs_a) { - op_a = "abs(" + op_a + ')'; - } + template <Type type> + std::string Add(Operation operation) { + return GenerateBinaryInfix(operation, "+", type, type, type); + } - if (instr.conversion.negate_a) { - op_a = "-(" + op_a + ')'; - } + template <Type type> + std::string Mul(Operation operation) { + return GenerateBinaryInfix(operation, "*", type, type, type); + } - regs.SetRegisterToInteger(instr.gpr0, instr.conversion.is_output_signed, 0, op_a, 1, - 1, instr.alu.saturate_d, instr.generates_cc, 0, - instr.conversion.dest_size); - break; - } - case OpCode::Id::I2F_R: - case OpCode::Id::I2F_C: { - UNIMPLEMENTED_IF(instr.conversion.dest_size != Register::Size::Word); - UNIMPLEMENTED_IF(instr.conversion.selector); - std::string op_a; - - if (instr.is_b_gpr) { - op_a = - regs.GetRegisterAsInteger(instr.gpr20, 0, instr.conversion.is_input_signed, - instr.conversion.src_size); - } else { - op_a = regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - instr.conversion.is_input_signed - ? 
GLSLRegister::Type::Integer - : GLSLRegister::Type::UnsignedInteger, - instr.conversion.src_size); - } + template <Type type> + std::string Div(Operation operation) { + return GenerateBinaryInfix(operation, "/", type, type, type); + } - if (instr.conversion.abs_a) { - op_a = "abs(" + op_a + ')'; - } + template <Type type> + std::string Fma(Operation operation) { + return GenerateTernary(operation, "fma", type, type, type, type); + } - if (instr.conversion.negate_a) { - op_a = "-(" + op_a + ')'; - } + template <Type type> + std::string Negate(Operation operation) { + return GenerateUnary(operation, "-", type, type, true); + } - regs.SetRegisterToFloat(instr.gpr0, 0, op_a, 1, 1, false, instr.generates_cc); - break; - } - case OpCode::Id::F2F_R: { - UNIMPLEMENTED_IF(instr.conversion.dest_size != Register::Size::Word); - UNIMPLEMENTED_IF(instr.conversion.src_size != Register::Size::Word); - std::string op_a = regs.GetRegisterAsFloat(instr.gpr20); + template <Type type> + std::string Absolute(Operation operation) { + return GenerateUnary(operation, "abs", type, type, false); + } - if (instr.conversion.abs_a) { - op_a = "abs(" + op_a + ')'; - } + std::string FClamp(Operation operation) { + return GenerateTernary(operation, "clamp", Type::Float, Type::Float, Type::Float, + Type::Float); + } - if (instr.conversion.negate_a) { - op_a = "-(" + op_a + ')'; - } + template <Type type> + std::string Min(Operation operation) { + return GenerateBinaryCall(operation, "min", type, type, type); + } - switch (instr.conversion.f2f.rounding) { - case Tegra::Shader::F2fRoundingOp::None: - break; - case Tegra::Shader::F2fRoundingOp::Round: - op_a = "roundEven(" + op_a + ')'; - break; - case Tegra::Shader::F2fRoundingOp::Floor: - op_a = "floor(" + op_a + ')'; - break; - case Tegra::Shader::F2fRoundingOp::Ceil: - op_a = "ceil(" + op_a + ')'; - break; - case Tegra::Shader::F2fRoundingOp::Trunc: - op_a = "trunc(" + op_a + ')'; - break; - default: - UNIMPLEMENTED_MSG("Unimplemented F2F rounding mode {}", - static_cast<u32>(instr.conversion.f2f.rounding.Value())); - break; - } + template <Type type> + std::string Max(Operation operation) { + return GenerateBinaryCall(operation, "max", type, type, type); + } - regs.SetRegisterToFloat(instr.gpr0, 0, op_a, 1, 1, instr.alu.saturate_d, - instr.generates_cc); - break; - } - case OpCode::Id::F2I_R: - case OpCode::Id::F2I_C: { - UNIMPLEMENTED_IF(instr.conversion.src_size != Register::Size::Word); - std::string op_a{}; + std::string Select(Operation operation) { + const std::string condition = Visit(operation[0]); + const std::string true_case = Visit(operation[1]); + const std::string false_case = Visit(operation[2]); + return ApplyPrecise(operation, + '(' + condition + " ? 
" + true_case + " : " + false_case + ')'); + } - if (instr.is_b_gpr) { - op_a = regs.GetRegisterAsFloat(instr.gpr20); - } else { - op_a = regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Float); - } + std::string FCos(Operation operation) { + return GenerateUnary(operation, "cos", Type::Float, Type::Float, false); + } - if (instr.conversion.abs_a) { - op_a = "abs(" + op_a + ')'; - } + std::string FSin(Operation operation) { + return GenerateUnary(operation, "sin", Type::Float, Type::Float, false); + } - if (instr.conversion.negate_a) { - op_a = "-(" + op_a + ')'; - } + std::string FExp2(Operation operation) { + return GenerateUnary(operation, "exp2", Type::Float, Type::Float, false); + } - switch (instr.conversion.f2i.rounding) { - case Tegra::Shader::F2iRoundingOp::None: - break; - case Tegra::Shader::F2iRoundingOp::Floor: - op_a = "floor(" + op_a + ')'; - break; - case Tegra::Shader::F2iRoundingOp::Ceil: - op_a = "ceil(" + op_a + ')'; - break; - case Tegra::Shader::F2iRoundingOp::Trunc: - op_a = "trunc(" + op_a + ')'; - break; - default: - UNIMPLEMENTED_MSG("Unimplemented F2I rounding mode {}", - static_cast<u32>(instr.conversion.f2i.rounding.Value())); - break; - } + std::string FLog2(Operation operation) { + return GenerateUnary(operation, "log2", Type::Float, Type::Float, false); + } - if (instr.conversion.is_output_signed) { - op_a = "int(" + op_a + ')'; - } else { - op_a = "uint(" + op_a + ')'; - } + std::string FInverseSqrt(Operation operation) { + return GenerateUnary(operation, "inversesqrt", Type::Float, Type::Float, false); + } - regs.SetRegisterToInteger(instr.gpr0, instr.conversion.is_output_signed, 0, op_a, 1, - 1, false, instr.generates_cc, 0, - instr.conversion.dest_size); - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled conversion instruction: {}", opcode->get().GetName()); - } - } - break; - } - case OpCode::Type::Memory: { - switch (opcode->get().GetId()) { - case OpCode::Id::LD_A: { - // Note: Shouldn't this be interp mode flat? As in no interpolation made. - UNIMPLEMENTED_IF_MSG(instr.gpr8.Value() != Register::ZeroIndex, - "Indirect attribute loads are not supported"); - UNIMPLEMENTED_IF_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) != 0, - "Unaligned attribute loads are not supported"); - - Tegra::Shader::IpaMode input_mode{Tegra::Shader::IpaInterpMode::Perspective, - Tegra::Shader::IpaSampleMode::Default}; - - u64 next_element = instr.attribute.fmt20.element; - u64 next_index = static_cast<u64>(instr.attribute.fmt20.index.Value()); - - const auto LoadNextElement = [&](u32 reg_offset) { - regs.SetRegisterToInputAttibute(instr.gpr0.Value() + reg_offset, next_element, - static_cast<Attribute::Index>(next_index), - input_mode, instr.gpr39.Value()); - - // Load the next attribute element into the following register. If the element - // to load goes beyond the vec4 size, load the first element of the next - // attribute. - next_element = (next_element + 1) % 4; - next_index = next_index + (next_element == 0 ? 
1 : 0); - }; - - const u32 num_words = static_cast<u32>(instr.attribute.fmt20.size.Value()) + 1; - for (u32 reg_offset = 0; reg_offset < num_words; ++reg_offset) { - LoadNextElement(reg_offset); - } - break; - } - case OpCode::Id::LD_C: { - UNIMPLEMENTED_IF(instr.ld_c.unknown != 0); - - const auto scope = shader.Scope(); - - shader.AddLine("uint index = (" + regs.GetRegisterAsInteger(instr.gpr8, 0, false) + - " / 4) & (MAX_CONSTBUFFER_ELEMENTS - 1);"); - - const std::string op_a = - regs.GetUniformIndirect(instr.cbuf36.index, instr.cbuf36.offset + 0, "index", - GLSLRegister::Type::Float); - - switch (instr.ld_c.type.Value()) { - case Tegra::Shader::UniformType::Single: - regs.SetRegisterToFloat(instr.gpr0, 0, op_a, 1, 1); - break; - - case Tegra::Shader::UniformType::Double: { - const std::string op_b = - regs.GetUniformIndirect(instr.cbuf36.index, instr.cbuf36.offset + 4, - "index", GLSLRegister::Type::Float); - regs.SetRegisterToFloat(instr.gpr0, 0, op_a, 1, 1); - regs.SetRegisterToFloat(instr.gpr0.Value() + 1, 0, op_b, 1, 1); - break; - } - default: - UNIMPLEMENTED_MSG("Unhandled type: {}", - static_cast<unsigned>(instr.ld_c.type.Value())); - } - break; - } - case OpCode::Id::LD_L: { - UNIMPLEMENTED_IF_MSG(instr.ld_l.unknown == 1, "LD_L Unhandled mode: {}", - static_cast<unsigned>(instr.ld_l.unknown.Value())); + std::string FSqrt(Operation operation) { + return GenerateUnary(operation, "sqrt", Type::Float, Type::Float, false); + } - const auto scope = shader.Scope(); + std::string FRoundEven(Operation operation) { + return GenerateUnary(operation, "roundEven", Type::Float, Type::Float, false); + } - std::string op = '(' + regs.GetRegisterAsInteger(instr.gpr8, 0, false) + " + " + - std::to_string(instr.smem_imm.Value()) + ')'; + std::string FFloor(Operation operation) { + return GenerateUnary(operation, "floor", Type::Float, Type::Float, false); + } - shader.AddLine("uint index = (" + op + " / 4);"); + std::string FCeil(Operation operation) { + return GenerateUnary(operation, "ceil", Type::Float, Type::Float, false); + } - const std::string op_a = regs.GetLocalMemoryAsFloat("index"); + std::string FTrunc(Operation operation) { + return GenerateUnary(operation, "trunc", Type::Float, Type::Float, false); + } - switch (instr.ldst_sl.type.Value()) { - case Tegra::Shader::StoreType::Bytes32: - regs.SetRegisterToFloat(instr.gpr0, 0, op_a, 1, 1); - break; - default: - UNIMPLEMENTED_MSG("LD_L Unhandled type: {}", - static_cast<unsigned>(instr.ldst_sl.type.Value())); - } - break; - } - case OpCode::Id::ST_A: { - UNIMPLEMENTED_IF_MSG(instr.gpr8.Value() != Register::ZeroIndex, - "Indirect attribute loads are not supported"); - UNIMPLEMENTED_IF_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) != 0, - "Unaligned attribute loads are not supported"); - - u64 next_element = instr.attribute.fmt20.element; - u64 next_index = static_cast<u64>(instr.attribute.fmt20.index.Value()); - - const auto StoreNextElement = [&](u32 reg_offset) { - regs.SetOutputAttributeToRegister(static_cast<Attribute::Index>(next_index), - next_element, instr.gpr0.Value() + reg_offset, - instr.gpr39.Value()); - - // Load the next attribute element into the following register. If the element - // to load goes beyond the vec4 size, load the first element of the next - // attribute. - next_element = (next_element + 1) % 4; - next_index = next_index + (next_element == 0 ? 
1 : 0); - }; - - const u32 num_words = static_cast<u32>(instr.attribute.fmt20.size.Value()) + 1; - for (u32 reg_offset = 0; reg_offset < num_words; ++reg_offset) { - StoreNextElement(reg_offset); - } + template <Type type> + std::string FCastInteger(Operation operation) { + return GenerateUnary(operation, "float", Type::Float, type, false); + } - break; - } - case OpCode::Id::ST_L: { - UNIMPLEMENTED_IF_MSG(instr.st_l.unknown == 0, "ST_L Unhandled mode: {}", - static_cast<unsigned>(instr.st_l.unknown.Value())); + std::string ICastFloat(Operation operation) { + return GenerateUnary(operation, "int", Type::Int, Type::Float, false); + } - const auto scope = shader.Scope(); + std::string ICastUnsigned(Operation operation) { + return GenerateUnary(operation, "int", Type::Int, Type::Uint, false); + } - std::string op = '(' + regs.GetRegisterAsInteger(instr.gpr8, 0, false) + " + " + - std::to_string(instr.smem_imm.Value()) + ')'; + template <Type type> + std::string LogicalShiftLeft(Operation operation) { + return GenerateBinaryInfix(operation, "<<", type, type, Type::Uint); + } - shader.AddLine("uint index = (" + op + " / 4);"); + std::string ILogicalShiftRight(Operation operation) { + const std::string op_a = VisitOperand(operation, 0, Type::Uint); + const std::string op_b = VisitOperand(operation, 1, Type::Uint); - switch (instr.ldst_sl.type.Value()) { - case Tegra::Shader::StoreType::Bytes32: - regs.SetLocalMemoryAsFloat("index", regs.GetRegisterAsFloat(instr.gpr0)); - break; - default: - UNIMPLEMENTED_MSG("ST_L Unhandled type: {}", - static_cast<unsigned>(instr.ldst_sl.type.Value())); - } - break; - } - case OpCode::Id::TEX: { - Tegra::Shader::TextureType texture_type{instr.tex.texture_type}; - const bool is_array = instr.tex.array != 0; - const bool depth_compare = - instr.tex.UsesMiscMode(Tegra::Shader::TextureMiscMode::DC); - const auto process_mode = instr.tex.GetTextureProcessMode(); - UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); - UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI), - "AOFFI is not implemented"); - - const auto [coord, texture] = - GetTEXCode(instr, texture_type, process_mode, depth_compare, is_array); - - const auto scope = shader.Scope(); - shader.AddLine(coord); - - if (depth_compare) { - regs.SetRegisterToFloat(instr.gpr0, 0, texture, 1, 1); - } else { - shader.AddLine("vec4 texture_tmp = " + texture + ';'); - std::size_t dest_elem{}; - for (std::size_t elem = 0; elem < 4; ++elem) { - if (!instr.tex.IsComponentEnabled(elem)) { - // Skip disabled components - continue; - } - regs.SetRegisterToFloat(instr.gpr0, elem, "texture_tmp", 1, 4, false, false, - dest_elem); - ++dest_elem; - } - } - break; - } - case OpCode::Id::TEXS: { - Tegra::Shader::TextureType texture_type{instr.texs.GetTextureType()}; - const bool is_array{instr.texs.IsArrayTexture()}; - const bool depth_compare = - instr.texs.UsesMiscMode(Tegra::Shader::TextureMiscMode::DC); - const auto process_mode = instr.texs.GetTextureProcessMode(); + return ApplyPrecise(operation, + BitwiseCastResult("int(" + op_a + " >> " + op_b + ')', Type::Int)); + } - UNIMPLEMENTED_IF_MSG(instr.texs.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); + std::string IArithmeticShiftRight(Operation operation) { + return GenerateBinaryInfix(operation, ">>", Type::Int, Type::Int, Type::Uint); + } - const auto scope = shader.Scope(); + template <Type type> + std::string BitwiseAnd(Operation operation) { 
+ return GenerateBinaryInfix(operation, "&", type, type, type); + } - auto [coord, texture] = - GetTEXSCode(instr, texture_type, process_mode, depth_compare, is_array); + template <Type type> + std::string BitwiseOr(Operation operation) { + return GenerateBinaryInfix(operation, "|", type, type, type); + } - shader.AddLine(coord); + template <Type type> + std::string BitwiseXor(Operation operation) { + return GenerateBinaryInfix(operation, "^", type, type, type); + } - if (depth_compare) { - texture = "vec4(" + texture + ')'; - } - shader.AddLine("vec4 texture_tmp = " + texture + ';'); + template <Type type> + std::string BitwiseNot(Operation operation) { + return GenerateUnary(operation, "~", type, type, false); + } - if (instr.texs.fp32_flag) { - WriteTexsInstructionFloat(instr, "texture_tmp"); - } else { - WriteTexsInstructionHalfFloat(instr, "texture_tmp"); - } - break; - } - case OpCode::Id::TLDS: { - const Tegra::Shader::TextureType texture_type{instr.tlds.GetTextureType()}; - const bool is_array{instr.tlds.IsArrayTexture()}; + std::string UCastFloat(Operation operation) { + return GenerateUnary(operation, "uint", Type::Uint, Type::Float, false); + } - UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); - UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI), - "AOFFI is not implemented"); - UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(Tegra::Shader::TextureMiscMode::MZ), - "MZ is not implemented"); + std::string UCastSigned(Operation operation) { + return GenerateUnary(operation, "uint", Type::Uint, Type::Int, false); + } - const auto [coord, texture] = GetTLDSCode(instr, texture_type, is_array); + std::string UShiftRight(Operation operation) { + return GenerateBinaryInfix(operation, ">>", Type::Uint, Type::Uint, Type::Uint); + } - const auto scope = shader.Scope(); + template <Type type> + std::string BitfieldInsert(Operation operation) { + return GenerateQuaternary(operation, "bitfieldInsert", type, type, type, Type::Int, + Type::Int); + } - shader.AddLine(coord); - shader.AddLine("vec4 texture_tmp = " + texture + ';'); - WriteTexsInstructionFloat(instr, "texture_tmp"); - break; - } - case OpCode::Id::TLD4: { - - UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); - UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI), - "AOFFI is not implemented"); - UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV), - "NDV is not implemented"); - UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::PTP), - "PTP is not implemented"); - - auto texture_type = instr.tld4.texture_type.Value(); - const bool depth_compare = - instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::DC); - const bool is_array = instr.tld4.array != 0; - - const auto [coord, texture] = - GetTLD4Code(instr, texture_type, depth_compare, is_array); - - const auto scope = shader.Scope(); - - shader.AddLine(coord); - std::size_t dest_elem{}; - - shader.AddLine("vec4 texture_tmp = " + texture + ';'); - for (std::size_t elem = 0; elem < 4; ++elem) { - if (!instr.tex.IsComponentEnabled(elem)) { - // Skip disabled components - continue; - } - regs.SetRegisterToFloat(instr.gpr0, elem, "texture_tmp", 1, 4, false, false, - dest_elem); - ++dest_elem; - } - break; - } - case OpCode::Id::TLD4S: { - UNIMPLEMENTED_IF_MSG( - instr.tld4s.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is 
not implemented"); - UNIMPLEMENTED_IF_MSG( - instr.tld4s.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI), - "AOFFI is not implemented"); + template <Type type> + std::string BitfieldExtract(Operation operation) { + return GenerateTernary(operation, "bitfieldExtract", type, type, Type::Int, Type::Int); + } - const auto scope = shader.Scope(); + template <Type type> + std::string BitCount(Operation operation) { + return GenerateUnary(operation, "bitCount", type, type, false); + } - std::string coords; + std::string HNegate(Operation operation) { + const auto GetNegate = [&](std::size_t index) -> std::string { + return VisitOperand(operation, index, Type::Bool) + " ? -1 : 1"; + }; + const std::string value = '(' + VisitOperand(operation, 0, Type::HalfFloat) + " * vec2(" + + GetNegate(1) + ", " + GetNegate(2) + "))"; + return BitwiseCastResult(value, Type::HalfFloat); + } - const bool depth_compare = - instr.tld4s.UsesMiscMode(Tegra::Shader::TextureMiscMode::DC); + std::string HMergeF32(Operation operation) { + return "float(toHalf2(" + Visit(operation[0]) + ")[0])"; + } - const std::string sampler = GetSampler( - instr.sampler, Tegra::Shader::TextureType::Texture2D, false, depth_compare); + std::string HMergeH0(Operation operation) { + return "fromHalf2(vec2(toHalf2(" + Visit(operation[0]) + ")[1], toHalf2(" + + Visit(operation[1]) + ")[0]))"; + } - const std::string op_a = regs.GetRegisterAsFloat(instr.gpr8); - coords = "vec2 coords = vec2(" + op_a + ", "; - std::string texture = "textureGather(" + sampler + ", coords, "; + std::string HMergeH1(Operation operation) { + return "fromHalf2(vec2(toHalf2(" + Visit(operation[0]) + ")[0], toHalf2(" + + Visit(operation[1]) + ")[1]))"; + } - if (!depth_compare) { - const std::string op_b = regs.GetRegisterAsFloat(instr.gpr20); - coords += op_b + ");"; - texture += std::to_string(instr.tld4s.component) + ')'; - } else { - const std::string op_b = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - const std::string op_c = regs.GetRegisterAsFloat(instr.gpr20); - coords += op_b + ");"; - texture += op_c + ')'; - } - shader.AddLine(coords); - shader.AddLine("vec4 texture_tmp = " + texture + ';'); - WriteTexsInstructionFloat(instr, "texture_tmp"); - break; - } - case OpCode::Id::TXQ: { - UNIMPLEMENTED_IF_MSG(instr.txq.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); - - const auto scope = shader.Scope(); - - // TODO: The new commits on the texture refactor, change the way samplers work. - // Sadly, not all texture instructions specify the type of texture their sampler - // uses. This must be fixed at a later instance. 
- const std::string sampler = - GetSampler(instr.sampler, Tegra::Shader::TextureType::Texture2D, false, false); - switch (instr.txq.query_type) { - case Tegra::Shader::TextureQueryType::Dimension: { - const std::string texture = "textureSize(" + sampler + ", " + - regs.GetRegisterAsInteger(instr.gpr8) + ')'; - const std::string mip_level = "textureQueryLevels(" + sampler + ')'; - shader.AddLine("ivec2 sizes = " + texture + ';'); - - regs.SetRegisterToInteger(instr.gpr0.Value() + 0, true, 0, "sizes.x", 1, 1); - regs.SetRegisterToInteger(instr.gpr0.Value() + 1, true, 0, "sizes.y", 1, 1); - regs.SetRegisterToInteger(instr.gpr0.Value() + 2, true, 0, "0", 1, 1); - regs.SetRegisterToInteger(instr.gpr0.Value() + 3, true, 0, mip_level, 1, 1); - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled texture query type: {}", - static_cast<u32>(instr.txq.query_type.Value())); - } - } - break; - } - case OpCode::Id::TMML: { - UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); - UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV), - "NDV is not implemented"); - - const std::string x = regs.GetRegisterAsFloat(instr.gpr8); - const bool is_array = instr.tmml.array != 0; - auto texture_type = instr.tmml.texture_type.Value(); - const std::string sampler = - GetSampler(instr.sampler, texture_type, is_array, false); - - const auto scope = shader.Scope(); - - // TODO: Add coordinates for different samplers once other texture types are - // implemented. - switch (texture_type) { - case Tegra::Shader::TextureType::Texture1D: { - shader.AddLine("float coords = " + x + ';'); - break; - } - case Tegra::Shader::TextureType::Texture2D: { - const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - shader.AddLine("vec2 coords = vec2(" + x + ", " + y + ");"); - break; - } - default: - UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type)); + std::string HPack2(Operation operation) { + return "utof(packHalf2x16(vec2(" + Visit(operation[0]) + ", " + Visit(operation[1]) + ")))"; + } - // Fallback to interpreting as a 2D texture for now - const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - shader.AddLine("vec2 coords = vec2(" + x + ", " + y + ");"); - texture_type = Tegra::Shader::TextureType::Texture2D; - } + template <Type type> + std::string LogicalLessThan(Operation operation) { + return GenerateBinaryInfix(operation, "<", Type::Bool, type, type); + } - const std::string texture = "textureQueryLod(" + sampler + ", coords)"; - shader.AddLine("vec2 tmp = " + texture + " * vec2(256.0, 256.0);"); + template <Type type> + std::string LogicalEqual(Operation operation) { + return GenerateBinaryInfix(operation, "==", Type::Bool, type, type); + } - regs.SetRegisterToInteger(instr.gpr0, true, 0, "int(tmp.y)", 1, 1); - regs.SetRegisterToInteger(instr.gpr0.Value() + 1, false, 0, "uint(tmp.x)", 1, 1); - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled memory instruction: {}", opcode->get().GetName()); - } - } - break; - } - case OpCode::Type::FloatSetPredicate: { - const std::string op_a = - GetOperandAbsNeg(regs.GetRegisterAsFloat(instr.gpr8), instr.fsetp.abs_a != 0, - instr.fsetp.neg_a != 0); + template <Type type> + std::string LogicalLessEqual(Operation operation) { + return GenerateBinaryInfix(operation, "<=", Type::Bool, type, type); + } - std::string op_b; + template <Type type> + std::string LogicalGreaterThan(Operation operation) { + return 
GenerateBinaryInfix(operation, ">", Type::Bool, type, type); + } - if (instr.is_b_imm) { - op_b += '(' + GetImmediate19(instr) + ')'; - } else { - if (instr.is_b_gpr) { - op_b += regs.GetRegisterAsFloat(instr.gpr20); - } else { - op_b += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Float); - } - } + template <Type type> + std::string LogicalNotEqual(Operation operation) { + return GenerateBinaryInfix(operation, "!=", Type::Bool, type, type); + } - if (instr.fsetp.abs_b) { - op_b = "abs(" + op_b + ')'; - } + template <Type type> + std::string LogicalGreaterEqual(Operation operation) { + return GenerateBinaryInfix(operation, ">=", Type::Bool, type, type); + } - // We can't use the constant predicate as destination. - ASSERT(instr.fsetp.pred3 != static_cast<u64>(Pred::UnusedIndex)); + std::string LogicalFIsNan(Operation operation) { + return GenerateUnary(operation, "isnan", Type::Bool, Type::Float, false); + } - const std::string second_pred = - GetPredicateCondition(instr.fsetp.pred39, instr.fsetp.neg_pred != 0); + std::string LogicalAssign(Operation operation) { + const Node dest = operation[0]; + const Node src = operation[1]; - const std::string combiner = GetPredicateCombiner(instr.fsetp.op); + std::string target; - const std::string predicate = GetPredicateComparison(instr.fsetp.cond, op_a, op_b); - // Set the primary predicate to the result of Predicate OP SecondPredicate - SetPredicate(instr.fsetp.pred3, - '(' + predicate + ") " + combiner + " (" + second_pred + ')'); + if (const auto pred = std::get_if<PredicateNode>(dest)) { + ASSERT_MSG(!pred->IsNegated(), "Negating logical assignment"); - if (instr.fsetp.pred0 != static_cast<u64>(Pred::UnusedIndex)) { - // Set the secondary predicate to the result of !Predicate OP SecondPredicate, - // if enabled - SetPredicate(instr.fsetp.pred0, - "!(" + predicate + ") " + combiner + " (" + second_pred + ')'); + const auto index = pred->GetIndex(); + switch (index) { + case Tegra::Shader::Pred::NeverExecute: + case Tegra::Shader::Pred::UnusedIndex: + // Writing to these predicates is a no-op + return {}; } - break; + target = GetPredicate(index); + } else if (const auto flag = std::get_if<InternalFlagNode>(dest)) { + target = GetInternalFlag(flag->GetFlag()); } - case OpCode::Type::IntegerSetPredicate: { - const std::string op_a = - regs.GetRegisterAsInteger(instr.gpr8, 0, instr.isetp.is_signed); - std::string op_b; - if (instr.is_b_imm) { - op_b += '(' + std::to_string(instr.alu.GetSignedImm20_20()) + ')'; - } else { - if (instr.is_b_gpr) { - op_b += regs.GetRegisterAsInteger(instr.gpr20, 0, instr.isetp.is_signed); - } else { - op_b += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Integer); - } - } - - // We can't use the constant predicate as destination. 
- ASSERT(instr.isetp.pred3 != static_cast<u64>(Pred::UnusedIndex)); + code.AddLine(target + " = " + Visit(src) + ';'); + return {}; + } - const std::string second_pred = - GetPredicateCondition(instr.isetp.pred39, instr.isetp.neg_pred != 0); + std::string LogicalAnd(Operation operation) { + return GenerateBinaryInfix(operation, "&&", Type::Bool, Type::Bool, Type::Bool); + } - const std::string combiner = GetPredicateCombiner(instr.isetp.op); + std::string LogicalOr(Operation operation) { + return GenerateBinaryInfix(operation, "||", Type::Bool, Type::Bool, Type::Bool); + } - const std::string predicate = GetPredicateComparison(instr.isetp.cond, op_a, op_b); - // Set the primary predicate to the result of Predicate OP SecondPredicate - SetPredicate(instr.isetp.pred3, - '(' + predicate + ") " + combiner + " (" + second_pred + ')'); + std::string LogicalXor(Operation operation) { + return GenerateBinaryInfix(operation, "^^", Type::Bool, Type::Bool, Type::Bool); + } - if (instr.isetp.pred0 != static_cast<u64>(Pred::UnusedIndex)) { - // Set the secondary predicate to the result of !Predicate OP SecondPredicate, - // if enabled - SetPredicate(instr.isetp.pred0, - "!(" + predicate + ") " + combiner + " (" + second_pred + ')'); - } - break; - } - case OpCode::Type::HalfSetPredicate: { - UNIMPLEMENTED_IF(instr.hsetp2.ftz != 0); - - const std::string op_a = - GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.hsetp2.type_a, - instr.hsetp2.abs_a, instr.hsetp2.negate_a); - - const std::string op_b = [&]() { - switch (opcode->get().GetId()) { - case OpCode::Id::HSETP2_R: - return GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr20, 0, false), - instr.hsetp2.type_b, instr.hsetp2.abs_a, - instr.hsetp2.negate_b); - default: - UNREACHABLE(); - return std::string("vec2(0)"); - } - }(); + std::string LogicalNegate(Operation operation) { + return GenerateUnary(operation, "!", Type::Bool, Type::Bool, false); + } - // We can't use the constant predicate as destination. - ASSERT(instr.hsetp2.pred3 != static_cast<u64>(Pred::UnusedIndex)); + std::string LogicalPick2(Operation operation) { + const std::string pair = VisitOperand(operation, 0, Type::Bool2); + return pair + '[' + VisitOperand(operation, 1, Type::Uint) + ']'; + } - const std::string second_pred = - GetPredicateCondition(instr.hsetp2.pred39, instr.hsetp2.neg_pred != 0); + std::string LogicalAll2(Operation operation) { + return GenerateUnary(operation, "all", Type::Bool, Type::Bool2); + } - const std::string combiner = GetPredicateCombiner(instr.hsetp2.op); + std::string LogicalAny2(Operation operation) { + return GenerateUnary(operation, "any", Type::Bool, Type::Bool2); + } - const std::string component_combiner = instr.hsetp2.h_and ? 
"&&" : "||"; - const std::string predicate = - '(' + GetPredicateComparison(instr.hsetp2.cond, op_a + ".x", op_b + ".x") + ' ' + - component_combiner + ' ' + - GetPredicateComparison(instr.hsetp2.cond, op_a + ".y", op_b + ".y") + ')'; + std::string Logical2HLessThan(Operation operation) { + return GenerateBinaryCall(operation, "lessThan", Type::Bool2, Type::HalfFloat, + Type::HalfFloat); + } - // Set the primary predicate to the result of Predicate OP SecondPredicate - SetPredicate(instr.hsetp2.pred3, - '(' + predicate + ") " + combiner + " (" + second_pred + ')'); + std::string Logical2HEqual(Operation operation) { + return GenerateBinaryCall(operation, "equal", Type::Bool2, Type::HalfFloat, + Type::HalfFloat); + } - if (instr.hsetp2.pred0 != static_cast<u64>(Pred::UnusedIndex)) { - // Set the secondary predicate to the result of !Predicate OP SecondPredicate, - // if enabled - SetPredicate(instr.hsetp2.pred0, - "!(" + predicate + ") " + combiner + " (" + second_pred + ')'); - } - break; - } - case OpCode::Type::PredicateSetRegister: { - UNIMPLEMENTED_IF_MSG(instr.generates_cc, - "Condition codes generation in PSET is partially implemented"); - - const std::string op_a = - GetPredicateCondition(instr.pset.pred12, instr.pset.neg_pred12 != 0); - const std::string op_b = - GetPredicateCondition(instr.pset.pred29, instr.pset.neg_pred29 != 0); - - const std::string second_pred = - GetPredicateCondition(instr.pset.pred39, instr.pset.neg_pred39 != 0); - - const std::string combiner = GetPredicateCombiner(instr.pset.op); - - const std::string predicate = - '(' + op_a + ") " + GetPredicateCombiner(instr.pset.cond) + " (" + op_b + ')'; - const std::string result = '(' + predicate + ") " + combiner + " (" + second_pred + ')'; - if (instr.pset.bf == 0) { - const std::string value = '(' + result + ") ? 0xFFFFFFFF : 0"; - regs.SetRegisterToInteger(instr.gpr0, false, 0, value, 1, 1, false, - instr.generates_cc); - } else { - const std::string value = '(' + result + ") ? 1.0 : 0.0"; - regs.SetRegisterToFloat(instr.gpr0, 0, value, 1, 1, false, instr.generates_cc); - } - break; - } - case OpCode::Type::PredicateSetPredicate: { - switch (opcode->get().GetId()) { - case OpCode::Id::PSETP: { - const std::string op_a = - GetPredicateCondition(instr.psetp.pred12, instr.psetp.neg_pred12 != 0); - const std::string op_b = - GetPredicateCondition(instr.psetp.pred29, instr.psetp.neg_pred29 != 0); - - // We can't use the constant predicate as destination. 
- ASSERT(instr.psetp.pred3 != static_cast<u64>(Pred::UnusedIndex)); - - const std::string second_pred = - GetPredicateCondition(instr.psetp.pred39, instr.psetp.neg_pred39 != 0); - - const std::string combiner = GetPredicateCombiner(instr.psetp.op); - - const std::string predicate = - '(' + op_a + ") " + GetPredicateCombiner(instr.psetp.cond) + " (" + op_b + ')'; - - // Set the primary predicate to the result of Predicate OP SecondPredicate - SetPredicate(instr.psetp.pred3, - '(' + predicate + ") " + combiner + " (" + second_pred + ')'); - - if (instr.psetp.pred0 != static_cast<u64>(Pred::UnusedIndex)) { - // Set the secondary predicate to the result of !Predicate OP SecondPredicate, - // if enabled - SetPredicate(instr.psetp.pred0, - "!(" + predicate + ") " + combiner + " (" + second_pred + ')'); - } - break; - } - case OpCode::Id::CSETP: { - const std::string pred = - GetPredicateCondition(instr.csetp.pred39, instr.csetp.neg_pred39 != 0); - const std::string combiner = GetPredicateCombiner(instr.csetp.op); - const std::string condition_code = regs.GetConditionCode(instr.csetp.cc); - if (instr.csetp.pred3 != static_cast<u64>(Pred::UnusedIndex)) { - SetPredicate(instr.csetp.pred3, - '(' + condition_code + ") " + combiner + " (" + pred + ')'); - } - if (instr.csetp.pred0 != static_cast<u64>(Pred::UnusedIndex)) { - SetPredicate(instr.csetp.pred0, - "!(" + condition_code + ") " + combiner + " (" + pred + ')'); - } - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled predicate instruction: {}", opcode->get().GetName()); - } - } - break; - } - case OpCode::Type::RegisterSetPredicate: { - UNIMPLEMENTED_IF(instr.r2p.mode != Tegra::Shader::R2pMode::Pr); + std::string Logical2HLessEqual(Operation operation) { + return GenerateBinaryCall(operation, "lessThanEqual", Type::Bool2, Type::HalfFloat, + Type::HalfFloat); + } - const std::string apply_mask = [&]() { - switch (opcode->get().GetId()) { - case OpCode::Id::R2P_IMM: - return std::to_string(instr.r2p.immediate_mask); - default: - UNREACHABLE(); - return std::to_string(instr.r2p.immediate_mask); - } - }(); - const std::string mask = '(' + regs.GetRegisterAsInteger(instr.gpr8, 0, false) + - " >> " + std::to_string(instr.r2p.byte) + ')'; + std::string Logical2HGreaterThan(Operation operation) { + return GenerateBinaryCall(operation, "greaterThan", Type::Bool2, Type::HalfFloat, + Type::HalfFloat); + } - constexpr u64 programmable_preds = 7; - for (u64 pred = 0; pred < programmable_preds; ++pred) { - const auto shift = std::to_string(1 << pred); + std::string Logical2HNotEqual(Operation operation) { + return GenerateBinaryCall(operation, "notEqual", Type::Bool2, Type::HalfFloat, + Type::HalfFloat); + } - shader.AddLine("if ((" + apply_mask + " & " + shift + ") != 0) {"); - ++shader.scope; + std::string Logical2HGreaterEqual(Operation operation) { + return GenerateBinaryCall(operation, "greaterThanEqual", Type::Bool2, Type::HalfFloat, + Type::HalfFloat); + } - SetPredicate(pred, '(' + mask + " & " + shift + ") != 0"); + std::string F4Texture(Operation operation) { + const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); + ASSERT(meta); - --shader.scope; - shader.AddLine('}'); - } - break; + std::string expr = GenerateTexture(operation, "texture"); + if (meta->sampler.IsShadow()) { + expr = "vec4(" + expr + ')'; } - case OpCode::Type::FloatSet: { - const std::string op_a = GetOperandAbsNeg(regs.GetRegisterAsFloat(instr.gpr8), - instr.fset.abs_a != 0, instr.fset.neg_a != 0); - - std::string op_b; - - if (instr.is_b_imm) { - const 
std::string imm = GetImmediate19(instr); - op_b = imm; - } else { - if (instr.is_b_gpr) { - op_b = regs.GetRegisterAsFloat(instr.gpr20); - } else { - op_b = regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Float); - } - } - - op_b = GetOperandAbsNeg(op_b, instr.fset.abs_b != 0, instr.fset.neg_b != 0); - - // The fset instruction sets a register to 1.0 or -1 (depending on the bf bit) if the - // condition is true, and to 0 otherwise. - const std::string second_pred = - GetPredicateCondition(instr.fset.pred39, instr.fset.neg_pred != 0); - - const std::string combiner = GetPredicateCombiner(instr.fset.op); + return expr + GetSwizzle(meta->element); + } - const std::string predicate = "((" + - GetPredicateComparison(instr.fset.cond, op_a, op_b) + - ") " + combiner + " (" + second_pred + "))"; + std::string F4TextureLod(Operation operation) { + const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); + ASSERT(meta); - if (instr.fset.bf) { - regs.SetRegisterToFloat(instr.gpr0, 0, predicate + " ? 1.0 : 0.0", 1, 1, false, - instr.generates_cc); - } else { - regs.SetRegisterToInteger(instr.gpr0, false, 0, predicate + " ? 0xFFFFFFFF : 0", 1, - 1, false, instr.generates_cc); - } - break; + std::string expr = GenerateTexture(operation, "textureLod"); + if (meta->sampler.IsShadow()) { + expr = "vec4(" + expr + ')'; } - case OpCode::Type::IntegerSet: { - const std::string op_a = regs.GetRegisterAsInteger(instr.gpr8, 0, instr.iset.is_signed); + return expr + GetSwizzle(meta->element); + } - std::string op_b; + std::string F4TextureGather(Operation operation) { + const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); + ASSERT(meta); - if (instr.is_b_imm) { - op_b = std::to_string(instr.alu.GetSignedImm20_20()); - } else { - if (instr.is_b_gpr) { - op_b = regs.GetRegisterAsInteger(instr.gpr20, 0, instr.iset.is_signed); - } else { - op_b = regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Integer); - } - } - - // The iset instruction sets a register to 1.0 or -1 (depending on the bf bit) if the - // condition is true, and to 0 otherwise. - const std::string second_pred = - GetPredicateCondition(instr.iset.pred39, instr.iset.neg_pred != 0); + return GenerateTexture(operation, "textureGather", !meta->sampler.IsShadow()) + + GetSwizzle(meta->element); + } - const std::string combiner = GetPredicateCombiner(instr.iset.op); + std::string F4TextureQueryDimensions(Operation operation) { + const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); + ASSERT(meta); - const std::string predicate = "((" + - GetPredicateComparison(instr.iset.cond, op_a, op_b) + - ") " + combiner + " (" + second_pred + "))"; + const std::string sampler = GetSampler(meta->sampler); + const std::string lod = VisitOperand(operation, 0, Type::Int); - if (instr.iset.bf) { - regs.SetRegisterToFloat(instr.gpr0, 0, predicate + " ? 1.0 : 0.0", 1, 1); - } else { - regs.SetRegisterToInteger(instr.gpr0, false, 0, predicate + " ? 
0xFFFFFFFF : 0", 1, - 1); - } - break; + switch (meta->element) { + case 0: + case 1: + return "textureSize(" + sampler + ", " + lod + ')' + GetSwizzle(meta->element); + case 2: + return "0"; + case 3: + return "textureQueryLevels(" + sampler + ')'; } - case OpCode::Type::HalfSet: { - UNIMPLEMENTED_IF(instr.hset2.ftz != 0); - - const std::string op_a = - GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.hset2.type_a, - instr.hset2.abs_a != 0, instr.hset2.negate_a != 0); - - const std::string op_b = [&]() { - switch (opcode->get().GetId()) { - case OpCode::Id::HSET2_R: - return GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr20, 0, false), - instr.hset2.type_b, instr.hset2.abs_b != 0, - instr.hset2.negate_b != 0); - default: - UNREACHABLE(); - return std::string("vec2(0)"); - } - }(); - - const std::string second_pred = - GetPredicateCondition(instr.hset2.pred39, instr.hset2.neg_pred != 0); - - const std::string combiner = GetPredicateCombiner(instr.hset2.op); - - // HSET2 operates on each half float in the pack. - std::string result; - for (int i = 0; i < 2; ++i) { - const std::string float_value = i == 0 ? "0x00003c00" : "0x3c000000"; - const std::string integer_value = i == 0 ? "0x0000ffff" : "0xffff0000"; - const std::string value = instr.hset2.bf == 1 ? float_value : integer_value; + UNREACHABLE(); + return "0"; + } - const std::string comp = std::string(".") + "xy"[i]; - const std::string predicate = - "((" + GetPredicateComparison(instr.hset2.cond, op_a + comp, op_b + comp) + - ") " + combiner + " (" + second_pred + "))"; + std::string F4TextureQueryLod(Operation operation) { + const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); + ASSERT(meta); - result += '(' + predicate + " ? " + value + " : 0)"; - if (i == 0) { - result += " | "; - } - } - regs.SetRegisterToInteger(instr.gpr0, false, 0, '(' + result + ')', 1, 1); - break; + if (meta->element < 2) { + return "itof(int((" + GenerateTexture(operation, "textureQueryLod") + " * vec2(256))" + + GetSwizzle(meta->element) + "))"; } - case OpCode::Type::Xmad: { - UNIMPLEMENTED_IF(instr.xmad.sign_a); - UNIMPLEMENTED_IF(instr.xmad.sign_b); - UNIMPLEMENTED_IF_MSG(instr.generates_cc, - "Condition codes generation in XMAD is partially implemented"); - - std::string op_a{regs.GetRegisterAsInteger(instr.gpr8, 0, instr.xmad.sign_a)}; - std::string op_b; - std::string op_c; - - // TODO(bunnei): Needs to be fixed once op_a or op_b is signed - UNIMPLEMENTED_IF(instr.xmad.sign_a != instr.xmad.sign_b); - const bool is_signed{instr.xmad.sign_a == 1}; - - bool is_merge{}; - switch (opcode->get().GetId()) { - case OpCode::Id::XMAD_CR: { - is_merge = instr.xmad.merge_56; - op_b += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - instr.xmad.sign_b ? GLSLRegister::Type::Integer - : GLSLRegister::Type::UnsignedInteger); - op_c += regs.GetRegisterAsInteger(instr.gpr39, 0, is_signed); - break; - } - case OpCode::Id::XMAD_RR: { - is_merge = instr.xmad.merge_37; - op_b += regs.GetRegisterAsInteger(instr.gpr20, 0, instr.xmad.sign_b); - op_c += regs.GetRegisterAsInteger(instr.gpr39, 0, is_signed); - break; - } - case OpCode::Id::XMAD_RC: { - op_b += regs.GetRegisterAsInteger(instr.gpr39, 0, instr.xmad.sign_b); - op_c += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - is_signed ? 
GLSLRegister::Type::Integer - : GLSLRegister::Type::UnsignedInteger); - break; - } - case OpCode::Id::XMAD_IMM: { - is_merge = instr.xmad.merge_37; - op_b += std::to_string(instr.xmad.imm20_16); - op_c += regs.GetRegisterAsInteger(instr.gpr39, 0, is_signed); - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled XMAD instruction: {}", opcode->get().GetName()); - } - } + return "0"; + } - // TODO(bunnei): Ensure this is right with signed operands - if (instr.xmad.high_a) { - op_a = "((" + op_a + ") >> 16)"; - } else { - op_a = "((" + op_a + ") & 0xFFFF)"; - } + std::string F4TexelFetch(Operation operation) { + constexpr std::array<const char*, 4> constructors = {"int", "ivec2", "ivec3", "ivec4"}; + const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); + const auto count = static_cast<u32>(operation.GetOperandsCount()); + ASSERT(meta); - std::string src2 = '(' + op_b + ')'; // Preserve original source 2 - if (instr.xmad.high_b) { - op_b = '(' + src2 + " >> 16)"; - } else { - op_b = '(' + src2 + " & 0xFFFF)"; - } + std::string expr = "texelFetch("; + expr += GetSampler(meta->sampler); + expr += ", "; - std::string product = '(' + op_a + " * " + op_b + ')'; - if (instr.xmad.product_shift_left) { - product = '(' + product + " << 16)"; - } + expr += constructors[meta->coords_count - 1]; + expr += '('; + for (u32 i = 0; i < count; ++i) { + expr += VisitOperand(operation, i, Type::Int); - switch (instr.xmad.mode) { - case Tegra::Shader::XmadMode::None: - break; - case Tegra::Shader::XmadMode::CLo: - op_c = "((" + op_c + ") & 0xFFFF)"; - break; - case Tegra::Shader::XmadMode::CHi: - op_c = "((" + op_c + ") >> 16)"; - break; - case Tegra::Shader::XmadMode::CBcc: - op_c = "((" + op_c + ") + (" + src2 + "<< 16))"; - break; - default: { - UNIMPLEMENTED_MSG("Unhandled XMAD mode: {}", - static_cast<u32>(instr.xmad.mode.Value())); - } + if (i + 1 == meta->coords_count) { + expr += ')'; } - - std::string sum{'(' + product + " + " + op_c + ')'}; - if (is_merge) { - sum = "((" + sum + " & 0xFFFF) | (" + src2 + "<< 16))"; + if (i + 1 < count) { + expr += ", "; } - - regs.SetRegisterToInteger(instr.gpr0, is_signed, 0, sum, 1, 1, false, - instr.generates_cc); - break; } - default: { - switch (opcode->get().GetId()) { - case OpCode::Id::EXIT: { - const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; - UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, - "EXIT condition code used: {}", static_cast<u32>(cc)); - - if (stage == Maxwell3D::Regs::ShaderStage::Fragment) { - EmitFragmentOutputsWrite(); - } - - switch (instr.flow.cond) { - case Tegra::Shader::FlowCondition::Always: - shader.AddLine("return true;"); - if (instr.pred.pred_index == static_cast<u64>(Pred::UnusedIndex)) { - // If this is an unconditional exit then just end processing here, - // otherwise we have to account for the possibility of the condition - // not being met, so continue processing the next instruction. - offset = PROGRAM_END - 1; - } - break; - - case Tegra::Shader::FlowCondition::Fcsm_Tr: - // TODO(bunnei): What is this used for? 
If we assume this conditon is not - // satisifed, dual vertex shaders in Farming Simulator make more sense - UNIMPLEMENTED_MSG("Skipping unknown FlowCondition::Fcsm_Tr"); - break; - - default: - UNIMPLEMENTED_MSG("Unhandled flow condition: {}", - static_cast<u32>(instr.flow.cond.Value())); - } - break; - } - case OpCode::Id::KIL: { - UNIMPLEMENTED_IF(instr.flow.cond != Tegra::Shader::FlowCondition::Always); + expr += ')'; + return expr + GetSwizzle(meta->element); + } - const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; - UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, - "KIL condition code used: {}", static_cast<u32>(cc)); + std::string Branch(Operation operation) { + const auto target = std::get_if<ImmediateNode>(operation[0]); + UNIMPLEMENTED_IF(!target); - // Enclose "discard" in a conditional, so that GLSL compilation does not complain - // about unexecuted instructions that may follow this. - shader.AddLine("if (true) {"); - ++shader.scope; - shader.AddLine("discard;"); - --shader.scope; - shader.AddLine("}"); + code.AddLine(fmt::format("jmp_to = 0x{:x}u;", target->GetValue())); + code.AddLine("break;"); + return {}; + } - break; - } - case OpCode::Id::OUT_R: { - UNIMPLEMENTED_IF_MSG(instr.gpr20.Value() != Register::ZeroIndex, - "Stream buffer is not supported"); - ASSERT_MSG(stage == Maxwell3D::Regs::ShaderStage::Geometry, - "OUT is expected to be used in a geometry shader."); - - if (instr.out.emit) { - // gpr0 is used to store the next address. Hardware returns a pointer but - // we just return the next index with a cyclic cap. - const std::string current{regs.GetRegisterAsInteger(instr.gpr8, 0, false)}; - const std::string next = "((" + current + " + 1" + ") % " + - std::to_string(MAX_GEOMETRY_BUFFERS) + ')'; - shader.AddLine("emit_vertex(" + current + ");"); - regs.SetRegisterToInteger(instr.gpr0, false, 0, next, 1, 1); - } - if (instr.out.cut) { - shader.AddLine("EndPrimitive();"); - } + std::string PushFlowStack(Operation operation) { + const auto target = std::get_if<ImmediateNode>(operation[0]); + UNIMPLEMENTED_IF(!target); - break; - } - case OpCode::Id::MOV_SYS: { - switch (instr.sys20) { - case Tegra::Shader::SystemVariable::InvocationInfo: { - LOG_WARNING(HW_GPU, "MOV_SYS instruction with InvocationInfo is incomplete"); - regs.SetRegisterToInteger(instr.gpr0, false, 0, "0u", 1, 1); - break; - } - case Tegra::Shader::SystemVariable::Ydirection: { - // Config pack's third value is Y_NEGATE's state. 
- regs.SetRegisterToFloat(instr.gpr0, 0, "uintBitsToFloat(config_pack[2])", 1, 1); - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled system move: {}", - static_cast<u32>(instr.sys20.Value())); - } - } - break; - } - case OpCode::Id::ISBERD: { - UNIMPLEMENTED_IF(instr.isberd.o != 0); - UNIMPLEMENTED_IF(instr.isberd.skew != 0); - UNIMPLEMENTED_IF(instr.isberd.shift != Tegra::Shader::IsberdShift::None); - UNIMPLEMENTED_IF(instr.isberd.mode != Tegra::Shader::IsberdMode::None); - ASSERT_MSG(stage == Maxwell3D::Regs::ShaderStage::Geometry, - "ISBERD is expected to be used in a geometry shader."); - LOG_WARNING(HW_GPU, "ISBERD instruction is incomplete"); - regs.SetRegisterToFloat(instr.gpr0, 0, regs.GetRegisterAsFloat(instr.gpr8), 1, 1); - break; - } - case OpCode::Id::BRA: { - UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, - "BRA with constant buffers are not implemented"); - - const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; - const u32 target = offset + instr.bra.GetBranchTarget(); - if (cc != Tegra::Shader::ConditionCode::T) { - const std::string condition_code = regs.GetConditionCode(cc); - shader.AddLine("if (" + condition_code + "){"); - shader.scope++; - shader.AddLine("{ jmp_to = " + std::to_string(target) + "u; break; }"); - shader.scope--; - shader.AddLine('}'); - } else { - shader.AddLine("{ jmp_to = " + std::to_string(target) + "u; break; }"); - } - break; - } - case OpCode::Id::IPA: { - const auto& attribute = instr.attribute.fmt28; - const auto& reg = instr.gpr0; - - Tegra::Shader::IpaMode input_mode{instr.ipa.interp_mode.Value(), - instr.ipa.sample_mode.Value()}; - regs.SetRegisterToInputAttibute(reg, attribute.element, attribute.index, - input_mode); + code.AddLine(fmt::format("flow_stack[flow_stack_top++] = 0x{:x}u;", target->GetValue())); + return {}; + } - if (instr.ipa.saturate) { - regs.SetRegisterToFloat(reg, 0, regs.GetRegisterAsFloat(reg), 1, 1, true); - } - break; - } - case OpCode::Id::SSY: { - // The SSY opcode tells the GPU where to re-converge divergent execution paths, it - // sets the target of the jump that the SYNC instruction will make. The SSY opcode - // has a similar structure to the BRA opcode. - UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, - "Constant buffer flow is not supported"); - - const u32 target = offset + instr.bra.GetBranchTarget(); - EmitPushToFlowStack(target); - break; - } - case OpCode::Id::PBK: { - // PBK pushes to a stack the address where BRK will jump to. This shares stack with - // SSY but using SYNC on a PBK address will kill the shader execution. We don't - // emulate this because it's very unlikely a driver will emit such invalid shader. 
- UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, - "Constant buffer PBK is not supported"); - - const u32 target = offset + instr.bra.GetBranchTarget(); - EmitPushToFlowStack(target); - break; - } - case OpCode::Id::SYNC: { - const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; - UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, - "SYNC condition code used: {}", static_cast<u32>(cc)); + std::string PopFlowStack(Operation operation) { + code.AddLine("jmp_to = flow_stack[--flow_stack_top];"); + code.AddLine("break;"); + return {}; + } - // The SYNC opcode jumps to the address previously set by the SSY opcode - EmitPopFromFlowStack(); - break; + std::string Exit(Operation operation) { + if (stage != ShaderStage::Fragment) { + code.AddLine("return;"); + return {}; + } + const auto& used_registers = ir.GetRegisters(); + const auto SafeGetRegister = [&](u32 reg) -> std::string { + // TODO(Rodrigo): Replace with contains once C++20 releases + if (used_registers.find(reg) != used_registers.end()) { + return GetRegister(reg); } - case OpCode::Id::BRK: { - // The BRK opcode jumps to the address previously set by the PBK opcode - const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; - UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, - "BRK condition code used: {}", static_cast<u32>(cc)); + return "0.0f"; + }; - EmitPopFromFlowStack(); - break; - } - case OpCode::Id::DEPBAR: { - // TODO(Subv): Find out if we actually have to care about this instruction or if - // the GLSL compiler takes care of that for us. - LOG_WARNING(HW_GPU, "DEPBAR instruction is stubbed"); - break; - } - case OpCode::Id::VMAD: { - UNIMPLEMENTED_IF_MSG(instr.generates_cc, - "Condition codes generation in VMAD is not implemented"); - - const bool result_signed = instr.video.signed_a == 1 || instr.video.signed_b == 1; - const std::string op_a = GetVideoOperandA(instr); - const std::string op_b = GetVideoOperandB(instr); - const std::string op_c = regs.GetRegisterAsInteger(instr.gpr39, 0, result_signed); - - std::string result = '(' + op_a + " * " + op_b + " + " + op_c + ')'; - - switch (instr.vmad.shr) { - case Tegra::Shader::VmadShr::Shr7: - result = '(' + result + " >> 7)"; - break; - case Tegra::Shader::VmadShr::Shr15: - result = '(' + result + " >> 15)"; - break; - } + UNIMPLEMENTED_IF_MSG(header.ps.omap.sample_mask != 0, "Sample mask write is unimplemented"); - regs.SetRegisterToInteger(instr.gpr0, result_signed, 1, result, 1, 1, - instr.vmad.saturate, instr.vmad.cc); - break; + code.AddLine("if (alpha_test[0] != 0) {"); + ++code.scope; + // We start on the register containing the alpha value in the first RT. + u32 current_reg = 3; + for (u32 render_target = 0; render_target < Maxwell::NumRenderTargets; ++render_target) { + // TODO(Blinkhawk): verify the behavior of alpha testing on hardware when + // multiple render targets are used. + if (header.ps.IsColorComponentOutputEnabled(render_target, 0) || + header.ps.IsColorComponentOutputEnabled(render_target, 1) || + header.ps.IsColorComponentOutputEnabled(render_target, 2) || + header.ps.IsColorComponentOutputEnabled(render_target, 3)) { + code.AddLine( + fmt::format("if (!AlphaFunc({})) discard;", SafeGetRegister(current_reg))); + current_reg += 4; } - case OpCode::Id::VSETP: { - const std::string op_a = GetVideoOperandA(instr); - const std::string op_b = GetVideoOperandB(instr); - - // We can't use the constant predicate as destination. 
- ASSERT(instr.vsetp.pred3 != static_cast<u64>(Pred::UnusedIndex)); - - const std::string second_pred = GetPredicateCondition(instr.vsetp.pred39, false); - - const std::string combiner = GetPredicateCombiner(instr.vsetp.op); - - const std::string predicate = GetPredicateComparison(instr.vsetp.cond, op_a, op_b); - // Set the primary predicate to the result of Predicate OP SecondPredicate - SetPredicate(instr.vsetp.pred3, - '(' + predicate + ") " + combiner + " (" + second_pred + ')'); + } + --code.scope; + code.AddLine('}'); - if (instr.vsetp.pred0 != static_cast<u64>(Pred::UnusedIndex)) { - // Set the secondary predicate to the result of !Predicate OP SecondPredicate, - // if enabled - SetPredicate(instr.vsetp.pred0, - "!(" + predicate + ") " + combiner + " (" + second_pred + ')'); + // Write the color outputs using the data in the shader registers, disabled + // rendertargets/components are skipped in the register assignment. + current_reg = 0; + for (u32 render_target = 0; render_target < Maxwell::NumRenderTargets; ++render_target) { + // TODO(Subv): Figure out how dual-source blending is configured in the Switch. + for (u32 component = 0; component < 4; ++component) { + if (header.ps.IsColorComponentOutputEnabled(render_target, component)) { + code.AddLine(fmt::format("FragColor{}[{}] = {};", render_target, component, + SafeGetRegister(current_reg))); + ++current_reg; } - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled instruction: {}", opcode->get().GetName()); - break; - } } - - break; - } } - // Close the predicate condition scope. - if (can_be_predicated && instr.pred.pred_index != static_cast<u64>(Pred::UnusedIndex)) { - --shader.scope; - shader.AddLine('}'); + if (header.ps.omap.depth) { + // The depth output is always 2 registers after the last color output, and current_reg + // already contains one past the last color register. + code.AddLine("gl_FragDepth = " + SafeGetRegister(current_reg + 1) + ';'); } - return offset + 1; + code.AddLine("return;"); + return {}; } - /** - * Compiles a range of instructions from Tegra to GLSL. - * @param begin the offset of the starting instruction. - * @param end the offset where the compilation should stop (exclusive). - * @return the offset of the next instruction to compile. PROGRAM_END if the program - * terminates. - */ - u32 CompileRange(u32 begin, u32 end) { - u32 program_counter; - for (program_counter = begin; program_counter < (begin > end ? PROGRAM_END : end);) { - program_counter = CompileInstr(program_counter); - } - return program_counter; + std::string Discard(Operation operation) { + // Enclose "discard" in a conditional, so that GLSL compilation does not complain + // about unexecuted instructions that may follow this. 
+ code.AddLine("if (true) {"); + ++code.scope; + code.AddLine("discard;"); + --code.scope; + code.AddLine("}"); + return {}; } - void Generate(const std::string& suffix) { - // Add declarations for all subroutines - for (const auto& subroutine : subroutines) { - shader.AddLine("bool " + subroutine.GetName() + "();"); - } - shader.AddNewLine(); - - // Add the main entry point - shader.AddLine("bool exec_" + suffix + "() {"); - ++shader.scope; - CallSubroutine(GetSubroutine(main_offset, PROGRAM_END)); - --shader.scope; - shader.AddLine("}\n"); - - // Add definitions for all subroutines - for (const auto& subroutine : subroutines) { - std::set<u32> labels = subroutine.labels; - - shader.AddLine("bool " + subroutine.GetName() + "() {"); - ++shader.scope; - - if (labels.empty()) { - if (CompileRange(subroutine.begin, subroutine.end) != PROGRAM_END) { - shader.AddLine("return false;"); - } - } else { - labels.insert(subroutine.begin); - shader.AddLine("uint jmp_to = " + std::to_string(subroutine.begin) + "u;"); - - // TODO(Subv): Figure out the actual depth of the flow stack, for now it seems - // unlikely that shaders will use 20 nested SSYs and PBKs. - constexpr u32 FLOW_STACK_SIZE = 20; - shader.AddLine("uint flow_stack[" + std::to_string(FLOW_STACK_SIZE) + "];"); - shader.AddLine("uint flow_stack_top = 0u;"); + std::string EmitVertex(Operation operation) { + ASSERT_MSG(stage == ShaderStage::Geometry, + "EmitVertex is expected to be used in a geometry shader."); - shader.AddLine("while (true) {"); - ++shader.scope; + // If a geometry shader is attached, it will always flip (it's the last stage before + // fragment). For more info about flipping, refer to gl_shader_gen.cpp. + code.AddLine("position.xy *= viewport_flip.xy;"); + code.AddLine("gl_Position = position;"); + code.AddLine("position.w = 1.0;"); + code.AddLine("EmitVertex();"); + return {}; + } + + std::string EndPrimitive(Operation operation) { + ASSERT_MSG(stage == ShaderStage::Geometry, + "EndPrimitive is expected to be used in a geometry shader."); + + code.AddLine("EndPrimitive();"); + return {}; + } + + std::string YNegate(Operation operation) { + // Config pack's third value is Y_NEGATE's state. 
+ return "uintBitsToFloat(config_pack[2])"; + } + + static constexpr OperationDecompilersArray operation_decompilers = { + &GLSLDecompiler::Assign, + + &GLSLDecompiler::Select, + + &GLSLDecompiler::Add<Type::Float>, + &GLSLDecompiler::Mul<Type::Float>, + &GLSLDecompiler::Div<Type::Float>, + &GLSLDecompiler::Fma<Type::Float>, + &GLSLDecompiler::Negate<Type::Float>, + &GLSLDecompiler::Absolute<Type::Float>, + &GLSLDecompiler::FClamp, + &GLSLDecompiler::Min<Type::Float>, + &GLSLDecompiler::Max<Type::Float>, + &GLSLDecompiler::FCos, + &GLSLDecompiler::FSin, + &GLSLDecompiler::FExp2, + &GLSLDecompiler::FLog2, + &GLSLDecompiler::FInverseSqrt, + &GLSLDecompiler::FSqrt, + &GLSLDecompiler::FRoundEven, + &GLSLDecompiler::FFloor, + &GLSLDecompiler::FCeil, + &GLSLDecompiler::FTrunc, + &GLSLDecompiler::FCastInteger<Type::Int>, + &GLSLDecompiler::FCastInteger<Type::Uint>, + + &GLSLDecompiler::Add<Type::Int>, + &GLSLDecompiler::Mul<Type::Int>, + &GLSLDecompiler::Div<Type::Int>, + &GLSLDecompiler::Negate<Type::Int>, + &GLSLDecompiler::Absolute<Type::Int>, + &GLSLDecompiler::Min<Type::Int>, + &GLSLDecompiler::Max<Type::Int>, + + &GLSLDecompiler::ICastFloat, + &GLSLDecompiler::ICastUnsigned, + &GLSLDecompiler::LogicalShiftLeft<Type::Int>, + &GLSLDecompiler::ILogicalShiftRight, + &GLSLDecompiler::IArithmeticShiftRight, + &GLSLDecompiler::BitwiseAnd<Type::Int>, + &GLSLDecompiler::BitwiseOr<Type::Int>, + &GLSLDecompiler::BitwiseXor<Type::Int>, + &GLSLDecompiler::BitwiseNot<Type::Int>, + &GLSLDecompiler::BitfieldInsert<Type::Int>, + &GLSLDecompiler::BitfieldExtract<Type::Int>, + &GLSLDecompiler::BitCount<Type::Int>, + + &GLSLDecompiler::Add<Type::Uint>, + &GLSLDecompiler::Mul<Type::Uint>, + &GLSLDecompiler::Div<Type::Uint>, + &GLSLDecompiler::Min<Type::Uint>, + &GLSLDecompiler::Max<Type::Uint>, + &GLSLDecompiler::UCastFloat, + &GLSLDecompiler::UCastSigned, + &GLSLDecompiler::LogicalShiftLeft<Type::Uint>, + &GLSLDecompiler::UShiftRight, + &GLSLDecompiler::UShiftRight, + &GLSLDecompiler::BitwiseAnd<Type::Uint>, + &GLSLDecompiler::BitwiseOr<Type::Uint>, + &GLSLDecompiler::BitwiseXor<Type::Uint>, + &GLSLDecompiler::BitwiseNot<Type::Uint>, + &GLSLDecompiler::BitfieldInsert<Type::Uint>, + &GLSLDecompiler::BitfieldExtract<Type::Uint>, + &GLSLDecompiler::BitCount<Type::Uint>, + + &GLSLDecompiler::Add<Type::HalfFloat>, + &GLSLDecompiler::Mul<Type::HalfFloat>, + &GLSLDecompiler::Fma<Type::HalfFloat>, + &GLSLDecompiler::Absolute<Type::HalfFloat>, + &GLSLDecompiler::HNegate, + &GLSLDecompiler::HMergeF32, + &GLSLDecompiler::HMergeH0, + &GLSLDecompiler::HMergeH1, + &GLSLDecompiler::HPack2, + + &GLSLDecompiler::LogicalAssign, + &GLSLDecompiler::LogicalAnd, + &GLSLDecompiler::LogicalOr, + &GLSLDecompiler::LogicalXor, + &GLSLDecompiler::LogicalNegate, + &GLSLDecompiler::LogicalPick2, + &GLSLDecompiler::LogicalAll2, + &GLSLDecompiler::LogicalAny2, + + &GLSLDecompiler::LogicalLessThan<Type::Float>, + &GLSLDecompiler::LogicalEqual<Type::Float>, + &GLSLDecompiler::LogicalLessEqual<Type::Float>, + &GLSLDecompiler::LogicalGreaterThan<Type::Float>, + &GLSLDecompiler::LogicalNotEqual<Type::Float>, + &GLSLDecompiler::LogicalGreaterEqual<Type::Float>, + &GLSLDecompiler::LogicalFIsNan, + + &GLSLDecompiler::LogicalLessThan<Type::Int>, + &GLSLDecompiler::LogicalEqual<Type::Int>, + &GLSLDecompiler::LogicalLessEqual<Type::Int>, + &GLSLDecompiler::LogicalGreaterThan<Type::Int>, + &GLSLDecompiler::LogicalNotEqual<Type::Int>, + &GLSLDecompiler::LogicalGreaterEqual<Type::Int>, + + &GLSLDecompiler::LogicalLessThan<Type::Uint>, + 
&GLSLDecompiler::LogicalEqual<Type::Uint>, + &GLSLDecompiler::LogicalLessEqual<Type::Uint>, + &GLSLDecompiler::LogicalGreaterThan<Type::Uint>, + &GLSLDecompiler::LogicalNotEqual<Type::Uint>, + &GLSLDecompiler::LogicalGreaterEqual<Type::Uint>, + + &GLSLDecompiler::Logical2HLessThan, + &GLSLDecompiler::Logical2HEqual, + &GLSLDecompiler::Logical2HLessEqual, + &GLSLDecompiler::Logical2HGreaterThan, + &GLSLDecompiler::Logical2HNotEqual, + &GLSLDecompiler::Logical2HGreaterEqual, + + &GLSLDecompiler::F4Texture, + &GLSLDecompiler::F4TextureLod, + &GLSLDecompiler::F4TextureGather, + &GLSLDecompiler::F4TextureQueryDimensions, + &GLSLDecompiler::F4TextureQueryLod, + &GLSLDecompiler::F4TexelFetch, + + &GLSLDecompiler::Branch, + &GLSLDecompiler::PushFlowStack, + &GLSLDecompiler::PopFlowStack, + &GLSLDecompiler::Exit, + &GLSLDecompiler::Discard, + + &GLSLDecompiler::EmitVertex, + &GLSLDecompiler::EndPrimitive, + + &GLSLDecompiler::YNegate, + }; - shader.AddLine("switch (jmp_to) {"); + std::string GetRegister(u32 index) const { + return GetDeclarationWithSuffix(index, "gpr"); + } - for (auto label : labels) { - shader.AddLine("case " + std::to_string(label) + "u: {"); - ++shader.scope; + std::string GetPredicate(Tegra::Shader::Pred pred) const { + return GetDeclarationWithSuffix(static_cast<u32>(pred), "pred"); + } - const auto next_it = labels.lower_bound(label + 1); - const u32 next_label = next_it == labels.end() ? subroutine.end : *next_it; + std::string GetInputAttribute(Attribute::Index attribute) const { + const auto index{static_cast<u32>(attribute) - + static_cast<u32>(Attribute::Index::Attribute_0)}; + return GetDeclarationWithSuffix(index, "input_attr"); + } - const u32 compile_end = CompileRange(label, next_label); - if (compile_end > next_label && compile_end != PROGRAM_END) { - // This happens only when there is a label inside a IF/LOOP block - shader.AddLine(" jmp_to = " + std::to_string(compile_end) + "u; break; }"); - labels.emplace(compile_end); - } + std::string GetOutputAttribute(Attribute::Index attribute) const { + const auto index{static_cast<u32>(attribute) - + static_cast<u32>(Attribute::Index::Attribute_0)}; + return GetDeclarationWithSuffix(index, "output_attr"); + } - --shader.scope; - shader.AddLine('}'); - } + std::string GetConstBuffer(u32 index) const { + return GetDeclarationWithSuffix(index, "cbuf"); + } - shader.AddLine("default: return false;"); - shader.AddLine('}'); + std::string GetGlobalMemory(const GlobalMemoryBase& descriptor) const { + return fmt::format("gmem_{}_{}_{}", descriptor.cbuf_index, descriptor.cbuf_offset, suffix); + } - --shader.scope; - shader.AddLine('}'); + std::string GetGlobalMemoryBlock(const GlobalMemoryBase& descriptor) const { + return fmt::format("gmem_block_{}_{}_{}", descriptor.cbuf_index, descriptor.cbuf_offset, + suffix); + } - shader.AddLine("return false;"); - } + std::string GetConstBufferBlock(u32 index) const { + return GetDeclarationWithSuffix(index, "cbuf_block"); + } - --shader.scope; - shader.AddLine("}\n"); + std::string GetLocalMemory() const { + return "lmem_" + suffix; + } - DEBUG_ASSERT(shader.scope == 0); - } + std::string GetInternalFlag(InternalFlag flag) const { + constexpr std::array<const char*, 4> InternalFlagNames = {"zero_flag", "sign_flag", + "carry_flag", "overflow_flag"}; + const auto index = static_cast<u32>(flag); + ASSERT(index < static_cast<u32>(InternalFlag::Amount)); - GenerateDeclarations(); + return std::string(InternalFlagNames[index]) + '_' + suffix; } - /// Add declarations for registers - void 
GenerateDeclarations() { - regs.GenerateDeclarations(suffix); + std::string GetSampler(const Sampler& sampler) const { + return GetDeclarationWithSuffix(static_cast<u32>(sampler.GetIndex()), "sampler"); + } - for (const auto& pred : declr_predicates) { - declarations.AddLine("bool " + pred + " = false;"); - } - declarations.AddNewLine(); + std::string GetDeclarationWithSuffix(u32 index, const std::string& name) const { + return name + '_' + std::to_string(index) + '_' + suffix; } -private: - const std::set<Subroutine>& subroutines; - const ProgramCode& program_code; - Tegra::Shader::Header header; - const u32 main_offset; - Maxwell3D::Regs::ShaderStage stage; - const std::string& suffix; - u64 local_memory_size; - std::size_t shader_length; - - ShaderWriter shader; - ShaderWriter declarations; - GLSLRegisterManager regs{shader, declarations, stage, suffix, header}; - - // Declarations - std::set<std::string> declr_predicates; -}; // namespace OpenGL::GLShader::Decompiler + const ShaderIR& ir; + const ShaderStage stage; + const std::string suffix; + const Header header; + + ShaderWriter code; +}; std::string GetCommonDeclarations() { - return fmt::format("#define MAX_CONSTBUFFER_ELEMENTS {}\n", - RasterizerOpenGL::MaxConstbufferSize / sizeof(GLvec4)); + const auto cbuf = std::to_string(MAX_CONSTBUFFER_ELEMENTS); + const auto gmem = std::to_string(MAX_GLOBALMEMORY_ELEMENTS); + return "#define MAX_CONSTBUFFER_ELEMENTS " + cbuf + "\n" + + "#define MAX_GLOBALMEMORY_ELEMENTS " + gmem + "\n" + + "#define ftoi floatBitsToInt\n" + "#define ftou floatBitsToUint\n" + "#define itof intBitsToFloat\n" + "#define utof uintBitsToFloat\n\n" + "float fromHalf2(vec2 pair) {\n" + " return utof(packHalf2x16(pair));\n" + "}\n\n" + "vec2 toHalf2(float value) {\n" + " return unpackHalf2x16(ftou(value));\n" + "}\n"; } -std::optional<ProgramResult> DecompileProgram(const ProgramCode& program_code, u32 main_offset, - Maxwell3D::Regs::ShaderStage stage, - const std::string& suffix) { - try { - ControlFlowAnalyzer analyzer(program_code, main_offset, suffix); - const auto subroutines = analyzer.GetSubroutines(); - GLSLGenerator generator(subroutines, program_code, main_offset, stage, suffix, - analyzer.GetShaderLength()); - return ProgramResult{generator.GetShaderCode(), generator.GetEntries()}; - } catch (const DecompileFail& exception) { - LOG_ERROR(HW_GPU, "Shader decompilation failed: {}", exception.what()); - } - return {}; +ProgramResult Decompile(const ShaderIR& ir, Maxwell::ShaderStage stage, const std::string& suffix) { + GLSLDecompiler decompiler(ir, stage, suffix); + decompiler.Decompile(); + return {decompiler.GetResult(), decompiler.GetShaderEntries()}; } -} // namespace OpenGL::GLShader::Decompiler +} // namespace OpenGL::GLShader
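The operation_decompilers table above drives code generation: every IR operation is dispatched through a constexpr array of pointer-to-member functions indexed by the operation's position in the enumeration. A minimal standalone sketch of that pattern (all names here are illustrative and not part of the yuzu codebase):

#include <array>
#include <cstdio>
#include <string>

enum class Op { Add, Mul, Count };

class Emitter {
public:
    std::string Visit(Op op) {
        // Look up the handler for this operation and invoke it on *this.
        return (this->*table[static_cast<std::size_t>(op)])();
    }

private:
    std::string Add() { return "a + b"; }
    std::string Mul() { return "a * b"; }

    using Handler = std::string (Emitter::*)();
    // One entry per Op value, in declaration order, mirroring operation_decompilers.
    static constexpr std::array<Handler, static_cast<std::size_t>(Op::Count)> table = {
        &Emitter::Add,
        &Emitter::Mul,
    };
};

int main() {
    Emitter emitter;
    std::printf("%s\n", emitter.Visit(Op::Mul).c_str()); // prints "a * b"
}

The real table has to stay in exact sync with the IR's operation enumeration, which is why the entries above appear as one long, ordered initializer.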
\ No newline at end of file diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.h b/src/video_core/renderer_opengl/gl_shader_decompiler.h index d01a4a7ee..0856a1361 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.h +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.h @@ -5,21 +5,106 @@ #pragma once #include <array> -#include <functional> -#include <optional> #include <string> +#include <utility> +#include <vector> #include "common/common_types.h" #include "video_core/engines/maxwell_3d.h" -#include "video_core/renderer_opengl/gl_shader_gen.h" +#include "video_core/shader/shader_ir.h" -namespace OpenGL::GLShader::Decompiler { +namespace VideoCommon::Shader { +class ShaderIR; +} -using Tegra::Engines::Maxwell3D; +namespace OpenGL::GLShader { + +using Maxwell = Tegra::Engines::Maxwell3D::Regs; + +class ConstBufferEntry : public VideoCommon::Shader::ConstBuffer { +public: + explicit ConstBufferEntry(const VideoCommon::Shader::ConstBuffer& entry, + Maxwell::ShaderStage stage, const std::string& name, u32 index) + : VideoCommon::Shader::ConstBuffer{entry}, stage{stage}, name{name}, index{index} {} + + const std::string& GetName() const { + return name; + } + + Maxwell::ShaderStage GetStage() const { + return stage; + } + + u32 GetIndex() const { + return index; + } + +private: + std::string name; + Maxwell::ShaderStage stage{}; + u32 index{}; +}; + +class SamplerEntry : public VideoCommon::Shader::Sampler { +public: + explicit SamplerEntry(const VideoCommon::Shader::Sampler& entry, Maxwell::ShaderStage stage, + const std::string& name) + : VideoCommon::Shader::Sampler{entry}, stage{stage}, name{name} {} + + const std::string& GetName() const { + return name; + } + + Maxwell::ShaderStage GetStage() const { + return stage; + } + +private: + std::string name; + Maxwell::ShaderStage stage{}; +}; + +class GlobalMemoryEntry { +public: + explicit GlobalMemoryEntry(u32 cbuf_index, u32 cbuf_offset, Maxwell::ShaderStage stage, + std::string name) + : cbuf_index{cbuf_index}, cbuf_offset{cbuf_offset}, stage{stage}, name{std::move(name)} {} + + u32 GetCbufIndex() const { + return cbuf_index; + } + + u32 GetCbufOffset() const { + return cbuf_offset; + } + + const std::string& GetName() const { + return name; + } + + Maxwell::ShaderStage GetStage() const { + return stage; + } + +private: + u32 cbuf_index{}; + u32 cbuf_offset{}; + Maxwell::ShaderStage stage{}; + std::string name; +}; + +struct ShaderEntries { + std::vector<ConstBufferEntry> const_buffers; + std::vector<SamplerEntry> samplers; + std::vector<GlobalMemoryEntry> global_memory_entries; + std::array<bool, Maxwell::NumClipDistances> clip_distances{}; + std::size_t shader_length{}; +}; + +using ProgramResult = std::pair<std::string, ShaderEntries>; std::string GetCommonDeclarations(); -std::optional<ProgramResult> DecompileProgram(const ProgramCode& program_code, u32 main_offset, - Maxwell3D::Regs::ShaderStage stage, - const std::string& suffix); +ProgramResult Decompile(const VideoCommon::Shader::ShaderIR& ir, Maxwell::ShaderStage stage, + const std::string& suffix); -} // namespace OpenGL::GLShader::Decompiler +} // namespace OpenGL::GLShader
\ No newline at end of file diff --git a/src/video_core/renderer_opengl/gl_shader_gen.cpp b/src/video_core/renderer_opengl/gl_shader_gen.cpp index 5d0819dc5..04e1db911 100644 --- a/src/video_core/renderer_opengl/gl_shader_gen.cpp +++ b/src/video_core/renderer_opengl/gl_shader_gen.cpp @@ -7,63 +7,57 @@ #include "video_core/engines/maxwell_3d.h" #include "video_core/renderer_opengl/gl_shader_decompiler.h" #include "video_core/renderer_opengl/gl_shader_gen.h" +#include "video_core/shader/shader_ir.h" namespace OpenGL::GLShader { using Tegra::Engines::Maxwell3D; +using VideoCommon::Shader::ProgramCode; +using VideoCommon::Shader::ShaderIR; static constexpr u32 PROGRAM_OFFSET{10}; ProgramResult GenerateVertexShader(const ShaderSetup& setup) { - std::string out = "#version 430 core\n"; - out += "#extension GL_ARB_separate_shader_objects : enable\n\n"; const std::string id = fmt::format("{:016x}", setup.program.unique_identifier); + + std::string out = "#extension GL_ARB_separate_shader_objects : enable\n\n"; out += "// Shader Unique Id: VS" + id + "\n\n"; - out += Decompiler::GetCommonDeclarations(); + out += GetCommonDeclarations(); out += R"( - layout (location = 0) out vec4 position; -layout(std140) uniform vs_config { +layout (std140, binding = EMULATION_UBO_BINDING) uniform vs_config { vec4 viewport_flip; uvec4 config_pack; // instance_id, flip_stage, y_direction, padding uvec4 alpha_test; }; -)"; - - if (setup.IsDualProgram()) { - out += "bool exec_vertex_b();\n"; - } - ProgramResult program = - Decompiler::DecompileProgram(setup.program.code, PROGRAM_OFFSET, - Maxwell3D::Regs::ShaderStage::Vertex, "vertex") - .value_or(ProgramResult()); +)"; + ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET); + ProgramResult program = Decompile(program_ir, Maxwell3D::Regs::ShaderStage::Vertex, "vertex"); out += program.first; if (setup.IsDualProgram()) { + ShaderIR program_ir_b(setup.program.code_b, PROGRAM_OFFSET); ProgramResult program_b = - Decompiler::DecompileProgram(setup.program.code_b, PROGRAM_OFFSET, - Maxwell3D::Regs::ShaderStage::Vertex, "vertex_b") - .value_or(ProgramResult()); + Decompile(program_ir_b, Maxwell3D::Regs::ShaderStage::Vertex, "vertex_b"); + out += program_b.first; } out += R"( - void main() { position = vec4(0.0, 0.0, 0.0, 0.0); - exec_vertex(); + execute_vertex(); )"; if (setup.IsDualProgram()) { - out += " exec_vertex_b();"; + out += " execute_vertex_b();"; } out += R"( - // Check if the flip stage is VertexB // Config pack's second value is flip_stage if (config_pack[1] == 1) { @@ -77,73 +71,62 @@ void main() { if (config_pack[1] == 1) { position.w = 1.0; } -} - -)"; +})"; return {out, program.second}; } ProgramResult GenerateGeometryShader(const ShaderSetup& setup) { - // Version is intentionally skipped in shader generation, it's added by the lazy compilation. 
- std::string out = "#extension GL_ARB_separate_shader_objects : enable\n\n"; const std::string id = fmt::format("{:016x}", setup.program.unique_identifier); + + std::string out = "#extension GL_ARB_separate_shader_objects : enable\n\n"; out += "// Shader Unique Id: GS" + id + "\n\n"; - out += Decompiler::GetCommonDeclarations(); - out += "bool exec_geometry();\n"; + out += GetCommonDeclarations(); - ProgramResult program = - Decompiler::DecompileProgram(setup.program.code, PROGRAM_OFFSET, - Maxwell3D::Regs::ShaderStage::Geometry, "geometry") - .value_or(ProgramResult()); out += R"( -out gl_PerVertex { - vec4 gl_Position; -}; - layout (location = 0) in vec4 gs_position[]; layout (location = 0) out vec4 position; -layout (std140) uniform gs_config { +layout (std140, binding = EMULATION_UBO_BINDING) uniform gs_config { vec4 viewport_flip; uvec4 config_pack; // instance_id, flip_stage, y_direction, padding uvec4 alpha_test; }; -void main() { - exec_geometry(); -} - )"; + ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET); + ProgramResult program = + Decompile(program_ir, Maxwell3D::Regs::ShaderStage::Geometry, "geometry"); out += program.first; + + out += R"( +void main() { + execute_geometry(); +};)"; + return {out, program.second}; } ProgramResult GenerateFragmentShader(const ShaderSetup& setup) { - std::string out = "#version 430 core\n"; - out += "#extension GL_ARB_separate_shader_objects : enable\n\n"; const std::string id = fmt::format("{:016x}", setup.program.unique_identifier); + + std::string out = "#extension GL_ARB_separate_shader_objects : enable\n\n"; out += "// Shader Unique Id: FS" + id + "\n\n"; - out += Decompiler::GetCommonDeclarations(); - out += "bool exec_fragment();\n"; + out += GetCommonDeclarations(); - ProgramResult program = - Decompiler::DecompileProgram(setup.program.code, PROGRAM_OFFSET, - Maxwell3D::Regs::ShaderStage::Fragment, "fragment") - .value_or(ProgramResult()); out += R"( -layout(location = 0) out vec4 FragColor0; -layout(location = 1) out vec4 FragColor1; -layout(location = 2) out vec4 FragColor2; -layout(location = 3) out vec4 FragColor3; -layout(location = 4) out vec4 FragColor4; -layout(location = 5) out vec4 FragColor5; -layout(location = 6) out vec4 FragColor6; -layout(location = 7) out vec4 FragColor7; +layout (location = 0) out vec4 FragColor0; +layout (location = 1) out vec4 FragColor1; +layout (location = 2) out vec4 FragColor2; +layout (location = 3) out vec4 FragColor3; +layout (location = 4) out vec4 FragColor4; +layout (location = 5) out vec4 FragColor5; +layout (location = 6) out vec4 FragColor6; +layout (location = 7) out vec4 FragColor7; layout (location = 0) in vec4 position; -layout (std140) uniform fs_config { +layout (std140, binding = EMULATION_UBO_BINDING) uniform fs_config { vec4 viewport_flip; uvec4 config_pack; // instance_id, flip_stage, y_direction, padding uvec4 alpha_test; @@ -173,12 +156,20 @@ bool AlphaFunc(in float value) { } } +)"; + ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET); + ProgramResult program = + Decompile(program_ir, Maxwell3D::Regs::ShaderStage::Fragment, "fragment"); + + out += program.first; + + out += R"( void main() { - exec_fragment(); + execute_fragment(); } )"; - out += program.first; return {out, program.second}; } -} // namespace OpenGL::GLShader + +} // namespace OpenGL::GLShader
\ No newline at end of file diff --git a/src/video_core/renderer_opengl/gl_shader_gen.h b/src/video_core/renderer_opengl/gl_shader_gen.h index fcc20d3b4..ac5e6917b 100644 --- a/src/video_core/renderer_opengl/gl_shader_gen.h +++ b/src/video_core/renderer_opengl/gl_shader_gen.h @@ -10,164 +10,12 @@ #include "common/common_types.h" #include "video_core/engines/shader_bytecode.h" +#include "video_core/renderer_opengl/gl_shader_decompiler.h" +#include "video_core/shader/shader_ir.h" namespace OpenGL::GLShader { -constexpr std::size_t MAX_PROGRAM_CODE_LENGTH{0x1000}; -using ProgramCode = std::vector<u64>; - -enum : u32 { POSITION_VARYING_LOCATION = 0, GENERIC_VARYING_START_LOCATION = 1 }; - -class ConstBufferEntry { - using Maxwell = Tegra::Engines::Maxwell3D::Regs; - -public: - void MarkAsUsed(u64 index, u64 offset, Maxwell::ShaderStage stage) { - is_used = true; - this->index = static_cast<unsigned>(index); - this->stage = stage; - max_offset = std::max(max_offset, static_cast<unsigned>(offset)); - } - - void MarkAsUsedIndirect(u64 index, Maxwell::ShaderStage stage) { - is_used = true; - is_indirect = true; - this->index = static_cast<unsigned>(index); - this->stage = stage; - } - - bool IsUsed() const { - return is_used; - } - - bool IsIndirect() const { - return is_indirect; - } - - unsigned GetIndex() const { - return index; - } - - unsigned GetSize() const { - return max_offset + 1; - } - - std::string GetName() const { - return BufferBaseNames[static_cast<std::size_t>(stage)] + std::to_string(index); - } - - u32 GetHash() const { - return (static_cast<u32>(stage) << 16) | index; - } - -private: - static constexpr std::array<const char*, Maxwell::MaxShaderStage> BufferBaseNames = { - "buffer_vs_c", "buffer_tessc_c", "buffer_tesse_c", "buffer_gs_c", "buffer_fs_c", - }; - - bool is_used{}; - bool is_indirect{}; - unsigned index{}; - unsigned max_offset{}; - Maxwell::ShaderStage stage; -}; - -class SamplerEntry { - using Maxwell = Tegra::Engines::Maxwell3D::Regs; - -public: - SamplerEntry(Maxwell::ShaderStage stage, std::size_t offset, std::size_t index, - Tegra::Shader::TextureType type, bool is_array, bool is_shadow) - : offset(offset), stage(stage), sampler_index(index), type(type), is_array(is_array), - is_shadow(is_shadow) {} - - std::size_t GetOffset() const { - return offset; - } - - std::size_t GetIndex() const { - return sampler_index; - } - - Maxwell::ShaderStage GetStage() const { - return stage; - } - - std::string GetName() const { - return std::string(TextureSamplerNames[static_cast<std::size_t>(stage)]) + '_' + - std::to_string(sampler_index); - } - - std::string GetTypeString() const { - using Tegra::Shader::TextureType; - std::string glsl_type; - - switch (type) { - case TextureType::Texture1D: - glsl_type = "sampler1D"; - break; - case TextureType::Texture2D: - glsl_type = "sampler2D"; - break; - case TextureType::Texture3D: - glsl_type = "sampler3D"; - break; - case TextureType::TextureCube: - glsl_type = "samplerCube"; - break; - default: - UNIMPLEMENTED(); - } - if (is_array) - glsl_type += "Array"; - if (is_shadow) - glsl_type += "Shadow"; - return glsl_type; - } - - Tegra::Shader::TextureType GetType() const { - return type; - } - - bool IsArray() const { - return is_array; - } - - bool IsShadow() const { - return is_shadow; - } - - u32 GetHash() const { - return (static_cast<u32>(stage) << 16) | static_cast<u32>(sampler_index); - } - - static std::string GetArrayName(Maxwell::ShaderStage stage) { - return TextureSamplerNames[static_cast<std::size_t>(stage)]; - } - 
-private: - static constexpr std::array<const char*, Maxwell::MaxShaderStage> TextureSamplerNames = { - "tex_vs", "tex_tessc", "tex_tesse", "tex_gs", "tex_fs", - }; - - /// Offset in TSC memory from which to read the sampler object, as specified by the sampling - /// instruction. - std::size_t offset; - Maxwell::ShaderStage stage; ///< Shader stage where this sampler was used. - std::size_t sampler_index; ///< Value used to index into the generated GLSL sampler array. - Tegra::Shader::TextureType type; ///< The type used to sample this texture (Texture2D, etc) - bool is_array; ///< Whether the texture is being sampled as an array texture or not. - bool is_shadow; ///< Whether the texture is being sampled as a depth texture or not. -}; - -struct ShaderEntries { - std::vector<ConstBufferEntry> const_buffer_entries; - std::vector<SamplerEntry> texture_samplers; - std::array<bool, Tegra::Engines::Maxwell3D::Regs::NumClipDistances> clip_distances; - std::size_t shader_length; -}; - -using ProgramResult = std::pair<std::string, ShaderEntries>; +using VideoCommon::Shader::ProgramCode; struct ShaderSetup { explicit ShaderSetup(ProgramCode program_code) { diff --git a/src/video_core/shader/decode.cpp b/src/video_core/shader/decode.cpp new file mode 100644 index 000000000..812983a99 --- /dev/null +++ b/src/video_core/shader/decode.cpp @@ -0,0 +1,206 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <cstring> +#include <set> + +#include <fmt/format.h> + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/engines/shader_header.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +namespace { + +/// Merges exit method of two parallel branches. +constexpr ExitMethod ParallelExit(ExitMethod a, ExitMethod b) { + if (a == ExitMethod::Undetermined) { + return b; + } + if (b == ExitMethod::Undetermined) { + return a; + } + if (a == b) { + return a; + } + return ExitMethod::Conditional; +} + +/** + * Returns whether the instruction at the specified offset is a 'sched' instruction. + * Sched instructions always appear before a sequence of 3 instructions. + */ +constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) { + constexpr u32 SchedPeriod = 4; + u32 absolute_offset = offset - main_offset; + + return (absolute_offset % SchedPeriod) == 0; +} + +} // namespace + +void ShaderIR::Decode() { + std::memcpy(&header, program_code.data(), sizeof(Tegra::Shader::Header)); + + std::set<u32> labels; + const ExitMethod exit_method = Scan(main_offset, MAX_PROGRAM_LENGTH, labels); + if (exit_method != ExitMethod::AlwaysEnd) { + UNREACHABLE_MSG("Program does not always end"); + } + + if (labels.empty()) { + basic_blocks.insert({main_offset, DecodeRange(main_offset, MAX_PROGRAM_LENGTH)}); + return; + } + + labels.insert(main_offset); + + for (const u32 label : labels) { + const auto next_it = labels.lower_bound(label + 1); + const u32 next_label = next_it == labels.end() ? 
MAX_PROGRAM_LENGTH : *next_it; + + basic_blocks.insert({label, DecodeRange(label, next_label)}); + } +} + +ExitMethod ShaderIR::Scan(u32 begin, u32 end, std::set<u32>& labels) { + const auto [iter, inserted] = + exit_method_map.emplace(std::make_pair(begin, end), ExitMethod::Undetermined); + ExitMethod& exit_method = iter->second; + if (!inserted) + return exit_method; + + for (u32 offset = begin; offset != end && offset != MAX_PROGRAM_LENGTH; ++offset) { + coverage_begin = std::min(coverage_begin, offset); + coverage_end = std::max(coverage_end, offset + 1); + + const Instruction instr = {program_code[offset]}; + const auto opcode = OpCode::Decode(instr); + if (!opcode) + continue; + switch (opcode->get().GetId()) { + case OpCode::Id::EXIT: { + // The EXIT instruction can be predicated, which means that the shader can conditionally + // end on this instruction. We have to consider the case where the condition is not met + // and check the exit method of that other basic block. + using Tegra::Shader::Pred; + if (instr.pred.pred_index == static_cast<u64>(Pred::UnusedIndex)) { + return exit_method = ExitMethod::AlwaysEnd; + } else { + const ExitMethod not_met = Scan(offset + 1, end, labels); + return exit_method = ParallelExit(ExitMethod::AlwaysEnd, not_met); + } + } + case OpCode::Id::BRA: { + const u32 target = offset + instr.bra.GetBranchTarget(); + labels.insert(target); + const ExitMethod no_jmp = Scan(offset + 1, end, labels); + const ExitMethod jmp = Scan(target, end, labels); + return exit_method = ParallelExit(no_jmp, jmp); + } + case OpCode::Id::SSY: + case OpCode::Id::PBK: { + // The SSY and PBK use a similar encoding as the BRA instruction. + UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, + "Constant buffer branching is not supported"); + const u32 target = offset + instr.bra.GetBranchTarget(); + labels.insert(target); + // Continue scanning for an exit method. + break; + } + } + } + return exit_method = ExitMethod::AlwaysReturn; +} + +BasicBlock ShaderIR::DecodeRange(u32 begin, u32 end) { + BasicBlock basic_block; + for (u32 pc = begin; pc < (begin > end ? MAX_PROGRAM_LENGTH : end);) { + pc = DecodeInstr(basic_block, pc); + } + return basic_block; +} + +u32 ShaderIR::DecodeInstr(BasicBlock& bb, u32 pc) { + // Ignore sched instructions when generating code. 
+ if (IsSchedInstruction(pc, main_offset)) { + return pc + 1; + } + + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + // Decoding failure + if (!opcode) { + UNIMPLEMENTED_MSG("Unhandled instruction: {0:x}", instr.value); + return pc + 1; + } + + bb.push_back( + Comment(fmt::format("{}: {} (0x{:016x})", pc, opcode->get().GetName(), instr.value))); + + using Tegra::Shader::Pred; + UNIMPLEMENTED_IF_MSG(instr.pred.full_pred == Pred::NeverExecute, + "NeverExecute predicate not implemented"); + + static const std::map<OpCode::Type, u32 (ShaderIR::*)(BasicBlock&, const BasicBlock&, u32)> + decoders = { + {OpCode::Type::Arithmetic, &ShaderIR::DecodeArithmetic}, + {OpCode::Type::ArithmeticImmediate, &ShaderIR::DecodeArithmeticImmediate}, + {OpCode::Type::Bfe, &ShaderIR::DecodeBfe}, + {OpCode::Type::Bfi, &ShaderIR::DecodeBfi}, + {OpCode::Type::Shift, &ShaderIR::DecodeShift}, + {OpCode::Type::ArithmeticInteger, &ShaderIR::DecodeArithmeticInteger}, + {OpCode::Type::ArithmeticIntegerImmediate, &ShaderIR::DecodeArithmeticIntegerImmediate}, + {OpCode::Type::ArithmeticHalf, &ShaderIR::DecodeArithmeticHalf}, + {OpCode::Type::ArithmeticHalfImmediate, &ShaderIR::DecodeArithmeticHalfImmediate}, + {OpCode::Type::Ffma, &ShaderIR::DecodeFfma}, + {OpCode::Type::Hfma2, &ShaderIR::DecodeHfma2}, + {OpCode::Type::Conversion, &ShaderIR::DecodeConversion}, + {OpCode::Type::Memory, &ShaderIR::DecodeMemory}, + {OpCode::Type::FloatSetPredicate, &ShaderIR::DecodeFloatSetPredicate}, + {OpCode::Type::IntegerSetPredicate, &ShaderIR::DecodeIntegerSetPredicate}, + {OpCode::Type::HalfSetPredicate, &ShaderIR::DecodeHalfSetPredicate}, + {OpCode::Type::PredicateSetRegister, &ShaderIR::DecodePredicateSetRegister}, + {OpCode::Type::PredicateSetPredicate, &ShaderIR::DecodePredicateSetPredicate}, + {OpCode::Type::RegisterSetPredicate, &ShaderIR::DecodeRegisterSetPredicate}, + {OpCode::Type::FloatSet, &ShaderIR::DecodeFloatSet}, + {OpCode::Type::IntegerSet, &ShaderIR::DecodeIntegerSet}, + {OpCode::Type::HalfSet, &ShaderIR::DecodeHalfSet}, + {OpCode::Type::Video, &ShaderIR::DecodeVideo}, + {OpCode::Type::Xmad, &ShaderIR::DecodeXmad}, + }; + + std::vector<Node> tmp_block; + if (const auto decoder = decoders.find(opcode->get().GetType()); decoder != decoders.end()) { + pc = (this->*decoder->second)(tmp_block, bb, pc); + } else { + pc = DecodeOther(tmp_block, bb, pc); + } + + // Some instructions (like SSY) don't have a predicate field, they are always unconditionally + // executed. + const bool can_be_predicated = OpCode::IsPredicatedInstruction(opcode->get().GetId()); + const auto pred_index = static_cast<u32>(instr.pred.pred_index); + + if (can_be_predicated && pred_index != static_cast<u32>(Pred::UnusedIndex)) { + bb.push_back( + Conditional(GetPredicate(pred_index, instr.negate_pred != 0), std::move(tmp_block))); + } else { + for (auto& node : tmp_block) { + bb.push_back(std::move(node)); + } + } + + return pc + 1; +} + +} // namespace VideoCommon::Shader
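Decode() above splits the program into basic blocks: Scan() records every branch target as a label, and each block then runs from one label to the next (or to the end of the program). A standalone sketch of that splitting step, assuming a sorted std::set of labels (names are illustrative only):

#include <cstdint>
#include <map>
#include <set>
#include <utility>

using u32 = std::uint32_t;

// Maps each block's starting offset to its [begin, end) range.
std::map<u32, std::pair<u32, u32>> SplitIntoBlocks(u32 main_offset, u32 program_end,
                                                   std::set<u32> labels) {
    std::map<u32, std::pair<u32, u32>> blocks;
    labels.insert(main_offset); // the entry point always starts a block
    for (const u32 label : labels) {
        const auto next_it = labels.lower_bound(label + 1);
        const u32 next_label = next_it == labels.end() ? program_end : *next_it;
        blocks.emplace(label, std::make_pair(label, next_label));
    }
    return blocks;
}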
\ No newline at end of file diff --git a/src/video_core/shader/decode/arithmetic.cpp b/src/video_core/shader/decode/arithmetic.cpp new file mode 100644 index 000000000..51b8d55d4 --- /dev/null +++ b/src/video_core/shader/decode/arithmetic.cpp @@ -0,0 +1,155 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::SubOp; + +u32 ShaderIR::DecodeArithmetic(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + Node op_a = GetRegister(instr.gpr8); + + Node op_b = [&]() -> Node { + if (instr.is_b_imm) { + return GetImmediate19(instr); + } else if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset()); + } + }(); + + switch (opcode->get().GetId()) { + case OpCode::Id::MOV_C: + case OpCode::Id::MOV_R: { + // MOV does not have neither 'abs' nor 'neg' bits. + SetRegister(bb, instr.gpr0, op_b); + break; + } + case OpCode::Id::FMUL_C: + case OpCode::Id::FMUL_R: + case OpCode::Id::FMUL_IMM: { + // FMUL does not have 'abs' bits and only the second operand has a 'neg' bit. + UNIMPLEMENTED_IF_MSG(instr.fmul.tab5cb8_2 != 0, "FMUL tab5cb8_2({}) is not implemented", + instr.fmul.tab5cb8_2.Value()); + UNIMPLEMENTED_IF_MSG( + instr.fmul.tab5c68_0 != 1, "FMUL tab5cb8_0({}) is not implemented", + instr.fmul.tab5c68_0.Value()); // SMO typical sends 1 here which seems to be the default + + op_b = GetOperandAbsNegFloat(op_b, false, instr.fmul.negate_b); + + // TODO(Rodrigo): Should precise be used when there's a postfactor? + Node value = Operation(OperationCode::FMul, PRECISE, op_a, op_b); + + if (instr.fmul.postfactor != 0) { + auto postfactor = static_cast<s32>(instr.fmul.postfactor); + + // Postfactor encoded as 3-bit 1's complement in instruction, interpreted with below + // logic. 
+ if (postfactor >= 4) { + postfactor = 7 - postfactor; + } else { + postfactor = 0 - postfactor; + } + + if (postfactor > 0) { + value = Operation(OperationCode::FMul, NO_PRECISE, value, + Immediate(static_cast<f32>(1 << postfactor))); + } else { + value = Operation(OperationCode::FDiv, NO_PRECISE, value, + Immediate(static_cast<f32>(1 << -postfactor))); + } + } + + value = GetSaturatedFloat(value, instr.alu.saturate_d); + + SetInternalFlagsFromFloat(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::FADD_C: + case OpCode::Id::FADD_R: + case OpCode::Id::FADD_IMM: { + op_a = GetOperandAbsNegFloat(op_a, instr.alu.abs_a, instr.alu.negate_a); + op_b = GetOperandAbsNegFloat(op_b, instr.alu.abs_b, instr.alu.negate_b); + + Node value = Operation(OperationCode::FAdd, PRECISE, op_a, op_b); + value = GetSaturatedFloat(value, instr.alu.saturate_d); + + SetInternalFlagsFromFloat(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::MUFU: { + op_a = GetOperandAbsNegFloat(op_a, instr.alu.abs_a, instr.alu.negate_a); + + Node value = [&]() { + switch (instr.sub_op) { + case SubOp::Cos: + return Operation(OperationCode::FCos, PRECISE, op_a); + case SubOp::Sin: + return Operation(OperationCode::FSin, PRECISE, op_a); + case SubOp::Ex2: + return Operation(OperationCode::FExp2, PRECISE, op_a); + case SubOp::Lg2: + return Operation(OperationCode::FLog2, PRECISE, op_a); + case SubOp::Rcp: + return Operation(OperationCode::FDiv, PRECISE, Immediate(1.0f), op_a); + case SubOp::Rsq: + return Operation(OperationCode::FInverseSqrt, PRECISE, op_a); + case SubOp::Sqrt: + return Operation(OperationCode::FSqrt, PRECISE, op_a); + default: + UNIMPLEMENTED_MSG("Unhandled MUFU sub op={0:x}", + static_cast<unsigned>(instr.sub_op.Value())); + return Immediate(0); + } + }(); + value = GetSaturatedFloat(value, instr.alu.saturate_d); + + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::FMNMX_C: + case OpCode::Id::FMNMX_R: + case OpCode::Id::FMNMX_IMM: { + op_a = GetOperandAbsNegFloat(op_a, instr.alu.abs_a, instr.alu.negate_a); + op_b = GetOperandAbsNegFloat(op_b, instr.alu.abs_b, instr.alu.negate_b); + + const Node condition = GetPredicate(instr.alu.fmnmx.pred, instr.alu.fmnmx.negate_pred != 0); + + const Node min = Operation(OperationCode::FMin, NO_PRECISE, op_a, op_b); + const Node max = Operation(OperationCode::FMax, NO_PRECISE, op_a, op_b); + const Node value = Operation(OperationCode::Select, NO_PRECISE, condition, min, max); + + SetInternalFlagsFromFloat(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::RRO_C: + case OpCode::Id::RRO_R: + case OpCode::Id::RRO_IMM: { + // Currently RRO is only implemented as a register move. + op_b = GetOperandAbsNegFloat(op_b, instr.alu.abs_b, instr.alu.negate_b); + SetRegister(bb, instr.gpr0, op_b); + LOG_WARNING(HW_GPU, "RRO instruction is incomplete"); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled arithmetic instruction: {}", opcode->get().GetName()); + } + + return pc; +} + +} // namespace VideoCommon::Shader
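The FMUL postfactor handling above interprets the raw 3-bit field as a signed power-of-two scale applied after the multiplication: raw values 1-3 divide the product by 2, 4 and 8, while 4-7 multiply it by 8, 4, 2 and 1. A standalone restatement of that decoding (illustrative only):

#include <cstdint>

float ApplyPostfactor(float product, std::uint32_t raw_postfactor) {
    int postfactor = static_cast<int>(raw_postfactor & 7);
    if (postfactor >= 4) {
        postfactor = 7 - postfactor; // 4..7 -> +3..0, scale up
    } else {
        postfactor = -postfactor;    // 0..3 -> 0..-3, scale down
    }
    if (postfactor > 0) {
        return product * static_cast<float>(1 << postfactor);
    }
    return product / static_cast<float>(1 << -postfactor);
}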
\ No newline at end of file diff --git a/src/video_core/shader/decode/arithmetic_half.cpp b/src/video_core/shader/decode/arithmetic_half.cpp new file mode 100644 index 000000000..37eef2bf2 --- /dev/null +++ b/src/video_core/shader/decode/arithmetic_half.cpp @@ -0,0 +1,70 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeArithmeticHalf(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + if (opcode->get().GetId() == OpCode::Id::HADD2_C || + opcode->get().GetId() == OpCode::Id::HADD2_R) { + UNIMPLEMENTED_IF(instr.alu_half.ftz != 0); + } + UNIMPLEMENTED_IF_MSG(instr.alu_half.saturate != 0, "Half float saturation not implemented"); + + const bool negate_a = + opcode->get().GetId() != OpCode::Id::HMUL2_R && instr.alu_half.negate_a != 0; + const bool negate_b = + opcode->get().GetId() != OpCode::Id::HMUL2_C && instr.alu_half.negate_b != 0; + + const Node op_a = GetOperandAbsNegHalf(GetRegister(instr.gpr8), instr.alu_half.abs_a, negate_a); + + // instr.alu_half.type_a + + Node op_b = [&]() { + switch (opcode->get().GetId()) { + case OpCode::Id::HADD2_C: + case OpCode::Id::HMUL2_C: + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset()); + case OpCode::Id::HADD2_R: + case OpCode::Id::HMUL2_R: + return GetRegister(instr.gpr20); + default: + UNREACHABLE(); + return Immediate(0); + } + }(); + op_b = GetOperandAbsNegHalf(op_b, instr.alu_half.abs_b, negate_b); + + Node value = [&]() { + MetaHalfArithmetic meta{true, {instr.alu_half_imm.type_a, instr.alu_half.type_b}}; + switch (opcode->get().GetId()) { + case OpCode::Id::HADD2_C: + case OpCode::Id::HADD2_R: + return Operation(OperationCode::HAdd, meta, op_a, op_b); + case OpCode::Id::HMUL2_C: + case OpCode::Id::HMUL2_R: + return Operation(OperationCode::HMul, meta, op_a, op_b); + default: + UNIMPLEMENTED_MSG("Unhandled half float instruction: {}", opcode->get().GetName()); + return Immediate(0); + } + }(); + value = HalfMerge(GetRegister(instr.gpr0), value, instr.alu_half.merge); + + SetRegister(bb, instr.gpr0, value); + + return pc; +} + +} // namespace VideoCommon::Shader
\ No newline at end of file diff --git a/src/video_core/shader/decode/arithmetic_half_immediate.cpp b/src/video_core/shader/decode/arithmetic_half_immediate.cpp new file mode 100644 index 000000000..7b4f7d284 --- /dev/null +++ b/src/video_core/shader/decode/arithmetic_half_immediate.cpp @@ -0,0 +1,51 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeArithmeticHalfImmediate(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + if (opcode->get().GetId() == OpCode::Id::HADD2_IMM) { + UNIMPLEMENTED_IF(instr.alu_half_imm.ftz != 0); + } else { + UNIMPLEMENTED_IF(instr.alu_half_imm.precision != Tegra::Shader::HalfPrecision::None); + } + UNIMPLEMENTED_IF_MSG(instr.alu_half_imm.saturate != 0, + "Half float immediate saturation not implemented"); + + Node op_a = GetRegister(instr.gpr8); + op_a = GetOperandAbsNegHalf(op_a, instr.alu_half_imm.abs_a, instr.alu_half_imm.negate_a); + + const Node op_b = UnpackHalfImmediate(instr, true); + + Node value = [&]() { + MetaHalfArithmetic meta{true, {instr.alu_half_imm.type_a}}; + switch (opcode->get().GetId()) { + case OpCode::Id::HADD2_IMM: + return Operation(OperationCode::HAdd, meta, op_a, op_b); + case OpCode::Id::HMUL2_IMM: + return Operation(OperationCode::HMul, meta, op_a, op_b); + default: + UNREACHABLE(); + return Immediate(0); + } + }(); + value = HalfMerge(GetRegister(instr.gpr0), value, instr.alu_half_imm.merge); + + SetRegister(bb, instr.gpr0, value); + + return pc; +} + +} // namespace VideoCommon::Shader
\ No newline at end of file diff --git a/src/video_core/shader/decode/arithmetic_immediate.cpp b/src/video_core/shader/decode/arithmetic_immediate.cpp new file mode 100644 index 000000000..4fd3db54e --- /dev/null +++ b/src/video_core/shader/decode/arithmetic_immediate.cpp @@ -0,0 +1,52 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeArithmeticImmediate(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + switch (opcode->get().GetId()) { + case OpCode::Id::MOV32_IMM: { + SetRegister(bb, instr.gpr0, GetImmediate32(instr)); + break; + } + case OpCode::Id::FMUL32_IMM: { + Node value = + Operation(OperationCode::FMul, PRECISE, GetRegister(instr.gpr8), GetImmediate32(instr)); + value = GetSaturatedFloat(value, instr.fmul32.saturate); + + SetInternalFlagsFromFloat(bb, value, instr.op_32.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::FADD32I: { + const Node op_a = GetOperandAbsNegFloat(GetRegister(instr.gpr8), instr.fadd32i.abs_a, + instr.fadd32i.negate_a); + const Node op_b = GetOperandAbsNegFloat(GetImmediate32(instr), instr.fadd32i.abs_b, + instr.fadd32i.negate_b); + + const Node value = Operation(OperationCode::FAdd, PRECISE, op_a, op_b); + SetInternalFlagsFromFloat(bb, value, instr.op_32.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled arithmetic immediate instruction: {}", + opcode->get().GetName()); + } + + return pc; +} + +} // namespace VideoCommon::Shader
\ No newline at end of file diff --git a/src/video_core/shader/decode/arithmetic_integer.cpp b/src/video_core/shader/decode/arithmetic_integer.cpp new file mode 100644 index 000000000..cc9a76a19 --- /dev/null +++ b/src/video_core/shader/decode/arithmetic_integer.cpp @@ -0,0 +1,287 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::IAdd3Height; +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Pred; +using Tegra::Shader::Register; + +u32 ShaderIR::DecodeArithmeticInteger(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + Node op_a = GetRegister(instr.gpr8); + Node op_b = [&]() { + if (instr.is_b_imm) { + return Immediate(instr.alu.GetSignedImm20_20()); + } else if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset()); + } + }(); + + switch (opcode->get().GetId()) { + case OpCode::Id::IADD_C: + case OpCode::Id::IADD_R: + case OpCode::Id::IADD_IMM: { + UNIMPLEMENTED_IF_MSG(instr.alu.saturate_d, "IADD saturation not implemented"); + + op_a = GetOperandAbsNegInteger(op_a, false, instr.alu_integer.negate_a, true); + op_b = GetOperandAbsNegInteger(op_b, false, instr.alu_integer.negate_b, true); + + const Node value = Operation(OperationCode::IAdd, PRECISE, op_a, op_b); + + SetInternalFlagsFromInteger(bb, value, instr.op_32.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::IADD3_C: + case OpCode::Id::IADD3_R: + case OpCode::Id::IADD3_IMM: { + Node op_c = GetRegister(instr.gpr39); + + const auto ApplyHeight = [&](IAdd3Height height, Node value) { + switch (height) { + case IAdd3Height::None: + return value; + case IAdd3Height::LowerHalfWord: + return BitfieldExtract(value, 0, 16); + case IAdd3Height::UpperHalfWord: + return BitfieldExtract(value, 16, 16); + default: + UNIMPLEMENTED_MSG("Unhandled IADD3 height: {}", static_cast<u32>(height)); + return Immediate(0); + } + }; + + if (opcode->get().GetId() == OpCode::Id::IADD3_R) { + op_a = ApplyHeight(instr.iadd3.height_a, op_a); + op_b = ApplyHeight(instr.iadd3.height_b, op_b); + op_c = ApplyHeight(instr.iadd3.height_c, op_c); + } + + op_a = GetOperandAbsNegInteger(op_a, false, instr.iadd3.neg_a, true); + op_b = GetOperandAbsNegInteger(op_b, false, instr.iadd3.neg_b, true); + op_c = GetOperandAbsNegInteger(op_c, false, instr.iadd3.neg_c, true); + + const Node value = [&]() { + const Node add_ab = Operation(OperationCode::IAdd, NO_PRECISE, op_a, op_b); + if (opcode->get().GetId() != OpCode::Id::IADD3_R) { + return Operation(OperationCode::IAdd, NO_PRECISE, add_ab, op_c); + } + const Node shifted = [&]() { + switch (instr.iadd3.mode) { + case Tegra::Shader::IAdd3Mode::RightShift: + // TODO(tech4me): According to + // https://envytools.readthedocs.io/en/latest/hw/graph/maxwell/cuda/int.html?highlight=iadd3 + // The addition between op_a and op_b should be done in uint33, more + // investigation required + return Operation(OperationCode::ILogicalShiftRight, NO_PRECISE, add_ab, + Immediate(16)); + case Tegra::Shader::IAdd3Mode::LeftShift: + return Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, add_ab, + 
Immediate(16)); + default: + return add_ab; + } + }(); + return Operation(OperationCode::IAdd, NO_PRECISE, shifted, op_c); + }(); + + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::ISCADD_C: + case OpCode::Id::ISCADD_R: + case OpCode::Id::ISCADD_IMM: { + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in ISCADD is not implemented"); + + op_a = GetOperandAbsNegInteger(op_a, false, instr.alu_integer.negate_a, true); + op_b = GetOperandAbsNegInteger(op_b, false, instr.alu_integer.negate_b, true); + + const Node shift = Immediate(static_cast<u32>(instr.alu_integer.shift_amount)); + const Node shifted_a = Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, op_a, shift); + const Node value = Operation(OperationCode::IAdd, NO_PRECISE, shifted_a, op_b); + + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::POPC_C: + case OpCode::Id::POPC_R: + case OpCode::Id::POPC_IMM: { + if (instr.popc.invert) { + op_b = Operation(OperationCode::IBitwiseNot, NO_PRECISE, op_b); + } + const Node value = Operation(OperationCode::IBitCount, PRECISE, op_b); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::SEL_C: + case OpCode::Id::SEL_R: + case OpCode::Id::SEL_IMM: { + const Node condition = GetPredicate(instr.sel.pred, instr.sel.neg_pred != 0); + const Node value = Operation(OperationCode::Select, PRECISE, condition, op_a, op_b); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::LOP_C: + case OpCode::Id::LOP_R: + case OpCode::Id::LOP_IMM: { + if (instr.alu.lop.invert_a) + op_a = Operation(OperationCode::IBitwiseNot, NO_PRECISE, op_a); + if (instr.alu.lop.invert_b) + op_b = Operation(OperationCode::IBitwiseNot, NO_PRECISE, op_b); + + WriteLogicOperation(bb, instr.gpr0, instr.alu.lop.operation, op_a, op_b, + instr.alu.lop.pred_result_mode, instr.alu.lop.pred48, + instr.generates_cc); + break; + } + case OpCode::Id::LOP3_C: + case OpCode::Id::LOP3_R: + case OpCode::Id::LOP3_IMM: { + const Node op_c = GetRegister(instr.gpr39); + const Node lut = [&]() { + if (opcode->get().GetId() == OpCode::Id::LOP3_R) { + return Immediate(instr.alu.lop3.GetImmLut28()); + } else { + return Immediate(instr.alu.lop3.GetImmLut48()); + } + }(); + + WriteLop3Instruction(bb, instr.gpr0, op_a, op_b, op_c, lut, instr.generates_cc); + break; + } + case OpCode::Id::IMNMX_C: + case OpCode::Id::IMNMX_R: + case OpCode::Id::IMNMX_IMM: { + UNIMPLEMENTED_IF(instr.imnmx.exchange != Tegra::Shader::IMinMaxExchange::None); + + const bool is_signed = instr.imnmx.is_signed; + + const Node condition = GetPredicate(instr.imnmx.pred, instr.imnmx.negate_pred != 0); + const Node min = SignedOperation(OperationCode::IMin, is_signed, NO_PRECISE, op_a, op_b); + const Node max = SignedOperation(OperationCode::IMax, is_signed, NO_PRECISE, op_a, op_b); + const Node value = Operation(OperationCode::Select, NO_PRECISE, condition, min, max); + + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::LEA_R2: + case OpCode::Id::LEA_R1: + case OpCode::Id::LEA_IMM: + case OpCode::Id::LEA_RZ: + case OpCode::Id::LEA_HI: { + const auto [op_a, op_b, op_c] = [&]() -> std::tuple<Node, Node, Node> { + switch (opcode->get().GetId()) { + case OpCode::Id::LEA_R2: { + return {GetRegister(instr.gpr20), GetRegister(instr.gpr39), + Immediate(static_cast<u32>(instr.lea.r2.entry_a))}; + } + 
+ case OpCode::Id::LEA_R1: { + const bool neg = instr.lea.r1.neg != 0; + return {GetOperandAbsNegInteger(GetRegister(instr.gpr8), false, neg, true), + GetRegister(instr.gpr20), + Immediate(static_cast<u32>(instr.lea.r1.entry_a))}; + } + + case OpCode::Id::LEA_IMM: { + const bool neg = instr.lea.imm.neg != 0; + return {Immediate(static_cast<u32>(instr.lea.imm.entry_a)), + GetOperandAbsNegInteger(GetRegister(instr.gpr8), false, neg, true), + Immediate(static_cast<u32>(instr.lea.imm.entry_b))}; + } + + case OpCode::Id::LEA_RZ: { + const bool neg = instr.lea.rz.neg != 0; + return {GetConstBuffer(instr.lea.rz.cb_index, instr.lea.rz.cb_offset), + GetOperandAbsNegInteger(GetRegister(instr.gpr8), false, neg, true), + Immediate(static_cast<u32>(instr.lea.rz.entry_a))}; + } + + case OpCode::Id::LEA_HI: + default: + UNIMPLEMENTED_MSG("Unhandled LEA subinstruction: {}", opcode->get().GetName()); + + return {Immediate(static_cast<u32>(instr.lea.imm.entry_a)), GetRegister(instr.gpr8), + Immediate(static_cast<u32>(instr.lea.imm.entry_b))}; + } + }(); + + UNIMPLEMENTED_IF_MSG(instr.lea.pred48 != static_cast<u64>(Pred::UnusedIndex), + "Unhandled LEA Predicate"); + + const Node shifted_c = + Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, Immediate(1), op_c); + const Node mul_bc = Operation(OperationCode::IMul, NO_PRECISE, op_b, shifted_c); + const Node value = Operation(OperationCode::IAdd, NO_PRECISE, op_a, mul_bc); + + SetRegister(bb, instr.gpr0, value); + + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled ArithmeticInteger instruction: {}", opcode->get().GetName()); + } + + return pc; +} + +void ShaderIR::WriteLop3Instruction(BasicBlock& bb, Register dest, Node op_a, Node op_b, Node op_c, + Node imm_lut, bool sets_cc) { + constexpr u32 lop_iterations = 32; + const Node one = Immediate(1); + const Node two = Immediate(2); + + Node value{}; + for (u32 i = 0; i < lop_iterations; ++i) { + const Node shift_amount = Immediate(i); + + const Node a = Operation(OperationCode::ILogicalShiftRight, NO_PRECISE, op_c, shift_amount); + const Node pack_0 = Operation(OperationCode::IBitwiseAnd, NO_PRECISE, a, one); + + const Node b = Operation(OperationCode::ILogicalShiftRight, NO_PRECISE, op_b, shift_amount); + const Node c = Operation(OperationCode::IBitwiseAnd, NO_PRECISE, b, one); + const Node pack_1 = Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, c, one); + + const Node d = Operation(OperationCode::ILogicalShiftRight, NO_PRECISE, op_a, shift_amount); + const Node e = Operation(OperationCode::IBitwiseAnd, NO_PRECISE, d, one); + const Node pack_2 = Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, e, two); + + const Node pack_01 = Operation(OperationCode::IBitwiseAnd, NO_PRECISE, pack_0, pack_1); + const Node pack_012 = Operation(OperationCode::IBitwiseAnd, NO_PRECISE, pack_01, pack_2); + + const Node shifted_bit = + Operation(OperationCode::ILogicalShiftRight, NO_PRECISE, imm_lut, pack_012); + const Node bit = Operation(OperationCode::IBitwiseAnd, NO_PRECISE, shifted_bit, one); + + const Node right = + Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, bit, shift_amount); + + if (i > 0) { + value = Operation(OperationCode::IBitwiseOr, NO_PRECISE, value, right); + } else { + value = right; + } + } + + SetInternalFlagsFromInteger(bb, value, sets_cc); + SetRegister(bb, dest, value); +} + +} // namespace VideoCommon::Shader
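WriteLop3Instruction above expands the LOP3 lookup-table operation bit by bit at the IR level. For reference, the conventional LOP3.LUT semantics can be written directly: each result bit is fetched from an 8-bit truth table indexed by the matching bits of a (most significant), b and c (least significant). Standalone illustration, not yuzu code:

#include <cstdint>

std::uint32_t Lop3(std::uint32_t a, std::uint32_t b, std::uint32_t c, std::uint32_t lut) {
    std::uint32_t result = 0;
    for (std::uint32_t i = 0; i < 32; ++i) {
        const std::uint32_t index =
            (((a >> i) & 1u) << 2) | (((b >> i) & 1u) << 1) | ((c >> i) & 1u);
        result |= ((lut >> index) & 1u) << i;
    }
    return result;
}

With this convention a truth table of 0xF0 returns a, 0xCC returns b and 0xAA returns c, which is the usual way LOP3 immediates are composed.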
\ No newline at end of file diff --git a/src/video_core/shader/decode/arithmetic_integer_immediate.cpp b/src/video_core/shader/decode/arithmetic_integer_immediate.cpp new file mode 100644 index 000000000..b26a6e473 --- /dev/null +++ b/src/video_core/shader/decode/arithmetic_integer_immediate.cpp @@ -0,0 +1,96 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::LogicOperation; +using Tegra::Shader::OpCode; +using Tegra::Shader::Pred; +using Tegra::Shader::PredicateResultMode; +using Tegra::Shader::Register; + +u32 ShaderIR::DecodeArithmeticIntegerImmediate(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + Node op_a = GetRegister(instr.gpr8); + Node op_b = Immediate(static_cast<s32>(instr.alu.imm20_32)); + + switch (opcode->get().GetId()) { + case OpCode::Id::IADD32I: { + UNIMPLEMENTED_IF_MSG(instr.iadd32i.saturate, "IADD32I saturation is not implemented"); + + op_a = GetOperandAbsNegInteger(op_a, false, instr.iadd32i.negate_a, true); + + const Node value = Operation(OperationCode::IAdd, PRECISE, op_a, op_b); + + SetInternalFlagsFromInteger(bb, value, instr.op_32.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::LOP32I: { + if (instr.alu.lop32i.invert_a) + op_a = Operation(OperationCode::IBitwiseNot, NO_PRECISE, op_a); + + if (instr.alu.lop32i.invert_b) + op_b = Operation(OperationCode::IBitwiseNot, NO_PRECISE, op_b); + + WriteLogicOperation(bb, instr.gpr0, instr.alu.lop32i.operation, op_a, op_b, + PredicateResultMode::None, Pred::UnusedIndex, instr.op_32.generates_cc); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled ArithmeticIntegerImmediate instruction: {}", + opcode->get().GetName()); + } + + return pc; +} + +void ShaderIR::WriteLogicOperation(BasicBlock& bb, Register dest, LogicOperation logic_op, + Node op_a, Node op_b, PredicateResultMode predicate_mode, + Pred predicate, bool sets_cc) { + const Node result = [&]() { + switch (logic_op) { + case LogicOperation::And: + return Operation(OperationCode::IBitwiseAnd, PRECISE, op_a, op_b); + case LogicOperation::Or: + return Operation(OperationCode::IBitwiseOr, PRECISE, op_a, op_b); + case LogicOperation::Xor: + return Operation(OperationCode::IBitwiseXor, PRECISE, op_a, op_b); + case LogicOperation::PassB: + return op_b; + default: + UNIMPLEMENTED_MSG("Unimplemented logic operation={}", static_cast<u32>(logic_op)); + return Immediate(0); + } + }(); + + SetInternalFlagsFromInteger(bb, result, sets_cc); + SetRegister(bb, dest, result); + + // Write the predicate value depending on the predicate mode. + switch (predicate_mode) { + case PredicateResultMode::None: + // Do nothing. + return; + case PredicateResultMode::NotZero: { + // Set the predicate to true if the result is not zero. + const Node compare = Operation(OperationCode::LogicalINotEqual, result, Immediate(0)); + SetPredicate(bb, static_cast<u64>(predicate), compare); + break; + } + default: + UNIMPLEMENTED_MSG("Unimplemented predicate result mode: {}", + static_cast<u32>(predicate_mode)); + } +} + +} // namespace VideoCommon::Shader
\ No newline at end of file diff --git a/src/video_core/shader/decode/bfe.cpp b/src/video_core/shader/decode/bfe.cpp new file mode 100644 index 000000000..0734141b0 --- /dev/null +++ b/src/video_core/shader/decode/bfe.cpp @@ -0,0 +1,49 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeBfe(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + UNIMPLEMENTED_IF(instr.bfe.negate_b); + + Node op_a = GetRegister(instr.gpr8); + op_a = GetOperandAbsNegInteger(op_a, false, instr.bfe.negate_a, false); + + switch (opcode->get().GetId()) { + case OpCode::Id::BFE_IMM: { + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in BFE is not implemented"); + + const Node inner_shift_imm = Immediate(static_cast<u32>(instr.bfe.GetLeftShiftValue())); + const Node outer_shift_imm = + Immediate(static_cast<u32>(instr.bfe.GetLeftShiftValue() + instr.bfe.shift_position)); + + const Node inner_shift = + Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, op_a, inner_shift_imm); + const Node outer_shift = + Operation(OperationCode::ILogicalShiftRight, NO_PRECISE, inner_shift, outer_shift_imm); + + SetInternalFlagsFromInteger(bb, outer_shift, instr.generates_cc); + SetRegister(bb, instr.gpr0, outer_shift); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled BFE instruction: {}", opcode->get().GetName()); + } + + return pc; +} + +} // namespace VideoCommon::Shader
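BFE_IMM above is lowered as a left shift followed by a right shift. Assuming GetLeftShiftValue() evaluates to 32 - position - width (an assumption; the helper's body is not visible in this diff), the shift pair is equivalent to a plain unsigned bitfield extract:

#include <cstdint>

// Sketch of the shift pair used for BFE_IMM. Zero-width fields and fields that run past
// bit 31 are not handled here.
static uint32_t BfeReference(uint32_t value, uint32_t position, uint32_t width) {
    const uint32_t left = 32 - position - width; // inner shift: discard bits above the field
    const uint32_t right = left + position;      // outer shift: discard bits below the field
    return (value << left) >> right;             // same as (value >> position) & ((1u << width) - 1u)
}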
\ No newline at end of file diff --git a/src/video_core/shader/decode/bfi.cpp b/src/video_core/shader/decode/bfi.cpp new file mode 100644 index 000000000..942d6729d --- /dev/null +++ b/src/video_core/shader/decode/bfi.cpp @@ -0,0 +1,41 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeBfi(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + const auto [base, packed_shift] = [&]() -> std::tuple<Node, Node> { + switch (opcode->get().GetId()) { + case OpCode::Id::BFI_IMM_R: + return {GetRegister(instr.gpr39), Immediate(instr.alu.GetSignedImm20_20())}; + default: + UNREACHABLE(); + return {Immediate(0), Immediate(0)}; + } + }(); + const Node insert = GetRegister(instr.gpr8); + const Node offset = BitfieldExtract(packed_shift, 0, 8); + const Node bits = BitfieldExtract(packed_shift, 8, 8); + + const Node value = + Operation(OperationCode::UBitfieldInsert, PRECISE, base, insert, offset, bits); + + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + + return pc; +} + +} // namespace VideoCommon::Shader
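The BFI path above unpacks its packed shift operand into an offset (bits 0-7) and a field width (bits 8-15) and then performs an unsigned bitfield insert. A host-side reference of that insert (a sketch, not the emulator's implementation; names invented for the example):

#include <cstdint>

// The low "width" bits of insert replace the field of base starting at "offset".
static uint32_t BfiReference(uint32_t base, uint32_t insert, uint32_t packed_shift) {
    const uint32_t offset = packed_shift & 0xffu;
    const uint32_t width = (packed_shift >> 8) & 0xffu;
    if (width == 0 || offset >= 32) {
        return base;
    }
    const uint32_t field = width >= 32 ? 0xffffffffu : ((1u << width) - 1u);
    const uint32_t mask = field << offset;
    return (base & ~mask) | ((insert << offset) & mask);
}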
\ No newline at end of file diff --git a/src/video_core/shader/decode/conversion.cpp b/src/video_core/shader/decode/conversion.cpp new file mode 100644 index 000000000..728a393a1 --- /dev/null +++ b/src/video_core/shader/decode/conversion.cpp @@ -0,0 +1,149 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Register; + +u32 ShaderIR::DecodeConversion(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + switch (opcode->get().GetId()) { + case OpCode::Id::I2I_R: { + UNIMPLEMENTED_IF(instr.conversion.selector); + + const bool input_signed = instr.conversion.is_input_signed; + const bool output_signed = instr.conversion.is_output_signed; + + Node value = GetRegister(instr.gpr20); + value = ConvertIntegerSize(value, instr.conversion.src_size, input_signed); + + value = GetOperandAbsNegInteger(value, instr.conversion.abs_a, instr.conversion.negate_a, + input_signed); + if (input_signed != output_signed) { + value = SignedOperation(OperationCode::ICastUnsigned, output_signed, NO_PRECISE, value); + } + + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::I2F_R: + case OpCode::Id::I2F_C: { + UNIMPLEMENTED_IF(instr.conversion.dest_size != Register::Size::Word); + UNIMPLEMENTED_IF(instr.conversion.selector); + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in I2F is not implemented"); + + Node value = [&]() { + if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset()); + } + }(); + const bool input_signed = instr.conversion.is_input_signed; + value = ConvertIntegerSize(value, instr.conversion.src_size, input_signed); + value = GetOperandAbsNegInteger(value, instr.conversion.abs_a, false, input_signed); + value = SignedOperation(OperationCode::FCastInteger, input_signed, PRECISE, value); + value = GetOperandAbsNegFloat(value, false, instr.conversion.negate_a); + + SetInternalFlagsFromFloat(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::F2F_R: + case OpCode::Id::F2F_C: { + UNIMPLEMENTED_IF(instr.conversion.dest_size != Register::Size::Word); + UNIMPLEMENTED_IF(instr.conversion.src_size != Register::Size::Word); + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in F2F is not implemented"); + + Node value = [&]() { + if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset()); + } + }(); + + value = GetOperandAbsNegFloat(value, instr.conversion.abs_a, instr.conversion.negate_a); + + value = [&]() { + switch (instr.conversion.f2f.rounding) { + case Tegra::Shader::F2fRoundingOp::None: + return value; + case Tegra::Shader::F2fRoundingOp::Round: + return Operation(OperationCode::FRoundEven, PRECISE, value); + case Tegra::Shader::F2fRoundingOp::Floor: + return Operation(OperationCode::FFloor, PRECISE, value); + case Tegra::Shader::F2fRoundingOp::Ceil: + return Operation(OperationCode::FCeil, PRECISE, value); + case 
Tegra::Shader::F2fRoundingOp::Trunc: + return Operation(OperationCode::FTrunc, PRECISE, value); + } + UNIMPLEMENTED_MSG("Unimplemented F2F rounding mode {}", + static_cast<u32>(instr.conversion.f2f.rounding.Value())); + return Immediate(0); + }(); + value = GetSaturatedFloat(value, instr.alu.saturate_d); + + SetInternalFlagsFromFloat(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::F2I_R: + case OpCode::Id::F2I_C: { + UNIMPLEMENTED_IF(instr.conversion.src_size != Register::Size::Word); + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in F2I is not implemented"); + Node value = [&]() { + if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset()); + } + }(); + + value = GetOperandAbsNegFloat(value, instr.conversion.abs_a, instr.conversion.negate_a); + + value = [&]() { + switch (instr.conversion.f2i.rounding) { + case Tegra::Shader::F2iRoundingOp::None: + return value; + case Tegra::Shader::F2iRoundingOp::Floor: + return Operation(OperationCode::FFloor, PRECISE, value); + case Tegra::Shader::F2iRoundingOp::Ceil: + return Operation(OperationCode::FCeil, PRECISE, value); + case Tegra::Shader::F2iRoundingOp::Trunc: + return Operation(OperationCode::FTrunc, PRECISE, value); + default: + UNIMPLEMENTED_MSG("Unimplemented F2I rounding mode {}", + static_cast<u32>(instr.conversion.f2i.rounding.Value())); + return Immediate(0); + } + }(); + const bool is_signed = instr.conversion.is_output_signed; + value = SignedOperation(OperationCode::ICastFloat, is_signed, PRECISE, value); + value = ConvertIntegerSize(value, instr.conversion.dest_size, is_signed); + + SetRegister(bb, instr.gpr0, value); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled conversion instruction: {}", opcode->get().GetName()); + } + + return pc; +} + +} // namespace VideoCommon::Shader
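The F2I path above applies the encoded rounding mode first and only then casts to a signed or unsigned integer. Ignoring destination sizes other than Word, saturation and out-of-range inputs, a rough host-side equivalent looks like this (illustrative only; names invented for the example):

#include <cmath>
#include <cstdint>

enum class F2iRounding { None, Floor, Ceil, Trunc };

// Sketch of the round-then-cast sequence produced for F2I.
static uint32_t F2iReference(float value, F2iRounding rounding, bool is_signed) {
    switch (rounding) {
    case F2iRounding::Floor:
        value = std::floor(value);
        break;
    case F2iRounding::Ceil:
        value = std::ceil(value);
        break;
    case F2iRounding::Trunc:
        value = std::trunc(value);
        break;
    case F2iRounding::None:
        break;
    }
    return is_signed ? static_cast<uint32_t>(static_cast<int64_t>(value))
                     : static_cast<uint32_t>(value);
}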
\ No newline at end of file diff --git a/src/video_core/shader/decode/decode_integer_set.cpp b/src/video_core/shader/decode/decode_integer_set.cpp new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/src/video_core/shader/decode/decode_integer_set.cpp diff --git a/src/video_core/shader/decode/ffma.cpp b/src/video_core/shader/decode/ffma.cpp new file mode 100644 index 000000000..52f39d3ff --- /dev/null +++ b/src/video_core/shader/decode/ffma.cpp @@ -0,0 +1,59 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeFfma(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + UNIMPLEMENTED_IF_MSG(instr.ffma.cc != 0, "FFMA cc not implemented"); + UNIMPLEMENTED_IF_MSG(instr.ffma.tab5980_0 != 1, "FFMA tab5980_0({}) not implemented", + instr.ffma.tab5980_0.Value()); // Seems to be 1 by default based on SMO + UNIMPLEMENTED_IF_MSG(instr.ffma.tab5980_1 != 0, "FFMA tab5980_1({}) not implemented", + instr.ffma.tab5980_1.Value()); + + const Node op_a = GetRegister(instr.gpr8); + + auto [op_b, op_c] = [&]() -> std::tuple<Node, Node> { + switch (opcode->get().GetId()) { + case OpCode::Id::FFMA_CR: { + return {GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset()), + GetRegister(instr.gpr39)}; + } + case OpCode::Id::FFMA_RR: + return {GetRegister(instr.gpr20), GetRegister(instr.gpr39)}; + case OpCode::Id::FFMA_RC: { + return {GetRegister(instr.gpr39), + GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset())}; + } + case OpCode::Id::FFMA_IMM: + return {GetImmediate19(instr), GetRegister(instr.gpr39)}; + default: + UNIMPLEMENTED_MSG("Unhandled FFMA instruction: {}", opcode->get().GetName()); + return {Immediate(0), Immediate(0)}; + } + }(); + + op_b = GetOperandAbsNegFloat(op_b, false, instr.ffma.negate_b); + op_c = GetOperandAbsNegFloat(op_c, false, instr.ffma.negate_c); + + Node value = Operation(OperationCode::FFma, PRECISE, op_a, op_b, op_c); + value = GetSaturatedFloat(value, instr.alu.saturate_d); + + SetInternalFlagsFromFloat(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + + return pc; +} + +} // namespace VideoCommon::Shader
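Whatever the operand encoding (register, constant buffer or immediate), every FFMA case above feeds OperationCode::FFma, that is, a fused multiply-add. On the host that contract can be sketched with std::fmaf; whether the backend ultimately emits a fused operation is up to the GPU compiler:

#include <cmath>

// a * b + c with a single rounding step, mirroring the FFma node the decoder emits.
static float FfmaReference(float a, float b, float c) {
    return std::fmaf(a, b, c);
}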
\ No newline at end of file diff --git a/src/video_core/shader/decode/float_set.cpp b/src/video_core/shader/decode/float_set.cpp new file mode 100644 index 000000000..9f9da2278 --- /dev/null +++ b/src/video_core/shader/decode/float_set.cpp @@ -0,0 +1,58 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeFloatSet(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + const Node op_a = GetOperandAbsNegFloat(GetRegister(instr.gpr8), instr.fset.abs_a != 0, + instr.fset.neg_a != 0); + + Node op_b = [&]() { + if (instr.is_b_imm) { + return GetImmediate19(instr); + } else if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset()); + } + }(); + + op_b = GetOperandAbsNegFloat(op_b, instr.fset.abs_b != 0, instr.fset.neg_b != 0); + + // The fset instruction sets a register to 1.0 or -1 (depending on the bf bit) if the + // condition is true, and to 0 otherwise. + const Node second_pred = GetPredicate(instr.fset.pred39, instr.fset.neg_pred != 0); + + const OperationCode combiner = GetPredicateCombiner(instr.fset.op); + const Node first_pred = GetPredicateComparisonFloat(instr.fset.cond, op_a, op_b); + + const Node predicate = Operation(combiner, first_pred, second_pred); + + const Node true_value = instr.fset.bf ? Immediate(1.0f) : Immediate(-1); + const Node false_value = instr.fset.bf ? Immediate(0.0f) : Immediate(0); + const Node value = + Operation(OperationCode::Select, PRECISE, predicate, true_value, false_value); + + if (instr.fset.bf) { + SetInternalFlagsFromFloat(bb, value, instr.generates_cc); + } else { + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + } + SetRegister(bb, instr.gpr0, value); + + return pc; +} + +} // namespace VideoCommon::Shader
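As the comment in the hunk above notes, FSET writes 1.0 or an all-ones integer on a true condition depending on the bf bit, and zero otherwise. A small sketch of the raw register value that selection produces (illustrative; the helper name is invented):

#include <cstdint>
#include <cstring>

// Raw 32-bit destination for FSET: 0 when the condition is false; when true, 1.0f
// (0x3f800000) in bf mode or integer -1 (0xffffffff) otherwise.
static uint32_t FsetResult(bool condition, bool bf) {
    if (!condition) {
        return 0;
    }
    if (!bf) {
        return 0xffffffffu;
    }
    const float one = 1.0f;
    uint32_t bits = 0;
    std::memcpy(&bits, &one, sizeof(bits));
    return bits;
}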
\ No newline at end of file diff --git a/src/video_core/shader/decode/float_set_predicate.cpp b/src/video_core/shader/decode/float_set_predicate.cpp new file mode 100644 index 000000000..dd3aef6f2 --- /dev/null +++ b/src/video_core/shader/decode/float_set_predicate.cpp @@ -0,0 +1,56 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Pred; + +u32 ShaderIR::DecodeFloatSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + const Node op_a = GetOperandAbsNegFloat(GetRegister(instr.gpr8), instr.fsetp.abs_a != 0, + instr.fsetp.neg_a != 0); + Node op_b = [&]() { + if (instr.is_b_imm) { + return GetImmediate19(instr); + } else if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset()); + } + }(); + op_b = GetOperandAbsNegFloat(op_b, instr.fsetp.abs_b, false); + + // We can't use the constant predicate as destination. + ASSERT(instr.fsetp.pred3 != static_cast<u64>(Pred::UnusedIndex)); + + const Node predicate = GetPredicateComparisonFloat(instr.fsetp.cond, op_a, op_b); + const Node second_pred = GetPredicate(instr.fsetp.pred39, instr.fsetp.neg_pred != 0); + + const OperationCode combiner = GetPredicateCombiner(instr.fsetp.op); + const Node value = Operation(combiner, predicate, second_pred); + + // Set the primary predicate to the result of Predicate OP SecondPredicate + SetPredicate(bb, instr.fsetp.pred3, value); + + if (instr.fsetp.pred0 != static_cast<u64>(Pred::UnusedIndex)) { + // Set the secondary predicate to the result of !Predicate OP SecondPredicate, + // if enabled + const Node negated_pred = Operation(OperationCode::LogicalNegate, predicate); + const Node second_value = Operation(combiner, negated_pred, second_pred); + SetPredicate(bb, instr.fsetp.pred0, second_value); + } + + return pc; +} + +} // namespace VideoCommon::Shader
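FSETP above writes up to two predicates: the primary one gets (comparison OP second_pred) and, when pred0 is in use, the secondary one gets (!comparison OP second_pred). Sketched on the host, with the combiner passed in as a callable (names invented for the example):

struct FsetpPredicates {
    bool primary;
    bool secondary;
};

// combiner stands for the instruction's predicate combiner (logical and, or, xor, ...).
template <typename Combiner>
static FsetpPredicates FsetpReference(bool comparison, bool second_pred, Combiner combiner) {
    return {combiner(comparison, second_pred), combiner(!comparison, second_pred)};
}

For instance, FsetpReference(cmp, p39, [](bool x, bool y) { return x && y; }) sets exactly one of the two outputs whenever p39 is true.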
\ No newline at end of file diff --git a/src/video_core/shader/decode/half_set.cpp b/src/video_core/shader/decode/half_set.cpp new file mode 100644 index 000000000..dfd7cb98f --- /dev/null +++ b/src/video_core/shader/decode/half_set.cpp @@ -0,0 +1,67 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <array> + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeHalfSet(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + UNIMPLEMENTED_IF(instr.hset2.ftz != 0); + + // instr.hset2.type_a + // instr.hset2.type_b + Node op_a = GetRegister(instr.gpr8); + Node op_b = [&]() { + switch (opcode->get().GetId()) { + case OpCode::Id::HSET2_R: + return GetRegister(instr.gpr20); + default: + UNREACHABLE(); + return Immediate(0); + } + }(); + + op_a = GetOperandAbsNegHalf(op_a, instr.hset2.abs_a, instr.hset2.negate_a); + op_b = GetOperandAbsNegHalf(op_b, instr.hset2.abs_b, instr.hset2.negate_b); + + const Node second_pred = GetPredicate(instr.hset2.pred39, instr.hset2.neg_pred); + + MetaHalfArithmetic meta{false, {instr.hset2.type_a, instr.hset2.type_b}}; + const Node comparison_pair = GetPredicateComparisonHalf(instr.hset2.cond, meta, op_a, op_b); + + const OperationCode combiner = GetPredicateCombiner(instr.hset2.op); + + // HSET2 operates on each half float in the pack. + std::array<Node, 2> values; + for (u32 i = 0; i < 2; ++i) { + const u32 raw_value = instr.hset2.bf ? 0x3c00 : 0xffff; + const Node true_value = Immediate(raw_value << (i * 16)); + const Node false_value = Immediate(0); + + const Node comparison = + Operation(OperationCode::LogicalPick2, comparison_pair, Immediate(i)); + const Node predicate = Operation(combiner, comparison, second_pred); + + values[i] = + Operation(OperationCode::Select, NO_PRECISE, predicate, true_value, false_value); + } + + const Node value = Operation(OperationCode::UBitwiseOr, NO_PRECISE, values[0], values[1]); + SetRegister(bb, instr.gpr0, value); + + return pc; +} + +} // namespace VideoCommon::Shader
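HSET2 above evaluates the comparison for each half of the packed register and ORs two independently selected 16-bit results together; 0x3c00 is 1.0 in binary16. A sketch of that packing (helper name invented):

#include <cstdint>

// low_true/high_true say whether each half's predicate passed; bf selects 1.0 in fp16
// versus all-ones for the "true" value of each 16-bit lane.
static uint32_t Hset2Pack(bool low_true, bool high_true, bool bf) {
    const uint32_t true_bits = bf ? 0x3c00u : 0xffffu;
    const uint32_t low = low_true ? true_bits : 0u;
    const uint32_t high = high_true ? (true_bits << 16) : 0u;
    return low | high;
}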
\ No newline at end of file diff --git a/src/video_core/shader/decode/half_set_predicate.cpp b/src/video_core/shader/decode/half_set_predicate.cpp new file mode 100644 index 000000000..53c44ae5a --- /dev/null +++ b/src/video_core/shader/decode/half_set_predicate.cpp @@ -0,0 +1,62 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Pred; + +u32 ShaderIR::DecodeHalfSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + UNIMPLEMENTED_IF(instr.hsetp2.ftz != 0); + + Node op_a = GetRegister(instr.gpr8); + op_a = GetOperandAbsNegHalf(op_a, instr.hsetp2.abs_a, instr.hsetp2.negate_a); + + const Node op_b = [&]() { + switch (opcode->get().GetId()) { + case OpCode::Id::HSETP2_R: + return GetOperandAbsNegHalf(GetRegister(instr.gpr20), instr.hsetp2.abs_a, + instr.hsetp2.negate_b); + default: + UNREACHABLE(); + return Immediate(0); + } + }(); + + // We can't use the constant predicate as destination. + ASSERT(instr.hsetp2.pred3 != static_cast<u64>(Pred::UnusedIndex)); + + const Node second_pred = GetPredicate(instr.hsetp2.pred39, instr.hsetp2.neg_pred != 0); + + const OperationCode combiner = GetPredicateCombiner(instr.hsetp2.op); + const OperationCode pair_combiner = + instr.hsetp2.h_and ? OperationCode::LogicalAll2 : OperationCode::LogicalAny2; + + MetaHalfArithmetic meta = {false, {instr.hsetp2.type_a, instr.hsetp2.type_b}}; + const Node comparison = GetPredicateComparisonHalf(instr.hsetp2.cond, meta, op_a, op_b); + const Node first_pred = Operation(pair_combiner, comparison); + + // Set the primary predicate to the result of Predicate OP SecondPredicate + const Node value = Operation(combiner, first_pred, second_pred); + SetPredicate(bb, instr.hsetp2.pred3, value); + + if (instr.hsetp2.pred0 != static_cast<u64>(Pred::UnusedIndex)) { + // Set the secondary predicate to the result of !Predicate OP SecondPredicate, if enabled + const Node negated_pred = Operation(OperationCode::LogicalNegate, first_pred); + SetPredicate(bb, instr.hsetp2.pred0, Operation(combiner, negated_pred, second_pred)); + } + + return pc; +} + +} // namespace VideoCommon::Shader
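The h_and flag above selects how the per-half comparison pair is reduced before it reaches the usual predicate combiner; the LogicalAll2/LogicalAny2 names suggest that with h_and both halves must pass, while otherwise one passing half suffices. In plain terms (a sketch, name invented):

// LogicalAll2 versus LogicalAny2, as chosen by hsetp2.h_and.
static bool ReduceHalfPair(bool low, bool high, bool h_and) {
    return h_and ? (low && high) : (low || high);
}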
\ No newline at end of file diff --git a/src/video_core/shader/decode/hfma2.cpp b/src/video_core/shader/decode/hfma2.cpp new file mode 100644 index 000000000..43a0a9e10 --- /dev/null +++ b/src/video_core/shader/decode/hfma2.cpp @@ -0,0 +1,77 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <tuple> + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::HalfPrecision; +using Tegra::Shader::HalfType; +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeHfma2(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + if (opcode->get().GetId() == OpCode::Id::HFMA2_RR) { + UNIMPLEMENTED_IF(instr.hfma2.rr.precision != HalfPrecision::None); + } else { + UNIMPLEMENTED_IF(instr.hfma2.precision != HalfPrecision::None); + } + + constexpr auto identity = HalfType::H0_H1; + + const HalfType type_a = instr.hfma2.type_a; + const Node op_a = GetRegister(instr.gpr8); + + bool neg_b{}, neg_c{}; + auto [saturate, type_b, op_b, type_c, + op_c] = [&]() -> std::tuple<bool, HalfType, Node, HalfType, Node> { + switch (opcode->get().GetId()) { + case OpCode::Id::HFMA2_CR: + neg_b = instr.hfma2.negate_b; + neg_c = instr.hfma2.negate_c; + return {instr.hfma2.saturate, instr.hfma2.type_b, + GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset()), + instr.hfma2.type_reg39, GetRegister(instr.gpr39)}; + case OpCode::Id::HFMA2_RC: + neg_b = instr.hfma2.negate_b; + neg_c = instr.hfma2.negate_c; + return {instr.hfma2.saturate, instr.hfma2.type_reg39, GetRegister(instr.gpr39), + instr.hfma2.type_b, + GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset())}; + case OpCode::Id::HFMA2_RR: + neg_b = instr.hfma2.rr.negate_b; + neg_c = instr.hfma2.rr.negate_c; + return {instr.hfma2.rr.saturate, instr.hfma2.type_b, GetRegister(instr.gpr20), + instr.hfma2.rr.type_c, GetRegister(instr.gpr39)}; + case OpCode::Id::HFMA2_IMM_R: + neg_c = instr.hfma2.negate_c; + return {instr.hfma2.saturate, identity, UnpackHalfImmediate(instr, true), + instr.hfma2.type_reg39, GetRegister(instr.gpr39)}; + default: + return {false, identity, Immediate(0), identity, Immediate(0)}; + } + }(); + UNIMPLEMENTED_IF_MSG(saturate, "HFMA2 saturation is not implemented"); + + op_b = GetOperandAbsNegHalf(op_b, false, neg_b); + op_c = GetOperandAbsNegHalf(op_c, false, neg_c); + + MetaHalfArithmetic meta{true, {type_a, type_b, type_c}}; + Node value = Operation(OperationCode::HFma, meta, op_a, op_b, op_c); + value = HalfMerge(GetRegister(instr.gpr0), value, instr.hfma2.merge); + + SetRegister(bb, instr.gpr0, value); + + return pc; +} + +} // namespace VideoCommon::Shader
\ No newline at end of file diff --git a/src/video_core/shader/decode/integer_set.cpp b/src/video_core/shader/decode/integer_set.cpp new file mode 100644 index 000000000..16eb3985f --- /dev/null +++ b/src/video_core/shader/decode/integer_set.cpp @@ -0,0 +1,50 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeIntegerSet(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + const Node op_a = GetRegister(instr.gpr8); + const Node op_b = [&]() { + if (instr.is_b_imm) { + return Immediate(instr.alu.GetSignedImm20_20()); + } else if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset()); + } + }(); + + // The iset instruction sets a register to 1.0 or -1 (depending on the bf bit) if the condition + // is true, and to 0 otherwise. + const Node second_pred = GetPredicate(instr.iset.pred39, instr.iset.neg_pred != 0); + const Node first_pred = + GetPredicateComparisonInteger(instr.iset.cond, instr.iset.is_signed, op_a, op_b); + + const OperationCode combiner = GetPredicateCombiner(instr.iset.op); + + const Node predicate = Operation(combiner, first_pred, second_pred); + + const Node true_value = instr.iset.bf ? Immediate(1.0f) : Immediate(-1); + const Node false_value = instr.iset.bf ? Immediate(0.0f) : Immediate(0); + const Node value = + Operation(OperationCode::Select, PRECISE, predicate, true_value, false_value); + + SetRegister(bb, instr.gpr0, value); + + return pc; +} + +} // namespace VideoCommon::Shader
\ No newline at end of file diff --git a/src/video_core/shader/decode/integer_set_predicate.cpp b/src/video_core/shader/decode/integer_set_predicate.cpp new file mode 100644 index 000000000..daf97174b --- /dev/null +++ b/src/video_core/shader/decode/integer_set_predicate.cpp @@ -0,0 +1,53 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Pred; + +u32 ShaderIR::DecodeIntegerSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + const Node op_a = GetRegister(instr.gpr8); + + const Node op_b = [&]() { + if (instr.is_b_imm) { + return Immediate(instr.alu.GetSignedImm20_20()); + } else if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset()); + } + }(); + + // We can't use the constant predicate as destination. + ASSERT(instr.isetp.pred3 != static_cast<u64>(Pred::UnusedIndex)); + + const Node second_pred = GetPredicate(instr.isetp.pred39, instr.isetp.neg_pred != 0); + const Node predicate = + GetPredicateComparisonInteger(instr.isetp.cond, instr.isetp.is_signed, op_a, op_b); + + // Set the primary predicate to the result of Predicate OP SecondPredicate + const OperationCode combiner = GetPredicateCombiner(instr.isetp.op); + const Node value = Operation(combiner, predicate, second_pred); + SetPredicate(bb, instr.isetp.pred3, value); + + if (instr.isetp.pred0 != static_cast<u64>(Pred::UnusedIndex)) { + // Set the secondary predicate to the result of !Predicate OP SecondPredicate, if enabled + const Node negated_pred = Operation(OperationCode::LogicalNegate, predicate); + SetPredicate(bb, instr.isetp.pred0, Operation(combiner, negated_pred, second_pred)); + } + + return pc; +} + +} // namespace VideoCommon::Shader
\ No newline at end of file diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp new file mode 100644 index 000000000..3dd26da20 --- /dev/null +++ b/src/video_core/shader/decode/memory.cpp @@ -0,0 +1,771 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <algorithm> +#include <vector> +#include <fmt/format.h> + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Attribute; +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Register; +using Tegra::Shader::TextureMiscMode; +using Tegra::Shader::TextureProcessMode; +using Tegra::Shader::TextureType; + +static std::size_t GetCoordCount(TextureType texture_type) { + switch (texture_type) { + case TextureType::Texture1D: + return 1; + case TextureType::Texture2D: + return 2; + case TextureType::Texture3D: + case TextureType::TextureCube: + return 3; + default: + UNIMPLEMENTED_MSG("Unhandled texture type: {}", static_cast<u32>(texture_type)); + return 0; + } +} + +u32 ShaderIR::DecodeMemory(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + switch (opcode->get().GetId()) { + case OpCode::Id::LD_A: { + // Note: Shouldn't this be interp mode flat? As in no interpolation made. + UNIMPLEMENTED_IF_MSG(instr.gpr8.Value() != Register::ZeroIndex, + "Indirect attribute loads are not supported"); + UNIMPLEMENTED_IF_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) != 0, + "Unaligned attribute loads are not supported"); + + Tegra::Shader::IpaMode input_mode{Tegra::Shader::IpaInterpMode::Perspective, + Tegra::Shader::IpaSampleMode::Default}; + + u64 next_element = instr.attribute.fmt20.element; + auto next_index = static_cast<u64>(instr.attribute.fmt20.index.Value()); + + const auto LoadNextElement = [&](u32 reg_offset) { + const Node buffer = GetRegister(instr.gpr39); + const Node attribute = GetInputAttribute(static_cast<Attribute::Index>(next_index), + next_element, input_mode, buffer); + + SetRegister(bb, instr.gpr0.Value() + reg_offset, attribute); + + // Load the next attribute element into the following register. If the element + // to load goes beyond the vec4 size, load the first element of the next + // attribute. + next_element = (next_element + 1) % 4; + next_index = next_index + (next_element == 0 ? 
1 : 0); + }; + + const u32 num_words = static_cast<u32>(instr.attribute.fmt20.size.Value()) + 1; + for (u32 reg_offset = 0; reg_offset < num_words; ++reg_offset) { + LoadNextElement(reg_offset); + } + break; + } + case OpCode::Id::LD_C: { + UNIMPLEMENTED_IF(instr.ld_c.unknown != 0); + + Node index = GetRegister(instr.gpr8); + + const Node op_a = + GetConstBufferIndirect(instr.cbuf36.index, instr.cbuf36.GetOffset() + 0, index); + + switch (instr.ld_c.type.Value()) { + case Tegra::Shader::UniformType::Single: + SetRegister(bb, instr.gpr0, op_a); + break; + + case Tegra::Shader::UniformType::Double: { + const Node op_b = + GetConstBufferIndirect(instr.cbuf36.index, instr.cbuf36.GetOffset() + 4, index); + + SetTemporal(bb, 0, op_a); + SetTemporal(bb, 1, op_b); + SetRegister(bb, instr.gpr0, GetTemporal(0)); + SetRegister(bb, instr.gpr0.Value() + 1, GetTemporal(1)); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled type: {}", static_cast<unsigned>(instr.ld_c.type.Value())); + } + break; + } + case OpCode::Id::LD_L: { + UNIMPLEMENTED_IF_MSG(instr.ld_l.unknown == 1, "LD_L Unhandled mode: {}", + static_cast<u32>(instr.ld_l.unknown.Value())); + + const auto GetLmem = [&](s32 offset) { + ASSERT(offset % 4 == 0); + const Node immediate_offset = Immediate(static_cast<s32>(instr.smem_imm) + offset); + const Node address = Operation(OperationCode::IAdd, NO_PRECISE, GetRegister(instr.gpr8), + immediate_offset); + return GetLocalMemory(address); + }; + + switch (instr.ldst_sl.type.Value()) { + case Tegra::Shader::StoreType::Bits32: + case Tegra::Shader::StoreType::Bits64: + case Tegra::Shader::StoreType::Bits128: { + const u32 count = [&]() { + switch (instr.ldst_sl.type.Value()) { + case Tegra::Shader::StoreType::Bits32: + return 1; + case Tegra::Shader::StoreType::Bits64: + return 2; + case Tegra::Shader::StoreType::Bits128: + return 4; + default: + UNREACHABLE(); + return 0; + } + }(); + for (u32 i = 0; i < count; ++i) + SetTemporal(bb, i, GetLmem(i * 4)); + for (u32 i = 0; i < count; ++i) + SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); + break; + } + default: + UNIMPLEMENTED_MSG("LD_L Unhandled type: {}", + static_cast<u32>(instr.ldst_sl.type.Value())); + } + break; + } + case OpCode::Id::LDG: { + const u32 count = [&]() { + switch (instr.ldg.type) { + case Tegra::Shader::UniformType::Single: + return 1; + case Tegra::Shader::UniformType::Double: + return 2; + case Tegra::Shader::UniformType::Quad: + case Tegra::Shader::UniformType::UnsignedQuad: + return 4; + default: + UNIMPLEMENTED_MSG("Unimplemented LDG size!"); + return 1; + } + }(); + + const Node addr_register = GetRegister(instr.gpr8); + const Node base_address = TrackCbuf(addr_register, code, static_cast<s64>(code.size())); + const auto cbuf = std::get_if<CbufNode>(base_address); + ASSERT(cbuf != nullptr); + const auto cbuf_offset_imm = std::get_if<ImmediateNode>(cbuf->GetOffset()); + ASSERT(cbuf_offset_imm != nullptr); + const auto cbuf_offset = cbuf_offset_imm->GetValue(); + + bb.push_back(Comment( + fmt::format("Base address is c[0x{:x}][0x{:x}]", cbuf->GetIndex(), cbuf_offset))); + + const GlobalMemoryBase descriptor{cbuf->GetIndex(), cbuf_offset}; + used_global_memory_bases.insert(descriptor); + + const Node immediate_offset = + Immediate(static_cast<u32>(instr.ldg.immediate_offset.Value())); + const Node base_real_address = + Operation(OperationCode::UAdd, NO_PRECISE, immediate_offset, addr_register); + + for (u32 i = 0; i < count; ++i) { + const Node it_offset = Immediate(i * 4); + const Node real_address = + 
Operation(OperationCode::UAdd, NO_PRECISE, base_real_address, it_offset); + const Node gmem = StoreNode(GmemNode(real_address, base_address, descriptor)); + + SetTemporal(bb, i, gmem); + } + for (u32 i = 0; i < count; ++i) { + SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); + } + break; + } + case OpCode::Id::ST_A: { + UNIMPLEMENTED_IF_MSG(instr.gpr8.Value() != Register::ZeroIndex, + "Indirect attribute loads are not supported"); + UNIMPLEMENTED_IF_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) != 0, + "Unaligned attribute loads are not supported"); + + u64 next_element = instr.attribute.fmt20.element; + auto next_index = static_cast<u64>(instr.attribute.fmt20.index.Value()); + + const auto StoreNextElement = [&](u32 reg_offset) { + const auto dest = GetOutputAttribute(static_cast<Attribute::Index>(next_index), + next_element, GetRegister(instr.gpr39)); + const auto src = GetRegister(instr.gpr0.Value() + reg_offset); + + bb.push_back(Operation(OperationCode::Assign, dest, src)); + + // Load the next attribute element into the following register. If the element + // to load goes beyond the vec4 size, load the first element of the next + // attribute. + next_element = (next_element + 1) % 4; + next_index = next_index + (next_element == 0 ? 1 : 0); + }; + + const u32 num_words = static_cast<u32>(instr.attribute.fmt20.size.Value()) + 1; + for (u32 reg_offset = 0; reg_offset < num_words; ++reg_offset) { + StoreNextElement(reg_offset); + } + + break; + } + case OpCode::Id::ST_L: { + UNIMPLEMENTED_IF_MSG(instr.st_l.unknown == 0, "ST_L Unhandled mode: {}", + static_cast<u32>(instr.st_l.unknown.Value())); + + const auto GetLmemAddr = [&](s32 offset) { + ASSERT(offset % 4 == 0); + const Node immediate = Immediate(static_cast<s32>(instr.smem_imm) + offset); + return Operation(OperationCode::IAdd, NO_PRECISE, GetRegister(instr.gpr8), immediate); + }; + + switch (instr.ldst_sl.type.Value()) { + case Tegra::Shader::StoreType::Bits128: + SetLocalMemory(bb, GetLmemAddr(12), GetRegister(instr.gpr0.Value() + 3)); + SetLocalMemory(bb, GetLmemAddr(8), GetRegister(instr.gpr0.Value() + 2)); + case Tegra::Shader::StoreType::Bits64: + SetLocalMemory(bb, GetLmemAddr(4), GetRegister(instr.gpr0.Value() + 1)); + case Tegra::Shader::StoreType::Bits32: + SetLocalMemory(bb, GetLmemAddr(0), GetRegister(instr.gpr0)); + break; + default: + UNIMPLEMENTED_MSG("ST_L Unhandled type: {}", + static_cast<u32>(instr.ldst_sl.type.Value())); + } + break; + } + case OpCode::Id::TEX: { + UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(TextureMiscMode::AOFFI), + "AOFFI is not implemented"); + + if (instr.tex.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TEX.NODEP implementation is incomplete"); + } + + const TextureType texture_type{instr.tex.texture_type}; + const bool is_array = instr.tex.array != 0; + const bool depth_compare = instr.tex.UsesMiscMode(TextureMiscMode::DC); + const auto process_mode = instr.tex.GetTextureProcessMode(); + WriteTexInstructionFloat( + bb, instr, GetTexCode(instr, texture_type, process_mode, depth_compare, is_array)); + break; + } + case OpCode::Id::TEXS: { + const TextureType texture_type{instr.texs.GetTextureType()}; + const bool is_array{instr.texs.IsArrayTexture()}; + const bool depth_compare = instr.texs.UsesMiscMode(TextureMiscMode::DC); + const auto process_mode = instr.texs.GetTextureProcessMode(); + + if (instr.texs.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TEXS.NODEP implementation is incomplete"); + } + + const Node4 components = + 
GetTexsCode(instr, texture_type, process_mode, depth_compare, is_array); + + if (instr.texs.fp32_flag) { + WriteTexsInstructionFloat(bb, instr, components); + } else { + WriteTexsInstructionHalfFloat(bb, instr, components); + } + break; + } + case OpCode::Id::TLD4: { + ASSERT(instr.tld4.array == 0); + UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::AOFFI), + "AOFFI is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::NDV), + "NDV is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::PTP), + "PTP is not implemented"); + + if (instr.tld4.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TLD4.NODEP implementation is incomplete"); + } + + const auto texture_type = instr.tld4.texture_type.Value(); + const bool depth_compare = instr.tld4.UsesMiscMode(TextureMiscMode::DC); + const bool is_array = instr.tld4.array != 0; + WriteTexInstructionFloat(bb, instr, + GetTld4Code(instr, texture_type, depth_compare, is_array)); + break; + } + case OpCode::Id::TLD4S: { + UNIMPLEMENTED_IF_MSG(instr.tld4s.UsesMiscMode(TextureMiscMode::AOFFI), + "AOFFI is not implemented"); + + if (instr.tld4s.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TLD4S.NODEP implementation is incomplete"); + } + + const bool depth_compare = instr.tld4s.UsesMiscMode(TextureMiscMode::DC); + const Node op_a = GetRegister(instr.gpr8); + const Node op_b = GetRegister(instr.gpr20); + + std::vector<Node> coords; + + // TODO(Subv): Figure out how the sampler type is encoded in the TLD4S instruction. + if (depth_compare) { + // Note: TLD4S coordinate encoding works just like TEXS's + const Node op_y = GetRegister(instr.gpr8.Value() + 1); + coords.push_back(op_a); + coords.push_back(op_y); + coords.push_back(op_b); + } else { + coords.push_back(op_a); + coords.push_back(op_b); + } + const auto num_coords = static_cast<u32>(coords.size()); + coords.push_back(Immediate(static_cast<u32>(instr.tld4s.component))); + + const auto& sampler = + GetSampler(instr.sampler, TextureType::Texture2D, false, depth_compare); + + Node4 values; + for (u32 element = 0; element < values.size(); ++element) { + auto params = coords; + MetaTexture meta{sampler, element, num_coords}; + values[element] = + Operation(OperationCode::F4TextureGather, std::move(meta), std::move(params)); + } + + WriteTexsInstructionFloat(bb, instr, values); + break; + } + case OpCode::Id::TXQ: { + if (instr.txq.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TXQ.NODEP implementation is incomplete"); + } + + // TODO: The new commits on the texture refactor, change the way samplers work. + // Sadly, not all texture instructions specify the type of texture their sampler + // uses. This must be fixed at a later instance. 
+ const auto& sampler = + GetSampler(instr.sampler, Tegra::Shader::TextureType::Texture2D, false, false); + + u32 indexer = 0; + switch (instr.txq.query_type) { + case Tegra::Shader::TextureQueryType::Dimension: { + for (u32 element = 0; element < 4; ++element) { + if (instr.txq.IsComponentEnabled(element)) { + MetaTexture meta{sampler, element}; + const Node value = Operation(OperationCode::F4TextureQueryDimensions, + std::move(meta), GetRegister(instr.gpr8)); + SetTemporal(bb, indexer++, value); + } + } + for (u32 i = 0; i < indexer; ++i) { + SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); + } + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled texture query type: {}", + static_cast<u32>(instr.txq.query_type.Value())); + } + break; + } + case OpCode::Id::TMML: { + UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV), + "NDV is not implemented"); + + if (instr.tmml.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TMML.NODEP implementation is incomplete"); + } + + auto texture_type = instr.tmml.texture_type.Value(); + const bool is_array = instr.tmml.array != 0; + const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false); + + std::vector<Node> coords; + + // TODO: Add coordinates for different samplers once other texture types are implemented. + switch (texture_type) { + case TextureType::Texture1D: + coords.push_back(GetRegister(instr.gpr8)); + break; + case TextureType::Texture2D: + coords.push_back(GetRegister(instr.gpr8.Value() + 0)); + coords.push_back(GetRegister(instr.gpr8.Value() + 1)); + break; + default: + UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type)); + + // Fallback to interpreting as a 2D texture for now + coords.push_back(GetRegister(instr.gpr8.Value() + 0)); + coords.push_back(GetRegister(instr.gpr8.Value() + 1)); + texture_type = TextureType::Texture2D; + } + + for (u32 element = 0; element < 2; ++element) { + auto params = coords; + MetaTexture meta_texture{sampler, element, static_cast<u32>(coords.size())}; + const Node value = + Operation(OperationCode::F4TextureQueryLod, meta_texture, std::move(params)); + SetTemporal(bb, element, value); + } + for (u32 element = 0; element < 2; ++element) { + SetRegister(bb, instr.gpr0.Value() + element, GetTemporal(element)); + } + + break; + } + case OpCode::Id::TLDS: { + const Tegra::Shader::TextureType texture_type{instr.tlds.GetTextureType()}; + const bool is_array{instr.tlds.IsArrayTexture()}; + + UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::AOFFI), + "AOFFI is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::MZ), "MZ is not implemented"); + + if (instr.tlds.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TMML.NODEP implementation is incomplete"); + } + + WriteTexsInstructionFloat(bb, instr, GetTldsCode(instr, texture_type, is_array)); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled memory instruction: {}", opcode->get().GetName()); + } + + return pc; +} + +const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, TextureType type, + bool is_array, bool is_shadow) { + const auto offset = static_cast<std::size_t>(sampler.index.Value()); + + // If this sampler has already been used, return the existing mapping. 
+ const auto itr = + std::find_if(used_samplers.begin(), used_samplers.end(), + [&](const Sampler& entry) { return entry.GetOffset() == offset; }); + if (itr != used_samplers.end()) { + ASSERT(itr->GetType() == type && itr->IsArray() == is_array && + itr->IsShadow() == is_shadow); + return *itr; + } + + // Otherwise create a new mapping for this sampler + const std::size_t next_index = used_samplers.size(); + const Sampler entry{offset, next_index, type, is_array, is_shadow}; + return *used_samplers.emplace(entry).first; +} + +void ShaderIR::WriteTexInstructionFloat(BasicBlock& bb, Instruction instr, + const Node4& components) { + u32 dest_elem = 0; + for (u32 elem = 0; elem < 4; ++elem) { + if (!instr.tex.IsComponentEnabled(elem)) { + // Skip disabled components + continue; + } + SetTemporal(bb, dest_elem++, components[elem]); + } + // After writing values in temporals, move them to the real registers + for (u32 i = 0; i < dest_elem; ++i) { + SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); + } +} + +void ShaderIR::WriteTexsInstructionFloat(BasicBlock& bb, Instruction instr, + const Node4& components) { + // TEXS has two destination registers and a swizzle. The first two elements in the swizzle + // go into gpr0+0 and gpr0+1, and the rest goes into gpr28+0 and gpr28+1 + + u32 dest_elem = 0; + for (u32 component = 0; component < 4; ++component) { + if (!instr.texs.IsComponentEnabled(component)) + continue; + SetTemporal(bb, dest_elem++, components[component]); + } + + for (u32 i = 0; i < dest_elem; ++i) { + if (i < 2) { + // Write the first two swizzle components to gpr0 and gpr0+1 + SetRegister(bb, instr.gpr0.Value() + i % 2, GetTemporal(i)); + } else { + ASSERT(instr.texs.HasTwoDestinations()); + // Write the rest of the swizzle components to gpr28 and gpr28+1 + SetRegister(bb, instr.gpr28.Value() + i % 2, GetTemporal(i)); + } + } +} + +void ShaderIR::WriteTexsInstructionHalfFloat(BasicBlock& bb, Instruction instr, + const Node4& components) { + // TEXS.F16 destionation registers are packed in two registers in pairs (just like any half + // float instruction). 
+ + Node4 values; + u32 dest_elem = 0; + for (u32 component = 0; component < 4; ++component) { + if (!instr.texs.IsComponentEnabled(component)) + continue; + values[dest_elem++] = components[component]; + } + if (dest_elem == 0) + return; + + std::generate(values.begin() + dest_elem, values.end(), [&]() { return Immediate(0); }); + + const Node first_value = Operation(OperationCode::HPack2, values[0], values[1]); + if (dest_elem <= 2) { + SetRegister(bb, instr.gpr0, first_value); + return; + } + + SetTemporal(bb, 0, first_value); + SetTemporal(bb, 1, Operation(OperationCode::HPack2, values[2], values[3])); + + SetRegister(bb, instr.gpr0, GetTemporal(0)); + SetRegister(bb, instr.gpr28, GetTemporal(1)); +} + +Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type, + TextureProcessMode process_mode, bool depth_compare, bool is_array, + std::size_t array_offset, std::size_t bias_offset, + std::vector<Node>&& coords) { + UNIMPLEMENTED_IF_MSG( + (texture_type == TextureType::Texture3D && (is_array || depth_compare)) || + (texture_type == TextureType::TextureCube && is_array && depth_compare), + "This method is not supported."); + + const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, depth_compare); + + const bool lod_needed = process_mode == TextureProcessMode::LZ || + process_mode == TextureProcessMode::LL || + process_mode == TextureProcessMode::LLA; + + // LOD selection (either via bias or explicit textureLod) not supported in GL for + // sampler2DArrayShadow and samplerCubeArrayShadow. + const bool gl_lod_supported = + !((texture_type == Tegra::Shader::TextureType::Texture2D && is_array && depth_compare) || + (texture_type == Tegra::Shader::TextureType::TextureCube && is_array && depth_compare)); + + const OperationCode read_method = + lod_needed && gl_lod_supported ? OperationCode::F4TextureLod : OperationCode::F4Texture; + + UNIMPLEMENTED_IF(process_mode != TextureProcessMode::None && !gl_lod_supported); + + std::optional<u32> array_offset_value; + if (is_array) + array_offset_value = static_cast<u32>(array_offset); + + const auto coords_count = static_cast<u32>(coords.size()); + + if (process_mode != TextureProcessMode::None && gl_lod_supported) { + if (process_mode == TextureProcessMode::LZ) { + coords.push_back(Immediate(0.0f)); + } else { + // If present, lod or bias are always stored in the register indexed by the gpr20 + // field with an offset depending on the usage of the other registers + coords.push_back(GetRegister(instr.gpr20.Value() + bias_offset)); + } + } + + Node4 values; + for (u32 element = 0; element < values.size(); ++element) { + auto params = coords; + MetaTexture meta{sampler, element, coords_count, array_offset_value}; + values[element] = Operation(read_method, std::move(meta), std::move(params)); + } + + return values; +} + +Node4 ShaderIR::GetTexCode(Instruction instr, TextureType texture_type, + TextureProcessMode process_mode, bool depth_compare, bool is_array) { + const bool lod_bias_enabled = + (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ); + + const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement( + texture_type, depth_compare, is_array, lod_bias_enabled, 4, 5); + // If enabled arrays index is always stored in the gpr8 field + const u64 array_register = instr.gpr8.Value(); + // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used + const u64 coord_register = array_register + (is_array ? 
1 : 0); + + std::vector<Node> coords; + for (std::size_t i = 0; i < coord_count; ++i) { + coords.push_back(GetRegister(coord_register + i)); + } + // 1D.DC in opengl the 2nd component is ignored. + if (depth_compare && !is_array && texture_type == TextureType::Texture1D) { + coords.push_back(Immediate(0.0f)); + } + std::size_t array_offset{}; + if (is_array) { + array_offset = coords.size(); + coords.push_back(GetRegister(array_register)); + } + if (depth_compare) { + // Depth is always stored in the register signaled by gpr20 + // or in the next register if lod or bias are used + const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0); + coords.push_back(GetRegister(depth_register)); + } + // Fill ignored coordinates + while (coords.size() < total_coord_count) { + coords.push_back(Immediate(0)); + } + + return GetTextureCode(instr, texture_type, process_mode, depth_compare, is_array, array_offset, + 0, std::move(coords)); +} + +Node4 ShaderIR::GetTexsCode(Instruction instr, TextureType texture_type, + TextureProcessMode process_mode, bool depth_compare, bool is_array) { + const bool lod_bias_enabled = + (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ); + + const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement( + texture_type, depth_compare, is_array, lod_bias_enabled, 4, 4); + // If enabled arrays index is always stored in the gpr8 field + const u64 array_register = instr.gpr8.Value(); + // First coordinate index is stored in gpr8 field or (gpr8 + 1) when arrays are used + const u64 coord_register = array_register + (is_array ? 1 : 0); + const u64 last_coord_register = + (is_array || !(lod_bias_enabled || depth_compare) || (coord_count > 2)) + ? static_cast<u64>(instr.gpr20.Value()) + : coord_register + 1; + + std::vector<Node> coords; + for (std::size_t i = 0; i < coord_count; ++i) { + const bool last = (i == (coord_count - 1)) && (coord_count > 1); + coords.push_back(GetRegister(last ? last_coord_register : coord_register + i)); + } + + std::size_t array_offset{}; + if (is_array) { + array_offset = coords.size(); + coords.push_back(GetRegister(array_register)); + } + if (depth_compare) { + // Depth is always stored in the register signaled by gpr20 + // or in the next register if lod or bias are used + const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0); + coords.push_back(GetRegister(depth_register)); + } + // Fill ignored coordinates + while (coords.size() < total_coord_count) { + coords.push_back(Immediate(0)); + } + + return GetTextureCode(instr, texture_type, process_mode, depth_compare, is_array, array_offset, + (coord_count > 2 ? 1 : 0), std::move(coords)); +} + +Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool depth_compare, + bool is_array) { + const std::size_t coord_count = GetCoordCount(texture_type); + const std::size_t total_coord_count = coord_count + (is_array ? 1 : 0); + const std::size_t total_reg_count = total_coord_count + (depth_compare ? 1 : 0); + + // If enabled arrays index is always stored in the gpr8 field + const u64 array_register = instr.gpr8.Value(); + // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used + const u64 coord_register = array_register + (is_array ? 
1 : 0); + + std::vector<Node> coords; + + for (size_t i = 0; i < coord_count; ++i) { + coords.push_back(GetRegister(coord_register + i)); + } + std::optional<u32> array_offset; + if (is_array) { + array_offset = static_cast<u32>(coords.size()); + coords.push_back(GetRegister(array_register)); + } + + const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, depth_compare); + + Node4 values; + for (u32 element = 0; element < values.size(); ++element) { + auto params = coords; + MetaTexture meta{sampler, element, static_cast<u32>(coords.size()), array_offset}; + values[element] = + Operation(OperationCode::F4TextureGather, std::move(meta), std::move(params)); + } + + return values; +} + +Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) { + const std::size_t type_coord_count = GetCoordCount(texture_type); + const std::size_t total_coord_count = type_coord_count + (is_array ? 1 : 0); + const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL; + + // If enabled arrays index is always stored in the gpr8 field + const u64 array_register = instr.gpr8.Value(); + // if is array gpr20 is used + const u64 coord_register = is_array ? instr.gpr20.Value() : instr.gpr8.Value(); + + const u64 last_coord_register = + ((type_coord_count > 2) || (type_coord_count == 2 && !lod_enabled)) && !is_array + ? static_cast<u64>(instr.gpr20.Value()) + : coord_register + 1; + + std::vector<Node> coords; + + for (std::size_t i = 0; i < type_coord_count; ++i) { + const bool last = (i == (type_coord_count - 1)) && (type_coord_count > 1); + coords.push_back(GetRegister(last ? last_coord_register : coord_register + i)); + } + std::optional<u32> array_offset; + if (is_array) { + array_offset = static_cast<u32>(coords.size()); + coords.push_back(GetRegister(array_register)); + } + const auto coords_count = static_cast<u32>(coords.size()); + + if (lod_enabled) { + // When lod is used always is in grp20 + coords.push_back(GetRegister(instr.gpr20)); + } else { + coords.push_back(Immediate(0)); + } + + const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false); + + Node4 values; + for (u32 element = 0; element < values.size(); ++element) { + auto params = coords; + MetaTexture meta{sampler, element, coords_count, array_offset}; + values[element] = + Operation(OperationCode::F4TexelFetch, std::move(meta), std::move(params)); + } + return values; +} + +std::tuple<std::size_t, std::size_t> ShaderIR::ValidateAndGetCoordinateElement( + TextureType texture_type, bool depth_compare, bool is_array, bool lod_bias_enabled, + std::size_t max_coords, std::size_t max_inputs) { + const std::size_t coord_count = GetCoordCount(texture_type); + + std::size_t total_coord_count = coord_count + (is_array ? 1 : 0) + (depth_compare ? 1 : 0); + const std::size_t total_reg_count = total_coord_count + (lod_bias_enabled ? 1 : 0); + if (total_coord_count > max_coords || total_reg_count > max_inputs) { + UNIMPLEMENTED_MSG("Unsupported Texture operation"); + total_coord_count = std::min(total_coord_count, max_coords); + } + // 1D.DC OpenGL is using a vec3 but 2nd component is ignored later. + total_coord_count += + (depth_compare && !is_array && texture_type == TextureType::Texture1D) ? 
1 : 0; + + return {coord_count, total_coord_count}; +} + +} // namespace VideoCommon::Shader diff --git a/src/video_core/shader/decode/other.cpp b/src/video_core/shader/decode/other.cpp new file mode 100644 index 000000000..c1e5f4efb --- /dev/null +++ b/src/video_core/shader/decode/other.cpp @@ -0,0 +1,178 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::ConditionCode; +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Register; + +u32 ShaderIR::DecodeOther(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + switch (opcode->get().GetId()) { + case OpCode::Id::EXIT: { + const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; + UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "EXIT condition code used: {}", + static_cast<u32>(cc)); + + switch (instr.flow.cond) { + case Tegra::Shader::FlowCondition::Always: + bb.push_back(Operation(OperationCode::Exit)); + if (instr.pred.pred_index == static_cast<u64>(Tegra::Shader::Pred::UnusedIndex)) { + // If this is an unconditional exit then just end processing here, + // otherwise we have to account for the possibility of the condition + // not being met, so continue processing the next instruction. + pc = MAX_PROGRAM_LENGTH - 1; + } + break; + + case Tegra::Shader::FlowCondition::Fcsm_Tr: + // TODO(bunnei): What is this used for? If we assume this conditon is not + // satisifed, dual vertex shaders in Farming Simulator make more sense + UNIMPLEMENTED_MSG("Skipping unknown FlowCondition::Fcsm_Tr"); + break; + + default: + UNIMPLEMENTED_MSG("Unhandled flow condition: {}", + static_cast<u32>(instr.flow.cond.Value())); + } + break; + } + case OpCode::Id::KIL: { + UNIMPLEMENTED_IF(instr.flow.cond != Tegra::Shader::FlowCondition::Always); + + const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; + UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "KIL condition code used: {}", + static_cast<u32>(cc)); + + bb.push_back(Operation(OperationCode::Discard)); + break; + } + case OpCode::Id::MOV_SYS: { + switch (instr.sys20) { + case Tegra::Shader::SystemVariable::InvocationInfo: { + LOG_WARNING(HW_GPU, "MOV_SYS instruction with InvocationInfo is incomplete"); + SetRegister(bb, instr.gpr0, Immediate(0u)); + break; + } + case Tegra::Shader::SystemVariable::Ydirection: { + // Config pack's third value is Y_NEGATE's state. 
+ SetRegister(bb, instr.gpr0, Operation(OperationCode::YNegate)); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled system move: {}", static_cast<u32>(instr.sys20.Value())); + } + break; + } + case OpCode::Id::BRA: { + UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, + "BRA with constant buffers are not implemented"); + + const u32 target = pc + instr.bra.GetBranchTarget(); + const Node branch = Operation(OperationCode::Branch, Immediate(target)); + + const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; + if (cc != Tegra::Shader::ConditionCode::T) { + bb.push_back(Conditional(GetConditionCode(cc), {branch})); + } else { + bb.push_back(branch); + } + break; + } + case OpCode::Id::SSY: { + UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, + "Constant buffer flow is not supported"); + + // The SSY opcode tells the GPU where to re-converge divergent execution paths, it sets the + // target of the jump that the SYNC instruction will make. The SSY opcode has a similar + // structure to the BRA opcode. + const u32 target = pc + instr.bra.GetBranchTarget(); + bb.push_back(Operation(OperationCode::PushFlowStack, Immediate(target))); + break; + } + case OpCode::Id::PBK: { + UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, + "Constant buffer PBK is not supported"); + + // PBK pushes to a stack the address where BRK will jump to. This shares stack with SSY but + // using SYNC on a PBK address will kill the shader execution. We don't emulate this because + // it's very unlikely a driver will emit such invalid shader. + const u32 target = pc + instr.bra.GetBranchTarget(); + bb.push_back(Operation(OperationCode::PushFlowStack, Immediate(target))); + break; + } + case OpCode::Id::SYNC: { + const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; + UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "SYNC condition code used: {}", + static_cast<u32>(cc)); + + // The SYNC opcode jumps to the address previously set by the SSY opcode + bb.push_back(Operation(OperationCode::PopFlowStack)); + break; + } + case OpCode::Id::BRK: { + const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; + UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "BRK condition code used: {}", + static_cast<u32>(cc)); + + // The BRK opcode jumps to the address previously set by the PBK opcode + bb.push_back(Operation(OperationCode::PopFlowStack)); + break; + } + case OpCode::Id::IPA: { + const auto& attribute = instr.attribute.fmt28; + const Tegra::Shader::IpaMode input_mode{instr.ipa.interp_mode.Value(), + instr.ipa.sample_mode.Value()}; + + const Node attr = GetInputAttribute(attribute.index, attribute.element, input_mode); + const Node value = GetSaturatedFloat(attr, instr.ipa.saturate); + + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::OUT_R: { + UNIMPLEMENTED_IF_MSG(instr.gpr20.Value() != Register::ZeroIndex, + "Stream buffer is not supported"); + + if (instr.out.emit) { + // gpr0 is used to store the next address and gpr8 contains the address to emit. 
+ // Hardware uses pointers here but we just ignore it + bb.push_back(Operation(OperationCode::EmitVertex)); + SetRegister(bb, instr.gpr0, Immediate(0)); + } + if (instr.out.cut) { + bb.push_back(Operation(OperationCode::EndPrimitive)); + } + break; + } + case OpCode::Id::ISBERD: { + UNIMPLEMENTED_IF(instr.isberd.o != 0); + UNIMPLEMENTED_IF(instr.isberd.skew != 0); + UNIMPLEMENTED_IF(instr.isberd.shift != Tegra::Shader::IsberdShift::None); + UNIMPLEMENTED_IF(instr.isberd.mode != Tegra::Shader::IsberdMode::None); + LOG_WARNING(HW_GPU, "ISBERD instruction is incomplete"); + SetRegister(bb, instr.gpr0, GetRegister(instr.gpr8)); + break; + } + case OpCode::Id::DEPBAR: { + LOG_WARNING(HW_GPU, "DEPBAR instruction is stubbed"); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled instruction: {}", opcode->get().GetName()); + } + + return pc; +} + +} // namespace VideoCommon::Shader
\ No newline at end of file diff --git a/src/video_core/shader/decode/predicate_set_predicate.cpp b/src/video_core/shader/decode/predicate_set_predicate.cpp new file mode 100644 index 000000000..1717f0653 --- /dev/null +++ b/src/video_core/shader/decode/predicate_set_predicate.cpp @@ -0,0 +1,67 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Pred; + +u32 ShaderIR::DecodePredicateSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + switch (opcode->get().GetId()) { + case OpCode::Id::PSETP: { + const Node op_a = GetPredicate(instr.psetp.pred12, instr.psetp.neg_pred12 != 0); + const Node op_b = GetPredicate(instr.psetp.pred29, instr.psetp.neg_pred29 != 0); + + // We can't use the constant predicate as destination. + ASSERT(instr.psetp.pred3 != static_cast<u64>(Pred::UnusedIndex)); + + const Node second_pred = GetPredicate(instr.psetp.pred39, instr.psetp.neg_pred39 != 0); + + const OperationCode combiner = GetPredicateCombiner(instr.psetp.op); + const Node predicate = Operation(combiner, op_a, op_b); + + // Set the primary predicate to the result of Predicate OP SecondPredicate + SetPredicate(bb, instr.psetp.pred3, Operation(combiner, predicate, second_pred)); + + if (instr.psetp.pred0 != static_cast<u64>(Pred::UnusedIndex)) { + // Set the secondary predicate to the result of !Predicate OP SecondPredicate, if + // enabled + SetPredicate(bb, instr.psetp.pred0, + Operation(combiner, Operation(OperationCode::LogicalNegate, predicate), + second_pred)); + } + break; + } + case OpCode::Id::CSETP: { + const Node pred = GetPredicate(instr.csetp.pred39, instr.csetp.neg_pred39 != 0); + const Node condition_code = GetConditionCode(instr.csetp.cc); + + const OperationCode combiner = GetPredicateCombiner(instr.csetp.op); + + if (instr.csetp.pred3 != static_cast<u64>(Pred::UnusedIndex)) { + SetPredicate(bb, instr.csetp.pred3, Operation(combiner, condition_code, pred)); + } + if (instr.csetp.pred0 != static_cast<u64>(Pred::UnusedIndex)) { + const Node neg_cc = Operation(OperationCode::LogicalNegate, condition_code); + SetPredicate(bb, instr.csetp.pred0, Operation(combiner, neg_cc, pred)); + } + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled predicate instruction: {}", opcode->get().GetName()); + } + + return pc; +} + +} // namespace VideoCommon::Shader
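Annotation: the PSETP handler above combines two predicates with a combiner operation, writes the result to the primary destination predicate, and writes the negated variant to the secondary one. A minimal standalone sketch of that combine-and-negate step, using plain bools in place of IR nodes; CombineOp and Combine are hypothetical stand-ins for Tegra::Shader::PredOperation and GetPredicateCombiner:

// Standalone illustration of the PSETP combine logic, with plain bools instead of IR nodes.
#include <cstdio>

enum class CombineOp { And, Or, Xor };

static bool Combine(CombineOp op, bool a, bool b) {
    switch (op) {
    case CombineOp::And: return a && b;
    case CombineOp::Or:  return a || b;
    case CombineOp::Xor: return a != b;
    }
    return false;
}

int main() {
    const bool pred12 = true, pred29 = false, pred39 = true;  // illustrative inputs
    const CombineOp op = CombineOp::And;

    // Primary destination: (pred12 OP pred29) OP pred39
    const bool primary = Combine(op, Combine(op, pred12, pred29), pred39);
    // Secondary destination: !(pred12 OP pred29) OP pred39
    const bool secondary = Combine(op, !Combine(op, pred12, pred29), pred39);

    std::printf("primary=%d secondary=%d\n", primary, secondary);
}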
\ No newline at end of file diff --git a/src/video_core/shader/decode/predicate_set_register.cpp b/src/video_core/shader/decode/predicate_set_register.cpp new file mode 100644 index 000000000..8bd15fb00 --- /dev/null +++ b/src/video_core/shader/decode/predicate_set_register.cpp @@ -0,0 +1,46 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodePredicateSetRegister(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in PSET is not implemented"); + + const Node op_a = GetPredicate(instr.pset.pred12, instr.pset.neg_pred12 != 0); + const Node op_b = GetPredicate(instr.pset.pred29, instr.pset.neg_pred29 != 0); + const Node first_pred = Operation(GetPredicateCombiner(instr.pset.cond), op_a, op_b); + + const Node second_pred = GetPredicate(instr.pset.pred39, instr.pset.neg_pred39 != 0); + + const OperationCode combiner = GetPredicateCombiner(instr.pset.op); + const Node predicate = Operation(combiner, first_pred, second_pred); + + const Node true_value = instr.pset.bf ? Immediate(1.0f) : Immediate(0xffffffff); + const Node false_value = instr.pset.bf ? Immediate(0.0f) : Immediate(0); + const Node value = + Operation(OperationCode::Select, PRECISE, predicate, true_value, false_value); + + if (instr.pset.bf) { + SetInternalFlagsFromFloat(bb, value, instr.generates_cc); + } else { + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + } + SetRegister(bb, instr.gpr0, value); + + return pc; +} + +} // namespace VideoCommon::Shader
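Annotation: PSET above materializes the combined predicate into a register, either as a float (1.0f / 0.0f when the BF bit is set) or as an all-ones integer mask (0xFFFFFFFF / 0 otherwise). A small sketch of just that materialization step, under the assumption that the predicate has already been evaluated:

// Sketch of the PSET result selection: float when BF is set, integer mask otherwise.
#include <cstdint>
#include <cstring>
#include <cstdio>

static std::uint32_t MaterializePset(bool predicate, bool bf) {
    if (bf) {
        const float f = predicate ? 1.0f : 0.0f;
        std::uint32_t bits;
        std::memcpy(&bits, &f, sizeof(bits));  // bit-cast the float result
        return bits;
    }
    return predicate ? 0xFFFFFFFFu : 0u;
}

int main() {
    std::printf("bf=0 -> %08x, bf=1 -> %08x\n", MaterializePset(true, false),
                MaterializePset(true, true));
}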
\ No newline at end of file diff --git a/src/video_core/shader/decode/register_set_predicate.cpp b/src/video_core/shader/decode/register_set_predicate.cpp new file mode 100644 index 000000000..bdb4424a6 --- /dev/null +++ b/src/video_core/shader/decode/register_set_predicate.cpp @@ -0,0 +1,51 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeRegisterSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + UNIMPLEMENTED_IF(instr.r2p.mode != Tegra::Shader::R2pMode::Pr); + + const Node apply_mask = [&]() { + switch (opcode->get().GetId()) { + case OpCode::Id::R2P_IMM: + return Immediate(static_cast<u32>(instr.r2p.immediate_mask)); + default: + UNREACHABLE(); + return Immediate(static_cast<u32>(instr.r2p.immediate_mask)); + } + }(); + const Node mask = GetRegister(instr.gpr8); + const auto offset = static_cast<u32>(instr.r2p.byte) * 8; + + constexpr u32 programmable_preds = 7; + for (u64 pred = 0; pred < programmable_preds; ++pred) { + const auto shift = static_cast<u32>(pred); + + const Node apply_compare = BitfieldExtract(apply_mask, shift, 1); + const Node condition = + Operation(OperationCode::LogicalUNotEqual, apply_compare, Immediate(0)); + + const Node value_compare = BitfieldExtract(mask, offset + shift, 1); + const Node value = Operation(OperationCode::LogicalUNotEqual, value_compare, Immediate(0)); + + const Node code = Operation(OperationCode::LogicalAssign, GetPredicate(pred), value); + bb.push_back(Conditional(condition, {code})); + } + + return pc; +} + +} // namespace VideoCommon::Shader
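Annotation: R2P_IMM above copies individual bits of a source register into the programmable predicates, gated by an immediate mask, with the register bits offset by byte * 8. A bit-level sketch of that loop with illustrative constants (the mask, byte index, and source value below are made up):

// Bit-level illustration of R2P_IMM: predicate i takes bit (byte*8 + i) of the
// source register, but only when bit i of the immediate mask is set.
#include <array>
#include <cstdint>
#include <cstdio>

int main() {
    constexpr std::uint32_t source = 0x0000A500u;   // value in gpr8
    constexpr std::uint32_t apply_mask = 0b0101011; // immediate mask
    constexpr unsigned byte = 1;                    // instr.r2p.byte
    std::array<bool, 7> predicates{};               // P0..P6; PT is not writable

    for (unsigned pred = 0; pred < predicates.size(); ++pred) {
        if ((apply_mask >> pred) & 1u) {
            predicates[pred] = ((source >> (byte * 8 + pred)) & 1u) != 0;
        }
    }
    for (unsigned pred = 0; pred < predicates.size(); ++pred) {
        std::printf("P%u=%d\n", pred, predicates[pred] ? 1 : 0);
    }
}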
\ No newline at end of file diff --git a/src/video_core/shader/decode/shift.cpp b/src/video_core/shader/decode/shift.cpp new file mode 100644 index 000000000..6623f8ff9 --- /dev/null +++ b/src/video_core/shader/decode/shift.cpp @@ -0,0 +1,55 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeShift(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + const Node op_a = GetRegister(instr.gpr8); + const Node op_b = [&]() { + if (instr.is_b_imm) { + return Immediate(instr.alu.GetSignedImm20_20()); + } else if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset()); + } + }(); + + switch (opcode->get().GetId()) { + case OpCode::Id::SHR_C: + case OpCode::Id::SHR_R: + case OpCode::Id::SHR_IMM: { + const Node value = SignedOperation(OperationCode::IArithmeticShiftRight, + instr.shift.is_signed, PRECISE, op_a, op_b); + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::SHL_C: + case OpCode::Id::SHL_R: + case OpCode::Id::SHL_IMM: { + const Node value = Operation(OperationCode::ILogicalShiftLeft, PRECISE, op_a, op_b); + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled shift instruction: {}", opcode->get().GetName()); + } + + return pc; +} + +} // namespace VideoCommon::Shader
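Annotation: the SHR cases above pick an arithmetic or logical right shift depending on the instruction's signed flag (via SignedOperation). A minimal sketch of the difference, assuming a typical target where signed right shift is arithmetic (guaranteed since C++20):

// Arithmetic vs. logical right shift: the arithmetic form preserves the sign bit.
#include <cstdint>
#include <cstdio>

int main() {
    const std::uint32_t raw = 0xFFFFFFF0u;  // -16 when reinterpreted as signed
    const unsigned shift = 2;

    const std::uint32_t logical = raw >> shift;                    // ULogicalShiftRight
    const std::int32_t arithmetic = static_cast<std::int32_t>(raw) >> shift;  // IArithmeticShiftRight

    std::printf("logical=0x%08x arithmetic=%d\n", logical, arithmetic);
}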
\ No newline at end of file diff --git a/src/video_core/shader/decode/video.cpp b/src/video_core/shader/decode/video.cpp new file mode 100644 index 000000000..c3432356d --- /dev/null +++ b/src/video_core/shader/decode/video.cpp @@ -0,0 +1,111 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Pred; +using Tegra::Shader::VideoType; +using Tegra::Shader::VmadShr; + +u32 ShaderIR::DecodeVideo(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + const Node op_a = + GetVideoOperand(GetRegister(instr.gpr8), instr.video.is_byte_chunk_a, instr.video.signed_a, + instr.video.type_a, instr.video.byte_height_a); + const Node op_b = [&]() { + if (instr.video.use_register_b) { + return GetVideoOperand(GetRegister(instr.gpr20), instr.video.is_byte_chunk_b, + instr.video.signed_b, instr.video.type_b, + instr.video.byte_height_b); + } + if (instr.video.signed_b) { + const auto imm = static_cast<s16>(instr.alu.GetImm20_16()); + return Immediate(static_cast<u32>(imm)); + } else { + return Immediate(instr.alu.GetImm20_16()); + } + }(); + + switch (opcode->get().GetId()) { + case OpCode::Id::VMAD: { + const bool result_signed = instr.video.signed_a == 1 || instr.video.signed_b == 1; + const Node op_c = GetRegister(instr.gpr39); + + Node value = SignedOperation(OperationCode::IMul, result_signed, NO_PRECISE, op_a, op_b); + value = SignedOperation(OperationCode::IAdd, result_signed, NO_PRECISE, value, op_c); + + if (instr.vmad.shr == VmadShr::Shr7 || instr.vmad.shr == VmadShr::Shr15) { + const Node shift = Immediate(instr.vmad.shr == VmadShr::Shr7 ? 7 : 15); + value = + SignedOperation(OperationCode::IArithmeticShiftRight, result_signed, value, shift); + } + + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::VSETP: { + // We can't use the constant predicate as destination. 
+ ASSERT(instr.vsetp.pred3 != static_cast<u64>(Pred::UnusedIndex)); + + const bool sign = instr.video.signed_a == 1 || instr.video.signed_b == 1; + const Node first_pred = GetPredicateComparisonInteger(instr.vsetp.cond, sign, op_a, op_b); + const Node second_pred = GetPredicate(instr.vsetp.pred39, false); + + const OperationCode combiner = GetPredicateCombiner(instr.vsetp.op); + + // Set the primary predicate to the result of Predicate OP SecondPredicate + SetPredicate(bb, instr.vsetp.pred3, Operation(combiner, first_pred, second_pred)); + + if (instr.vsetp.pred0 != static_cast<u64>(Pred::UnusedIndex)) { + // Set the secondary predicate to the result of !Predicate OP SecondPredicate, + // if enabled + const Node negate_pred = Operation(OperationCode::LogicalNegate, first_pred); + SetPredicate(bb, instr.vsetp.pred0, Operation(combiner, negate_pred, second_pred)); + } + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled video instruction: {}", opcode->get().GetName()); + } + + return pc; +} + +Node ShaderIR::GetVideoOperand(Node op, bool is_chunk, bool is_signed, + Tegra::Shader::VideoType type, u64 byte_height) { + if (!is_chunk) { + return BitfieldExtract(op, static_cast<u32>(byte_height * 8), 8); + } + const Node zero = Immediate(0); + + switch (type) { + case Tegra::Shader::VideoType::Size16_Low: + return BitfieldExtract(op, 0, 16); + case Tegra::Shader::VideoType::Size16_High: + return BitfieldExtract(op, 16, 16); + case Tegra::Shader::VideoType::Size32: + // TODO(Rodrigo): From my hardware tests it becomes a bit "mad" when this type is used + // (1 * 1 + 0 == 0x5b800000). Until a better explanation is found: abort. + UNIMPLEMENTED(); + return zero; + case Tegra::Shader::VideoType::Invalid: + UNREACHABLE_MSG("Invalid instruction encoding"); + return zero; + default: + UNREACHABLE(); + return zero; + } +} + +} // namespace VideoCommon::Shader
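Annotation: GetVideoOperand above selects either one byte lane of the register (byte_height picks which) or one 16-bit half, via bitfield extraction. A standalone sketch of that selection with a hypothetical ExtractBits helper standing in for BitfieldExtract:

// Operand selection for video instructions: byte lanes or 16-bit halves of a register.
#include <cstdint>
#include <cstdio>

static std::uint32_t ExtractBits(std::uint32_t value, unsigned offset, unsigned bits) {
    return (value >> offset) & ((1u << bits) - 1u);
}

int main() {
    const std::uint32_t reg = 0x44332211u;

    // Byte chunks, the path taken when is_byte_chunk is not set in the decoder above
    for (unsigned byte_height = 0; byte_height < 4; ++byte_height) {
        std::printf("byte %u: 0x%02x\n", byte_height, ExtractBits(reg, byte_height * 8, 8));
    }
    // 16-bit halves (VideoType::Size16_Low / Size16_High)
    std::printf("low16: 0x%04x high16: 0x%04x\n", ExtractBits(reg, 0, 16),
                ExtractBits(reg, 16, 16));
}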
\ No newline at end of file diff --git a/src/video_core/shader/decode/xmad.cpp b/src/video_core/shader/decode/xmad.cpp new file mode 100644 index 000000000..9cb864500 --- /dev/null +++ b/src/video_core/shader/decode/xmad.cpp @@ -0,0 +1,98 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeXmad(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + UNIMPLEMENTED_IF(instr.xmad.sign_a); + UNIMPLEMENTED_IF(instr.xmad.sign_b); + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in XMAD is not implemented"); + + Node op_a = GetRegister(instr.gpr8); + + // TODO(bunnei): Needs to be fixed once op_a or op_b is signed + UNIMPLEMENTED_IF(instr.xmad.sign_a != instr.xmad.sign_b); + const bool is_signed_a = instr.xmad.sign_a == 1; + const bool is_signed_b = instr.xmad.sign_b == 1; + const bool is_signed_c = is_signed_a; + + auto [is_merge, op_b, op_c] = [&]() -> std::tuple<bool, Node, Node> { + switch (opcode->get().GetId()) { + case OpCode::Id::XMAD_CR: + return {instr.xmad.merge_56, + GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset()), + GetRegister(instr.gpr39)}; + case OpCode::Id::XMAD_RR: + return {instr.xmad.merge_37, GetRegister(instr.gpr20), GetRegister(instr.gpr39)}; + case OpCode::Id::XMAD_RC: + return {false, GetRegister(instr.gpr39), + GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset())}; + case OpCode::Id::XMAD_IMM: + return {instr.xmad.merge_37, Immediate(static_cast<u32>(instr.xmad.imm20_16)), + GetRegister(instr.gpr39)}; + } + UNIMPLEMENTED_MSG("Unhandled XMAD instruction: {}", opcode->get().GetName()); + return {false, Immediate(0), Immediate(0)}; + }(); + + op_a = BitfieldExtract(op_a, instr.xmad.high_a ? 16 : 0, 16); + + const Node original_b = op_b; + op_b = BitfieldExtract(op_b, instr.xmad.high_b ? 
16 : 0, 16); + + // TODO(Rodrigo): Use an appropiate sign for this operation + Node product = Operation(OperationCode::IMul, NO_PRECISE, op_a, op_b); + if (instr.xmad.product_shift_left) { + product = Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, product, Immediate(16)); + } + + const Node original_c = op_c; + op_c = [&]() { + switch (instr.xmad.mode) { + case Tegra::Shader::XmadMode::None: + return original_c; + case Tegra::Shader::XmadMode::CLo: + return BitfieldExtract(original_c, 0, 16); + case Tegra::Shader::XmadMode::CHi: + return BitfieldExtract(original_c, 16, 16); + case Tegra::Shader::XmadMode::CBcc: { + const Node shifted_b = SignedOperation(OperationCode::ILogicalShiftLeft, is_signed_b, + NO_PRECISE, original_b, Immediate(16)); + return SignedOperation(OperationCode::IAdd, is_signed_c, NO_PRECISE, original_c, + shifted_b); + } + default: + UNIMPLEMENTED_MSG("Unhandled XMAD mode: {}", static_cast<u32>(instr.xmad.mode.Value())); + return Immediate(0); + } + }(); + + // TODO(Rodrigo): Use an appropiate sign for this operation + Node sum = Operation(OperationCode::IAdd, product, op_c); + if (is_merge) { + const Node a = BitfieldExtract(sum, 0, 16); + const Node b = + Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, original_b, Immediate(16)); + sum = Operation(OperationCode::IBitwiseOr, NO_PRECISE, a, b); + } + + SetInternalFlagsFromInteger(bb, sum, instr.generates_cc); + SetRegister(bb, instr.gpr0, sum); + + return pc; +} + +} // namespace VideoCommon::Shader
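Annotation: the core XMAD dataflow above takes a 16-bit half of each of the first two operands, multiplies them, optionally shifts the product left by 16, and adds the third operand. A simplified numeric sketch of that path only (XmadMode::None, no merge); the operand values and flag settings below are illustrative, not taken from any real shader:

// Worked numeric sketch of the basic XMAD path: half_a * half_b (+ optional << 16) + c.
#include <cstdint>
#include <cstdio>

int main() {
    const std::uint32_t a = 0x00012345u;
    const std::uint32_t b = 0xABCD0007u;
    const std::uint32_t c = 0x00000010u;

    const bool high_a = false;              // instr.xmad.high_a
    const bool high_b = true;               // instr.xmad.high_b
    const bool product_shift_left = false;  // instr.xmad.product_shift_left

    const std::uint32_t half_a = (a >> (high_a ? 16 : 0)) & 0xFFFFu;  // 0x2345
    const std::uint32_t half_b = (b >> (high_b ? 16 : 0)) & 0xFFFFu;  // 0xABCD
    std::uint32_t product = half_a * half_b;
    if (product_shift_left) {
        product <<= 16;
    }
    const std::uint32_t sum = product + c;

    std::printf("product=0x%08x sum=0x%08x\n", product, sum);
}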
\ No newline at end of file diff --git a/src/video_core/shader/shader_ir.cpp b/src/video_core/shader/shader_ir.cpp new file mode 100644 index 000000000..d7747103e --- /dev/null +++ b/src/video_core/shader/shader_ir.cpp @@ -0,0 +1,444 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <cmath> +#include <unordered_map> + +#include "common/assert.h" +#include "common/common_types.h" +#include "common/logging/log.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Attribute; +using Tegra::Shader::Instruction; +using Tegra::Shader::IpaMode; +using Tegra::Shader::Pred; +using Tegra::Shader::PredCondition; +using Tegra::Shader::PredOperation; +using Tegra::Shader::Register; + +Node ShaderIR::StoreNode(NodeData&& node_data) { + auto store = std::make_unique<NodeData>(node_data); + const Node node = store.get(); + stored_nodes.push_back(std::move(store)); + return node; +} + +Node ShaderIR::Conditional(Node condition, std::vector<Node>&& code) { + return StoreNode(ConditionalNode(condition, std::move(code))); +} + +Node ShaderIR::Comment(const std::string& text) { + return StoreNode(CommentNode(text)); +} + +Node ShaderIR::Immediate(u32 value) { + return StoreNode(ImmediateNode(value)); +} + +Node ShaderIR::GetRegister(Register reg) { + if (reg != Register::ZeroIndex) { + used_registers.insert(static_cast<u32>(reg)); + } + return StoreNode(GprNode(reg)); +} + +Node ShaderIR::GetImmediate19(Instruction instr) { + return Immediate(instr.alu.GetImm20_19()); +} + +Node ShaderIR::GetImmediate32(Instruction instr) { + return Immediate(instr.alu.GetImm20_32()); +} + +Node ShaderIR::GetConstBuffer(u64 index_, u64 offset_) { + const auto index = static_cast<u32>(index_); + const auto offset = static_cast<u32>(offset_); + + const auto [entry, is_new] = used_cbufs.try_emplace(index); + entry->second.MarkAsUsed(offset); + + return StoreNode(CbufNode(index, Immediate(offset))); +} + +Node ShaderIR::GetConstBufferIndirect(u64 index_, u64 offset_, Node node) { + const auto index = static_cast<u32>(index_); + const auto offset = static_cast<u32>(offset_); + + const auto [entry, is_new] = used_cbufs.try_emplace(index); + entry->second.MarkAsUsedIndirect(); + + const Node final_offset = Operation(OperationCode::UAdd, NO_PRECISE, node, Immediate(offset)); + return StoreNode(CbufNode(index, final_offset)); +} + +Node ShaderIR::GetPredicate(u64 pred_, bool negated) { + const auto pred = static_cast<Pred>(pred_); + if (pred != Pred::UnusedIndex && pred != Pred::NeverExecute) { + used_predicates.insert(pred); + } + + return StoreNode(PredicateNode(pred, negated)); +} + +Node ShaderIR::GetPredicate(bool immediate) { + return GetPredicate(static_cast<u64>(immediate ? 
Pred::UnusedIndex : Pred::NeverExecute)); +} + +Node ShaderIR::GetInputAttribute(Attribute::Index index, u64 element, + const Tegra::Shader::IpaMode& input_mode, Node buffer) { + const auto [entry, is_new] = + used_input_attributes.emplace(std::make_pair(index, std::set<Tegra::Shader::IpaMode>{})); + entry->second.insert(input_mode); + + return StoreNode(AbufNode(index, static_cast<u32>(element), input_mode, buffer)); +} + +Node ShaderIR::GetOutputAttribute(Attribute::Index index, u64 element, Node buffer) { + if (index == Attribute::Index::ClipDistances0123 || + index == Attribute::Index::ClipDistances4567) { + const auto clip_index = + static_cast<u32>((index == Attribute::Index::ClipDistances4567 ? 1 : 0) + element); + used_clip_distances.at(clip_index) = true; + } + used_output_attributes.insert(index); + + return StoreNode(AbufNode(index, static_cast<u32>(element), buffer)); +} + +Node ShaderIR::GetInternalFlag(InternalFlag flag, bool negated) { + const Node node = StoreNode(InternalFlagNode(flag)); + if (negated) { + return Operation(OperationCode::LogicalNegate, node); + } + return node; +} + +Node ShaderIR::GetLocalMemory(Node address) { + return StoreNode(LmemNode(address)); +} + +Node ShaderIR::GetTemporal(u32 id) { + return GetRegister(Register::ZeroIndex + 1 + id); +} + +Node ShaderIR::GetOperandAbsNegFloat(Node value, bool absolute, bool negate) { + if (absolute) { + value = Operation(OperationCode::FAbsolute, NO_PRECISE, value); + } + if (negate) { + value = Operation(OperationCode::FNegate, NO_PRECISE, value); + } + return value; +} + +Node ShaderIR::GetSaturatedFloat(Node value, bool saturate) { + if (!saturate) { + return value; + } + const Node positive_zero = Immediate(std::copysignf(0, 1)); + const Node positive_one = Immediate(1.0f); + return Operation(OperationCode::FClamp, NO_PRECISE, value, positive_zero, positive_one); +} + +Node ShaderIR::ConvertIntegerSize(Node value, Tegra::Shader::Register::Size size, bool is_signed) { + switch (size) { + case Register::Size::Byte: + value = SignedOperation(OperationCode::ILogicalShiftLeft, is_signed, NO_PRECISE, value, + Immediate(24)); + value = SignedOperation(OperationCode::IArithmeticShiftRight, is_signed, NO_PRECISE, value, + Immediate(24)); + return value; + case Register::Size::Short: + value = SignedOperation(OperationCode::ILogicalShiftLeft, is_signed, NO_PRECISE, value, + Immediate(16)); + value = SignedOperation(OperationCode::IArithmeticShiftRight, is_signed, NO_PRECISE, value, + Immediate(16)); + case Register::Size::Word: + // Default - do nothing + return value; + default: + UNREACHABLE_MSG("Unimplemented conversion size: {}", static_cast<u32>(size)); + return value; + } +} + +Node ShaderIR::GetOperandAbsNegInteger(Node value, bool absolute, bool negate, bool is_signed) { + if (!is_signed) { + // Absolute or negate on an unsigned is pointless + return value; + } + if (absolute) { + value = Operation(OperationCode::IAbsolute, NO_PRECISE, value); + } + if (negate) { + value = Operation(OperationCode::INegate, NO_PRECISE, value); + } + return value; +} + +Node ShaderIR::UnpackHalfImmediate(Instruction instr, bool has_negation) { + const Node value = Immediate(instr.half_imm.PackImmediates()); + if (!has_negation) { + return value; + } + const Node first_negate = GetPredicate(instr.half_imm.first_negate != 0); + const Node second_negate = GetPredicate(instr.half_imm.second_negate != 0); + + return Operation(OperationCode::HNegate, HALF_NO_PRECISE, value, first_negate, second_negate); +} + +Node 
ShaderIR::HalfMerge(Node dest, Node src, Tegra::Shader::HalfMerge merge) { + switch (merge) { + case Tegra::Shader::HalfMerge::H0_H1: + return src; + case Tegra::Shader::HalfMerge::F32: + return Operation(OperationCode::HMergeF32, src); + case Tegra::Shader::HalfMerge::Mrg_H0: + return Operation(OperationCode::HMergeH0, dest, src); + case Tegra::Shader::HalfMerge::Mrg_H1: + return Operation(OperationCode::HMergeH1, dest, src); + } + UNREACHABLE(); + return src; +} + +Node ShaderIR::GetOperandAbsNegHalf(Node value, bool absolute, bool negate) { + if (absolute) { + value = Operation(OperationCode::HAbsolute, HALF_NO_PRECISE, value); + } + if (negate) { + value = Operation(OperationCode::HNegate, HALF_NO_PRECISE, value, GetPredicate(true), + GetPredicate(true)); + } + return value; +} + +Node ShaderIR::GetPredicateComparisonFloat(PredCondition condition, Node op_a, Node op_b) { + static const std::unordered_map<PredCondition, OperationCode> PredicateComparisonTable = { + {PredCondition::LessThan, OperationCode::LogicalFLessThan}, + {PredCondition::Equal, OperationCode::LogicalFEqual}, + {PredCondition::LessEqual, OperationCode::LogicalFLessEqual}, + {PredCondition::GreaterThan, OperationCode::LogicalFGreaterThan}, + {PredCondition::NotEqual, OperationCode::LogicalFNotEqual}, + {PredCondition::GreaterEqual, OperationCode::LogicalFGreaterEqual}, + {PredCondition::LessThanWithNan, OperationCode::LogicalFLessThan}, + {PredCondition::NotEqualWithNan, OperationCode::LogicalFNotEqual}, + {PredCondition::LessEqualWithNan, OperationCode::LogicalFLessEqual}, + {PredCondition::GreaterThanWithNan, OperationCode::LogicalFGreaterThan}, + {PredCondition::GreaterEqualWithNan, OperationCode::LogicalFGreaterEqual}}; + + const auto comparison{PredicateComparisonTable.find(condition)}; + UNIMPLEMENTED_IF_MSG(comparison == PredicateComparisonTable.end(), + "Unknown predicate comparison operation"); + + Node predicate = Operation(comparison->second, NO_PRECISE, op_a, op_b); + + if (condition == PredCondition::LessThanWithNan || + condition == PredCondition::NotEqualWithNan || + condition == PredCondition::LessEqualWithNan || + condition == PredCondition::GreaterThanWithNan || + condition == PredCondition::GreaterEqualWithNan) { + + predicate = Operation(OperationCode::LogicalOr, predicate, + Operation(OperationCode::LogicalFIsNan, op_a)); + predicate = Operation(OperationCode::LogicalOr, predicate, + Operation(OperationCode::LogicalFIsNan, op_b)); + } + + return predicate; +} + +Node ShaderIR::GetPredicateComparisonInteger(PredCondition condition, bool is_signed, Node op_a, + Node op_b) { + static const std::unordered_map<PredCondition, OperationCode> PredicateComparisonTable = { + {PredCondition::LessThan, OperationCode::LogicalILessThan}, + {PredCondition::Equal, OperationCode::LogicalIEqual}, + {PredCondition::LessEqual, OperationCode::LogicalILessEqual}, + {PredCondition::GreaterThan, OperationCode::LogicalIGreaterThan}, + {PredCondition::NotEqual, OperationCode::LogicalINotEqual}, + {PredCondition::GreaterEqual, OperationCode::LogicalIGreaterEqual}, + {PredCondition::LessThanWithNan, OperationCode::LogicalILessThan}, + {PredCondition::NotEqualWithNan, OperationCode::LogicalINotEqual}, + {PredCondition::LessEqualWithNan, OperationCode::LogicalILessEqual}, + {PredCondition::GreaterThanWithNan, OperationCode::LogicalIGreaterThan}, + {PredCondition::GreaterEqualWithNan, OperationCode::LogicalIGreaterEqual}}; + + const auto comparison{PredicateComparisonTable.find(condition)}; + UNIMPLEMENTED_IF_MSG(comparison == 
PredicateComparisonTable.end(), + "Unknown predicate comparison operation"); + + Node predicate = SignedOperation(comparison->second, is_signed, NO_PRECISE, op_a, op_b); + + UNIMPLEMENTED_IF_MSG(condition == PredCondition::LessThanWithNan || + condition == PredCondition::NotEqualWithNan || + condition == PredCondition::LessEqualWithNan || + condition == PredCondition::GreaterThanWithNan || + condition == PredCondition::GreaterEqualWithNan, + "NaN comparisons for integers are not implemented"); + return predicate; +} + +Node ShaderIR::GetPredicateComparisonHalf(Tegra::Shader::PredCondition condition, + const MetaHalfArithmetic& meta, Node op_a, Node op_b) { + + UNIMPLEMENTED_IF_MSG(condition == PredCondition::LessThanWithNan || + condition == PredCondition::NotEqualWithNan || + condition == PredCondition::LessEqualWithNan || + condition == PredCondition::GreaterThanWithNan || + condition == PredCondition::GreaterEqualWithNan, + "Unimplemented NaN comparison for half floats"); + + static const std::unordered_map<PredCondition, OperationCode> PredicateComparisonTable = { + {PredCondition::LessThan, OperationCode::Logical2HLessThan}, + {PredCondition::Equal, OperationCode::Logical2HEqual}, + {PredCondition::LessEqual, OperationCode::Logical2HLessEqual}, + {PredCondition::GreaterThan, OperationCode::Logical2HGreaterThan}, + {PredCondition::NotEqual, OperationCode::Logical2HNotEqual}, + {PredCondition::GreaterEqual, OperationCode::Logical2HGreaterEqual}, + {PredCondition::LessThanWithNan, OperationCode::Logical2HLessThan}, + {PredCondition::NotEqualWithNan, OperationCode::Logical2HNotEqual}, + {PredCondition::LessEqualWithNan, OperationCode::Logical2HLessEqual}, + {PredCondition::GreaterThanWithNan, OperationCode::Logical2HGreaterThan}, + {PredCondition::GreaterEqualWithNan, OperationCode::Logical2HGreaterEqual}}; + + const auto comparison{PredicateComparisonTable.find(condition)}; + UNIMPLEMENTED_IF_MSG(comparison == PredicateComparisonTable.end(), + "Unknown predicate comparison operation"); + + const Node predicate = Operation(comparison->second, meta, op_a, op_b); + + return predicate; +} + +OperationCode ShaderIR::GetPredicateCombiner(PredOperation operation) { + static const std::unordered_map<PredOperation, OperationCode> PredicateOperationTable = { + {PredOperation::And, OperationCode::LogicalAnd}, + {PredOperation::Or, OperationCode::LogicalOr}, + {PredOperation::Xor, OperationCode::LogicalXor}, + }; + + const auto op = PredicateOperationTable.find(operation); + UNIMPLEMENTED_IF_MSG(op == PredicateOperationTable.end(), "Unknown predicate operation"); + return op->second; +} + +Node ShaderIR::GetConditionCode(Tegra::Shader::ConditionCode cc) { + switch (cc) { + case Tegra::Shader::ConditionCode::NEU: + return GetInternalFlag(InternalFlag::Zero, true); + default: + UNIMPLEMENTED_MSG("Unimplemented condition code: {}", static_cast<u32>(cc)); + return GetPredicate(static_cast<u64>(Pred::NeverExecute)); + } +} + +void ShaderIR::SetRegister(BasicBlock& bb, Register dest, Node src) { + bb.push_back(Operation(OperationCode::Assign, GetRegister(dest), src)); +} + +void ShaderIR::SetPredicate(BasicBlock& bb, u64 dest, Node src) { + bb.push_back(Operation(OperationCode::LogicalAssign, GetPredicate(dest), src)); +} + +void ShaderIR::SetInternalFlag(BasicBlock& bb, InternalFlag flag, Node value) { + bb.push_back(Operation(OperationCode::LogicalAssign, GetInternalFlag(flag), value)); +} + +void ShaderIR::SetLocalMemory(BasicBlock& bb, Node address, Node value) { + 
bb.push_back(Operation(OperationCode::Assign, GetLocalMemory(address), value)); +} + +void ShaderIR::SetTemporal(BasicBlock& bb, u32 id, Node value) { + SetRegister(bb, Register::ZeroIndex + 1 + id, value); +} + +void ShaderIR::SetInternalFlagsFromFloat(BasicBlock& bb, Node value, bool sets_cc) { + if (!sets_cc) { + return; + } + const Node zerop = Operation(OperationCode::LogicalFEqual, value, Immediate(0.0f)); + SetInternalFlag(bb, InternalFlag::Zero, zerop); + LOG_WARNING(HW_GPU, "Condition codes implementation is incomplete"); +} + +void ShaderIR::SetInternalFlagsFromInteger(BasicBlock& bb, Node value, bool sets_cc) { + if (!sets_cc) { + return; + } + const Node zerop = Operation(OperationCode::LogicalIEqual, value, Immediate(0)); + SetInternalFlag(bb, InternalFlag::Zero, zerop); + LOG_WARNING(HW_GPU, "Condition codes implementation is incomplete"); +} + +Node ShaderIR::BitfieldExtract(Node value, u32 offset, u32 bits) { + return Operation(OperationCode::UBitfieldExtract, NO_PRECISE, value, Immediate(offset), + Immediate(bits)); +} + +/*static*/ OperationCode ShaderIR::SignedToUnsignedCode(OperationCode operation_code, + bool is_signed) { + if (is_signed) { + return operation_code; + } + switch (operation_code) { + case OperationCode::FCastInteger: + return OperationCode::FCastUInteger; + case OperationCode::IAdd: + return OperationCode::UAdd; + case OperationCode::IMul: + return OperationCode::UMul; + case OperationCode::IDiv: + return OperationCode::UDiv; + case OperationCode::IMin: + return OperationCode::UMin; + case OperationCode::IMax: + return OperationCode::UMax; + case OperationCode::ICastFloat: + return OperationCode::UCastFloat; + case OperationCode::ICastUnsigned: + return OperationCode::UCastSigned; + case OperationCode::ILogicalShiftLeft: + return OperationCode::ULogicalShiftLeft; + case OperationCode::ILogicalShiftRight: + return OperationCode::ULogicalShiftRight; + case OperationCode::IArithmeticShiftRight: + return OperationCode::UArithmeticShiftRight; + case OperationCode::IBitwiseAnd: + return OperationCode::UBitwiseAnd; + case OperationCode::IBitwiseOr: + return OperationCode::UBitwiseOr; + case OperationCode::IBitwiseXor: + return OperationCode::UBitwiseXor; + case OperationCode::IBitwiseNot: + return OperationCode::UBitwiseNot; + case OperationCode::IBitfieldInsert: + return OperationCode::UBitfieldInsert; + case OperationCode::IBitCount: + return OperationCode::UBitCount; + case OperationCode::LogicalILessThan: + return OperationCode::LogicalULessThan; + case OperationCode::LogicalIEqual: + return OperationCode::LogicalUEqual; + case OperationCode::LogicalILessEqual: + return OperationCode::LogicalULessEqual; + case OperationCode::LogicalIGreaterThan: + return OperationCode::LogicalUGreaterThan; + case OperationCode::LogicalINotEqual: + return OperationCode::LogicalUNotEqual; + case OperationCode::LogicalIGreaterEqual: + return OperationCode::LogicalUGreaterEqual; + case OperationCode::INegate: + UNREACHABLE_MSG("Can't negate an unsigned integer"); + case OperationCode::IAbsolute: + UNREACHABLE_MSG("Can't apply absolute to an unsigned integer"); + } + UNREACHABLE_MSG("Unknown signed operation with code={}", static_cast<u32>(operation_code)); + return {}; +} + +} // namespace VideoCommon::Shader
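Annotation: StoreNode above implements the IR's ownership model: node data is moved into a heap allocation kept alive by an owning vector, while the rest of the IR passes nodes around as raw const pointers. A minimal sketch of that arena-style pattern; the Pool/Data/Handle names are illustrative, not yuzu's:

// Arena-style node ownership: the vector owns the allocations, consumers hold
// stable const pointers into them.
#include <cstdio>
#include <memory>
#include <string>
#include <vector>

struct Data {
    std::string text;
};
using Handle = const Data*;

class Pool {
public:
    Handle Store(Data&& data) {
        owned.push_back(std::make_unique<Data>(std::move(data)));
        return owned.back().get();  // stable: the pointee never moves even if the vector grows
    }

private:
    std::vector<std::unique_ptr<Data>> owned;
};

int main() {
    Pool pool;
    const Handle a = pool.Store(Data{"first"});
    const Handle b = pool.Store(Data{"second"});
    std::printf("%s %s\n", a->text.c_str(), b->text.c_str());
}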
\ No newline at end of file diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h new file mode 100644 index 000000000..6e42e3dfb --- /dev/null +++ b/src/video_core/shader/shader_ir.h @@ -0,0 +1,823 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <array> +#include <cstring> +#include <map> +#include <set> +#include <string> +#include <tuple> +#include <variant> +#include <vector> + +#include "common/common_types.h" +#include "video_core/engines/maxwell_3d.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/engines/shader_header.h" + +namespace VideoCommon::Shader { + +class OperationNode; +class ConditionalNode; +class GprNode; +class ImmediateNode; +class InternalFlagNode; +class PredicateNode; +class AbufNode; ///< Attribute buffer +class CbufNode; ///< Constant buffer +class LmemNode; ///< Local memory +class GmemNode; ///< Global memory +class CommentNode; + +using ProgramCode = std::vector<u64>; + +using NodeData = + std::variant<OperationNode, ConditionalNode, GprNode, ImmediateNode, InternalFlagNode, + PredicateNode, AbufNode, CbufNode, LmemNode, GmemNode, CommentNode>; +using Node = const NodeData*; +using Node4 = std::array<Node, 4>; +using BasicBlock = std::vector<Node>; + +constexpr u32 MAX_PROGRAM_LENGTH = 0x1000; + +enum class OperationCode { + Assign, /// (float& dest, float src) -> void + + Select, /// (MetaArithmetic, bool pred, float a, float b) -> float + + FAdd, /// (MetaArithmetic, float a, float b) -> float + FMul, /// (MetaArithmetic, float a, float b) -> float + FDiv, /// (MetaArithmetic, float a, float b) -> float + FFma, /// (MetaArithmetic, float a, float b, float c) -> float + FNegate, /// (MetaArithmetic, float a) -> float + FAbsolute, /// (MetaArithmetic, float a) -> float + FClamp, /// (MetaArithmetic, float value, float min, float max) -> float + FMin, /// (MetaArithmetic, float a, float b) -> float + FMax, /// (MetaArithmetic, float a, float b) -> float + FCos, /// (MetaArithmetic, float a) -> float + FSin, /// (MetaArithmetic, float a) -> float + FExp2, /// (MetaArithmetic, float a) -> float + FLog2, /// (MetaArithmetic, float a) -> float + FInverseSqrt, /// (MetaArithmetic, float a) -> float + FSqrt, /// (MetaArithmetic, float a) -> float + FRoundEven, /// (MetaArithmetic, float a) -> float + FFloor, /// (MetaArithmetic, float a) -> float + FCeil, /// (MetaArithmetic, float a) -> float + FTrunc, /// (MetaArithmetic, float a) -> float + FCastInteger, /// (MetaArithmetic, int a) -> float + FCastUInteger, /// (MetaArithmetic, uint a) -> float + + IAdd, /// (MetaArithmetic, int a, int b) -> int + IMul, /// (MetaArithmetic, int a, int b) -> int + IDiv, /// (MetaArithmetic, int a, int b) -> int + INegate, /// (MetaArithmetic, int a) -> int + IAbsolute, /// (MetaArithmetic, int a) -> int + IMin, /// (MetaArithmetic, int a, int b) -> int + IMax, /// (MetaArithmetic, int a, int b) -> int + ICastFloat, /// (MetaArithmetic, float a) -> int + ICastUnsigned, /// (MetaArithmetic, uint a) -> int + ILogicalShiftLeft, /// (MetaArithmetic, int a, uint b) -> int + ILogicalShiftRight, /// (MetaArithmetic, int a, uint b) -> int + IArithmeticShiftRight, /// (MetaArithmetic, int a, uint b) -> int + IBitwiseAnd, /// (MetaArithmetic, int a, int b) -> int + IBitwiseOr, /// (MetaArithmetic, int a, int b) -> int + IBitwiseXor, /// (MetaArithmetic, int a, int b) -> int + IBitwiseNot, /// (MetaArithmetic, int a) -> int + 
IBitfieldInsert, /// (MetaArithmetic, int base, int insert, int offset, int bits) -> int + IBitfieldExtract, /// (MetaArithmetic, int value, int offset, int offset) -> int + IBitCount, /// (MetaArithmetic, int) -> int + + UAdd, /// (MetaArithmetic, uint a, uint b) -> uint + UMul, /// (MetaArithmetic, uint a, uint b) -> uint + UDiv, /// (MetaArithmetic, uint a, uint b) -> uint + UMin, /// (MetaArithmetic, uint a, uint b) -> uint + UMax, /// (MetaArithmetic, uint a, uint b) -> uint + UCastFloat, /// (MetaArithmetic, float a) -> uint + UCastSigned, /// (MetaArithmetic, int a) -> uint + ULogicalShiftLeft, /// (MetaArithmetic, uint a, uint b) -> uint + ULogicalShiftRight, /// (MetaArithmetic, uint a, uint b) -> uint + UArithmeticShiftRight, /// (MetaArithmetic, uint a, uint b) -> uint + UBitwiseAnd, /// (MetaArithmetic, uint a, uint b) -> uint + UBitwiseOr, /// (MetaArithmetic, uint a, uint b) -> uint + UBitwiseXor, /// (MetaArithmetic, uint a, uint b) -> uint + UBitwiseNot, /// (MetaArithmetic, uint a) -> uint + UBitfieldInsert, /// (MetaArithmetic, uint base, uint insert, int offset, int bits) -> uint + UBitfieldExtract, /// (MetaArithmetic, uint value, int offset, int offset) -> uint + UBitCount, /// (MetaArithmetic, uint) -> uint + + HAdd, /// (MetaHalfArithmetic, f16vec2 a, f16vec2 b) -> f16vec2 + HMul, /// (MetaHalfArithmetic, f16vec2 a, f16vec2 b) -> f16vec2 + HFma, /// (MetaHalfArithmetic, f16vec2 a, f16vec2 b, f16vec2 c) -> f16vec2 + HAbsolute, /// (f16vec2 a) -> f16vec2 + HNegate, /// (f16vec2 a, bool first, bool second) -> f16vec2 + HMergeF32, /// (f16vec2 src) -> float + HMergeH0, /// (f16vec2 dest, f16vec2 src) -> f16vec2 + HMergeH1, /// (f16vec2 dest, f16vec2 src) -> f16vec2 + HPack2, /// (float a, float b) -> f16vec2 + + LogicalAssign, /// (bool& dst, bool src) -> void + LogicalAnd, /// (bool a, bool b) -> bool + LogicalOr, /// (bool a, bool b) -> bool + LogicalXor, /// (bool a, bool b) -> bool + LogicalNegate, /// (bool a) -> bool + LogicalPick2, /// (bool2 pair, uint index) -> bool + LogicalAll2, /// (bool2 a) -> bool + LogicalAny2, /// (bool2 a) -> bool + + LogicalFLessThan, /// (float a, float b) -> bool + LogicalFEqual, /// (float a, float b) -> bool + LogicalFLessEqual, /// (float a, float b) -> bool + LogicalFGreaterThan, /// (float a, float b) -> bool + LogicalFNotEqual, /// (float a, float b) -> bool + LogicalFGreaterEqual, /// (float a, float b) -> bool + LogicalFIsNan, /// (float a) -> bool + + LogicalILessThan, /// (int a, int b) -> bool + LogicalIEqual, /// (int a, int b) -> bool + LogicalILessEqual, /// (int a, int b) -> bool + LogicalIGreaterThan, /// (int a, int b) -> bool + LogicalINotEqual, /// (int a, int b) -> bool + LogicalIGreaterEqual, /// (int a, int b) -> bool + + LogicalULessThan, /// (uint a, uint b) -> bool + LogicalUEqual, /// (uint a, uint b) -> bool + LogicalULessEqual, /// (uint a, uint b) -> bool + LogicalUGreaterThan, /// (uint a, uint b) -> bool + LogicalUNotEqual, /// (uint a, uint b) -> bool + LogicalUGreaterEqual, /// (uint a, uint b) -> bool + + Logical2HLessThan, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2 + Logical2HEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2 + Logical2HLessEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2 + Logical2HGreaterThan, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2 + Logical2HNotEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2 + Logical2HGreaterEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2 + + F4Texture, /// (MetaTexture, float[N] coords, 
float[M] params) -> float4 + F4TextureLod, /// (MetaTexture, float[N] coords, float[M] params) -> float4 + F4TextureGather, /// (MetaTexture, float[N] coords, float[M] params) -> float4 + F4TextureQueryDimensions, /// (MetaTexture, float a) -> float4 + F4TextureQueryLod, /// (MetaTexture, float[N] coords) -> float4 + F4TexelFetch, /// (MetaTexture, int[N], int) -> float4 + + Branch, /// (uint branch_target) -> void + PushFlowStack, /// (uint branch_target) -> void + PopFlowStack, /// () -> void + Exit, /// () -> void + Discard, /// () -> void + + EmitVertex, /// () -> void + EndPrimitive, /// () -> void + + YNegate, /// () -> float + + Amount, +}; + +enum class InternalFlag { + Zero = 0, + Sign = 1, + Carry = 2, + Overflow = 3, + Amount = 4, +}; + +/// Describes the behaviour of code path of a given entry point and a return point. +enum class ExitMethod { + Undetermined, ///< Internal value. Only occur when analyzing JMP loop. + AlwaysReturn, ///< All code paths reach the return point. + Conditional, ///< Code path reaches the return point or an END instruction conditionally. + AlwaysEnd, ///< All code paths reach a END instruction. +}; + +class Sampler { +public: + explicit Sampler(std::size_t offset, std::size_t index, Tegra::Shader::TextureType type, + bool is_array, bool is_shadow) + : offset{offset}, index{index}, type{type}, is_array{is_array}, is_shadow{is_shadow} {} + + std::size_t GetOffset() const { + return offset; + } + + std::size_t GetIndex() const { + return index; + } + + Tegra::Shader::TextureType GetType() const { + return type; + } + + bool IsArray() const { + return is_array; + } + + bool IsShadow() const { + return is_shadow; + } + + bool operator<(const Sampler& rhs) const { + return std::tie(offset, index, type, is_array, is_shadow) < + std::tie(rhs.offset, rhs.index, rhs.type, rhs.is_array, rhs.is_shadow); + } + +private: + /// Offset in TSC memory from which to read the sampler object, as specified by the sampling + /// instruction. + std::size_t offset{}; + std::size_t index{}; ///< Value used to index into the generated GLSL sampler array. + Tegra::Shader::TextureType type{}; ///< The type used to sample this texture (Texture2D, etc) + bool is_array{}; ///< Whether the texture is being sampled as an array texture or not. + bool is_shadow{}; ///< Whether the texture is being sampled as a depth texture or not. 
+}; + +class ConstBuffer { +public: + void MarkAsUsed(u64 offset) { + max_offset = std::max(max_offset, static_cast<u32>(offset)); + } + + void MarkAsUsedIndirect() { + is_indirect = true; + } + + bool IsIndirect() const { + return is_indirect; + } + + u32 GetSize() const { + return max_offset + sizeof(float); + } + +private: + u32 max_offset{}; + bool is_indirect{}; +}; + +struct GlobalMemoryBase { + u32 cbuf_index{}; + u32 cbuf_offset{}; + + bool operator<(const GlobalMemoryBase& rhs) const { + return std::tie(cbuf_index, cbuf_offset) < std::tie(rhs.cbuf_index, rhs.cbuf_offset); + } +}; + +struct MetaArithmetic { + bool precise{}; +}; + +struct MetaHalfArithmetic { + bool precise{}; + std::array<Tegra::Shader::HalfType, 3> types = {Tegra::Shader::HalfType::H0_H1, + Tegra::Shader::HalfType::H0_H1, + Tegra::Shader::HalfType::H0_H1}; +}; + +struct MetaTexture { + const Sampler& sampler; + u32 element{}; + u32 coords_count{}; + std::optional<u32> array_index; +}; + +constexpr MetaArithmetic PRECISE = {true}; +constexpr MetaArithmetic NO_PRECISE = {false}; +constexpr MetaHalfArithmetic HALF_NO_PRECISE = {false}; + +using Meta = std::variant<MetaArithmetic, MetaHalfArithmetic, MetaTexture>; + +/// Holds any kind of operation that can be done in the IR +class OperationNode final { +public: + template <typename... T> + explicit constexpr OperationNode(OperationCode code) : code{code}, meta{} {} + + template <typename... T> + explicit constexpr OperationNode(OperationCode code, Meta&& meta) + : code{code}, meta{std::move(meta)} {} + + template <typename... T> + explicit constexpr OperationNode(OperationCode code, const T*... operands) + : OperationNode(code, {}, operands...) {} + + template <typename... T> + explicit constexpr OperationNode(OperationCode code, Meta&& meta, const T*... 
operands_) + : code{code}, meta{std::move(meta)} { + + auto operands_list = {operands_...}; + for (auto& operand : operands_list) { + operands.push_back(operand); + } + } + + explicit OperationNode(OperationCode code, Meta&& meta, std::vector<Node>&& operands) + : code{code}, meta{meta}, operands{std::move(operands)} {} + + explicit OperationNode(OperationCode code, std::vector<Node>&& operands) + : code{code}, meta{}, operands{std::move(operands)} {} + + OperationCode GetCode() const { + return code; + } + + const Meta& GetMeta() const { + return meta; + } + + std::size_t GetOperandsCount() const { + return operands.size(); + } + + Node operator[](std::size_t operand_index) const { + return operands.at(operand_index); + } + +private: + const OperationCode code; + const Meta meta; + std::vector<Node> operands; +}; + +/// Encloses inside any kind of node that returns a boolean conditionally-executed code +class ConditionalNode final { +public: + explicit ConditionalNode(Node condition, std::vector<Node>&& code) + : condition{condition}, code{std::move(code)} {} + + Node GetCondition() const { + return condition; + } + + const std::vector<Node>& GetCode() const { + return code; + } + +private: + const Node condition; ///< Condition to be satisfied + std::vector<Node> code; ///< Code to execute +}; + +/// A general purpose register +class GprNode final { +public: + explicit constexpr GprNode(Tegra::Shader::Register index) : index{index} {} + + u32 GetIndex() const { + return static_cast<u32>(index); + } + +private: + const Tegra::Shader::Register index; +}; + +/// A 32-bits value that represents an immediate value +class ImmediateNode final { +public: + explicit constexpr ImmediateNode(u32 value) : value{value} {} + + u32 GetValue() const { + return value; + } + +private: + const u32 value; +}; + +/// One of Maxwell's internal flags +class InternalFlagNode final { +public: + explicit constexpr InternalFlagNode(InternalFlag flag) : flag{flag} {} + + InternalFlag GetFlag() const { + return flag; + } + +private: + const InternalFlag flag; +}; + +/// A predicate register, it can be negated without additional nodes +class PredicateNode final { +public: + explicit constexpr PredicateNode(Tegra::Shader::Pred index, bool negated) + : index{index}, negated{negated} {} + + Tegra::Shader::Pred GetIndex() const { + return index; + } + + bool IsNegated() const { + return negated; + } + +private: + const Tegra::Shader::Pred index; + const bool negated; +}; + +/// Attribute buffer memory (known as attributes or varyings in GLSL terms) +class AbufNode final { +public: + explicit constexpr AbufNode(Tegra::Shader::Attribute::Index index, u32 element, + const Tegra::Shader::IpaMode& input_mode, Node buffer = {}) + : input_mode{input_mode}, buffer{buffer}, index{index}, element{element} {} + + explicit constexpr AbufNode(Tegra::Shader::Attribute::Index index, u32 element, + Node buffer = {}) + : input_mode{}, buffer{buffer}, index{index}, element{element} {} + + Tegra::Shader::IpaMode GetInputMode() const { + return input_mode; + } + + Tegra::Shader::Attribute::Index GetIndex() const { + return index; + } + + u32 GetElement() const { + return element; + } + + Node GetBuffer() const { + return buffer; + } + +private: + const Tegra::Shader::IpaMode input_mode; + const Node buffer; + const Tegra::Shader::Attribute::Index index; + const u32 element; +}; + +/// Constant buffer node, usually mapped to uniform buffers in GLSL +class CbufNode final { +public: + explicit constexpr CbufNode(u32 index, Node offset) : 
index{index}, offset{offset} {} + + u32 GetIndex() const { + return index; + } + + Node GetOffset() const { + return offset; + } + +private: + const u32 index; + const Node offset; +}; + +/// Local memory node +class LmemNode final { +public: + explicit constexpr LmemNode(Node address) : address{address} {} + + Node GetAddress() const { + return address; + } + +private: + const Node address; +}; + +/// Global memory node +class GmemNode final { +public: + explicit constexpr GmemNode(Node real_address, Node base_address, + const GlobalMemoryBase& descriptor) + : real_address{real_address}, base_address{base_address}, descriptor{descriptor} {} + + Node GetRealAddress() const { + return real_address; + } + + Node GetBaseAddress() const { + return base_address; + } + + const GlobalMemoryBase& GetDescriptor() const { + return descriptor; + } + +private: + const Node real_address; + const Node base_address; + const GlobalMemoryBase descriptor; +}; + +/// Commentary, can be dropped +class CommentNode final { +public: + explicit CommentNode(std::string text) : text{std::move(text)} {} + + const std::string& GetText() const { + return text; + } + +private: + std::string text; +}; + +class ShaderIR final { +public: + explicit ShaderIR(const ProgramCode& program_code, u32 main_offset) + : program_code{program_code}, main_offset{main_offset} { + + Decode(); + } + + const std::map<u32, BasicBlock>& GetBasicBlocks() const { + return basic_blocks; + } + + const std::set<u32>& GetRegisters() const { + return used_registers; + } + + const std::set<Tegra::Shader::Pred>& GetPredicates() const { + return used_predicates; + } + + const std::map<Tegra::Shader::Attribute::Index, std::set<Tegra::Shader::IpaMode>>& + GetInputAttributes() const { + return used_input_attributes; + } + + const std::set<Tegra::Shader::Attribute::Index>& GetOutputAttributes() const { + return used_output_attributes; + } + + const std::map<u32, ConstBuffer>& GetConstantBuffers() const { + return used_cbufs; + } + + const std::set<Sampler>& GetSamplers() const { + return used_samplers; + } + + const std::array<bool, Tegra::Engines::Maxwell3D::Regs::NumClipDistances>& GetClipDistances() + const { + return used_clip_distances; + } + + const std::set<GlobalMemoryBase>& GetGlobalMemoryBases() const { + return used_global_memory_bases; + } + + std::size_t GetLength() const { + return static_cast<std::size_t>(coverage_end * sizeof(u64)); + } + + const Tegra::Shader::Header& GetHeader() const { + return header; + } + +private: + void Decode(); + + ExitMethod Scan(u32 begin, u32 end, std::set<u32>& labels); + + BasicBlock DecodeRange(u32 begin, u32 end); + + /** + * Decodes a single instruction from Tegra to IR. + * @param bb Basic block where the nodes will be written to. + * @param pc Program counter. Offset to decode. + * @return Next address to decode. 
+ */ + u32 DecodeInstr(BasicBlock& bb, u32 pc); + + u32 DecodeArithmetic(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeArithmeticImmediate(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeBfe(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeBfi(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeShift(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeArithmeticInteger(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeArithmeticIntegerImmediate(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeArithmeticHalf(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeArithmeticHalfImmediate(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeFfma(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeHfma2(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeConversion(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeMemory(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeFloatSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeIntegerSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeHalfSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodePredicateSetRegister(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodePredicateSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeRegisterSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeFloatSet(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeIntegerSet(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeHalfSet(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeVideo(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeXmad(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeOther(BasicBlock& bb, const BasicBlock& code, u32 pc); + + /// Internalizes node's data and returns a managed pointer to a clone of that node + Node StoreNode(NodeData&& node_data); + + /// Creates a conditional node + Node Conditional(Node condition, std::vector<Node>&& code); + /// Creates a commentary + Node Comment(const std::string& text); + /// Creates an u32 immediate + Node Immediate(u32 value); + /// Creates a s32 immediate + Node Immediate(s32 value) { + return Immediate(static_cast<u32>(value)); + } + /// Creates a f32 immediate + Node Immediate(f32 value) { + u32 integral; + std::memcpy(&integral, &value, sizeof(u32)); + return Immediate(integral); + } + + /// Generates a node for a passed register. + Node GetRegister(Tegra::Shader::Register reg); + /// Generates a node representing a 19-bit immediate value + Node GetImmediate19(Tegra::Shader::Instruction instr); + /// Generates a node representing a 32-bit immediate value + Node GetImmediate32(Tegra::Shader::Instruction instr); + /// Generates a node representing a constant buffer + Node GetConstBuffer(u64 index, u64 offset); + /// Generates a node representing a constant buffer with a variadic offset + Node GetConstBufferIndirect(u64 index, u64 offset, Node node); + /// Generates a node for a passed predicate. It can be optionally negated + Node GetPredicate(u64 pred, bool negated = false); + /// Generates a predicate node for an immediate true or false value + Node GetPredicate(bool immediate); + /// Generates a node representing an input attribute. Keeps track of used attributes. 
+ Node GetInputAttribute(Tegra::Shader::Attribute::Index index, u64 element, + const Tegra::Shader::IpaMode& input_mode, Node buffer = {}); + /// Generates a node representing an output attribute. Keeps track of used attributes. + Node GetOutputAttribute(Tegra::Shader::Attribute::Index index, u64 element, Node buffer); + /// Generates a node representing an internal flag + Node GetInternalFlag(InternalFlag flag, bool negated = false); + /// Generates a node representing a local memory address + Node GetLocalMemory(Node address); + /// Generates a temporary; internally it uses a post-RZ register + Node GetTemporal(u32 id); + + /// Sets a register. src value must be a number-evaluated node. + void SetRegister(BasicBlock& bb, Tegra::Shader::Register dest, Node src); + /// Sets a predicate. src value must be a bool-evaluated node + void SetPredicate(BasicBlock& bb, u64 dest, Node src); + /// Sets an internal flag. src value must be a bool-evaluated node + void SetInternalFlag(BasicBlock& bb, InternalFlag flag, Node value); + /// Sets a local memory address. address and value must be number-evaluated nodes + void SetLocalMemory(BasicBlock& bb, Node address, Node value); + /// Sets a temporary. Internally it uses a post-RZ register + void SetTemporal(BasicBlock& bb, u32 id, Node value); + + /// Sets internal flags from a float + void SetInternalFlagsFromFloat(BasicBlock& bb, Node value, bool sets_cc = true); + /// Sets internal flags from an integer + void SetInternalFlagsFromInteger(BasicBlock& bb, Node value, bool sets_cc = true); + + /// Conditionally absolute/negated float. Absolute is applied first + Node GetOperandAbsNegFloat(Node value, bool absolute, bool negate); + /// Conditionally saturates a float + Node GetSaturatedFloat(Node value, bool saturate = true); + + /// Converts an integer to the given size + Node ConvertIntegerSize(Node value, Tegra::Shader::Register::Size size, bool is_signed); + /// Conditionally absolute/negated integer. Absolute is applied first + Node GetOperandAbsNegInteger(Node value, bool absolute, bool negate, bool is_signed); + + /// Unpacks a half immediate from an instruction + Node UnpackHalfImmediate(Tegra::Shader::Instruction instr, bool has_negation); + /// Merges a half pair into another value + Node HalfMerge(Node dest, Node src, Tegra::Shader::HalfMerge merge); + /// Conditionally absolute/negated half float pair. Absolute is applied first + Node GetOperandAbsNegHalf(Node value, bool absolute, bool negate); + + /// Returns a predicate comparing two floats + Node GetPredicateComparisonFloat(Tegra::Shader::PredCondition condition, Node op_a, Node op_b); + /// Returns a predicate comparing two integers + Node GetPredicateComparisonInteger(Tegra::Shader::PredCondition condition, bool is_signed, + Node op_a, Node op_b); + /// Returns a predicate comparing two half floats.
meta consumes how both pairs will be compared + Node GetPredicateComparisonHalf(Tegra::Shader::PredCondition condition, + const MetaHalfArithmetic& meta, Node op_a, Node op_b); + + /// Returns a predicate combiner operation + OperationCode GetPredicateCombiner(Tegra::Shader::PredOperation operation); + + /// Returns a condition code evaluated from internal flags + Node GetConditionCode(Tegra::Shader::ConditionCode cc); + + /// Accesses a texture sampler + const Sampler& GetSampler(const Tegra::Shader::Sampler& sampler, + Tegra::Shader::TextureType type, bool is_array, bool is_shadow); + + /// Extracts a sequence of bits from a node + Node BitfieldExtract(Node value, u32 offset, u32 bits); + + void WriteTexInstructionFloat(BasicBlock& bb, Tegra::Shader::Instruction instr, + const Node4& components); + + void WriteTexsInstructionFloat(BasicBlock& bb, Tegra::Shader::Instruction instr, + const Node4& components); + void WriteTexsInstructionHalfFloat(BasicBlock& bb, Tegra::Shader::Instruction instr, + const Node4& components); + + Node4 GetTexCode(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type, + Tegra::Shader::TextureProcessMode process_mode, bool depth_compare, + bool is_array); + + Node4 GetTexsCode(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type, + Tegra::Shader::TextureProcessMode process_mode, bool depth_compare, + bool is_array); + + Node4 GetTld4Code(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type, + bool depth_compare, bool is_array); + + Node4 GetTldsCode(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type, + bool is_array); + + std::tuple<std::size_t, std::size_t> ValidateAndGetCoordinateElement( + Tegra::Shader::TextureType texture_type, bool depth_compare, bool is_array, + bool lod_bias_enabled, std::size_t max_coords, std::size_t max_inputs); + + Node4 GetTextureCode(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type, + Tegra::Shader::TextureProcessMode process_mode, bool depth_compare, + bool is_array, std::size_t array_offset, std::size_t bias_offset, + std::vector<Node>&& coords); + + Node GetVideoOperand(Node op, bool is_chunk, bool is_signed, Tegra::Shader::VideoType type, + u64 byte_height); + + void WriteLogicOperation(BasicBlock& bb, Tegra::Shader::Register dest, + Tegra::Shader::LogicOperation logic_op, Node op_a, Node op_b, + Tegra::Shader::PredicateResultMode predicate_mode, + Tegra::Shader::Pred predicate, bool sets_cc); + void WriteLop3Instruction(BasicBlock& bb, Tegra::Shader::Register dest, Node op_a, Node op_b, + Node op_c, Node imm_lut, bool sets_cc); + + Node TrackCbuf(Node tracked, const BasicBlock& code, s64 cursor); + + std::pair<Node, s64> TrackRegister(const GprNode* tracked, const BasicBlock& code, s64 cursor); + + template <typename... T> + Node Operation(OperationCode code, const T*... operands) { + return StoreNode(OperationNode(code, operands...)); + } + + template <typename... T> + Node Operation(OperationCode code, Meta&& meta, const T*... operands) { + return StoreNode(OperationNode(code, std::move(meta), operands...)); + } + + template <typename... T> + Node Operation(OperationCode code, std::vector<Node>&& operands) { + return StoreNode(OperationNode(code, std::move(operands))); + } + + template <typename... T> + Node Operation(OperationCode code, Meta&& meta, std::vector<Node>&& operands) { + return StoreNode(OperationNode(code, std::move(meta), std::move(operands))); + } + + template <typename... 
T> + Node SignedOperation(OperationCode code, bool is_signed, const T*... operands) { + return StoreNode(OperationNode(SignedToUnsignedCode(code, is_signed), operands...)); + } + + template <typename... T> + Node SignedOperation(OperationCode code, bool is_signed, Meta&& meta, const T*... operands) { + return StoreNode( + OperationNode(SignedToUnsignedCode(code, is_signed), std::move(meta), operands...)); + } + + static OperationCode SignedToUnsignedCode(OperationCode operation_code, bool is_signed); + + const ProgramCode& program_code; + const u32 main_offset; + + u32 coverage_begin{}; + u32 coverage_end{}; + std::map<std::pair<u32, u32>, ExitMethod> exit_method_map; + + std::map<u32, BasicBlock> basic_blocks; + + std::vector<std::unique_ptr<NodeData>> stored_nodes; + + std::set<u32> used_registers; + std::set<Tegra::Shader::Pred> used_predicates; + std::map<Tegra::Shader::Attribute::Index, std::set<Tegra::Shader::IpaMode>> + used_input_attributes; + std::set<Tegra::Shader::Attribute::Index> used_output_attributes; + std::map<u32, ConstBuffer> used_cbufs; + std::set<Sampler> used_samplers; + std::array<bool, Tegra::Engines::Maxwell3D::Regs::NumClipDistances> used_clip_distances{}; + std::set<GlobalMemoryBase> used_global_memory_bases; + + Tegra::Shader::Header header; +}; + +} // namespace VideoCommon::Shader
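The ShaderIR declaration above hands consumers the decoded shader as a map of basic blocks whose entries are variant nodes. Below is a minimal sketch of how a backend might walk that output. It is illustrative only and not part of this change: CountComments is a hypothetical helper, and it assumes that BasicBlock is a sequence of Node and that dereferencing a Node yields the NodeData variant, as the tracking pass that follows suggests.

#include <cstddef>
#include <variant>

#include "video_core/shader/shader_ir.h"

// Count the CommentNode entries emitted by ShaderIR::Decode(), dispatching on the
// node variant the same way track.cpp does with std::get_if/std::holds_alternative.
std::size_t CountComments(const VideoCommon::Shader::ShaderIR& ir) {
    std::size_t count = 0;
    for (const auto& [offset, block] : ir.GetBasicBlocks()) {
        for (const auto node : block) {
            if (std::holds_alternative<VideoCommon::Shader::CommentNode>(*node)) {
                ++count;
            }
        }
    }
    return count;
}

Code generators are free to skip CommentNode entirely, which is why it is described above as commentary that can be dropped.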
diff --git a/src/video_core/shader/track.cpp b/src/video_core/shader/track.cpp new file mode 100644 index 000000000..d6d29ee9f --- /dev/null +++ b/src/video_core/shader/track.cpp @@ -0,0 +1,76 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <algorithm> +#include <utility> +#include <variant> + +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +namespace { +std::pair<Node, s64> FindOperation(const BasicBlock& code, s64 cursor, + OperationCode operation_code) { + for (; cursor >= 0; --cursor) { + const Node node = code[cursor]; + if (const auto operation = std::get_if<OperationNode>(node)) { + if (operation->GetCode() == operation_code) + return {node, cursor}; + } + } + return {}; +} +} // namespace + +Node ShaderIR::TrackCbuf(Node tracked, const BasicBlock& code, s64 cursor) { + if (const auto cbuf = std::get_if<CbufNode>(tracked)) { + // Cbuf found, but its offset has to be an immediate + return std::holds_alternative<ImmediateNode>(*cbuf->GetOffset()) ? tracked : nullptr; + } + if (const auto gpr = std::get_if<GprNode>(tracked)) { + if (gpr->GetIndex() == Tegra::Shader::Register::ZeroIndex) { + return nullptr; + } + // Decrement the cursor by one to avoid infinite loops when the instruction sets the same + // register that it uses as an operand + const auto [source, new_cursor] = TrackRegister(gpr, code, cursor - 1); + if (!source) { + return nullptr; + } + return TrackCbuf(source, code, new_cursor); + } + if (const auto operation = std::get_if<OperationNode>(tracked)) { + for (std::size_t i = 0; i < operation->GetOperandsCount(); ++i) { + if (const auto found = TrackCbuf((*operation)[i], code, cursor)) { + // Cbuf found in operand + return found; + } + } + return nullptr; + } + return nullptr; +} + +std::pair<Node, s64> ShaderIR::TrackRegister(const GprNode* tracked, const BasicBlock& code, + s64 cursor) { + for (; cursor >= 0; --cursor) { + const auto [found_node, new_cursor] = FindOperation(code, cursor, OperationCode::Assign); + if (!found_node) { + return {}; + } + const auto operation = std::get_if<OperationNode>(found_node); + ASSERT(operation); + + const auto& target = (*operation)[0]; + if (const auto gpr_target = std::get_if<GprNode>(target)) { + if (gpr_target->GetIndex() == tracked->GetIndex()) { + return {(*operation)[1], new_cursor}; + } + } + } + return {}; +} + +} // namespace VideoCommon::Shader diff --git a/src/video_core/surface.cpp b/src/video_core/surface.cpp index 1a344229f..2f6612a35 100644 --- a/src/video_core/surface.cpp +++ b/src/video_core/surface.cpp @@ -50,6 +50,24 @@ bool SurfaceTargetIsLayered(SurfaceTarget target) { } } +bool SurfaceTargetIsArray(SurfaceTarget target) { + switch (target) { + case SurfaceTarget::Texture1D: + case SurfaceTarget::Texture2D: + case SurfaceTarget::Texture3D: + case SurfaceTarget::TextureCubemap: + return false; + case SurfaceTarget::Texture1DArray: + case SurfaceTarget::Texture2DArray: + case SurfaceTarget::TextureCubeArray: + return true; + default: + LOG_CRITICAL(HW_GPU, "Unimplemented surface_target={}", static_cast<u32>(target)); + UNREACHABLE(); + return false; + } +} + PixelFormat PixelFormatFromDepthFormat(Tegra::DepthFormat format) { switch (format) { case Tegra::DepthFormat::S8_Z24_UNORM: diff --git a/src/video_core/surface.h b/src/video_core/surface.h index c2259c3c2..b783e4b27 100644 --- a/src/video_core/surface.h +++ b/src/video_core/surface.h @@ -109,8 +109,7 @@ enum class SurfaceType { ColorTexture = 0, Depth = 1, DepthStencil = 2, - Fill = 3, - Invalid = 4, + Invalid = 3, }; enum class SurfaceTarget { @@ -441,6 +440,8 @@ SurfaceTarget SurfaceTargetFromTextureType(Tegra::Texture::TextureType texture_t bool SurfaceTargetIsLayered(SurfaceTarget target); +bool SurfaceTargetIsArray(SurfaceTarget target); + PixelFormat PixelFormatFromDepthFormat(Tegra::DepthFormat format); PixelFormat PixelFormatFromRenderTargetFormat(Tegra::RenderTargetFormat format); diff --git a/src/video_core/textures/texture.h b/src/video_core/textures/texture.h index e7c78bee2..bdb40dacf 100644 --- a/src/video_core/textures/texture.h +++ b/src/video_core/textures/texture.h @@ -182,7 +182,7 @@ struct TICEntry { }; union { BitField<0, 16, u32> height_minus_1; - BitField<16, 15, u32> depth_minus_1; + BitField<16, 14, u32> depth_minus_1; }; union { BitField<6, 13, u32> mip_lod_bias;
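The TICEntry tweak just above narrows depth_minus_1 from 15 to 14 bits, so the depth field now occupies bits 16 through 29 of the same word whose low 16 bits hold height_minus_1, presumably matching how many bits the hardware actually dedicates to depth there. A rough standalone equivalent of what BitField<16, 14, u32> extracts, as an illustrative sketch only (ExtractDepthMinusOne is a hypothetical name, not yuzu code):

#include "common/common_types.h"

// Pull the 14-bit depth-minus-one field out of the packed TIC word.
inline u32 ExtractDepthMinusOne(u32 word) {
    return (word >> 16) & ((1u << 14) - 1);
}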
diff --git a/src/video_core/video_core.cpp b/src/video_core/video_core.cpp index f7de3471b..0b8ccdd44 100644 --- a/src/video_core/video_core.cpp +++ b/src/video_core/video_core.cpp @@ -16,9 +16,10 @@ std::unique_ptr<RendererBase> CreateRenderer(Core::Frontend::EmuWindow& emu_wind } u16 GetResolutionScaleFactor(const RendererBase& renderer) { - return !Settings::values.resolution_factor - ? renderer.GetRenderWindow().GetFramebufferLayout().GetScalingRatio() - : Settings::values.resolution_factor; + return static_cast<u16>( + Settings::values.resolution_factor + ? Settings::values.resolution_factor + : renderer.GetRenderWindow().GetFramebufferLayout().GetScalingRatio()); } } // namespace VideoCore diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp index 0c0864742..f50225d5f 100644 --- a/src/yuzu/debugger/wait_tree.cpp +++ b/src/yuzu/debugger/wait_tree.cpp @@ -13,7 +13,6 @@ #include "core/hle/kernel/readable_event.h" #include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" -#include "core/hle/kernel/timer.h" #include "core/hle/kernel/wait_object.h" #include "core/memory.h" @@ -155,8 +154,6 @@ std::unique_ptr<WaitTreeWaitObject> WaitTreeWaitObject::make(const Kernel::WaitO switch (object.GetHandleType()) { case Kernel::HandleType::ReadableEvent: return std::make_unique<WaitTreeEvent>(static_cast<const Kernel::ReadableEvent&>(object)); - case Kernel::HandleType::Timer: - return std::make_unique<WaitTreeTimer>(static_cast<const Kernel::Timer&>(object)); case Kernel::HandleType::Thread: return std::make_unique<WaitTreeThread>(static_cast<const Kernel::Thread&>(object)); default: @@ -348,23 +345,6 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeEvent::GetChildren() const { return list; } -WaitTreeTimer::WaitTreeTimer(const Kernel::Timer& object) : WaitTreeWaitObject(object) {} -WaitTreeTimer::~WaitTreeTimer() = default; - -std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeTimer::GetChildren() const { - std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren()); - - const auto& timer = static_cast<const Kernel::Timer&>(object); - - list.push_back(std::make_unique<WaitTreeText>( - tr("reset type = %1").arg(GetResetTypeQString(timer.GetResetType())))); - list.push_back( - std::make_unique<WaitTreeText>(tr("initial delay = %1").arg(timer.GetInitialDelay()))); - list.push_back( - std::make_unique<WaitTreeText>(tr("interval delay = %1").arg(timer.GetIntervalDelay()))); - return list; -} - WaitTreeThreadList::WaitTreeThreadList(const std::vector<Kernel::SharedPtr<Kernel::Thread>>& list) : thread_list(list) {} WaitTreeThreadList::~WaitTreeThreadList() = default; diff --git a/src/yuzu/debugger/wait_tree.h b/src/yuzu/debugger/wait_tree.h index e639ef412..365c3dbfe 100644 --- a/src/yuzu/debugger/wait_tree.h +++ b/src/yuzu/debugger/wait_tree.h @@ -20,7 +20,6 @@ namespace Kernel { class ReadableEvent; class WaitObject; class Thread; -class Timer; } // namespace Kernel class WaitTreeThread; @@ -150,15 +149,6 @@ public: std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override; }; -class WaitTreeTimer : public WaitTreeWaitObject { - Q_OBJECT public: - explicit WaitTreeTimer(const Kernel::Timer& object); - ~WaitTreeTimer() override; - - std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override; }; - class WaitTreeThreadList : public WaitTreeExpandableItem { Q_OBJECT public: