Diffstat (limited to 'src/video_core')
-rw-r--r--  src/video_core/CMakeLists.txt | 8
-rw-r--r--  src/video_core/engines/fermi_2d.cpp | 7
-rw-r--r--  src/video_core/engines/fermi_2d.h | 2
-rw-r--r--  src/video_core/engines/kepler_compute.cpp | 3
-rw-r--r--  src/video_core/engines/kepler_compute.h | 3
-rw-r--r--  src/video_core/engines/kepler_memory.cpp | 2
-rw-r--r--  src/video_core/engines/kepler_memory.h | 1
-rw-r--r--  src/video_core/engines/maxwell_dma.cpp | 5
-rw-r--r--  src/video_core/engines/maxwell_dma.h | 1
-rw-r--r--  src/video_core/engines/shader_bytecode.h | 1
-rw-r--r--  src/video_core/gpu.cpp | 5
-rw-r--r--  src/video_core/gpu.h | 55
-rw-r--r--  src/video_core/gpu_asynch.cpp | 37
-rw-r--r--  src/video_core/gpu_asynch.h | 37
-rw-r--r--  src/video_core/gpu_synch.cpp | 37
-rw-r--r--  src/video_core/gpu_synch.h | 29
-rw-r--r--  src/video_core/gpu_thread.cpp | 152
-rw-r--r--  src/video_core/gpu_thread.h | 136
-rw-r--r--  src/video_core/renderer_base.cpp | 1
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp | 6
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer_cache.cpp | 115
-rw-r--r--  src/video_core/renderer_opengl/renderer_opengl.cpp | 27
-rw-r--r--  src/video_core/renderer_opengl/renderer_opengl.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.cpp | 2
-rw-r--r--  src/video_core/surface.cpp | 2
-rw-r--r--  src/video_core/textures/astc.cpp | 80
-rw-r--r--  src/video_core/textures/astc.h | 2
-rw-r--r--  src/video_core/textures/convert.cpp | 92
-rw-r--r--  src/video_core/textures/convert.h | 18
-rw-r--r--  src/video_core/textures/decoders.cpp | 6
-rw-r--r--  src/video_core/textures/decoders.h | 18
31 files changed, 691 insertions(+), 200 deletions(-)
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 3e9d2b3be..57f31cd58 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -17,6 +17,12 @@ add_library(video_core STATIC
engines/shader_header.h
gpu.cpp
gpu.h
+ gpu_asynch.cpp
+ gpu_asynch.h
+ gpu_synch.cpp
+ gpu_synch.h
+ gpu_thread.cpp
+ gpu_thread.h
macro_interpreter.cpp
macro_interpreter.h
memory_manager.cpp
@@ -94,6 +100,8 @@ add_library(video_core STATIC
surface.h
textures/astc.cpp
textures/astc.h
+ textures/convert.cpp
+ textures/convert.h
textures/decoders.cpp
textures/decoders.h
textures/texture.h
diff --git a/src/video_core/engines/fermi_2d.cpp b/src/video_core/engines/fermi_2d.cpp
index 540dcc52c..03b7ee5d8 100644
--- a/src/video_core/engines/fermi_2d.cpp
+++ b/src/video_core/engines/fermi_2d.cpp
@@ -2,12 +2,11 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include "core/core.h"
-#include "core/memory.h"
+#include "common/assert.h"
+#include "common/logging/log.h"
+#include "common/math_util.h"
#include "video_core/engines/fermi_2d.h"
-#include "video_core/engines/maxwell_3d.h"
#include "video_core/rasterizer_interface.h"
-#include "video_core/textures/decoders.h"
namespace Tegra::Engines {
diff --git a/src/video_core/engines/fermi_2d.h b/src/video_core/engines/fermi_2d.h
index c69f74cc5..80523e320 100644
--- a/src/video_core/engines/fermi_2d.h
+++ b/src/video_core/engines/fermi_2d.h
@@ -5,7 +5,7 @@
#pragma once
#include <array>
-#include "common/assert.h"
+#include <cstddef>
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp
index 4ca856b6b..b1d950460 100644
--- a/src/video_core/engines/kepler_compute.cpp
+++ b/src/video_core/engines/kepler_compute.cpp
@@ -2,9 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include "common/assert.h"
#include "common/logging/log.h"
-#include "core/core.h"
-#include "core/memory.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/memory_manager.h"
diff --git a/src/video_core/engines/kepler_compute.h b/src/video_core/engines/kepler_compute.h
index df0a32e0f..6575afd0f 100644
--- a/src/video_core/engines/kepler_compute.h
+++ b/src/video_core/engines/kepler_compute.h
@@ -5,8 +5,7 @@
#pragma once
#include <array>
-#include "common/assert.h"
-#include "common/bit_field.h"
+#include <cstddef>
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "video_core/gpu.h"
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp
index 4f6126116..aae2a4019 100644
--- a/src/video_core/engines/kepler_memory.cpp
+++ b/src/video_core/engines/kepler_memory.cpp
@@ -48,7 +48,7 @@ void KeplerMemory::ProcessData(u32 data) {
// We have to invalidate the destination region to evict any outdated surfaces from the cache.
// We do this before actually writing the new data because the destination address might contain
// a dirty surface that will have to be written back to memory.
- rasterizer.InvalidateRegion(*dest_address, sizeof(u32));
+ Core::System::GetInstance().GPU().InvalidateRegion(*dest_address, sizeof(u32));
Memory::Write32(*dest_address, data);
system.GPU().Maxwell3D().dirty_flags.OnMemoryWrite();
diff --git a/src/video_core/engines/kepler_memory.h b/src/video_core/engines/kepler_memory.h
index f680c2ad9..9181e9d80 100644
--- a/src/video_core/engines/kepler_memory.h
+++ b/src/video_core/engines/kepler_memory.h
@@ -5,6 +5,7 @@
#pragma once
#include <array>
+#include <cstddef>
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 529a14ec7..9dfea5999 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -3,6 +3,7 @@
// Refer to the license.txt file included.
#include "common/assert.h"
+#include "common/logging/log.h"
#include "core/core.h"
#include "core/memory.h"
#include "video_core/engines/maxwell_3d.h"
@@ -91,12 +92,12 @@ void MaxwellDMA::HandleCopy() {
const auto FlushAndInvalidate = [&](u32 src_size, u64 dst_size) {
// TODO(Subv): For now, manually flush the regions until we implement GPU-accelerated
// copying.
- rasterizer.FlushRegion(*source_cpu, src_size);
+ Core::System::GetInstance().GPU().FlushRegion(*source_cpu, src_size);
// We have to invalidate the destination region to evict any outdated surfaces from the
// cache. We do this before actually writing the new data because the destination address
// might contain a dirty surface that will have to be written back to memory.
- rasterizer.InvalidateRegion(*dest_cpu, dst_size);
+ Core::System::GetInstance().GPU().InvalidateRegion(*dest_cpu, dst_size);
};
if (regs.exec.is_dst_linear && !regs.exec.is_src_linear) {
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index cf75aeb12..34c369320 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -5,6 +5,7 @@
#pragma once
#include <array>
+#include <cstddef>
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index 252592edd..c7eb15b6a 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -6,7 +6,6 @@
#include <bitset>
#include <optional>
-#include <string>
#include <tuple>
#include <vector>
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index ac30d1a89..08abf8ac9 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -12,7 +12,7 @@
#include "video_core/engines/maxwell_3d.h"
#include "video_core/engines/maxwell_dma.h"
#include "video_core/gpu.h"
-#include "video_core/rasterizer_interface.h"
+#include "video_core/renderer_base.h"
namespace Tegra {
@@ -28,7 +28,8 @@ u32 FramebufferConfig::BytesPerPixel(PixelFormat format) {
UNREACHABLE();
}
-GPU::GPU(Core::System& system, VideoCore::RasterizerInterface& rasterizer) {
+GPU::GPU(Core::System& system, VideoCore::RendererBase& renderer) : renderer{renderer} {
+ auto& rasterizer{renderer.Rasterizer()};
memory_manager = std::make_unique<Tegra::MemoryManager>();
dma_pusher = std::make_unique<Tegra::DmaPusher>(*this);
maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, rasterizer, *memory_manager);
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index 6313702f2..14a421cc1 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -16,8 +16,8 @@ class System;
}
namespace VideoCore {
-class RasterizerInterface;
-}
+class RendererBase;
+} // namespace VideoCore
namespace Tegra {
@@ -119,9 +119,10 @@ enum class EngineID {
MAXWELL_DMA_COPY_A = 0xB0B5,
};
-class GPU final {
+class GPU {
public:
- explicit GPU(Core::System& system, VideoCore::RasterizerInterface& rasterizer);
+ explicit GPU(Core::System& system, VideoCore::RendererBase& renderer);
+
~GPU();
struct MethodCall {
@@ -200,8 +201,42 @@ public:
};
} regs{};
+ /// Push GPU command entries to be processed
+ virtual void PushGPUEntries(Tegra::CommandList&& entries) = 0;
+
+ /// Swap buffers (render frame)
+ virtual void SwapBuffers(
+ std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) = 0;
+
+ /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
+ virtual void FlushRegion(VAddr addr, u64 size) = 0;
+
+ /// Notify rasterizer that any caches of the specified region should be invalidated
+ virtual void InvalidateRegion(VAddr addr, u64 size) = 0;
+
+ /// Notify rasterizer that any caches of the specified region should be flushed and invalidated
+ virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0;
+
private:
+ void ProcessBindMethod(const MethodCall& method_call);
+ void ProcessSemaphoreTriggerMethod();
+ void ProcessSemaphoreRelease();
+ void ProcessSemaphoreAcquire();
+
+ /// Calls a GPU puller method.
+ void CallPullerMethod(const MethodCall& method_call);
+
+ /// Calls a GPU engine method.
+ void CallEngineMethod(const MethodCall& method_call);
+
+ /// Determines where the method should be executed.
+ bool ExecuteMethodOnEngine(const MethodCall& method_call);
+
+protected:
std::unique_ptr<Tegra::DmaPusher> dma_pusher;
+ VideoCore::RendererBase& renderer;
+
+private:
std::unique_ptr<Tegra::MemoryManager> memory_manager;
/// Mapping of command subchannels to their bound engine ids.
@@ -217,18 +252,6 @@ private:
std::unique_ptr<Engines::MaxwellDMA> maxwell_dma;
/// Inline memory engine
std::unique_ptr<Engines::KeplerMemory> kepler_memory;
-
- void ProcessBindMethod(const MethodCall& method_call);
- void ProcessSemaphoreTriggerMethod();
- void ProcessSemaphoreRelease();
- void ProcessSemaphoreAcquire();
-
- // Calls a GPU puller method.
- void CallPullerMethod(const MethodCall& method_call);
- // Calls a GPU engine method.
- void CallEngineMethod(const MethodCall& method_call);
- // Determines where the method should be executed.
- bool ExecuteMethodOnEngine(const MethodCall& method_call);
};
#define ASSERT_REG_POSITION(field_name, position) \
diff --git a/src/video_core/gpu_asynch.cpp b/src/video_core/gpu_asynch.cpp
new file mode 100644
index 000000000..ad0a747e3
--- /dev/null
+++ b/src/video_core/gpu_asynch.cpp
@@ -0,0 +1,37 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "video_core/gpu_asynch.h"
+#include "video_core/gpu_thread.h"
+#include "video_core/renderer_base.h"
+
+namespace VideoCommon {
+
+GPUAsynch::GPUAsynch(Core::System& system, VideoCore::RendererBase& renderer)
+ : Tegra::GPU(system, renderer), gpu_thread{renderer, *dma_pusher} {}
+
+GPUAsynch::~GPUAsynch() = default;
+
+void GPUAsynch::PushGPUEntries(Tegra::CommandList&& entries) {
+ gpu_thread.SubmitList(std::move(entries));
+}
+
+void GPUAsynch::SwapBuffers(
+ std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) {
+ gpu_thread.SwapBuffers(std::move(framebuffer));
+}
+
+void GPUAsynch::FlushRegion(VAddr addr, u64 size) {
+ gpu_thread.FlushRegion(addr, size);
+}
+
+void GPUAsynch::InvalidateRegion(VAddr addr, u64 size) {
+ gpu_thread.InvalidateRegion(addr, size);
+}
+
+void GPUAsynch::FlushAndInvalidateRegion(VAddr addr, u64 size) {
+ gpu_thread.FlushAndInvalidateRegion(addr, size);
+}
+
+} // namespace VideoCommon
diff --git a/src/video_core/gpu_asynch.h b/src/video_core/gpu_asynch.h
new file mode 100644
index 000000000..58046f3e9
--- /dev/null
+++ b/src/video_core/gpu_asynch.h
@@ -0,0 +1,37 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "video_core/gpu.h"
+#include "video_core/gpu_thread.h"
+
+namespace VideoCore {
+class RendererBase;
+} // namespace VideoCore
+
+namespace VideoCommon {
+
+namespace GPUThread {
+class ThreadManager;
+} // namespace GPUThread
+
+/// Implementation of GPU interface that runs the GPU asynchronously
+class GPUAsynch : public Tegra::GPU {
+public:
+ explicit GPUAsynch(Core::System& system, VideoCore::RendererBase& renderer);
+ ~GPUAsynch();
+
+ void PushGPUEntries(Tegra::CommandList&& entries) override;
+ void SwapBuffers(
+ std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override;
+ void FlushRegion(VAddr addr, u64 size) override;
+ void InvalidateRegion(VAddr addr, u64 size) override;
+ void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
+
+private:
+ GPUThread::ThreadManager gpu_thread;
+};
+
+} // namespace VideoCommon
diff --git a/src/video_core/gpu_synch.cpp b/src/video_core/gpu_synch.cpp
new file mode 100644
index 000000000..4c00b96c7
--- /dev/null
+++ b/src/video_core/gpu_synch.cpp
@@ -0,0 +1,37 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "video_core/gpu_synch.h"
+#include "video_core/renderer_base.h"
+
+namespace VideoCommon {
+
+GPUSynch::GPUSynch(Core::System& system, VideoCore::RendererBase& renderer)
+ : Tegra::GPU(system, renderer) {}
+
+GPUSynch::~GPUSynch() = default;
+
+void GPUSynch::PushGPUEntries(Tegra::CommandList&& entries) {
+ dma_pusher->Push(std::move(entries));
+ dma_pusher->DispatchCalls();
+}
+
+void GPUSynch::SwapBuffers(
+ std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) {
+ renderer.SwapBuffers(std::move(framebuffer));
+}
+
+void GPUSynch::FlushRegion(VAddr addr, u64 size) {
+ renderer.Rasterizer().FlushRegion(addr, size);
+}
+
+void GPUSynch::InvalidateRegion(VAddr addr, u64 size) {
+ renderer.Rasterizer().InvalidateRegion(addr, size);
+}
+
+void GPUSynch::FlushAndInvalidateRegion(VAddr addr, u64 size) {
+ renderer.Rasterizer().FlushAndInvalidateRegion(addr, size);
+}
+
+} // namespace VideoCommon
diff --git a/src/video_core/gpu_synch.h b/src/video_core/gpu_synch.h
new file mode 100644
index 000000000..658f683e2
--- /dev/null
+++ b/src/video_core/gpu_synch.h
@@ -0,0 +1,29 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "video_core/gpu.h"
+
+namespace VideoCore {
+class RendererBase;
+} // namespace VideoCore
+
+namespace VideoCommon {
+
+/// Implementation of GPU interface that runs the GPU synchronously
+class GPUSynch : public Tegra::GPU {
+public:
+ explicit GPUSynch(Core::System& system, VideoCore::RendererBase& renderer);
+ ~GPUSynch();
+
+ void PushGPUEntries(Tegra::CommandList&& entries) override;
+ void SwapBuffers(
+ std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override;
+ void FlushRegion(VAddr addr, u64 size) override;
+ void InvalidateRegion(VAddr addr, u64 size) override;
+ void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
+};
+
+} // namespace VideoCommon
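
The GPUSynch and GPUAsynch classes added above implement the same virtual Tegra::GPU interface, so the rest of the emulator can stay agnostic about which one is running. Below is a minimal sketch of how a caller might choose between them at construction time; the factory function, the boolean flag, and the commented call sites are illustrative assumptions, not part of this patch:

    #include <memory>
    #include "video_core/gpu_asynch.h"
    #include "video_core/gpu_synch.h"

    // Hypothetical factory that picks the GPU implementation from a user setting.
    // Destroying through the base pointer assumes Tegra::GPU has (or gains) a virtual destructor.
    std::unique_ptr<Tegra::GPU> CreateGPU(Core::System& system, VideoCore::RendererBase& renderer,
                                          bool use_asynchronous_gpu_emulation) {
        if (use_asynchronous_gpu_emulation) {
            return std::make_unique<VideoCommon::GPUAsynch>(system, renderer);
        }
        return std::make_unique<VideoCommon::GPUSynch>(system, renderer);
    }

    // Callers then go through the virtual interface regardless of the active variant:
    //     gpu->PushGPUEntries(std::move(entries));
    //     gpu->FlushRegion(addr, size);
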
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
new file mode 100644
index 000000000..c5bdd2a17
--- /dev/null
+++ b/src/video_core/gpu_thread.cpp
@@ -0,0 +1,152 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/microprofile.h"
+#include "core/frontend/scope_acquire_window_context.h"
+#include "core/settings.h"
+#include "video_core/dma_pusher.h"
+#include "video_core/gpu.h"
+#include "video_core/gpu_thread.h"
+#include "video_core/renderer_base.h"
+
+namespace VideoCommon::GPUThread {
+
+/// Executes a single GPU thread command
+static void ExecuteCommand(CommandData* command, VideoCore::RendererBase& renderer,
+ Tegra::DmaPusher& dma_pusher) {
+ if (const auto submit_list = std::get_if<SubmitListCommand>(command)) {
+ dma_pusher.Push(std::move(submit_list->entries));
+ dma_pusher.DispatchCalls();
+ } else if (const auto data = std::get_if<SwapBuffersCommand>(command)) {
+ renderer.SwapBuffers(data->framebuffer);
+ } else if (const auto data = std::get_if<FlushRegionCommand>(command)) {
+ renderer.Rasterizer().FlushRegion(data->addr, data->size);
+ } else if (const auto data = std::get_if<InvalidateRegionCommand>(command)) {
+ renderer.Rasterizer().InvalidateRegion(data->addr, data->size);
+ } else if (const auto data = std::get_if<FlushAndInvalidateRegionCommand>(command)) {
+ renderer.Rasterizer().FlushAndInvalidateRegion(data->addr, data->size);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+/// Runs the GPU thread
+static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher,
+ SynchState& state) {
+
+ MicroProfileOnThreadCreate("GpuThread");
+
+ auto WaitForWakeup = [&]() {
+ std::unique_lock<std::mutex> lock{state.signal_mutex};
+ state.signal_condition.wait(lock, [&] { return !state.is_idle || !state.is_running; });
+ };
+
+ // Wait for first GPU command before acquiring the window context
+ WaitForWakeup();
+
+ // If emulation was stopped during disk shader loading, abort before trying to acquire context
+ if (!state.is_running) {
+ return;
+ }
+
+ Core::Frontend::ScopeAcquireWindowContext acquire_context{renderer.GetRenderWindow()};
+
+ while (state.is_running) {
+ if (!state.is_running) {
+ return;
+ }
+
+ {
+ // Thread has been woken up, so make the previous write queue the next read queue
+ std::lock_guard<std::mutex> lock{state.signal_mutex};
+ std::swap(state.push_queue, state.pop_queue);
+ }
+
+ // Execute all of the GPU commands
+ while (!state.pop_queue->empty()) {
+ ExecuteCommand(&state.pop_queue->front(), renderer, dma_pusher);
+ state.pop_queue->pop();
+ }
+
+ state.UpdateIdleState();
+
+ // Signal that the GPU thread has finished processing commands
+ if (state.is_idle) {
+ state.idle_condition.notify_one();
+ }
+
+ // Wait for CPU thread to send more GPU commands
+ WaitForWakeup();
+ }
+}
+
+ThreadManager::ThreadManager(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher)
+ : renderer{renderer}, dma_pusher{dma_pusher}, thread{RunThread, std::ref(renderer),
+ std::ref(dma_pusher), std::ref(state)},
+ thread_id{thread.get_id()} {}
+
+ThreadManager::~ThreadManager() {
+ {
+ // Notify GPU thread that a shutdown is pending
+ std::lock_guard<std::mutex> lock{state.signal_mutex};
+ state.is_running = false;
+ }
+
+ state.signal_condition.notify_one();
+ thread.join();
+}
+
+void ThreadManager::SubmitList(Tegra::CommandList&& entries) {
+ if (entries.empty()) {
+ return;
+ }
+
+ PushCommand(SubmitListCommand(std::move(entries)), false, false);
+}
+
+void ThreadManager::SwapBuffers(
+ std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) {
+ PushCommand(SwapBuffersCommand(std::move(framebuffer)), true, false);
+}
+
+void ThreadManager::FlushRegion(VAddr addr, u64 size) {
+ // Block the CPU when using accurate emulation
+ PushCommand(FlushRegionCommand(addr, size), Settings::values.use_accurate_gpu_emulation, false);
+}
+
+void ThreadManager::InvalidateRegion(VAddr addr, u64 size) {
+ PushCommand(InvalidateRegionCommand(addr, size), true, true);
+}
+
+void ThreadManager::FlushAndInvalidateRegion(VAddr addr, u64 size) {
+ InvalidateRegion(addr, size);
+}
+
+void ThreadManager::PushCommand(CommandData&& command_data, bool wait_for_idle, bool allow_on_cpu) {
+ {
+ std::lock_guard<std::mutex> lock{state.signal_mutex};
+
+ if ((allow_on_cpu && state.is_idle) || IsGpuThread()) {
+ // Execute the command synchronously on the current thread
+ ExecuteCommand(&command_data, renderer, dma_pusher);
+ return;
+ }
+
+ // Push the command to the GPU thread
+ state.UpdateIdleState();
+ state.push_queue->emplace(command_data);
+ }
+
+ // Signal the GPU thread that commands are pending
+ state.signal_condition.notify_one();
+
+ if (wait_for_idle) {
+ // Wait for the GPU to be idle (all commands to be executed)
+ std::unique_lock<std::mutex> lock{state.idle_mutex};
+ state.idle_condition.wait(lock, [this] { return static_cast<bool>(state.is_idle); });
+ }
+}
+
+} // namespace VideoCommon::GPUThread
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
new file mode 100644
index 000000000..2ad8214cc
--- /dev/null
+++ b/src/video_core/gpu_thread.h
@@ -0,0 +1,136 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <atomic>
+#include <condition_variable>
+#include <memory>
+#include <mutex>
+#include <optional>
+#include <thread>
+#include <variant>
+
+namespace Tegra {
+struct FramebufferConfig;
+class DmaPusher;
+} // namespace Tegra
+
+namespace VideoCore {
+class RendererBase;
+} // namespace VideoCore
+
+namespace VideoCommon::GPUThread {
+
+/// Command to signal to the GPU thread that a command list is ready for processing
+struct SubmitListCommand final {
+ explicit SubmitListCommand(Tegra::CommandList&& entries) : entries{std::move(entries)} {}
+
+ Tegra::CommandList entries;
+};
+
+/// Command to signal to the GPU thread that a swap buffers is pending
+struct SwapBuffersCommand final {
+ explicit SwapBuffersCommand(std::optional<const Tegra::FramebufferConfig> framebuffer)
+ : framebuffer{std::move(framebuffer)} {}
+
+ std::optional<const Tegra::FramebufferConfig> framebuffer;
+};
+
+/// Command to signal to the GPU thread to flush a region
+struct FlushRegionCommand final {
+ explicit constexpr FlushRegionCommand(VAddr addr, u64 size) : addr{addr}, size{size} {}
+
+ const VAddr addr;
+ const u64 size;
+};
+
+/// Command to signal to the GPU thread to invalidate a region
+struct InvalidateRegionCommand final {
+ explicit constexpr InvalidateRegionCommand(VAddr addr, u64 size) : addr{addr}, size{size} {}
+
+ const VAddr addr;
+ const u64 size;
+};
+
+/// Command to signal to the GPU thread to flush and invalidate a region
+struct FlushAndInvalidateRegionCommand final {
+ explicit constexpr FlushAndInvalidateRegionCommand(VAddr addr, u64 size)
+ : addr{addr}, size{size} {}
+
+ const VAddr addr;
+ const u64 size;
+};
+
+using CommandData = std::variant<SubmitListCommand, SwapBuffersCommand, FlushRegionCommand,
+ InvalidateRegionCommand, FlushAndInvalidateRegionCommand>;
+
+/// Struct used to synchronize the GPU thread
+struct SynchState final {
+ std::atomic<bool> is_running{true};
+ std::atomic<bool> is_idle{true};
+ std::condition_variable signal_condition;
+ std::mutex signal_mutex;
+ std::condition_variable idle_condition;
+ std::mutex idle_mutex;
+
+ // We use two queues for sending commands to the GPU thread, one for writing (push_queue) to and
+ // one for reading from (pop_queue). These are swapped whenever the current pop_queue becomes
+ // empty. This allows for efficient thread-safe access, as it does not require any copies.
+
+ using CommandQueue = std::queue<CommandData>;
+ std::array<CommandQueue, 2> command_queues;
+ CommandQueue* push_queue{&command_queues[0]};
+ CommandQueue* pop_queue{&command_queues[1]};
+
+ void UpdateIdleState() {
+ std::lock_guard<std::mutex> lock{idle_mutex};
+ is_idle = command_queues[0].empty() && command_queues[1].empty();
+ }
+};
+
+/// Class used to manage the GPU thread
+class ThreadManager final {
+public:
+ explicit ThreadManager(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher);
+ ~ThreadManager();
+
+ /// Push GPU command entries to be processed
+ void SubmitList(Tegra::CommandList&& entries);
+
+ /// Swap buffers (render frame)
+ void SwapBuffers(
+ std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer);
+
+ /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
+ void FlushRegion(VAddr addr, u64 size);
+
+ /// Notify rasterizer that any caches of the specified region should be invalidated
+ void InvalidateRegion(VAddr addr, u64 size);
+
+ /// Notify rasterizer that any caches of the specified region should be flushed and invalidated
+ void FlushAndInvalidateRegion(VAddr addr, u64 size);
+
+ /// Blocks the caller until the GPU thread is idle, used for synchronization
+ void WaitForIdle();
+
+private:
+ /// Pushes a command to be executed by the GPU thread
+ void PushCommand(CommandData&& command_data, bool wait_for_idle, bool allow_on_cpu);
+
+ /// Returns true if this is called by the GPU thread
+ bool IsGpuThread() const {
+ return std::this_thread::get_id() == thread_id;
+ }
+
+private:
+ SynchState state;
+ std::thread thread;
+ std::thread::id thread_id;
+ VideoCore::RendererBase& renderer;
+ Tegra::DmaPusher& dma_pusher;
+};
+
+} // namespace VideoCommon::GPUThread
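
The SynchState comment above describes a pair of command queues that are swapped rather than copied: the CPU thread appends to push_queue while the GPU thread drains pop_queue. A self-contained sketch of that double-buffering pattern follows, simplified to a std::string payload and without the idle/shutdown tracking of the real ThreadManager:

    #include <array>
    #include <condition_variable>
    #include <mutex>
    #include <queue>
    #include <string>
    #include <utility>

    struct DoubleBufferedQueue {
        std::array<std::queue<std::string>, 2> queues;
        std::queue<std::string>* push_queue = &queues[0];
        std::queue<std::string>* pop_queue = &queues[1];
        std::mutex mutex;
        std::condition_variable condition;

        // Producer side: enqueue under the lock, then wake the consumer.
        void Push(std::string command) {
            {
                std::lock_guard<std::mutex> lock{mutex};
                push_queue->emplace(std::move(command));
            }
            condition.notify_one();
        }

        // Consumer side: wait for work, swap the queues, then drain without holding the lock.
        template <typename Handler>
        void ConsumeAll(Handler&& handler) {
            {
                std::unique_lock<std::mutex> lock{mutex};
                condition.wait(lock, [this] { return !push_queue->empty(); });
                std::swap(push_queue, pop_queue);
            }
            while (!pop_queue->empty()) {
                handler(pop_queue->front());
                pop_queue->pop();
            }
        }
    };
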
diff --git a/src/video_core/renderer_base.cpp b/src/video_core/renderer_base.cpp
index 94223f45f..919d1f2d4 100644
--- a/src/video_core/renderer_base.cpp
+++ b/src/video_core/renderer_base.cpp
@@ -2,6 +2,7 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include "common/logging/log.h"
#include "core/frontend/emu_window.h"
#include "core/settings.h"
#include "video_core/renderer_base.h"
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 321d9dd3d..168288088 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -749,11 +749,7 @@ void RasterizerOpenGL::FlushAll() {}
void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) {
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
-
- if (Settings::values.use_accurate_gpu_emulation) {
- // Only flush if use_accurate_gpu_emulation is enabled, as it incurs a performance hit
- res_cache.FlushRegion(addr, size);
- }
+ res_cache.FlushRegion(addr, size);
}
void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
index b5a9722f9..876698b37 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
@@ -21,7 +21,7 @@
#include "video_core/renderer_opengl/gl_rasterizer_cache.h"
#include "video_core/renderer_opengl/utils.h"
#include "video_core/surface.h"
-#include "video_core/textures/astc.h"
+#include "video_core/textures/convert.h"
#include "video_core/textures/decoders.h"
namespace OpenGL {
@@ -597,103 +597,6 @@ CachedSurface::CachedSurface(const SurfaceParams& params)
}
}
-static void ConvertS8Z24ToZ24S8(std::vector<u8>& data, u32 width, u32 height, bool reverse) {
- union S8Z24 {
- BitField<0, 24, u32> z24;
- BitField<24, 8, u32> s8;
- };
- static_assert(sizeof(S8Z24) == 4, "S8Z24 is incorrect size");
-
- union Z24S8 {
- BitField<0, 8, u32> s8;
- BitField<8, 24, u32> z24;
- };
- static_assert(sizeof(Z24S8) == 4, "Z24S8 is incorrect size");
-
- S8Z24 s8z24_pixel{};
- Z24S8 z24s8_pixel{};
- constexpr auto bpp{GetBytesPerPixel(PixelFormat::S8Z24)};
- for (std::size_t y = 0; y < height; ++y) {
- for (std::size_t x = 0; x < width; ++x) {
- const std::size_t offset{bpp * (y * width + x)};
- if (reverse) {
- std::memcpy(&z24s8_pixel, &data[offset], sizeof(Z24S8));
- s8z24_pixel.s8.Assign(z24s8_pixel.s8);
- s8z24_pixel.z24.Assign(z24s8_pixel.z24);
- std::memcpy(&data[offset], &s8z24_pixel, sizeof(S8Z24));
- } else {
- std::memcpy(&s8z24_pixel, &data[offset], sizeof(S8Z24));
- z24s8_pixel.s8.Assign(s8z24_pixel.s8);
- z24s8_pixel.z24.Assign(s8z24_pixel.z24);
- std::memcpy(&data[offset], &z24s8_pixel, sizeof(Z24S8));
- }
- }
- }
-}
-
-/**
- * Helper function to perform software conversion (as needed) when loading a buffer from Switch
- * memory. This is for Maxwell pixel formats that cannot be represented as-is in OpenGL or with
- * typical desktop GPUs.
- */
-static void ConvertFormatAsNeeded_LoadGLBuffer(std::vector<u8>& data, PixelFormat pixel_format,
- u32 width, u32 height, u32 depth) {
- switch (pixel_format) {
- case PixelFormat::ASTC_2D_4X4:
- case PixelFormat::ASTC_2D_8X8:
- case PixelFormat::ASTC_2D_8X5:
- case PixelFormat::ASTC_2D_5X4:
- case PixelFormat::ASTC_2D_5X5:
- case PixelFormat::ASTC_2D_4X4_SRGB:
- case PixelFormat::ASTC_2D_8X8_SRGB:
- case PixelFormat::ASTC_2D_8X5_SRGB:
- case PixelFormat::ASTC_2D_5X4_SRGB:
- case PixelFormat::ASTC_2D_5X5_SRGB:
- case PixelFormat::ASTC_2D_10X8:
- case PixelFormat::ASTC_2D_10X8_SRGB: {
- // Convert ASTC pixel formats to RGBA8, as most desktop GPUs do not support ASTC.
- u32 block_width{};
- u32 block_height{};
- std::tie(block_width, block_height) = GetASTCBlockSize(pixel_format);
- data =
- Tegra::Texture::ASTC::Decompress(data, width, height, depth, block_width, block_height);
- break;
- }
- case PixelFormat::S8Z24:
- // Convert the S8Z24 depth format to Z24S8, as OpenGL does not support S8Z24.
- ConvertS8Z24ToZ24S8(data, width, height, false);
- break;
- }
-}
-
-/**
- * Helper function to perform software conversion (as needed) when flushing a buffer from OpenGL to
- * Switch memory. This is for Maxwell pixel formats that cannot be represented as-is in OpenGL or
- * with typical desktop GPUs.
- */
-static void ConvertFormatAsNeeded_FlushGLBuffer(std::vector<u8>& data, PixelFormat pixel_format,
- u32 width, u32 height) {
- switch (pixel_format) {
- case PixelFormat::ASTC_2D_4X4:
- case PixelFormat::ASTC_2D_8X8:
- case PixelFormat::ASTC_2D_4X4_SRGB:
- case PixelFormat::ASTC_2D_8X8_SRGB:
- case PixelFormat::ASTC_2D_5X5:
- case PixelFormat::ASTC_2D_5X5_SRGB:
- case PixelFormat::ASTC_2D_10X8:
- case PixelFormat::ASTC_2D_10X8_SRGB: {
- LOG_CRITICAL(HW_GPU, "Conversion of format {} after texture flushing is not implemented",
- static_cast<u32>(pixel_format));
- UNREACHABLE();
- break;
- }
- case PixelFormat::S8Z24:
- // Convert the Z24S8 depth format to S8Z24, as OpenGL does not support S8Z24.
- ConvertS8Z24ToZ24S8(data, width, height, true);
- break;
- }
-}
-
MICROPROFILE_DEFINE(OpenGL_SurfaceLoad, "OpenGL", "Surface Load", MP_RGB(128, 192, 64));
void CachedSurface::LoadGLBuffer() {
MICROPROFILE_SCOPE(OpenGL_SurfaceLoad);
@@ -722,8 +625,16 @@ void CachedSurface::LoadGLBuffer() {
}
}
for (u32 i = 0; i < params.max_mip_level; i++) {
- ConvertFormatAsNeeded_LoadGLBuffer(gl_buffer[i], params.pixel_format, params.MipWidth(i),
- params.MipHeight(i), params.MipDepth(i));
+ const u32 width = params.MipWidth(i);
+ const u32 height = params.MipHeight(i);
+ const u32 depth = params.MipDepth(i);
+ if (VideoCore::Surface::IsPixelFormatASTC(params.pixel_format)) {
+ // Reserve size for RGBA8 conversion
+ constexpr std::size_t rgba_bpp = 4;
+ gl_buffer[i].resize(std::max(gl_buffer[i].size(), width * height * depth * rgba_bpp));
+ }
+ Tegra::Texture::ConvertFromGuestToHost(gl_buffer[i].data(), params.pixel_format, width,
+ height, depth, true, true);
}
}
@@ -746,8 +657,8 @@ void CachedSurface::FlushGLBuffer() {
glGetTextureImage(texture.handle, 0, tuple.format, tuple.type,
static_cast<GLsizei>(gl_buffer[0].size()), gl_buffer[0].data());
glPixelStorei(GL_PACK_ROW_LENGTH, 0);
- ConvertFormatAsNeeded_FlushGLBuffer(gl_buffer[0], params.pixel_format, params.width,
- params.height);
+ Tegra::Texture::ConvertFromHostToGuest(gl_buffer[0].data(), params.pixel_format, params.width,
+ params.height, params.depth, true, true);
const u8* const texture_src_data = Memory::GetPointer(params.addr);
ASSERT(texture_src_data);
if (params.is_tiled) {
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index e60b2eb44..8b510b6ae 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -244,6 +244,21 @@ void RendererOpenGL::InitOpenGLObjects() {
LoadColorToActiveGLTexture(0, 0, 0, 0, screen_info.texture);
}
+void RendererOpenGL::AddTelemetryFields() {
+ const char* const gl_version{reinterpret_cast<char const*>(glGetString(GL_VERSION))};
+ const char* const gpu_vendor{reinterpret_cast<char const*>(glGetString(GL_VENDOR))};
+ const char* const gpu_model{reinterpret_cast<char const*>(glGetString(GL_RENDERER))};
+
+ LOG_INFO(Render_OpenGL, "GL_VERSION: {}", gl_version);
+ LOG_INFO(Render_OpenGL, "GL_VENDOR: {}", gpu_vendor);
+ LOG_INFO(Render_OpenGL, "GL_RENDERER: {}", gpu_model);
+
+ auto& telemetry_session = system.TelemetrySession();
+ telemetry_session.AddField(Telemetry::FieldType::UserSystem, "GPU_Vendor", gpu_vendor);
+ telemetry_session.AddField(Telemetry::FieldType::UserSystem, "GPU_Model", gpu_model);
+ telemetry_session.AddField(Telemetry::FieldType::UserSystem, "GPU_OpenGL_Version", gl_version);
+}
+
void RendererOpenGL::CreateRasterizer() {
if (rasterizer) {
return;
@@ -466,17 +481,7 @@ bool RendererOpenGL::Init() {
glDebugMessageCallback(DebugHandler, nullptr);
}
- const char* gl_version{reinterpret_cast<char const*>(glGetString(GL_VERSION))};
- const char* gpu_vendor{reinterpret_cast<char const*>(glGetString(GL_VENDOR))};
- const char* gpu_model{reinterpret_cast<char const*>(glGetString(GL_RENDERER))};
-
- LOG_INFO(Render_OpenGL, "GL_VERSION: {}", gl_version);
- LOG_INFO(Render_OpenGL, "GL_VENDOR: {}", gpu_vendor);
- LOG_INFO(Render_OpenGL, "GL_RENDERER: {}", gpu_model);
-
- Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_Vendor", gpu_vendor);
- Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_Model", gpu_model);
- Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_OpenGL_Version", gl_version);
+ AddTelemetryFields();
if (!GLAD_GL_VERSION_4_3) {
return false;
diff --git a/src/video_core/renderer_opengl/renderer_opengl.h b/src/video_core/renderer_opengl/renderer_opengl.h
index c168fa89e..6cbf9d2cb 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.h
+++ b/src/video_core/renderer_opengl/renderer_opengl.h
@@ -60,6 +60,7 @@ public:
private:
void InitOpenGLObjects();
+ void AddTelemetryFields();
void CreateRasterizer();
void ConfigureFramebufferTexture(TextureInfo& texture,
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 18b7b94a1..4a33a6c84 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -8,7 +8,7 @@
#include <tuple>
#include "common/alignment.h"
-#include "core/core.h"
+#include "common/assert.h"
#include "core/memory.h"
#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
diff --git a/src/video_core/surface.cpp b/src/video_core/surface.cpp
index 044ba116a..a7ac26d71 100644
--- a/src/video_core/surface.cpp
+++ b/src/video_core/surface.cpp
@@ -89,8 +89,6 @@ PixelFormat PixelFormatFromDepthFormat(Tegra::DepthFormat format) {
PixelFormat PixelFormatFromRenderTargetFormat(Tegra::RenderTargetFormat format) {
switch (format) {
- // TODO (Hexagon12): Converting SRGBA to RGBA is a hack and doesn't completely correct the
- // gamma.
case Tegra::RenderTargetFormat::RGBA8_SRGB:
return PixelFormat::RGBA8_SRGB;
case Tegra::RenderTargetFormat::RGBA8_UNORM:
diff --git a/src/video_core/textures/astc.cpp b/src/video_core/textures/astc.cpp
index bc50a4876..b508d64e9 100644
--- a/src/video_core/textures/astc.cpp
+++ b/src/video_core/textures/astc.cpp
@@ -23,28 +23,12 @@
#include "video_core/textures/astc.h"
-class BitStream {
+class InputBitStream {
public:
- explicit BitStream(unsigned char* ptr, int nBits = 0, int start_offset = 0)
+ explicit InputBitStream(const unsigned char* ptr, int nBits = 0, int start_offset = 0)
: m_NumBits(nBits), m_CurByte(ptr), m_NextBit(start_offset % 8) {}
- ~BitStream() = default;
-
- int GetBitsWritten() const {
- return m_BitsWritten;
- }
-
- void WriteBitsR(unsigned int val, unsigned int nBits) {
- for (unsigned int i = 0; i < nBits; i++) {
- WriteBit((val >> (nBits - i - 1)) & 1);
- }
- }
-
- void WriteBits(unsigned int val, unsigned int nBits) {
- for (unsigned int i = 0; i < nBits; i++) {
- WriteBit((val >> i) & 1);
- }
- }
+ ~InputBitStream() = default;
int GetBitsRead() const {
return m_BitsRead;
@@ -71,6 +55,38 @@ public:
}
private:
+ const int m_NumBits;
+ const unsigned char* m_CurByte;
+ int m_NextBit = 0;
+ int m_BitsRead = 0;
+
+ bool done = false;
+};
+
+class OutputBitStream {
+public:
+ explicit OutputBitStream(unsigned char* ptr, int nBits = 0, int start_offset = 0)
+ : m_NumBits(nBits), m_CurByte(ptr), m_NextBit(start_offset % 8) {}
+
+ ~OutputBitStream() = default;
+
+ int GetBitsWritten() const {
+ return m_BitsWritten;
+ }
+
+ void WriteBitsR(unsigned int val, unsigned int nBits) {
+ for (unsigned int i = 0; i < nBits; i++) {
+ WriteBit((val >> (nBits - i - 1)) & 1);
+ }
+ }
+
+ void WriteBits(unsigned int val, unsigned int nBits) {
+ for (unsigned int i = 0; i < nBits; i++) {
+ WriteBit((val >> i) & 1);
+ }
+ }
+
+private:
void WriteBit(int b) {
if (done)
@@ -238,8 +254,8 @@ public:
// Fills result with the values that are encoded in the given
// bitstream. We must know beforehand what the maximum possible
// value is, and how many values we're decoding.
- static void DecodeIntegerSequence(std::vector<IntegerEncodedValue>& result, BitStream& bits,
- uint32_t maxRange, uint32_t nValues) {
+ static void DecodeIntegerSequence(std::vector<IntegerEncodedValue>& result,
+ InputBitStream& bits, uint32_t maxRange, uint32_t nValues) {
// Determine encoding parameters
IntegerEncodedValue val = IntegerEncodedValue::CreateEncoding(maxRange);
@@ -267,7 +283,7 @@ public:
}
private:
- static void DecodeTritBlock(BitStream& bits, std::vector<IntegerEncodedValue>& result,
+ static void DecodeTritBlock(InputBitStream& bits, std::vector<IntegerEncodedValue>& result,
uint32_t nBitsPerValue) {
// Implement the algorithm in section C.2.12
uint32_t m[5];
@@ -327,7 +343,7 @@ private:
}
}
- static void DecodeQuintBlock(BitStream& bits, std::vector<IntegerEncodedValue>& result,
+ static void DecodeQuintBlock(InputBitStream& bits, std::vector<IntegerEncodedValue>& result,
uint32_t nBitsPerValue) {
// Implement the algorithm in section C.2.12
uint32_t m[3];
@@ -406,7 +422,7 @@ struct TexelWeightParams {
}
};
-static TexelWeightParams DecodeBlockInfo(BitStream& strm) {
+static TexelWeightParams DecodeBlockInfo(InputBitStream& strm) {
TexelWeightParams params;
// Read the entire block mode all at once
@@ -605,7 +621,7 @@ static TexelWeightParams DecodeBlockInfo(BitStream& strm) {
return params;
}
-static void FillVoidExtentLDR(BitStream& strm, uint32_t* const outBuf, uint32_t blockWidth,
+static void FillVoidExtentLDR(InputBitStream& strm, uint32_t* const outBuf, uint32_t blockWidth,
uint32_t blockHeight) {
// Don't actually care about the void extent, just read the bits...
for (int i = 0; i < 4; ++i) {
@@ -821,7 +837,7 @@ static void DecodeColorValues(uint32_t* out, uint8_t* data, const uint32_t* mode
// We now have enough to decode our integer sequence.
std::vector<IntegerEncodedValue> decodedColorValues;
- BitStream colorStream(data);
+ InputBitStream colorStream(data);
IntegerEncodedValue::DecodeIntegerSequence(decodedColorValues, colorStream, range, nValues);
// Once we have the decoded values, we need to dequantize them to the 0-255 range
@@ -1365,9 +1381,9 @@ static void ComputeEndpoints(Pixel& ep1, Pixel& ep2, const uint32_t*& colorValue
#undef READ_INT_VALUES
}
-static void DecompressBlock(uint8_t inBuf[16], const uint32_t blockWidth,
+static void DecompressBlock(const uint8_t inBuf[16], const uint32_t blockWidth,
const uint32_t blockHeight, uint32_t* outBuf) {
- BitStream strm(inBuf);
+ InputBitStream strm(inBuf);
TexelWeightParams weightParams = DecodeBlockInfo(strm);
// Was there an error?
@@ -1421,7 +1437,7 @@ static void DecompressBlock(uint8_t inBuf[16], const uint32_t blockWidth,
// Define color data.
uint8_t colorEndpointData[16];
memset(colorEndpointData, 0, sizeof(colorEndpointData));
- BitStream colorEndpointStream(colorEndpointData, 16 * 8, 0);
+ OutputBitStream colorEndpointStream(colorEndpointData, 16 * 8, 0);
// Read extra config data...
uint32_t baseCEM = 0;
@@ -1549,7 +1565,7 @@ static void DecompressBlock(uint8_t inBuf[16], const uint32_t blockWidth,
memset(texelWeightData + clearByteStart, 0, 16 - clearByteStart);
std::vector<IntegerEncodedValue> texelWeightValues;
- BitStream weightStream(texelWeightData);
+ InputBitStream weightStream(texelWeightData);
IntegerEncodedValue::DecodeIntegerSequence(texelWeightValues, weightStream,
weightParams.m_MaxWeight,
@@ -1597,7 +1613,7 @@ static void DecompressBlock(uint8_t inBuf[16], const uint32_t blockWidth,
namespace Tegra::Texture::ASTC {
-std::vector<uint8_t> Decompress(std::vector<uint8_t>& data, uint32_t width, uint32_t height,
+std::vector<uint8_t> Decompress(const uint8_t* data, uint32_t width, uint32_t height,
uint32_t depth, uint32_t block_width, uint32_t block_height) {
uint32_t blockIdx = 0;
std::vector<uint8_t> outData(height * width * depth * 4);
@@ -1605,7 +1621,7 @@ std::vector<uint8_t> Decompress(std::vector<uint8_t>& data, uint32_t width, uint
for (uint32_t j = 0; j < height; j += block_height) {
for (uint32_t i = 0; i < width; i += block_width) {
- uint8_t* blockPtr = data.data() + blockIdx * 16;
+ const uint8_t* blockPtr = data + blockIdx * 16;
// Blocks can be at most 12x12
uint32_t uncompData[144];
diff --git a/src/video_core/textures/astc.h b/src/video_core/textures/astc.h
index d419dd025..991cdba72 100644
--- a/src/video_core/textures/astc.h
+++ b/src/video_core/textures/astc.h
@@ -9,7 +9,7 @@
namespace Tegra::Texture::ASTC {
-std::vector<uint8_t> Decompress(std::vector<uint8_t>& data, uint32_t width, uint32_t height,
+std::vector<uint8_t> Decompress(const uint8_t* data, uint32_t width, uint32_t height,
uint32_t depth, uint32_t block_width, uint32_t block_height);
} // namespace Tegra::Texture::ASTC
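
ASTC::Decompress now takes a raw const pointer instead of a mutable std::vector reference, so callers can decode straight out of guest memory without copying. A usage sketch; the 64x64 dimensions and 4x4 block size are made-up example values:

    #include <cstdint>
    #include <vector>
    #include "video_core/textures/astc.h"

    std::vector<uint8_t> DecodeExample(const std::vector<uint8_t>& compressed) {
        const uint32_t width = 64, height = 64, depth = 1;
        const uint32_t block_width = 4, block_height = 4; // e.g. ASTC_2D_4X4
        // Returns an RGBA8 buffer of width * height * depth * 4 bytes.
        return Tegra::Texture::ASTC::Decompress(compressed.data(), width, height, depth,
                                                block_width, block_height);
    }
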
diff --git a/src/video_core/textures/convert.cpp b/src/video_core/textures/convert.cpp
new file mode 100644
index 000000000..5e439f036
--- /dev/null
+++ b/src/video_core/textures/convert.cpp
@@ -0,0 +1,92 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <cstring>
+#include <tuple>
+#include <vector>
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "common/logging/log.h"
+#include "video_core/textures/astc.h"
+#include "video_core/textures/convert.h"
+
+namespace Tegra::Texture {
+
+using VideoCore::Surface::PixelFormat;
+
+template <bool reverse>
+void SwapS8Z24ToZ24S8(u8* data, u32 width, u32 height) {
+ union S8Z24 {
+ BitField<0, 24, u32> z24;
+ BitField<24, 8, u32> s8;
+ };
+ static_assert(sizeof(S8Z24) == 4, "S8Z24 is incorrect size");
+
+ union Z24S8 {
+ BitField<0, 8, u32> s8;
+ BitField<8, 24, u32> z24;
+ };
+ static_assert(sizeof(Z24S8) == 4, "Z24S8 is incorrect size");
+
+ S8Z24 s8z24_pixel{};
+ Z24S8 z24s8_pixel{};
+ constexpr auto bpp{
+ VideoCore::Surface::GetBytesPerPixel(VideoCore::Surface::PixelFormat::S8Z24)};
+ for (std::size_t y = 0; y < height; ++y) {
+ for (std::size_t x = 0; x < width; ++x) {
+ const std::size_t offset{bpp * (y * width + x)};
+ if constexpr (reverse) {
+ std::memcpy(&z24s8_pixel, &data[offset], sizeof(Z24S8));
+ s8z24_pixel.s8.Assign(z24s8_pixel.s8);
+ s8z24_pixel.z24.Assign(z24s8_pixel.z24);
+ std::memcpy(&data[offset], &s8z24_pixel, sizeof(S8Z24));
+ } else {
+ std::memcpy(&s8z24_pixel, &data[offset], sizeof(S8Z24));
+ z24s8_pixel.s8.Assign(s8z24_pixel.s8);
+ z24s8_pixel.z24.Assign(s8z24_pixel.z24);
+ std::memcpy(&data[offset], &z24s8_pixel, sizeof(Z24S8));
+ }
+ }
+ }
+}
+
+static void ConvertS8Z24ToZ24S8(u8* data, u32 width, u32 height) {
+ SwapS8Z24ToZ24S8<false>(data, width, height);
+}
+
+static void ConvertZ24S8ToS8Z24(u8* data, u32 width, u32 height) {
+ SwapS8Z24ToZ24S8<true>(data, width, height);
+}
+
+void ConvertFromGuestToHost(u8* data, PixelFormat pixel_format, u32 width, u32 height, u32 depth,
+ bool convert_astc, bool convert_s8z24) {
+ if (convert_astc && IsPixelFormatASTC(pixel_format)) {
+ // Convert ASTC pixel formats to RGBA8, as most desktop GPUs do not support ASTC.
+ u32 block_width{};
+ u32 block_height{};
+ std::tie(block_width, block_height) = GetASTCBlockSize(pixel_format);
+ const std::vector<u8> rgba8_data =
+ Tegra::Texture::ASTC::Decompress(data, width, height, depth, block_width, block_height);
+ std::copy(rgba8_data.begin(), rgba8_data.end(), data);
+
+ } else if (convert_s8z24 && pixel_format == PixelFormat::S8Z24) {
+ Tegra::Texture::ConvertS8Z24ToZ24S8(data, width, height);
+ }
+}
+
+void ConvertFromHostToGuest(u8* data, PixelFormat pixel_format, u32 width, u32 height, u32 depth,
+ bool convert_astc, bool convert_s8z24) {
+ if (convert_astc && IsPixelFormatASTC(pixel_format)) {
+ LOG_CRITICAL(HW_GPU, "Conversion of format {} after texture flushing is not implemented",
+ static_cast<u32>(pixel_format));
+ UNREACHABLE();
+
+ } else if (convert_s8z24 && pixel_format == PixelFormat::S8Z24) {
+ Tegra::Texture::ConvertZ24S8ToS8Z24(data, width, height);
+ }
+}
+
+} // namespace Tegra::Texture
\ No newline at end of file
diff --git a/src/video_core/textures/convert.h b/src/video_core/textures/convert.h
new file mode 100644
index 000000000..07cd8b5da
--- /dev/null
+++ b/src/video_core/textures/convert.h
@@ -0,0 +1,18 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "video_core/surface.h"
+
+namespace Tegra::Texture {
+
+void ConvertFromGuestToHost(u8* data, VideoCore::Surface::PixelFormat pixel_format, u32 width,
+ u32 height, u32 depth, bool convert_astc, bool convert_s8z24);
+
+void ConvertFromHostToGuest(u8* data, VideoCore::Surface::PixelFormat pixel_format, u32 width,
+ u32 height, u32 depth, bool convert_astc, bool convert_s8z24);
+
+} // namespace Tegra::Texture
\ No newline at end of file
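
These two helpers expose the conversions that gl_rasterizer_cache.cpp previously performed inline. A sketch of the guest-to-host direction, mirroring the rasterizer-cache hunk above where the buffer is grown to hold the RGBA8 result of ASTC decompression; the function name and parameters here are illustrative:

    #include <algorithm>
    #include <cstddef>
    #include <vector>
    #include "common/common_types.h"
    #include "video_core/surface.h"
    #include "video_core/textures/convert.h"

    void LoadExample(std::vector<u8>& buffer, VideoCore::Surface::PixelFormat format, u32 width,
                     u32 height, u32 depth) {
        if (VideoCore::Surface::IsPixelFormatASTC(format)) {
            // ASTC data is decompressed to RGBA8 in place, so reserve room for the larger result.
            constexpr std::size_t rgba_bpp = 4;
            buffer.resize(std::max<std::size_t>(buffer.size(), width * height * depth * rgba_bpp));
        }
        Tegra::Texture::ConvertFromGuestToHost(buffer.data(), format, width, height, depth,
                                               /*convert_astc=*/true, /*convert_s8z24=*/true);
    }
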
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp
index 5db75de22..cad7340f5 100644
--- a/src/video_core/textures/decoders.cpp
+++ b/src/video_core/textures/decoders.cpp
@@ -103,8 +103,8 @@ void FastProcessBlock(u8* const swizzled_data, u8* const unswizzled_data, const
const u32 swizzle_offset{y_address + table[(xb / fast_swizzle_align) % 4]};
const u32 out_x = xb * out_bytes_per_pixel / bytes_per_pixel;
const u32 pixel_index{out_x + pixel_base};
- data_ptrs[unswizzle] = swizzled_data + swizzle_offset;
- data_ptrs[!unswizzle] = unswizzled_data + pixel_index;
+ data_ptrs[unswizzle ? 1 : 0] = swizzled_data + swizzle_offset;
+ data_ptrs[unswizzle ? 0 : 1] = unswizzled_data + pixel_index;
std::memcpy(data_ptrs[0], data_ptrs[1], fast_swizzle_align);
}
pixel_base += stride_x;
@@ -154,7 +154,7 @@ void SwizzledData(u8* const swizzled_data, u8* const unswizzled_data, const bool
for (u32 xb = 0; xb < blocks_on_x; xb++) {
const u32 x_start = xb * block_x_elements;
const u32 x_end = std::min(width, x_start + block_x_elements);
- if (fast) {
+ if constexpr (fast) {
FastProcessBlock(swizzled_data, unswizzled_data, unswizzle, x_start, y_start,
z_start, x_end, y_end, z_end, tile_offset, xy_block_size,
layer_z, stride_x, bytes_per_pixel, out_bytes_per_pixel);
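
The change from if (fast) to if constexpr (fast) works because fast is a compile-time constant (a template parameter of SwizzledData), so each specialization compiles only the branch it actually uses. A generic illustration with placeholder names, not the real swizzling code:

    #include <cstdio>

    template <bool fast>
    void ProcessExample(int value) {
        if constexpr (fast) {
            std::printf("fast path: %d\n", value); // discarded entirely when fast == false
        } else {
            std::printf("generic path: %d\n", value);
        }
    }

    void Demo() {
        ProcessExample<true>(1);  // instantiates only the fast branch
        ProcessExample<false>(2); // instantiates only the generic branch
    }
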
diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h
index 85b7e9f7b..65df86890 100644
--- a/src/video_core/textures/decoders.h
+++ b/src/video_core/textures/decoders.h
@@ -16,16 +16,13 @@ inline std::size_t GetGOBSize() {
return 512;
}
-/**
- * Unswizzles a swizzled texture without changing its format.
- */
+/// Unswizzles a swizzled texture without changing its format.
void UnswizzleTexture(u8* unswizzled_data, VAddr address, u32 tile_size_x, u32 tile_size_y,
u32 bytes_per_pixel, u32 width, u32 height, u32 depth,
u32 block_height = TICEntry::DefaultBlockHeight,
u32 block_depth = TICEntry::DefaultBlockHeight, u32 width_spacing = 0);
-/**
- * Unswizzles a swizzled texture without changing its format.
- */
+
+/// Unswizzles a swizzled texture without changing its format.
std::vector<u8> UnswizzleTexture(VAddr address, u32 tile_size_x, u32 tile_size_y,
u32 bytes_per_pixel, u32 width, u32 height, u32 depth,
u32 block_height = TICEntry::DefaultBlockHeight,
@@ -37,15 +34,11 @@ void CopySwizzledData(u32 width, u32 height, u32 depth, u32 bytes_per_pixel,
u32 out_bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data,
bool unswizzle, u32 block_height, u32 block_depth, u32 width_spacing);
-/**
- * Decodes an unswizzled texture into a A8R8G8B8 texture.
- */
+/// Decodes an unswizzled texture into an A8R8G8B8 texture.
std::vector<u8> DecodeTexture(const std::vector<u8>& texture_data, TextureFormat format, u32 width,
u32 height);
-/**
- * This function calculates the correct size of a texture depending if it's tiled or not.
- */
+/// Calculates the correct size of a texture depending on whether it is tiled.
std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth,
u32 block_height, u32 block_depth);
@@ -53,6 +46,7 @@ std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height
void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
u32 bytes_per_pixel, VAddr swizzled_data, VAddr unswizzled_data,
u32 block_height);
+
/// Copies a tiled subrectangle into a linear surface.
void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32 swizzled_width,
u32 bytes_per_pixel, VAddr swizzled_data, VAddr unswizzled_data,