Diffstat (limited to 'src/video_core')
-rw-r--r--  src/video_core/CMakeLists.txt | 60
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h | 176
-rw-r--r--  src/video_core/cdma_pusher.cpp | 29
-rw-r--r--  src/video_core/cdma_pusher.h | 18
-rw-r--r--  src/video_core/command_classes/host1x.cpp | 29
-rw-r--r--  src/video_core/control/channel_state.cpp | 40
-rw-r--r--  src/video_core/control/channel_state.h | 68
-rw-r--r--  src/video_core/control/channel_state_cache.cpp | 14
-rw-r--r--  src/video_core/control/channel_state_cache.h | 101
-rw-r--r--  src/video_core/control/channel_state_cache.inc | 86
-rw-r--r--  src/video_core/control/scheduler.cpp | 32
-rw-r--r--  src/video_core/control/scheduler.h | 37
-rw-r--r--  src/video_core/dirty_flags.cpp | 24
-rw-r--r--  src/video_core/dma_pusher.cpp | 26
-rw-r--r--  src/video_core/dma_pusher.h | 39
-rw-r--r--  src/video_core/engines/engine_upload.cpp | 46
-rw-r--r--  src/video_core/engines/engine_upload.h | 6
-rw-r--r--  src/video_core/engines/kepler_compute.cpp | 13
-rw-r--r--  src/video_core/engines/kepler_memory.cpp | 13
-rw-r--r--  src/video_core/engines/maxwell_3d.cpp | 578
-rw-r--r--  src/video_core/engines/maxwell_3d.h | 3877
-rw-r--r--  src/video_core/engines/maxwell_dma.cpp | 236
-rw-r--r--  src/video_core/engines/maxwell_dma.h | 8
-rw-r--r--  src/video_core/engines/puller.cpp | 305
-rw-r--r--  src/video_core/engines/puller.h | 177
-rw-r--r--  src/video_core/fence_manager.h | 104
-rw-r--r--  src/video_core/gpu.cpp | 706
-rw-r--r--  src/video_core/gpu.h | 143
-rw-r--r--  src/video_core/gpu_thread.cpp | 24
-rw-r--r--  src/video_core/gpu_thread.h | 14
-rw-r--r--  src/video_core/host1x/codecs/codec.cpp (renamed from src/video_core/command_classes/codecs/codec.cpp) | 44
-rw-r--r--  src/video_core/host1x/codecs/codec.h (renamed from src/video_core/command_classes/codecs/codec.h) | 21
-rw-r--r--  src/video_core/host1x/codecs/h264.cpp (renamed from src/video_core/command_classes/codecs/h264.cpp) | 17
-rw-r--r--  src/video_core/host1x/codecs/h264.h (renamed from src/video_core/command_classes/codecs/h264.h) | 16
-rw-r--r--  src/video_core/host1x/codecs/vp8.cpp (renamed from src/video_core/command_classes/codecs/vp8.cpp) | 12
-rw-r--r--  src/video_core/host1x/codecs/vp8.h (renamed from src/video_core/command_classes/codecs/vp8.h) | 15
-rw-r--r--  src/video_core/host1x/codecs/vp9.cpp (renamed from src/video_core/command_classes/codecs/vp9.cpp) | 23
-rw-r--r--  src/video_core/host1x/codecs/vp9.h (renamed from src/video_core/command_classes/codecs/vp9.h) | 22
-rw-r--r--  src/video_core/host1x/codecs/vp9_types.h (renamed from src/video_core/command_classes/codecs/vp9_types.h) | 1
-rw-r--r--  src/video_core/host1x/control.cpp | 33
-rw-r--r--  src/video_core/host1x/control.h (renamed from src/video_core/command_classes/host1x.h) | 20
-rw-r--r--  src/video_core/host1x/host1x.cpp | 17
-rw-r--r--  src/video_core/host1x/host1x.h | 57
-rw-r--r--  src/video_core/host1x/nvdec.cpp (renamed from src/video_core/command_classes/nvdec.cpp) | 11
-rw-r--r--  src/video_core/host1x/nvdec.h (renamed from src/video_core/command_classes/nvdec.h) | 14
-rw-r--r--  src/video_core/host1x/nvdec_common.h (renamed from src/video_core/command_classes/nvdec_common.h) | 4
-rw-r--r--  src/video_core/host1x/sync_manager.cpp (renamed from src/video_core/command_classes/sync_manager.cpp) | 13
-rw-r--r--  src/video_core/host1x/sync_manager.h (renamed from src/video_core/command_classes/sync_manager.h) | 12
-rw-r--r--  src/video_core/host1x/syncpoint_manager.cpp | 106
-rw-r--r--  src/video_core/host1x/syncpoint_manager.h | 98
-rw-r--r--  src/video_core/host1x/vic.cpp (renamed from src/video_core/command_classes/vic.cpp) | 36
-rw-r--r--  src/video_core/host1x/vic.h (renamed from src/video_core/command_classes/vic.h) | 13
-rw-r--r--  src/video_core/host_shaders/astc_decoder.comp | 2
-rw-r--r--  src/video_core/macro/macro.cpp | 1
-rw-r--r--  src/video_core/macro/macro_hle.cpp | 118
-rw-r--r--  src/video_core/macro/macro_interpreter.cpp | 2
-rw-r--r--  src/video_core/macro/macro_jit_x64.cpp | 64
-rw-r--r--  src/video_core/memory_manager.cpp | 785
-rw-r--r--  src/video_core/memory_manager.h | 189
-rw-r--r--  src/video_core/pte_kind.h | 264
-rw-r--r--  src/video_core/query_cache.h | 22
-rw-r--r--  src/video_core/rasterizer_interface.h | 22
-rw-r--r--  src/video_core/renderer_base.cpp | 8
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.cpp | 2
-rw-r--r--  src/video_core/renderer_opengl/gl_compute_pipeline.cpp | 20
-rw-r--r--  src/video_core/renderer_opengl/gl_compute_pipeline.h | 16
-rw-r--r--  src/video_core/renderer_opengl/gl_fence_manager.cpp | 13
-rw-r--r--  src/video_core/renderer_opengl/gl_fence_manager.h | 6
-rw-r--r--  src/video_core/renderer_opengl/gl_graphics_pipeline.cpp | 58
-rw-r--r--  src/video_core/renderer_opengl/gl_graphics_pipeline.h | 20
-rw-r--r--  src/video_core/renderer_opengl/gl_query_cache.cpp | 5
-rw-r--r--  src/video_core/renderer_opengl/gl_query_cache.h | 3
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp | 353
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h | 26
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.cpp | 86
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.h | 9
-rw-r--r--  src/video_core/renderer_opengl/gl_state_tracker.cpp | 70
-rw-r--r--  src/video_core/renderer_opengl/gl_state_tracker.h | 83
-rw-r--r--  src/video_core/renderer_opengl/maxwell_to_gl.h | 291
-rw-r--r--  src/video_core/renderer_opengl/renderer_opengl.cpp | 2
-rw-r--r--  src/video_core/renderer_vulkan/fixed_pipeline_state.cpp | 303
-rw-r--r--  src/video_core/renderer_vulkan/fixed_pipeline_state.h | 15
-rw-r--r--  src/video_core/renderer_vulkan/maxwell_to_vk.cpp | 341
-rw-r--r--  src/video_core/renderer_vulkan/maxwell_to_vk.h | 2
-rw-r--r--  src/video_core/renderer_vulkan/renderer_vulkan.cpp | 8
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.cpp | 19
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.h | 7
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.cpp | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.cpp | 4
-rw-r--r--  src/video_core/renderer_vulkan/vk_fence_manager.cpp | 15
-rw-r--r--  src/video_core/renderer_vulkan/vk_fence_manager.h | 6
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp | 32
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.h | 28
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 104
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.h | 6
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.cpp | 14
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.h | 5
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp | 266
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h | 34
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.cpp | 8
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.h | 4
-rw-r--r--  src/video_core/renderer_vulkan/vk_state_tracker.cpp | 68
-rw-r--r--  src/video_core/renderer_vulkan/vk_state_tracker.h | 27
-rw-r--r--  src/video_core/renderer_vulkan/vk_swapchain.cpp | 15
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp | 47
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.h | 16
-rw-r--r--  src/video_core/shader_cache.cpp | 37
-rw-r--r--  src/video_core/shader_cache.h | 15
-rw-r--r--  src/video_core/shader_environment.cpp | 23
-rw-r--r--  src/video_core/shader_environment.h | 2
-rw-r--r--  src/video_core/surface.cpp | 17
-rw-r--r--  src/video_core/surface.h | 16
-rw-r--r--  src/video_core/texture_cache/descriptor_table.h | 2
-rw-r--r--  src/video_core/texture_cache/format_lookup_table.cpp | 8
-rw-r--r--  src/video_core/texture_cache/formatter.h | 8
-rw-r--r--  src/video_core/texture_cache/image_base.cpp | 13
-rw-r--r--  src/video_core/texture_cache/image_base.h | 3
-rw-r--r--  src/video_core/texture_cache/image_info.cpp | 37
-rw-r--r--  src/video_core/texture_cache/render_targets.h | 1
-rw-r--r--  src/video_core/texture_cache/texture_cache.cpp | 15
-rw-r--r--  src/video_core/texture_cache/texture_cache.h | 229
-rw-r--r--  src/video_core/texture_cache/texture_cache_base.h | 85
-rw-r--r--  src/video_core/texture_cache/util.cpp | 3
-rw-r--r--  src/video_core/textures/astc.cpp | 8
-rw-r--r--  src/video_core/textures/decoders.cpp | 242
-rw-r--r--  src/video_core/textures/decoders.h | 33
-rw-r--r--  src/video_core/transform_feedback.cpp | 118
-rw-r--r--  src/video_core/transform_feedback.h | 3
-rw-r--r--  src/video_core/vulkan_common/vulkan_wrapper.h | 20
129 files changed, 8069 insertions, 4439 deletions
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 5b3808351..106991969 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -4,7 +4,7 @@
add_subdirectory(host_shaders)
if(LIBVA_FOUND)
- set_source_files_properties(command_classes/codecs/codec.cpp
+ set_source_files_properties(host1x/codecs/codec.cpp
PROPERTIES COMPILE_DEFINITIONS LIBVA_FOUND=1)
list(APPEND FFmpeg_LIBRARIES ${LIBVA_LIBRARIES})
endif()
@@ -15,26 +15,14 @@ add_library(video_core STATIC
buffer_cache/buffer_cache.h
cdma_pusher.cpp
cdma_pusher.h
- command_classes/codecs/codec.cpp
- command_classes/codecs/codec.h
- command_classes/codecs/h264.cpp
- command_classes/codecs/h264.h
- command_classes/codecs/vp8.cpp
- command_classes/codecs/vp8.h
- command_classes/codecs/vp9.cpp
- command_classes/codecs/vp9.h
- command_classes/codecs/vp9_types.h
- command_classes/host1x.cpp
- command_classes/host1x.h
- command_classes/nvdec.cpp
- command_classes/nvdec.h
- command_classes/nvdec_common.h
- command_classes/sync_manager.cpp
- command_classes/sync_manager.h
- command_classes/vic.cpp
- command_classes/vic.h
compatible_formats.cpp
compatible_formats.h
+ control/channel_state.cpp
+ control/channel_state.h
+ control/channel_state_cache.cpp
+ control/channel_state_cache.h
+ control/scheduler.cpp
+ control/scheduler.h
delayed_destruction_ring.h
dirty_flags.cpp
dirty_flags.h
@@ -54,7 +42,31 @@ add_library(video_core STATIC
engines/maxwell_3d.h
engines/maxwell_dma.cpp
engines/maxwell_dma.h
+ engines/puller.cpp
+ engines/puller.h
framebuffer_config.h
+ host1x/codecs/codec.cpp
+ host1x/codecs/codec.h
+ host1x/codecs/h264.cpp
+ host1x/codecs/h264.h
+ host1x/codecs/vp8.cpp
+ host1x/codecs/vp8.h
+ host1x/codecs/vp9.cpp
+ host1x/codecs/vp9.h
+ host1x/codecs/vp9_types.h
+ host1x/control.cpp
+ host1x/control.h
+ host1x/host1x.cpp
+ host1x/host1x.h
+ host1x/nvdec.cpp
+ host1x/nvdec.h
+ host1x/nvdec_common.h
+ host1x/sync_manager.cpp
+ host1x/sync_manager.h
+ host1x/syncpoint_manager.cpp
+ host1x/syncpoint_manager.h
+ host1x/vic.cpp
+ host1x/vic.h
macro/macro.cpp
macro/macro.h
macro/macro_hle.cpp
@@ -70,6 +82,7 @@ add_library(video_core STATIC
gpu_thread.h
memory_manager.cpp
memory_manager.h
+ pte_kind.h
query_cache.h
rasterizer_accelerated.cpp
rasterizer_accelerated.h
@@ -195,6 +208,7 @@ add_library(video_core STATIC
texture_cache/render_targets.h
texture_cache/samples_helper.h
texture_cache/slot_vector.h
+ texture_cache/texture_cache.cpp
texture_cache/texture_cache.h
texture_cache/texture_cache_base.h
texture_cache/types.h
@@ -265,14 +279,8 @@ if (MSVC)
else()
target_compile_options(video_core PRIVATE
-Werror=conversion
- -Wno-error=sign-conversion
- -Werror=pessimizing-move
- -Werror=redundant-move
- -Werror=type-limits
- $<$<CXX_COMPILER_ID:GNU>:-Werror=class-memaccess>
- $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
- $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
+ -Wno-sign-conversion
)
endif()
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index f015dae56..2ba33543c 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -5,7 +5,6 @@
#include <algorithm>
#include <array>
-#include <deque>
#include <memory>
#include <mutex>
#include <numeric>
@@ -23,6 +22,7 @@
#include "common/settings.h"
#include "core/memory.h"
#include "video_core/buffer_cache/buffer_base.h"
+#include "video_core/control/channel_state_cache.h"
#include "video_core/delayed_destruction_ring.h"
#include "video_core/dirty_flags.h"
#include "video_core/engines/kepler_compute.h"
@@ -56,7 +56,7 @@ using UniformBufferSizes = std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFE
using ComputeUniformBufferSizes = std::array<u32, NUM_COMPUTE_UNIFORM_BUFFERS>;
template <typename P>
-class BufferCache {
+class BufferCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
// Page size for caching purposes.
// This is unrelated to the CPU page size and it can be changed as it seems optimal.
@@ -116,10 +116,7 @@ public:
static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast<u32>(4_KiB);
explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_,
- Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::Engines::KeplerCompute& kepler_compute_,
- Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
- Runtime& runtime_);
+ Core::Memory::Memory& cpu_memory_, Runtime& runtime_);
void TickFrame();
@@ -129,7 +126,7 @@ public:
void DownloadMemory(VAddr cpu_addr, u64 size);
- bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<u8> inlined_buffer);
+ bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<const u8> inlined_buffer);
void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size);
@@ -353,7 +350,7 @@ private:
void NotifyBufferDeletion();
- [[nodiscard]] Binding StorageBufferBinding(GPUVAddr ssbo_addr) const;
+ [[nodiscard]] Binding StorageBufferBinding(GPUVAddr ssbo_addr, bool is_written = false) const;
[[nodiscard]] TextureBufferBinding GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size,
PixelFormat format);
@@ -367,9 +364,6 @@ private:
void ClearDownload(IntervalType subtract_interval);
VideoCore::RasterizerInterface& rasterizer;
- Tegra::Engines::Maxwell3D& maxwell3d;
- Tegra::Engines::KeplerCompute& kepler_compute;
- Tegra::MemoryManager& gpu_memory;
Core::Memory::Memory& cpu_memory;
SlotVector<Buffer> slot_buffers;
@@ -444,12 +438,8 @@ private:
template <class P>
BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
- Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::Engines::KeplerCompute& kepler_compute_,
- Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
- Runtime& runtime_)
- : runtime{runtime_}, rasterizer{rasterizer_}, maxwell3d{maxwell3d_},
- kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, cpu_memory{cpu_memory_} {
+ Core::Memory::Memory& cpu_memory_, Runtime& runtime_)
+ : runtime{runtime_}, rasterizer{rasterizer_}, cpu_memory{cpu_memory_} {
// Ensure the first slot is used for the null buffer
void(slot_buffers.insert(runtime, NullBufferParams{}));
common_ranges.clear();
@@ -552,8 +542,8 @@ void BufferCache<P>::ClearDownload(IntervalType subtract_interval) {
template <class P>
bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
- const std::optional<VAddr> cpu_src_address = gpu_memory.GpuToCpuAddress(src_address);
- const std::optional<VAddr> cpu_dest_address = gpu_memory.GpuToCpuAddress(dest_address);
+ const std::optional<VAddr> cpu_src_address = gpu_memory->GpuToCpuAddress(src_address);
+ const std::optional<VAddr> cpu_dest_address = gpu_memory->GpuToCpuAddress(dest_address);
if (!cpu_src_address || !cpu_dest_address) {
return false;
}
@@ -611,7 +601,7 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
template <class P>
bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) {
- const std::optional<VAddr> cpu_dst_address = gpu_memory.GpuToCpuAddress(dst_address);
+ const std::optional<VAddr> cpu_dst_address = gpu_memory->GpuToCpuAddress(dst_address);
if (!cpu_dst_address) {
return false;
}
@@ -635,7 +625,7 @@ bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) {
template <class P>
void BufferCache<P>::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr,
u32 size) {
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+ const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
const Binding binding{
.cpu_addr = *cpu_addr,
.size = size,
@@ -673,7 +663,7 @@ void BufferCache<P>::BindHostGeometryBuffers(bool is_indexed) {
if (is_indexed) {
BindHostIndexBuffer();
} else if constexpr (!HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) {
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
if (regs.draw.topology == Maxwell::PrimitiveTopology::Quads) {
runtime.BindQuadArrayIndexBuffer(regs.vertex_buffer.first, regs.vertex_buffer.count);
}
@@ -733,9 +723,9 @@ void BufferCache<P>::BindGraphicsStorageBuffer(size_t stage, size_t ssbo_index,
enabled_storage_buffers[stage] |= 1U << ssbo_index;
written_storage_buffers[stage] |= (is_written ? 1U : 0U) << ssbo_index;
- const auto& cbufs = maxwell3d.state.shader_stages[stage];
+ const auto& cbufs = maxwell3d->state.shader_stages[stage];
const GPUVAddr ssbo_addr = cbufs.const_buffers[cbuf_index].address + cbuf_offset;
- storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr);
+ storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr, is_written);
}
template <class P>
@@ -770,12 +760,12 @@ void BufferCache<P>::BindComputeStorageBuffer(size_t ssbo_index, u32 cbuf_index,
enabled_compute_storage_buffers |= 1U << ssbo_index;
written_compute_storage_buffers |= (is_written ? 1U : 0U) << ssbo_index;
- const auto& launch_desc = kepler_compute.launch_description;
+ const auto& launch_desc = kepler_compute->launch_description;
ASSERT(((launch_desc.const_buffer_enable_mask >> cbuf_index) & 1) != 0);
const auto& cbufs = launch_desc.const_buffer_config;
const GPUVAddr ssbo_addr = cbufs[cbuf_index].Address() + cbuf_offset;
- compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr);
+ compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr, is_written);
}
template <class P>
@@ -836,6 +826,19 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
const bool is_accuracy_normal =
Settings::values.gpu_accuracy.GetValue() == Settings::GPUAccuracy::Normal;
+ auto it = committed_ranges.begin();
+ while (it != committed_ranges.end()) {
+ auto& current_intervals = *it;
+ auto next_it = std::next(it);
+ while (next_it != committed_ranges.end()) {
+ for (auto& interval : *next_it) {
+ current_intervals.subtract(interval);
+ }
+ next_it++;
+ }
+ it++;
+ }
+
boost::container::small_vector<std::pair<BufferCopy, BufferId>, 1> downloads;
u64 total_size_bytes = 0;
u64 largest_copy = 0;
@@ -991,19 +994,19 @@ void BufferCache<P>::BindHostIndexBuffer() {
const u32 size = index_buffer.size;
SynchronizeBuffer(buffer, index_buffer.cpu_addr, size);
if constexpr (HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) {
- const u32 new_offset = offset + maxwell3d.regs.index_array.first *
- maxwell3d.regs.index_array.FormatSizeInBytes();
+ const u32 new_offset = offset + maxwell3d->regs.index_buffer.first *
+ maxwell3d->regs.index_buffer.FormatSizeInBytes();
runtime.BindIndexBuffer(buffer, new_offset, size);
} else {
- runtime.BindIndexBuffer(maxwell3d.regs.draw.topology, maxwell3d.regs.index_array.format,
- maxwell3d.regs.index_array.first, maxwell3d.regs.index_array.count,
- buffer, offset, size);
+ runtime.BindIndexBuffer(maxwell3d->regs.draw.topology, maxwell3d->regs.index_buffer.format,
+ maxwell3d->regs.index_buffer.first,
+ maxwell3d->regs.index_buffer.count, buffer, offset, size);
}
}
template <class P>
void BufferCache<P>::BindHostVertexBuffers() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) {
const Binding& binding = vertex_buffers[index];
Buffer& buffer = slot_buffers[binding.buffer_id];
@@ -1014,7 +1017,7 @@ void BufferCache<P>::BindHostVertexBuffers() {
}
flags[Dirty::VertexBuffer0 + index] = false;
- const u32 stride = maxwell3d.regs.vertex_array[index].stride;
+ const u32 stride = maxwell3d->regs.vertex_streams[index].stride;
const u32 offset = buffer.Offset(binding.cpu_addr);
runtime.BindVertexBuffer(index, buffer, offset, binding.size, stride);
}
@@ -1154,7 +1157,7 @@ void BufferCache<P>::BindHostGraphicsTextureBuffers(size_t stage) {
template <class P>
void BufferCache<P>::BindHostTransformFeedbackBuffers() {
- if (maxwell3d.regs.tfb_enabled == 0) {
+ if (maxwell3d->regs.transform_feedback_enabled == 0) {
return;
}
for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) {
@@ -1239,16 +1242,19 @@ void BufferCache<P>::BindHostComputeTextureBuffers() {
template <class P>
void BufferCache<P>::DoUpdateGraphicsBuffers(bool is_indexed) {
- if (is_indexed) {
- UpdateIndexBuffer();
- }
- UpdateVertexBuffers();
- UpdateTransformFeedbackBuffers();
- for (size_t stage = 0; stage < NUM_STAGES; ++stage) {
- UpdateUniformBuffers(stage);
- UpdateStorageBuffers(stage);
- UpdateTextureBuffers(stage);
- }
+ do {
+ has_deleted_buffers = false;
+ if (is_indexed) {
+ UpdateIndexBuffer();
+ }
+ UpdateVertexBuffers();
+ UpdateTransformFeedbackBuffers();
+ for (size_t stage = 0; stage < NUM_STAGES; ++stage) {
+ UpdateUniformBuffers(stage);
+ UpdateStorageBuffers(stage);
+ UpdateTextureBuffers(stage);
+ }
+ } while (has_deleted_buffers);
}
template <class P>
@@ -1262,8 +1268,8 @@ template <class P>
void BufferCache<P>::UpdateIndexBuffer() {
// We have to check for the dirty flags and index count
// The index count is currently changed without updating the dirty flags
- const auto& index_array = maxwell3d.regs.index_array;
- auto& flags = maxwell3d.dirty.flags;
+ const auto& index_array = maxwell3d->regs.index_buffer;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::IndexBuffer] && last_index_count == index_array.count) {
return;
}
@@ -1272,7 +1278,7 @@ void BufferCache<P>::UpdateIndexBuffer() {
const GPUVAddr gpu_addr_begin = index_array.StartAddress();
const GPUVAddr gpu_addr_end = index_array.EndAddress();
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr_begin);
+ const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin);
const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin);
const u32 draw_size = (index_array.count + index_array.first) * index_array.FormatSizeInBytes();
const u32 size = std::min(address_size, draw_size);
@@ -1289,8 +1295,8 @@ void BufferCache<P>::UpdateIndexBuffer() {
template <class P>
void BufferCache<P>::UpdateVertexBuffers() {
- auto& flags = maxwell3d.dirty.flags;
- if (!maxwell3d.dirty.flags[Dirty::VertexBuffers]) {
+ auto& flags = maxwell3d->dirty.flags;
+ if (!maxwell3d->dirty.flags[Dirty::VertexBuffers]) {
return;
}
flags[Dirty::VertexBuffers] = false;
@@ -1302,33 +1308,25 @@ void BufferCache<P>::UpdateVertexBuffers() {
template <class P>
void BufferCache<P>::UpdateVertexBuffer(u32 index) {
- if (!maxwell3d.dirty.flags[Dirty::VertexBuffer0 + index]) {
+ if (!maxwell3d->dirty.flags[Dirty::VertexBuffer0 + index]) {
return;
}
- const auto& array = maxwell3d.regs.vertex_array[index];
- const auto& limit = maxwell3d.regs.vertex_array_limit[index];
- const GPUVAddr gpu_addr_begin = array.StartAddress();
- const GPUVAddr gpu_addr_end = limit.LimitAddress() + 1;
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr_begin);
- u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin);
- if (address_size >= 64_MiB) {
- // Reported vertex buffer size is very large, cap to mapped buffer size
- GPUVAddr submapped_addr_end = gpu_addr_begin;
-
- const auto ranges{gpu_memory.GetSubmappedRange(gpu_addr_begin, address_size)};
- if (ranges.size() > 0) {
- const auto& [addr, size] = *ranges.begin();
- submapped_addr_end = addr + size;
- }
-
- address_size =
- std::min(address_size, static_cast<u32>(submapped_addr_end - gpu_addr_begin));
- }
- const u32 size = address_size; // TODO: Analyze stride and number of vertices
- if (array.enable == 0 || size == 0 || !cpu_addr) {
+ const auto& array = maxwell3d->regs.vertex_streams[index];
+ const auto& limit = maxwell3d->regs.vertex_stream_limits[index];
+ const GPUVAddr gpu_addr_begin = array.Address();
+ const GPUVAddr gpu_addr_end = limit.Address() + 1;
+ const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin);
+ u32 address_size = static_cast<u32>(
+ std::min(gpu_addr_end - gpu_addr_begin, static_cast<u64>(std::numeric_limits<u32>::max())));
+ if (array.enable == 0 || address_size == 0 || !cpu_addr) {
vertex_buffers[index] = NULL_BINDING;
return;
}
+ if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) {
+ address_size =
+ static_cast<u32>(gpu_memory->MaxContinousRange(gpu_addr_begin, address_size));
+ }
+ const u32 size = address_size; // TODO: Analyze stride and number of vertices
vertex_buffers[index] = Binding{
.cpu_addr = *cpu_addr,
.size = size,
@@ -1382,7 +1380,7 @@ void BufferCache<P>::UpdateTextureBuffers(size_t stage) {
template <class P>
void BufferCache<P>::UpdateTransformFeedbackBuffers() {
- if (maxwell3d.regs.tfb_enabled == 0) {
+ if (maxwell3d->regs.transform_feedback_enabled == 0) {
return;
}
for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) {
@@ -1392,11 +1390,11 @@ void BufferCache<P>::UpdateTransformFeedbackBuffers() {
template <class P>
void BufferCache<P>::UpdateTransformFeedbackBuffer(u32 index) {
- const auto& binding = maxwell3d.regs.tfb_bindings[index];
- const GPUVAddr gpu_addr = binding.Address() + binding.buffer_offset;
- const u32 size = binding.buffer_size;
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
- if (binding.buffer_enable == 0 || size == 0 || !cpu_addr) {
+ const auto& binding = maxwell3d->regs.transform_feedback.buffers[index];
+ const GPUVAddr gpu_addr = binding.Address() + binding.start_offset;
+ const u32 size = binding.size;
+ const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
+ if (binding.enable == 0 || size == 0 || !cpu_addr) {
transform_feedback_buffers[index] = NULL_BINDING;
return;
}
@@ -1414,10 +1412,10 @@ void BufferCache<P>::UpdateComputeUniformBuffers() {
ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) {
Binding& binding = compute_uniform_buffers[index];
binding = NULL_BINDING;
- const auto& launch_desc = kepler_compute.launch_description;
+ const auto& launch_desc = kepler_compute->launch_description;
if (((launch_desc.const_buffer_enable_mask >> index) & 1) != 0) {
const auto& cbuf = launch_desc.const_buffer_config[index];
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(cbuf.Address());
+ const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(cbuf.Address());
if (cpu_addr) {
binding.cpu_addr = *cpu_addr;
binding.size = cbuf.size;
@@ -1567,6 +1565,8 @@ BufferId BufferCache<P>::CreateBuffer(VAddr cpu_addr, u32 wanted_size) {
const OverlapResult overlap = ResolveOverlaps(cpu_addr, wanted_size);
const u32 size = static_cast<u32>(overlap.end - overlap.begin);
const BufferId new_buffer_id = slot_buffers.insert(runtime, rasterizer, overlap.begin, size);
+ auto& new_buffer = slot_buffers[new_buffer_id];
+ runtime.ClearBuffer(new_buffer, 0, new_buffer.SizeBytes(), 0);
for (const BufferId overlap_id : overlap.ids) {
JoinOverlap(new_buffer_id, overlap_id, !overlap.has_stream_leap);
}
@@ -1695,7 +1695,7 @@ void BufferCache<P>::MappedUploadMemory(Buffer& buffer, u64 total_size_bytes,
template <class P>
bool BufferCache<P>::InlineMemory(VAddr dest_address, size_t copy_size,
- std::span<u8> inlined_buffer) {
+ std::span<const u8> inlined_buffer) {
const bool is_dirty = IsRegionRegistered(dest_address, copy_size);
if (!is_dirty) {
return false;
@@ -1831,7 +1831,7 @@ void BufferCache<P>::NotifyBufferDeletion() {
dirty_uniform_buffers.fill(~u32{0});
uniform_buffer_binding_sizes.fill({});
}
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
flags[Dirty::IndexBuffer] = true;
flags[Dirty::VertexBuffers] = true;
for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) {
@@ -1841,16 +1841,18 @@ void BufferCache<P>::NotifyBufferDeletion() {
}
template <class P>
-typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr) const {
- const GPUVAddr gpu_addr = gpu_memory.Read<u64>(ssbo_addr);
- const u32 size = gpu_memory.Read<u32>(ssbo_addr + 8);
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr,
+ bool is_written) const {
+ const GPUVAddr gpu_addr = gpu_memory->Read<u64>(ssbo_addr);
+ const u32 size = gpu_memory->Read<u32>(ssbo_addr + 8);
+ const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
if (!cpu_addr || size == 0) {
return NULL_BINDING;
}
+ const VAddr cpu_end = Common::AlignUp(*cpu_addr + size, Core::Memory::YUZU_PAGESIZE);
const Binding binding{
.cpu_addr = *cpu_addr,
- .size = size,
+ .size = is_written ? size : static_cast<u32>(cpu_end - *cpu_addr),
.buffer_id = BufferId{},
};
return binding;
@@ -1859,7 +1861,7 @@ typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr s
template <class P>
typename BufferCache<P>::TextureBufferBinding BufferCache<P>::GetTextureBufferBinding(
GPUVAddr gpu_addr, u32 size, PixelFormat format) {
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+ const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
TextureBufferBinding binding;
if (!cpu_addr || size == 0) {
binding.cpu_addr = 0;
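The most subtle addition to buffer_cache.h is the loop at the top of CommitAsyncFlushesHigh: every entry of committed_ranges has the intervals of all later entries subtracted from it, so no byte range queued for download across several commits is downloaded twice. A minimal sketch of that subtraction with boost::icl, the library behind the cache's IntervalType; the concrete addresses here are made up for illustration:

    #include <cstdint>
    #include <deque>
    #include <iostream>
    #include <iterator>
    #include <boost/icl/interval_set.hpp>

    using IntervalSet = boost::icl::interval_set<std::uint64_t>;
    using Interval = IntervalSet::interval_type;

    int main() {
        // Two snapshots of committed ranges that overlap in [0x2000, 0x3000).
        std::deque<IntervalSet> committed_ranges(2);
        committed_ranges[0].add(Interval::right_open(0x1000, 0x3000));
        committed_ranges[1].add(Interval::right_open(0x2000, 0x4000));

        // Same shape as the new loop: subtract every later set from every
        // earlier one, so the overlap survives only in the later entry.
        for (auto it = committed_ranges.begin(); it != committed_ranges.end(); ++it) {
            for (auto next_it = std::next(it); next_it != committed_ranges.end(); ++next_it) {
                for (const auto& interval : *next_it) {
                    it->subtract(interval);
                }
            }
        }
        for (const auto& iv : committed_ranges[0]) {
            // Prints 1000..2000: only the non-overlapping part remains here.
            std::cout << std::hex << iv.lower() << ".." << iv.upper() << '\n';
        }
    }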
diff --git a/src/video_core/cdma_pusher.cpp b/src/video_core/cdma_pusher.cpp
index 8e890a85e..28a2d2090 100644
--- a/src/video_core/cdma_pusher.cpp
+++ b/src/video_core/cdma_pusher.cpp
@@ -2,20 +2,22 @@
// SPDX-License-Identifier: MIT
#include <bit>
-#include "command_classes/host1x.h"
-#include "command_classes/nvdec.h"
-#include "command_classes/vic.h"
#include "video_core/cdma_pusher.h"
-#include "video_core/command_classes/sync_manager.h"
#include "video_core/engines/maxwell_3d.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/control.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/nvdec.h"
+#include "video_core/host1x/nvdec_common.h"
+#include "video_core/host1x/sync_manager.h"
+#include "video_core/host1x/vic.h"
+#include "video_core/memory_manager.h"
namespace Tegra {
-CDmaPusher::CDmaPusher(GPU& gpu_)
- : gpu{gpu_}, nvdec_processor(std::make_shared<Nvdec>(gpu)),
- vic_processor(std::make_unique<Vic>(gpu, nvdec_processor)),
- host1x_processor(std::make_unique<Host1x>(gpu)),
- sync_manager(std::make_unique<SyncptIncrManager>(gpu)) {}
+CDmaPusher::CDmaPusher(Host1x::Host1x& host1x_)
+ : host1x{host1x_}, nvdec_processor(std::make_shared<Host1x::Nvdec>(host1x)),
+ vic_processor(std::make_unique<Host1x::Vic>(host1x, nvdec_processor)),
+ host1x_processor(std::make_unique<Host1x::Control>(host1x)),
+ sync_manager(std::make_unique<Host1x::SyncptIncrManager>(host1x)) {}
CDmaPusher::~CDmaPusher() = default;
@@ -109,16 +111,17 @@ void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) {
case ThiMethod::SetMethod1:
LOG_DEBUG(Service_NVDRV, "VIC method 0x{:X}, Args=({})",
static_cast<u32>(vic_thi_state.method_0), data);
- vic_processor->ProcessMethod(static_cast<Vic::Method>(vic_thi_state.method_0), data);
+ vic_processor->ProcessMethod(static_cast<Host1x::Vic::Method>(vic_thi_state.method_0),
+ data);
break;
default:
break;
}
break;
- case ChClassId::Host1x:
+ case ChClassId::Control:
// This device is mainly for syncpoint synchronization
LOG_DEBUG(Service_NVDRV, "Host1X Class Method");
- host1x_processor->ProcessMethod(static_cast<Host1x::Method>(offset), data);
+ host1x_processor->ProcessMethod(static_cast<Host1x::Control::Method>(offset), data);
break;
default:
UNIMPLEMENTED_MSG("Current class not implemented {:X}", static_cast<u32>(current_class));
diff --git a/src/video_core/cdma_pusher.h b/src/video_core/cdma_pusher.h
index d6ffef95f..83112dfce 100644
--- a/src/video_core/cdma_pusher.h
+++ b/src/video_core/cdma_pusher.h
@@ -12,11 +12,13 @@
namespace Tegra {
-class GPU;
+namespace Host1x {
+class Control;
class Host1x;
class Nvdec;
class SyncptIncrManager;
class Vic;
+} // namespace Host1x
enum class ChSubmissionMode : u32 {
SetClass = 0,
@@ -30,7 +32,7 @@ enum class ChSubmissionMode : u32 {
enum class ChClassId : u32 {
NoClass = 0x0,
- Host1x = 0x1,
+ Control = 0x1,
VideoEncodeMpeg = 0x20,
VideoEncodeNvEnc = 0x21,
VideoStreamingVi = 0x30,
@@ -88,7 +90,7 @@ enum class ThiMethod : u32 {
class CDmaPusher {
public:
- explicit CDmaPusher(GPU& gpu_);
+ explicit CDmaPusher(Host1x::Host1x& host1x);
~CDmaPusher();
/// Process the command entry
@@ -101,11 +103,11 @@ private:
/// Write arguments value to the ThiRegisters member at the specified offset
void ThiStateWrite(ThiRegisters& state, u32 offset, u32 argument);
- GPU& gpu;
- std::shared_ptr<Tegra::Nvdec> nvdec_processor;
- std::unique_ptr<Tegra::Vic> vic_processor;
- std::unique_ptr<Tegra::Host1x> host1x_processor;
- std::unique_ptr<SyncptIncrManager> sync_manager;
+ Host1x::Host1x& host1x;
+ std::shared_ptr<Tegra::Host1x::Nvdec> nvdec_processor;
+ std::unique_ptr<Tegra::Host1x::Vic> vic_processor;
+ std::unique_ptr<Tegra::Host1x::Control> host1x_processor;
+ std::unique_ptr<Host1x::SyncptIncrManager> sync_manager;
ChClassId current_class{};
ThiRegisters vic_thi_state{};
ThiRegisters nvdec_thi_state{};
diff --git a/src/video_core/command_classes/host1x.cpp b/src/video_core/command_classes/host1x.cpp
deleted file mode 100644
index 11855fe10..000000000
--- a/src/video_core/command_classes/host1x.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "common/assert.h"
-#include "video_core/command_classes/host1x.h"
-#include "video_core/gpu.h"
-
-Tegra::Host1x::Host1x(GPU& gpu_) : gpu(gpu_) {}
-
-Tegra::Host1x::~Host1x() = default;
-
-void Tegra::Host1x::ProcessMethod(Method method, u32 argument) {
- switch (method) {
- case Method::LoadSyncptPayload32:
- syncpoint_value = argument;
- break;
- case Method::WaitSyncpt:
- case Method::WaitSyncpt32:
- Execute(argument);
- break;
- default:
- UNIMPLEMENTED_MSG("Host1x method 0x{:X}", static_cast<u32>(method));
- break;
- }
-}
-
-void Tegra::Host1x::Execute(u32 data) {
- gpu.WaitFence(data, syncpoint_value);
-}
diff --git a/src/video_core/control/channel_state.cpp b/src/video_core/control/channel_state.cpp
new file mode 100644
index 000000000..cdecc3a91
--- /dev/null
+++ b/src/video_core/control/channel_state.cpp
@@ -0,0 +1,40 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/assert.h"
+#include "video_core/control/channel_state.h"
+#include "video_core/dma_pusher.h"
+#include "video_core/engines/fermi_2d.h"
+#include "video_core/engines/kepler_compute.h"
+#include "video_core/engines/kepler_memory.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/engines/maxwell_dma.h"
+#include "video_core/engines/puller.h"
+#include "video_core/memory_manager.h"
+
+namespace Tegra::Control {
+
+ChannelState::ChannelState(s32 bind_id_) : bind_id{bind_id_}, initialized{} {}
+
+void ChannelState::Init(Core::System& system, GPU& gpu) {
+ ASSERT(memory_manager);
+ dma_pusher = std::make_unique<Tegra::DmaPusher>(system, gpu, *memory_manager, *this);
+ maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, *memory_manager);
+ fermi_2d = std::make_unique<Engines::Fermi2D>();
+ kepler_compute = std::make_unique<Engines::KeplerCompute>(system, *memory_manager);
+ maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, *memory_manager);
+ kepler_memory = std::make_unique<Engines::KeplerMemory>(system, *memory_manager);
+ initialized = true;
+}
+
+void ChannelState::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) {
+ dma_pusher->BindRasterizer(rasterizer);
+ memory_manager->BindRasterizer(rasterizer);
+ maxwell_3d->BindRasterizer(rasterizer);
+ fermi_2d->BindRasterizer(rasterizer);
+ kepler_memory->BindRasterizer(rasterizer);
+ kepler_compute->BindRasterizer(rasterizer);
+ maxwell_dma->BindRasterizer(rasterizer);
+}
+
+} // namespace Tegra::Control
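channel_state.cpp encodes an initialization order: the channel's memory_manager must be attached before Init (hence the ASSERT), Init then builds the DMA pusher and the five engines, and BindRasterizer wires them to the renderer afterwards. A toy model of that contract, with empty stand-in types in place of the real Core::System, GPU and engines:

    #include <cassert>
    #include <memory>

    struct MemoryManager {};
    struct DmaPusher {};

    struct ChannelState {
        explicit ChannelState(int bind_id_) : bind_id{bind_id_} {}

        void Init() {
            // Mirrors the ASSERT(memory_manager) in ChannelState::Init.
            assert(memory_manager && "attach an address space before Init");
            dma_pusher = std::make_unique<DmaPusher>();
            initialized = true;
        }

        int bind_id;
        std::shared_ptr<MemoryManager> memory_manager;
        std::unique_ptr<DmaPusher> dma_pusher;
        bool initialized = false;
    };

    int main() {
        ChannelState channel{42};
        channel.memory_manager = std::make_shared<MemoryManager>();
        channel.Init(); // asserting here would mean the order was violated
        return channel.initialized ? 0 : 1;
    }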
diff --git a/src/video_core/control/channel_state.h b/src/video_core/control/channel_state.h
new file mode 100644
index 000000000..3a7b9872c
--- /dev/null
+++ b/src/video_core/control/channel_state.h
@@ -0,0 +1,68 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <memory>
+
+#include "common/common_types.h"
+
+namespace Core {
+class System;
+}
+
+namespace VideoCore {
+class RasterizerInterface;
+}
+
+namespace Tegra {
+
+class GPU;
+
+namespace Engines {
+class Puller;
+class Fermi2D;
+class Maxwell3D;
+class MaxwellDMA;
+class KeplerCompute;
+class KeplerMemory;
+} // namespace Engines
+
+class MemoryManager;
+class DmaPusher;
+
+namespace Control {
+
+struct ChannelState {
+ explicit ChannelState(s32 bind_id);
+ ChannelState(const ChannelState& state) = delete;
+ ChannelState& operator=(const ChannelState&) = delete;
+ ChannelState(ChannelState&& other) noexcept = default;
+ ChannelState& operator=(ChannelState&& other) noexcept = default;
+
+ void Init(Core::System& system, GPU& gpu);
+
+ void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
+
+ s32 bind_id = -1;
+ /// 3D engine
+ std::unique_ptr<Engines::Maxwell3D> maxwell_3d;
+ /// 2D engine
+ std::unique_ptr<Engines::Fermi2D> fermi_2d;
+ /// Compute engine
+ std::unique_ptr<Engines::KeplerCompute> kepler_compute;
+ /// DMA engine
+ std::unique_ptr<Engines::MaxwellDMA> maxwell_dma;
+ /// Inline memory engine
+ std::unique_ptr<Engines::KeplerMemory> kepler_memory;
+
+ std::shared_ptr<MemoryManager> memory_manager;
+
+ std::unique_ptr<DmaPusher> dma_pusher;
+
+ bool initialized{};
+};
+
+} // namespace Control
+
+} // namespace Tegra
diff --git a/src/video_core/control/channel_state_cache.cpp b/src/video_core/control/channel_state_cache.cpp
new file mode 100644
index 000000000..4ebeb6356
--- /dev/null
+++ b/src/video_core/control/channel_state_cache.cpp
@@ -0,0 +1,14 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "video_core/control/channel_state_cache.inc"
+
+namespace VideoCommon {
+
+ChannelInfo::ChannelInfo(Tegra::Control::ChannelState& channel_state)
+ : maxwell3d{*channel_state.maxwell_3d}, kepler_compute{*channel_state.kepler_compute},
+ gpu_memory{*channel_state.memory_manager} {}
+
+template class VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo>;
+
+} // namespace VideoCommon
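This .cpp stays at 14 lines because the template bodies live in channel_state_cache.inc; the `template class` line explicitly instantiates ChannelSetupCaches<ChannelInfo> once, so every cache using the plain ChannelInfo payload links against this translation unit instead of recompiling the template. Caches with a richer payload repeat the instantiation in their own .cpp (the texture_cache/texture_cache.cpp added by this commit plays that role). A compressed sketch of the header/.inc/.cpp split, with hypothetical file names:

    // widget.h -- declarations only; cheap to include everywhere.
    template <class T>
    class Widget {
    public:
        void Set(T value);
    private:
        T stored{};
    };

    // widget.inc -- out-of-line template definitions; included only by the
    // translation units that actually instantiate Widget.
    template <class T>
    void Widget<T>::Set(T value) {
        stored = value;
    }

    // widget.cpp -- includes widget.h and widget.inc, then pins down one
    // instantiation so users of Widget<int> link against this object file:
    template class Widget<int>;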
diff --git a/src/video_core/control/channel_state_cache.h b/src/video_core/control/channel_state_cache.h
new file mode 100644
index 000000000..584a0c26c
--- /dev/null
+++ b/src/video_core/control/channel_state_cache.h
@@ -0,0 +1,101 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <deque>
+#include <limits>
+#include <mutex>
+#include <optional>
+#include <unordered_map>
+#include <vector>
+
+#include "common/common_types.h"
+
+namespace Tegra {
+
+namespace Engines {
+class Maxwell3D;
+class KeplerCompute;
+} // namespace Engines
+
+class MemoryManager;
+
+namespace Control {
+struct ChannelState;
+}
+
+} // namespace Tegra
+
+namespace VideoCommon {
+
+class ChannelInfo {
+public:
+ ChannelInfo() = delete;
+ explicit ChannelInfo(Tegra::Control::ChannelState& state);
+ ChannelInfo(const ChannelInfo& state) = delete;
+ ChannelInfo& operator=(const ChannelInfo&) = delete;
+ ChannelInfo(ChannelInfo&& other) = default;
+ ChannelInfo& operator=(ChannelInfo&& other) = default;
+
+ Tegra::Engines::Maxwell3D& maxwell3d;
+ Tegra::Engines::KeplerCompute& kepler_compute;
+ Tegra::MemoryManager& gpu_memory;
+};
+
+template <class P>
+class ChannelSetupCaches {
+public:
+ /// Operations for setting the channel of execution.
+ virtual ~ChannelSetupCaches();
+
+ /// Create channel state.
+ virtual void CreateChannel(Tegra::Control::ChannelState& channel);
+
+ /// Bind a channel for execution.
+ void BindToChannel(s32 id);
+
+ /// Erase channel's state.
+ void EraseChannel(s32 id);
+
+ Tegra::MemoryManager* GetFromID(size_t id) const {
+ std::unique_lock<std::mutex> lk(config_mutex);
+ const auto ref = address_spaces.find(id);
+ return ref->second.gpu_memory;
+ }
+
+ std::optional<size_t> getStorageID(size_t id) const {
+ std::unique_lock<std::mutex> lk(config_mutex);
+ const auto ref = address_spaces.find(id);
+ if (ref == address_spaces.end()) {
+ return std::nullopt;
+ }
+ return ref->second.storage_id;
+ }
+
+protected:
+ static constexpr size_t UNSET_CHANNEL{std::numeric_limits<size_t>::max()};
+
+ P* channel_state;
+ size_t current_channel_id{UNSET_CHANNEL};
+ size_t current_address_space{};
+ Tegra::Engines::Maxwell3D* maxwell3d;
+ Tegra::Engines::KeplerCompute* kepler_compute;
+ Tegra::MemoryManager* gpu_memory;
+
+ std::deque<P> channel_storage;
+ std::deque<size_t> free_channel_ids;
+ std::unordered_map<s32, size_t> channel_map;
+ std::vector<size_t> active_channel_ids;
+ struct AddresSpaceRef {
+ size_t ref_count;
+ size_t storage_id;
+ Tegra::MemoryManager* gpu_memory;
+ };
+ std::unordered_map<size_t, AddresSpaceRef> address_spaces;
+ mutable std::mutex config_mutex;
+
+ virtual void OnGPUASRegister([[maybe_unused]] size_t map_id) {}
+};
+
+} // namespace VideoCommon

diff --git a/src/video_core/control/channel_state_cache.inc b/src/video_core/control/channel_state_cache.inc
new file mode 100644
index 000000000..460313893
--- /dev/null
+++ b/src/video_core/control/channel_state_cache.inc
@@ -0,0 +1,86 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <algorithm>
+
+#include "video_core/control/channel_state.h"
+#include "video_core/control/channel_state_cache.h"
+#include "video_core/engines/kepler_compute.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/memory_manager.h"
+
+namespace VideoCommon {
+
+template <class P>
+ChannelSetupCaches<P>::~ChannelSetupCaches() = default;
+
+template <class P>
+void ChannelSetupCaches<P>::CreateChannel(struct Tegra::Control::ChannelState& channel) {
+ std::unique_lock<std::mutex> lk(config_mutex);
+ ASSERT(channel_map.find(channel.bind_id) == channel_map.end() && channel.bind_id >= 0);
+ auto new_id = [this, &channel]() {
+ if (!free_channel_ids.empty()) {
+ auto id = free_channel_ids.front();
+ free_channel_ids.pop_front();
+ new (&channel_storage[id]) P(channel);
+ return id;
+ }
+ channel_storage.emplace_back(channel);
+ return channel_storage.size() - 1;
+ }();
+ channel_map.emplace(channel.bind_id, new_id);
+ if (current_channel_id != UNSET_CHANNEL) {
+ channel_state = &channel_storage[current_channel_id];
+ }
+ active_channel_ids.push_back(new_id);
+ auto as_it = address_spaces.find(channel.memory_manager->GetID());
+ if (as_it != address_spaces.end()) {
+ as_it->second.ref_count++;
+ return;
+ }
+ AddresSpaceRef new_gpu_mem_ref{
+ .ref_count = 1,
+ .storage_id = address_spaces.size(),
+ .gpu_memory = channel.memory_manager.get(),
+ };
+ address_spaces.emplace(channel.memory_manager->GetID(), new_gpu_mem_ref);
+ OnGPUASRegister(channel.memory_manager->GetID());
+}
+
+/// Bind a channel for execution.
+template <class P>
+void ChannelSetupCaches<P>::BindToChannel(s32 id) {
+ std::unique_lock<std::mutex> lk(config_mutex);
+ auto it = channel_map.find(id);
+ ASSERT(it != channel_map.end() && id >= 0);
+ current_channel_id = it->second;
+ channel_state = &channel_storage[current_channel_id];
+ maxwell3d = &channel_state->maxwell3d;
+ kepler_compute = &channel_state->kepler_compute;
+ gpu_memory = &channel_state->gpu_memory;
+ current_address_space = gpu_memory->GetID();
+}
+
+/// Erase channel's channel_state.
+template <class P>
+void ChannelSetupCaches<P>::EraseChannel(s32 id) {
+ std::unique_lock<std::mutex> lk(config_mutex);
+ const auto it = channel_map.find(id);
+ ASSERT(it != channel_map.end() && id >= 0);
+ const auto this_id = it->second;
+ free_channel_ids.push_back(this_id);
+ channel_map.erase(it);
+ if (this_id == current_channel_id) {
+ current_channel_id = UNSET_CHANNEL;
+ channel_state = nullptr;
+ maxwell3d = nullptr;
+ kepler_compute = nullptr;
+ gpu_memory = nullptr;
+ } else if (current_channel_id != UNSET_CHANNEL) {
+ channel_state = &channel_storage[current_channel_id];
+ }
+ active_channel_ids.erase(
+ std::find(active_channel_ids.begin(), active_channel_ids.end(), this_id));
+}
+
+} // namespace VideoCommon
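One detail of CreateChannel above is the recycling path: ChannelInfo holds reference members, so a freed slot in channel_storage cannot be reassigned, and the code re-seats it with placement new instead. A self-contained sketch of that trick; Engine and Info are stand-ins, and since the payload owns no resources, skipping the old object's destructor is harmless (C++20 also makes re-reading the slot through the container well-defined):

    #include <deque>
    #include <iostream>
    #include <new>

    struct Engine {
        int id;
    };

    struct Info {
        explicit Info(Engine& engine_) : engine{engine_} {}
        Engine& engine; // reference member: copy assignment is impossible
    };

    int main() {
        Engine a{1};
        Engine b{2};
        std::deque<Info> storage;
        storage.emplace_back(a);

        // Reuse slot 0 for a new channel. `storage[0] = Info{b};` would not
        // compile because of the reference member, so rebind in place:
        Info* slot = new (&storage[0]) Info(b);
        std::cout << slot->engine.id << '\n'; // prints 2
    }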
diff --git a/src/video_core/control/scheduler.cpp b/src/video_core/control/scheduler.cpp
new file mode 100644
index 000000000..f7cbe204e
--- /dev/null
+++ b/src/video_core/control/scheduler.cpp
@@ -0,0 +1,32 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <memory>
+
+#include "common/assert.h"
+#include "video_core/control/channel_state.h"
+#include "video_core/control/scheduler.h"
+#include "video_core/gpu.h"
+
+namespace Tegra::Control {
+Scheduler::Scheduler(GPU& gpu_) : gpu{gpu_} {}
+
+Scheduler::~Scheduler() = default;
+
+void Scheduler::Push(s32 channel, CommandList&& entries) {
+ std::unique_lock lk(scheduling_guard);
+ auto it = channels.find(channel);
+ ASSERT(it != channels.end());
+ auto channel_state = it->second;
+ gpu.BindChannel(channel_state->bind_id);
+ channel_state->dma_pusher->Push(std::move(entries));
+ channel_state->dma_pusher->DispatchCalls();
+}
+
+void Scheduler::DeclareChannel(std::shared_ptr<ChannelState> new_channel) {
+ s32 channel = new_channel->bind_id;
+ std::unique_lock lk(scheduling_guard);
+ channels.emplace(channel, new_channel);
+}
+
+} // namespace Tegra::Control
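The scheduler is intentionally small: DeclareChannel registers a channel once, and Push serializes all submission under scheduling_guard, binding the channel on the GPU before its DMA pusher dispatches the command list. A toy model of the same shape, with stand-ins for ChannelState and CommandList:

    #include <cassert>
    #include <iostream>
    #include <memory>
    #include <mutex>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    struct Channel {
        int bind_id;
        void Dispatch(std::vector<int>&& entries) {
            std::cout << "channel " << bind_id << ": " << entries.size() << " entries\n";
        }
    };

    class Scheduler {
    public:
        void DeclareChannel(std::shared_ptr<Channel> channel) {
            std::scoped_lock lk{guard};
            channels.emplace(channel->bind_id, std::move(channel));
        }

        void Push(int channel, std::vector<int>&& entries) {
            std::scoped_lock lk{guard}; // one submission at a time
            const auto it = channels.find(channel);
            assert(it != channels.end());
            it->second->Dispatch(std::move(entries)); // bind, then dispatch
        }

    private:
        std::unordered_map<int, std::shared_ptr<Channel>> channels;
        std::mutex guard;
    };

    int main() {
        Scheduler scheduler;
        scheduler.DeclareChannel(std::make_shared<Channel>(Channel{3}));
        scheduler.Push(3, {10, 20, 30});
    }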
diff --git a/src/video_core/control/scheduler.h b/src/video_core/control/scheduler.h
new file mode 100644
index 000000000..44addf61c
--- /dev/null
+++ b/src/video_core/control/scheduler.h
@@ -0,0 +1,37 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <memory>
+#include <mutex>
+#include <unordered_map>
+
+#include "video_core/dma_pusher.h"
+
+namespace Tegra {
+
+class GPU;
+
+namespace Control {
+
+struct ChannelState;
+
+class Scheduler {
+public:
+ explicit Scheduler(GPU& gpu_);
+ ~Scheduler();
+
+ void Push(s32 channel, CommandList&& entries);
+
+ void DeclareChannel(std::shared_ptr<ChannelState> new_channel);
+
+private:
+ std::unordered_map<s32, std::shared_ptr<ChannelState>> channels;
+ std::mutex scheduling_guard;
+ GPU& gpu;
+};
+
+} // namespace Control
+
+} // namespace Tegra
diff --git a/src/video_core/dirty_flags.cpp b/src/video_core/dirty_flags.cpp
index 9dc4341f0..c2ecc12f5 100644
--- a/src/video_core/dirty_flags.cpp
+++ b/src/video_core/dirty_flags.cpp
@@ -17,21 +17,23 @@ using Tegra::Engines::Maxwell3D;
void SetupDirtyVertexBuffers(Maxwell3D::DirtyState::Tables& tables) {
static constexpr std::size_t num_array = 3;
for (std::size_t i = 0; i < Maxwell3D::Regs::NumVertexArrays; ++i) {
- const std::size_t array_offset = OFF(vertex_array) + i * NUM(vertex_array[0]);
- const std::size_t limit_offset = OFF(vertex_array_limit) + i * NUM(vertex_array_limit[0]);
+ const std::size_t array_offset = OFF(vertex_streams) + i * NUM(vertex_streams[0]);
+ const std::size_t limit_offset =
+ OFF(vertex_stream_limits) + i * NUM(vertex_stream_limits[0]);
FillBlock(tables, array_offset, num_array, VertexBuffer0 + i, VertexBuffers);
- FillBlock(tables, limit_offset, NUM(vertex_array_limit), VertexBuffer0 + i, VertexBuffers);
+ FillBlock(tables, limit_offset, NUM(vertex_stream_limits), VertexBuffer0 + i,
+ VertexBuffers);
}
}
void SetupIndexBuffer(Maxwell3D::DirtyState::Tables& tables) {
- FillBlock(tables[0], OFF(index_array), NUM(index_array), IndexBuffer);
+ FillBlock(tables[0], OFF(index_buffer), NUM(index_buffer), IndexBuffer);
}
void SetupDirtyDescriptors(Maxwell3D::DirtyState::Tables& tables) {
- FillBlock(tables[0], OFF(tic), NUM(tic), Descriptors);
- FillBlock(tables[0], OFF(tsc), NUM(tsc), Descriptors);
+ FillBlock(tables[0], OFF(tex_header), NUM(tex_header), Descriptors);
+ FillBlock(tables[0], OFF(tex_sampler), NUM(tex_sampler), Descriptors);
}
void SetupDirtyRenderTargets(Maxwell3D::DirtyState::Tables& tables) {
@@ -42,7 +44,7 @@ void SetupDirtyRenderTargets(Maxwell3D::DirtyState::Tables& tables) {
FillBlock(tables[0], begin + rt * num_per_rt, num_per_rt, ColorBuffer0 + rt);
}
FillBlock(tables[1], begin, num, RenderTargets);
- FillBlock(tables[0], OFF(render_area), NUM(render_area), RenderTargets);
+ FillBlock(tables[0], OFF(surface_clip), NUM(surface_clip), RenderTargets);
tables[0][OFF(rt_control)] = RenderTargets;
tables[1][OFF(rt_control)] = RenderTargetControl;
@@ -52,15 +54,15 @@ void SetupDirtyRenderTargets(Maxwell3D::DirtyState::Tables& tables) {
const u8 flag = zeta_flags[i];
auto& table = tables[i];
table[OFF(zeta_enable)] = flag;
- table[OFF(zeta_width)] = flag;
- table[OFF(zeta_height)] = flag;
+ table[OFF(zeta_size.width)] = flag;
+ table[OFF(zeta_size.height)] = flag;
FillBlock(table, OFF(zeta), NUM(zeta), flag);
}
}
void SetupDirtyShaders(Maxwell3D::DirtyState::Tables& tables) {
- FillBlock(tables[0], OFF(shader_config[0]),
- NUM(shader_config[0]) * Maxwell3D::Regs::MaxShaderProgram, Shaders);
+ FillBlock(tables[0], OFF(pipelines), NUM(pipelines[0]) * Maxwell3D::Regs::MaxShaderProgram,
+ Shaders);
}
} // Anonymous namespace
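For readers new to this file: the dirty tables are flat byte arrays indexed by register offset, and FillBlock stamps one flag across a contiguous register range so that a register write can raise its dirty flag with a single table lookup. The changes above only retarget the offsets at the renamed Maxwell3D registers (vertex_array becomes vertex_streams, tic becomes tex_header, and so on). A reduced sketch of the mechanism, with made-up table sizes and flag values:

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    enum DirtyFlag : std::uint8_t { NoFlag = 0, VertexBuffers = 1, IndexBuffer = 2 };

    using Table = std::array<std::uint8_t, 64>; // real tables span every register

    void FillBlock(Table& table, std::size_t begin, std::size_t count, std::uint8_t flag) {
        for (std::size_t i = begin; i < begin + count; ++i) {
            table[i] = flag;
        }
    }

    int main() {
        Table table{};
        // Analogous to FillBlock(tables[0], OFF(vertex_streams), ...):
        FillBlock(table, 8, 4, VertexBuffers);
        FillBlock(table, 20, 2, IndexBuffer);

        const std::size_t written_reg = 9; // a write lands inside the block
        std::array<bool, 8> dirty{};
        if (table[written_reg] != NoFlag) {
            dirty[table[written_reg]] = true;
        }
        std::cout << "VertexBuffers dirty: " << dirty[VertexBuffers] << '\n';
    }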
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp
index 29b8582ab..9835e3ac1 100644
--- a/src/video_core/dma_pusher.cpp
+++ b/src/video_core/dma_pusher.cpp
@@ -12,7 +12,10 @@
namespace Tegra {
-DmaPusher::DmaPusher(Core::System& system_, GPU& gpu_) : gpu{gpu_}, system{system_} {}
+DmaPusher::DmaPusher(Core::System& system_, GPU& gpu_, MemoryManager& memory_manager_,
+ Control::ChannelState& channel_state_)
+ : gpu{gpu_}, system{system_}, memory_manager{memory_manager_}, puller{gpu_, memory_manager_,
+ *this, channel_state_} {}
DmaPusher::~DmaPusher() = default;
@@ -21,8 +24,6 @@ MICROPROFILE_DEFINE(DispatchCalls, "GPU", "Execute command buffer", MP_RGB(128,
void DmaPusher::DispatchCalls() {
MICROPROFILE_SCOPE(DispatchCalls);
- gpu.SyncGuestHost();
-
dma_pushbuffer_subindex = 0;
dma_state.is_last_call = true;
@@ -33,7 +34,6 @@ void DmaPusher::DispatchCalls() {
}
}
gpu.FlushCommands();
- gpu.SyncGuestHost();
gpu.OnCommandListEnd();
}
@@ -76,11 +76,11 @@ bool DmaPusher::Step() {
// Push buffer non-empty, read a word
command_headers.resize(command_list_header.size);
if (Settings::IsGPULevelHigh()) {
- gpu.MemoryManager().ReadBlock(dma_get, command_headers.data(),
- command_list_header.size * sizeof(u32));
+ memory_manager.ReadBlock(dma_get, command_headers.data(),
+ command_list_header.size * sizeof(u32));
} else {
- gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(),
- command_list_header.size * sizeof(u32));
+ memory_manager.ReadBlockUnsafe(dma_get, command_headers.data(),
+ command_list_header.size * sizeof(u32));
}
}
for (std::size_t index = 0; index < command_headers.size();) {
@@ -154,7 +154,7 @@ void DmaPusher::SetState(const CommandHeader& command_header) {
void DmaPusher::CallMethod(u32 argument) const {
if (dma_state.method < non_puller_methods) {
- gpu.CallMethod(GPU::MethodCall{
+ puller.CallPullerMethod(Engines::Puller::MethodCall{
dma_state.method,
argument,
dma_state.subchannel,
@@ -168,12 +168,16 @@ void DmaPusher::CallMethod(u32 argument) const {
void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const {
if (dma_state.method < non_puller_methods) {
- gpu.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods,
- dma_state.method_count);
+ puller.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods,
+ dma_state.method_count);
} else {
subchannels[dma_state.subchannel]->CallMultiMethod(dma_state.method, base_start,
num_methods, dma_state.method_count);
}
}
+void DmaPusher::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) {
+ puller.BindRasterizer(rasterizer);
+}
+
} // namespace Tegra
diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h
index 872fd146a..938f0f11c 100644
--- a/src/video_core/dma_pusher.h
+++ b/src/video_core/dma_pusher.h
@@ -10,6 +10,7 @@
#include "common/bit_field.h"
#include "common/common_types.h"
#include "video_core/engines/engine_interface.h"
+#include "video_core/engines/puller.h"
namespace Core {
class System;
@@ -17,7 +18,12 @@ class System;
namespace Tegra {
+namespace Control {
+struct ChannelState;
+}
+
class GPU;
+class MemoryManager;
enum class SubmissionMode : u32 {
IncreasingOld = 0,
@@ -31,24 +37,32 @@ enum class SubmissionMode : u32 {
// Note that, traditionally, methods are treated as 4-byte addressable locations, and hence
// their numbers are written down multiplied by 4 in Docs. Here we are not multiply by 4.
// So the values you see in docs might be multiplied by 4.
+// Register documentation:
+// https://github.com/NVIDIA/open-gpu-doc/blob/ab27fc22db5de0d02a4cabe08e555663b62db4d4/classes/host/cla26f.h
+//
+// Register Description (approx):
+// https://github.com/NVIDIA/open-gpu-doc/blob/ab27fc22db5de0d02a4cabe08e555663b62db4d4/manuals/volta/gv100/dev_pbdma.ref.txt
enum class BufferMethods : u32 {
BindObject = 0x0,
+ Illegal = 0x1,
Nop = 0x2,
SemaphoreAddressHigh = 0x4,
SemaphoreAddressLow = 0x5,
- SemaphoreSequence = 0x6,
- SemaphoreTrigger = 0x7,
- NotifyIntr = 0x8,
+ SemaphoreSequencePayload = 0x6,
+ SemaphoreOperation = 0x7,
+ NonStallInterrupt = 0x8,
WrcacheFlush = 0x9,
- Unk28 = 0xA,
- UnkCacheFlush = 0xB,
+ MemOpA = 0xA,
+ MemOpB = 0xB,
+ MemOpC = 0xC,
+ MemOpD = 0xD,
RefCnt = 0x14,
SemaphoreAcquire = 0x1A,
SemaphoreRelease = 0x1B,
- FenceValue = 0x1C,
- FenceAction = 0x1D,
- WaitForInterrupt = 0x1E,
- Unk7c = 0x1F,
+ SyncpointPayload = 0x1C,
+ SyncpointOperation = 0x1D,
+ WaitForIdle = 0x1E,
+ CRCCheck = 0x1F,
Yield = 0x20,
NonPullerMethods = 0x40,
};
@@ -102,7 +116,8 @@ struct CommandList final {
*/
class DmaPusher final {
public:
- explicit DmaPusher(Core::System& system_, GPU& gpu_);
+ explicit DmaPusher(Core::System& system_, GPU& gpu_, MemoryManager& memory_manager_,
+ Control::ChannelState& channel_state_);
~DmaPusher();
void Push(CommandList&& entries) {
@@ -115,6 +130,8 @@ public:
subchannels[subchannel_id] = engine;
}
+ void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
+
private:
static constexpr u32 non_puller_methods = 0x40;
static constexpr u32 max_subchannels = 8;
@@ -148,6 +165,8 @@ private:
GPU& gpu;
Core::System& system;
+ MemoryManager& memory_manager;
+ mutable Engines::Puller puller;
};
} // namespace Tegra
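The constant non_puller_methods (0x40, mirrored by BufferMethods::NonPullerMethods) is the dispatch threshold: method ids below it are GPFIFO/PBDMA controls handled by the new Engines::Puller, while anything above goes to whichever engine BindObject attached to the subchannel. A stripped-down sketch of that split; the engine types and method values are stand-ins:

    #include <array>
    #include <cstdint>
    #include <iostream>

    constexpr std::uint32_t non_puller_methods = 0x40;

    struct Engine {
        const char* name;
        void CallMethod(std::uint32_t method, std::uint32_t argument) {
            std::cout << name << " handles method 0x" << std::hex << method << '\n';
        }
    };

    int main() {
        Engine puller{"puller"};
        Engine maxwell_3d{"maxwell_3d"};

        std::array<Engine*, 8> subchannels{};
        subchannels[0] = &maxwell_3d; // what BindSubchannel does

        const auto dispatch = [&](std::uint32_t method, std::uint32_t subchannel) {
            if (method < non_puller_methods) {
                puller.CallMethod(method, 0); // e.g. SemaphoreAcquire (0x1A)
            } else {
                subchannels[subchannel]->CallMethod(method, 0);
            }
        };

        dispatch(0x1A, 0);  // below the threshold: goes to the puller
        dispatch(0x8C4, 0); // above it: goes to the bound engine
    }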
diff --git a/src/video_core/engines/engine_upload.cpp b/src/video_core/engines/engine_upload.cpp
index 6ff5b1eca..a34819234 100644
--- a/src/video_core/engines/engine_upload.cpp
+++ b/src/video_core/engines/engine_upload.cpp
@@ -3,6 +3,7 @@
#include <cstring>
+#include "common/algorithm.h"
#include "common/assert.h"
#include "video_core/engines/engine_upload.h"
#include "video_core/memory_manager.h"
@@ -34,21 +35,48 @@ void State::ProcessData(const u32 data, const bool is_last_call) {
if (!is_last_call) {
return;
}
+ ProcessData(inner_buffer);
+}
+
+void State::ProcessData(const u32* data, size_t num_data) {
+ std::span<const u8> read_buffer(reinterpret_cast<const u8*>(data), num_data * sizeof(u32));
+ ProcessData(read_buffer);
+}
+
+void State::ProcessData(std::span<const u8> read_buffer) {
const GPUVAddr address{regs.dest.Address()};
if (is_linear) {
- rasterizer->AccelerateInlineToMemory(address, copy_size, inner_buffer);
+ if (regs.line_count == 1) {
+ rasterizer->AccelerateInlineToMemory(address, copy_size, read_buffer);
+ } else {
+ for (u32 line = 0; line < regs.line_count; ++line) {
+ const GPUVAddr dest_line = address + static_cast<size_t>(line) * regs.dest.pitch;
+ memory_manager.WriteBlockUnsafe(
+ dest_line, read_buffer.data() + static_cast<size_t>(line) * regs.line_length_in,
+ regs.line_length_in);
+ }
+ memory_manager.InvalidateRegion(address, regs.dest.pitch * regs.line_count);
+ }
} else {
- UNIMPLEMENTED_IF(regs.dest.z != 0);
- UNIMPLEMENTED_IF(regs.dest.depth != 1);
- UNIMPLEMENTED_IF(regs.dest.BlockWidth() != 0);
- UNIMPLEMENTED_IF(regs.dest.BlockDepth() != 0);
+ u32 width = regs.dest.width;
+ u32 x_elements = regs.line_length_in;
+ u32 x_offset = regs.dest.x;
+ const u32 bpp_shift = Common::FoldRight(
+ 4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
+ width, x_elements, x_offset, static_cast<u32>(address));
+ width >>= bpp_shift;
+ x_elements >>= bpp_shift;
+ x_offset >>= bpp_shift;
+ const u32 bytes_per_pixel = 1U << bpp_shift;
const std::size_t dst_size = Tegra::Texture::CalculateSize(
- true, 1, regs.dest.width, regs.dest.height, 1, regs.dest.BlockHeight(), 0);
+ true, bytes_per_pixel, width, regs.dest.height, regs.dest.depth,
+ regs.dest.BlockHeight(), regs.dest.BlockDepth());
tmp_buffer.resize(dst_size);
memory_manager.ReadBlock(address, tmp_buffer.data(), dst_size);
- Tegra::Texture::SwizzleKepler(regs.dest.width, regs.dest.height, regs.dest.x, regs.dest.y,
- regs.dest.BlockHeight(), copy_size, inner_buffer.data(),
- tmp_buffer.data());
+ Tegra::Texture::SwizzleSubrect(tmp_buffer, read_buffer, bytes_per_pixel, width,
+ regs.dest.height, regs.dest.depth, x_offset, regs.dest.y,
+ x_elements, regs.line_count, regs.dest.BlockHeight(),
+ regs.dest.BlockDepth(), regs.line_length_in);
memory_manager.WriteBlock(address, tmp_buffer.data(), dst_size);
}
}
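
The block-linear path above no longer assumes a fixed layout; it guesses the texel
size from alignment: Common::FoldRight folds std::countr_zero over the width, line
length, x offset and destination address, so the largest power of two dividing all
of them (capped at 16 bytes) becomes the assumed bytes-per-pixel for the swizzle.
A self-contained sketch of that computation, assuming only the standard library
rather than the yuzu helpers:

    #include <algorithm>
    #include <bit>
    #include <cstdint>
    #include <initializer_list>

    // Largest shift s (s <= 4) such that 1 << s divides every input; 1 << s is the
    // bytes-per-pixel guess used by the swizzle above.
    constexpr std::uint32_t BppShift(std::uint32_t width, std::uint32_t x_elements,
                                     std::uint32_t x_offset, std::uint32_t address) {
        std::uint32_t shift = 4U; // start from the 16-byte (e.g. RGBA32F) upper bound
        for (const std::uint32_t value : {width, x_elements, x_offset, address}) {
            shift = std::min(shift, static_cast<std::uint32_t>(std::countr_zero(value)));
        }
        return shift;
    }

    static_assert(BppShift(64, 32, 16, 0x1000) == 4); // everything divisible by 16
    static_assert(BppShift(64, 30, 16, 0x1000) == 1); // 30 limits texels to 2 bytes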
diff --git a/src/video_core/engines/engine_upload.h b/src/video_core/engines/engine_upload.h
index 94ff3314a..f08f6e36a 100644
--- a/src/video_core/engines/engine_upload.h
+++ b/src/video_core/engines/engine_upload.h
@@ -3,6 +3,7 @@
#pragma once
+#include <span>
#include <vector>
#include "common/bit_field.h"
#include "common/common_types.h"
@@ -33,7 +34,7 @@ struct Registers {
u32 width;
u32 height;
u32 depth;
- u32 z;
+ u32 layer;
u32 x;
u32 y;
@@ -62,11 +63,14 @@ public:
void ProcessExec(bool is_linear_);
void ProcessData(u32 data, bool is_last_call);
+ void ProcessData(const u32* data, size_t num_data);
/// Binds a rasterizer to this engine.
void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
private:
+ void ProcessData(std::span<const u8> read_buffer);
+
u32 write_offset = 0;
u32 copy_size = 0;
std::vector<u8> inner_buffer;
diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp
index 5db254d94..7c50bdbe0 100644
--- a/src/video_core/engines/kepler_compute.cpp
+++ b/src/video_core/engines/kepler_compute.cpp
@@ -36,8 +36,6 @@ void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_cal
}
case KEPLER_COMPUTE_REG_INDEX(data_upload): {
upload_state.ProcessData(method_argument, is_last_call);
- if (is_last_call) {
- }
break;
}
case KEPLER_COMPUTE_REG_INDEX(launch):
@@ -50,8 +48,15 @@ void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_cal
void KeplerCompute::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
u32 methods_pending) {
- for (std::size_t i = 0; i < amount; i++) {
- CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
+ switch (method) {
+ case KEPLER_COMPUTE_REG_INDEX(data_upload):
+ upload_state.ProcessData(base_start, static_cast<size_t>(amount));
+ return;
+ default:
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
+ }
+ break;
}
}
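
KeplerCompute::CallMultiMethod now short-circuits bursts aimed at the data_upload
register into a single span-based ProcessData call instead of re-entering
CallMethod once per word. A free-standing sketch of that dispatch shape (all names
invented for the example, not yuzu API):

    #include <cstddef>
    #include <cstdint>
    #include <functional>

    void DispatchMulti(std::uint32_t data_reg, std::uint32_t method,
                       const std::uint32_t* base, std::uint32_t amount,
                       std::uint32_t pending,
                       const std::function<void(const std::uint32_t*, std::size_t)>& bulk,
                       const std::function<void(std::uint32_t, std::uint32_t, bool)>& single) {
        if (method == data_reg) {
            bulk(base, amount); // one ProcessData call covers the whole burst
            return;
        }
        for (std::uint32_t i = 0; i < amount; ++i) {
            single(method, base[i], pending - i <= 1); // legacy per-word path
        }
    }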
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp
index e2b029542..a3fbab1e5 100644
--- a/src/video_core/engines/kepler_memory.cpp
+++ b/src/video_core/engines/kepler_memory.cpp
@@ -33,8 +33,6 @@ void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call
}
case KEPLERMEMORY_REG_INDEX(data): {
upload_state.ProcessData(method_argument, is_last_call);
- if (is_last_call) {
- }
break;
}
}
@@ -42,8 +40,15 @@ void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call
void KeplerMemory::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
u32 methods_pending) {
- for (std::size_t i = 0; i < amount; i++) {
- CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
+ switch (method) {
+ case KEPLERMEMORY_REG_INDEX(data):
+ upload_state.ProcessData(base_start, static_cast<size_t>(amount));
+ return;
+ default:
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
+ }
+ break;
}
}
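
KeplerMemory takes the same fast path for its data register. Both engines end up
in Upload::State::ProcessData(const u32*, size_t), which simply reinterprets the
method arguments as a byte span before the shared byte-oriented path runs, as in
this stand-alone illustration (helper name invented for the example):

    #include <cstdint>
    #include <span>

    // Each 32-bit method argument contributes four bytes (in host byte order) to
    // the upload, so a burst of N words is exactly a 4*N byte span.
    std::span<const std::uint8_t> AsBytes(const std::uint32_t* words, std::size_t count) {
        return {reinterpret_cast<const std::uint8_t*>(words),
                count * sizeof(std::uint32_t)};
    }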
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 3a4646289..f9794dfe4 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -56,37 +56,37 @@ void Maxwell3D::InitializeRegisterDefaults() {
// Doom and Bomberman seem to use the uninitialized registers and just enable blend,
// so initialize the blend registers with sane values
- regs.blend.equation_rgb = Regs::Blend::Equation::Add;
- regs.blend.factor_source_rgb = Regs::Blend::Factor::One;
- regs.blend.factor_dest_rgb = Regs::Blend::Factor::Zero;
- regs.blend.equation_a = Regs::Blend::Equation::Add;
- regs.blend.factor_source_a = Regs::Blend::Factor::One;
- regs.blend.factor_dest_a = Regs::Blend::Factor::Zero;
- for (auto& blend : regs.independent_blend) {
- blend.equation_rgb = Regs::Blend::Equation::Add;
- blend.factor_source_rgb = Regs::Blend::Factor::One;
- blend.factor_dest_rgb = Regs::Blend::Factor::Zero;
- blend.equation_a = Regs::Blend::Equation::Add;
- blend.factor_source_a = Regs::Blend::Factor::One;
- blend.factor_dest_a = Regs::Blend::Factor::Zero;
- }
- regs.stencil_front_op_fail = Regs::StencilOp::Keep;
- regs.stencil_front_op_zfail = Regs::StencilOp::Keep;
- regs.stencil_front_op_zpass = Regs::StencilOp::Keep;
- regs.stencil_front_func_func = Regs::ComparisonOp::Always;
+ regs.blend.color_op = Regs::Blend::Equation::Add_D3D;
+ regs.blend.color_source = Regs::Blend::Factor::One_D3D;
+ regs.blend.color_dest = Regs::Blend::Factor::Zero_D3D;
+ regs.blend.alpha_op = Regs::Blend::Equation::Add_D3D;
+ regs.blend.alpha_source = Regs::Blend::Factor::One_D3D;
+ regs.blend.alpha_dest = Regs::Blend::Factor::Zero_D3D;
+ for (auto& blend : regs.blend_per_target) {
+ blend.color_op = Regs::Blend::Equation::Add_D3D;
+ blend.color_source = Regs::Blend::Factor::One_D3D;
+ blend.color_dest = Regs::Blend::Factor::Zero_D3D;
+ blend.alpha_op = Regs::Blend::Equation::Add_D3D;
+ blend.alpha_source = Regs::Blend::Factor::One_D3D;
+ blend.alpha_dest = Regs::Blend::Factor::Zero_D3D;
+ }
+ regs.stencil_front_op.fail = Regs::StencilOp::Op::Keep_D3D;
+ regs.stencil_front_op.zfail = Regs::StencilOp::Op::Keep_D3D;
+ regs.stencil_front_op.zpass = Regs::StencilOp::Op::Keep_D3D;
+ regs.stencil_front_op.func = Regs::ComparisonOp::Always_GL;
regs.stencil_front_func_mask = 0xFFFFFFFF;
regs.stencil_front_mask = 0xFFFFFFFF;
regs.stencil_two_side_enable = 1;
- regs.stencil_back_op_fail = Regs::StencilOp::Keep;
- regs.stencil_back_op_zfail = Regs::StencilOp::Keep;
- regs.stencil_back_op_zpass = Regs::StencilOp::Keep;
- regs.stencil_back_func_func = Regs::ComparisonOp::Always;
+ regs.stencil_back_op.fail = Regs::StencilOp::Op::Keep_D3D;
+ regs.stencil_back_op.zfail = Regs::StencilOp::Op::Keep_D3D;
+ regs.stencil_back_op.zpass = Regs::StencilOp::Op::Keep_D3D;
+ regs.stencil_back_op.func = Regs::ComparisonOp::Always_GL;
regs.stencil_back_func_mask = 0xFFFFFFFF;
regs.stencil_back_mask = 0xFFFFFFFF;
- regs.depth_test_func = Regs::ComparisonOp::Always;
- regs.front_face = Regs::FrontFace::CounterClockWise;
- regs.cull_face = Regs::CullFace::Back;
+ regs.depth_test_func = Regs::ComparisonOp::Always_GL;
+ regs.gl_front_face = Regs::FrontFace::CounterClockWise;
+ regs.gl_cull_face = Regs::CullFace::Back;
// TODO(Rodrigo): Most games do not set a point size. I think this is a case of a
// register carrying a default value. Assume it's OpenGL's default (1).
@@ -107,20 +107,25 @@ void Maxwell3D::InitializeRegisterDefaults() {
// NVN games expect these values to be enabled at boot
regs.rasterize_enable = 1;
- regs.rt_separate_frag_data = 1;
+ regs.color_target_mrt_enable = 1;
regs.framebuffer_srgb = 1;
regs.line_width_aliased = 1.0f;
regs.line_width_smooth = 1.0f;
- regs.front_face = Maxwell3D::Regs::FrontFace::ClockWise;
+ regs.gl_front_face = Maxwell3D::Regs::FrontFace::ClockWise;
regs.polygon_mode_back = Maxwell3D::Regs::PolygonMode::Fill;
regs.polygon_mode_front = Maxwell3D::Regs::PolygonMode::Fill;
shadow_state = regs;
- mme_inline[MAXWELL3D_REG_INDEX(draw.vertex_end_gl)] = true;
- mme_inline[MAXWELL3D_REG_INDEX(draw.vertex_begin_gl)] = true;
- mme_inline[MAXWELL3D_REG_INDEX(vertex_buffer.count)] = true;
- mme_inline[MAXWELL3D_REG_INDEX(index_array.count)] = true;
+ draw_command[MAXWELL3D_REG_INDEX(draw.end)] = true;
+ draw_command[MAXWELL3D_REG_INDEX(draw.begin)] = true;
+ draw_command[MAXWELL3D_REG_INDEX(vertex_buffer.first)] = true;
+ draw_command[MAXWELL3D_REG_INDEX(vertex_buffer.count)] = true;
+ draw_command[MAXWELL3D_REG_INDEX(index_buffer.first)] = true;
+ draw_command[MAXWELL3D_REG_INDEX(index_buffer.count)] = true;
+ draw_command[MAXWELL3D_REG_INDEX(draw_inline_index)] = true;
+ draw_command[MAXWELL3D_REG_INDEX(inline_index_2x16.even)] = true;
+ draw_command[MAXWELL3D_REG_INDEX(inline_index_4x8.index0)] = true;
}
void Maxwell3D::ProcessMacro(u32 method, const u32* base_start, u32 amount, bool is_last_call) {
@@ -173,77 +178,77 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume
case MAXWELL3D_REG_INDEX(shadow_ram_control):
shadow_state.shadow_ram_control = static_cast<Regs::ShadowRamControl>(nonshadow_argument);
return;
- case MAXWELL3D_REG_INDEX(macros.upload_address):
- return macro_engine->ClearCode(regs.macros.upload_address);
- case MAXWELL3D_REG_INDEX(macros.data):
- return macro_engine->AddCode(regs.macros.upload_address, argument);
- case MAXWELL3D_REG_INDEX(macros.bind):
+ case MAXWELL3D_REG_INDEX(load_mme.instruction_ptr):
+ return macro_engine->ClearCode(regs.load_mme.instruction_ptr);
+ case MAXWELL3D_REG_INDEX(load_mme.instruction):
+ return macro_engine->AddCode(regs.load_mme.instruction_ptr, argument);
+ case MAXWELL3D_REG_INDEX(load_mme.start_address):
return ProcessMacroBind(argument);
- case MAXWELL3D_REG_INDEX(firmware[4]):
+ case MAXWELL3D_REG_INDEX(falcon[4]):
return ProcessFirmwareCall4();
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data):
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 1:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 2:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 3:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 4:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 5:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 6:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 7:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 8:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 9:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 10:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 11:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 12:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 13:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 14:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 15:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer):
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 1:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 2:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 3:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 4:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 5:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 6:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 7:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 8:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 9:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 10:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 11:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 12:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 13:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 14:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 15:
return ProcessCBData(argument);
- case MAXWELL3D_REG_INDEX(cb_bind[0]):
+ case MAXWELL3D_REG_INDEX(bind_groups[0].raw_config):
return ProcessCBBind(0);
- case MAXWELL3D_REG_INDEX(cb_bind[1]):
+ case MAXWELL3D_REG_INDEX(bind_groups[1].raw_config):
return ProcessCBBind(1);
- case MAXWELL3D_REG_INDEX(cb_bind[2]):
+ case MAXWELL3D_REG_INDEX(bind_groups[2].raw_config):
return ProcessCBBind(2);
- case MAXWELL3D_REG_INDEX(cb_bind[3]):
+ case MAXWELL3D_REG_INDEX(bind_groups[3].raw_config):
return ProcessCBBind(3);
- case MAXWELL3D_REG_INDEX(cb_bind[4]):
+ case MAXWELL3D_REG_INDEX(bind_groups[4].raw_config):
return ProcessCBBind(4);
- case MAXWELL3D_REG_INDEX(draw.vertex_end_gl):
- return DrawArrays();
- case MAXWELL3D_REG_INDEX(small_index):
- regs.index_array.count = regs.small_index.count;
- regs.index_array.first = regs.small_index.first;
+ case MAXWELL3D_REG_INDEX(index_buffer32_first):
+ regs.index_buffer.count = regs.index_buffer32_first.count;
+ regs.index_buffer.first = regs.index_buffer32_first.first;
dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
- return DrawArrays();
- case MAXWELL3D_REG_INDEX(small_index_2):
- regs.index_array.count = regs.small_index_2.count;
- regs.index_array.first = regs.small_index_2.first;
+ return ProcessDraw();
+ case MAXWELL3D_REG_INDEX(index_buffer16_first):
+ regs.index_buffer.count = regs.index_buffer16_first.count;
+ regs.index_buffer.first = regs.index_buffer16_first.first;
dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
- return DrawArrays();
+ return ProcessDraw();
+ case MAXWELL3D_REG_INDEX(index_buffer8_first):
+ regs.index_buffer.count = regs.index_buffer8_first.count;
+ regs.index_buffer.first = regs.index_buffer8_first.first;
+ dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
+ return ProcessDraw();
case MAXWELL3D_REG_INDEX(topology_override):
use_topology_override = true;
return;
- case MAXWELL3D_REG_INDEX(clear_buffers):
+ case MAXWELL3D_REG_INDEX(clear_surface):
return ProcessClearBuffers();
- case MAXWELL3D_REG_INDEX(query.query_get):
+ case MAXWELL3D_REG_INDEX(report_semaphore.query):
return ProcessQueryGet();
- case MAXWELL3D_REG_INDEX(condition.mode):
+ case MAXWELL3D_REG_INDEX(render_enable.mode):
return ProcessQueryCondition();
- case MAXWELL3D_REG_INDEX(counter_reset):
+ case MAXWELL3D_REG_INDEX(clear_report_value):
return ProcessCounterReset();
case MAXWELL3D_REG_INDEX(sync_info):
return ProcessSyncPoint();
- case MAXWELL3D_REG_INDEX(exec_upload):
- return upload_state.ProcessExec(regs.exec_upload.linear != 0);
- case MAXWELL3D_REG_INDEX(data_upload):
+ case MAXWELL3D_REG_INDEX(launch_dma):
+ return upload_state.ProcessExec(regs.launch_dma.memory_layout.Value() ==
+ Regs::LaunchDMA::Layout::Pitch);
+ case MAXWELL3D_REG_INDEX(inline_data):
upload_state.ProcessData(argument, is_last_call);
- if (is_last_call) {
- }
return;
case MAXWELL3D_REG_INDEX(fragment_barrier):
return rasterizer->FragmentBarrier();
- case MAXWELL3D_REG_INDEX(tiled_cache_barrier):
- return rasterizer->TiledCacheBarrier();
}
}
@@ -257,14 +262,13 @@ void Maxwell3D::CallMacroMethod(u32 method, const std::vector<u32>& parameters)
// Execute the current macro.
macro_engine->Execute(macro_positions[entry], parameters);
- if (mme_draw.current_mode != MMEDrawMode::Undefined) {
- FlushMMEInlineDraw();
- }
+
+ ProcessDeferredDraw();
}
void Maxwell3D::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
- // It is an error to write to a register other than the current macro's ARG register before it
- // has finished execution.
+ // It is an error to write to a register other than the current macro's ARG register before
+ // it has finished execution.
if (executing_macro != 0) {
ASSERT(method == executing_macro + 1);
}
@@ -279,9 +283,33 @@ void Maxwell3D::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
ASSERT_MSG(method < Regs::NUM_REGS,
"Invalid Maxwell3D register, increase the size of the Regs structure");
- const u32 argument = ProcessShadowRam(method, method_argument);
- ProcessDirtyRegisters(method, argument);
- ProcessMethodCall(method, argument, method_argument, is_last_call);
+ if (draw_command[method]) {
+ regs.reg_array[method] = method_argument;
+ deferred_draw_method.push_back(method);
+ auto u32_to_u8 = [&](const u32 argument) {
+ inline_index_draw_indexes.push_back(static_cast<u8>(argument & 0x000000ff));
+ inline_index_draw_indexes.push_back(static_cast<u8>((argument & 0x0000ff00) >> 8));
+ inline_index_draw_indexes.push_back(static_cast<u8>((argument & 0x00ff0000) >> 16));
+ inline_index_draw_indexes.push_back(static_cast<u8>((argument & 0xff000000) >> 24));
+ };
+ if (MAXWELL3D_REG_INDEX(draw_inline_index) == method) {
+ u32_to_u8(method_argument);
+ } else if (MAXWELL3D_REG_INDEX(inline_index_2x16.even) == method) {
+ u32_to_u8(regs.inline_index_2x16.even);
+ u32_to_u8(regs.inline_index_2x16.odd);
+ } else if (MAXWELL3D_REG_INDEX(inline_index_4x8.index0) == method) {
+ u32_to_u8(regs.inline_index_4x8.index0);
+ u32_to_u8(regs.inline_index_4x8.index1);
+ u32_to_u8(regs.inline_index_4x8.index2);
+ u32_to_u8(regs.inline_index_4x8.index3);
+ }
+ } else {
+ ProcessDeferredDraw();
+
+ const u32 argument = ProcessShadowRam(method, method_argument);
+ ProcessDirtyRegisters(method, argument);
+ ProcessMethodCall(method, argument, method_argument, is_last_call);
+ }
}
void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
@@ -293,24 +321,27 @@ void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
return;
}
switch (method) {
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data):
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 1:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 2:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 3:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 4:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 5:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 6:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 7:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 8:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 9:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 10:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 11:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 12:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 13:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 14:
- case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 15:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer):
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 1:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 2:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 3:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 4:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 5:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 6:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 7:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 8:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 9:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 10:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 11:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 12:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 13:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 14:
+ case MAXWELL3D_REG_INDEX(const_buffer.buffer) + 15:
ProcessCBMultiData(base_start, amount);
break;
+ case MAXWELL3D_REG_INDEX(inline_data):
+ upload_state.ProcessData(base_start, static_cast<size_t>(amount));
+ return;
default:
for (std::size_t i = 0; i < amount; i++) {
CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
@@ -319,54 +350,6 @@ void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
}
}
-void Maxwell3D::StepInstance(const MMEDrawMode expected_mode, const u32 count) {
- if (mme_draw.current_mode == MMEDrawMode::Undefined) {
- if (mme_draw.gl_begin_consume) {
- mme_draw.current_mode = expected_mode;
- mme_draw.current_count = count;
- mme_draw.instance_count = 1;
- mme_draw.gl_begin_consume = false;
- mme_draw.gl_end_count = 0;
- }
- return;
- } else {
- if (mme_draw.current_mode == expected_mode && count == mme_draw.current_count &&
- mme_draw.instance_mode && mme_draw.gl_begin_consume) {
- mme_draw.instance_count++;
- mme_draw.gl_begin_consume = false;
- return;
- } else {
- FlushMMEInlineDraw();
- }
- }
- // Tail call in case it needs to retry.
- StepInstance(expected_mode, count);
-}
-
-void Maxwell3D::CallMethodFromMME(u32 method, u32 method_argument) {
- if (mme_inline[method]) {
- regs.reg_array[method] = method_argument;
- if (method == MAXWELL3D_REG_INDEX(vertex_buffer.count) ||
- method == MAXWELL3D_REG_INDEX(index_array.count)) {
- const MMEDrawMode expected_mode = method == MAXWELL3D_REG_INDEX(vertex_buffer.count)
- ? MMEDrawMode::Array
- : MMEDrawMode::Indexed;
- StepInstance(expected_mode, method_argument);
- } else if (method == MAXWELL3D_REG_INDEX(draw.vertex_begin_gl)) {
- mme_draw.instance_mode =
- (regs.draw.instance_next != 0) || (regs.draw.instance_cont != 0);
- mme_draw.gl_begin_consume = true;
- } else {
- mme_draw.gl_end_count++;
- }
- } else {
- if (mme_draw.current_mode != MMEDrawMode::Undefined) {
- FlushMMEInlineDraw();
- }
- CallMethod(method, method_argument, true);
- }
-}
-
void Maxwell3D::ProcessTopologyOverride() {
using PrimitiveTopology = Maxwell3D::Regs::PrimitiveTopology;
using PrimitiveTopologyOverride = Maxwell3D::Regs::PrimitiveTopologyOverride;
@@ -396,46 +379,12 @@ void Maxwell3D::ProcessTopologyOverride() {
}
}
-void Maxwell3D::FlushMMEInlineDraw() {
- LOG_TRACE(HW_GPU, "called, topology={}, count={}", regs.draw.topology.Value(),
- regs.vertex_buffer.count);
- ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?");
- ASSERT(mme_draw.instance_count == mme_draw.gl_end_count);
-
- // Both instance configuration registers can not be set at the same time.
- ASSERT_MSG(!regs.draw.instance_next || !regs.draw.instance_cont,
- "Illegal combination of instancing parameters");
-
- ProcessTopologyOverride();
-
- const bool is_indexed = mme_draw.current_mode == MMEDrawMode::Indexed;
- if (ShouldExecute()) {
- rasterizer->Draw(is_indexed, true);
- }
-
- // TODO(bunnei): Below, we reset vertex count so that we can use these registers to determine if
- // the game is trying to draw indexed or direct mode. This needs to be verified on HW still -
- // it's possible that it is incorrect and that there is some other register used to specify the
- // drawing mode.
- if (is_indexed) {
- regs.index_array.count = 0;
- } else {
- regs.vertex_buffer.count = 0;
- }
- mme_draw.current_mode = MMEDrawMode::Undefined;
- mme_draw.current_count = 0;
- mme_draw.instance_count = 0;
- mme_draw.instance_mode = false;
- mme_draw.gl_begin_consume = false;
- mme_draw.gl_end_count = 0;
-}
-
void Maxwell3D::ProcessMacroUpload(u32 data) {
- macro_engine->AddCode(regs.macros.upload_address++, data);
+ macro_engine->AddCode(regs.load_mme.instruction_ptr++, data);
}
void Maxwell3D::ProcessMacroBind(u32 data) {
- macro_positions[regs.macros.entry++] = data;
+ macro_positions[regs.load_mme.start_address_ptr++] = data;
}
void Maxwell3D::ProcessFirmwareCall4() {
@@ -443,22 +392,14 @@ void Maxwell3D::ProcessFirmwareCall4() {
// Firmware call 4 is a blob that changes some registers depending on its parameters.
// These registers don't affect emulation and so are stubbed by setting 0xd00 to 1.
- regs.reg_array[0xd00] = 1;
+ regs.shadow_scratch[0] = 1;
}
void Maxwell3D::StampQueryResult(u64 payload, bool long_query) {
- struct LongQueryResult {
- u64_le value;
- u64_le timestamp;
- };
- static_assert(sizeof(LongQueryResult) == 16, "LongQueryResult has wrong size");
- const GPUVAddr sequence_address{regs.query.QueryAddress()};
+ const GPUVAddr sequence_address{regs.report_semaphore.Address()};
if (long_query) {
- // Write the 128-bit result structure in long mode. Note: We emulate an infinitely fast
- // GPU, this command may actually take a while to complete in real hardware due to GPU
- // wait queues.
- LongQueryResult query_result{payload, system.GPU().GetTicks()};
- memory_manager.WriteBlock(sequence_address, &query_result, sizeof(query_result));
+ memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks());
+ memory_manager.Write<u64>(sequence_address, payload);
} else {
memory_manager.Write<u32>(sequence_address, static_cast<u32>(payload));
}
@@ -466,31 +407,45 @@ void Maxwell3D::StampQueryResult(u64 payload, bool long_query) {
void Maxwell3D::ProcessQueryGet() {
// TODO(Subv): Support the other query units.
- if (regs.query.query_get.unit != Regs::QueryUnit::Crop) {
- LOG_DEBUG(HW_GPU, "Units other than CROP are unimplemented");
- }
-
- switch (regs.query.query_get.operation) {
- case Regs::QueryOperation::Release:
- if (regs.query.query_get.fence == 1) {
- rasterizer->SignalSemaphore(regs.query.QueryAddress(), regs.query.query_sequence);
+ if (regs.report_semaphore.query.location != Regs::ReportSemaphore::Location::All) {
+ LOG_DEBUG(HW_GPU, "Locations other than ALL are unimplemented");
+ }
+
+ switch (regs.report_semaphore.query.operation) {
+ case Regs::ReportSemaphore::Operation::Release:
+ if (regs.report_semaphore.query.short_query != 0) {
+ const GPUVAddr sequence_address{regs.report_semaphore.Address()};
+ const u32 payload = regs.report_semaphore.payload;
+ std::function<void()> operation([this, sequence_address, payload] {
+ memory_manager.Write<u32>(sequence_address, payload);
+ });
+ rasterizer->SignalFence(std::move(operation));
} else {
- StampQueryResult(regs.query.query_sequence, regs.query.query_get.short_query == 0);
+ struct LongQueryResult {
+ u64_le value;
+ u64_le timestamp;
+ };
+ const GPUVAddr sequence_address{regs.report_semaphore.Address()};
+ const u32 payload = regs.report_semaphore.payload;
+ [this, sequence_address, payload] {
+ memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks());
+ memory_manager.Write<u64>(sequence_address, payload);
+ }();
}
break;
- case Regs::QueryOperation::Acquire:
+ case Regs::ReportSemaphore::Operation::Acquire:
// TODO(Blinkhawk): Under this operation, the GPU waits for the CPU to write a value that
// matches the current payload.
UNIMPLEMENTED_MSG("Unimplemented query operation ACQUIRE");
break;
- case Regs::QueryOperation::Counter:
+ case Regs::ReportSemaphore::Operation::ReportOnly:
if (const std::optional<u64> result = GetQueryResult()) {
// If the query returns an empty optional it means it's cached and deferred.
// In this case we have a non-empty result, so we stamp it immediately.
- StampQueryResult(*result, regs.query.query_get.short_query == 0);
+ StampQueryResult(*result, regs.report_semaphore.query.short_query == 0);
}
break;
- case Regs::QueryOperation::Trap:
+ case Regs::ReportSemaphore::Operation::Trap:
UNIMPLEMENTED_MSG("Unimplemented query operation TRAP");
break;
default:
@@ -500,31 +455,31 @@ void Maxwell3D::ProcessQueryGet() {
}
void Maxwell3D::ProcessQueryCondition() {
- const GPUVAddr condition_address{regs.condition.Address()};
- switch (regs.condition.mode) {
- case Regs::ConditionMode::Always: {
+ const GPUVAddr condition_address{regs.render_enable.Address()};
+ switch (regs.render_enable.mode) {
+ case Regs::RenderEnable::Mode::True: {
execute_on = true;
break;
}
- case Regs::ConditionMode::Never: {
+ case Regs::RenderEnable::Mode::False: {
execute_on = false;
break;
}
- case Regs::ConditionMode::ResNonZero: {
- Regs::QueryCompare cmp;
+ case Regs::RenderEnable::Mode::Conditional: {
+ Regs::ReportSemaphore::Compare cmp;
memory_manager.ReadBlock(condition_address, &cmp, sizeof(cmp));
execute_on = cmp.initial_sequence != 0U && cmp.initial_mode != 0U;
break;
}
- case Regs::ConditionMode::Equal: {
- Regs::QueryCompare cmp;
+ case Regs::RenderEnable::Mode::IfEqual: {
+ Regs::ReportSemaphore::Compare cmp;
memory_manager.ReadBlock(condition_address, &cmp, sizeof(cmp));
execute_on =
cmp.initial_sequence == cmp.current_sequence && cmp.initial_mode == cmp.current_mode;
break;
}
- case Regs::ConditionMode::NotEqual: {
- Regs::QueryCompare cmp;
+ case Regs::RenderEnable::Mode::IfNotEqual: {
+ Regs::ReportSemaphore::Compare cmp;
memory_manager.ReadBlock(condition_address, &cmp, sizeof(cmp));
execute_on =
cmp.initial_sequence != cmp.current_sequence || cmp.initial_mode != cmp.current_mode;
@@ -539,108 +494,73 @@ void Maxwell3D::ProcessQueryCondition() {
}
void Maxwell3D::ProcessCounterReset() {
- switch (regs.counter_reset) {
- case Regs::CounterReset::SampleCnt:
+ switch (regs.clear_report_value) {
+ case Regs::ClearReport::ZPassPixelCount:
rasterizer->ResetCounter(QueryType::SamplesPassed);
break;
default:
- LOG_DEBUG(Render_OpenGL, "Unimplemented counter reset={}", regs.counter_reset);
+ LOG_DEBUG(Render_OpenGL, "Unimplemented counter reset={}", regs.clear_report_value);
break;
}
}
void Maxwell3D::ProcessSyncPoint() {
const u32 sync_point = regs.sync_info.sync_point.Value();
- const u32 increment = regs.sync_info.increment.Value();
- [[maybe_unused]] const u32 cache_flush = regs.sync_info.unknown.Value();
- if (increment) {
- rasterizer->SignalSyncPoint(sync_point);
- }
-}
-
-void Maxwell3D::DrawArrays() {
- LOG_TRACE(HW_GPU, "called, topology={}, count={}", regs.draw.topology.Value(),
- regs.vertex_buffer.count);
- ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?");
-
- // Both instance configuration registers can not be set at the same time.
- ASSERT_MSG(!regs.draw.instance_next || !regs.draw.instance_cont,
- "Illegal combination of instancing parameters");
-
- ProcessTopologyOverride();
-
- if (regs.draw.instance_next) {
- // Increment the current instance *before* drawing.
- state.current_instance += 1;
- } else if (!regs.draw.instance_cont) {
- // Reset the current instance to 0.
- state.current_instance = 0;
- }
-
- const bool is_indexed{regs.index_array.count && !regs.vertex_buffer.count};
- if (ShouldExecute()) {
- rasterizer->Draw(is_indexed, false);
- }
-
- // TODO(bunnei): Below, we reset vertex count so that we can use these registers to determine if
- // the game is trying to draw indexed or direct mode. This needs to be verified on HW still -
- // it's possible that it is incorrect and that there is some other register used to specify the
- // drawing mode.
- if (is_indexed) {
- regs.index_array.count = 0;
- } else {
- regs.vertex_buffer.count = 0;
+ const u32 cache_flush = regs.sync_info.clean_l2.Value();
+ if (cache_flush != 0) {
+ rasterizer->InvalidateGPUCache();
}
+ rasterizer->SignalSyncPoint(sync_point);
}
std::optional<u64> Maxwell3D::GetQueryResult() {
- switch (regs.query.query_get.select) {
- case Regs::QuerySelect::Payload:
- return regs.query.query_sequence;
- case Regs::QuerySelect::SamplesPassed:
+ switch (regs.report_semaphore.query.report) {
+ case Regs::ReportSemaphore::Report::Payload:
+ return regs.report_semaphore.payload;
+ case Regs::ReportSemaphore::Report::ZPassPixelCount64:
// Deferred.
- rasterizer->Query(regs.query.QueryAddress(), QueryType::SamplesPassed,
+ rasterizer->Query(regs.report_semaphore.Address(), QueryType::SamplesPassed,
system.GPU().GetTicks());
return std::nullopt;
default:
- LOG_DEBUG(HW_GPU, "Unimplemented query select type {}",
- regs.query.query_get.select.Value());
+ LOG_DEBUG(HW_GPU, "Unimplemented query report type {}",
+ regs.report_semaphore.query.report.Value());
return 1;
}
}
void Maxwell3D::ProcessCBBind(size_t stage_index) {
// Bind the buffer currently in CB_ADDRESS to the specified index in the desired shader stage.
- const auto& bind_data = regs.cb_bind[stage_index];
- auto& buffer = state.shader_stages[stage_index].const_buffers[bind_data.index];
+ const auto& bind_data = regs.bind_groups[stage_index];
+ auto& buffer = state.shader_stages[stage_index].const_buffers[bind_data.shader_slot];
buffer.enabled = bind_data.valid.Value() != 0;
- buffer.address = regs.const_buffer.BufferAddress();
- buffer.size = regs.const_buffer.cb_size;
+ buffer.address = regs.const_buffer.Address();
+ buffer.size = regs.const_buffer.size;
const bool is_enabled = bind_data.valid.Value() != 0;
if (!is_enabled) {
- rasterizer->DisableGraphicsUniformBuffer(stage_index, bind_data.index);
+ rasterizer->DisableGraphicsUniformBuffer(stage_index, bind_data.shader_slot);
return;
}
- const GPUVAddr gpu_addr = regs.const_buffer.BufferAddress();
- const u32 size = regs.const_buffer.cb_size;
- rasterizer->BindGraphicsUniformBuffer(stage_index, bind_data.index, gpu_addr, size);
+ const GPUVAddr gpu_addr = regs.const_buffer.Address();
+ const u32 size = regs.const_buffer.size;
+ rasterizer->BindGraphicsUniformBuffer(stage_index, bind_data.shader_slot, gpu_addr, size);
}
void Maxwell3D::ProcessCBMultiData(const u32* start_base, u32 amount) {
// Write the input value to the current const buffer at the current position.
- const GPUVAddr buffer_address = regs.const_buffer.BufferAddress();
+ const GPUVAddr buffer_address = regs.const_buffer.Address();
ASSERT(buffer_address != 0);
// Don't allow writing past the end of the buffer.
- ASSERT(regs.const_buffer.cb_pos <= regs.const_buffer.cb_size);
+ ASSERT(regs.const_buffer.offset <= regs.const_buffer.size);
- const GPUVAddr address{buffer_address + regs.const_buffer.cb_pos};
+ const GPUVAddr address{buffer_address + regs.const_buffer.offset};
const size_t copy_size = amount * sizeof(u32);
memory_manager.WriteBlock(address, start_base, copy_size);
// Increment the current buffer position.
- regs.const_buffer.cb_pos += static_cast<u32>(copy_size);
+ regs.const_buffer.offset += static_cast<u32>(copy_size);
}
void Maxwell3D::ProcessCBData(u32 value) {
@@ -648,7 +568,8 @@ void Maxwell3D::ProcessCBData(u32 value) {
}
Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
- const GPUVAddr tic_address_gpu{regs.tic.Address() + tic_index * sizeof(Texture::TICEntry)};
+ const GPUVAddr tic_address_gpu{regs.tex_header.Address() +
+ tic_index * sizeof(Texture::TICEntry)};
Texture::TICEntry tic_entry;
memory_manager.ReadBlockUnsafe(tic_address_gpu, &tic_entry, sizeof(Texture::TICEntry));
@@ -657,7 +578,8 @@ Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
}
Texture::TSCEntry Maxwell3D::GetTSCEntry(u32 tsc_index) const {
- const GPUVAddr tsc_address_gpu{regs.tsc.Address() + tsc_index * sizeof(Texture::TSCEntry)};
+ const GPUVAddr tsc_address_gpu{regs.tex_sampler.Address() +
+ tsc_index * sizeof(Texture::TSCEntry)};
Texture::TSCEntry tsc_entry;
memory_manager.ReadBlockUnsafe(tsc_address_gpu, &tsc_entry, sizeof(Texture::TSCEntry));
@@ -673,4 +595,90 @@ void Maxwell3D::ProcessClearBuffers() {
rasterizer->Clear();
}
+void Maxwell3D::ProcessDraw(u32 instance_count) {
+ LOG_TRACE(HW_GPU, "called, topology={}, count={}", regs.draw.topology.Value(),
+ regs.vertex_buffer.count);
+
+ ASSERT_MSG(!(regs.index_buffer.count && regs.vertex_buffer.count), "Both indexed and direct?");
+
+    // Both instance configuration registers cannot be set at the same time.
+ ASSERT_MSG(regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::First ||
+ regs.draw.instance_id != Maxwell3D::Regs::Draw::InstanceId::Unchanged,
+ "Illegal combination of instancing parameters");
+
+ ProcessTopologyOverride();
+
+ const bool is_indexed = regs.index_buffer.count && !regs.vertex_buffer.count;
+ if (ShouldExecute()) {
+ rasterizer->Draw(is_indexed, instance_count);
+ }
+
+ if (is_indexed) {
+ regs.index_buffer.count = 0;
+ } else {
+ regs.vertex_buffer.count = 0;
+ }
+}
+
+void Maxwell3D::ProcessDeferredDraw() {
+ if (deferred_draw_method.empty()) {
+ return;
+ }
+
+ enum class DrawMode {
+ Undefined,
+ General,
+ Instance,
+ };
+ DrawMode draw_mode{DrawMode::Undefined};
+ u32 instance_count = 1;
+
+ u32 index = 0;
+ u32 method = 0;
+ u32 method_count = static_cast<u32>(deferred_draw_method.size());
+ for (; index < method_count &&
+ (method = deferred_draw_method[index]) != MAXWELL3D_REG_INDEX(draw.begin);
+ ++index)
+ ;
+
+ if (MAXWELL3D_REG_INDEX(draw.begin) != method) {
+ return;
+ }
+
+    // A complete draw sequence requires at least 3 methods
+    // (draw.begin -> vertex/index first/count -> draw.end); bail out on anything
+    // shorter to avoid errors in indexed drawing
+ if ((method_count - index) < 3) {
+ return;
+ }
+ draw_mode = (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Subsequent) ||
+ (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Unchanged)
+ ? DrawMode::Instance
+ : DrawMode::General;
+
+    // Drawing only begins with a draw.begin or index_buffer method; any other
+    // method simply clears the deferred state
+ if (draw_mode == DrawMode::Undefined) {
+ deferred_draw_method.clear();
+ return;
+ }
+
+ if (draw_mode == DrawMode::Instance) {
+ ASSERT_MSG(deferred_draw_method.size() % 4 == 0, "Instance mode method size error");
+ instance_count = static_cast<u32>(method_count - index) / 4;
+ } else {
+ method = deferred_draw_method[index + 1];
+ if (MAXWELL3D_REG_INDEX(draw_inline_index) == method ||
+ MAXWELL3D_REG_INDEX(inline_index_2x16.even) == method ||
+ MAXWELL3D_REG_INDEX(inline_index_4x8.index0) == method) {
+ regs.index_buffer.count = static_cast<u32>(inline_index_draw_indexes.size() / 4);
+ regs.index_buffer.format = Regs::IndexFormat::UnsignedInt;
+ }
+ }
+
+ ProcessDraw(instance_count);
+
+ deferred_draw_method.clear();
+ inline_index_draw_indexes.clear();
+}
+
} // namespace Tegra::Engines
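
The deferred-draw path replaces the old MME inline-draw tracking: draw-related
methods are queued in deferred_draw_method, and ProcessDeferredDraw later scans
the queue for draw.begin, treating each group of four methods
(begin/first/count/end) as one instance when instance_id requests instancing. A
toy walk-through of that counting heuristic (register indices are invented for the
example, not the real Maxwell offsets):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
        constexpr std::uint32_t kBegin = 0x100, kFirst = 0x101, kCount = 0x102,
                                kEnd = 0x103;
        std::vector<std::uint32_t> deferred;
        for (int instance = 0; instance < 3; ++instance) {
            deferred.insert(deferred.end(), {kBegin, kFirst, kCount, kEnd});
        }
        // Skip anything queued before the first draw.begin, then assume four
        // methods per instance, exactly as ProcessDeferredDraw does.
        std::size_t index = 0;
        while (index < deferred.size() && deferred[index] != kBegin) {
            ++index;
        }
        const auto instance_count =
            static_cast<std::uint32_t>(deferred.size() - index) / 4;
        assert(instance_count == 3);
        return 0;
    }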
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index 5f9eb208c..a948fcb14 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -39,12 +39,15 @@ namespace Tegra::Engines {
/**
* This Engine is known as GF100_3D. Documentation can be found in:
+ * https://github.com/NVIDIA/open-gpu-doc/blob/master/classes/3d/clb197.h
* https://github.com/envytools/envytools/blob/master/rnndb/graph/gf100_3d.xml
* https://cgit.freedesktop.org/mesa/mesa/tree/src/gallium/drivers/nouveau/nvc0/nvc0_3d.xml.h
+ *
+ * Note: nVidia have confirmed that their open docs have had parts redacted, so this list is
+ * currently incomplete, and the gaps are still worth exploring.
*/
-#define MAXWELL3D_REG_INDEX(field_name) \
- (offsetof(Tegra::Engines::Maxwell3D::Regs, field_name) / sizeof(u32))
+#define MAXWELL3D_REG_INDEX(field_name) (offsetof(Maxwell3D::Regs, field_name) / sizeof(u32))
class Maxwell3D final : public EngineInterface {
public:
@@ -55,7 +58,6 @@ public:
void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
/// Register structure of the Maxwell3D engine.
- /// TODO(Subv): This structure will need to be made bigger as more registers are discovered.
struct Regs {
static constexpr std::size_t NUM_REGS = 0xE00;
@@ -74,90 +76,515 @@ public:
static constexpr std::size_t MaxConstBuffers = 18;
static constexpr std::size_t MaxConstBufferSize = 0x10000;
- enum class QueryOperation : u32 {
- Release = 0,
- Acquire = 1,
- Counter = 2,
- Trap = 3,
+ struct ID {
+ union {
+ BitField<0, 16, u32> cls;
+ BitField<16, 5, u32> engine;
+ };
};
- enum class QueryUnit : u32 {
- VFetch = 1,
- VP = 2,
- Rast = 4,
- StrmOut = 5,
- GP = 6,
- ZCull = 7,
- Prop = 10,
- Crop = 15,
+ struct LoadMME {
+ u32 instruction_ptr;
+ u32 instruction;
+ u32 start_address_ptr;
+ u32 start_address;
};
- enum class QuerySelect : u32 {
- Payload = 0,
- TimeElapsed = 2,
- TransformFeedbackPrimitivesGenerated = 11,
- PrimitivesGenerated = 18,
- SamplesPassed = 21,
- TransformFeedbackUnknown = 26,
+ struct Notify {
+ u32 address_high;
+ u32 address_low;
+ u32 type;
+
+ GPUVAddr Address() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
};
- struct QueryCompare {
- u32 initial_sequence;
- u32 initial_mode;
- u32 unknown1;
- u32 unknown2;
- u32 current_sequence;
- u32 current_mode;
+ struct PeerSemaphore {
+ u32 address_high;
+ u32 address_low;
+
+ GPUVAddr Address() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
};
- enum class QuerySyncCondition : u32 {
- NotEqual = 0,
- GreaterThan = 1,
+ struct GlobalRender {
+ enum class Mode : u32 {
+ False = 0,
+ True = 1,
+ Conditional = 2,
+ IfEqual = 3,
+ IfNotEqual = 4,
+ };
+ u32 offset_high;
+ u32 offset_low;
+ Mode mode;
+
+ GPUVAddr Address() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(offset_high) << 32) |
+ offset_low);
+ }
};
- enum class ConditionMode : u32 {
- Never = 0,
- Always = 1,
- ResNonZero = 2,
- Equal = 3,
- NotEqual = 4,
+ enum class ReductionOp : u32 {
+ RedAdd = 0,
+ RedMin = 1,
+ RedMax = 2,
+ RedInc = 3,
+ RedDec = 4,
+ RedAnd = 5,
+ RedOr = 6,
+ RedXor = 7,
};
- enum class ShaderProgram : u32 {
- VertexA = 0,
- VertexB = 1,
- TesselationControl = 2,
- TesselationEval = 3,
- Geometry = 4,
- Fragment = 5,
+ struct LaunchDMA {
+ enum class Layout : u32 {
+ Blocklinear = 0,
+ Pitch = 1,
+ };
+
+ enum class CompletionType : u32 {
+ FlushDisable = 0,
+ FlushOnly = 1,
+ Release = 2,
+ };
+
+ union {
+ BitField<0, 1, Layout> memory_layout;
+ BitField<4, 2, CompletionType> completion_type;
+ BitField<8, 2, u32> interrupt_type;
+ BitField<12, 1, u32> sem_size;
+ BitField<1, 1, u32> reduction_enable;
+ BitField<13, 3, ReductionOp> reduction_op;
+ BitField<2, 2, u32> reduction_format;
+ BitField<6, 1, u32> sysmembar_disable;
+ };
+ };
+
+ struct I2M {
+ u32 address_high;
+ u32 address_low;
+ u32 payload;
+ INSERT_PADDING_BYTES_NOINIT(0x8);
+ u32 nop0;
+ u32 nop1;
+ u32 nop2;
+ u32 nop3;
+ };
+
+ struct OpportunisticEarlyZ {
+ BitField<0, 5, u32> threshold;
+
+ u32 Threshold() const {
+ switch (threshold) {
+ case 0x0:
+ return 0;
+ case 0x1F:
+ return 0x1F;
+ default:
+ // Thresholds begin at 0x10 (1 << 4)
+ // Threshold is in the range 0x1 to 0x13
+ return 1 << (4 + threshold.Value() - 1);
+ }
+ }
+ };
+
+ struct GeometryShaderDmFifo {
+ union {
+ BitField<0, 13, u32> raster_on;
+ BitField<16, 13, u32> raster_off;
+ BitField<31, 1, u32> spill_enabled;
+ };
+ };
+
+ struct L2CacheControl {
+ enum class EvictPolicy : u32 {
+ First = 0,
+ Normal = 1,
+ Last = 2,
+ };
+
+ union {
+ BitField<4, 2, EvictPolicy> policy;
+ };
+ };
+
+ struct InvalidateShaderCache {
+ union {
+ BitField<0, 1, u32> instruction;
+ BitField<4, 1, u32> data;
+ BitField<12, 1, u32> constant;
+ BitField<1, 1, u32> locks;
+ BitField<2, 1, u32> flush_data;
+ };
+ };
+
+ struct SyncInfo {
+ enum class Condition : u32 {
+ StreamOutWritesDone = 0,
+ RopWritesDone = 1,
+ };
+
+ union {
+ BitField<0, 16, u32> sync_point;
+ BitField<16, 1, u32> clean_l2;
+ BitField<20, 1, Condition> condition;
+ };
+ };
+
+ struct SurfaceClipBlockId {
+ union {
+ BitField<0, 4, u32> block_width;
+ BitField<4, 4, u32> block_height;
+ BitField<8, 4, u32> block_depth;
+ };
+ };
+
+ struct DecompressSurface {
+ union {
+ BitField<0, 3, u32> mrt_select;
+ BitField<4, 16, u32> rt_array_index;
+ };
+ };
+
+ struct ZCullRopBypass {
+ union {
+ BitField<0, 1, u32> enable;
+ BitField<4, 1, u32> no_stall;
+ BitField<8, 1, u32> cull_everything;
+ BitField<12, 4, u32> threshold;
+ };
+ };
+
+ struct ZCullSubregion {
+ union {
+ BitField<0, 1, u32> enable;
+ BitField<4, 24, u32> normalized_aliquots;
+ };
+ };
+
+ struct RasterBoundingBox {
+ enum class Mode : u32 {
+ BoundingBox = 0,
+ FullViewport = 1,
+ };
+
+ union {
+ BitField<0, 1, Mode> mode;
+ BitField<4, 8, u32> pad;
+ };
+ };
+
+ struct IteratedBlendOptimization {
+ enum class Noop : u32 {
+ Never = 0,
+ SourceRGBA0000 = 1,
+ SourceAlpha = 2,
+ SourceRGBA0001 = 3,
+ };
+
+ union {
+ BitField<0, 1, Noop> noop;
+ };
+ };
+
+ struct ZCullSubregionAllocation {
+ enum class Format : u32 {
+ Z_16x16x2_4x4 = 0,
+ ZS_16x16_4x4 = 1,
+ Z_16x16_4x2 = 2,
+ Z_16x16_2x4 = 3,
+ Z_16x8_4x4 = 4,
+ Z_8x8_4x2 = 5,
+ Z_8x8_2x4 = 6,
+ Z_16x16_4x8 = 7,
+ Z_4x8_2x2 = 8,
+ ZS_16x8_4x2 = 9,
+ ZS_16x8_2x4 = 10,
+ ZS_8x8_2x2 = 11,
+ Z_4x8_1x1 = 12,
+ None = 15,
+ };
+
+ union {
+ BitField<0, 8, u32> id;
+ BitField<8, 16, u32> aliquots;
+ BitField<24, 4, Format> format;
+ };
+ };
+
+ enum class ZCullSubregionAlgorithm : u32 {
+ Static = 0,
+ Adaptive = 1,
+ };
+
+ struct PixelShaderOutputSampleMaskUsage {
+ union {
+ BitField<0, 1, u32> enable;
+ BitField<1, 1, u32> qualify_by_aa;
+ };
+ };
+
+ struct L1Configuration {
+ enum class AddressableMemory : u32 {
+ Size16Kb = 0,
+ Size48Kb = 3,
+ };
+ union {
+ BitField<0, 3, AddressableMemory> direct_addressable_memory;
+ };
+ };
+
+ struct SPAVersion {
+ union {
+ BitField<0, 8, u32> minor;
+ BitField<8, 8, u32> major;
+ };
+ };
+
+ struct SnapGrid {
+ enum class Location : u32 {
+ Pixel2x2 = 1,
+ Pixel4x4 = 2,
+ Pixel8x8 = 3,
+ Pixel16x16 = 4,
+ Pixel32x32 = 5,
+ Pixel64x64 = 6,
+ Pixel128x128 = 7,
+ Pixel256x256 = 8,
+ };
+
+ enum class Mode : u32 {
+ RTNE = 0,
+ Tesla = 1,
+ };
+
+ struct {
+ union {
+ BitField<0, 4, Location> location;
+ BitField<8, 1, Mode> rounding_mode;
+ };
+ } line;
+
+ struct {
+ union {
+ BitField<0, 4, Location> location;
+ BitField<8, 1, Mode> rounding_mode;
+ };
+ } non_line;
+ };
+
+ struct Tessellation {
+ enum class DomainType : u32 {
+ Isolines = 0,
+ Triangles = 1,
+ Quads = 2,
+ };
+
+ enum class Spacing : u32 {
+ Integer = 0,
+ FractionalOdd = 1,
+ FractionalEven = 2,
+ };
+
+ enum class OutputPrimitives : u32 {
+ Points = 0,
+ Lines = 1,
+ Triangles_CW = 2,
+ Triangles_CCW = 3,
+ };
+
+ struct Parameters {
+ union {
+ BitField<0, 2, DomainType> domain_type;
+ BitField<4, 2, Spacing> spacing;
+ BitField<8, 2, OutputPrimitives> output_primitives;
+ };
+ } params;
+
+ struct LOD {
+ std::array<f32, 4> outer;
+ std::array<f32, 2> inner;
+ } lod;
+
+ std::array<u32, 9> reserved;
+ };
+
+ struct SubTilingPerf {
+ struct {
+ union {
+ BitField<0, 8, u32> spm_triangle_register_file_per;
+ BitField<8, 8, u32> spm_pixel_output_buffer_per;
+ BitField<16, 8, u32> spm_triangle_ram_per;
+ BitField<24, 8, u32> max_quads_per;
+ };
+ } knob_a;
+
+ struct {
+ union {
+ BitField<0, 8, u32> max_primitives_per;
+ };
+ } knob_b;
+
+ u32 knob_c;
+ };
+
+ struct ZCullSubregionReport {
+ enum class ReportType : u32 {
+ DepthTest = 0,
+ DepthTestNoAccept = 1,
+ DepthTestLateZ = 2,
+ StencilTest = 3,
+ };
+
+ union {
+ BitField<0, 1, u32> enabled;
+ BitField<4, 8, u32> subregion_id;
+ } to_report;
+
+ union {
+ BitField<0, 1, u32> enabled;
+ BitField<4, 3, ReportType> type;
+ } report_type;
+ };
+
+ struct BalancedPrimitiveWorkload {
+ union {
+ BitField<0, 1, u32> unpartitioned_mode;
+ BitField<4, 1, u32> timesliced_mode;
+ };
+ };
+
+ struct TransformFeedback {
+ struct Buffer {
+ u32 enable;
+ u32 address_high;
+ u32 address_low;
+ s32 size;
+ s32 start_offset;
+ INSERT_PADDING_BYTES_NOINIT(0xC);
+
+ GPUVAddr Address() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
+ };
+ static_assert(sizeof(Buffer) == 0x20);
+
+ struct Control {
+ u32 stream;
+ u32 varying_count;
+ u32 stride;
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ };
+ static_assert(sizeof(Control) == 0x10);
+
+ std::array<TransformFeedback::Buffer, NumTransformFeedbackBuffers> buffers;
+
+ INSERT_PADDING_BYTES_NOINIT(0x300);
+
+ std::array<TransformFeedback::Control, NumTransformFeedbackBuffers> controls;
+ };
+
+ struct HybridAntiAliasControl {
+ enum class Centroid : u32 {
+ PerFragment = 0,
+ PerPass = 1,
+ };
+ union {
+ BitField<0, 4, u32> passes;
+ BitField<4, 1, Centroid> centroid;
+ BitField<5, 1, u32> passes_extended;
+ };
+ };
+
+ struct ShaderLocalMemory {
+ u32 base_address;
+ INSERT_PADDING_BYTES_NOINIT(0x10);
+ u32 address_high;
+ u32 address_low;
+ u32 size_high;
+ u32 size_low;
+ u32 default_size_per_warp;
+
+ GPUVAddr Address() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
+
+ u64 Size() const {
+ return (static_cast<u64>(size_high) << 32) | size_low;
+ }
+ };
+
+ struct ZCullRegion {
+ u32 width;
+ u32 height;
+ u32 depth;
+ u32 offset;
+ INSERT_PADDING_BYTES_NOINIT(0xC);
+ u32 fetch_streams_once;
+ union {
+ BitField<0, 16, u32> start_aliquot;
+ BitField<16, 16, u32> aliquot_count;
+ } location;
+ u32 aliquots_per_layer;
+ u32 storage_address_high;
+ u32 storage_address_low;
+ u32 storage_limit_address_high;
+ u32 storage_limit_address_low;
+
+ GPUVAddr StorageAddress() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(storage_address_high) << 32) |
+ storage_address_low);
+ }
+ GPUVAddr StorageLimitAddress() const {
+ return static_cast<GPUVAddr>(
+ (static_cast<GPUVAddr>(storage_limit_address_high) << 32) |
+ storage_limit_address_low);
+ }
+ };
+
+ struct ZetaReadOnly {
+ union {
+ BitField<0, 1, u32> enable_z;
+ BitField<4, 1, u32> enable_stencil;
+ };
};
struct VertexAttribute {
enum class Size : u32 {
Invalid = 0x0,
- Size_32_32_32_32 = 0x01,
- Size_32_32_32 = 0x02,
- Size_16_16_16_16 = 0x03,
- Size_32_32 = 0x04,
- Size_16_16_16 = 0x05,
- Size_8_8_8_8 = 0x0a,
- Size_16_16 = 0x0f,
- Size_32 = 0x12,
- Size_8_8_8 = 0x13,
- Size_8_8 = 0x18,
- Size_16 = 0x1b,
- Size_8 = 0x1d,
- Size_10_10_10_2 = 0x30,
- Size_11_11_10 = 0x31,
+ Size_R32_G32_B32_A32 = 0x01,
+ Size_R32_G32_B32 = 0x02,
+ Size_R16_G16_B16_A16 = 0x03,
+ Size_R32_G32 = 0x04,
+ Size_R16_G16_B16 = 0x05,
+ Size_R8_G8_B8_A8 = 0x0A,
+ Size_R16_G16 = 0x0F,
+ Size_R32 = 0x12,
+ Size_R8_G8_B8 = 0x13,
+ Size_R8_G8 = 0x18,
+ Size_R16 = 0x1B,
+ Size_R8 = 0x1D,
+ Size_A2_B10_G10_R10 = 0x30,
+ Size_B10_G11_R11 = 0x31,
+ Size_G8_R8 = 0x32,
+ Size_X8_B8_G8_R8 = 0x33,
+ Size_A8 = 0x34,
};
enum class Type : u32 {
- SignedNorm = 1,
- UnsignedNorm = 2,
- SignedInt = 3,
- UnsignedInt = 4,
- UnsignedScaled = 5,
- SignedScaled = 6,
+ UnusedEnumDoNotUseBecauseItWillGoAway = 0,
+ SNorm = 1,
+ UNorm = 2,
+ SInt = 3,
+ UInt = 4,
+ UScaled = 5,
+ SScaled = 6,
Float = 7,
};
@@ -173,33 +600,36 @@ public:
u32 ComponentCount() const {
switch (size) {
- case Size::Size_32_32_32_32:
+ case Size::Size_R32_G32_B32_A32:
return 4;
- case Size::Size_32_32_32:
+ case Size::Size_R32_G32_B32:
return 3;
- case Size::Size_16_16_16_16:
+ case Size::Size_R16_G16_B16_A16:
return 4;
- case Size::Size_32_32:
+ case Size::Size_R32_G32:
return 2;
- case Size::Size_16_16_16:
+ case Size::Size_R16_G16_B16:
return 3;
- case Size::Size_8_8_8_8:
+ case Size::Size_R8_G8_B8_A8:
+ case Size::Size_X8_B8_G8_R8:
return 4;
- case Size::Size_16_16:
+ case Size::Size_R16_G16:
return 2;
- case Size::Size_32:
+ case Size::Size_R32:
return 1;
- case Size::Size_8_8_8:
+ case Size::Size_R8_G8_B8:
return 3;
- case Size::Size_8_8:
+ case Size::Size_R8_G8:
+ case Size::Size_G8_R8:
return 2;
- case Size::Size_16:
+ case Size::Size_R16:
return 1;
- case Size::Size_8:
+ case Size::Size_R8:
+ case Size::Size_A8:
return 1;
- case Size::Size_10_10_10_2:
+ case Size::Size_A2_B10_G10_R10:
return 4;
- case Size::Size_11_11_10:
+ case Size::Size_B10_G11_R11:
return 3;
default:
ASSERT(false);
@@ -209,33 +639,36 @@ public:
u32 SizeInBytes() const {
switch (size) {
- case Size::Size_32_32_32_32:
+ case Size::Size_R32_G32_B32_A32:
return 16;
- case Size::Size_32_32_32:
+ case Size::Size_R32_G32_B32:
return 12;
- case Size::Size_16_16_16_16:
+ case Size::Size_R16_G16_B16_A16:
return 8;
- case Size::Size_32_32:
+ case Size::Size_R32_G32:
return 8;
- case Size::Size_16_16_16:
+ case Size::Size_R16_G16_B16:
return 6;
- case Size::Size_8_8_8_8:
+ case Size::Size_R8_G8_B8_A8:
+ case Size::Size_X8_B8_G8_R8:
return 4;
- case Size::Size_16_16:
+ case Size::Size_R16_G16:
return 4;
- case Size::Size_32:
+ case Size::Size_R32:
return 4;
- case Size::Size_8_8_8:
+ case Size::Size_R8_G8_B8:
return 3;
- case Size::Size_8_8:
+ case Size::Size_R8_G8:
+ case Size::Size_G8_R8:
return 2;
- case Size::Size_16:
+ case Size::Size_R16:
return 2;
- case Size::Size_8:
+ case Size::Size_R8:
+ case Size::Size_A8:
return 1;
- case Size::Size_10_10_10_2:
+ case Size::Size_A2_B10_G10_R10:
return 4;
- case Size::Size_11_11_10:
+ case Size::Size_B10_G11_R11:
return 4;
default:
ASSERT(false);
@@ -245,34 +678,36 @@ public:
std::string SizeString() const {
switch (size) {
- case Size::Size_32_32_32_32:
+ case Size::Size_R32_G32_B32_A32:
return "32_32_32_32";
- case Size::Size_32_32_32:
+ case Size::Size_R32_G32_B32:
return "32_32_32";
- case Size::Size_16_16_16_16:
+ case Size::Size_R16_G16_B16_A16:
return "16_16_16_16";
- case Size::Size_32_32:
+ case Size::Size_R32_G32:
return "32_32";
- case Size::Size_16_16_16:
+ case Size::Size_R16_G16_B16:
return "16_16_16";
- case Size::Size_8_8_8_8:
+ case Size::Size_R8_G8_B8_A8:
return "8_8_8_8";
- case Size::Size_16_16:
+ case Size::Size_R16_G16:
return "16_16";
- case Size::Size_32:
+ case Size::Size_R32:
return "32";
- case Size::Size_8_8_8:
+ case Size::Size_R8_G8_B8:
return "8_8_8";
- case Size::Size_8_8:
+ case Size::Size_R8_G8:
+ case Size::Size_G8_R8:
return "8_8";
- case Size::Size_16:
+ case Size::Size_R16:
return "16";
- case Size::Size_8:
+ case Size::Size_R8:
+ case Size::Size_A8:
return "8";
- case Size::Size_10_10_10_2:
- return "10_10_10_2";
- case Size::Size_11_11_10:
- return "11_11_10";
+ case Size::Size_A2_B10_G10_R10:
+ return "2_10_10_10";
+ case Size::Size_B10_G11_R11:
+ return "10_11_12";
default:
ASSERT(false);
return {};
@@ -281,17 +716,19 @@ public:
std::string TypeString() const {
switch (type) {
- case Type::SignedNorm:
+ case Type::UnusedEnumDoNotUseBecauseItWillGoAway:
+ return "Unused";
+ case Type::SNorm:
return "SNORM";
- case Type::UnsignedNorm:
+ case Type::UNorm:
return "UNORM";
- case Type::SignedInt:
+ case Type::SInt:
return "SINT";
- case Type::UnsignedInt:
+ case Type::UInt:
return "UINT";
- case Type::UnsignedScaled:
+ case Type::UScaled:
return "USCALED";
- case Type::SignedScaled:
+ case Type::SScaled:
return "SSCALED";
case Type::Float:
return "FLOAT";
@@ -301,7 +738,7 @@ public:
}
bool IsNormalized() const {
- return (type == Type::SignedNorm) || (type == Type::UnsignedNorm);
+ return (type == Type::SNorm) || (type == Type::UNorm);
}
bool IsValid() const {
@@ -312,6 +749,7 @@ public:
return hex < other.hex;
}
};
+ static_assert(sizeof(VertexAttribute) == 0x4);
struct MsaaSampleLocation {
union {
@@ -342,9 +780,96 @@ public:
}
};
- enum class DepthMode : u32 {
- MinusOneToOne = 0,
- ZeroToOne = 1,
+ struct MultisampleCoverageToColor {
+ union {
+ BitField<0, 1, u32> enable;
+ BitField<4, 3, u32> target;
+ };
+ };
+
+ struct DecompressZetaSurface {
+ union {
+ BitField<0, 1, u32> z_enable;
+ BitField<4, 1, u32> stencil_enable;
+ };
+ };
+
+ struct ZetaSparse {
+ enum class UnmappedCompare : u32 {
+ Unmapped = 0,
+ FailAlways = 1,
+ };
+ union {
+ BitField<0, 1, u32> enable;
+ BitField<1, 1, UnmappedCompare> unmapped_compare;
+ };
+ };
+
+ struct RtControl {
+ union {
+ BitField<0, 4, u32> count;
+ BitField<4, 3, u32> target0;
+ BitField<7, 3, u32> target1;
+ BitField<10, 3, u32> target2;
+ BitField<13, 3, u32> target3;
+ BitField<16, 3, u32> target4;
+ BitField<19, 3, u32> target5;
+ BitField<22, 3, u32> target6;
+ BitField<25, 3, u32> target7;
+ };
+
+ u32 Map(std::size_t index) const {
+ const std::array<u32, NumRenderTargets> maps{target0, target1, target2, target3,
+ target4, target5, target6, target7};
+ ASSERT(index < maps.size());
+ return maps[index];
+ }
+ };
+
+ struct CompressionThresholdSamples {
+ u32 samples;
+
+ u32 Samples() {
+ if (samples == 0) {
+ return 0;
+ }
+ return 1 << (samples - 1);
+ }
+ };
+
+ struct PixelShaderInterlockControl {
+ enum class TileMode : u32 {
+ NoConflictDetect = 0,
+ DetectSampleConflict = 1,
+ DetectPixelConflict = 2,
+ };
+ enum class TileSize : u32 {
+ Size_16x16 = 0,
+ Size_8x8 = 1,
+ };
+ enum class FragmentOrder : u32 {
+ FragmentOrdered = 0,
+ FragmentUnordered = 1,
+ };
+ union {
+ BitField<0, 2, TileMode> tile_mode;
+ BitField<2, 1, TileSize> tile_size;
+ BitField<3, 1, FragmentOrder> fragment_order;
+ };
+ };
+
+ struct ZetaSize {
+ enum class DimensionControl : u32 {
+ DepthDefinesArray = 0,
+ ArraySizeOne = 1,
+ };
+
+ u32 width;
+ u32 height;
+ union {
+ BitField<0, 16, u32> depth;
+ BitField<16, 1, DimensionControl> dim_control;
+ };
};
enum class PrimitiveTopology : u32 {
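
The RtControl struct earlier in this hunk packs the render-target count and eight
3-bit remap slots into one register word, and Map() unpacks the physical target
for a logical MRT index. A stand-alone sketch of that bit layout, assuming plain
shifts that mirror the BitField offsets (not yuzu code):

    #include <cassert>
    #include <cstdint>

    // count lives in bits 0-3; target N occupies the 3 bits starting at 4 + 3*N.
    constexpr std::uint32_t RtCount(std::uint32_t raw) {
        return raw & 0xF;
    }
    constexpr std::uint32_t RtTarget(std::uint32_t raw, std::size_t index) {
        return (raw >> (4 + 3 * index)) & 0x7;
    }

    int main() {
        const std::uint32_t raw = 2U | (1U << 4) | (0U << 7); // count=2, map 0->1, 1->0
        assert(RtCount(raw) == 2);
        assert(RtTarget(raw, 0) == 1);
        assert(RtTarget(raw, 1) == 0);
        return 0;
    }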
@@ -358,15 +883,21 @@ public:
Quads = 0x7,
QuadStrip = 0x8,
Polygon = 0x9,
- LinesAdjacency = 0xa,
- LineStripAdjacency = 0xb,
- TrianglesAdjacency = 0xc,
- TriangleStripAdjacency = 0xd,
- Patches = 0xe,
+ LinesAdjacency = 0xA,
+ LineStripAdjacency = 0xB,
+ TrianglesAdjacency = 0xC,
+ TriangleStripAdjacency = 0xD,
+ Patches = 0xE,
+ };
+
+ struct VertexArray {
+ union {
+ BitField<0, 16, u32> start;
+ BitField<16, 12, u32> count;
+ BitField<28, 3, PrimitiveTopology> topology;
+ };
};
- // Constants as from NVC0_3D_UNK1970_D3D
- // https://gitlab.freedesktop.org/mesa/mesa/-/blob/main/src/gallium/drivers/nouveau/nvc0/nvc0_3d.xml.h#L1598
enum class PrimitiveTopologyOverride : u32 {
None = 0x0,
Points = 0x1,
@@ -374,11 +905,32 @@ public:
LineStrip = 0x3,
Triangles = 0x4,
TriangleStrip = 0x5,
- LinesAdjacency = 0xa,
- LineStripAdjacency = 0xb,
- TrianglesAdjacency = 0xc,
- TriangleStripAdjacency = 0xd,
- Patches = 0xe,
+ LinesAdjacency = 0xA,
+ LineStripAdjacency = 0xB,
+ TrianglesAdjacency = 0xC,
+ TriangleStripAdjacency = 0xD,
+ Patches = 0xE,
+
+ LegacyPoints = 0x1001,
+ LegacyIndexedLines = 0x1002,
+ LegacyIndexedTriangles = 0x1003,
+ LegacyLines = 0x100F,
+ LegacyLineStrip = 0x1010,
+ LegacyIndexedLineStrip = 0x1011,
+ LegacyTriangles = 0x1012,
+ LegacyTriangleStrip = 0x1013,
+ LegacyIndexedTriangleStrip = 0x1014,
+ LegacyTriangleFan = 0x1015,
+ LegacyIndexedTriangleFan = 0x1016,
+ LegacyTriangleFanImm = 0x1017,
+ LegacyLinesImm = 0x1018,
+ LegacyIndexedTriangles2 = 0x101A,
+ LegacyIndexedLines2 = 0x101B,
+ };
+
+ enum class DepthMode : u32 {
+ MinusOneToOne = 0,
+ ZeroToOne = 1,
};
enum class IndexFormat : u32 {
@@ -388,183 +940,143 @@ public:
};
enum class ComparisonOp : u32 {
- // These values are used by Nouveau and most games, they correspond to the OpenGL token
- // values for these operations.
- Never = 0x200,
- Less = 0x201,
- Equal = 0x202,
- LessEqual = 0x203,
- Greater = 0x204,
- NotEqual = 0x205,
- GreaterEqual = 0x206,
- Always = 0x207,
-
- // These values are used by some games, they seem to be NV04 values.
- NeverOld = 1,
- LessOld = 2,
- EqualOld = 3,
- LessEqualOld = 4,
- GreaterOld = 5,
- NotEqualOld = 6,
- GreaterEqualOld = 7,
- AlwaysOld = 8,
- };
-
- enum class LogicOperation : u32 {
- Clear = 0x1500,
- And = 0x1501,
- AndReverse = 0x1502,
- Copy = 0x1503,
- AndInverted = 0x1504,
- NoOp = 0x1505,
- Xor = 0x1506,
- Or = 0x1507,
- Nor = 0x1508,
- Equiv = 0x1509,
- Invert = 0x150A,
- OrReverse = 0x150B,
- CopyInverted = 0x150C,
- OrInverted = 0x150D,
- Nand = 0x150E,
- Set = 0x150F,
- };
-
- enum class StencilOp : u32 {
- Keep = 1,
- Zero = 2,
- Replace = 3,
- Incr = 4,
- Decr = 5,
- Invert = 6,
- IncrWrap = 7,
- DecrWrap = 8,
- KeepOGL = 0x1E00,
- ZeroOGL = 0,
- ReplaceOGL = 0x1E01,
- IncrOGL = 0x1E02,
- DecrOGL = 0x1E03,
- InvertOGL = 0x150A,
- IncrWrapOGL = 0x8507,
- DecrWrapOGL = 0x8508,
- };
-
- enum class CounterReset : u32 {
- SampleCnt = 0x01,
- Unk02 = 0x02,
- Unk03 = 0x03,
- Unk04 = 0x04,
- EmittedPrimitives = 0x10, // Not tested
- Unk11 = 0x11,
- Unk12 = 0x12,
- Unk13 = 0x13,
- Unk15 = 0x15,
- Unk16 = 0x16,
- Unk17 = 0x17,
- Unk18 = 0x18,
- Unk1A = 0x1A,
- Unk1B = 0x1B,
- Unk1C = 0x1C,
- Unk1D = 0x1D,
- Unk1E = 0x1E,
- GeneratedPrimitives = 0x1F,
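+            // The _D3D values are the NV04-style indices used by some games; the _GL values
+            // match the OpenGL token values.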
+ Never_D3D = 1,
+ Less_D3D = 2,
+ Equal_D3D = 3,
+ LessEqual_D3D = 4,
+ Greater_D3D = 5,
+ NotEqual_D3D = 6,
+ GreaterEqual_D3D = 7,
+ Always_D3D = 8,
+
+ Never_GL = 0x200,
+ Less_GL = 0x201,
+ Equal_GL = 0x202,
+ LessEqual_GL = 0x203,
+ Greater_GL = 0x204,
+ NotEqual_GL = 0x205,
+ GreaterEqual_GL = 0x206,
+ Always_GL = 0x207,
+ };
+
+ enum class ClearReport : u32 {
+ ZPassPixelCount = 0x01,
+ ZCullStats = 0x02,
+            StreamingPrimitivesNeededMinusSucceeded = 0x03,
+ AlphaBetaClocks = 0x04,
+ StreamingPrimitivesSucceeded = 0x10,
+ StreamingPrimitivesNeeded = 0x11,
+ VerticesGenerated = 0x12,
+ PrimitivesGenerated = 0x13,
+ VertexShaderInvocations = 0x15,
+ TessellationInitInvocations = 0x16,
+ TessellationShaderInvocations = 0x17,
+ TessellationShaderPrimitivesGenerated = 0x18,
+ GeometryShaderInvocations = 0x1A,
+ GeometryShaderPrimitivesGenerated = 0x1B,
+ ClipperInvocations = 0x1C,
+ ClipperPrimitivesGenerated = 0x1D,
+ PixelShaderInvocations = 0x1E,
+ VtgPrimitivesOut = 0x1F,
};
enum class FrontFace : u32 {
- ClockWise = 0x0900,
- CounterClockWise = 0x0901,
+ ClockWise = 0x900,
+ CounterClockWise = 0x901,
};
enum class CullFace : u32 {
- Front = 0x0404,
- Back = 0x0405,
- FrontAndBack = 0x0408,
+ Front = 0x404,
+ Back = 0x405,
+ FrontAndBack = 0x408,
};
struct Blend {
enum class Equation : u32 {
- Add = 1,
- Subtract = 2,
- ReverseSubtract = 3,
- Min = 4,
- Max = 5,
-
- // These values are used by Nouveau and some games.
- AddGL = 0x8006,
- MinGL = 0x8007,
- MaxGL = 0x8008,
- SubtractGL = 0x800a,
- ReverseSubtractGL = 0x800b
+ Add_D3D = 1,
+ Subtract_D3D = 2,
+ ReverseSubtract_D3D = 3,
+ Min_D3D = 4,
+ Max_D3D = 5,
+
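+                // The _GL suffixed values here and in Factor below are the OpenGL token values
+                // used by Nouveau and some games.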
+ Add_GL = 0x8006,
+ Min_GL = 0x8007,
+ Max_GL = 0x8008,
+ Subtract_GL = 0x800A,
+ ReverseSubtract_GL = 0x800B
};
enum class Factor : u32 {
- Zero = 0x1,
- One = 0x2,
- SourceColor = 0x3,
- OneMinusSourceColor = 0x4,
- SourceAlpha = 0x5,
- OneMinusSourceAlpha = 0x6,
- DestAlpha = 0x7,
- OneMinusDestAlpha = 0x8,
- DestColor = 0x9,
- OneMinusDestColor = 0xa,
- SourceAlphaSaturate = 0xb,
- Source1Color = 0x10,
- OneMinusSource1Color = 0x11,
- Source1Alpha = 0x12,
- OneMinusSource1Alpha = 0x13,
- ConstantColor = 0x61,
- OneMinusConstantColor = 0x62,
- ConstantAlpha = 0x63,
- OneMinusConstantAlpha = 0x64,
-
- // These values are used by Nouveau and some games.
- ZeroGL = 0x4000,
- OneGL = 0x4001,
- SourceColorGL = 0x4300,
- OneMinusSourceColorGL = 0x4301,
- SourceAlphaGL = 0x4302,
- OneMinusSourceAlphaGL = 0x4303,
- DestAlphaGL = 0x4304,
- OneMinusDestAlphaGL = 0x4305,
- DestColorGL = 0x4306,
- OneMinusDestColorGL = 0x4307,
- SourceAlphaSaturateGL = 0x4308,
- ConstantColorGL = 0xc001,
- OneMinusConstantColorGL = 0xc002,
- ConstantAlphaGL = 0xc003,
- OneMinusConstantAlphaGL = 0xc004,
- Source1ColorGL = 0xc900,
- OneMinusSource1ColorGL = 0xc901,
- Source1AlphaGL = 0xc902,
- OneMinusSource1AlphaGL = 0xc903,
+ Zero_D3D = 0x1,
+ One_D3D = 0x2,
+ SourceColor_D3D = 0x3,
+ OneMinusSourceColor_D3D = 0x4,
+ SourceAlpha_D3D = 0x5,
+ OneMinusSourceAlpha_D3D = 0x6,
+ DestAlpha_D3D = 0x7,
+ OneMinusDestAlpha_D3D = 0x8,
+ DestColor_D3D = 0x9,
+ OneMinusDestColor_D3D = 0xA,
+ SourceAlphaSaturate_D3D = 0xB,
+ BothSourceAlpha_D3D = 0xC,
+ OneMinusBothSourceAlpha_D3D = 0xD,
+ BlendFactor_D3D = 0xE,
+ OneMinusBlendFactor_D3D = 0xF,
+ Source1Color_D3D = 0x10,
+ OneMinusSource1Color_D3D = 0x11,
+ Source1Alpha_D3D = 0x12,
+ OneMinusSource1Alpha_D3D = 0x13,
+
+ Zero_GL = 0x4000,
+ One_GL = 0x4001,
+ SourceColor_GL = 0x4300,
+ OneMinusSourceColor_GL = 0x4301,
+ SourceAlpha_GL = 0x4302,
+ OneMinusSourceAlpha_GL = 0x4303,
+ DestAlpha_GL = 0x4304,
+ OneMinusDestAlpha_GL = 0x4305,
+ DestColor_GL = 0x4306,
+ OneMinusDestColor_GL = 0x4307,
+ SourceAlphaSaturate_GL = 0x4308,
+ ConstantColor_GL = 0xC001,
+ OneMinusConstantColor_GL = 0xC002,
+ ConstantAlpha_GL = 0xC003,
+ OneMinusConstantAlpha_GL = 0xC004,
+ Source1Color_GL = 0xC900,
+ OneMinusSource1Color_GL = 0xC901,
+ Source1Alpha_GL = 0xC902,
+ OneMinusSource1Alpha_GL = 0xC903,
};
u32 separate_alpha;
- Equation equation_rgb;
- Factor factor_source_rgb;
- Factor factor_dest_rgb;
- Equation equation_a;
- Factor factor_source_a;
- Factor factor_dest_a;
- INSERT_PADDING_WORDS_NOINIT(1);
+ Equation color_op;
+ Factor color_source;
+ Factor color_dest;
+ Equation alpha_op;
+ Factor alpha_source;
+ u32 enable_global_color_key;
+ Factor alpha_dest;
+
+ u32 single_rop_control_enable;
+ u32 enable[NumRenderTargets];
};
- enum class TessellationPrimitive : u32 {
- Isolines = 0,
- Triangles = 1,
- Quads = 2,
- };
-
- enum class TessellationSpacing : u32 {
- Equal = 0,
- FractionalOdd = 1,
- FractionalEven = 2,
+ struct BlendPerTarget {
+ u32 separate_alpha;
+ Blend::Equation color_op;
+ Blend::Factor color_source;
+ Blend::Factor color_dest;
+ Blend::Equation alpha_op;
+ Blend::Factor alpha_source;
+ Blend::Factor alpha_dest;
+ INSERT_PADDING_BYTES_NOINIT(0x4);
};
+ static_assert(sizeof(BlendPerTarget) == 0x20);
enum class PolygonMode : u32 {
- Point = 0x1b00,
- Line = 0x1b01,
- Fill = 0x1b02,
+ Point = 0x1B00,
+ Line = 0x1B01,
+ Fill = 0x1B02,
};
enum class ShadowRamControl : u32 {
@@ -589,18 +1101,22 @@ public:
NegativeW = 7,
};
- enum class SamplerIndex : u32 {
+ enum class SamplerBinding : u32 {
Independently = 0,
- ViaHeaderIndex = 1,
+ ViaHeaderBinding = 1,
};
struct TileMode {
+ enum class DimensionControl : u32 {
+ DepthDefinesArray = 0,
+ DepthDefinesDepth = 1,
+ };
union {
BitField<0, 4, u32> block_width;
BitField<4, 4, u32> block_height;
BitField<8, 4, u32> block_depth;
BitField<12, 1, u32> is_pitch_linear;
- BitField<16, 1, u32> is_3d;
+ BitField<16, 1, DimensionControl> dim_control;
};
};
static_assert(sizeof(TileMode) == 4);
@@ -616,23 +1132,25 @@ public:
BitField<0, 16, u32> depth;
BitField<16, 1, u32> volume;
};
- u32 layer_stride;
+ u32 array_pitch;
u32 base_layer;
- INSERT_PADDING_WORDS_NOINIT(7);
+ u32 mark_ieee_clean;
+ INSERT_PADDING_BYTES_NOINIT(0x18);
GPUVAddr Address() const {
return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
address_low);
}
};
+ static_assert(sizeof(RenderTargetConfig) == 0x40);
struct ColorMask {
union {
u32 raw;
- BitField<0, 4, u32> R;
- BitField<4, 4, u32> G;
- BitField<8, 4, u32> B;
- BitField<12, 4, u32> A;
+ BitField<0, 1, u32> R;
+ BitField<4, 1, u32> G;
+ BitField<8, 1, u32> B;
+ BitField<12, 1, u32> A;
};
};
@@ -643,6 +1161,7 @@ public:
f32 translate_x;
f32 translate_y;
f32 translate_z;
+
union {
u32 raw;
BitField<0, 3, ViewportSwizzle> x;
@@ -650,7 +1169,11 @@ public:
BitField<8, 3, ViewportSwizzle> z;
BitField<12, 3, ViewportSwizzle> w;
} swizzle;
- INSERT_PADDING_WORDS_NOINIT(1);
+
+ union {
+ BitField<0, 5, u32> x;
+ BitField<8, 5, u32> y;
+ } snap_grid_precision;
Common::Rectangle<f32> GetRect() const {
return {
@@ -677,21 +1200,14 @@ public:
return translate_y + std::fabs(scale_y) - GetY();
}
};
+ static_assert(sizeof(ViewportTransform) == 0x20);
- struct ScissorTest {
- u32 enable;
- union {
- BitField<0, 16, u32> min_x;
- BitField<16, 16, u32> max_x;
- };
- union {
- BitField<0, 16, u32> min_y;
- BitField<16, 16, u32> max_y;
+ struct Viewport {
+ enum class PixelCenter : u32 {
+ HalfIntegers = 0,
+ Integers = 1,
};
- u32 fill;
- };
- struct ViewPort {
union {
BitField<0, 16, u32> x;
BitField<16, 16, u32> width;
@@ -703,726 +1219,1816 @@ public:
float depth_range_near;
float depth_range_far;
};
+ static_assert(sizeof(Viewport) == 0x10);
- struct TransformFeedbackBinding {
- u32 buffer_enable;
- u32 address_high;
- u32 address_low;
- s32 buffer_size;
- s32 buffer_offset;
- INSERT_PADDING_WORDS_NOINIT(3);
-
- GPUVAddr Address() const {
- return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
- address_low);
- }
+ struct Window {
+ union {
+ BitField<0, 16, u32> x_min;
+ BitField<16, 16, u32> x_max;
+ };
+ union {
+ BitField<0, 16, u32> y_min;
+ BitField<16, 16, u32> y_max;
+ };
};
- static_assert(sizeof(TransformFeedbackBinding) == 32);
+ static_assert(sizeof(Window) == 0x8);
- struct TransformFeedbackLayout {
- u32 stream;
- u32 varying_count;
- u32 stride;
- INSERT_PADDING_WORDS_NOINIT(1);
+ struct ClipIdExtent {
+ union {
+ BitField<0, 16, u32> x;
+ BitField<16, 16, u32> width;
+ };
+ union {
+ BitField<0, 16, u32> y;
+ BitField<16, 16, u32> height;
+ };
+ };
+ static_assert(sizeof(ClipIdExtent) == 0x8);
+
+ enum class VisibleCallLimit : u32 {
+ Limit0 = 0,
+ Limit1 = 1,
+ Limit2 = 2,
+ Limit4 = 3,
+ Limit8 = 4,
+ Limit16 = 5,
+ Limit32 = 6,
+ Limit64 = 7,
+ Limit128 = 8,
+ None = 15,
};
- static_assert(sizeof(TransformFeedbackLayout) == 16);
-
- bool IsShaderConfigEnabled(std::size_t index) const {
- // The VertexB is always enabled.
- if (index == static_cast<std::size_t>(Regs::ShaderProgram::VertexB)) {
- return true;
- }
- return shader_config[index].enable != 0;
- }
-
- bool IsShaderConfigEnabled(Regs::ShaderProgram type) const {
- return IsShaderConfigEnabled(static_cast<std::size_t>(type));
- }
-
- union {
- struct {
- INSERT_PADDING_WORDS_NOINIT(0x44);
-
- u32 wait_for_idle;
- struct {
- u32 upload_address;
- u32 data;
- u32 entry;
- u32 bind;
- } macros;
+ struct StatisticsCounter {
+ union {
+ BitField<0, 1, u32> da_vertices;
+ BitField<1, 1, u32> da_primitives;
+ BitField<2, 1, u32> vs_invocations;
+ BitField<3, 1, u32> gs_invocations;
+ BitField<4, 1, u32> gs_primitives;
+ BitField<5, 1, u32> streaming_primitives_succeeded;
+ BitField<6, 1, u32> streaming_primitives_needed;
+ BitField<7, 1, u32> clipper_invocations;
+ BitField<8, 1, u32> clipper_primitives;
+ BitField<9, 1, u32> ps_invocations;
+                BitField<10, 1, u32> vtg_primitives_out;
+                BitField<11, 1, u32> ti_invocations;
+                BitField<12, 1, u32> ts_invocations;
+                BitField<13, 1, u32> ts_primitives;
+                BitField<14, 1, u32> total_streaming_primitives_needed_succeeded;
+                BitField<15, 1, u32> alpha_beta_clocks;
+ };
+ };
- ShadowRamControl shadow_ram_control;
+ struct ClearRect {
+ union {
+ BitField<0, 16, u32> x_min;
+ BitField<16, 16, u32> x_max;
+ };
+ union {
+ BitField<0, 16, u32> y_min;
+ BitField<16, 16, u32> y_max;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x16);
+ struct VertexBuffer {
+ u32 first;
+ u32 count;
+ };
- Upload::Registers upload;
- struct {
- union {
- BitField<0, 1, u32> linear;
- };
- } exec_upload;
+ struct InvalidateShaderCacheNoWFI {
+ union {
+ BitField<0, 1, u32> instruction;
+ BitField<4, 1, u32> global_data;
+ BitField<12, 1, u32> constant;
+ };
+ };
- u32 data_upload;
+ struct ZCullSerialization {
+ enum class Applied : u32 {
+ Always = 0,
+ LateZ = 1,
+ OutOfGamutZ = 2,
+ LateZOrOutOfGamutZ = 3,
+ };
+ union {
+ BitField<0, 1, u32> enable;
+ BitField<4, 2, Applied> applied;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x16);
+ struct ZCullDirFormat {
+ enum class Zdir : u32 {
+ Less = 0,
+ Greater = 1,
+ };
+ enum class Zformat : u32 {
+ MSB = 0,
+ FP = 1,
+ Ztrick = 2,
+ Zf32 = 3,
+ };
- u32 force_early_fragment_tests;
+ union {
+ BitField<0, 16, Zdir> dir;
+ BitField<16, 16, Zformat> format;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x2D);
+ struct IteratedBlend {
+ union {
+ BitField<0, 1, u32> enable;
+ BitField<1, 1, u32> enable_alpha;
+ };
+ u32 pass_count;
+ };
- struct {
- union {
- BitField<0, 16, u32> sync_point;
- BitField<16, 1, u32> unknown;
- BitField<20, 1, u32> increment;
- };
- } sync_info;
+ struct ZCullCriterion {
+ enum class Sfunc : u32 {
+ Never = 0,
+ Less = 1,
+ Equal = 2,
+ LessOrEqual = 3,
+ Greater = 4,
+ NotEqual = 5,
+ GreaterOrEqual = 6,
+ Always = 7,
+ };
- INSERT_PADDING_WORDS_NOINIT(0x15);
+ union {
+ BitField<0, 8, Sfunc> sfunc;
+ BitField<8, 1, u32> no_invalidate;
+ BitField<9, 1, u32> force_match;
+ BitField<16, 8, u32> sref;
+ BitField<24, 8, u32> smask;
+ };
+ };
- union {
- BitField<0, 2, TessellationPrimitive> prim;
- BitField<4, 2, TessellationSpacing> spacing;
- BitField<8, 1, u32> cw;
- BitField<9, 1, u32> connected;
- } tess_mode;
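+        // Appears to encode one instruction of the iterated-blend program: operand pairs
+        // (a, b) and (c, d) are combined by Operation, then swizzled, masked, and either
+        // written to a temporary or passed on.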
+ struct LoadIteratedBlend {
+ enum class Test : u32 {
+ False = 0,
+ True = 1,
+ Equal = 2,
+ NotEqual = 3,
+ LessThan = 4,
+ LessOrEqual = 5,
+ Greater = 6,
+ GreaterOrEqual = 7,
+ };
+ enum class Operation : u32 {
+ AddProducts = 0,
+ SubProducts = 1,
+ Min = 2,
+ Max = 3,
+ Reciprocal = 4,
+ Add = 5,
+ Sub = 6,
+ };
+ enum class OperandA : u32 {
+ SrcRGB = 0,
+ DstRGB = 1,
+ SrcAAA = 2,
+ DstAAA = 3,
+ Temp0_RGB = 4,
+ Temp1_RGB = 5,
+ Temp2_RGB = 6,
+ PBR_RGB = 7,
+ };
+ enum class OperandB : u32 {
+ Zero = 0,
+ One = 1,
+ SrcRGB = 2,
+ SrcAAA = 3,
+ OneMinusSrcAAA = 4,
+ DstRGB = 5,
+ DstAAA = 6,
+ OneMinusDstAAA = 7,
+ Temp0_RGB = 9,
+ Temp1_RGB = 10,
+ Temp2_RGB = 11,
+ PBR_RGB = 12,
+ ConstRGB = 13,
+ ZeroATimesB = 14,
+ };
+ enum class Swizzle : u32 {
+ RGB = 0,
+ GBR = 1,
+ RRR = 2,
+ GGG = 3,
+ BBB = 4,
+ RToA = 5,
+ };
+ enum class WriteMask : u32 {
+ RGB = 0,
+ ROnly = 1,
+ GOnly = 2,
+ BOnly = 3,
+ };
+ enum class Pass : u32 {
+ Temp0 = 0,
+ Temp1 = 1,
+ Temp2 = 2,
+ None = 3,
+ };
- std::array<f32, 4> tess_level_outer;
- std::array<f32, 2> tess_level_inner;
+ u32 instruction_ptr;
+ union {
+ BitField<0, 3, Test> test;
+ BitField<3, 3, Operation> operation;
+ BitField<6, 3, u32> const_input;
+ BitField<9, 3, OperandA> operand_a;
+ BitField<12, 4, OperandB> operand_b;
+ BitField<16, 3, OperandA> operand_c;
+ BitField<19, 4, OperandB> operand_d;
+ BitField<23, 3, Swizzle> output_swizzle;
+ BitField<26, 2, WriteMask> output_mask;
+ BitField<28, 2, Pass> output_pass;
+ BitField<31, 1, u32> test_enabled;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x10);
+ struct ScissorTest {
+ u32 enable;
+ union {
+ BitField<0, 16, u32> min_x;
+ BitField<16, 16, u32> max_x;
+ };
+ union {
+ BitField<0, 16, u32> min_y;
+ BitField<16, 16, u32> max_y;
+ };
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ };
+ static_assert(sizeof(ScissorTest) == 0x10);
- u32 rasterize_enable;
+ struct VPCPerf {
+ union {
+ BitField<0, 8, u32> culled_small_lines;
+ BitField<8, 8, u32> culled_small_triangles;
+ BitField<16, 8, u32> nonculled_lines_and_points;
+ BitField<24, 8, u32> nonculled_triangles;
+ };
+ };
- std::array<TransformFeedbackBinding, NumTransformFeedbackBuffers> tfb_bindings;
+ struct ConstantColorRendering {
+ u32 enabled;
+ u32 red;
+ u32 green;
+ u32 blue;
+ u32 alpha;
+ };
- INSERT_PADDING_WORDS_NOINIT(0xC0);
+ struct VertexStreamSubstitute {
+ u32 address_high;
+ u32 address_low;
- std::array<TransformFeedbackLayout, NumTransformFeedbackBuffers> tfb_layouts;
+ GPUVAddr Address() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
+ };
- INSERT_PADDING_WORDS_NOINIT(0x1);
+ struct VTGWarpWatermarks {
+ union {
+ BitField<0, 16, u32> low;
+ BitField<16, 16, u32> high;
+ };
+ };
- u32 tfb_enabled;
+ struct SampleMask {
+ struct Target {
+ union {
+ BitField<0, 1, u32> raster_out;
+ BitField<4, 1, u32> color_target;
+ };
+ u32 target;
+ };
+ struct Pos {
+ u32 x0_y0;
+ u32 x1_y0;
+ u32 x0_y1;
+ u32 x1_y1;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x2E);
+ enum class NonMultisampledZ : u32 {
+ PerSample = 0,
+ PixelCenter = 1,
+ };
- std::array<RenderTargetConfig, NumRenderTargets> rt;
+ enum class TIRMode : u32 {
+ Disabled = 0,
+ RasterNTargetM = 1,
+ };
- std::array<ViewportTransform, NumViewports> viewport_transform;
+ enum class AntiAliasRaster : u32 {
+ Mode1x1 = 0,
+ Mode2x2 = 2,
+ Mode4x2_D3D = 4,
+ Mode2x1_D3D = 5,
+ Mode4x4 = 6,
+ };
- std::array<ViewPort, NumViewports> viewports;
+ struct SurfaceClipIDMemory {
+ u32 address_high;
+ u32 address_low;
- INSERT_PADDING_WORDS_NOINIT(0x1D);
+ GPUVAddr Address() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
+ };
- struct {
- u32 first;
- u32 count;
- } vertex_buffer;
+ struct TIRModulation {
+ enum class Component : u32 {
+ None = 0,
+ RGB = 1,
+ AlphaOnly = 2,
+ RGBA = 3,
+ };
+ enum class Function : u32 {
+ Linear = 0,
+ Table = 1,
+ };
+ Component component;
+ Function function;
+ };
- DepthMode depth_mode;
+ struct Zeta {
+ u32 address_high;
+ u32 address_low;
+ Tegra::DepthFormat format;
+ TileMode tile_mode;
+ u32 array_pitch;
- float clear_color[4];
- float clear_depth;
+ GPUVAddr Address() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
+ };
- INSERT_PADDING_WORDS_NOINIT(0x3);
+ struct SurfaceClip {
+ union {
+ BitField<0, 16, u32> x;
+ BitField<16, 16, u32> width;
+ };
+ union {
+ BitField<0, 16, u32> y;
+ BitField<16, 16, u32> height;
+ };
+ };
- s32 clear_stencil;
+ enum class L2CacheControlPolicy : u32 {
+ First = 0,
+ Normal = 1,
+ Last = 2,
+ };
- INSERT_PADDING_WORDS_NOINIT(0x2);
+ struct L2CacheVAFRequests {
+ union {
+ BitField<0, 1, u32> system_memory_volatile;
+ BitField<4, 2, L2CacheControlPolicy> policy;
+ };
+ };
- PolygonMode polygon_mode_front;
- PolygonMode polygon_mode_back;
+ enum class ViewportMulticast : u32 {
+ ViewportOrder = 0,
+ PrimitiveOrder = 1,
+ };
- INSERT_PADDING_WORDS_NOINIT(0x3);
+ struct TIRModulationCoeff {
+ union {
+ BitField<0, 8, u32> table_v0;
+ BitField<8, 8, u32> table_v1;
+ BitField<16, 8, u32> table_v2;
+ BitField<24, 8, u32> table_v3;
+ };
+ };
+ static_assert(sizeof(TIRModulationCoeff) == 0x4);
- u32 polygon_offset_point_enable;
- u32 polygon_offset_line_enable;
- u32 polygon_offset_fill_enable;
+ struct ReduceColorThreshold {
+ union {
+ BitField<0, 8, u32> all_hit_once;
+ BitField<16, 8, u32> all_covered;
+ };
+ };
- u32 patch_vertices;
+ struct ClearControl {
+ union {
+ BitField<0, 1, u32> respect_stencil_mask;
+ BitField<4, 1, u32> use_clear_rect;
+ BitField<8, 1, u32> use_scissor;
+ BitField<12, 1, u32> use_viewport_clip0;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x4);
+ struct L2CacheRopNonInterlockedReads {
+ union {
+ BitField<4, 2, L2CacheControlPolicy> policy;
+ };
+ };
- u32 fragment_barrier;
+ struct VertexOutputAttributeSkipMasks {
+ struct Attributes {
+ union {
+ BitField<0, 1, u32> attribute0_comp0;
+ BitField<1, 1, u32> attribute0_comp1;
+ BitField<2, 1, u32> attribute0_comp2;
+ BitField<3, 1, u32> attribute0_comp3;
+ BitField<4, 1, u32> attribute1_comp0;
+ BitField<5, 1, u32> attribute1_comp1;
+ BitField<6, 1, u32> attribute1_comp2;
+ BitField<7, 1, u32> attribute1_comp3;
+ BitField<8, 1, u32> attribute2_comp0;
+ BitField<9, 1, u32> attribute2_comp1;
+ BitField<10, 1, u32> attribute2_comp2;
+ BitField<11, 1, u32> attribute2_comp3;
+ BitField<12, 1, u32> attribute3_comp0;
+ BitField<13, 1, u32> attribute3_comp1;
+ BitField<14, 1, u32> attribute3_comp2;
+ BitField<15, 1, u32> attribute3_comp3;
+ BitField<16, 1, u32> attribute4_comp0;
+ BitField<17, 1, u32> attribute4_comp1;
+ BitField<18, 1, u32> attribute4_comp2;
+ BitField<19, 1, u32> attribute4_comp3;
+ BitField<20, 1, u32> attribute5_comp0;
+ BitField<21, 1, u32> attribute5_comp1;
+ BitField<22, 1, u32> attribute5_comp2;
+ BitField<23, 1, u32> attribute5_comp3;
+ BitField<24, 1, u32> attribute6_comp0;
+ BitField<25, 1, u32> attribute6_comp1;
+ BitField<26, 1, u32> attribute6_comp2;
+ BitField<27, 1, u32> attribute6_comp3;
+ BitField<28, 1, u32> attribute7_comp0;
+ BitField<29, 1, u32> attribute7_comp1;
+ BitField<30, 1, u32> attribute7_comp2;
+ BitField<31, 1, u32> attribute7_comp3;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x7);
+ std::array<Attributes, 2> a;
+ std::array<Attributes, 2> b;
+ };
- std::array<ScissorTest, NumViewports> scissor_test;
+ struct TIRControl {
+ union {
+ BitField<0, 1, u32> z_pass_pixel_count_use_raster_samples;
+                BitField<1, 1, u32> reduce_coverage;
+                BitField<4, 1, u32> alpha_coverage_use_raster_samples;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x15);
+ enum class FillViaTriangleMode : u32 {
+ Disabled = 0,
+ FillAll = 1,
+ FillBoundingBox = 2,
+ };
- s32 stencil_back_func_ref;
- u32 stencil_back_mask;
- u32 stencil_back_func_mask;
+ struct PsTicketDispenserValue {
+ union {
+ BitField<0, 8, u32> index;
+ BitField<8, 16, u32> value;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x5);
+ struct RegisterWatermarks {
+ union {
+ BitField<0, 16, u32> low;
+ BitField<16, 16, u32> high;
+ };
+ };
- u32 invalidate_texture_data_cache;
+ enum class InvalidateCacheLines : u32 {
+ All = 0,
+ One = 1,
+ };
- INSERT_PADDING_WORDS_NOINIT(0x1);
+ struct InvalidateTextureDataCacheNoWfi {
+ union {
+ BitField<0, 1, InvalidateCacheLines> lines;
+ BitField<4, 22, u32> tag;
+ };
+ };
- u32 tiled_cache_barrier;
+ struct ZCullRegionEnable {
+ union {
+ BitField<0, 1, u32> enable_z;
+                BitField<1, 1, u32> rect_clear;
+                BitField<2, 1, u32> use_rt_array_index;
+                BitField<3, 1, u32> make_conservative;
+                BitField<4, 1, u32> enable_stencil;
+                BitField<5, 16, u32> rt_array_index;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x4);
+ enum class FillMode : u32 {
+ Point = 1,
+ Wireframe = 2,
+ Solid = 3,
+ };
- u32 color_mask_common;
+ enum class ShadeMode : u32 {
+ Flat = 0x1,
+ Gouraud = 0x2,
+ GL_Flat = 0x1D00,
+ GL_Smooth = 0x1D01,
+ };
- INSERT_PADDING_WORDS_NOINIT(0x2);
+ enum class AlphaToCoverageDither : u32 {
+ Footprint_1x1 = 0,
+ Footprint_2x2 = 1,
+ Footprint_1x1_Virtual = 2,
+ };
- f32 depth_bounds[2];
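+        // Inline index data packs four 8-bit indices into each pushed word.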
+ struct InlineIndex4x8 {
+ union {
+ BitField<0, 30, u32> count;
+ BitField<30, 2, u32> start;
+ };
+ union {
+ BitField<0, 8, u32> index0;
+ BitField<8, 8, u32> index1;
+ BitField<16, 8, u32> index2;
+ BitField<24, 8, u32> index3;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x2);
+ enum class D3DCullMode : u32 {
+ None = 0,
+ CW = 1,
+ CCW = 2,
+ };
- u32 rt_separate_frag_data;
+ struct BlendColor {
+ f32 r;
+ f32 g;
+ f32 b;
+ f32 a;
+ };
- INSERT_PADDING_WORDS_NOINIT(0x1);
+ struct StencilOp {
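+            // As with ComparisonOp, each operation exists both as a small D3D-style index and
+            // as an OpenGL token value.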
+ enum class Op : u32 {
+ Keep_D3D = 1,
+ Zero_D3D = 2,
+ Replace_D3D = 3,
+ IncrSaturate_D3D = 4,
+ DecrSaturate_D3D = 5,
+ Invert_D3D = 6,
+ Incr_D3D = 7,
+ Decr_D3D = 8,
+
+ Keep_GL = 0x1E00,
+ Zero_GL = 0,
+ Replace_GL = 0x1E01,
+ IncrSaturate_GL = 0x1E02,
+ DecrSaturate_GL = 0x1E03,
+ Invert_GL = 0x150A,
+ Incr_GL = 0x8507,
+ Decr_GL = 0x8508,
+ };
- u32 multisample_raster_enable;
- u32 multisample_raster_samples;
- std::array<u32, 4> multisample_sample_mask;
+ Op fail;
+ Op zfail;
+ Op zpass;
+ ComparisonOp func;
+ };
- INSERT_PADDING_WORDS_NOINIT(0x5);
+ struct PsSaturate {
+ // Opposite of DepthMode
+ enum class Depth : u32 {
+ ZeroToOne = 0,
+ MinusOneToOne = 1,
+ };
- struct {
- u32 address_high;
- u32 address_low;
- Tegra::DepthFormat format;
- TileMode tile_mode;
- u32 layer_stride;
+ union {
+ BitField<0, 1, u32> output0_enable;
+ BitField<1, 1, Depth> output0_range;
+ BitField<4, 1, u32> output1_enable;
+ BitField<5, 1, Depth> output1_range;
+ BitField<8, 1, u32> output2_enable;
+ BitField<9, 1, Depth> output2_range;
+ BitField<12, 1, u32> output3_enable;
+ BitField<13, 1, Depth> output3_range;
+ BitField<16, 1, u32> output4_enable;
+ BitField<17, 1, Depth> output4_range;
+ BitField<20, 1, u32> output5_enable;
+ BitField<21, 1, Depth> output5_range;
+ BitField<24, 1, u32> output6_enable;
+ BitField<25, 1, Depth> output6_range;
+ BitField<28, 1, u32> output7_enable;
+ BitField<29, 1, Depth> output7_range;
+ };
- GPUVAddr Address() const {
- return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
- address_low);
- }
- } zeta;
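+            // True when output saturation is enabled for any of the eight color outputs.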
+ bool AnyEnabled() const {
+ return output0_enable || output1_enable || output2_enable || output3_enable ||
+ output4_enable || output5_enable || output6_enable || output7_enable;
+ }
+ };
- struct {
- union {
- BitField<0, 16, u32> x;
- BitField<16, 16, u32> width;
- };
- union {
- BitField<0, 16, u32> y;
- BitField<16, 16, u32> height;
- };
- } render_area;
+ struct WindowOrigin {
+ enum class Mode : u32 {
+ UpperLeft = 0,
+ LowerLeft = 1,
+ };
+ union {
+ BitField<0, 1, Mode> mode;
+ BitField<4, 1, u32> flip_y;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x3F);
+ struct IteratedBlendConstants {
+ u32 r;
+ u32 g;
+ u32 b;
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ };
+ static_assert(sizeof(IteratedBlendConstants) == 0x10);
+ struct UserClip {
+ struct Enable {
union {
- BitField<0, 4, u32> stencil;
- BitField<4, 4, u32> unknown;
- BitField<8, 4, u32> scissor;
- BitField<12, 4, u32> viewport;
- } clear_flags;
-
- INSERT_PADDING_WORDS_NOINIT(0x10);
-
- u32 fill_rectangle;
-
- INSERT_PADDING_WORDS_NOINIT(0x2);
-
- u32 conservative_raster_enable;
-
- INSERT_PADDING_WORDS_NOINIT(0x5);
-
- std::array<VertexAttribute, NumVertexAttributes> vertex_attrib_format;
+ u32 raw;
+ BitField<0, 1, u32> plane0;
+ BitField<1, 1, u32> plane1;
+ BitField<2, 1, u32> plane2;
+ BitField<3, 1, u32> plane3;
+ BitField<4, 1, u32> plane4;
+ BitField<5, 1, u32> plane5;
+ BitField<6, 1, u32> plane6;
+ BitField<7, 1, u32> plane7;
+ };
- std::array<MsaaSampleLocation, 4> multisample_sample_locations;
+ bool AnyEnabled() const {
+ return plane0 || plane1 || plane2 || plane3 || plane4 || plane5 || plane6 ||
+ plane7;
+ }
+ };
- INSERT_PADDING_WORDS_NOINIT(0x2);
+ struct Op {
+ enum class ClipOrCull : u32 {
+ Clip = 0,
+ Cull = 1,
+ };
union {
- BitField<0, 1, u32> enable;
- BitField<4, 3, u32> target;
- } multisample_coverage_to_color;
-
- INSERT_PADDING_WORDS_NOINIT(0x8);
-
- struct {
- union {
- BitField<0, 4, u32> count;
- BitField<4, 3, u32> map_0;
- BitField<7, 3, u32> map_1;
- BitField<10, 3, u32> map_2;
- BitField<13, 3, u32> map_3;
- BitField<16, 3, u32> map_4;
- BitField<19, 3, u32> map_5;
- BitField<22, 3, u32> map_6;
- BitField<25, 3, u32> map_7;
- };
-
- u32 Map(std::size_t index) const {
- const std::array<u32, NumRenderTargets> maps{map_0, map_1, map_2, map_3,
- map_4, map_5, map_6, map_7};
- ASSERT(index < maps.size());
- return maps[index];
- }
- } rt_control;
-
- INSERT_PADDING_WORDS_NOINIT(0x2);
-
- u32 zeta_width;
- u32 zeta_height;
- union {
- BitField<0, 16, u32> zeta_depth;
- BitField<16, 1, u32> zeta_volume;
+ u32 raw;
+ BitField<0, 1, ClipOrCull> plane0;
+ BitField<4, 1, ClipOrCull> plane1;
+ BitField<8, 1, ClipOrCull> plane2;
+ BitField<12, 1, ClipOrCull> plane3;
+ BitField<16, 1, ClipOrCull> plane4;
+ BitField<20, 1, ClipOrCull> plane5;
+ BitField<24, 1, ClipOrCull> plane6;
+ BitField<28, 1, ClipOrCull> plane7;
};
+ };
+ };
- SamplerIndex sampler_index;
+ struct AntiAliasAlphaControl {
+ union {
+ BitField<0, 1, u32> alpha_to_coverage;
+ BitField<4, 1, u32> alpha_to_one;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x2);
+ struct RenderEnable {
+ enum class Override : u32 {
+ UseRenderEnable = 0,
+ AlwaysRender = 1,
+ NeverRender = 2,
+ };
- std::array<u32, 8> gp_passthrough_mask;
+ enum class Mode : u32 {
+ False = 0,
+ True = 1,
+ Conditional = 2,
+ IfEqual = 3,
+ IfNotEqual = 4,
+ };
- INSERT_PADDING_WORDS_NOINIT(0x1B);
+ u32 address_high;
+ u32 address_low;
+ Mode mode;
- u32 depth_test_enable;
+ GPUVAddr Address() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
+ };
- INSERT_PADDING_WORDS_NOINIT(0x5);
+ struct TexSampler {
+ u32 address_high;
+ u32 address_low;
+ u32 limit;
- u32 independent_blend_enable;
+ GPUVAddr Address() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
+ };
- u32 depth_write_enabled;
+ struct TexHeader {
+ u32 address_high;
+ u32 address_low;
+ u32 limit;
- u32 alpha_test_enabled;
+ GPUVAddr Address() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
+ };
- INSERT_PADDING_WORDS_NOINIT(0x6);
+ enum class ZCullRegionFormat : u32 {
+ Z_4x4 = 0,
+ ZS_4x4 = 1,
+ Z_4x2 = 2,
+ Z_2x4 = 3,
+ Z_16x8_4x4 = 4,
+ Z_8x8_4x2 = 5,
+ Z_8x8_2x4 = 6,
+ Z_16x16_4x8 = 7,
+ Z_4x8_2x2 = 8,
+ ZS_16x8_4x2 = 9,
+ ZS_16x8_2x4 = 10,
+ ZS_8x8_2x2 = 11,
+ Z_4x8_1x1 = 12,
+ };
+
+ struct RtLayer {
+            enum class Control : u32 {
+ LayerSelectsLayer = 0,
+ GeometryShaderSelectsLayer = 1,
+ };
+
+ union {
+ BitField<0, 16, u32> layer;
+                BitField<16, 1, Control> control;
+ };
+ };
- u32 d3d_cull_mode;
+ struct InlineIndex2x16 {
+ union {
+ BitField<0, 31, u32> count;
+ BitField<31, 1, u32> start_odd;
+ };
+ union {
+ BitField<0, 16, u32> even;
+ BitField<16, 16, u32> odd;
+ };
+ };
- ComparisonOp depth_test_func;
- float alpha_test_ref;
- ComparisonOp alpha_test_func;
- u32 draw_tfb_stride;
- struct {
- float r;
- float g;
- float b;
- float a;
- } blend_color;
+ struct VertexGlobalBaseOffset {
+ u32 address_high;
+ u32 address_low;
- INSERT_PADDING_WORDS_NOINIT(0x4);
+ GPUVAddr Address() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
+ };
- struct {
- u32 separate_alpha;
- Blend::Equation equation_rgb;
- Blend::Factor factor_source_rgb;
- Blend::Factor factor_dest_rgb;
- Blend::Equation equation_a;
- Blend::Factor factor_source_a;
- INSERT_PADDING_WORDS_NOINIT(1);
- Blend::Factor factor_dest_a;
+ struct ZCullRegionPixelOffset {
+ u32 width;
+ u32 height;
+ };
- u32 enable_common;
- u32 enable[NumRenderTargets];
- } blend;
+ struct PointSprite {
+ enum class RMode : u32 {
+ Zero = 0,
+ FromR = 1,
+ FromS = 2,
+ };
+ enum class Origin : u32 {
+ Bottom = 0,
+ Top = 1,
+ };
+ enum class Texture : u32 {
+ Passthrough = 0,
+ Generate = 1,
+ };
- u32 stencil_enable;
- StencilOp stencil_front_op_fail;
- StencilOp stencil_front_op_zfail;
- StencilOp stencil_front_op_zpass;
- ComparisonOp stencil_front_func_func;
- s32 stencil_front_func_ref;
- u32 stencil_front_func_mask;
- u32 stencil_front_mask;
+ union {
+ BitField<0, 2, RMode> rmode;
+ BitField<2, 1, Origin> origin;
+ BitField<3, 1, Texture> texture0;
+ BitField<4, 1, Texture> texture1;
+ BitField<5, 1, Texture> texture2;
+ BitField<6, 1, Texture> texture3;
+ BitField<7, 1, Texture> texture4;
+ BitField<8, 1, Texture> texture5;
+ BitField<9, 1, Texture> texture6;
+ BitField<10, 1, Texture> texture7;
+ BitField<11, 1, Texture> texture8;
+ BitField<12, 1, Texture> texture9;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x2);
+ struct ProgramRegion {
+ u32 address_high;
+ u32 address_low;
- u32 frag_color_clamp;
+ GPUVAddr Address() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
+ };
- union {
- BitField<0, 1, u32> y_negate;
- BitField<4, 1, u32> triangle_rast_flip;
- } screen_y_control;
+ struct DefaultAttributes {
+ enum class Diffuse : u32 {
+ Vector_0001 = 0,
+ Vector_1111 = 1,
+ };
+ enum class Specular : u32 {
+ Vector_0000 = 0,
+ Vector_0001 = 1,
+ };
+ enum class Vector : u32 {
+ Vector_0000 = 0,
+ Vector_0001 = 1,
+ };
+ enum class FixedFncTexture : u32 {
+ Vector_0000 = 0,
+ Vector_0001 = 1,
+ };
+ enum class DX9Color0 : u32 {
+ Vector_0000 = 0,
+ Vector_1111 = 1,
+ };
+ enum class DX9Color1To15 : u32 {
+ Vector_0000 = 0,
+ Vector_0001 = 1,
+ };
- float line_width_smooth;
- float line_width_aliased;
+ union {
+ BitField<0, 1, Diffuse> color_front_diffuse;
+ BitField<1, 1, Specular> color_front_specular;
+ BitField<2, 1, Vector> generic_vector;
+ BitField<3, 1, FixedFncTexture> fixed_fnc_texture;
+ BitField<4, 1, DX9Color0> dx9_color0;
+ BitField<5, 1, DX9Color1To15> dx9_color1_to_15;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x1B);
+ struct Draw {
+ enum class PrimitiveId : u32 {
+ First = 0,
+ Unchanged = 1,
+ };
+ enum class InstanceId : u32 {
+ First = 0,
+ Subsequent = 1,
+ Unchanged = 2,
+ };
+ enum class SplitMode : u32 {
+ NormalBeginNormal = 0,
+ NormalBeginOpen = 1,
+ OpenBeginOpen = 2,
+ OpenBeginNormal = 3,
+ };
- u32 invalidate_sampler_cache_no_wfi;
- u32 invalidate_texture_header_cache_no_wfi;
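+            // Successor of the old vertex_begin_gl/vertex_end_gl pair: writing begin latches the
+            // topology and instancing mode and starts a draw, while writing end completes it.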
+ u32 end;
+ union {
+ u32 begin;
+ BitField<0, 16, PrimitiveTopology> topology;
+ BitField<24, 1, PrimitiveId> primitive_id;
+ BitField<26, 2, InstanceId> instance_id;
+ BitField<29, 2, SplitMode> split_mode;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x2);
+ struct VertexIdCopy {
+ union {
+ BitField<0, 1, u32> enable;
+ BitField<4, 8, u32> attribute_slot;
+ };
+ };
- u32 vb_element_base;
- u32 vb_base_instance;
+ struct ShaderBasedCull {
+ union {
+                BitField<0, 1, u32> before_fetch_enable;
+                BitField<1, 1, u32> batch_cull_enable;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x35);
+ struct ClassVersion {
+ union {
+ BitField<0, 16, u32> current;
+ BitField<16, 16, u32> oldest_supported;
+ };
+ };
- u32 clip_distance_enabled;
+ struct PrimitiveRestart {
+ u32 enabled;
+ u32 index;
+ };
- u32 samplecnt_enable;
+ struct OutputVertexId {
+ union {
+ BitField<12, 1, u32> uses_array_start;
+ };
+ };
- float point_size;
+ enum class PointCenterMode : u32 {
+ GL = 0,
+ D3D = 1,
+ };
- INSERT_PADDING_WORDS_NOINIT(0x1);
+ enum class LineSmoothParams : u32 {
+ Falloff_1_00 = 0,
+ Falloff_1_33 = 1,
+ Falloff_1_66 = 2,
+ };
- u32 point_sprite_enable;
+ struct LineSmoothEdgeTable {
+ union {
+ BitField<0, 8, u32> v0;
+ BitField<8, 8, u32> v1;
+ BitField<16, 8, u32> v2;
+ BitField<24, 8, u32> v3;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x3);
+ struct LineStippleParams {
+ union {
+ BitField<0, 8, u32> factor;
+ BitField<8, 16, u32> pattern;
+ };
+ };
- CounterReset counter_reset;
+ enum class ProvokingVertex : u32 {
+ First = 0,
+ Last = 1,
+ };
- u32 multisample_enable;
+ struct ShaderControl {
+ enum class Partial : u32 {
+ Zero = 0,
+ Infinity = 1,
+ };
+ enum class FP32NanBehavior : u32 {
+ Legacy = 0,
+ FP64Compatible = 1,
+ };
+ enum class FP32F2INanBehavior : u32 {
+ PassZero = 0,
+ PassIndefinite = 1,
+ };
- u32 zeta_enable;
+ union {
+ BitField<0, 1, Partial> default_partial;
+ BitField<1, 1, FP32NanBehavior> fp32_nan_behavior;
+ BitField<2, 1, FP32F2INanBehavior> fp32_f2i_nan_behavior;
+ };
+ };
- union {
- BitField<0, 1, u32> alpha_to_coverage;
- BitField<4, 1, u32> alpha_to_one;
- } multisample_control;
+ struct SphVersion {
+ union {
+ BitField<0, 16, u32> current;
+ BitField<16, 16, u32> oldest_supported;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x4);
+ struct AlphaToCoverageOverride {
+ union {
+ BitField<0, 1, u32> qualify_by_anti_alias_enable;
+ BitField<1, 1, u32> qualify_by_ps_sample_mask_enable;
+ };
+ };
- struct {
- u32 address_high;
- u32 address_low;
- ConditionMode mode;
+ struct AamVersion {
+ union {
+ BitField<0, 16, u32> current;
+ BitField<16, 16, u32> oldest_supported;
+ };
+ };
- GPUVAddr Address() const {
- return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
- address_low);
- }
- } condition;
+ struct IndexBuffer {
+ u32 start_addr_high;
+ u32 start_addr_low;
+ u32 limit_addr_high;
+ u32 limit_addr_low;
+ IndexFormat format;
+ u32 first;
+ u32 count;
+
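+            // Returns the size in bytes of a single index element for the current format.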
+ unsigned FormatSizeInBytes() const {
+ switch (format) {
+ case IndexFormat::UnsignedByte:
+ return 1;
+ case IndexFormat::UnsignedShort:
+ return 2;
+ case IndexFormat::UnsignedInt:
+ return 4;
+ }
+ ASSERT(false);
+ return 1;
+ }
- struct {
- u32 address_high;
- u32 address_low;
- u32 limit;
+ GPUVAddr StartAddress() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(start_addr_high) << 32) |
+ start_addr_low);
+ }
- GPUVAddr Address() const {
- return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
- address_low);
- }
- } tsc;
+ GPUVAddr EndAddress() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(limit_addr_high) << 32) |
+ limit_addr_low);
+ }
- INSERT_PADDING_WORDS_NOINIT(0x1);
+            /// Adjusts the index buffer offset so it points to the first desired index.
+ GPUVAddr IndexStart() const {
+ return StartAddress() +
+ static_cast<size_t>(first) * static_cast<size_t>(FormatSizeInBytes());
+ }
+ };
- float polygon_offset_factor;
+ struct IndexBufferSmall {
+ union {
+ BitField<0, 16, u32> first;
+ BitField<16, 12, u32> count;
+ BitField<28, 4, PrimitiveTopology> topology;
+ };
+ };
- u32 line_smooth_enable;
+ struct VertexStreamInstances {
+ std::array<u32, NumVertexArrays> is_instanced;
- struct {
- u32 address_high;
- u32 address_low;
- u32 limit;
+ /// Returns whether the vertex array specified by index is supposed to be
+ /// accessed per instance or not.
+ bool IsInstancingEnabled(std::size_t index) const {
+ return is_instanced[index];
+ }
+ };
- GPUVAddr Address() const {
- return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
- address_low);
- }
- } tic;
+ struct AttributePointSize {
+ union {
+ BitField<0, 1, u32> enabled;
+ BitField<4, 8, u32> slot;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x5);
+ struct ViewportClipControl {
+ enum class GeometryGuardband : u32 {
+ Scale256 = 0,
+ Scale1 = 1,
+ };
+ enum class GeometryClip : u32 {
+ WZero = 0,
+ Passthrough = 1,
+ FrustumXY = 2,
+ FrustumXYZ = 3,
+ WZeroNoZCull = 4,
+ FrustumZ = 5,
+ WZeroTriFillOrClip = 6,
+ };
+ enum class GeometryGuardbandZ : u32 {
+ SameAsXY = 0,
+ Scale256 = 1,
+ Scale1 = 2,
+ };
- u32 stencil_two_side_enable;
- StencilOp stencil_back_op_fail;
- StencilOp stencil_back_op_zfail;
- StencilOp stencil_back_op_zpass;
- ComparisonOp stencil_back_func_func;
+ union {
+ BitField<0, 1, u32> depth_0_to_1;
+                BitField<1, 2, GeometryGuardbandZ> geometry_guardband_z;
+                BitField<3, 1, u32> pixel_min_z;
+                BitField<4, 1, u32> pixel_max_z;
+                BitField<7, 1, GeometryGuardband> geometry_guardband;
+                BitField<11, 3, GeometryClip> geometry_clip;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x4);
+ enum class PrimitiveTopologyControl : u32 {
+ UseInBeginMethods = 0,
+ UseSeparateState = 1,
+ };
- u32 framebuffer_srgb;
+ struct WindowClip {
+ enum class Type : u32 {
+ Inclusive = 0,
+ Exclusive = 1,
+ ClipAll = 2,
+ };
- float polygon_offset_units;
+ u32 enable;
+ Type type;
+ };
- INSERT_PADDING_WORDS_NOINIT(0x4);
+ enum class InvalidateZCull : u32 {
+ Invalidate = 0,
+ };
- Tegra::Texture::MsaaMode multisample_mode;
+ struct ZCull {
+ union {
+ BitField<0, 1, u32> z_enable;
+ BitField<1, 1, u32> stencil_enable;
+ };
+ union {
+                BitField<0, 1, u32> z_min_unbounded;
+ BitField<1, 1, u32> z_max_unbounded;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0xC);
+ struct LogicOp {
+ enum class Op : u32 {
+ Clear = 0x1500,
+ And = 0x1501,
+ AndReverse = 0x1502,
+ Copy = 0x1503,
+ AndInverted = 0x1504,
+ NoOp = 0x1505,
+ Xor = 0x1506,
+ Or = 0x1507,
+ Nor = 0x1508,
+ Equiv = 0x1509,
+ Invert = 0x150A,
+ OrReverse = 0x150B,
+ CopyInverted = 0x150C,
+ OrInverted = 0x150D,
+ Nand = 0x150E,
+ Set = 0x150F,
+ };
- union {
- BitField<2, 1, u32> coord_origin;
- BitField<3, 10, u32> enable;
- } point_coord_replace;
-
- struct {
- u32 code_address_high;
- u32 code_address_low;
-
- GPUVAddr CodeAddress() const {
- return static_cast<GPUVAddr>(
- (static_cast<GPUVAddr>(code_address_high) << 32) | code_address_low);
- }
- } code_address;
- INSERT_PADDING_WORDS_NOINIT(1);
-
- struct {
- u32 vertex_end_gl;
- union {
- u32 vertex_begin_gl;
- BitField<0, 16, PrimitiveTopology> topology;
- BitField<26, 1, u32> instance_next;
- BitField<27, 1, u32> instance_cont;
- };
- } draw;
-
- INSERT_PADDING_WORDS_NOINIT(0xA);
-
- struct {
- u32 enabled;
- u32 index;
- } primitive_restart;
-
- INSERT_PADDING_WORDS_NOINIT(0xE);
-
- u32 provoking_vertex_last;
-
- INSERT_PADDING_WORDS_NOINIT(0x50);
-
- struct {
- u32 start_addr_high;
- u32 start_addr_low;
- u32 end_addr_high;
- u32 end_addr_low;
- IndexFormat format;
- u32 first;
- u32 count;
-
- unsigned FormatSizeInBytes() const {
- switch (format) {
- case IndexFormat::UnsignedByte:
- return 1;
- case IndexFormat::UnsignedShort:
- return 2;
- case IndexFormat::UnsignedInt:
- return 4;
- }
- ASSERT(false);
- return 1;
- }
-
- GPUVAddr StartAddress() const {
- return static_cast<GPUVAddr>(
- (static_cast<GPUVAddr>(start_addr_high) << 32) | start_addr_low);
- }
-
- GPUVAddr EndAddress() const {
- return static_cast<GPUVAddr>((static_cast<GPUVAddr>(end_addr_high) << 32) |
- end_addr_low);
- }
-
- /// Adjust the index buffer offset so it points to the first desired index.
- GPUVAddr IndexStart() const {
- return StartAddress() + static_cast<size_t>(first) *
- static_cast<size_t>(FormatSizeInBytes());
- }
- } index_array;
+ u32 enable;
+ Op op;
+ };
- union {
- BitField<0, 16, u32> first;
- BitField<16, 16, u32> count;
- } small_index;
+ struct ClearSurface {
+ union {
+ u32 raw;
+ BitField<0, 1, u32> Z;
+ BitField<1, 1, u32> S;
+ BitField<2, 1, u32> R;
+ BitField<3, 1, u32> G;
+ BitField<4, 1, u32> B;
+ BitField<5, 1, u32> A;
+ BitField<6, 4, u32> RT;
+ BitField<10, 16, u32> layer;
+ };
+ };
- union {
- BitField<0, 16, u32> first;
- BitField<16, 16, u32> count;
- } small_index_2;
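+        // Replaces the old query registers: one structure covering semaphore releases/acquires
+        // and report (query) writes.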
+ struct ReportSemaphore {
+ struct Compare {
+ u32 initial_sequence;
+ u32 initial_mode;
+ u32 unknown1;
+ u32 unknown2;
+ u32 current_sequence;
+ u32 current_mode;
+ };
- INSERT_PADDING_WORDS_NOINIT(0x5);
+ enum class Operation : u32 {
+ Release = 0,
+ Acquire = 1,
+ ReportOnly = 2,
+ Trap = 3,
+ };
- INSERT_PADDING_WORDS_NOINIT(0x1F);
+ enum class Release : u32 {
+                AfterAllPrecedingReads = 0,
+                AfterAllPrecedingWrites = 1,
+ };
- float polygon_offset_clamp;
+ enum class Acquire : u32 {
+ BeforeAnyFollowingWrites = 0,
+ BeforeAnyFollowingReads = 1,
+ };
- struct {
- u32 is_instanced[NumVertexArrays];
+ enum class Location : u32 {
+ None = 0,
+ VertexFetch = 1,
+ VertexShader = 2,
+ VPC = 4,
+ StreamingOutput = 5,
+ GeometryShader = 6,
+ ZCull = 7,
+ TessellationInit = 8,
+ TessellationShader = 9,
+ PixelShader = 10,
+ DepthTest = 12,
+ All = 15,
+ };
- /// Returns whether the vertex array specified by index is supposed to be
- /// accessed per instance or not.
- bool IsInstancingEnabled(std::size_t index) const {
- return is_instanced[index];
- }
- } instanced_arrays;
+ enum class Comparison : u32 {
+ NotEqual = 0,
+ GreaterOrEqual = 1,
+ };
- INSERT_PADDING_WORDS_NOINIT(0x4);
+ enum class Report : u32 {
+ Payload = 0, // "None" in docs, but confirmed via hardware to return the payload
+ VerticesGenerated = 1,
+ ZPassPixelCount = 2,
+ PrimitivesGenerated = 3,
+ AlphaBetaClocks = 4,
+ VertexShaderInvocations = 5,
+ StreamingPrimitivesNeededMinusSucceeded = 6,
+ GeometryShaderInvocations = 7,
+ GeometryShaderPrimitivesGenerated = 9,
+ ZCullStats0 = 10,
+ StreamingPrimitivesSucceeded = 11,
+ ZCullStats1 = 12,
+ StreamingPrimitivesNeeded = 13,
+ ZCullStats2 = 14,
+ ClipperInvocations = 15,
+ ZCullStats3 = 16,
+ ClipperPrimitivesGenerated = 17,
+ VtgPrimitivesOut = 18,
+ PixelShaderInvocations = 19,
+ ZPassPixelCount64 = 21,
+ IEEECleanColorTarget = 24,
+ IEEECleanZetaTarget = 25,
+ StreamingByteCount = 26,
+ TessellationInitInvocations = 27,
+ BoundingRectangle = 28,
+ TessellationShaderInvocations = 29,
+ TotalStreamingPrimitivesNeededMinusSucceeded = 30,
+ TessellationShaderPrimitivesGenerated = 31,
+ };
- union {
- BitField<0, 1, u32> enable;
- BitField<4, 8, u32> unk4;
- } vp_point_size;
+ u32 address_high;
+ u32 address_low;
+ u32 payload;
+ union {
+ u32 raw;
+                BitField<0, 2, Operation> operation;
+                BitField<2, 1, u32> disable_flush;
+                BitField<3, 1, u32> reduction_enable;
+                BitField<4, 1, Release> release;
+                BitField<5, 3, u32> sub_report;
+                BitField<8, 1, Acquire> acquire;
+                BitField<9, 3, ReductionOp> reduction_op;
+                BitField<12, 4, Location> location;
+                BitField<16, 1, Comparison> comparison;
+                BitField<17, 2, u32> format_signed;
+                BitField<20, 1, u32> awaken_enable;
+                BitField<21, 1, u32> dword_number;
+                BitField<23, 5, Report> report;
+                BitField<28, 1, u32> short_query;
+ } query;
- INSERT_PADDING_WORDS_NOINIT(1);
+ GPUVAddr Address() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
+ };
- u32 cull_test_enabled;
- FrontFace front_face;
- CullFace cull_face;
+ struct VertexStream {
+ union {
+ BitField<0, 12, u32> stride;
+ BitField<12, 1, u32> enable;
+ };
+ u32 address_high;
+ u32 address_low;
+ u32 frequency;
- u32 pixel_center_integer;
+ GPUVAddr Address() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
- INSERT_PADDING_WORDS_NOINIT(0x1);
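+            // A vertex stream is considered active only when it is enabled and points at a
+            // non-null address.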
+ bool IsEnabled() const {
+ return enable != 0 && Address() != 0;
+ }
+ };
+ static_assert(sizeof(VertexStream) == 0x10);
- u32 viewport_transform_enabled;
+ struct VertexStreamLimit {
+ u32 address_high;
+ u32 address_low;
- INSERT_PADDING_WORDS_NOINIT(0x3);
+ GPUVAddr Address() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
+ };
+ static_assert(sizeof(VertexStreamLimit) == 0x8);
- union {
- BitField<0, 1, u32> depth_range_0_1;
- BitField<3, 1, u32> depth_clamp_near;
- BitField<4, 1, u32> depth_clamp_far;
- BitField<11, 1, u32> depth_clamp_disabled;
- } view_volume_clip_control;
+ enum class ShaderType : u32 {
+ VertexA = 0,
+ VertexB = 1,
+ TessellationInit = 2,
+ Tessellation = 3,
+ Geometry = 4,
+ Pixel = 5,
+ };
- INSERT_PADDING_WORDS_NOINIT(0xC);
+ struct Pipeline {
+ union {
+ BitField<0, 1, u32> enable;
+ BitField<4, 4, ShaderType> program;
+ };
+ u32 offset;
+ u32 reservedA;
+ u32 register_count;
+ u32 binding_group;
+ std::array<u32, 4> reserved;
+ INSERT_PADDING_BYTES_NOINIT(0x1C);
+ };
+ static_assert(sizeof(Pipeline) == 0x40);
- PrimitiveTopologyOverride topology_override;
+ bool IsShaderConfigEnabled(std::size_t index) const {
+            // VertexB is always enabled.
+ if (index == static_cast<std::size_t>(ShaderType::VertexB)) {
+ return true;
+ }
+ return pipelines[index].enable != 0;
+ }
- INSERT_PADDING_WORDS_NOINIT(0x12);
+ bool IsShaderConfigEnabled(ShaderType type) const {
+ return IsShaderConfigEnabled(static_cast<std::size_t>(type));
+ }
- u32 depth_bounds_enable;
+ struct ConstantBuffer {
+ u32 size;
+ u32 address_high;
+ u32 address_low;
+ u32 offset;
+ std::array<u32, NumCBData> buffer;
- INSERT_PADDING_WORDS_NOINIT(1);
+ GPUVAddr Address() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
+ };
- struct {
- u32 enable;
- LogicOperation operation;
- } logic_op;
+ struct BindGroup {
+ std::array<u32, 4> reserved;
+ union {
+ u32 raw_config;
+ BitField<0, 1, u32> valid;
+ BitField<4, 5, u32> shader_slot;
+ };
+ INSERT_PADDING_BYTES_NOINIT(0xC);
+ };
+ static_assert(sizeof(BindGroup) == 0x20);
- INSERT_PADDING_WORDS_NOINIT(0x1);
+ struct StreamOutLayout {
+ union {
+ BitField<0, 8, u32> attribute0;
+ BitField<8, 8, u32> attribute1;
+ BitField<16, 8, u32> attribute2;
+ BitField<24, 8, u32> attribute3;
+ };
+ };
+ struct ShaderPerformance {
+ struct ControlA {
union {
- u32 raw;
- BitField<0, 1, u32> Z;
- BitField<1, 1, u32> S;
- BitField<2, 1, u32> R;
- BitField<3, 1, u32> G;
- BitField<4, 1, u32> B;
- BitField<5, 1, u32> A;
- BitField<6, 4, u32> RT;
- BitField<10, 11, u32> layer;
- } clear_buffers;
- INSERT_PADDING_WORDS_NOINIT(0xB);
- std::array<ColorMask, NumRenderTargets> color_mask;
- INSERT_PADDING_WORDS_NOINIT(0x38);
-
- struct {
- u32 query_address_high;
- u32 query_address_low;
- u32 query_sequence;
- union {
- u32 raw;
- BitField<0, 2, QueryOperation> operation;
- BitField<4, 1, u32> fence;
- BitField<12, 4, QueryUnit> unit;
- BitField<16, 1, QuerySyncCondition> sync_cond;
- BitField<23, 5, QuerySelect> select;
- BitField<28, 1, u32> short_query;
- } query_get;
-
- GPUVAddr QueryAddress() const {
- return static_cast<GPUVAddr>(
- (static_cast<GPUVAddr>(query_address_high) << 32) | query_address_low);
- }
- } query;
-
- INSERT_PADDING_WORDS_NOINIT(0x3C);
-
- struct {
- union {
- BitField<0, 12, u32> stride;
- BitField<12, 1, u32> enable;
- };
- u32 start_high;
- u32 start_low;
- u32 divisor;
-
- GPUVAddr StartAddress() const {
- return static_cast<GPUVAddr>((static_cast<GPUVAddr>(start_high) << 32) |
- start_low);
- }
-
- bool IsEnabled() const {
- return enable != 0 && StartAddress() != 0;
- }
-
- } vertex_array[NumVertexArrays];
-
- Blend independent_blend[NumRenderTargets];
-
- struct {
- u32 limit_high;
- u32 limit_low;
-
- GPUVAddr LimitAddress() const {
- return static_cast<GPUVAddr>((static_cast<GPUVAddr>(limit_high) << 32) |
- limit_low);
- }
- } vertex_array_limit[NumVertexArrays];
-
- struct {
- union {
- BitField<0, 1, u32> enable;
- BitField<4, 4, ShaderProgram> program;
- };
- u32 offset;
- INSERT_PADDING_WORDS_NOINIT(14);
- } shader_config[MaxShaderProgram];
-
- INSERT_PADDING_WORDS_NOINIT(0x60);
-
- u32 firmware[0x20];
-
- struct {
- u32 cb_size;
- u32 cb_address_high;
- u32 cb_address_low;
- u32 cb_pos;
- std::array<u32, NumCBData> cb_data;
-
- GPUVAddr BufferAddress() const {
- return static_cast<GPUVAddr>(
- (static_cast<GPUVAddr>(cb_address_high) << 32) | cb_address_low);
- }
- } const_buffer;
-
- INSERT_PADDING_WORDS_NOINIT(0x10);
-
- struct {
- union {
- u32 raw_config;
- BitField<0, 1, u32> valid;
- BitField<4, 5, u32> index;
- };
- INSERT_PADDING_WORDS_NOINIT(7);
- } cb_bind[MaxShaderStage];
-
- INSERT_PADDING_WORDS_NOINIT(0x56);
-
- u32 tex_cb_index;
-
- INSERT_PADDING_WORDS_NOINIT(0x7D);
-
- std::array<std::array<u8, 128>, NumTransformFeedbackBuffers> tfb_varying_locs;
-
- INSERT_PADDING_WORDS_NOINIT(0x298);
-
- struct {
- /// Compressed address of a buffer that holds information about bound SSBOs.
- /// This address is usually bound to c0 in the shaders.
- u32 buffer_address;
-
- GPUVAddr BufferAddress() const {
- return static_cast<GPUVAddr>(buffer_address) << 8;
- }
- } ssbo_info;
+ BitField<0, 2, u32> event0;
+ BitField<2, 3, u32> bit0;
+ BitField<5, 2, u32> event1;
+ BitField<7, 3, u32> bit1;
+ BitField<10, 2, u32> event2;
+ BitField<12, 3, u32> bit2;
+ BitField<15, 2, u32> event3;
+ BitField<17, 3, u32> bit3;
+ BitField<20, 2, u32> event4;
+ BitField<22, 3, u32> bit4;
+ BitField<25, 2, u32> event5;
+ BitField<27, 3, u32> bit5;
+ BitField<30, 2, u32> spare;
+ };
+ };
- INSERT_PADDING_WORDS_NOINIT(0x11);
+ struct ControlB {
+ union {
+ BitField<0, 1, u32> edge;
+ BitField<1, 2, u32> mode;
+ BitField<3, 1, u32> windowed;
+ BitField<4, 16, u32> func;
+ };
+ };
- struct {
- u32 address[MaxShaderStage];
- u32 size[MaxShaderStage];
- } tex_info_buffers;
+ std::array<u32, 8> values_upper;
+ std::array<u32, 8> values;
+ std::array<u32, 8> events;
+ std::array<ControlA, 8> control_a;
+ std::array<ControlB, 8> control_b;
+ u32 trap_control_mask;
+ u32 start_shader_mask;
+ u32 stop_shader_mask;
+ };
- INSERT_PADDING_WORDS_NOINIT(0xCC);
+ // clang-format off
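+        // Register map: the ///< annotations give each register's byte offset within the
+        // Maxwell 3D class.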
+ union {
+ struct {
+ ID object_id; ///< 0x0000
+ INSERT_PADDING_BYTES_NOINIT(0xFC);
+ u32 nop; ///< 0x0100
+ Notify notify; ///< 0x0104
+ u32 wait_for_idle; ///< 0x0110
+ LoadMME load_mme; ///< 0x0114
+ ShadowRamControl shadow_ram_control; ///< 0x0124
+ PeerSemaphore peer; ///< 0x0128
+ GlobalRender global_render; ///< 0x0130
+ u32 go_idle; ///< 0x013C
+ u32 trigger; ///< 0x0140
+ u32 trigger_wfi; ///< 0x0144
+ INSERT_PADDING_BYTES_NOINIT(0x8);
+ u32 instrumentation_method_header; ///< 0x0150
+ u32 instrumentation_method_data; ///< 0x0154
+ INSERT_PADDING_BYTES_NOINIT(0x28);
+ Upload::Registers upload; ///< 0x0180
+ LaunchDMA launch_dma; ///< 0x01B0
+ u32 inline_data; ///< 0x01B4
+ INSERT_PADDING_BYTES_NOINIT(0x24);
+ I2M i2m; ///< 0x01DC
+ u32 run_ds_now; ///< 0x0200
+ OpportunisticEarlyZ opportunistic_early_z; ///< 0x0204
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ u32 aliased_line_width_enabled; ///< 0x020C
+ u32 mandated_early_z; ///< 0x0210
+ GeometryShaderDmFifo gs_dm_fifo; ///< 0x0214
+ L2CacheControl l2_cache_control; ///< 0x0218
+ InvalidateShaderCache invalidate_shader_cache; ///< 0x021C
+ INSERT_PADDING_BYTES_NOINIT(0xA8);
+ SyncInfo sync_info; ///< 0x02C8
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ u32 prim_circular_buffer_throttle; ///< 0x02D0
+ u32 flush_invalidate_rop_mini_cache; ///< 0x02D4
+ SurfaceClipBlockId surface_clip_block_id; ///< 0x02D8
+ u32 alpha_circular_buffer_size; ///< 0x02DC
+ DecompressSurface decompress_surface; ///< 0x02E0
+ ZCullRopBypass zcull_rop_bypass; ///< 0x02E4
+ ZCullSubregion zcull_subregion; ///< 0x02E8
+ RasterBoundingBox raster_bounding_box; ///< 0x02EC
+ u32 peer_semaphore_release; ///< 0x02F0
+ u32 iterated_blend_optimization; ///< 0x02F4
+ ZCullSubregionAllocation zcull_subregion_allocation; ///< 0x02F8
+ ZCullSubregionAlgorithm zcull_subregion_algorithm; ///< 0x02FC
+ PixelShaderOutputSampleMaskUsage ps_output_sample_mask_usage; ///< 0x0300
+ u32 draw_zero_index; ///< 0x0304
+ L1Configuration l1_configuration; ///< 0x0308
+ u32 render_enable_control_load_const_buffer; ///< 0x030C
+ SPAVersion spa_version; ///< 0x0310
+ u32 ieee_clean_update; ///< 0x0314
+ SnapGrid snap_grid; ///< 0x0318
+ Tessellation tessellation; ///< 0x0320
+ SubTilingPerf sub_tiling_perf; ///< 0x0360
+ ZCullSubregionReport zcull_subregion_report; ///< 0x036C
+ BalancedPrimitiveWorkload balanced_primitive_workload; ///< 0x0374
+ u32 max_patches_per_batch; ///< 0x0378
+ u32 rasterize_enable; ///< 0x037C
+ TransformFeedback transform_feedback; ///< 0x0380
+ u32 raster_input; ///< 0x0740
+ u32 transform_feedback_enabled; ///< 0x0744
+ u32 primitive_restart_topology_change_enable; ///< 0x0748
+ u32 alpha_fraction; ///< 0x074C
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ HybridAntiAliasControl hybrid_aa_control; ///< 0x0754
+ INSERT_PADDING_BYTES_NOINIT(0x24);
+ ShaderLocalMemory shader_local_memory; ///< 0x077C
+ u32 color_zero_bandwidth_clear; ///< 0x07A4
+ u32 z_zero_bandwidth_clear; ///< 0x07A8
+ u32 isbe_save_restore_program_offset; ///< 0x07AC
+ INSERT_PADDING_BYTES_NOINIT(0x10);
+ ZCullRegion zcull_region; ///< 0x07C0
+ ZetaReadOnly zeta_read_only; ///< 0x07F8
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ std::array<RenderTargetConfig, NumRenderTargets> rt; ///< 0x0800
+ std::array<ViewportTransform, NumViewports> viewport_transform; ///< 0x0A00
+ std::array<Viewport, NumViewports> viewports; ///< 0x0C00
+ std::array<Window, 8> windows; ///< 0x0D00
+ std::array<ClipIdExtent, 4> clip_id_extent; ///< 0x0D40
+ u32 max_geometry_instances_per_task; ///< 0x0D60
+ VisibleCallLimit visible_call_limit; ///< 0x0D64
+ StatisticsCounter statistics_count; ///< 0x0D68
+ ClearRect clear_rect; ///< 0x0D6C
+ VertexBuffer vertex_buffer; ///< 0x0D74
+ DepthMode depth_mode; ///< 0x0D7C
+ std::array<f32, 4> clear_color; ///< 0x0D80
+ f32 clear_depth; ///< 0x0D90
+ u32 shader_cache_icache_prefetch; ///< 0x0D94
+ u32 force_transition_to_beta; ///< 0x0D98
+                u32 reduce_color_thresholds;                                  ///< 0x0D9C
+ s32 clear_stencil; ///< 0x0DA0
+ InvalidateShaderCacheNoWFI invalidate_shader_cache_no_wfi; ///< 0x0DA4
+ ZCullSerialization zcull_serialization; ///< 0x0DA8
+ PolygonMode polygon_mode_front; ///< 0x0DAC
+ PolygonMode polygon_mode_back; ///< 0x0DB0
+ u32 polygon_smooth; ///< 0x0DB4
+ u32 zeta_mark_clean_ieee; ///< 0x0DB8
+ ZCullDirFormat zcull_dir_format; ///< 0x0DBC
+ u32 polygon_offset_point_enable; ///< 0x0DC0
+ u32 polygon_offset_line_enable; ///< 0x0DC4
+ u32 polygon_offset_fill_enable; ///< 0x0DC8
+ u32 patch_vertices; ///< 0x0DCC
+ IteratedBlend iterated_blend; ///< 0x0DD0
+ ZCullCriterion zcull_criteria; ///< 0x0DD8
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ u32 fragment_barrier; ///< 0x0DE0
+ u32 sm_timeout; ///< 0x0DE4
+ u32 primitive_restart_array; ///< 0x0DE8
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ LoadIteratedBlend load_iterated_blend; ///< 0x0DF0
+ u32 window_offset_x; ///< 0x0DF8
+ u32 window_offset_y; ///< 0x0DFC
+ std::array<ScissorTest, NumViewports> scissor_test; ///< 0x0E00
+ INSERT_PADDING_BYTES_NOINIT(0x10);
+ u32 select_texture_headers; ///< 0x0F10
+ VPCPerf vpc_perf; ///< 0x0F14
+ u32 pm_local_trigger; ///< 0x0F18
+ u32 post_z_pixel_imask; ///< 0x0F1C
+ INSERT_PADDING_BYTES_NOINIT(0x20);
+ ConstantColorRendering const_color_rendering; ///< 0x0F40
+ s32 stencil_back_ref; ///< 0x0F54
+ u32 stencil_back_mask; ///< 0x0F58
+ u32 stencil_back_func_mask; ///< 0x0F5C
+ INSERT_PADDING_BYTES_NOINIT(0x24);
+ VertexStreamSubstitute vertex_stream_substitute; ///< 0x0F84
+ u32 line_mode_clip_generated_edge_do_not_draw; ///< 0x0F8C
+ u32 color_mask_common; ///< 0x0F90
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ VTGWarpWatermarks vtg_warp_watermarks; ///< 0x0F98
+ f32 depth_bounds[2]; ///< 0x0F9C
+ SampleMask::Target sample_mask_target; ///< 0x0FA4
+ u32 color_target_mrt_enable; ///< 0x0FAC
+ NonMultisampledZ non_multisampled_z; ///< 0x0FB0
+ TIRMode tir_mode; ///< 0x0FB4
+ AntiAliasRaster anti_alias_raster; ///< 0x0FB8
+ SampleMask::Pos sample_mask_pos; ///< 0x0FBC
+ SurfaceClipIDMemory surface_clip_id_memory; ///< 0x0FCC
+ TIRModulation tir_modulation; ///< 0x0FD4
+ u32 blend_control_allow_float_pixel_kills; ///< 0x0FDC
+ Zeta zeta; ///< 0x0FE0
+ SurfaceClip surface_clip; ///< 0x0FF4
+ u32 tiled_cache_treat_heavy_as_light; ///< 0x0FFC
+ L2CacheVAFRequests l2_cache_vaf; ///< 0x1000
+ ViewportMulticast viewport_multicast; ///< 0x1004
+ u32 tessellation_cut_height; ///< 0x1008
+ u32 max_gs_instances_per_task; ///< 0x100C
+ u32 max_gs_output_vertices_per_task; ///< 0x1010
+ u32 reserved_sw_method0; ///< 0x1014
+ u32 gs_output_cb_storage_multiplier; ///< 0x1018
+ u32 beta_cb_storage_constant; ///< 0x101C
+ u32 ti_output_cb_storage_multiplier; ///< 0x1020
+ u32 alpha_cb_storage_constraint; ///< 0x1024
+ u32 reserved_sw_method1; ///< 0x1028
+ u32 reserved_sw_method2; ///< 0x102C
+ std::array<TIRModulationCoeff, 5> tir_modulation_coeff; ///< 0x1030
+ std::array<u32, 15> spare_nop; ///< 0x1044
+ INSERT_PADDING_BYTES_NOINIT(0x30);
+ std::array<u32, 7> reserved_sw_method3_to_7; ///< 0x10B0
+ ReduceColorThreshold reduce_color_thresholds_unorm8; ///< 0x10CC
+ std::array<u32, 4> reserved_sw_method10_to_13; ///< 0x10D0
+ ReduceColorThreshold reduce_color_thresholds_unorm10; ///< 0x10E0
+ ReduceColorThreshold reduce_color_thresholds_unorm16; ///< 0x10E4
+ ReduceColorThreshold reduce_color_thresholds_fp11; ///< 0x10E8
+ ReduceColorThreshold reduce_color_thresholds_fp16; ///< 0x10EC
+ ReduceColorThreshold reduce_color_thresholds_srgb8; ///< 0x10F0
+ u32 unbind_all_constant_buffers; ///< 0x10F4
+ ClearControl clear_control; ///< 0x10F8
+ L2CacheRopNonInterlockedReads l2_cache_rop_non_interlocked_reads; ///< 0x10FC
+ u32 reserved_sw_method14; ///< 0x1100
+ u32 reserved_sw_method15; ///< 0x1104
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ u32 no_operation_data_high; ///< 0x110C
+ u32 depth_bias_control; ///< 0x1110
+ u32 pm_trigger_end; ///< 0x1114
+ u32 vertex_id_base; ///< 0x1118
+ u32 stencil_compression_enabled; ///< 0x111C
+ VertexOutputAttributeSkipMasks vertex_output_attribute_skip_masks; ///< 0x1120
+ TIRControl tir_control; ///< 0x1130
+ u32 mutable_method_treat_mutable_as_heavy; ///< 0x1134
+ u32 post_ps_use_pre_ps_coverage; ///< 0x1138
+ FillViaTriangleMode fill_via_triangle_mode; ///< 0x113C
+ u32 blend_per_format_snorm8_unorm16_snorm16_enabled; ///< 0x1140
+ u32 flush_pending_writes_sm_global_store; ///< 0x1144
+ INSERT_PADDING_BYTES_NOINIT(0x18);
+ std::array<VertexAttribute, NumVertexAttributes> vertex_attrib_format; ///< 0x1160
+ std::array<MsaaSampleLocation, 4> multisample_sample_locations; ///< 0x11E0
+ u32 offset_render_target_index_by_viewport_index; ///< 0x11F0
+ u32 force_heavy_method_sync; ///< 0x11F4
+ MultisampleCoverageToColor multisample_coverage_to_color; ///< 0x11F8
+ DecompressZetaSurface decompress_zeta_surface; ///< 0x11FC
+ INSERT_PADDING_BYTES_NOINIT(0x8);
+ ZetaSparse zeta_sparse; ///< 0x1208
+ u32 invalidate_sampler_cache; ///< 0x120C
+ u32 invalidate_texture_header_cache; ///< 0x1210
+ VertexArray vertex_array_instance_first; ///< 0x1214
+ VertexArray vertex_array_instance_subsequent; ///< 0x1218
+ RtControl rt_control; ///< 0x121C
+ CompressionThresholdSamples compression_threshold_samples; ///< 0x1220
+ PixelShaderInterlockControl ps_interlock_control; ///< 0x1224
+ ZetaSize zeta_size; ///< 0x1228
+ SamplerBinding sampler_binding; ///< 0x1234
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ u32 draw_auto_byte_count; ///< 0x123C
+ std::array<u32, 8> post_vtg_shader_attrib_skip_mask; ///< 0x1240
+ PsTicketDispenserValue ps_ticket_dispenser_value; ///< 0x1260
+ INSERT_PADDING_BYTES_NOINIT(0x1C);
+ u32 circular_buffer_size; ///< 0x1280
+ RegisterWatermarks vtg_register_watermarks; ///< 0x1284
+ InvalidateTextureDataCacheNoWfi invalidate_texture_cache_no_wfi; ///< 0x1288
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ L2CacheRopNonInterlockedReads l2_cache_rop_interlocked_reads; ///< 0x1290
+ INSERT_PADDING_BYTES_NOINIT(0x10);
+ u32 primitive_restart_topology_change_index; ///< 0x12A4
+ INSERT_PADDING_BYTES_NOINIT(0x20);
+ ZCullRegionEnable zcull_region_enable; ///< 0x12C8
+ u32 depth_test_enable; ///< 0x12CC
+ FillMode fill_mode; ///< 0x12D0
+ ShadeMode shade_mode; ///< 0x12D4
+ L2CacheRopNonInterlockedReads l2_cache_rop_non_interlocked_writes; ///< 0x12D8
+ L2CacheRopNonInterlockedReads l2_cache_rop_interlocked_writes; ///< 0x12DC
+ AlphaToCoverageDither alpha_to_coverage_dither; ///< 0x12E0
+ u32 blend_per_target_enabled; ///< 0x12E4
+ u32 depth_write_enabled; ///< 0x12E8
+ u32 alpha_test_enabled; ///< 0x12EC
+ INSERT_PADDING_BYTES_NOINIT(0x10);
+ InlineIndex4x8 inline_index_4x8; ///< 0x1300
+ D3DCullMode d3d_cull_mode; ///< 0x1308
+ ComparisonOp depth_test_func; ///< 0x130C
+ f32 alpha_test_ref; ///< 0x1310
+ ComparisonOp alpha_test_func; ///< 0x1314
+ u32 draw_auto_stride; ///< 0x1318
+ BlendColor blend_color; ///< 0x131C
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ InvalidateCacheLines invalidate_sampler_cache_lines; ///< 0x1330
+ InvalidateCacheLines invalidate_texture_header_cache_lines; ///< 0x1334
+ InvalidateCacheLines invalidate_texture_data_cache_lines; ///< 0x1338
+ Blend blend; ///< 0x133C
+ u32 stencil_enable; ///< 0x1380
+ StencilOp stencil_front_op; ///< 0x1384
+ s32 stencil_front_ref; ///< 0x1394
+ s32 stencil_front_func_mask; ///< 0x1398
+ s32 stencil_front_mask; ///< 0x139C
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ u32 draw_auto_start_byte_count; ///< 0x13A4
+ PsSaturate frag_color_clamp; ///< 0x13A8
+ WindowOrigin window_origin; ///< 0x13AC
+ f32 line_width_smooth; ///< 0x13B0
+ f32 line_width_aliased; ///< 0x13B4
+ INSERT_PADDING_BYTES_NOINIT(0x60);
+ u32 line_override_multisample; ///< 0x1418
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ u32 alpha_hysteresis_rounds; ///< 0x1420
+ InvalidateCacheLines invalidate_sampler_cache_no_wfi; ///< 0x1424
+ InvalidateCacheLines invalidate_texture_header_cache_no_wfi; ///< 0x1428
+ INSERT_PADDING_BYTES_NOINIT(0x8);
+ u32 global_base_vertex_index; ///< 0x1434
+ u32 global_base_instance_index; ///< 0x1438
+ INSERT_PADDING_BYTES_NOINIT(0x14);
+ RegisterWatermarks ps_warp_watermarks; ///< 0x1450
+ RegisterWatermarks ps_register_watermarks; ///< 0x1454
+ INSERT_PADDING_BYTES_NOINIT(0xC);
+ u32 store_zcull; ///< 0x1464
+ INSERT_PADDING_BYTES_NOINIT(0x18);
+ std::array<IteratedBlendConstants, NumRenderTargets>
+ iterated_blend_constants; ///< 0x1480
+ u32 load_zcull; ///< 0x1500
+ u32 surface_clip_id_height; ///< 0x1504
+ Window surface_clip_id_clear_rect; ///< 0x1508
+ UserClip::Enable user_clip_enable; ///< 0x1510
+ u32 zpass_pixel_count_enable; ///< 0x1514
+ f32 point_size; ///< 0x1518
+ u32 zcull_stats_enable; ///< 0x151C
+ u32 point_sprite_enable; ///< 0x1520
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ u32 shader_exceptions_enable; ///< 0x1528
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ ClearReport clear_report_value; ///< 0x1530
+ u32 anti_alias_enable; ///< 0x1534
+ u32 zeta_enable; ///< 0x1538
+ AntiAliasAlphaControl anti_alias_alpha_control; ///< 0x153C
+ INSERT_PADDING_BYTES_NOINIT(0x10);
+ RenderEnable render_enable; ///< 0x1550
+ TexSampler tex_sampler; ///< 0x155C
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ f32 slope_scale_depth_bias; ///< 0x156C
+ u32 line_anti_alias_enable; ///< 0x1570
+ TexHeader tex_header; ///< 0x1574
+ INSERT_PADDING_BYTES_NOINIT(0x10);
+ u32 active_zcull_region_id; ///< 0x1590
+ u32 stencil_two_side_enable; ///< 0x1594
+ StencilOp stencil_back_op; ///< 0x1598
+ INSERT_PADDING_BYTES_NOINIT(0x10);
+ u32 framebuffer_srgb; ///< 0x15B8
+ f32 depth_bias; ///< 0x15BC
+ INSERT_PADDING_BYTES_NOINIT(0x8);
+ ZCullRegionFormat zcull_region_format; ///< 0x15C8
+ RtLayer rt_layer; ///< 0x15CC
+ Tegra::Texture::MsaaMode anti_alias_samples_mode; ///< 0x15D0
+ INSERT_PADDING_BYTES_NOINIT(0x10);
+ u32 edge_flag; ///< 0x15E4
+ u32 draw_inline_index; ///< 0x15E8
+ InlineIndex2x16 inline_index_2x16; ///< 0x15EC
+ VertexGlobalBaseOffset vertex_global_base_offset; ///< 0x15F4
+ ZCullRegionPixelOffset zcull_region_pixel_offset; ///< 0x15FC
+ PointSprite point_sprite; ///< 0x1604
+ ProgramRegion program_region; ///< 0x1608
+ DefaultAttributes default_attributes; ///< 0x1610
+ Draw draw; ///< 0x1614
+ VertexIdCopy vertex_id_copy; ///< 0x161C
+ u32 add_to_primitive_id; ///< 0x1620
+ u32 load_to_primitive_id; ///< 0x1624
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ ShaderBasedCull shader_based_cull; ///< 0x162C
+ INSERT_PADDING_BYTES_NOINIT(0x8);
+ ClassVersion class_version; ///< 0x1638
+ INSERT_PADDING_BYTES_NOINIT(0x8);
+ PrimitiveRestart primitive_restart; ///< 0x1644
+ OutputVertexId output_vertex_id; ///< 0x164C
+ INSERT_PADDING_BYTES_NOINIT(0x8);
+ u32 anti_alias_point_enable; ///< 0x1658
+ PointCenterMode point_center_mode; ///< 0x165C
+ INSERT_PADDING_BYTES_NOINIT(0x8);
+ LineSmoothParams line_smooth_params; ///< 0x1668
+ u32 line_stipple_enable; ///< 0x166C
+ std::array<LineSmoothEdgeTable, 4> line_smooth_edge_table; ///< 0x1670
+ LineStippleParams line_stipple_params; ///< 0x1680
+ ProvokingVertex provoking_vertex; ///< 0x1684
+ u32 two_sided_light_enabled; ///< 0x1688
+ u32 polygon_stipple_enabled; ///< 0x168C
+ ShaderControl shader_control; ///< 0x1690
+ INSERT_PADDING_BYTES_NOINIT(0xC);
+ ClassVersion class_version_check; ///< 0x16A0
+ SphVersion sph_version; ///< 0x16A4
+ SphVersion sph_version_check; ///< 0x16A8
+ INSERT_PADDING_BYTES_NOINIT(0x8);
+ AlphaToCoverageOverride alpha_to_coverage_override; ///< 0x16B4
+ INSERT_PADDING_BYTES_NOINIT(0x48);
+ std::array<u32, 32> polygon_stipple_pattern; ///< 0x1700
+ INSERT_PADDING_BYTES_NOINIT(0x10);
+ AamVersion aam_version; ///< 0x1790
+ AamVersion aam_version_check; ///< 0x1794
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ u32 zeta_layer_offset; ///< 0x179C
+ INSERT_PADDING_BYTES_NOINIT(0x28);
+ IndexBuffer index_buffer; ///< 0x17C8
+ IndexBufferSmall index_buffer32_first; ///< 0x17E4
+ IndexBufferSmall index_buffer16_first; ///< 0x17E8
+ IndexBufferSmall index_buffer8_first; ///< 0x17EC
+ IndexBufferSmall index_buffer32_subsequent; ///< 0x17F0
+ IndexBufferSmall index_buffer16_subsequent; ///< 0x17F4
+ IndexBufferSmall index_buffer8_subsequent; ///< 0x17F8
+ INSERT_PADDING_BYTES_NOINIT(0x80);
+ f32 depth_bias_clamp; ///< 0x187C
+ VertexStreamInstances vertex_stream_instances; ///< 0x1880
+ INSERT_PADDING_BYTES_NOINIT(0x10);
+ AttributePointSize point_size_attribute; ///< 0x1910
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ u32 gl_cull_test_enabled; ///< 0x1918
+ FrontFace gl_front_face; ///< 0x191C
+ CullFace gl_cull_face; ///< 0x1920
+ Viewport::PixelCenter viewport_pixel_center; ///< 0x1924
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ u32 viewport_scale_offset_enabled; ///< 0x192C
+ INSERT_PADDING_BYTES_NOINIT(0xC);
+ ViewportClipControl viewport_clip_control; ///< 0x193C
+ UserClip::Op user_clip_op; ///< 0x1940
+ RenderEnable::Override render_enable_override; ///< 0x1944
+ PrimitiveTopologyControl primitive_topology_control; ///< 0x1948
+ WindowClip window_clip_enable; ///< 0x194C
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ InvalidateZCull invalidate_zcull; ///< 0x1958
+ INSERT_PADDING_BYTES_NOINIT(0xC);
+ ZCull zcull; ///< 0x1968
+ PrimitiveTopologyOverride topology_override; ///< 0x1970
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ u32 zcull_sync; ///< 0x1978
+ u32 clip_id_test_enable; ///< 0x197C
+ u32 surface_clip_id_width; ///< 0x1980
+ u32 clip_id; ///< 0x1984
+ INSERT_PADDING_BYTES_NOINIT(0x34);
+ u32 depth_bounds_enable; ///< 0x19BC
+ u32 blend_float_zero_times_anything_is_zero; ///< 0x19C0
+ LogicOp logic_op; ///< 0x19C4
+ u32 z_compression_enable; ///< 0x19CC
+ ClearSurface clear_surface; ///< 0x19D0
+ u32 clear_clip_id_surface; ///< 0x19D4
+ INSERT_PADDING_BYTES_NOINIT(0x8);
+ std::array<u32, NumRenderTargets> color_compression_enable; ///< 0x19E0
+ std::array<ColorMask, NumRenderTargets> color_mask; ///< 0x1A00
+ INSERT_PADDING_BYTES_NOINIT(0xC);
+ u32 pipe_nop; ///< 0x1A2C
+ std::array<u32, 4> spare; ///< 0x1A30
+ INSERT_PADDING_BYTES_NOINIT(0xC0);
+ ReportSemaphore report_semaphore; ///< 0x1B00
+ INSERT_PADDING_BYTES_NOINIT(0xF0);
+ std::array<VertexStream, NumVertexArrays> vertex_streams; ///< 0x1C00
+ BlendPerTarget blend_per_target[NumRenderTargets]; ///< 0x1E00
+ std::array<VertexStreamLimit, NumVertexArrays> vertex_stream_limits; ///< 0x1F00
+ std::array<Pipeline, MaxShaderProgram> pipelines; ///< 0x2000
+ INSERT_PADDING_BYTES_NOINIT(0x180);
+ u32 falcon[32]; ///< 0x2300
+ ConstantBuffer const_buffer; ///< 0x2380
+ INSERT_PADDING_BYTES_NOINIT(0x30);
+ BindGroup bind_groups[MaxShaderStage]; ///< 0x2400
+ INSERT_PADDING_BYTES_NOINIT(0x160);
+ u32 color_clamp_enable; ///< 0x2600
+ INSERT_PADDING_BYTES_NOINIT(0x4);
+ u32 bindless_texture_const_buffer_slot; ///< 0x2608
+ u32 trap_handler; ///< 0x260C
+ INSERT_PADDING_BYTES_NOINIT(0x1F0);
+ std::array<std::array<StreamOutLayout, 32>, NumTransformFeedbackBuffers>
+ stream_out_layout; ///< 0x2800
+ INSERT_PADDING_BYTES_NOINIT(0x93C);
+ ShaderPerformance shader_performance; ///< 0x333C
+ INSERT_PADDING_BYTES_NOINIT(0x18);
+ std::array<u32, 0x100> shadow_scratch; ///< 0x3400
};
std::array<u32, NUM_REGS> reg_array;
};
};
+ // clang-format on
Regs regs{};
@@ -1438,8 +3044,6 @@ public:
};
std::array<ShaderStageInfo, Regs::MaxShaderStage> shader_stages;
-
- u32 current_instance = 0; ///< Current instance to be used to simulate instanced rendering.
};
State state{};
@@ -1454,11 +3058,6 @@ public:
void CallMultiMethod(u32 method, const u32* base_start, u32 amount,
u32 methods_pending) override;
- /// Write the value to the register identified by method.
- void CallMethodFromMME(u32 method, u32 method_argument);
-
- void FlushMMEInlineDraw();
-
bool ShouldExecute() const {
return execute_on;
}
@@ -1471,21 +3070,6 @@ public:
return *rasterizer;
}
- enum class MMEDrawMode : u32 {
- Undefined,
- Array,
- Indexed,
- };
-
- struct MMEDrawState {
- MMEDrawMode current_mode{MMEDrawMode::Undefined};
- u32 current_count{};
- u32 instance_count{};
- bool instance_mode{};
- bool gl_begin_consume{};
- u32 gl_end_count{};
- } mme_draw;
-
struct DirtyState {
using Flags = std::bitset<std::numeric_limits<u8>::max()>;
using Table = std::array<u8, Regs::NUM_REGS>;
@@ -1495,6 +3079,8 @@ public:
Tables tables{};
} dirty;
+ std::vector<u8> inline_index_draw_indexes;
+
private:
void InitializeRegisterDefaults();
@@ -1554,14 +3140,12 @@ private:
/// Handles a write to the CB_BIND register.
void ProcessCBBind(size_t stage_index);
- /// Handles a write to the VERTEX_END_GL register, triggering a draw.
- void DrawArrays();
-
/// Handles use of topology overrides (e.g., to avoid using a topology assigned from a macro)
void ProcessTopologyOverride();
- // Handles a instance drawcall from MME
- void StepInstance(MMEDrawMode expected_mode, u32 count);
+ void ProcessDraw(u32 instance_count = 1);
+
+ void ProcessDeferredDraw();
/// Returns a query's value or an empty object if the value will be deferred through a cache.
std::optional<u64> GetQueryResult();
@@ -1574,8 +3158,6 @@ private:
/// Start offsets of each macro in macro_memory
std::array<u32, 0x80> macro_positions{};
- std::array<bool, Regs::NUM_REGS> mme_inline{};
-
/// Macro method that is currently being executed / being fed parameters.
u32 executing_macro = 0;
/// Parameters that have been submitted to the macro call so far.
@@ -1588,150 +3170,355 @@ private:
bool execute_on{true};
bool use_topology_override{false};
+
+ std::array<bool, Regs::NUM_REGS> draw_command{};
+ std::vector<u32> deferred_draw_method;
};
#define ASSERT_REG_POSITION(field_name, position) \
- static_assert(offsetof(Maxwell3D::Regs, field_name) == position * 4, \
+ static_assert(offsetof(Maxwell3D::Regs, field_name) == position, \
"Field " #field_name " has invalid position")
-ASSERT_REG_POSITION(wait_for_idle, 0x44);
-ASSERT_REG_POSITION(macros, 0x45);
-ASSERT_REG_POSITION(shadow_ram_control, 0x49);
-ASSERT_REG_POSITION(upload, 0x60);
-ASSERT_REG_POSITION(exec_upload, 0x6C);
-ASSERT_REG_POSITION(data_upload, 0x6D);
-ASSERT_REG_POSITION(force_early_fragment_tests, 0x84);
-ASSERT_REG_POSITION(sync_info, 0xB2);
-ASSERT_REG_POSITION(tess_mode, 0xC8);
-ASSERT_REG_POSITION(tess_level_outer, 0xC9);
-ASSERT_REG_POSITION(tess_level_inner, 0xCD);
-ASSERT_REG_POSITION(rasterize_enable, 0xDF);
-ASSERT_REG_POSITION(tfb_bindings, 0xE0);
-ASSERT_REG_POSITION(tfb_layouts, 0x1C0);
-ASSERT_REG_POSITION(tfb_enabled, 0x1D1);
-ASSERT_REG_POSITION(rt, 0x200);
-ASSERT_REG_POSITION(viewport_transform, 0x280);
-ASSERT_REG_POSITION(viewports, 0x300);
-ASSERT_REG_POSITION(vertex_buffer, 0x35D);
-ASSERT_REG_POSITION(depth_mode, 0x35F);
-ASSERT_REG_POSITION(clear_color[0], 0x360);
-ASSERT_REG_POSITION(clear_depth, 0x364);
-ASSERT_REG_POSITION(clear_stencil, 0x368);
-ASSERT_REG_POSITION(polygon_mode_front, 0x36B);
-ASSERT_REG_POSITION(polygon_mode_back, 0x36C);
-ASSERT_REG_POSITION(polygon_offset_point_enable, 0x370);
-ASSERT_REG_POSITION(polygon_offset_line_enable, 0x371);
-ASSERT_REG_POSITION(polygon_offset_fill_enable, 0x372);
-ASSERT_REG_POSITION(patch_vertices, 0x373);
-ASSERT_REG_POSITION(fragment_barrier, 0x378);
-ASSERT_REG_POSITION(scissor_test, 0x380);
-ASSERT_REG_POSITION(stencil_back_func_ref, 0x3D5);
-ASSERT_REG_POSITION(stencil_back_mask, 0x3D6);
-ASSERT_REG_POSITION(stencil_back_func_mask, 0x3D7);
-ASSERT_REG_POSITION(invalidate_texture_data_cache, 0x3DD);
-ASSERT_REG_POSITION(tiled_cache_barrier, 0x3DF);
-ASSERT_REG_POSITION(color_mask_common, 0x3E4);
-ASSERT_REG_POSITION(depth_bounds, 0x3E7);
-ASSERT_REG_POSITION(rt_separate_frag_data, 0x3EB);
-ASSERT_REG_POSITION(multisample_raster_enable, 0x3ED);
-ASSERT_REG_POSITION(multisample_raster_samples, 0x3EE);
-ASSERT_REG_POSITION(multisample_sample_mask, 0x3EF);
-ASSERT_REG_POSITION(zeta, 0x3F8);
-ASSERT_REG_POSITION(render_area, 0x3FD);
-ASSERT_REG_POSITION(clear_flags, 0x43E);
-ASSERT_REG_POSITION(fill_rectangle, 0x44F);
-ASSERT_REG_POSITION(conservative_raster_enable, 0x452);
-ASSERT_REG_POSITION(vertex_attrib_format, 0x458);
-ASSERT_REG_POSITION(multisample_sample_locations, 0x478);
-ASSERT_REG_POSITION(multisample_coverage_to_color, 0x47E);
-ASSERT_REG_POSITION(rt_control, 0x487);
-ASSERT_REG_POSITION(zeta_width, 0x48a);
-ASSERT_REG_POSITION(zeta_height, 0x48b);
-ASSERT_REG_POSITION(zeta_depth, 0x48c);
-ASSERT_REG_POSITION(sampler_index, 0x48D);
-ASSERT_REG_POSITION(gp_passthrough_mask, 0x490);
-ASSERT_REG_POSITION(depth_test_enable, 0x4B3);
-ASSERT_REG_POSITION(independent_blend_enable, 0x4B9);
-ASSERT_REG_POSITION(depth_write_enabled, 0x4BA);
-ASSERT_REG_POSITION(alpha_test_enabled, 0x4BB);
-ASSERT_REG_POSITION(d3d_cull_mode, 0x4C2);
-ASSERT_REG_POSITION(depth_test_func, 0x4C3);
-ASSERT_REG_POSITION(alpha_test_ref, 0x4C4);
-ASSERT_REG_POSITION(alpha_test_func, 0x4C5);
-ASSERT_REG_POSITION(draw_tfb_stride, 0x4C6);
-ASSERT_REG_POSITION(blend_color, 0x4C7);
-ASSERT_REG_POSITION(blend, 0x4CF);
-ASSERT_REG_POSITION(stencil_enable, 0x4E0);
-ASSERT_REG_POSITION(stencil_front_op_fail, 0x4E1);
-ASSERT_REG_POSITION(stencil_front_op_zfail, 0x4E2);
-ASSERT_REG_POSITION(stencil_front_op_zpass, 0x4E3);
-ASSERT_REG_POSITION(stencil_front_func_func, 0x4E4);
-ASSERT_REG_POSITION(stencil_front_func_ref, 0x4E5);
-ASSERT_REG_POSITION(stencil_front_func_mask, 0x4E6);
-ASSERT_REG_POSITION(stencil_front_mask, 0x4E7);
-ASSERT_REG_POSITION(frag_color_clamp, 0x4EA);
-ASSERT_REG_POSITION(screen_y_control, 0x4EB);
-ASSERT_REG_POSITION(line_width_smooth, 0x4EC);
-ASSERT_REG_POSITION(line_width_aliased, 0x4ED);
-ASSERT_REG_POSITION(invalidate_sampler_cache_no_wfi, 0x509);
-ASSERT_REG_POSITION(invalidate_texture_header_cache_no_wfi, 0x50A);
-ASSERT_REG_POSITION(vb_element_base, 0x50D);
-ASSERT_REG_POSITION(vb_base_instance, 0x50E);
-ASSERT_REG_POSITION(clip_distance_enabled, 0x544);
-ASSERT_REG_POSITION(samplecnt_enable, 0x545);
-ASSERT_REG_POSITION(point_size, 0x546);
-ASSERT_REG_POSITION(point_sprite_enable, 0x548);
-ASSERT_REG_POSITION(counter_reset, 0x54C);
-ASSERT_REG_POSITION(multisample_enable, 0x54D);
-ASSERT_REG_POSITION(zeta_enable, 0x54E);
-ASSERT_REG_POSITION(multisample_control, 0x54F);
-ASSERT_REG_POSITION(condition, 0x554);
-ASSERT_REG_POSITION(tsc, 0x557);
-ASSERT_REG_POSITION(polygon_offset_factor, 0x55B);
-ASSERT_REG_POSITION(line_smooth_enable, 0x55C);
-ASSERT_REG_POSITION(tic, 0x55D);
-ASSERT_REG_POSITION(stencil_two_side_enable, 0x565);
-ASSERT_REG_POSITION(stencil_back_op_fail, 0x566);
-ASSERT_REG_POSITION(stencil_back_op_zfail, 0x567);
-ASSERT_REG_POSITION(stencil_back_op_zpass, 0x568);
-ASSERT_REG_POSITION(stencil_back_func_func, 0x569);
-ASSERT_REG_POSITION(framebuffer_srgb, 0x56E);
-ASSERT_REG_POSITION(polygon_offset_units, 0x56F);
-ASSERT_REG_POSITION(multisample_mode, 0x574);
-ASSERT_REG_POSITION(point_coord_replace, 0x581);
-ASSERT_REG_POSITION(code_address, 0x582);
-ASSERT_REG_POSITION(draw, 0x585);
-ASSERT_REG_POSITION(primitive_restart, 0x591);
-ASSERT_REG_POSITION(provoking_vertex_last, 0x5A1);
-ASSERT_REG_POSITION(index_array, 0x5F2);
-ASSERT_REG_POSITION(small_index, 0x5F9);
-ASSERT_REG_POSITION(polygon_offset_clamp, 0x61F);
-ASSERT_REG_POSITION(instanced_arrays, 0x620);
-ASSERT_REG_POSITION(vp_point_size, 0x644);
-ASSERT_REG_POSITION(cull_test_enabled, 0x646);
-ASSERT_REG_POSITION(front_face, 0x647);
-ASSERT_REG_POSITION(cull_face, 0x648);
-ASSERT_REG_POSITION(pixel_center_integer, 0x649);
-ASSERT_REG_POSITION(viewport_transform_enabled, 0x64B);
-ASSERT_REG_POSITION(view_volume_clip_control, 0x64F);
-ASSERT_REG_POSITION(topology_override, 0x65C);
-ASSERT_REG_POSITION(depth_bounds_enable, 0x66F);
-ASSERT_REG_POSITION(logic_op, 0x671);
-ASSERT_REG_POSITION(clear_buffers, 0x674);
-ASSERT_REG_POSITION(color_mask, 0x680);
-ASSERT_REG_POSITION(query, 0x6C0);
-ASSERT_REG_POSITION(vertex_array[0], 0x700);
-ASSERT_REG_POSITION(independent_blend, 0x780);
-ASSERT_REG_POSITION(vertex_array_limit[0], 0x7C0);
-ASSERT_REG_POSITION(shader_config[0], 0x800);
-ASSERT_REG_POSITION(firmware, 0x8C0);
-ASSERT_REG_POSITION(const_buffer, 0x8E0);
-ASSERT_REG_POSITION(cb_bind[0], 0x904);
-ASSERT_REG_POSITION(tex_cb_index, 0x982);
-ASSERT_REG_POSITION(tfb_varying_locs, 0xA00);
-ASSERT_REG_POSITION(ssbo_info, 0xD18);
-ASSERT_REG_POSITION(tex_info_buffers.address[0], 0xD2A);
-ASSERT_REG_POSITION(tex_info_buffers.size[0], 0xD2F);
+ASSERT_REG_POSITION(object_id, 0x0000);
+ASSERT_REG_POSITION(nop, 0x0100);
+ASSERT_REG_POSITION(notify, 0x0104);
+ASSERT_REG_POSITION(wait_for_idle, 0x0110);
+ASSERT_REG_POSITION(load_mme, 0x0114);
+ASSERT_REG_POSITION(shadow_ram_control, 0x0124);
+ASSERT_REG_POSITION(peer, 0x0128);
+ASSERT_REG_POSITION(global_render, 0x0130);
+ASSERT_REG_POSITION(go_idle, 0x013C);
+ASSERT_REG_POSITION(trigger, 0x0140);
+ASSERT_REG_POSITION(trigger_wfi, 0x0144);
+ASSERT_REG_POSITION(instrumentation_method_header, 0x0150);
+ASSERT_REG_POSITION(instrumentation_method_data, 0x0154);
+ASSERT_REG_POSITION(upload, 0x0180);
+ASSERT_REG_POSITION(launch_dma, 0x01B0);
+ASSERT_REG_POSITION(inline_data, 0x01B4);
+ASSERT_REG_POSITION(i2m, 0x01DC);
+ASSERT_REG_POSITION(run_ds_now, 0x0200);
+ASSERT_REG_POSITION(opportunistic_early_z, 0x0204);
+ASSERT_REG_POSITION(aliased_line_width_enabled, 0x020C);
+ASSERT_REG_POSITION(mandated_early_z, 0x0210);
+ASSERT_REG_POSITION(gs_dm_fifo, 0x0214);
+ASSERT_REG_POSITION(l2_cache_control, 0x0218);
+ASSERT_REG_POSITION(invalidate_shader_cache, 0x021C);
+ASSERT_REG_POSITION(sync_info, 0x02C8);
+ASSERT_REG_POSITION(prim_circular_buffer_throttle, 0x02D0);
+ASSERT_REG_POSITION(flush_invalidate_rop_mini_cache, 0x02D4);
+ASSERT_REG_POSITION(surface_clip_block_id, 0x02D8);
+ASSERT_REG_POSITION(alpha_circular_buffer_size, 0x02DC);
+ASSERT_REG_POSITION(decompress_surface, 0x02E0);
+ASSERT_REG_POSITION(zcull_rop_bypass, 0x02E4);
+ASSERT_REG_POSITION(zcull_subregion, 0x02E8);
+ASSERT_REG_POSITION(raster_bounding_box, 0x02EC);
+ASSERT_REG_POSITION(peer_semaphore_release, 0x02F0);
+ASSERT_REG_POSITION(iterated_blend_optimization, 0x02F4);
+ASSERT_REG_POSITION(zcull_subregion_allocation, 0x02F8);
+ASSERT_REG_POSITION(zcull_subregion_algorithm, 0x02FC);
+ASSERT_REG_POSITION(ps_output_sample_mask_usage, 0x0300);
+ASSERT_REG_POSITION(draw_zero_index, 0x0304);
+ASSERT_REG_POSITION(l1_configuration, 0x0308);
+ASSERT_REG_POSITION(render_enable_control_load_const_buffer, 0x030C);
+ASSERT_REG_POSITION(spa_version, 0x0310);
+ASSERT_REG_POSITION(ieee_clean_update, 0x0314);
+ASSERT_REG_POSITION(snap_grid, 0x0318);
+ASSERT_REG_POSITION(tessellation, 0x0320);
+ASSERT_REG_POSITION(sub_tiling_perf, 0x0360);
+ASSERT_REG_POSITION(zcull_subregion_report, 0x036C);
+ASSERT_REG_POSITION(balanced_primitive_workload, 0x0374);
+ASSERT_REG_POSITION(max_patches_per_batch, 0x0378);
+ASSERT_REG_POSITION(rasterize_enable, 0x037C);
+ASSERT_REG_POSITION(transform_feedback, 0x0380);
+ASSERT_REG_POSITION(transform_feedback.controls, 0x0700);
+ASSERT_REG_POSITION(raster_input, 0x0740);
+ASSERT_REG_POSITION(transform_feedback_enabled, 0x0744);
+ASSERT_REG_POSITION(primitive_restart_topology_change_enable, 0x0748);
+ASSERT_REG_POSITION(alpha_fraction, 0x074C);
+ASSERT_REG_POSITION(hybrid_aa_control, 0x0754);
+ASSERT_REG_POSITION(shader_local_memory, 0x077C);
+ASSERT_REG_POSITION(color_zero_bandwidth_clear, 0x07A4);
+ASSERT_REG_POSITION(z_zero_bandwidth_clear, 0x07A8);
+ASSERT_REG_POSITION(isbe_save_restore_program_offset, 0x07AC);
+ASSERT_REG_POSITION(zcull_region, 0x07C0);
+ASSERT_REG_POSITION(zeta_read_only, 0x07F8);
+ASSERT_REG_POSITION(rt, 0x0800);
+ASSERT_REG_POSITION(viewport_transform, 0x0A00);
+ASSERT_REG_POSITION(viewports, 0x0C00);
+ASSERT_REG_POSITION(windows, 0x0D00);
+ASSERT_REG_POSITION(clip_id_extent, 0x0D40);
+ASSERT_REG_POSITION(max_geometry_instances_per_task, 0x0D60);
+ASSERT_REG_POSITION(visible_call_limit, 0x0D64);
+ASSERT_REG_POSITION(statistics_count, 0x0D68);
+ASSERT_REG_POSITION(clear_rect, 0x0D6C);
+ASSERT_REG_POSITION(vertex_buffer, 0x0D74);
+ASSERT_REG_POSITION(depth_mode, 0x0D7C);
+ASSERT_REG_POSITION(clear_color, 0x0D80);
+ASSERT_REG_POSITION(clear_depth, 0x0D90);
+ASSERT_REG_POSITION(shader_cache_icache_prefetch, 0x0D94);
+ASSERT_REG_POSITION(force_transition_to_beta, 0x0D98);
+ASSERT_REG_POSITION(reduce_colour_thresholds, 0x0D9C);
+ASSERT_REG_POSITION(clear_stencil, 0x0DA0);
+ASSERT_REG_POSITION(invalidate_shader_cache_no_wfi, 0x0DA4);
+ASSERT_REG_POSITION(zcull_serialization, 0x0DA8);
+ASSERT_REG_POSITION(polygon_mode_front, 0x0DAC);
+ASSERT_REG_POSITION(polygon_mode_back, 0x0DB0);
+ASSERT_REG_POSITION(polygon_smooth, 0x0DB4);
+ASSERT_REG_POSITION(zeta_mark_clean_ieee, 0x0DB8);
+ASSERT_REG_POSITION(zcull_dir_format, 0x0DBC);
+ASSERT_REG_POSITION(polygon_offset_point_enable, 0x0DC0);
+ASSERT_REG_POSITION(polygon_offset_line_enable, 0x0DC4);
+ASSERT_REG_POSITION(polygon_offset_fill_enable, 0x0DC8);
+ASSERT_REG_POSITION(patch_vertices, 0x0DCC);
+ASSERT_REG_POSITION(iterated_blend, 0x0DD0);
+ASSERT_REG_POSITION(zcull_criteria, 0x0DD8);
+ASSERT_REG_POSITION(fragment_barrier, 0x0DE0);
+ASSERT_REG_POSITION(sm_timeout, 0x0DE4);
+ASSERT_REG_POSITION(primitive_restart_array, 0x0DE8);
+ASSERT_REG_POSITION(load_iterated_blend, 0x0DF0);
+ASSERT_REG_POSITION(window_offset_x, 0x0DF8);
+ASSERT_REG_POSITION(window_offset_y, 0x0DFC);
+ASSERT_REG_POSITION(scissor_test, 0x0E00);
+ASSERT_REG_POSITION(select_texture_headers, 0x0F10);
+ASSERT_REG_POSITION(vpc_perf, 0x0F14);
+ASSERT_REG_POSITION(pm_local_trigger, 0x0F18);
+ASSERT_REG_POSITION(post_z_pixel_imask, 0x0F1C);
+ASSERT_REG_POSITION(const_color_rendering, 0x0F40);
+ASSERT_REG_POSITION(stencil_back_ref, 0x0F54);
+ASSERT_REG_POSITION(stencil_back_mask, 0x0F58);
+ASSERT_REG_POSITION(stencil_back_func_mask, 0x0F5C);
+ASSERT_REG_POSITION(vertex_stream_substitute, 0x0F84);
+ASSERT_REG_POSITION(line_mode_clip_generated_edge_do_not_draw, 0x0F8C);
+ASSERT_REG_POSITION(color_mask_common, 0x0F90);
+ASSERT_REG_POSITION(vtg_warp_watermarks, 0x0F98);
+ASSERT_REG_POSITION(depth_bounds, 0x0F9C);
+ASSERT_REG_POSITION(sample_mask_target, 0x0FA4);
+ASSERT_REG_POSITION(color_target_mrt_enable, 0x0FAC);
+ASSERT_REG_POSITION(non_multisampled_z, 0x0FB0);
+ASSERT_REG_POSITION(tir_mode, 0x0FB4);
+ASSERT_REG_POSITION(anti_alias_raster, 0x0FB8);
+ASSERT_REG_POSITION(sample_mask_pos, 0x0FBC);
+ASSERT_REG_POSITION(surface_clip_id_memory, 0x0FCC);
+ASSERT_REG_POSITION(tir_modulation, 0x0FD4);
+ASSERT_REG_POSITION(blend_control_allow_float_pixel_kills, 0x0FDC);
+ASSERT_REG_POSITION(zeta, 0x0FE0);
+ASSERT_REG_POSITION(surface_clip, 0x0FF4);
+ASSERT_REG_POSITION(tiled_cache_treat_heavy_as_light, 0x0FFC);
+ASSERT_REG_POSITION(l2_cache_vaf, 0x1000);
+ASSERT_REG_POSITION(viewport_multicast, 0x1004);
+ASSERT_REG_POSITION(tessellation_cut_height, 0x1008);
+ASSERT_REG_POSITION(max_gs_instances_per_task, 0x100C);
+ASSERT_REG_POSITION(max_gs_output_vertices_per_task, 0x1010);
+ASSERT_REG_POSITION(reserved_sw_method0, 0x1014);
+ASSERT_REG_POSITION(gs_output_cb_storage_multiplier, 0x1018);
+ASSERT_REG_POSITION(beta_cb_storage_constant, 0x101C);
+ASSERT_REG_POSITION(ti_output_cb_storage_multiplier, 0x1020);
+ASSERT_REG_POSITION(alpha_cb_storage_constraint, 0x1024);
+ASSERT_REG_POSITION(reserved_sw_method1, 0x1028);
+ASSERT_REG_POSITION(reserved_sw_method2, 0x102C);
+ASSERT_REG_POSITION(tir_modulation_coeff, 0x1030);
+ASSERT_REG_POSITION(spare_nop, 0x1044);
+ASSERT_REG_POSITION(reserved_sw_method3_to_7, 0x10B0);
+ASSERT_REG_POSITION(reduce_color_thresholds_unorm8, 0x10CC);
+ASSERT_REG_POSITION(reserved_sw_method10_to_13, 0x10D0);
+ASSERT_REG_POSITION(reduce_color_thresholds_unorm10, 0x10E0);
+ASSERT_REG_POSITION(reduce_color_thresholds_unorm16, 0x10E4);
+ASSERT_REG_POSITION(reduce_color_thresholds_fp11, 0x10E8);
+ASSERT_REG_POSITION(reduce_color_thresholds_fp16, 0x10EC);
+ASSERT_REG_POSITION(reduce_color_thresholds_srgb8, 0x10F0);
+ASSERT_REG_POSITION(unbind_all_constant_buffers, 0x10F4);
+ASSERT_REG_POSITION(clear_control, 0x10F8);
+ASSERT_REG_POSITION(l2_cache_rop_non_interlocked_reads, 0x10FC);
+ASSERT_REG_POSITION(reserved_sw_method14, 0x1100);
+ASSERT_REG_POSITION(reserved_sw_method15, 0x1104);
+ASSERT_REG_POSITION(no_operation_data_high, 0x110C);
+ASSERT_REG_POSITION(depth_bias_control, 0x1110);
+ASSERT_REG_POSITION(pm_trigger_end, 0x1114);
+ASSERT_REG_POSITION(vertex_id_base, 0x1118);
+ASSERT_REG_POSITION(stencil_compression_enabled, 0x111C);
+ASSERT_REG_POSITION(vertex_output_attribute_skip_masks, 0x1120);
+ASSERT_REG_POSITION(tir_control, 0x1130);
+ASSERT_REG_POSITION(mutable_method_treat_mutable_as_heavy, 0x1134);
+ASSERT_REG_POSITION(post_ps_use_pre_ps_coverage, 0x1138);
+ASSERT_REG_POSITION(fill_via_triangle_mode, 0x113C);
+ASSERT_REG_POSITION(blend_per_format_snorm8_unorm16_snorm16_enabled, 0x1140);
+ASSERT_REG_POSITION(flush_pending_writes_sm_global_store, 0x1144);
+ASSERT_REG_POSITION(vertex_attrib_format, 0x1160);
+ASSERT_REG_POSITION(multisample_sample_locations, 0x11E0);
+ASSERT_REG_POSITION(offset_render_target_index_by_viewport_index, 0x11F0);
+ASSERT_REG_POSITION(force_heavy_method_sync, 0x11F4);
+ASSERT_REG_POSITION(multisample_coverage_to_color, 0x11F8);
+ASSERT_REG_POSITION(decompress_zeta_surface, 0x11FC);
+ASSERT_REG_POSITION(zeta_sparse, 0x1208);
+ASSERT_REG_POSITION(invalidate_sampler_cache, 0x120C);
+ASSERT_REG_POSITION(invalidate_texture_header_cache, 0x1210);
+ASSERT_REG_POSITION(vertex_array_instance_first, 0x1214);
+ASSERT_REG_POSITION(vertex_array_instance_subsequent, 0x1218);
+ASSERT_REG_POSITION(rt_control, 0x121C);
+ASSERT_REG_POSITION(compression_threshold_samples, 0x1220);
+ASSERT_REG_POSITION(ps_interlock_control, 0x1224);
+ASSERT_REG_POSITION(zeta_size, 0x1228);
+ASSERT_REG_POSITION(sampler_binding, 0x1234);
+ASSERT_REG_POSITION(draw_auto_byte_count, 0x123C);
+ASSERT_REG_POSITION(post_vtg_shader_attrib_skip_mask, 0x1240);
+ASSERT_REG_POSITION(ps_ticket_dispenser_value, 0x1260);
+ASSERT_REG_POSITION(circular_buffer_size, 0x1280);
+ASSERT_REG_POSITION(vtg_register_watermarks, 0x1284);
+ASSERT_REG_POSITION(invalidate_texture_cache_no_wfi, 0x1288);
+ASSERT_REG_POSITION(l2_cache_rop_interlocked_reads, 0x1290);
+ASSERT_REG_POSITION(primitive_restart_topology_change_index, 0x12A4);
+ASSERT_REG_POSITION(zcull_region_enable, 0x12C8);
+ASSERT_REG_POSITION(depth_test_enable, 0x12CC);
+ASSERT_REG_POSITION(fill_mode, 0x12D0);
+ASSERT_REG_POSITION(shade_mode, 0x12D4);
+ASSERT_REG_POSITION(l2_cache_rop_non_interlocked_writes, 0x12D8);
+ASSERT_REG_POSITION(l2_cache_rop_interlocked_writes, 0x12DC);
+ASSERT_REG_POSITION(alpha_to_coverage_dither, 0x12E0);
+ASSERT_REG_POSITION(blend_per_target_enabled, 0x12E4);
+ASSERT_REG_POSITION(depth_write_enabled, 0x12E8);
+ASSERT_REG_POSITION(alpha_test_enabled, 0x12EC);
+ASSERT_REG_POSITION(inline_index_4x8, 0x1300);
+ASSERT_REG_POSITION(d3d_cull_mode, 0x1308);
+ASSERT_REG_POSITION(depth_test_func, 0x130C);
+ASSERT_REG_POSITION(alpha_test_ref, 0x1310);
+ASSERT_REG_POSITION(alpha_test_func, 0x1314);
+ASSERT_REG_POSITION(draw_auto_stride, 0x1318);
+ASSERT_REG_POSITION(blend_color, 0x131C);
+ASSERT_REG_POSITION(invalidate_sampler_cache_lines, 0x1330);
+ASSERT_REG_POSITION(invalidate_texture_header_cache_lines, 0x1334);
+ASSERT_REG_POSITION(invalidate_texture_data_cache_lines, 0x1338);
+ASSERT_REG_POSITION(blend, 0x133C);
+ASSERT_REG_POSITION(stencil_enable, 0x1380);
+ASSERT_REG_POSITION(stencil_front_op, 0x1384);
+ASSERT_REG_POSITION(stencil_front_ref, 0x1394);
+ASSERT_REG_POSITION(stencil_front_func_mask, 0x1398);
+ASSERT_REG_POSITION(stencil_front_mask, 0x139C);
+ASSERT_REG_POSITION(draw_auto_start_byte_count, 0x13A4);
+ASSERT_REG_POSITION(frag_color_clamp, 0x13A8);
+ASSERT_REG_POSITION(window_origin, 0x13AC);
+ASSERT_REG_POSITION(line_width_smooth, 0x13B0);
+ASSERT_REG_POSITION(line_width_aliased, 0x13B4);
+ASSERT_REG_POSITION(line_override_multisample, 0x1418);
+ASSERT_REG_POSITION(alpha_hysteresis_rounds, 0x1420);
+ASSERT_REG_POSITION(invalidate_sampler_cache_no_wfi, 0x1424);
+ASSERT_REG_POSITION(invalidate_texture_header_cache_no_wfi, 0x1428);
+ASSERT_REG_POSITION(global_base_vertex_index, 0x1434);
+ASSERT_REG_POSITION(global_base_instance_index, 0x1438);
+ASSERT_REG_POSITION(ps_warp_watermarks, 0x1450);
+ASSERT_REG_POSITION(ps_register_watermarks, 0x1454);
+ASSERT_REG_POSITION(store_zcull, 0x1464);
+ASSERT_REG_POSITION(iterated_blend_constants, 0x1480);
+ASSERT_REG_POSITION(load_zcull, 0x1500);
+ASSERT_REG_POSITION(surface_clip_id_height, 0x1504);
+ASSERT_REG_POSITION(surface_clip_id_clear_rect, 0x1508);
+ASSERT_REG_POSITION(user_clip_enable, 0x1510);
+ASSERT_REG_POSITION(zpass_pixel_count_enable, 0x1514);
+ASSERT_REG_POSITION(point_size, 0x1518);
+ASSERT_REG_POSITION(zcull_stats_enable, 0x151C);
+ASSERT_REG_POSITION(point_sprite_enable, 0x1520);
+ASSERT_REG_POSITION(shader_exceptions_enable, 0x1528);
+ASSERT_REG_POSITION(clear_report_value, 0x1530);
+ASSERT_REG_POSITION(anti_alias_enable, 0x1534);
+ASSERT_REG_POSITION(zeta_enable, 0x1538);
+ASSERT_REG_POSITION(anti_alias_alpha_control, 0x153C);
+ASSERT_REG_POSITION(render_enable, 0x1550);
+ASSERT_REG_POSITION(tex_sampler, 0x155C);
+ASSERT_REG_POSITION(slope_scale_depth_bias, 0x156C);
+ASSERT_REG_POSITION(line_anti_alias_enable, 0x1570);
+ASSERT_REG_POSITION(tex_header, 0x1574);
+ASSERT_REG_POSITION(active_zcull_region_id, 0x1590);
+ASSERT_REG_POSITION(stencil_two_side_enable, 0x1594);
+ASSERT_REG_POSITION(stencil_back_op, 0x1598);
+ASSERT_REG_POSITION(framebuffer_srgb, 0x15B8);
+ASSERT_REG_POSITION(depth_bias, 0x15BC);
+ASSERT_REG_POSITION(zcull_region_format, 0x15C8);
+ASSERT_REG_POSITION(rt_layer, 0x15CC);
+ASSERT_REG_POSITION(anti_alias_samples_mode, 0x15D0);
+ASSERT_REG_POSITION(edge_flag, 0x15E4);
+ASSERT_REG_POSITION(draw_inline_index, 0x15E8);
+ASSERT_REG_POSITION(inline_index_2x16, 0x15EC);
+ASSERT_REG_POSITION(vertex_global_base_offset, 0x15F4);
+ASSERT_REG_POSITION(zcull_region_pixel_offset, 0x15FC);
+ASSERT_REG_POSITION(point_sprite, 0x1604);
+ASSERT_REG_POSITION(program_region, 0x1608);
+ASSERT_REG_POSITION(default_attributes, 0x1610);
+ASSERT_REG_POSITION(draw, 0x1614);
+ASSERT_REG_POSITION(vertex_id_copy, 0x161C);
+ASSERT_REG_POSITION(add_to_primitive_id, 0x1620);
+ASSERT_REG_POSITION(load_to_primitive_id, 0x1624);
+ASSERT_REG_POSITION(shader_based_cull, 0x162C);
+ASSERT_REG_POSITION(class_version, 0x1638);
+ASSERT_REG_POSITION(primitive_restart, 0x1644);
+ASSERT_REG_POSITION(output_vertex_id, 0x164C);
+ASSERT_REG_POSITION(anti_alias_point_enable, 0x1658);
+ASSERT_REG_POSITION(point_center_mode, 0x165C);
+ASSERT_REG_POSITION(line_smooth_params, 0x1668);
+ASSERT_REG_POSITION(line_stipple_enable, 0x166C);
+ASSERT_REG_POSITION(line_smooth_edge_table, 0x1670);
+ASSERT_REG_POSITION(line_stipple_params, 0x1680);
+ASSERT_REG_POSITION(provoking_vertex, 0x1684);
+ASSERT_REG_POSITION(two_sided_light_enabled, 0x1688);
+ASSERT_REG_POSITION(polygon_stipple_enabled, 0x168C);
+ASSERT_REG_POSITION(shader_control, 0x1690);
+ASSERT_REG_POSITION(class_version_check, 0x16A0);
+ASSERT_REG_POSITION(sph_version, 0x16A4);
+ASSERT_REG_POSITION(sph_version_check, 0x16A8);
+ASSERT_REG_POSITION(alpha_to_coverage_override, 0x16B4);
+ASSERT_REG_POSITION(polygon_stipple_pattern, 0x1700);
+ASSERT_REG_POSITION(aam_version, 0x1790);
+ASSERT_REG_POSITION(aam_version_check, 0x1794);
+ASSERT_REG_POSITION(zeta_layer_offset, 0x179C);
+ASSERT_REG_POSITION(index_buffer, 0x17C8);
+ASSERT_REG_POSITION(index_buffer32_first, 0x17E4);
+ASSERT_REG_POSITION(index_buffer16_first, 0x17E8);
+ASSERT_REG_POSITION(index_buffer8_first, 0x17EC);
+ASSERT_REG_POSITION(index_buffer32_subsequent, 0x17F0);
+ASSERT_REG_POSITION(index_buffer16_subsequent, 0x17F4);
+ASSERT_REG_POSITION(index_buffer8_subsequent, 0x17F8);
+ASSERT_REG_POSITION(depth_bias_clamp, 0x187C);
+ASSERT_REG_POSITION(vertex_stream_instances, 0x1880);
+ASSERT_REG_POSITION(point_size_attribute, 0x1910);
+ASSERT_REG_POSITION(gl_cull_test_enabled, 0x1918);
+ASSERT_REG_POSITION(gl_front_face, 0x191C);
+ASSERT_REG_POSITION(gl_cull_face, 0x1920);
+ASSERT_REG_POSITION(viewport_pixel_center, 0x1924);
+ASSERT_REG_POSITION(viewport_scale_offset_enabled, 0x192C);
+ASSERT_REG_POSITION(viewport_clip_control, 0x193C);
+ASSERT_REG_POSITION(user_clip_op, 0x1940);
+ASSERT_REG_POSITION(render_enable_override, 0x1944);
+ASSERT_REG_POSITION(primitive_topology_control, 0x1948);
+ASSERT_REG_POSITION(window_clip_enable, 0x194C);
+ASSERT_REG_POSITION(invalidate_zcull, 0x1958);
+ASSERT_REG_POSITION(zcull, 0x1968);
+ASSERT_REG_POSITION(topology_override, 0x1970);
+ASSERT_REG_POSITION(zcull_sync, 0x1978);
+ASSERT_REG_POSITION(clip_id_test_enable, 0x197C);
+ASSERT_REG_POSITION(surface_clip_id_width, 0x1980);
+ASSERT_REG_POSITION(clip_id, 0x1984);
+ASSERT_REG_POSITION(depth_bounds_enable, 0x19BC);
+ASSERT_REG_POSITION(blend_float_zero_times_anything_is_zero, 0x19C0);
+ASSERT_REG_POSITION(logic_op, 0x19C4);
+ASSERT_REG_POSITION(z_compression_enable, 0x19CC);
+ASSERT_REG_POSITION(clear_surface, 0x19D0);
+ASSERT_REG_POSITION(clear_clip_id_surface, 0x19D4);
+ASSERT_REG_POSITION(color_compression_enable, 0x19E0);
+ASSERT_REG_POSITION(color_mask, 0x1A00);
+ASSERT_REG_POSITION(pipe_nop, 0x1A2C);
+ASSERT_REG_POSITION(spare, 0x1A30);
+ASSERT_REG_POSITION(report_semaphore, 0x1B00);
+ASSERT_REG_POSITION(vertex_streams, 0x1C00);
+ASSERT_REG_POSITION(blend_per_target, 0x1E00);
+ASSERT_REG_POSITION(vertex_stream_limits, 0x1F00);
+ASSERT_REG_POSITION(pipelines, 0x2000);
+ASSERT_REG_POSITION(falcon, 0x2300);
+ASSERT_REG_POSITION(const_buffer, 0x2380);
+ASSERT_REG_POSITION(bind_groups, 0x2400);
+ASSERT_REG_POSITION(color_clamp_enable, 0x2600);
+ASSERT_REG_POSITION(bindless_texture_const_buffer_slot, 0x2608);
+ASSERT_REG_POSITION(trap_handler, 0x260C);
+ASSERT_REG_POSITION(stream_out_layout, 0x2800);
+ASSERT_REG_POSITION(shader_performance, 0x333C);
+ASSERT_REG_POSITION(shadow_scratch, 0x3400);
#undef ASSERT_REG_POSITION
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 0efe58282..4eb7a100d 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -1,6 +1,7 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
+#include "common/algorithm.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
@@ -54,90 +55,124 @@ void MaxwellDMA::Launch() {
const LaunchDMA& launch = regs.launch_dma;
ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE);
ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED);
- ASSERT(regs.dst_params.origin.x == 0);
- ASSERT(regs.dst_params.origin.y == 0);
- const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH;
- const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH;
+ if (launch.multi_line_enable) {
+ const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH;
+ const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH;
- if (!is_src_pitch && !is_dst_pitch) {
- // If both the source and the destination are in block layout, assert.
- UNIMPLEMENTED_MSG("Tiled->Tiled DMA transfers are not yet implemented");
- return;
- }
+ if (!is_src_pitch && !is_dst_pitch) {
+ // If both the source and the destination are in block layout, report it as unimplemented and bail.
+ UNIMPLEMENTED_MSG("Tiled->Tiled DMA transfers are not yet implemented");
+ return;
+ }
- if (is_src_pitch && is_dst_pitch) {
- CopyPitchToPitch();
+ if (is_src_pitch && is_dst_pitch) {
+ for (u32 line = 0; line < regs.line_count; ++line) {
+ const GPUVAddr source_line =
+ regs.offset_in + static_cast<size_t>(line) * regs.pitch_in;
+ const GPUVAddr dest_line =
+ regs.offset_out + static_cast<size_t>(line) * regs.pitch_out;
+ memory_manager.CopyBlock(dest_line, source_line, regs.line_length_in);
+ }
+ } else {
+ if (!is_src_pitch && is_dst_pitch) {
+ CopyBlockLinearToPitch();
+ } else {
+ CopyPitchToBlockLinear();
+ }
+ }
} else {
- ASSERT(launch.multi_line_enable == 1);
-
- if (!is_src_pitch && is_dst_pitch) {
- CopyBlockLinearToPitch();
+ // TODO: allow multisized components.
+ auto& accelerate = rasterizer->AccessAccelerateDMA();
+ const bool is_const_a_dst = regs.remap_const.dst_x == RemapConst::Swizzle::CONST_A;
+ if (regs.launch_dma.remap_enable != 0 && is_const_a_dst) {
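+ // Remapping CONST_A into dst_x with 4-byte components makes this copy a
+ // 32-bit constant fill: it is handed to the rasterizer's accelerated
+ // buffer clear and mirrored into guest memory for CPU-side coherency.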
+ ASSERT(regs.remap_const.component_size_minus_one == 3);
+ accelerate.BufferClear(regs.offset_out, regs.line_length_in, regs.remap_consta_value);
+ std::vector<u32> tmp_buffer(regs.line_length_in, regs.remap_consta_value);
+ memory_manager.WriteBlockUnsafe(regs.offset_out,
+ reinterpret_cast<u8*>(tmp_buffer.data()),
+ regs.line_length_in * sizeof(u32));
} else {
- CopyPitchToBlockLinear();
+ auto convert_linear_2_blocklinear_addr = [](u64 address) {
+ return (address & ~0x1f0ULL) | ((address & 0x40) >> 2) | ((address & 0x10) << 1) |
+ ((address & 0x180) >> 1) | ((address & 0x20) << 3);
+ };
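+ // The helper above permutes bits 4..8 of a byte address (the 16-byte
+ // sector index within a 512-byte GOB) to translate between linear and
+ // block-linear sector orders; bits outside 0x1F0 pass through unchanged.
+ // Worked example from the masks: 0x10 -> 0x20, 0x20 -> 0x100, 0x40 -> 0x10,
+ // 0x180 -> 0xC0.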
+ auto src_kind = memory_manager.GetPageKind(regs.offset_in);
+ auto dst_kind = memory_manager.GetPageKind(regs.offset_out);
+ const bool is_src_pitch = IsPitchKind(static_cast<PTEKind>(src_kind));
+ const bool is_dst_pitch = IsPitchKind(static_cast<PTEKind>(dst_kind));
+ if (!is_src_pitch && is_dst_pitch) {
+ std::vector<u8> tmp_buffer(regs.line_length_in);
+ std::vector<u8> dst_buffer(regs.line_length_in);
+ memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(),
+ regs.line_length_in);
+ for (u32 offset = 0; offset < regs.line_length_in; ++offset) {
+ dst_buffer[offset] =
+ tmp_buffer[convert_linear_2_blocklinear_addr(regs.offset_in + offset) -
+ regs.offset_in];
+ }
+ memory_manager.WriteBlock(regs.offset_out, dst_buffer.data(), regs.line_length_in);
+ } else if (is_src_pitch && !is_dst_pitch) {
+ std::vector<u8> tmp_buffer(regs.line_length_in);
+ std::vector<u8> dst_buffer(regs.line_length_in);
+ memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(),
+ regs.line_length_in);
+ for (u32 offset = 0; offset < regs.line_length_in; ++offset) {
+ dst_buffer[convert_linear_2_blocklinear_addr(regs.offset_out + offset) -
+ regs.offset_out] = tmp_buffer[offset];
+ }
+ memory_manager.WriteBlock(regs.offset_out, dst_buffer.data(), regs.line_length_in);
+ } else {
+ if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) {
+ std::vector<u8> tmp_buffer(regs.line_length_in);
+ memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(),
+ regs.line_length_in);
+ memory_manager.WriteBlock(regs.offset_out, tmp_buffer.data(),
+ regs.line_length_in);
+ }
+ }
}
}
- ReleaseSemaphore();
-}
-void MaxwellDMA::CopyPitchToPitch() {
- // When `multi_line_enable` bit is enabled we copy a 2D image of dimensions
- // (line_length_in, line_count).
- // Otherwise the copy is performed as if we were copying a 1D buffer of length line_length_in.
- const bool remap_enabled = regs.launch_dma.remap_enable != 0;
- if (regs.launch_dma.multi_line_enable) {
- UNIMPLEMENTED_IF(remap_enabled);
-
- // Perform a line-by-line copy.
- // We're going to take a subrect of size (line_length_in, line_count) from the source
- // rectangle. There is no need to manually flush/invalidate the regions because CopyBlock
- // does that for us.
- for (u32 line = 0; line < regs.line_count; ++line) {
- const GPUVAddr source_line = regs.offset_in + static_cast<size_t>(line) * regs.pitch_in;
- const GPUVAddr dest_line = regs.offset_out + static_cast<size_t>(line) * regs.pitch_out;
- memory_manager.CopyBlock(dest_line, source_line, regs.line_length_in);
- }
- return;
- }
- // TODO: allow multisized components.
- auto& accelerate = rasterizer->AccessAccelerateDMA();
- const bool is_const_a_dst = regs.remap_const.dst_x == RemapConst::Swizzle::CONST_A;
- const bool is_buffer_clear = remap_enabled && is_const_a_dst;
- if (is_buffer_clear) {
- ASSERT(regs.remap_const.component_size_minus_one == 3);
- accelerate.BufferClear(regs.offset_out, regs.line_length_in, regs.remap_consta_value);
- std::vector<u32> tmp_buffer(regs.line_length_in, regs.remap_consta_value);
- memory_manager.WriteBlockUnsafe(regs.offset_out, reinterpret_cast<u8*>(tmp_buffer.data()),
- regs.line_length_in * sizeof(u32));
- return;
- }
- UNIMPLEMENTED_IF(remap_enabled);
- if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) {
- std::vector<u8> tmp_buffer(regs.line_length_in);
- memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(), regs.line_length_in);
- memory_manager.WriteBlock(regs.offset_out, tmp_buffer.data(), regs.line_length_in);
- }
+ ReleaseSemaphore();
}
void MaxwellDMA::CopyBlockLinearToPitch() {
UNIMPLEMENTED_IF(regs.src_params.block_size.width != 0);
- UNIMPLEMENTED_IF(regs.src_params.block_size.depth != 0);
UNIMPLEMENTED_IF(regs.src_params.layer != 0);
+ const bool is_remapping = regs.launch_dma.remap_enable != 0;
+
// Optimized path for micro copies.
const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count;
- if (dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X &&
+ if (!is_remapping && dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X &&
regs.src_params.height > GOB_SIZE_Y) {
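+ // The fast path applies only to small unremapped copies: the whole
+ // destination fits in under one GOB (GOB_SIZE = 512 bytes) with a pitch no
+ // wider than one GOB row (GOB_SIZE_X = 64 bytes), so a single GOB's worth
+ // of source data is enough to deswizzle.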
FastCopyBlockLinearToPitch();
return;
}
// Deswizzle the input and copy it over.
- UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0);
- const u32 bytes_per_pixel =
- regs.launch_dma.remap_enable ? regs.pitch_out / regs.line_length_in : 1;
const Parameters& src_params = regs.src_params;
- const u32 width = src_params.width;
+
+ const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1;
+ const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1;
+
+ const u32 base_bpp = !is_remapping ? 1U : num_remap_components * remap_components_size;
+
+ u32 width = src_params.width;
+ u32 x_elements = regs.line_length_in;
+ u32 x_offset = src_params.origin.x;
+ u32 bpp_shift = 0U;
+ if (!is_remapping) {
+ bpp_shift = Common::FoldRight(
+ 4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
+ width, x_elements, x_offset, static_cast<u32>(regs.offset_in));
+ width >>= bpp_shift;
+ x_elements >>= bpp_shift;
+ x_offset >>= bpp_shift;
+ }
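+ // bpp_shift is the number of trailing zero bits shared by the width, the
+ // copied span, the x offset and the source address, capped at 4. Grouping
+ // 1 << bpp_shift bytes into one synthetic pixel lets UnswizzleSubrect move
+ // up to 16 bytes at a time; e.g. when all four values are multiples of 16,
+ // the copy runs with 16-byte pixels.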
+
+ const u32 bytes_per_pixel = base_bpp << bpp_shift;
const u32 height = src_params.height;
const u32 depth = src_params.depth;
const u32 block_height = src_params.block_size.height;
@@ -155,30 +190,45 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
- UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, width, bytes_per_pixel,
- block_height, src_params.origin.x, src_params.origin.y, write_buffer.data(),
- read_buffer.data());
+ UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
+ src_params.origin.y, x_elements, regs.line_count, block_height, block_depth,
+ regs.pitch_out);
memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
}
void MaxwellDMA::CopyPitchToBlockLinear() {
UNIMPLEMENTED_IF_MSG(regs.dst_params.block_size.width != 0, "Block width is not one");
- UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0);
+ UNIMPLEMENTED_IF(regs.dst_params.layer != 0);
+
+ const bool is_remapping = regs.launch_dma.remap_enable != 0;
+ const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1;
+ const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1;
const auto& dst_params = regs.dst_params;
- const u32 bytes_per_pixel =
- regs.launch_dma.remap_enable ? regs.pitch_in / regs.line_length_in : 1;
- const u32 width = dst_params.width;
+
+ const u32 base_bpp = !is_remapping ? 1U : num_remap_components * remap_components_size;
+
+ u32 width = dst_params.width;
+ u32 x_elements = regs.line_length_in;
+ u32 x_offset = dst_params.origin.x;
+ u32 bpp_shift = 0U;
+ if (!is_remapping) {
+ bpp_shift = Common::FoldRight(
+ 4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
+ width, x_elements, x_offset, static_cast<u32>(regs.offset_out));
+ width >>= bpp_shift;
+ x_elements >>= bpp_shift;
+ x_offset >>= bpp_shift;
+ }
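+ // Same width-compaction trick as in CopyBlockLinearToPitch: choose the
+ // widest power-of-two byte grouping (at most 16 bytes) that keeps the
+ // width, span, offset and destination address aligned.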
+
+ const u32 bytes_per_pixel = base_bpp << bpp_shift;
const u32 height = dst_params.height;
const u32 depth = dst_params.depth;
const u32 block_height = dst_params.block_size.height;
const u32 block_depth = dst_params.block_size.depth;
const size_t dst_size =
CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth);
- const size_t dst_layer_size =
- CalculateSize(true, bytes_per_pixel, width, height, 1, block_height, block_depth);
-
const size_t src_size = static_cast<size_t>(regs.pitch_in) * regs.line_count;
if (read_buffer.size() < src_size) {
@@ -188,32 +238,23 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
write_buffer.resize(dst_size);
}
+ memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
if (Settings::IsGPULevelExtreme()) {
- memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
} else {
- memory_manager.ReadBlockUnsafe(regs.offset_in, read_buffer.data(), src_size);
memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
}
// If the input is linear and the output is tiled, swizzle the input and copy it over.
- if (regs.dst_params.block_size.depth > 0) {
- ASSERT(dst_params.layer == 0);
- SwizzleSliceToVoxel(regs.line_length_in, regs.line_count, regs.pitch_in, width, height,
- bytes_per_pixel, block_height, block_depth, dst_params.origin.x,
- dst_params.origin.y, write_buffer.data(), read_buffer.data());
- } else {
- SwizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_in, width, bytes_per_pixel,
- write_buffer.data() + dst_layer_size * dst_params.layer, read_buffer.data(),
- block_height, dst_params.origin.x, dst_params.origin.y);
- }
+ SwizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
+ dst_params.origin.y, x_elements, regs.line_count, block_height, block_depth,
+ regs.pitch_in);
memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
}
void MaxwellDMA::FastCopyBlockLinearToPitch() {
- const u32 bytes_per_pixel =
- regs.launch_dma.remap_enable ? regs.pitch_out / regs.line_length_in : 1;
+ const u32 bytes_per_pixel = 1U;
const size_t src_size = GOB_SIZE;
const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count;
u32 pos_x = regs.src_params.origin.x;
@@ -239,9 +280,10 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() {
memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
}
- UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, regs.src_params.width,
- bytes_per_pixel, regs.src_params.block_size.height, pos_x, pos_y,
- write_buffer.data(), read_buffer.data());
+ UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, regs.src_params.width,
+ regs.src_params.height, 1, pos_x, pos_y, regs.line_length_in, regs.line_count,
+ regs.src_params.block_size.height, regs.src_params.block_size.depth,
+ regs.pitch_out);
memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
}
@@ -249,16 +291,24 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() {
void MaxwellDMA::ReleaseSemaphore() {
const auto type = regs.launch_dma.semaphore_type;
const GPUVAddr address = regs.semaphore.address;
+ const u32 payload = regs.semaphore.payload;
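+ // Semaphore releases are deferred through SignalFence below instead of
+ // being written immediately, so the payload only becomes visible once the
+ // previously queued GPU work has signaled.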
switch (type) {
case LaunchDMA::SemaphoreType::NONE:
break;
- case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE:
- memory_manager.Write<u32>(address, regs.semaphore.payload);
+ case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE: {
+ std::function<void()> operation(
+ [this, address, payload] { memory_manager.Write<u32>(address, payload); });
+ rasterizer->SignalFence(std::move(operation));
break;
- case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE:
- memory_manager.Write<u64>(address, static_cast<u64>(regs.semaphore.payload));
- memory_manager.Write<u64>(address + 8, system.GPU().GetTicks());
+ }
+ case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE: {
+ std::function<void()> operation([this, address, payload] {
+ memory_manager.Write<u64>(address + sizeof(u64), system.GPU().GetTicks());
+ memory_manager.Write<u64>(address, payload);
+ });
+ rasterizer->SignalFence(std::move(operation));
break;
+ }
default:
ASSERT_MSG(false, "Unknown semaphore type: {}", static_cast<u32>(type.Value()));
}
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index 074bac92c..953e34adc 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -189,10 +189,16 @@ public:
BitField<4, 3, Swizzle> dst_y;
BitField<8, 3, Swizzle> dst_z;
BitField<12, 3, Swizzle> dst_w;
+ BitField<0, 15, u32> dst_components_raw;
BitField<16, 2, u32> component_size_minus_one;
BitField<20, 2, u32> num_src_components_minus_one;
BitField<24, 2, u32> num_dst_components_minus_one;
};
+
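+ // dst_components_raw overlays the 4-bit-strided dst_x..dst_w selectors, so
+ // GetComponent(0) == dst_x, GetComponent(1) == dst_y, and so on.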
+ Swizzle GetComponent(size_t i) const {
+ const u32 raw = dst_components_raw;
+ return static_cast<Swizzle>((raw >> (i * 4)) & 0x7);
+ }
};
static_assert(sizeof(RemapConst) == 12);
@@ -213,8 +219,6 @@ private:
/// registers.
void Launch();
- void CopyPitchToPitch();
-
void CopyBlockLinearToPitch();
void CopyPitchToBlockLinear();
diff --git a/src/video_core/engines/puller.cpp b/src/video_core/engines/puller.cpp
new file mode 100644
index 000000000..3977bb0fb
--- /dev/null
+++ b/src/video_core/engines/puller.cpp
@@ -0,0 +1,305 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/assert.h"
+#include "common/logging/log.h"
+#include "common/settings.h"
+#include "core/core.h"
+#include "video_core/control/channel_state.h"
+#include "video_core/dma_pusher.h"
+#include "video_core/engines/fermi_2d.h"
+#include "video_core/engines/kepler_compute.h"
+#include "video_core/engines/kepler_memory.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/engines/maxwell_dma.h"
+#include "video_core/engines/puller.h"
+#include "video_core/gpu.h"
+#include "video_core/memory_manager.h"
+#include "video_core/rasterizer_interface.h"
+
+namespace Tegra::Engines {
+
+Puller::Puller(GPU& gpu_, MemoryManager& memory_manager_, DmaPusher& dma_pusher_,
+ Control::ChannelState& channel_state_)
+ : gpu{gpu_}, memory_manager{memory_manager_}, dma_pusher{dma_pusher_}, channel_state{
+ channel_state_} {}
+
+Puller::~Puller() = default;
+
+void Puller::ProcessBindMethod(const MethodCall& method_call) {
+ // Bind the current subchannel to the desired engine id.
+ LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel,
+ method_call.argument);
+ const auto engine_id = static_cast<EngineID>(method_call.argument);
+ bound_engines[method_call.subchannel] = engine_id;
+ switch (engine_id) {
+ case EngineID::FERMI_TWOD_A:
+ dma_pusher.BindSubchannel(channel_state.fermi_2d.get(), method_call.subchannel);
+ break;
+ case EngineID::MAXWELL_B:
+ dma_pusher.BindSubchannel(channel_state.maxwell_3d.get(), method_call.subchannel);
+ break;
+ case EngineID::KEPLER_COMPUTE_B:
+ dma_pusher.BindSubchannel(channel_state.kepler_compute.get(), method_call.subchannel);
+ break;
+ case EngineID::MAXWELL_DMA_COPY_A:
+ dma_pusher.BindSubchannel(channel_state.maxwell_dma.get(), method_call.subchannel);
+ break;
+ case EngineID::KEPLER_INLINE_TO_MEMORY_B:
+ dma_pusher.BindSubchannel(channel_state.kepler_memory.get(), method_call.subchannel);
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", engine_id);
+ }
+}
+
+void Puller::ProcessFenceActionMethod() {
+ switch (regs.fence_action.op) {
+ case Puller::FenceOperation::Acquire:
+ // UNIMPLEMENTED_MSG("Channel Scheduling pending.");
+ // WaitFence(regs.fence_action.syncpoint_id, regs.fence_value);
+ rasterizer->ReleaseFences();
+ break;
+ case Puller::FenceOperation::Increment:
+ rasterizer->SignalSyncPoint(regs.fence_action.syncpoint_id);
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented operation {}", regs.fence_action.op.Value());
+ }
+}
+
+void Puller::ProcessSemaphoreTriggerMethod() {
+    const auto semaphore_operation_mask = 0xF;
+    const auto op =
+        static_cast<GpuSemaphoreOperation>(regs.semaphore_trigger & semaphore_operation_mask);
+ if (op == GpuSemaphoreOperation::WriteLong) {
+ const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()};
+ const u32 payload = regs.semaphore_sequence;
+        // WriteLong never blocks: write the zero-extended payload, then the
+        // 64-bit GPU timestamp, directly to guest memory.
+        memory_manager.Write<u64>(sequence_address + sizeof(u64), gpu.GetTicks());
+        memory_manager.Write<u64>(sequence_address, payload);
+ } else {
+ do {
+ const u32 word{memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress())};
+ regs.acquire_source = true;
+ regs.acquire_value = regs.semaphore_sequence;
+ if (op == GpuSemaphoreOperation::AcquireEqual) {
+ regs.acquire_active = true;
+ regs.acquire_mode = false;
+ if (word != regs.acquire_value) {
+ rasterizer->ReleaseFences();
+ continue;
+ }
+ } else if (op == GpuSemaphoreOperation::AcquireGequal) {
+ regs.acquire_active = true;
+ regs.acquire_mode = true;
+ if (word < regs.acquire_value) {
+ rasterizer->ReleaseFences();
+ continue;
+ }
+ } else if (op == GpuSemaphoreOperation::AcquireMask) {
+                if ((word & regs.semaphore_sequence) == 0) {
+ rasterizer->ReleaseFences();
+ continue;
+ }
+ } else {
+ LOG_ERROR(HW_GPU, "Invalid semaphore operation");
+ }
+ } while (false);
+ }
+}
+
+void Puller::ProcessSemaphoreRelease() {
+ const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()};
+ const u32 payload = regs.semaphore_release;
+ std::function<void()> operation([this, sequence_address, payload] {
+ memory_manager.Write<u32>(sequence_address, payload);
+ });
+ rasterizer->SyncOperation(std::move(operation));
+}
+
+void Puller::ProcessSemaphoreAcquire() {
+ u32 word = memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress());
+ const auto value = regs.semaphore_acquire;
+ while (word != value) {
+ regs.acquire_active = true;
+ regs.acquire_value = value;
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ rasterizer->ReleaseFences();
+ word = memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress());
+ // TODO(kemathe73) figure out how to do the acquire_timeout
+ regs.acquire_mode = false;
+ regs.acquire_source = false;
+ }
+}
+
+/// Calls a GPU puller method.
+void Puller::CallPullerMethod(const MethodCall& method_call) {
+ regs.reg_array[method_call.method] = method_call.argument;
+ const auto method = static_cast<BufferMethods>(method_call.method);
+
+ switch (method) {
+ case BufferMethods::BindObject: {
+ ProcessBindMethod(method_call);
+ break;
+ }
+ case BufferMethods::Nop:
+ case BufferMethods::SemaphoreAddressHigh:
+ case BufferMethods::SemaphoreAddressLow:
+ case BufferMethods::SemaphoreSequencePayload:
+ case BufferMethods::SyncpointPayload:
+ break;
+ case BufferMethods::WrcacheFlush:
+ case BufferMethods::RefCnt:
+ rasterizer->SignalReference();
+ break;
+ case BufferMethods::SyncpointOperation:
+ ProcessFenceActionMethod();
+ break;
+ case BufferMethods::WaitForIdle:
+ rasterizer->WaitForIdle();
+ break;
+ case BufferMethods::SemaphoreOperation: {
+ ProcessSemaphoreTriggerMethod();
+ break;
+ }
+ case BufferMethods::NonStallInterrupt: {
+ LOG_ERROR(HW_GPU, "Special puller engine method NonStallInterrupt not implemented");
+ break;
+ }
+ case BufferMethods::MemOpA: {
+        LOG_ERROR(HW_GPU, "Memory Operation A not implemented");
+ break;
+ }
+ case BufferMethods::MemOpB: {
+        // TODO: Implement this better; for now treat MemOpB as a full GPU cache invalidation.
+ rasterizer->InvalidateGPUCache();
+ break;
+ }
+ case BufferMethods::MemOpC:
+ case BufferMethods::MemOpD: {
+        LOG_ERROR(HW_GPU, "Memory Operation C,D not implemented");
+ break;
+ }
+ case BufferMethods::SemaphoreAcquire: {
+ ProcessSemaphoreAcquire();
+ break;
+ }
+ case BufferMethods::SemaphoreRelease: {
+ ProcessSemaphoreRelease();
+ break;
+ }
+ case BufferMethods::Yield: {
+ // TODO(Kmather73): Research and implement this method.
+ LOG_ERROR(HW_GPU, "Special puller engine method Yield not implemented");
+ break;
+ }
+ default:
+ LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented", method);
+ break;
+ }
+}
+
+/// Calls a GPU engine method.
+void Puller::CallEngineMethod(const MethodCall& method_call) {
+ const EngineID engine = bound_engines[method_call.subchannel];
+
+ switch (engine) {
+ case EngineID::FERMI_TWOD_A:
+ channel_state.fermi_2d->CallMethod(method_call.method, method_call.argument,
+ method_call.IsLastCall());
+ break;
+ case EngineID::MAXWELL_B:
+ channel_state.maxwell_3d->CallMethod(method_call.method, method_call.argument,
+ method_call.IsLastCall());
+ break;
+ case EngineID::KEPLER_COMPUTE_B:
+ channel_state.kepler_compute->CallMethod(method_call.method, method_call.argument,
+ method_call.IsLastCall());
+ break;
+ case EngineID::MAXWELL_DMA_COPY_A:
+ channel_state.maxwell_dma->CallMethod(method_call.method, method_call.argument,
+ method_call.IsLastCall());
+ break;
+ case EngineID::KEPLER_INLINE_TO_MEMORY_B:
+ channel_state.kepler_memory->CallMethod(method_call.method, method_call.argument,
+ method_call.IsLastCall());
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented engine");
+ }
+}
+
+/// Calls a GPU engine multivalue method.
+void Puller::CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ const EngineID engine = bound_engines[subchannel];
+
+ switch (engine) {
+ case EngineID::FERMI_TWOD_A:
+ channel_state.fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::MAXWELL_B:
+ channel_state.maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::KEPLER_COMPUTE_B:
+ channel_state.kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::MAXWELL_DMA_COPY_A:
+ channel_state.maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::KEPLER_INLINE_TO_MEMORY_B:
+ channel_state.kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented engine");
+ }
+}
+
+/// Calls a GPU method.
+void Puller::CallMethod(const MethodCall& method_call) {
+ LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method_call.method,
+ method_call.subchannel);
+
+ ASSERT(method_call.subchannel < bound_engines.size());
+
+ if (ExecuteMethodOnEngine(method_call.method)) {
+ CallEngineMethod(method_call);
+ } else {
+ CallPullerMethod(method_call);
+ }
+}
+
+/// Calls a GPU multivalue method.
+void Puller::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel);
+
+ ASSERT(subchannel < bound_engines.size());
+
+ if (ExecuteMethodOnEngine(method)) {
+ CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending);
+ } else {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallPullerMethod(MethodCall{
+ method,
+ base_start[i],
+ subchannel,
+ methods_pending - static_cast<u32>(i),
+ });
+ }
+ }
+}
+
+void Puller::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
+ rasterizer = rasterizer_;
+}
+
+/// Determines where the method should be executed.
+[[nodiscard]] bool Puller::ExecuteMethodOnEngine(u32 method) {
+ const auto buffer_method = static_cast<BufferMethods>(method);
+ return buffer_method >= BufferMethods::NonPullerMethods;
+}
+
+} // namespace Tegra::Engines
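
The semaphore handling above reduces to one predicate per acquire flavor. A compact restatement (sketch only; names are local to this example), matching the corrected AcquireMask test:

#include <cstdint>

enum class GpuSemaphoreOperation : std::uint32_t {
    AcquireEqual = 0x1,
    WriteLong = 0x2,
    AcquireGequal = 0x4,
    AcquireMask = 0x8,
};

// True when the acquire is satisfied and the puller may proceed.
bool AcquireSatisfied(GpuSemaphoreOperation op, std::uint32_t word, std::uint32_t value) {
    switch (op) {
    case GpuSemaphoreOperation::AcquireEqual:
        return word == value; // wait for the exact value
    case GpuSemaphoreOperation::AcquireGequal:
        return word >= value; // wait until the value is reached or passed
    case GpuSemaphoreOperation::AcquireMask:
        return (word & value) != 0; // wait until any masked bit is set
    default:
        return true; // WriteLong never blocks
    }
}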
diff --git a/src/video_core/engines/puller.h b/src/video_core/engines/puller.h
new file mode 100644
index 000000000..d4175ee94
--- /dev/null
+++ b/src/video_core/engines/puller.h
@@ -0,0 +1,179 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <array>
+#include <cstddef>
+#include <vector>
+#include "common/bit_field.h"
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+#include "video_core/engines/engine_interface.h"
+
+namespace Core {
+class System;
+}
+
+namespace Tegra {
+class GPU;
+class MemoryManager;
+class DmaPusher;
+
+enum class EngineID {
+ FERMI_TWOD_A = 0x902D, // 2D Engine
+ MAXWELL_B = 0xB197, // 3D Engine
+ KEPLER_COMPUTE_B = 0xB1C0,
+ KEPLER_INLINE_TO_MEMORY_B = 0xA140,
+ MAXWELL_DMA_COPY_A = 0xB0B5,
+};
+
+namespace Control {
+struct ChannelState;
+}
+} // namespace Tegra
+
+namespace VideoCore {
+class RasterizerInterface;
+}
+
+namespace Tegra::Engines {
+
+class Puller final {
+public:
+ struct MethodCall {
+ u32 method{};
+ u32 argument{};
+ u32 subchannel{};
+ u32 method_count{};
+
+ explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0)
+ : method(method_), argument(argument_), subchannel(subchannel_),
+ method_count(method_count_) {}
+
+ [[nodiscard]] bool IsLastCall() const {
+ return method_count <= 1;
+ }
+ };
+
+ enum class FenceOperation : u32 {
+ Acquire = 0,
+ Increment = 1,
+ };
+
+ union FenceAction {
+ u32 raw;
+ BitField<0, 1, FenceOperation> op;
+ BitField<8, 24, u32> syncpoint_id;
+ };
+
+    explicit Puller(GPU& gpu_, MemoryManager& memory_manager_, DmaPusher& dma_pusher_,
+                    Control::ChannelState& channel_state_);
+ ~Puller();
+
+ void CallMethod(const MethodCall& method_call);
+
+ void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending);
+
+ void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
+
+ void CallPullerMethod(const MethodCall& method_call);
+
+ void CallEngineMethod(const MethodCall& method_call);
+
+ void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending);
+
+private:
+ Tegra::GPU& gpu;
+
+ MemoryManager& memory_manager;
+ DmaPusher& dma_pusher;
+ Control::ChannelState& channel_state;
+ VideoCore::RasterizerInterface* rasterizer = nullptr;
+
+    struct Regs {
+        // One word per puller register; the named fields below only map the
+        // registers this implementation handles.
+        static constexpr size_t NUM_REGS = 0x800;
+
+ union {
+ struct {
+ INSERT_PADDING_WORDS_NOINIT(0x4);
+ struct {
+ u32 address_high;
+ u32 address_low;
+
+ [[nodiscard]] GPUVAddr SemaphoreAddress() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
+ } semaphore_address;
+
+ u32 semaphore_sequence;
+ u32 semaphore_trigger;
+ INSERT_PADDING_WORDS_NOINIT(0xC);
+
+            // The pusher and the puller share the reference counter; the
+            // pusher only has read access
+ u32 reference_count;
+ INSERT_PADDING_WORDS_NOINIT(0x5);
+
+ u32 semaphore_acquire;
+ u32 semaphore_release;
+ u32 fence_value;
+ FenceAction fence_action;
+ INSERT_PADDING_WORDS_NOINIT(0xE2);
+
+ // Puller state
+ u32 acquire_mode;
+ u32 acquire_source;
+ u32 acquire_active;
+ u32 acquire_timeout;
+ u32 acquire_value;
+ };
+ std::array<u32, NUM_REGS> reg_array;
+ };
+ } regs{};
+
+ void ProcessBindMethod(const MethodCall& method_call);
+ void ProcessFenceActionMethod();
+ void ProcessSemaphoreAcquire();
+ void ProcessSemaphoreRelease();
+ void ProcessSemaphoreTriggerMethod();
+ [[nodiscard]] bool ExecuteMethodOnEngine(u32 method);
+
+ /// Mapping of command subchannels to their bound engine ids
+ std::array<EngineID, 8> bound_engines{};
+
+ enum class GpuSemaphoreOperation {
+ AcquireEqual = 0x1,
+ WriteLong = 0x2,
+ AcquireGequal = 0x4,
+ AcquireMask = 0x8,
+ };
+
+#define ASSERT_REG_POSITION(field_name, position) \
+ static_assert(offsetof(Regs, field_name) == position * 4, \
+ "Field " #field_name " has invalid position")
+
+ ASSERT_REG_POSITION(semaphore_address, 0x4);
+ ASSERT_REG_POSITION(semaphore_sequence, 0x6);
+ ASSERT_REG_POSITION(semaphore_trigger, 0x7);
+ ASSERT_REG_POSITION(reference_count, 0x14);
+ ASSERT_REG_POSITION(semaphore_acquire, 0x1A);
+ ASSERT_REG_POSITION(semaphore_release, 0x1B);
+ ASSERT_REG_POSITION(fence_value, 0x1C);
+ ASSERT_REG_POSITION(fence_action, 0x1D);
+
+ ASSERT_REG_POSITION(acquire_mode, 0x100);
+ ASSERT_REG_POSITION(acquire_source, 0x101);
+ ASSERT_REG_POSITION(acquire_active, 0x102);
+ ASSERT_REG_POSITION(acquire_timeout, 0x103);
+ ASSERT_REG_POSITION(acquire_value, 0x104);
+
+#undef ASSERT_REG_POSITION
+};
+
+} // namespace Tegra::Engines
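
The FenceAction union packs its operation into bit 0 and the syncpoint id into bits 8-31. A sketch of the equivalent manual packing, handy when reading raw register traces:

#include <cstdint>

// Mirrors BitField<0, 1, FenceOperation> and BitField<8, 24, u32> above.
constexpr std::uint32_t MakeFenceAction(std::uint32_t op, std::uint32_t syncpoint_id) {
    return (op & 0x1u) | ((syncpoint_id & 0x00FF'FFFFu) << 8);
}

// Increment (op = 1) on syncpoint 5 encodes as 0x501.
static_assert(MakeFenceAction(1, 5) == 0x501);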
diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h
index 1e9832ddd..c390ac91b 100644
--- a/src/video_core/fence_manager.h
+++ b/src/video_core/fence_manager.h
@@ -4,40 +4,24 @@
#pragma once
#include <algorithm>
+#include <cstring>
+#include <deque>
+#include <functional>
+#include <memory>
#include <queue>
#include "common/common_types.h"
#include "video_core/delayed_destruction_ring.h"
#include "video_core/gpu.h"
-#include "video_core/memory_manager.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/syncpoint_manager.h"
#include "video_core/rasterizer_interface.h"
namespace VideoCommon {
class FenceBase {
public:
- explicit FenceBase(u32 payload_, bool is_stubbed_)
- : address{}, payload{payload_}, is_semaphore{false}, is_stubbed{is_stubbed_} {}
-
- explicit FenceBase(GPUVAddr address_, u32 payload_, bool is_stubbed_)
- : address{address_}, payload{payload_}, is_semaphore{true}, is_stubbed{is_stubbed_} {}
-
- GPUVAddr GetAddress() const {
- return address;
- }
-
- u32 GetPayload() const {
- return payload;
- }
-
- bool IsSemaphore() const {
- return is_semaphore;
- }
-
-private:
- GPUVAddr address;
- u32 payload;
- bool is_semaphore;
+ explicit FenceBase(bool is_stubbed_) : is_stubbed{is_stubbed_} {}
protected:
bool is_stubbed;
@@ -57,30 +41,28 @@ public:
buffer_cache.AccumulateFlushes();
}
- void SignalSemaphore(GPUVAddr addr, u32 value) {
+ void SyncOperation(std::function<void()>&& func) {
+ uncommitted_operations.emplace_back(std::move(func));
+ }
+
+ void SignalFence(std::function<void()>&& func) {
TryReleasePendingFences();
const bool should_flush = ShouldFlush();
CommitAsyncFlushes();
- TFence new_fence = CreateFence(addr, value, !should_flush);
+ uncommitted_operations.emplace_back(std::move(func));
+ CommitOperations();
+ TFence new_fence = CreateFence(!should_flush);
fences.push(new_fence);
QueueFence(new_fence);
if (should_flush) {
rasterizer.FlushCommands();
}
- rasterizer.SyncGuestHost();
}
void SignalSyncPoint(u32 value) {
- TryReleasePendingFences();
- const bool should_flush = ShouldFlush();
- CommitAsyncFlushes();
- TFence new_fence = CreateFence(value, !should_flush);
- fences.push(new_fence);
- QueueFence(new_fence);
- if (should_flush) {
- rasterizer.FlushCommands();
- }
- rasterizer.SyncGuestHost();
+ syncpoint_manager.IncrementGuest(value);
+ std::function<void()> func([this, value] { syncpoint_manager.IncrementHost(value); });
+ SignalFence(std::move(func));
}
void WaitPendingFences() {
@@ -90,11 +72,10 @@ public:
WaitFence(current_fence);
}
PopAsyncFlushes();
- if (current_fence->IsSemaphore()) {
- gpu_memory.template Write<u32>(current_fence->GetAddress(),
- current_fence->GetPayload());
- } else {
- gpu.IncrementSyncPoint(current_fence->GetPayload());
+ auto operations = std::move(pending_operations.front());
+ pending_operations.pop_front();
+ for (auto& operation : operations) {
+ operation();
}
PopFence();
}
@@ -104,16 +85,14 @@ protected:
explicit FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_,
TTextureCache& texture_cache_, TTBufferCache& buffer_cache_,
TQueryCache& query_cache_)
- : rasterizer{rasterizer_}, gpu{gpu_}, gpu_memory{gpu.MemoryManager()},
+ : rasterizer{rasterizer_}, gpu{gpu_}, syncpoint_manager{gpu.Host1x().GetSyncpointManager()},
texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, query_cache{query_cache_} {}
virtual ~FenceManager() = default;
- /// Creates a Sync Point Fence Interface, does not create a backend fence if 'is_stubbed' is
+    /// Creates a fence interface; does not create a backend fence if 'is_stubbed' is
/// true
- virtual TFence CreateFence(u32 value, bool is_stubbed) = 0;
- /// Creates a Semaphore Fence Interface, does not create a backend fence if 'is_stubbed' is true
- virtual TFence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) = 0;
+ virtual TFence CreateFence(bool is_stubbed) = 0;
/// Queues a fence into the backend if the fence isn't stubbed.
virtual void QueueFence(TFence& fence) = 0;
/// Notifies that the backend fence has been signaled/reached in host GPU.
@@ -123,7 +102,7 @@ protected:
VideoCore::RasterizerInterface& rasterizer;
Tegra::GPU& gpu;
- Tegra::MemoryManager& gpu_memory;
+ Tegra::Host1x::SyncpointManager& syncpoint_manager;
TTextureCache& texture_cache;
TTBufferCache& buffer_cache;
TQueryCache& query_cache;
@@ -136,11 +115,10 @@ private:
return;
}
PopAsyncFlushes();
- if (current_fence->IsSemaphore()) {
- gpu_memory.template Write<u32>(current_fence->GetAddress(),
- current_fence->GetPayload());
- } else {
- gpu.IncrementSyncPoint(current_fence->GetPayload());
+ auto operations = std::move(pending_operations.front());
+ pending_operations.pop_front();
+ for (auto& operation : operations) {
+ operation();
}
PopFence();
}
@@ -159,16 +137,20 @@ private:
}
void PopAsyncFlushes() {
- std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
- texture_cache.PopAsyncFlushes();
- buffer_cache.PopAsyncFlushes();
+ {
+ std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ texture_cache.PopAsyncFlushes();
+ buffer_cache.PopAsyncFlushes();
+ }
query_cache.PopAsyncFlushes();
}
void CommitAsyncFlushes() {
- std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
- texture_cache.CommitAsyncFlushes();
- buffer_cache.CommitAsyncFlushes();
+ {
+ std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ texture_cache.CommitAsyncFlushes();
+ buffer_cache.CommitAsyncFlushes();
+ }
query_cache.CommitAsyncFlushes();
}
@@ -177,7 +159,13 @@ private:
fences.pop();
}
+ void CommitOperations() {
+ pending_operations.emplace_back(std::move(uncommitted_operations));
+ }
+
std::queue<TFence> fences;
+ std::deque<std::function<void()>> uncommitted_operations;
+ std::deque<std::deque<std::function<void()>>> pending_operations;
DelayedDestructionRing<TFence, 6> delayed_destruction_ring;
};
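
The rewritten fence manager replaces address/payload fences with batches of deferred std::function operations. A reduced sketch of that lifecycle, with std types standing in for the backend fence:

#include <deque>
#include <functional>

struct MiniFenceManager {
    std::deque<std::function<void()>> uncommitted_operations;
    std::deque<std::deque<std::function<void()>>> pending_operations;

    // SyncOperation: queue work to run once the next fence completes.
    void SyncOperation(std::function<void()>&& func) {
        uncommitted_operations.push_back(std::move(func));
    }

    // SignalFence: seal the current batch behind a (virtual) backend fence.
    void SignalFence(std::function<void()>&& func) {
        uncommitted_operations.push_back(std::move(func));
        pending_operations.push_back(std::move(uncommitted_operations));
        uncommitted_operations.clear(); // moved-from; make the state explicit
    }

    // Called when the host GPU reaches the oldest fence.
    void OnFenceReached() {
        for (auto& operation : pending_operations.front()) {
            operation();
        }
        pending_operations.pop_front();
    }
};

SignalSyncPoint builds on the same mechanism with a guest/host counter pair: the guest counter advances as soon as the command is seen, the host counter only when the fence operation runs. A minimal model of that invariant:

#include <atomic>
#include <cstdint>

struct MiniSyncpoint {
    std::atomic<std::uint32_t> guest{};
    std::atomic<std::uint32_t> host{};

    void IncrementGuest() { guest.fetch_add(1); }
    void IncrementHost() { host.fetch_add(1); } // runs as a fence operation

    // The guest counter may run ahead of the host counter, never behind.
    bool Consistent() const { return host.load() <= guest.load(); }
};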
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 33431f2a0..28b38273e 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -14,10 +14,11 @@
#include "core/core.h"
#include "core/core_timing.h"
#include "core/frontend/emu_window.h"
-#include "core/hardware_interrupt_manager.h"
#include "core/hle/service/nvdrv/nvdata.h"
#include "core/perf_stats.h"
#include "video_core/cdma_pusher.h"
+#include "video_core/control/channel_state.h"
+#include "video_core/control/scheduler.h"
#include "video_core/dma_pusher.h"
#include "video_core/engines/fermi_2d.h"
#include "video_core/engines/kepler_compute.h"
@@ -26,75 +27,64 @@
#include "video_core/engines/maxwell_dma.h"
#include "video_core/gpu.h"
#include "video_core/gpu_thread.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/syncpoint_manager.h"
#include "video_core/memory_manager.h"
#include "video_core/renderer_base.h"
#include "video_core/shader_notify.h"
namespace Tegra {
-MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
-
struct GPU::Impl {
explicit Impl(GPU& gpu_, Core::System& system_, bool is_async_, bool use_nvdec_)
- : gpu{gpu_}, system{system_}, memory_manager{std::make_unique<Tegra::MemoryManager>(
- system)},
- dma_pusher{std::make_unique<Tegra::DmaPusher>(system, gpu)}, use_nvdec{use_nvdec_},
- maxwell_3d{std::make_unique<Engines::Maxwell3D>(system, *memory_manager)},
- fermi_2d{std::make_unique<Engines::Fermi2D>()},
- kepler_compute{std::make_unique<Engines::KeplerCompute>(system, *memory_manager)},
- maxwell_dma{std::make_unique<Engines::MaxwellDMA>(system, *memory_manager)},
- kepler_memory{std::make_unique<Engines::KeplerMemory>(system, *memory_manager)},
+ : gpu{gpu_}, system{system_}, host1x{system.Host1x()}, use_nvdec{use_nvdec_},
shader_notify{std::make_unique<VideoCore::ShaderNotify>()}, is_async{is_async_},
- gpu_thread{system_, is_async_} {}
+ gpu_thread{system_, is_async_}, scheduler{std::make_unique<Control::Scheduler>(gpu)} {}
~Impl() = default;
- /// Binds a renderer to the GPU.
- void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer_) {
- renderer = std::move(renderer_);
- rasterizer = renderer->ReadRasterizer();
-
- memory_manager->BindRasterizer(rasterizer);
- maxwell_3d->BindRasterizer(rasterizer);
- fermi_2d->BindRasterizer(rasterizer);
- kepler_compute->BindRasterizer(rasterizer);
- kepler_memory->BindRasterizer(rasterizer);
- maxwell_dma->BindRasterizer(rasterizer);
+ std::shared_ptr<Control::ChannelState> CreateChannel(s32 channel_id) {
+ auto channel_state = std::make_shared<Tegra::Control::ChannelState>(channel_id);
+ channels.emplace(channel_id, channel_state);
+ scheduler->DeclareChannel(channel_state);
+ return channel_state;
}
- /// Calls a GPU method.
- void CallMethod(const GPU::MethodCall& method_call) {
- LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method_call.method,
- method_call.subchannel);
+ void BindChannel(s32 channel_id) {
+ if (bound_channel == channel_id) {
+ return;
+ }
+ auto it = channels.find(channel_id);
+ ASSERT(it != channels.end());
+ bound_channel = channel_id;
+ current_channel = it->second.get();
- ASSERT(method_call.subchannel < bound_engines.size());
+ rasterizer->BindChannel(*current_channel);
+ }
- if (ExecuteMethodOnEngine(method_call.method)) {
- CallEngineMethod(method_call);
- } else {
- CallPullerMethod(method_call);
- }
+ std::shared_ptr<Control::ChannelState> AllocateChannel() {
+ return CreateChannel(new_channel_id++);
}
- /// Calls a GPU multivalue method.
- void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
- u32 methods_pending) {
- LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel);
+ void InitChannel(Control::ChannelState& to_init) {
+ to_init.Init(system, gpu);
+ to_init.BindRasterizer(rasterizer);
+ rasterizer->InitializeChannel(to_init);
+ }
- ASSERT(subchannel < bound_engines.size());
+ void InitAddressSpace(Tegra::MemoryManager& memory_manager) {
+ memory_manager.BindRasterizer(rasterizer);
+ }
- if (ExecuteMethodOnEngine(method)) {
- CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending);
- } else {
- for (std::size_t i = 0; i < amount; i++) {
- CallPullerMethod(GPU::MethodCall{
- method,
- base_start[i],
- subchannel,
- methods_pending - static_cast<u32>(i),
- });
- }
- }
+    void ReleaseChannel([[maybe_unused]] Control::ChannelState& to_release) {
+ UNIMPLEMENTED();
+ }
+
+ /// Binds a renderer to the GPU.
+ void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer_) {
+ renderer = std::move(renderer_);
+ rasterizer = renderer->ReadRasterizer();
+ host1x.MemoryManager().BindRasterizer(rasterizer);
}
/// Flush all current written commands into the host GPU for execution.
@@ -103,85 +93,82 @@ struct GPU::Impl {
}
/// Synchronizes CPU writes with Host GPU memory.
- void SyncGuestHost() {
- rasterizer->SyncGuestHost();
+ void InvalidateGPUCache() {
+ rasterizer->InvalidateGPUCache();
}
/// Signal the ending of command list.
void OnCommandListEnd() {
- if (is_async) {
- // This command only applies to asynchronous GPU mode
- gpu_thread.OnCommandListEnd();
- }
+ gpu_thread.OnCommandListEnd();
}
/// Request a host GPU memory flush from the CPU.
- [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size) {
- std::unique_lock lck{flush_request_mutex};
- const u64 fence = ++last_flush_fence;
- flush_requests.emplace_back(fence, addr, size);
+ template <typename Func>
+ [[nodiscard]] u64 RequestSyncOperation(Func&& action) {
+ std::unique_lock lck{sync_request_mutex};
+ const u64 fence = ++last_sync_fence;
+        sync_requests.emplace_back(std::forward<Func>(action));
return fence;
}
/// Obtains current flush request fence id.
- [[nodiscard]] u64 CurrentFlushRequestFence() const {
- return current_flush_fence.load(std::memory_order_relaxed);
+ [[nodiscard]] u64 CurrentSyncRequestFence() const {
+ return current_sync_fence.load(std::memory_order_relaxed);
+ }
+
+ void WaitForSyncOperation(const u64 fence) {
+ std::unique_lock lck{sync_request_mutex};
+ sync_request_cv.wait(lck, [this, fence] { return CurrentSyncRequestFence() >= fence; });
}
/// Tick pending requests within the GPU.
void TickWork() {
- std::unique_lock lck{flush_request_mutex};
- while (!flush_requests.empty()) {
- auto& request = flush_requests.front();
- const u64 fence = request.fence;
- const VAddr addr = request.addr;
- const std::size_t size = request.size;
- flush_requests.pop_front();
- flush_request_mutex.unlock();
- rasterizer->FlushRegion(addr, size);
- current_flush_fence.store(fence);
- flush_request_mutex.lock();
+ std::unique_lock lck{sync_request_mutex};
+ while (!sync_requests.empty()) {
+ auto request = std::move(sync_requests.front());
+ sync_requests.pop_front();
+ sync_request_mutex.unlock();
+ request();
+ current_sync_fence.fetch_add(1, std::memory_order_release);
+ sync_request_mutex.lock();
+ sync_request_cv.notify_all();
}
}
/// Returns a reference to the Maxwell3D GPU engine.
[[nodiscard]] Engines::Maxwell3D& Maxwell3D() {
- return *maxwell_3d;
+ ASSERT(current_channel);
+ return *current_channel->maxwell_3d;
}
/// Returns a const reference to the Maxwell3D GPU engine.
[[nodiscard]] const Engines::Maxwell3D& Maxwell3D() const {
- return *maxwell_3d;
+ ASSERT(current_channel);
+ return *current_channel->maxwell_3d;
}
/// Returns a reference to the KeplerCompute GPU engine.
[[nodiscard]] Engines::KeplerCompute& KeplerCompute() {
- return *kepler_compute;
+ ASSERT(current_channel);
+ return *current_channel->kepler_compute;
}
/// Returns a reference to the KeplerCompute GPU engine.
[[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const {
- return *kepler_compute;
- }
-
- /// Returns a reference to the GPU memory manager.
- [[nodiscard]] Tegra::MemoryManager& MemoryManager() {
- return *memory_manager;
- }
-
- /// Returns a const reference to the GPU memory manager.
- [[nodiscard]] const Tegra::MemoryManager& MemoryManager() const {
- return *memory_manager;
+ ASSERT(current_channel);
+ return *current_channel->kepler_compute;
}
/// Returns a reference to the GPU DMA pusher.
[[nodiscard]] Tegra::DmaPusher& DmaPusher() {
- return *dma_pusher;
+ ASSERT(current_channel);
+ return *current_channel->dma_pusher;
}
/// Returns a const reference to the GPU DMA pusher.
[[nodiscard]] const Tegra::DmaPusher& DmaPusher() const {
- return *dma_pusher;
+ ASSERT(current_channel);
+ return *current_channel->dma_pusher;
}
/// Returns a reference to the underlying renderer.
@@ -204,77 +191,6 @@ struct GPU::Impl {
return *shader_notify;
}
- /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
- void WaitFence(u32 syncpoint_id, u32 value) {
- // Synced GPU, is always in sync
- if (!is_async) {
- return;
- }
- if (syncpoint_id == UINT32_MAX) {
- // TODO: Research what this does.
- LOG_ERROR(HW_GPU, "Waiting for syncpoint -1 not implemented");
- return;
- }
- MICROPROFILE_SCOPE(GPU_wait);
- std::unique_lock lock{sync_mutex};
- sync_cv.wait(lock, [=, this] {
- if (shutting_down.load(std::memory_order_relaxed)) {
- // We're shutting down, ensure no threads continue to wait for the next syncpoint
- return true;
- }
- return syncpoints.at(syncpoint_id).load() >= value;
- });
- }
-
- void IncrementSyncPoint(u32 syncpoint_id) {
- auto& syncpoint = syncpoints.at(syncpoint_id);
- syncpoint++;
- std::scoped_lock lock{sync_mutex};
- sync_cv.notify_all();
- auto& interrupt = syncpt_interrupts.at(syncpoint_id);
- if (!interrupt.empty()) {
- u32 value = syncpoint.load();
- auto it = interrupt.begin();
- while (it != interrupt.end()) {
- if (value >= *it) {
- TriggerCpuInterrupt(syncpoint_id, *it);
- it = interrupt.erase(it);
- continue;
- }
- it++;
- }
- }
- }
-
- [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const {
- return syncpoints.at(syncpoint_id).load();
- }
-
- void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) {
- std::scoped_lock lock{sync_mutex};
- auto& interrupt = syncpt_interrupts.at(syncpoint_id);
- bool contains = std::any_of(interrupt.begin(), interrupt.end(),
- [value](u32 in_value) { return in_value == value; });
- if (contains) {
- return;
- }
- interrupt.emplace_back(value);
- }
-
- [[nodiscard]] bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value) {
- std::scoped_lock lock{sync_mutex};
- auto& interrupt = syncpt_interrupts.at(syncpoint_id);
- const auto iter =
- std::find_if(interrupt.begin(), interrupt.end(),
- [value](u32 interrupt_value) { return value == interrupt_value; });
-
- if (iter == interrupt.end()) {
- return false;
- }
- interrupt.erase(iter);
- return true;
- }
-
[[nodiscard]] u64 GetTicks() const {
        // These values were reverse engineered by fincs from NVN
// The gpu clock is reported in units of 385/625 nanoseconds
@@ -306,7 +222,7 @@ struct GPU::Impl {
/// This can be used to launch any necessary threads and register any necessary
/// core timing events.
void Start() {
- gpu_thread.StartThread(*renderer, renderer->Context(), *dma_pusher);
+ gpu_thread.StartThread(*renderer, renderer->Context(), *scheduler);
cpu_context = renderer->GetRenderWindow().CreateSharedContext();
cpu_context->MakeCurrent();
}
@@ -328,8 +244,8 @@ struct GPU::Impl {
}
/// Push GPU command entries to be processed
- void PushGPUEntries(Tegra::CommandList&& entries) {
- gpu_thread.SubmitList(std::move(entries));
+ void PushGPUEntries(s32 channel, Tegra::CommandList&& entries) {
+ gpu_thread.SubmitList(channel, std::move(entries));
}
/// Push GPU command buffer entries to be processed
@@ -339,7 +255,7 @@ struct GPU::Impl {
}
if (!cdma_pushers.contains(id)) {
- cdma_pushers.insert_or_assign(id, std::make_unique<Tegra::CDmaPusher>(gpu));
+ cdma_pushers.insert_or_assign(id, std::make_unique<Tegra::CDmaPusher>(host1x));
}
// SubmitCommandBuffer would make the nvdec operations async, this is not currently working
@@ -376,308 +292,55 @@ struct GPU::Impl {
gpu_thread.FlushAndInvalidateRegion(addr, size);
}
- void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const {
- auto& interrupt_manager = system.InterruptManager();
- interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value);
- }
-
- void ProcessBindMethod(const GPU::MethodCall& method_call) {
- // Bind the current subchannel to the desired engine id.
- LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel,
- method_call.argument);
- const auto engine_id = static_cast<EngineID>(method_call.argument);
- bound_engines[method_call.subchannel] = static_cast<EngineID>(engine_id);
- switch (engine_id) {
- case EngineID::FERMI_TWOD_A:
- dma_pusher->BindSubchannel(fermi_2d.get(), method_call.subchannel);
- break;
- case EngineID::MAXWELL_B:
- dma_pusher->BindSubchannel(maxwell_3d.get(), method_call.subchannel);
- break;
- case EngineID::KEPLER_COMPUTE_B:
- dma_pusher->BindSubchannel(kepler_compute.get(), method_call.subchannel);
- break;
- case EngineID::MAXWELL_DMA_COPY_A:
- dma_pusher->BindSubchannel(maxwell_dma.get(), method_call.subchannel);
- break;
- case EngineID::KEPLER_INLINE_TO_MEMORY_B:
- dma_pusher->BindSubchannel(kepler_memory.get(), method_call.subchannel);
- break;
- default:
- UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", engine_id);
- }
- }
-
- void ProcessFenceActionMethod() {
- switch (regs.fence_action.op) {
- case GPU::FenceOperation::Acquire:
- WaitFence(regs.fence_action.syncpoint_id, regs.fence_value);
- break;
- case GPU::FenceOperation::Increment:
- IncrementSyncPoint(regs.fence_action.syncpoint_id);
- break;
- default:
- UNIMPLEMENTED_MSG("Unimplemented operation {}", regs.fence_action.op.Value());
- }
- }
-
- void ProcessWaitForInterruptMethod() {
- // TODO(bunnei) ImplementMe
- LOG_WARNING(HW_GPU, "(STUBBED) called");
- }
-
- void ProcessSemaphoreTriggerMethod() {
- const auto semaphoreOperationMask = 0xF;
- const auto op =
- static_cast<GpuSemaphoreOperation>(regs.semaphore_trigger & semaphoreOperationMask);
- if (op == GpuSemaphoreOperation::WriteLong) {
- struct Block {
- u32 sequence;
- u32 zeros = 0;
- u64 timestamp;
- };
-
- Block block{};
- block.sequence = regs.semaphore_sequence;
- // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of
- // CoreTiming
- block.timestamp = GetTicks();
- memory_manager->WriteBlock(regs.semaphore_address.SemaphoreAddress(), &block,
- sizeof(block));
- } else {
- const u32 word{memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress())};
- if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) ||
- (op == GpuSemaphoreOperation::AcquireGequal &&
- static_cast<s32>(word - regs.semaphore_sequence) > 0) ||
- (op == GpuSemaphoreOperation::AcquireMask && (word & regs.semaphore_sequence))) {
- // Nothing to do in this case
+ void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
+ std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences) {
+ size_t current_request_counter{};
+ {
+ std::unique_lock<std::mutex> lk(request_swap_mutex);
+ if (free_swap_counters.empty()) {
+ current_request_counter = request_swap_counters.size();
+ request_swap_counters.emplace_back(num_fences);
} else {
- regs.acquire_source = true;
- regs.acquire_value = regs.semaphore_sequence;
- if (op == GpuSemaphoreOperation::AcquireEqual) {
- regs.acquire_active = true;
- regs.acquire_mode = false;
- } else if (op == GpuSemaphoreOperation::AcquireGequal) {
- regs.acquire_active = true;
- regs.acquire_mode = true;
- } else if (op == GpuSemaphoreOperation::AcquireMask) {
- // TODO(kemathe) The acquire mask operation waits for a value that, ANDed with
- // semaphore_sequence, gives a non-0 result
- LOG_ERROR(HW_GPU, "Invalid semaphore operation AcquireMask not implemented");
- } else {
- LOG_ERROR(HW_GPU, "Invalid semaphore operation");
- }
+ current_request_counter = free_swap_counters.front();
+ request_swap_counters[current_request_counter] = num_fences;
+ free_swap_counters.pop_front();
}
}
- }
-
- void ProcessSemaphoreRelease() {
- memory_manager->Write<u32>(regs.semaphore_address.SemaphoreAddress(),
- regs.semaphore_release);
- }
-
- void ProcessSemaphoreAcquire() {
- const u32 word = memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress());
- const auto value = regs.semaphore_acquire;
- if (word != value) {
- regs.acquire_active = true;
- regs.acquire_value = value;
- // TODO(kemathe73) figure out how to do the acquire_timeout
- regs.acquire_mode = false;
- regs.acquire_source = false;
- }
- }
-
- /// Calls a GPU puller method.
- void CallPullerMethod(const GPU::MethodCall& method_call) {
- regs.reg_array[method_call.method] = method_call.argument;
- const auto method = static_cast<BufferMethods>(method_call.method);
-
- switch (method) {
- case BufferMethods::BindObject: {
- ProcessBindMethod(method_call);
- break;
- }
- case BufferMethods::Nop:
- case BufferMethods::SemaphoreAddressHigh:
- case BufferMethods::SemaphoreAddressLow:
- case BufferMethods::SemaphoreSequence:
- break;
- case BufferMethods::UnkCacheFlush:
- rasterizer->SyncGuestHost();
- break;
- case BufferMethods::WrcacheFlush:
- rasterizer->SignalReference();
- break;
- case BufferMethods::FenceValue:
- break;
- case BufferMethods::RefCnt:
- rasterizer->SignalReference();
- break;
- case BufferMethods::FenceAction:
- ProcessFenceActionMethod();
- break;
- case BufferMethods::WaitForInterrupt:
- rasterizer->WaitForIdle();
- break;
- case BufferMethods::SemaphoreTrigger: {
- ProcessSemaphoreTriggerMethod();
- break;
- }
- case BufferMethods::NotifyIntr: {
- // TODO(Kmather73): Research and implement this method.
- LOG_ERROR(HW_GPU, "Special puller engine method NotifyIntr not implemented");
- break;
- }
- case BufferMethods::Unk28: {
- // TODO(Kmather73): Research and implement this method.
- LOG_ERROR(HW_GPU, "Special puller engine method Unk28 not implemented");
- break;
- }
- case BufferMethods::SemaphoreAcquire: {
- ProcessSemaphoreAcquire();
- break;
- }
- case BufferMethods::SemaphoreRelease: {
- ProcessSemaphoreRelease();
- break;
- }
- case BufferMethods::Yield: {
- // TODO(Kmather73): Research and implement this method.
- LOG_ERROR(HW_GPU, "Special puller engine method Yield not implemented");
- break;
- }
- default:
- LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented", method);
- break;
- }
- }
-
- /// Calls a GPU engine method.
- void CallEngineMethod(const GPU::MethodCall& method_call) {
- const EngineID engine = bound_engines[method_call.subchannel];
-
- switch (engine) {
- case EngineID::FERMI_TWOD_A:
- fermi_2d->CallMethod(method_call.method, method_call.argument,
- method_call.IsLastCall());
- break;
- case EngineID::MAXWELL_B:
- maxwell_3d->CallMethod(method_call.method, method_call.argument,
- method_call.IsLastCall());
- break;
- case EngineID::KEPLER_COMPUTE_B:
- kepler_compute->CallMethod(method_call.method, method_call.argument,
- method_call.IsLastCall());
- break;
- case EngineID::MAXWELL_DMA_COPY_A:
- maxwell_dma->CallMethod(method_call.method, method_call.argument,
- method_call.IsLastCall());
- break;
- case EngineID::KEPLER_INLINE_TO_MEMORY_B:
- kepler_memory->CallMethod(method_call.method, method_call.argument,
- method_call.IsLastCall());
- break;
- default:
- UNIMPLEMENTED_MSG("Unimplemented engine");
- }
- }
-
- /// Calls a GPU engine multivalue method.
- void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
- u32 methods_pending) {
- const EngineID engine = bound_engines[subchannel];
-
- switch (engine) {
- case EngineID::FERMI_TWOD_A:
- fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending);
- break;
- case EngineID::MAXWELL_B:
- maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending);
- break;
- case EngineID::KEPLER_COMPUTE_B:
- kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending);
- break;
- case EngineID::MAXWELL_DMA_COPY_A:
- maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending);
- break;
- case EngineID::KEPLER_INLINE_TO_MEMORY_B:
- kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending);
- break;
- default:
- UNIMPLEMENTED_MSG("Unimplemented engine");
- }
- }
-
- /// Determines where the method should be executed.
- [[nodiscard]] bool ExecuteMethodOnEngine(u32 method) {
- const auto buffer_method = static_cast<BufferMethods>(method);
- return buffer_method >= BufferMethods::NonPullerMethods;
- }
-
- struct Regs {
- static constexpr size_t NUM_REGS = 0x40;
-
- union {
- struct {
- INSERT_PADDING_WORDS_NOINIT(0x4);
- struct {
- u32 address_high;
- u32 address_low;
-
- [[nodiscard]] GPUVAddr SemaphoreAddress() const {
- return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
- address_low);
+ const auto wait_fence =
+ RequestSyncOperation([this, current_request_counter, framebuffer, fences, num_fences] {
+ auto& syncpoint_manager = host1x.GetSyncpointManager();
+ if (num_fences == 0) {
+ renderer->SwapBuffers(framebuffer);
+ }
+            const auto executor = [this, current_request_counter,
+ framebuffer_copy = *framebuffer]() {
+ {
+ std::unique_lock<std::mutex> lk(request_swap_mutex);
+ if (--request_swap_counters[current_request_counter] != 0) {
+ return;
+ }
+ free_swap_counters.push_back(current_request_counter);
}
- } semaphore_address;
-
- u32 semaphore_sequence;
- u32 semaphore_trigger;
- INSERT_PADDING_WORDS_NOINIT(0xC);
-
- // The pusher and the puller share the reference counter, the pusher only has read
- // access
- u32 reference_count;
- INSERT_PADDING_WORDS_NOINIT(0x5);
-
- u32 semaphore_acquire;
- u32 semaphore_release;
- u32 fence_value;
- GPU::FenceAction fence_action;
- INSERT_PADDING_WORDS_NOINIT(0xE2);
-
- // Puller state
- u32 acquire_mode;
- u32 acquire_source;
- u32 acquire_active;
- u32 acquire_timeout;
- u32 acquire_value;
- };
- std::array<u32, NUM_REGS> reg_array;
- };
- } regs{};
+ renderer->SwapBuffers(&framebuffer_copy);
+ };
+ for (size_t i = 0; i < num_fences; i++) {
+                syncpoint_manager.RegisterGuestAction(fences[i].id, fences[i].value, executor);
+ }
+ });
+ gpu_thread.TickGPU();
+ WaitForSyncOperation(wait_fence);
+ }
GPU& gpu;
Core::System& system;
- std::unique_ptr<Tegra::MemoryManager> memory_manager;
- std::unique_ptr<Tegra::DmaPusher> dma_pusher;
+ Host1x::Host1x& host1x;
+
std::map<u32, std::unique_ptr<Tegra::CDmaPusher>> cdma_pushers;
std::unique_ptr<VideoCore::RendererBase> renderer;
VideoCore::RasterizerInterface* rasterizer = nullptr;
const bool use_nvdec;
- /// Mapping of command subchannels to their bound engine ids
- std::array<EngineID, 8> bound_engines{};
- /// 3D engine
- std::unique_ptr<Engines::Maxwell3D> maxwell_3d;
- /// 2D engine
- std::unique_ptr<Engines::Fermi2D> fermi_2d;
- /// Compute engine
- std::unique_ptr<Engines::KeplerCompute> kepler_compute;
- /// DMA engine
- std::unique_ptr<Engines::MaxwellDMA> maxwell_dma;
- /// Inline memory engine
- std::unique_ptr<Engines::KeplerMemory> kepler_memory;
+ s32 new_channel_id{1};
/// Shader build notifier
std::unique_ptr<VideoCore::ShaderNotify> shader_notify;
/// When true, we are about to shut down emulation session, so terminate outstanding tasks
@@ -692,51 +355,25 @@ struct GPU::Impl {
std::condition_variable sync_cv;
- struct FlushRequest {
- explicit FlushRequest(u64 fence_, VAddr addr_, std::size_t size_)
- : fence{fence_}, addr{addr_}, size{size_} {}
- u64 fence;
- VAddr addr;
- std::size_t size;
- };
-
- std::list<FlushRequest> flush_requests;
- std::atomic<u64> current_flush_fence{};
- u64 last_flush_fence{};
- std::mutex flush_request_mutex;
+ std::list<std::function<void()>> sync_requests;
+ std::atomic<u64> current_sync_fence{};
+ u64 last_sync_fence{};
+ std::mutex sync_request_mutex;
+ std::condition_variable sync_request_cv;
const bool is_async;
VideoCommon::GPUThread::ThreadManager gpu_thread;
std::unique_ptr<Core::Frontend::GraphicsContext> cpu_context;
-#define ASSERT_REG_POSITION(field_name, position) \
- static_assert(offsetof(Regs, field_name) == position * 4, \
- "Field " #field_name " has invalid position")
-
- ASSERT_REG_POSITION(semaphore_address, 0x4);
- ASSERT_REG_POSITION(semaphore_sequence, 0x6);
- ASSERT_REG_POSITION(semaphore_trigger, 0x7);
- ASSERT_REG_POSITION(reference_count, 0x14);
- ASSERT_REG_POSITION(semaphore_acquire, 0x1A);
- ASSERT_REG_POSITION(semaphore_release, 0x1B);
- ASSERT_REG_POSITION(fence_value, 0x1C);
- ASSERT_REG_POSITION(fence_action, 0x1D);
-
- ASSERT_REG_POSITION(acquire_mode, 0x100);
- ASSERT_REG_POSITION(acquire_source, 0x101);
- ASSERT_REG_POSITION(acquire_active, 0x102);
- ASSERT_REG_POSITION(acquire_timeout, 0x103);
- ASSERT_REG_POSITION(acquire_value, 0x104);
-
-#undef ASSERT_REG_POSITION
-
- enum class GpuSemaphoreOperation {
- AcquireEqual = 0x1,
- WriteLong = 0x2,
- AcquireGequal = 0x4,
- AcquireMask = 0x8,
- };
+ std::unique_ptr<Tegra::Control::Scheduler> scheduler;
+ std::unordered_map<s32, std::shared_ptr<Tegra::Control::ChannelState>> channels;
+    Tegra::Control::ChannelState* current_channel{};
+ s32 bound_channel{-1};
+
+ std::deque<size_t> free_swap_counters;
+ std::deque<size_t> request_swap_counters;
+ std::mutex request_swap_mutex;
};
GPU::GPU(Core::System& system, bool is_async, bool use_nvdec)
@@ -744,25 +381,36 @@ GPU::GPU(Core::System& system, bool is_async, bool use_nvdec)
GPU::~GPU() = default;
-void GPU::BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer) {
- impl->BindRenderer(std::move(renderer));
+std::shared_ptr<Control::ChannelState> GPU::AllocateChannel() {
+ return impl->AllocateChannel();
+}
+
+void GPU::InitChannel(Control::ChannelState& to_init) {
+ impl->InitChannel(to_init);
+}
+
+void GPU::BindChannel(s32 channel_id) {
+ impl->BindChannel(channel_id);
}
-void GPU::CallMethod(const MethodCall& method_call) {
- impl->CallMethod(method_call);
+void GPU::ReleaseChannel(Control::ChannelState& to_release) {
+ impl->ReleaseChannel(to_release);
}
-void GPU::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
- u32 methods_pending) {
- impl->CallMultiMethod(method, subchannel, base_start, amount, methods_pending);
+void GPU::InitAddressSpace(Tegra::MemoryManager& memory_manager) {
+ impl->InitAddressSpace(memory_manager);
+}
+
+void GPU::BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer) {
+ impl->BindRenderer(std::move(renderer));
}
void GPU::FlushCommands() {
impl->FlushCommands();
}
-void GPU::SyncGuestHost() {
- impl->SyncGuestHost();
+void GPU::InvalidateGPUCache() {
+ impl->InvalidateGPUCache();
}
void GPU::OnCommandListEnd() {
@@ -770,17 +418,32 @@ void GPU::OnCommandListEnd() {
}
u64 GPU::RequestFlush(VAddr addr, std::size_t size) {
- return impl->RequestFlush(addr, size);
+ return impl->RequestSyncOperation(
+ [this, addr, size]() { impl->rasterizer->FlushRegion(addr, size); });
}
-u64 GPU::CurrentFlushRequestFence() const {
- return impl->CurrentFlushRequestFence();
+u64 GPU::CurrentSyncRequestFence() const {
+ return impl->CurrentSyncRequestFence();
+}
+
+void GPU::WaitForSyncOperation(u64 fence) {
+ return impl->WaitForSyncOperation(fence);
}
void GPU::TickWork() {
impl->TickWork();
}
+/// Gets a mutable reference to the Host1x interface.
+Host1x::Host1x& GPU::Host1x() {
+ return impl->host1x;
+}
+
+/// Gets an immutable reference to the Host1x interface.
+const Host1x::Host1x& GPU::Host1x() const {
+ return impl->host1x;
+}
+
Engines::Maxwell3D& GPU::Maxwell3D() {
return impl->Maxwell3D();
}
@@ -797,14 +460,6 @@ const Engines::KeplerCompute& GPU::KeplerCompute() const {
return impl->KeplerCompute();
}
-Tegra::MemoryManager& GPU::MemoryManager() {
- return impl->MemoryManager();
-}
-
-const Tegra::MemoryManager& GPU::MemoryManager() const {
- return impl->MemoryManager();
-}
-
Tegra::DmaPusher& GPU::DmaPusher() {
return impl->DmaPusher();
}
@@ -829,24 +484,9 @@ const VideoCore::ShaderNotify& GPU::ShaderNotify() const {
return impl->ShaderNotify();
}
-void GPU::WaitFence(u32 syncpoint_id, u32 value) {
- impl->WaitFence(syncpoint_id, value);
-}
-
-void GPU::IncrementSyncPoint(u32 syncpoint_id) {
- impl->IncrementSyncPoint(syncpoint_id);
-}
-
-u32 GPU::GetSyncpointValue(u32 syncpoint_id) const {
- return impl->GetSyncpointValue(syncpoint_id);
-}
-
-void GPU::RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) {
- impl->RegisterSyncptInterrupt(syncpoint_id, value);
-}
-
-bool GPU::CancelSyncptInterrupt(u32 syncpoint_id, u32 value) {
- return impl->CancelSyncptInterrupt(syncpoint_id, value);
+void GPU::RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
+ std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences) {
+ impl->RequestSwapBuffers(framebuffer, fences, num_fences);
}
u64 GPU::GetTicks() const {
@@ -881,8 +521,8 @@ void GPU::ReleaseContext() {
impl->ReleaseContext();
}
-void GPU::PushGPUEntries(Tegra::CommandList&& entries) {
- impl->PushGPUEntries(std::move(entries));
+void GPU::PushGPUEntries(s32 channel, Tegra::CommandList&& entries) {
+ impl->PushGPUEntries(channel, std::move(entries));
}
void GPU::PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries) {
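
The flush-request list became a generic sync-request queue: RequestSyncOperation enqueues a closure and returns a fence id, TickWork drains the queue on the GPU thread, and WaitForSyncOperation blocks until the fence is published. A self-contained sketch of that handshake:

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <deque>
#include <functional>
#include <mutex>

struct SyncRequestQueue {
    std::deque<std::function<void()>> sync_requests;
    std::atomic<std::uint64_t> current_fence{};
    std::uint64_t last_fence{};
    std::mutex mutex;
    std::condition_variable cv;

    std::uint64_t Request(std::function<void()> func) {
        std::scoped_lock lock{mutex};
        sync_requests.push_back(std::move(func));
        return ++last_fence; // fence id the caller can wait on
    }

    void Tick() { // GPU thread
        std::unique_lock lock{mutex};
        while (!sync_requests.empty()) {
            auto request = std::move(sync_requests.front());
            sync_requests.pop_front();
            lock.unlock();
            request(); // run outside the lock, like TickWork above
            current_fence.fetch_add(1, std::memory_order_release);
            lock.lock();
            cv.notify_all();
        }
    }

    void Wait(std::uint64_t fence) {
        std::unique_lock lock{mutex};
        cv.wait(lock, [&] { return current_fence.load(std::memory_order_acquire) >= fence; });
    }
};

RequestSwapBuffers gates the swap behind a per-request countdown: one decrement per guest fence, and only the final decrement performs the swap. A sketch of the countdown (the real code guards it with request_swap_mutex):

#include <cstdio>
#include <functional>
#include <vector>

int main() {
    int remaining = 3; // num_fences for this swap request
    std::vector<std::function<void()>> fence_callbacks;
    for (int i = 0; i < 3; ++i) {
        fence_callbacks.emplace_back([&remaining] {
            if (--remaining == 0) {
                std::printf("SwapBuffers\n"); // runs exactly once
            }
        });
    }
    // In the real code the syncpoint manager fires these as fences signal.
    for (auto& callback : fence_callbacks) {
        callback();
    }
    return 0;
}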
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index b939ba315..d0709dc69 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -24,11 +24,15 @@ namespace Tegra {
class DmaPusher;
struct CommandList;
+// TODO: Implement the formats that are commented out below
enum class RenderTargetFormat : u32 {
NONE = 0x0,
R32B32G32A32_FLOAT = 0xC0,
R32G32B32A32_SINT = 0xC1,
R32G32B32A32_UINT = 0xC2,
+ // R32G32B32X32_FLOAT = 0xC3,
+ // R32G32B32X32_SINT = 0xC4,
+ // R32G32B32X32_UINT = 0xC5,
R16G16B16A16_UNORM = 0xC6,
R16G16B16A16_SNORM = 0xC7,
R16G16B16A16_SINT = 0xC8,
@@ -38,8 +42,8 @@ enum class RenderTargetFormat : u32 {
R32G32_SINT = 0xCC,
R32G32_UINT = 0xCD,
R16G16B16X16_FLOAT = 0xCE,
- B8G8R8A8_UNORM = 0xCF,
- B8G8R8A8_SRGB = 0xD0,
+ A8R8G8B8_UNORM = 0xCF,
+ A8R8G8B8_SRGB = 0xD0,
A2B10G10R10_UNORM = 0xD1,
A2B10G10R10_UINT = 0xD2,
A8B8G8R8_UNORM = 0xD5,
@@ -52,10 +56,13 @@ enum class RenderTargetFormat : u32 {
R16G16_SINT = 0xDC,
R16G16_UINT = 0xDD,
R16G16_FLOAT = 0xDE,
+ // A2R10G10B10_UNORM = 0xDF,
B10G11R11_FLOAT = 0xE0,
R32_SINT = 0xE3,
R32_UINT = 0xE4,
R32_FLOAT = 0xE5,
+ // X8R8G8B8_UNORM = 0xE6,
+ // X8R8G8B8_SRGB = 0xE7,
R5G6B5_UNORM = 0xE8,
A1R5G5B5_UNORM = 0xE9,
R8G8_UNORM = 0xEA,
@@ -71,17 +78,42 @@ enum class RenderTargetFormat : u32 {
R8_SNORM = 0xF4,
R8_SINT = 0xF5,
R8_UINT = 0xF6,
+
+ /*
+ A8_UNORM = 0xF7,
+ X1R5G5B5_UNORM = 0xF8,
+ X8B8G8R8_UNORM = 0xF9,
+ X8B8G8R8_SRGB = 0xFA,
+ Z1R5G5B5_UNORM = 0xFB,
+ O1R5G5B5_UNORM = 0xFC,
+ Z8R8G8B8_UNORM = 0xFD,
+ O8R8G8B8_UNORM = 0xFE,
+ R32_UNORM = 0xFF,
+ A16_UNORM = 0x40,
+ A16_FLOAT = 0x41,
+ A32_FLOAT = 0x42,
+ A8R8_UNORM = 0x43,
+ R16A16_UNORM = 0x44,
+ R16A16_FLOAT = 0x45,
+ R32A32_FLOAT = 0x46,
+ B8G8R8A8_UNORM = 0x47,
+ */
};
enum class DepthFormat : u32 {
- D32_FLOAT = 0xA,
- D16_UNORM = 0x13,
- S8_UINT_Z24_UNORM = 0x14,
- D24X8_UNORM = 0x15,
- D24S8_UNORM = 0x16,
+ Z32_FLOAT = 0xA,
+ Z16_UNORM = 0x13,
+ Z24_UNORM_S8_UINT = 0x14,
+ X8Z24_UNORM = 0x15,
+ S8Z24_UNORM = 0x16,
S8_UINT = 0x17,
- D24C8_UNORM = 0x18,
- D32_FLOAT_S8X24_UINT = 0x19,
+ V8Z24_UNORM = 0x18,
+ Z32_FLOAT_X24S8_UINT = 0x19,
+ /*
+ X8Z24_UNORM_X16V8S8_UINT = 0x1D,
+ Z32_FLOAT_X16V8X8_UINT = 0x1E,
+ Z32_FLOAT_X16V8S8_UINT = 0x1F,
+ */
};
namespace Engines {
@@ -89,73 +121,58 @@ class Maxwell3D;
class KeplerCompute;
} // namespace Engines
-enum class EngineID {
- FERMI_TWOD_A = 0x902D, // 2D Engine
- MAXWELL_B = 0xB197, // 3D Engine
- KEPLER_COMPUTE_B = 0xB1C0,
- KEPLER_INLINE_TO_MEMORY_B = 0xA140,
- MAXWELL_DMA_COPY_A = 0xB0B5,
-};
+namespace Control {
+struct ChannelState;
+}
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
class MemoryManager;
class GPU final {
public:
- struct MethodCall {
- u32 method{};
- u32 argument{};
- u32 subchannel{};
- u32 method_count{};
-
- explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0)
- : method(method_), argument(argument_), subchannel(subchannel_),
- method_count(method_count_) {}
-
- [[nodiscard]] bool IsLastCall() const {
- return method_count <= 1;
- }
- };
-
- enum class FenceOperation : u32 {
- Acquire = 0,
- Increment = 1,
- };
-
- union FenceAction {
- u32 raw;
- BitField<0, 1, FenceOperation> op;
- BitField<8, 24, u32> syncpoint_id;
- };
-
explicit GPU(Core::System& system, bool is_async, bool use_nvdec);
~GPU();
/// Binds a renderer to the GPU.
void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer);
- /// Calls a GPU method.
- void CallMethod(const MethodCall& method_call);
-
- /// Calls a GPU multivalue method.
- void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
- u32 methods_pending);
-
/// Flush all current written commands into the host GPU for execution.
void FlushCommands();
/// Synchronizes CPU writes with Host GPU memory.
- void SyncGuestHost();
+ void InvalidateGPUCache();
/// Signal the ending of command list.
void OnCommandListEnd();
+ std::shared_ptr<Control::ChannelState> AllocateChannel();
+
+ void InitChannel(Control::ChannelState& to_init);
+
+ void BindChannel(s32 channel_id);
+
+ void ReleaseChannel(Control::ChannelState& to_release);
+
+ void InitAddressSpace(Tegra::MemoryManager& memory_manager);
+
/// Request a host GPU memory flush from the CPU.
[[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size);
/// Obtains current flush request fence id.
- [[nodiscard]] u64 CurrentFlushRequestFence() const;
+ [[nodiscard]] u64 CurrentSyncRequestFence() const;
+
+ void WaitForSyncOperation(u64 fence);
/// Tick pending requests within the GPU.
void TickWork();
+    /// Gets a mutable reference to the Host1x interface.
+ [[nodiscard]] Host1x::Host1x& Host1x();
+
+ /// Gets an immutable reference to the Host1x interface.
+ [[nodiscard]] const Host1x::Host1x& Host1x() const;
+
/// Returns a reference to the Maxwell3D GPU engine.
[[nodiscard]] Engines::Maxwell3D& Maxwell3D();
@@ -168,12 +185,6 @@ public:
/// Returns a reference to the KeplerCompute GPU engine.
[[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const;
- /// Returns a reference to the GPU memory manager.
- [[nodiscard]] Tegra::MemoryManager& MemoryManager();
-
- /// Returns a const reference to the GPU memory manager.
- [[nodiscard]] const Tegra::MemoryManager& MemoryManager() const;
-
/// Returns a reference to the GPU DMA pusher.
[[nodiscard]] Tegra::DmaPusher& DmaPusher();
@@ -192,17 +203,6 @@ public:
/// Returns a const reference to the shader notifier.
[[nodiscard]] const VideoCore::ShaderNotify& ShaderNotify() const;
- /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
- void WaitFence(u32 syncpoint_id, u32 value);
-
- void IncrementSyncPoint(u32 syncpoint_id);
-
- [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const;
-
- void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value);
-
- [[nodiscard]] bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value);
-
[[nodiscard]] u64 GetTicks() const;
[[nodiscard]] bool IsAsync() const;
@@ -211,6 +211,9 @@ public:
void RendererFrameEndNotify();
+ void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
+ std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences);
+
/// Performs any additional setup necessary in order to begin GPU emulation.
/// This can be used to launch any necessary threads and register any necessary
/// core timing events.
@@ -226,7 +229,7 @@ public:
void ReleaseContext();
/// Push GPU command entries to be processed
- void PushGPUEntries(Tegra::CommandList&& entries);
+ void PushGPUEntries(s32 channel, Tegra::CommandList&& entries);
/// Push GPU command buffer entries to be processed
void PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries);
@@ -248,7 +251,7 @@ public:
private:
struct Impl;
- std::unique_ptr<Impl> impl;
+ mutable std::unique_ptr<Impl> impl;
};
} // namespace Tegra
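
The new channel API replaces the single implicit channel. A reduced sketch of the bookkeeping behind AllocateChannel/BindChannel, with a placeholder ChannelState; the real implementation additionally declares the channel to the scheduler and rasterizer:

#include <cassert>
#include <map>
#include <memory>

struct ChannelState {
    explicit ChannelState(int id_) : id{id_} {}
    int id;
};

struct ChannelTable {
    std::map<int, std::shared_ptr<ChannelState>> channels;
    int new_channel_id{1};
    int bound_channel{-1};
    ChannelState* current_channel{nullptr};

    std::shared_ptr<ChannelState> Allocate() {
        auto state = std::make_shared<ChannelState>(new_channel_id);
        channels.emplace(new_channel_id++, state);
        return state;
    }

    void Bind(int channel_id) {
        if (bound_channel == channel_id) {
            return; // rebinding the current channel is a no-op
        }
        const auto it = channels.find(channel_id);
        assert(it != channels.end());
        bound_channel = channel_id;
        current_channel = it->second.get();
    }
};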
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index d43f7175a..1bd477011 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -8,6 +8,7 @@
#include "common/thread.h"
#include "core/core.h"
#include "core/frontend/emu_window.h"
+#include "video_core/control/scheduler.h"
#include "video_core/dma_pusher.h"
#include "video_core/gpu.h"
#include "video_core/gpu_thread.h"
@@ -18,8 +19,8 @@ namespace VideoCommon::GPUThread {
/// Runs the GPU thread
static void RunThread(std::stop_token stop_token, Core::System& system,
VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context,
- Tegra::DmaPusher& dma_pusher, SynchState& state) {
- std::string name = "yuzu:GPU";
+ Tegra::Control::Scheduler& scheduler, SynchState& state) {
+ std::string name = "GPU";
MicroProfileOnThreadCreate(name.c_str());
SCOPE_EXIT({ MicroProfileOnThreadExit(); });
@@ -36,8 +37,7 @@ static void RunThread(std::stop_token stop_token, Core::System& system,
break;
}
if (auto* submit_list = std::get_if<SubmitListCommand>(&next.data)) {
- dma_pusher.Push(std::move(submit_list->entries));
- dma_pusher.DispatchCalls();
+ scheduler.Push(submit_list->channel, std::move(submit_list->entries));
} else if (const auto* data = std::get_if<SwapBuffersCommand>(&next.data)) {
renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr);
} else if (std::holds_alternative<OnCommandListEndCommand>(next.data)) {
@@ -68,14 +68,14 @@ ThreadManager::~ThreadManager() = default;
void ThreadManager::StartThread(VideoCore::RendererBase& renderer,
Core::Frontend::GraphicsContext& context,
- Tegra::DmaPusher& dma_pusher) {
+ Tegra::Control::Scheduler& scheduler) {
rasterizer = renderer.ReadRasterizer();
thread = std::jthread(RunThread, std::ref(system), std::ref(renderer), std::ref(context),
- std::ref(dma_pusher), std::ref(state));
+ std::ref(scheduler), std::ref(state));
}
-void ThreadManager::SubmitList(Tegra::CommandList&& entries) {
- PushCommand(SubmitListCommand(std::move(entries)));
+void ThreadManager::SubmitList(s32 channel, Tegra::CommandList&& entries) {
+ PushCommand(SubmitListCommand(channel, std::move(entries)));
}
void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
@@ -93,8 +93,12 @@ void ThreadManager::FlushRegion(VAddr addr, u64 size) {
}
auto& gpu = system.GPU();
u64 fence = gpu.RequestFlush(addr, size);
- PushCommand(GPUTickCommand(), true);
- ASSERT(fence <= gpu.CurrentFlushRequestFence());
+ TickGPU();
+ gpu.WaitForSyncOperation(fence);
+}
+
+void ThreadManager::TickGPU() {
+ PushCommand(GPUTickCommand());
}
void ThreadManager::InvalidateRegion(VAddr addr, u64 size) {
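
A sketch of the variant dispatch RunThread performs now that submissions carry a channel id; the command types here are stand-ins for the real GPUThread commands:

#include <cstdio>
#include <variant>
#include <vector>

struct SubmitList {
    int channel;
    std::vector<unsigned> entries;
};
struct SwapBuffers {};
using Command = std::variant<SubmitList, SwapBuffers>;

void Dispatch(const Command& next) {
    if (const auto* submit = std::get_if<SubmitList>(&next)) {
        // scheduler.Push(channel, entries) in the real thread loop
        std::printf("Push(channel=%d, %zu entries)\n", submit->channel, submit->entries.size());
    } else {
        std::printf("SwapBuffers\n");
    }
}

int main() {
    Dispatch(SubmitList{3, {0x1234, 0x5678}});
    Dispatch(SwapBuffers{});
    return 0;
}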
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
index 2f8210cb9..64628d3e3 100644
--- a/src/video_core/gpu_thread.h
+++ b/src/video_core/gpu_thread.h
@@ -15,7 +15,9 @@
namespace Tegra {
struct FramebufferConfig;
-class DmaPusher;
+namespace Control {
+class Scheduler;
+}
} // namespace Tegra
namespace Core {
@@ -34,8 +36,10 @@ namespace VideoCommon::GPUThread {
/// Command to signal to the GPU thread that a command list is ready for processing
struct SubmitListCommand final {
- explicit SubmitListCommand(Tegra::CommandList&& entries_) : entries{std::move(entries_)} {}
+ explicit SubmitListCommand(s32 channel_, Tegra::CommandList&& entries_)
+ : channel{channel_}, entries{std::move(entries_)} {}
+ s32 channel;
Tegra::CommandList entries;
};
@@ -112,10 +116,10 @@ public:
/// Creates and starts the GPU thread.
void StartThread(VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context,
- Tegra::DmaPusher& dma_pusher);
+ Tegra::Control::Scheduler& scheduler);
/// Push GPU command entries to be processed
- void SubmitList(Tegra::CommandList&& entries);
+ void SubmitList(s32 channel, Tegra::CommandList&& entries);
/// Swap buffers (render frame)
void SwapBuffers(const Tegra::FramebufferConfig* framebuffer);
@@ -131,6 +135,8 @@ public:
void OnCommandListEnd();
+ void TickGPU();
+
private:
/// Pushes a command to be executed by the GPU thread
u64 PushCommand(CommandData&& command_data, bool block = false);
diff --git a/src/video_core/command_classes/codecs/codec.cpp b/src/video_core/host1x/codecs/codec.cpp
index a5eb97b7f..42e7d6e4f 100644
--- a/src/video_core/command_classes/codecs/codec.cpp
+++ b/src/video_core/host1x/codecs/codec.cpp
@@ -6,11 +6,11 @@
#include <vector>
#include "common/assert.h"
#include "common/settings.h"
-#include "video_core/command_classes/codecs/codec.h"
-#include "video_core/command_classes/codecs/h264.h"
-#include "video_core/command_classes/codecs/vp8.h"
-#include "video_core/command_classes/codecs/vp9.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/codecs/codec.h"
+#include "video_core/host1x/codecs/h264.h"
+#include "video_core/host1x/codecs/vp8.h"
+#include "video_core/host1x/codecs/vp9.h"
+#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"
extern "C" {
@@ -73,10 +73,10 @@ void AVFrameDeleter(AVFrame* ptr) {
av_frame_free(&ptr);
}
-Codec::Codec(GPU& gpu_, const NvdecCommon::NvdecRegisters& regs)
- : gpu(gpu_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(gpu)),
- vp8_decoder(std::make_unique<Decoder::VP8>(gpu)),
- vp9_decoder(std::make_unique<Decoder::VP9>(gpu)) {}
+Codec::Codec(Host1x::Host1x& host1x_, const Host1x::NvdecCommon::NvdecRegisters& regs)
+ : host1x(host1x_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(host1x)),
+ vp8_decoder(std::make_unique<Decoder::VP8>(host1x)),
+ vp9_decoder(std::make_unique<Decoder::VP9>(host1x)) {}
Codec::~Codec() {
if (!initialized) {
@@ -168,11 +168,11 @@ void Codec::InitializeGpuDecoder() {
void Codec::Initialize() {
const AVCodecID codec = [&] {
switch (current_codec) {
- case NvdecCommon::VideoCodec::H264:
+ case Host1x::NvdecCommon::VideoCodec::H264:
return AV_CODEC_ID_H264;
- case NvdecCommon::VideoCodec::VP8:
+ case Host1x::NvdecCommon::VideoCodec::VP8:
return AV_CODEC_ID_VP8;
- case NvdecCommon::VideoCodec::VP9:
+ case Host1x::NvdecCommon::VideoCodec::VP9:
return AV_CODEC_ID_VP9;
default:
UNIMPLEMENTED_MSG("Unknown codec {}", current_codec);
@@ -197,7 +197,7 @@ void Codec::Initialize() {
initialized = true;
}
-void Codec::SetTargetCodec(NvdecCommon::VideoCodec codec) {
+void Codec::SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec) {
if (current_codec != codec) {
current_codec = codec;
LOG_INFO(Service_NVDRV, "NVDEC video codec initialized to {}", GetCurrentCodecName());
@@ -215,11 +215,11 @@ void Codec::Decode() {
bool vp9_hidden_frame = false;
const auto& frame_data = [&]() {
switch (current_codec) {
- case Tegra::NvdecCommon::VideoCodec::H264:
+ case Tegra::Host1x::NvdecCommon::VideoCodec::H264:
return h264_decoder->ComposeFrame(state, is_first_frame);
- case Tegra::NvdecCommon::VideoCodec::VP8:
+ case Tegra::Host1x::NvdecCommon::VideoCodec::VP8:
return vp8_decoder->ComposeFrame(state);
- case Tegra::NvdecCommon::VideoCodec::VP9:
+ case Tegra::Host1x::NvdecCommon::VideoCodec::VP9:
vp9_decoder->ComposeFrame(state);
vp9_hidden_frame = vp9_decoder->WasFrameHidden();
return vp9_decoder->GetFrameBytes();
@@ -287,21 +287,21 @@ AVFramePtr Codec::GetCurrentFrame() {
return frame;
}
-NvdecCommon::VideoCodec Codec::GetCurrentCodec() const {
+Host1x::NvdecCommon::VideoCodec Codec::GetCurrentCodec() const {
return current_codec;
}
std::string_view Codec::GetCurrentCodecName() const {
switch (current_codec) {
- case NvdecCommon::VideoCodec::None:
+ case Host1x::NvdecCommon::VideoCodec::None:
return "None";
- case NvdecCommon::VideoCodec::H264:
+ case Host1x::NvdecCommon::VideoCodec::H264:
return "H264";
- case NvdecCommon::VideoCodec::VP8:
+ case Host1x::NvdecCommon::VideoCodec::VP8:
return "VP8";
- case NvdecCommon::VideoCodec::H265:
+ case Host1x::NvdecCommon::VideoCodec::H265:
return "H265";
- case NvdecCommon::VideoCodec::VP9:
+ case Host1x::NvdecCommon::VideoCodec::VP9:
return "VP9";
default:
return "Unknown";
diff --git a/src/video_core/command_classes/codecs/codec.h b/src/video_core/host1x/codecs/codec.h
index 0c2405465..0d45fb7fe 100644
--- a/src/video_core/command_classes/codecs/codec.h
+++ b/src/video_core/host1x/codecs/codec.h
@@ -6,8 +6,8 @@
#include <memory>
#include <string_view>
#include <queue>
-
-#include "video_core/command_classes/nvdec_common.h"
+#include "common/common_types.h"
+#include "video_core/host1x/nvdec_common.h"
extern "C" {
#if defined(__GNUC__) || defined(__clang__)
@@ -21,7 +21,6 @@ extern "C" {
}
namespace Tegra {
-class GPU;
void AVFrameDeleter(AVFrame* ptr);
using AVFramePtr = std::unique_ptr<AVFrame, decltype(&AVFrameDeleter)>;
@@ -32,16 +31,20 @@ class VP8;
class VP9;
} // namespace Decoder
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
class Codec {
public:
- explicit Codec(GPU& gpu, const NvdecCommon::NvdecRegisters& regs);
+ explicit Codec(Host1x::Host1x& host1x, const Host1x::NvdecCommon::NvdecRegisters& regs);
~Codec();
/// Initialize the codec, returning success or failure
void Initialize();
/// Sets NVDEC video stream codec
- void SetTargetCodec(NvdecCommon::VideoCodec codec);
+ void SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec);
/// Call decoders to construct headers, decode AVFrame with ffmpeg
void Decode();
@@ -50,7 +53,7 @@ public:
[[nodiscard]] AVFramePtr GetCurrentFrame();
/// Returns the value of current_codec
- [[nodiscard]] NvdecCommon::VideoCodec GetCurrentCodec() const;
+ [[nodiscard]] Host1x::NvdecCommon::VideoCodec GetCurrentCodec() const;
/// Return name of the current codec
[[nodiscard]] std::string_view GetCurrentCodecName() const;
@@ -63,14 +66,14 @@ private:
bool CreateGpuAvDevice();
bool initialized{};
- NvdecCommon::VideoCodec current_codec{NvdecCommon::VideoCodec::None};
+ Host1x::NvdecCommon::VideoCodec current_codec{Host1x::NvdecCommon::VideoCodec::None};
const AVCodec* av_codec{nullptr};
AVCodecContext* av_codec_ctx{nullptr};
AVBufferRef* av_gpu_decoder{nullptr};
- GPU& gpu;
- const NvdecCommon::NvdecRegisters& state;
+ Host1x::Host1x& host1x;
+ const Host1x::NvdecCommon::NvdecRegisters& state;
std::unique_ptr<Decoder::H264> h264_decoder;
std::unique_ptr<Decoder::VP8> vp8_decoder;
std::unique_ptr<Decoder::VP9> vp9_decoder;
diff --git a/src/video_core/command_classes/codecs/h264.cpp b/src/video_core/host1x/codecs/h264.cpp
index e2acd54d4..e87bd65fa 100644
--- a/src/video_core/command_classes/codecs/h264.cpp
+++ b/src/video_core/host1x/codecs/h264.cpp
@@ -5,8 +5,8 @@
#include <bit>
#include "common/settings.h"
-#include "video_core/command_classes/codecs/h264.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/codecs/h264.h"
+#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"
namespace Tegra::Decoder {
@@ -24,19 +24,20 @@ constexpr std::array<u8, 16> zig_zag_scan{
};
} // Anonymous namespace
-H264::H264(GPU& gpu_) : gpu(gpu_) {}
+H264::H264(Host1x::Host1x& host1x_) : host1x{host1x_} {}
H264::~H264() = default;
-const std::vector<u8>& H264::ComposeFrame(const NvdecCommon::NvdecRegisters& state,
+const std::vector<u8>& H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state,
bool is_first_frame) {
H264DecoderContext context;
- gpu.MemoryManager().ReadBlock(state.picture_info_offset, &context, sizeof(H264DecoderContext));
+ host1x.MemoryManager().ReadBlock(state.picture_info_offset, &context,
+ sizeof(H264DecoderContext));
const s64 frame_number = context.h264_parameter_set.frame_number.Value();
if (!is_first_frame && frame_number != 0) {
frame.resize(context.stream_len);
- gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size());
+ host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size());
return frame;
}
@@ -155,8 +156,8 @@ const std::vector<u8>& H264::ComposeFrame(const NvdecCommon::NvdecRegisters& sta
frame.resize(encoded_header.size() + context.stream_len);
std::memcpy(frame.data(), encoded_header.data(), encoded_header.size());
- gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset,
- frame.data() + encoded_header.size(), context.stream_len);
+ host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset,
+ frame.data() + encoded_header.size(), context.stream_len);
return frame;
}
diff --git a/src/video_core/command_classes/codecs/h264.h b/src/video_core/host1x/codecs/h264.h
index 261574364..5cc86454e 100644
--- a/src/video_core/command_classes/codecs/h264.h
+++ b/src/video_core/host1x/codecs/h264.h
@@ -8,10 +8,14 @@
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
-#include "video_core/command_classes/nvdec_common.h"
+#include "video_core/host1x/nvdec_common.h"
namespace Tegra {
-class GPU;
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
namespace Decoder {
class H264BitWriter {
@@ -55,16 +59,16 @@ private:
class H264 {
public:
- explicit H264(GPU& gpu);
+ explicit H264(Host1x::Host1x& host1x);
~H264();
/// Compose the H264 frame for FFmpeg decoding
- [[nodiscard]] const std::vector<u8>& ComposeFrame(const NvdecCommon::NvdecRegisters& state,
- bool is_first_frame = false);
+ [[nodiscard]] const std::vector<u8>& ComposeFrame(
+ const Host1x::NvdecCommon::NvdecRegisters& state, bool is_first_frame = false);
private:
std::vector<u8> frame;
- GPU& gpu;
+ Host1x::Host1x& host1x;
struct H264ParameterSet {
s32 log2_max_pic_order_cnt_lsb_minus4; ///< 0x00
diff --git a/src/video_core/command_classes/codecs/vp8.cpp b/src/video_core/host1x/codecs/vp8.cpp
index c83b9bbc2..28fb12cb8 100644
--- a/src/video_core/command_classes/codecs/vp8.cpp
+++ b/src/video_core/host1x/codecs/vp8.cpp
@@ -3,18 +3,18 @@
#include <vector>
-#include "video_core/command_classes/codecs/vp8.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/codecs/vp8.h"
+#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"
namespace Tegra::Decoder {
-VP8::VP8(GPU& gpu_) : gpu(gpu_) {}
+VP8::VP8(Host1x::Host1x& host1x_) : host1x{host1x_} {}
VP8::~VP8() = default;
-const std::vector<u8>& VP8::ComposeFrame(const NvdecCommon::NvdecRegisters& state) {
+const std::vector<u8>& VP8::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
VP8PictureInfo info;
- gpu.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo));
+ host1x.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo));
const bool is_key_frame = info.key_frame == 1u;
const auto bitstream_size = static_cast<size_t>(info.vld_buffer_size);
@@ -45,7 +45,7 @@ const std::vector<u8>& VP8::ComposeFrame(const NvdecCommon::NvdecRegisters& stat
frame[9] = static_cast<u8>(((info.frame_height >> 8) & 0x3f));
}
const u64 bitstream_offset = state.frame_bitstream_offset;
- gpu.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size);
+ host1x.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size);
return frame;
}
diff --git a/src/video_core/command_classes/codecs/vp8.h b/src/video_core/host1x/codecs/vp8.h
index 3357667b0..5bf07ecab 100644
--- a/src/video_core/command_classes/codecs/vp8.h
+++ b/src/video_core/host1x/codecs/vp8.h
@@ -8,23 +8,28 @@
#include "common/common_funcs.h"
#include "common/common_types.h"
-#include "video_core/command_classes/nvdec_common.h"
+#include "video_core/host1x/nvdec_common.h"
namespace Tegra {
-class GPU;
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
namespace Decoder {
class VP8 {
public:
- explicit VP8(GPU& gpu);
+ explicit VP8(Host1x::Host1x& host1x);
~VP8();
/// Compose the VP8 frame for FFmpeg decoding
- [[nodiscard]] const std::vector<u8>& ComposeFrame(const NvdecCommon::NvdecRegisters& state);
+ [[nodiscard]] const std::vector<u8>& ComposeFrame(
+ const Host1x::NvdecCommon::NvdecRegisters& state);
private:
std::vector<u8> frame;
- GPU& gpu;
+ Host1x::Host1x& host1x;
struct VP8PictureInfo {
INSERT_PADDING_WORDS_NOINIT(14);
diff --git a/src/video_core/command_classes/codecs/vp9.cpp b/src/video_core/host1x/codecs/vp9.cpp
index c01431441..cf40c9012 100644
--- a/src/video_core/command_classes/codecs/vp9.cpp
+++ b/src/video_core/host1x/codecs/vp9.cpp
@@ -4,8 +4,8 @@
#include <algorithm> // for std::copy
#include <numeric>
#include "common/assert.h"
-#include "video_core/command_classes/codecs/vp9.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/codecs/vp9.h"
+#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"
namespace Tegra::Decoder {
@@ -236,7 +236,7 @@ constexpr std::array<u8, 254> map_lut{
}
} // Anonymous namespace
-VP9::VP9(GPU& gpu_) : gpu{gpu_} {}
+VP9::VP9(Host1x::Host1x& host1x_) : host1x{host1x_} {}
VP9::~VP9() = default;
@@ -355,9 +355,9 @@ void VP9::WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_
}
}
-Vp9PictureInfo VP9::GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state) {
+Vp9PictureInfo VP9::GetVp9PictureInfo(const Host1x::NvdecCommon::NvdecRegisters& state) {
PictureInfo picture_info;
- gpu.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo));
+ host1x.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo));
Vp9PictureInfo vp9_info = picture_info.Convert();
InsertEntropy(state.vp9_entropy_probs_offset, vp9_info.entropy);
@@ -372,18 +372,19 @@ Vp9PictureInfo VP9::GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state)
void VP9::InsertEntropy(u64 offset, Vp9EntropyProbs& dst) {
EntropyProbs entropy;
- gpu.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs));
+ host1x.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs));
entropy.Convert(dst);
}
-Vp9FrameContainer VP9::GetCurrentFrame(const NvdecCommon::NvdecRegisters& state) {
+Vp9FrameContainer VP9::GetCurrentFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
Vp9FrameContainer current_frame{};
{
- gpu.SyncGuestHost();
+            // gpu.SyncGuestHost(); // removed: the reason this sync was needed is unclear
current_frame.info = GetVp9PictureInfo(state);
current_frame.bit_stream.resize(current_frame.info.bitstream_size);
- gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, current_frame.bit_stream.data(),
- current_frame.info.bitstream_size);
+ host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset,
+ current_frame.bit_stream.data(),
+ current_frame.info.bitstream_size);
}
if (!next_frame.bit_stream.empty()) {
Vp9FrameContainer temp{
@@ -769,7 +770,7 @@ VpxBitStreamWriter VP9::ComposeUncompressedHeader() {
return uncomp_writer;
}
-void VP9::ComposeFrame(const NvdecCommon::NvdecRegisters& state) {
+void VP9::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
std::vector<u8> bitstream;
{
Vp9FrameContainer curr_frame = GetCurrentFrame(state);
diff --git a/src/video_core/command_classes/codecs/vp9.h b/src/video_core/host1x/codecs/vp9.h
index ecc40e8b1..d4083e8d3 100644
--- a/src/video_core/command_classes/codecs/vp9.h
+++ b/src/video_core/host1x/codecs/vp9.h
@@ -8,11 +8,15 @@
#include "common/common_types.h"
#include "common/stream.h"
-#include "video_core/command_classes/codecs/vp9_types.h"
-#include "video_core/command_classes/nvdec_common.h"
+#include "video_core/host1x/codecs/vp9_types.h"
+#include "video_core/host1x/nvdec_common.h"
namespace Tegra {
-class GPU;
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
namespace Decoder {
/// The VpxRangeEncoder, and VpxBitStreamWriter classes are used to compose the
@@ -106,7 +110,7 @@ private:
class VP9 {
public:
- explicit VP9(GPU& gpu_);
+ explicit VP9(Host1x::Host1x& host1x);
~VP9();
VP9(const VP9&) = delete;
@@ -117,7 +121,7 @@ public:
/// Composes the VP9 frame from the GPU state information.
/// Based on the official VP9 spec documentation
- void ComposeFrame(const NvdecCommon::NvdecRegisters& state);
+ void ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state);
/// Returns true if the most recent frame was a hidden frame.
[[nodiscard]] bool WasFrameHidden() const {
@@ -162,19 +166,21 @@ private:
void WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob);
/// Returns VP9 information from NVDEC provided offset and size
- [[nodiscard]] Vp9PictureInfo GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state);
+ [[nodiscard]] Vp9PictureInfo GetVp9PictureInfo(
+ const Host1x::NvdecCommon::NvdecRegisters& state);
/// Read and convert NVDEC provided entropy probs to Vp9EntropyProbs struct
void InsertEntropy(u64 offset, Vp9EntropyProbs& dst);
/// Returns frame to be decoded after buffering
- [[nodiscard]] Vp9FrameContainer GetCurrentFrame(const NvdecCommon::NvdecRegisters& state);
+ [[nodiscard]] Vp9FrameContainer GetCurrentFrame(
+ const Host1x::NvdecCommon::NvdecRegisters& state);
    /// Use NVDEC provided information to compose the headers for the current frame
[[nodiscard]] std::vector<u8> ComposeCompressedHeader();
[[nodiscard]] VpxBitStreamWriter ComposeUncompressedHeader();
- GPU& gpu;
+ Host1x::Host1x& host1x;
std::vector<u8> frame;
std::array<s8, 4> loop_filter_ref_deltas{};
diff --git a/src/video_core/command_classes/codecs/vp9_types.h b/src/video_core/host1x/codecs/vp9_types.h
index bb3d8df6e..adad8ed7e 100644
--- a/src/video_core/command_classes/codecs/vp9_types.h
+++ b/src/video_core/host1x/codecs/vp9_types.h
@@ -9,7 +9,6 @@
#include "common/common_types.h"
namespace Tegra {
-class GPU;
namespace Decoder {
struct Vp9FrameDimensions {
diff --git a/src/video_core/host1x/control.cpp b/src/video_core/host1x/control.cpp
new file mode 100644
index 000000000..dceefdb7f
--- /dev/null
+++ b/src/video_core/host1x/control.cpp
@@ -0,0 +1,33 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/assert.h"
+#include "video_core/host1x/control.h"
+#include "video_core/host1x/host1x.h"
+
+namespace Tegra::Host1x {
+
+Control::Control(Host1x& host1x_) : host1x(host1x_) {}
+
+Control::~Control() = default;
+
+void Control::ProcessMethod(Method method, u32 argument) {
+ switch (method) {
+ case Method::LoadSyncptPayload32:
+ syncpoint_value = argument;
+ break;
+ case Method::WaitSyncpt:
+ case Method::WaitSyncpt32:
+ Execute(argument);
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Control method 0x{:X}", static_cast<u32>(method));
+ break;
+ }
+}
+
+void Control::Execute(u32 data) {
+ host1x.GetSyncpointManager().WaitHost(data, syncpoint_value);
+}
+
+} // namespace Tegra::Host1x
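
Note that the two methods form a stateful pair: LoadSyncptPayload32 only latches the value to compare against, and the following WaitSyncpt/WaitSyncpt32 supplies the syncpoint ID and performs the blocking wait through the syncpoint manager. A hypothetical call site (assumes video_core/host1x/control.h is included):

    void WaitOnSyncpoint(Tegra::Host1x::Control& control, u32 syncpoint_id, u32 target) {
        using Method = Tegra::Host1x::Control::Method;
        control.ProcessMethod(Method::LoadSyncptPayload32, target); // latch the target value
        control.ProcessMethod(Method::WaitSyncpt32, syncpoint_id);  // blocks in WaitHost
    }
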
diff --git a/src/video_core/command_classes/host1x.h b/src/video_core/host1x/control.h
index bb48a4381..e117888a3 100644
--- a/src/video_core/command_classes/host1x.h
+++ b/src/video_core/host1x/control.h
@@ -1,15 +1,19 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
#pragma once
#include "common/common_types.h"
namespace Tegra {
-class GPU;
+
+namespace Host1x {
+
+class Host1x;
class Nvdec;
-class Host1x {
+class Control {
public:
enum class Method : u32 {
WaitSyncpt = 0x8,
@@ -17,8 +21,8 @@ public:
WaitSyncpt32 = 0x50,
};
- explicit Host1x(GPU& gpu);
- ~Host1x();
+ explicit Control(Host1x& host1x);
+ ~Control();
    /// Writes the method into the state; invokes Execute() if encountered
void ProcessMethod(Method method, u32 argument);
@@ -28,7 +32,9 @@ private:
void Execute(u32 data);
u32 syncpoint_value{};
- GPU& gpu;
+ Host1x& host1x;
};
+} // namespace Host1x
+
} // namespace Tegra
diff --git a/src/video_core/host1x/host1x.cpp b/src/video_core/host1x/host1x.cpp
new file mode 100644
index 000000000..7c317a85d
--- /dev/null
+++ b/src/video_core/host1x/host1x.cpp
@@ -0,0 +1,17 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "core/core.h"
+#include "video_core/host1x/host1x.h"
+
+namespace Tegra {
+
+namespace Host1x {
+
+Host1x::Host1x(Core::System& system_)
+ : system{system_}, syncpoint_manager{}, memory_manager{system, 32, 12},
+ allocator{std::make_unique<Common::FlatAllocator<u32, 0, 32>>(1 << 12)} {}
+
+} // namespace Host1x
+
+} // namespace Tegra
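
The constructor above configures the Host1x aperture as a 32-bit address space with a 12-bit big-page shift (the small-page shift takes its default from the MemoryManager declaration, which is not shown here), and starts the flat allocator at 1 << 12 so page zero is never handed out. The derived sizes, as a standalone check:

    #include <cstdint>

    constexpr std::uint64_t address_space_bits = 32;
    constexpr std::uint64_t big_page_bits = 12; // 4 KiB pages for Host1x
    constexpr std::uint64_t address_space_size = 1ULL << address_space_bits; // 4 GiB
    constexpr std::uint64_t big_page_size = 1ULL << big_page_bits;           // 4096 bytes
    static_assert(address_space_size / big_page_size == 1ULL << 20); // about a million pages
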
diff --git a/src/video_core/host1x/host1x.h b/src/video_core/host1x/host1x.h
new file mode 100644
index 000000000..57082ae54
--- /dev/null
+++ b/src/video_core/host1x/host1x.h
@@ -0,0 +1,57 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include "common/common_types.h"
+
+#include "common/address_space.h"
+#include "video_core/host1x/syncpoint_manager.h"
+#include "video_core/memory_manager.h"
+
+namespace Core {
+class System;
+} // namespace Core
+
+namespace Tegra {
+
+namespace Host1x {
+
+class Host1x {
+public:
+ explicit Host1x(Core::System& system);
+
+ SyncpointManager& GetSyncpointManager() {
+ return syncpoint_manager;
+ }
+
+ const SyncpointManager& GetSyncpointManager() const {
+ return syncpoint_manager;
+ }
+
+ Tegra::MemoryManager& MemoryManager() {
+ return memory_manager;
+ }
+
+ const Tegra::MemoryManager& MemoryManager() const {
+ return memory_manager;
+ }
+
+ Common::FlatAllocator<u32, 0, 32>& Allocator() {
+ return *allocator;
+ }
+
+ const Common::FlatAllocator<u32, 0, 32>& Allocator() const {
+ return *allocator;
+ }
+
+private:
+ Core::System& system;
+ SyncpointManager syncpoint_manager;
+ Tegra::MemoryManager memory_manager;
+ std::unique_ptr<Common::FlatAllocator<u32, 0, 32>> allocator;
+};
+
+} // namespace Host1x
+
+} // namespace Tegra
diff --git a/src/video_core/command_classes/nvdec.cpp b/src/video_core/host1x/nvdec.cpp
index 4fbbe3da6..a4bd5b79f 100644
--- a/src/video_core/command_classes/nvdec.cpp
+++ b/src/video_core/host1x/nvdec.cpp
@@ -2,15 +2,16 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
-#include "video_core/command_classes/nvdec.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/nvdec.h"
-namespace Tegra {
+namespace Tegra::Host1x {
#define NVDEC_REG_INDEX(field_name) \
(offsetof(NvdecCommon::NvdecRegisters, field_name) / sizeof(u64))
-Nvdec::Nvdec(GPU& gpu_) : gpu(gpu_), state{}, codec(std::make_unique<Codec>(gpu, state)) {}
+Nvdec::Nvdec(Host1x& host1x_)
+ : host1x(host1x_), state{}, codec(std::make_unique<Codec>(host1x, state)) {}
Nvdec::~Nvdec() = default;
@@ -44,4 +45,4 @@ void Nvdec::Execute() {
}
}
-} // namespace Tegra
+} // namespace Tegra::Host1x
diff --git a/src/video_core/command_classes/nvdec.h b/src/video_core/host1x/nvdec.h
index 488531fc6..3949d5181 100644
--- a/src/video_core/command_classes/nvdec.h
+++ b/src/video_core/host1x/nvdec.h
@@ -6,14 +6,17 @@
#include <memory>
#include <vector>
#include "common/common_types.h"
-#include "video_core/command_classes/codecs/codec.h"
+#include "video_core/host1x/codecs/codec.h"
namespace Tegra {
-class GPU;
+
+namespace Host1x {
+
+class Host1x;
class Nvdec {
public:
- explicit Nvdec(GPU& gpu);
+ explicit Nvdec(Host1x& host1x);
~Nvdec();
    /// Writes the method into the state; invokes Execute() if encountered
@@ -26,8 +29,11 @@ private:
/// Invoke codec to decode a frame
void Execute();
- GPU& gpu;
+ Host1x& host1x;
NvdecCommon::NvdecRegisters state;
std::unique_ptr<Codec> codec;
};
+
+} // namespace Host1x
+
} // namespace Tegra
diff --git a/src/video_core/command_classes/nvdec_common.h b/src/video_core/host1x/nvdec_common.h
index 521e5b52b..49d67ebbe 100644
--- a/src/video_core/command_classes/nvdec_common.h
+++ b/src/video_core/host1x/nvdec_common.h
@@ -7,7 +7,7 @@
#include "common/common_funcs.h"
#include "common/common_types.h"
-namespace Tegra::NvdecCommon {
+namespace Tegra::Host1x::NvdecCommon {
enum class VideoCodec : u64 {
None = 0x0,
@@ -94,4 +94,4 @@ ASSERT_REG_POSITION(vp9_curr_frame_mvs_offset, 0x176);
#undef ASSERT_REG_POSITION
-} // namespace Tegra::NvdecCommon
+} // namespace Tegra::Host1x::NvdecCommon
diff --git a/src/video_core/command_classes/sync_manager.cpp b/src/video_core/host1x/sync_manager.cpp
index 67e58046f..5ef9ea217 100644
--- a/src/video_core/command_classes/sync_manager.cpp
+++ b/src/video_core/host1x/sync_manager.cpp
@@ -3,10 +3,13 @@
#include <algorithm>
#include "sync_manager.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/syncpoint_manager.h"
namespace Tegra {
-SyncptIncrManager::SyncptIncrManager(GPU& gpu_) : gpu(gpu_) {}
+namespace Host1x {
+
+SyncptIncrManager::SyncptIncrManager(Host1x& host1x_) : host1x(host1x_) {}
SyncptIncrManager::~SyncptIncrManager() = default;
void SyncptIncrManager::Increment(u32 id) {
@@ -36,8 +39,12 @@ void SyncptIncrManager::IncrementAllDone() {
if (!increments[done_count].complete) {
break;
}
- gpu.IncrementSyncPoint(increments[done_count].syncpt_id);
+ auto& syncpoint_manager = host1x.GetSyncpointManager();
+ syncpoint_manager.IncrementGuest(increments[done_count].syncpt_id);
+ syncpoint_manager.IncrementHost(increments[done_count].syncpt_id);
}
increments.erase(increments.begin(), increments.begin() + done_count);
}
+
+} // namespace Host1x
} // namespace Tegra
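
Where the old code bumped a single GPU-side counter, IncrementAllDone now advances two views of each syncpoint: the guest-visible value and the host-side value that waiters compare against. A minimal model of that split (the real counters are the atomics inside SyncpointManager, shown later in this diff):

    #include <atomic>
    #include <cstdint>

    struct Syncpoint {
        std::atomic<std::uint32_t> guest{0}; // value the emulated client reads back
        std::atomic<std::uint32_t> host{0};  // value host-side waits compare against

        void IncrementBoth() {
            guest.fetch_add(1, std::memory_order_acq_rel);
            host.fetch_add(1, std::memory_order_acq_rel);
        }
    };
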
diff --git a/src/video_core/command_classes/sync_manager.h b/src/video_core/host1x/sync_manager.h
index 6dfaae080..7bb77fa27 100644
--- a/src/video_core/command_classes/sync_manager.h
+++ b/src/video_core/host1x/sync_manager.h
@@ -8,7 +8,11 @@
#include "common/common_types.h"
namespace Tegra {
-class GPU;
+
+namespace Host1x {
+
+class Host1x;
+
struct SyncptIncr {
u32 id;
u32 class_id;
@@ -21,7 +25,7 @@ struct SyncptIncr {
class SyncptIncrManager {
public:
- explicit SyncptIncrManager(GPU& gpu);
+ explicit SyncptIncrManager(Host1x& host1x);
~SyncptIncrManager();
/// Add syncpoint id and increment all
@@ -41,7 +45,9 @@ private:
std::mutex increment_lock;
u32 current_id{};
- GPU& gpu;
+ Host1x& host1x;
};
+} // namespace Host1x
+
} // namespace Tegra
diff --git a/src/video_core/host1x/syncpoint_manager.cpp b/src/video_core/host1x/syncpoint_manager.cpp
new file mode 100644
index 000000000..a44fc83d3
--- /dev/null
+++ b/src/video_core/host1x/syncpoint_manager.cpp
@@ -0,0 +1,106 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/microprofile.h"
+#include "video_core/host1x/syncpoint_manager.h"
+
+namespace Tegra {
+
+namespace Host1x {
+
+MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
+
+SyncpointManager::ActionHandle SyncpointManager::RegisterAction(
+ std::atomic<u32>& syncpoint, std::list<RegisteredAction>& action_storage, u32 expected_value,
+ std::function<void()>&& action) {
+ if (syncpoint.load(std::memory_order_acquire) >= expected_value) {
+ action();
+ return {};
+ }
+
+ std::unique_lock lk(guard);
+ if (syncpoint.load(std::memory_order_relaxed) >= expected_value) {
+ action();
+ return {};
+ }
+ auto it = action_storage.begin();
+ while (it != action_storage.end()) {
+ if (it->expected_value >= expected_value) {
+ break;
+ }
+ ++it;
+ }
+ return action_storage.emplace(it, expected_value, std::move(action));
+}
+
+void SyncpointManager::DeregisterAction(std::list<RegisteredAction>& action_storage,
+ ActionHandle& handle) {
+ std::unique_lock lk(guard);
+
+    // Ensure the iterator still exists before erasing it. Erasing an invalid
+    // iterator directly would be undefined behavior, which matters here
+    // because the deregister paths are not called from a locked context.
+ for (auto it = action_storage.begin(); it != action_storage.end(); it++) {
+ if (it == handle) {
+ action_storage.erase(it);
+ return;
+ }
+ }
+}
+
+void SyncpointManager::DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle) {
+ DeregisterAction(guest_action_storage[syncpoint_id], handle);
+}
+
+void SyncpointManager::DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle) {
+ DeregisterAction(host_action_storage[syncpoint_id], handle);
+}
+
+void SyncpointManager::IncrementGuest(u32 syncpoint_id) {
+ Increment(syncpoints_guest[syncpoint_id], wait_guest_cv, guest_action_storage[syncpoint_id]);
+}
+
+void SyncpointManager::IncrementHost(u32 syncpoint_id) {
+ Increment(syncpoints_host[syncpoint_id], wait_host_cv, host_action_storage[syncpoint_id]);
+}
+
+void SyncpointManager::WaitGuest(u32 syncpoint_id, u32 expected_value) {
+ Wait(syncpoints_guest[syncpoint_id], wait_guest_cv, expected_value);
+}
+
+void SyncpointManager::WaitHost(u32 syncpoint_id, u32 expected_value) {
+ MICROPROFILE_SCOPE(GPU_wait);
+ Wait(syncpoints_host[syncpoint_id], wait_host_cv, expected_value);
+}
+
+void SyncpointManager::Increment(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
+ std::list<RegisteredAction>& action_storage) {
+ auto new_value{syncpoint.fetch_add(1, std::memory_order_acq_rel) + 1};
+
+ std::unique_lock lk(guard);
+ auto it = action_storage.begin();
+ while (it != action_storage.end()) {
+ if (it->expected_value > new_value) {
+ break;
+ }
+ it->action();
+ it = action_storage.erase(it);
+ }
+ wait_cv.notify_all();
+}
+
+void SyncpointManager::Wait(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
+ u32 expected_value) {
+ const auto pred = [&]() { return syncpoint.load(std::memory_order_acquire) >= expected_value; };
+ if (pred()) {
+ return;
+ }
+
+ std::unique_lock lk(guard);
+ wait_cv.wait(lk, pred);
+}
+
+} // namespace Host1x
+
+} // namespace Tegra
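
RegisterAction and Wait above share a double-checked pattern: probe the atomic without the lock for the fast path, then re-check under the lock so an increment that lands between the two reads cannot strand a waiter. Distilled to its essentials (hypothetical names; the real code also maintains the ordered action list):

    #include <atomic>
    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    // Returns immediately once `counter` has passed `expected`; otherwise sleeps
    // until an incrementer notifies `cv` after updating the counter.
    void WaitThreshold(std::atomic<std::uint32_t>& counter, std::uint32_t expected,
                       std::mutex& mutex, std::condition_variable& cv) {
        if (counter.load(std::memory_order_acquire) >= expected) {
            return; // fast path, no lock taken
        }
        std::unique_lock lk{mutex};
        cv.wait(lk, [&] { return counter.load(std::memory_order_acquire) >= expected; });
    }
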
diff --git a/src/video_core/host1x/syncpoint_manager.h b/src/video_core/host1x/syncpoint_manager.h
new file mode 100644
index 000000000..50a264e23
--- /dev/null
+++ b/src/video_core/host1x/syncpoint_manager.h
@@ -0,0 +1,98 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <array>
+#include <atomic>
+#include <condition_variable>
+#include <functional>
+#include <list>
+#include <mutex>
+
+#include "common/common_types.h"
+
+namespace Tegra {
+
+namespace Host1x {
+
+class SyncpointManager {
+public:
+ u32 GetGuestSyncpointValue(u32 id) const {
+ return syncpoints_guest[id].load(std::memory_order_acquire);
+ }
+
+ u32 GetHostSyncpointValue(u32 id) const {
+ return syncpoints_host[id].load(std::memory_order_acquire);
+ }
+
+ struct RegisteredAction {
+ explicit RegisteredAction(u32 expected_value_, std::function<void()>&& action_)
+ : expected_value{expected_value_}, action{std::move(action_)} {}
+ u32 expected_value;
+ std::function<void()> action;
+ };
+ using ActionHandle = std::list<RegisteredAction>::iterator;
+
+ template <typename Func>
+ ActionHandle RegisterGuestAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
+ std::function<void()> func(action);
+ return RegisterAction(syncpoints_guest[syncpoint_id], guest_action_storage[syncpoint_id],
+ expected_value, std::move(func));
+ }
+
+ template <typename Func>
+ ActionHandle RegisterHostAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
+ std::function<void()> func(action);
+ return RegisterAction(syncpoints_host[syncpoint_id], host_action_storage[syncpoint_id],
+ expected_value, std::move(func));
+ }
+
+ void DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle);
+
+ void DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle);
+
+ void IncrementGuest(u32 syncpoint_id);
+
+ void IncrementHost(u32 syncpoint_id);
+
+ void WaitGuest(u32 syncpoint_id, u32 expected_value);
+
+ void WaitHost(u32 syncpoint_id, u32 expected_value);
+
+ bool IsReadyGuest(u32 syncpoint_id, u32 expected_value) const {
+ return syncpoints_guest[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
+ }
+
+ bool IsReadyHost(u32 syncpoint_id, u32 expected_value) const {
+ return syncpoints_host[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
+ }
+
+private:
+ void Increment(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
+ std::list<RegisteredAction>& action_storage);
+
+ ActionHandle RegisterAction(std::atomic<u32>& syncpoint,
+ std::list<RegisteredAction>& action_storage, u32 expected_value,
+ std::function<void()>&& action);
+
+ void DeregisterAction(std::list<RegisteredAction>& action_storage, ActionHandle& handle);
+
+ void Wait(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv, u32 expected_value);
+
+ static constexpr size_t NUM_MAX_SYNCPOINTS = 192;
+
+ std::array<std::atomic<u32>, NUM_MAX_SYNCPOINTS> syncpoints_guest{};
+ std::array<std::atomic<u32>, NUM_MAX_SYNCPOINTS> syncpoints_host{};
+
+ std::array<std::list<RegisteredAction>, NUM_MAX_SYNCPOINTS> guest_action_storage;
+ std::array<std::list<RegisteredAction>, NUM_MAX_SYNCPOINTS> host_action_storage;
+
+ std::mutex guard;
+ std::condition_variable wait_guest_cv;
+ std::condition_variable wait_host_cv;
+};
+
+} // namespace Host1x
+
+} // namespace Tegra
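
Given this interface, a client can either block with WaitGuest/WaitHost or register a callback that fires once the target value is reached. A hedged usage sketch (the helper name, syncpoint ID, and threshold are illustrative; assumes video_core/host1x/host1x.h is included):

    void ArmAndAbandon(Tegra::Host1x::Host1x& host1x) {
        auto& syncpoint_manager = host1x.GetSyncpointManager();
        // Runs immediately if syncpoint 3 already reached 10; otherwise runs on
        // the thread whose increment crosses the threshold.
        auto handle = syncpoint_manager.RegisterHostAction(3, 10, [] {
            // e.g. signal a fence or wake a waiter
        });
        // If the wait is abandoned before the action fires, the registration
        // must be removed explicitly:
        syncpoint_manager.DeregisterHostAction(3, handle);
    }
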
diff --git a/src/video_core/command_classes/vic.cpp b/src/video_core/host1x/vic.cpp
index 7c17df353..ac0b7d20e 100644
--- a/src/video_core/command_classes/vic.cpp
+++ b/src/video_core/host1x/vic.cpp
@@ -18,14 +18,17 @@ extern "C" {
#include "common/bit_field.h"
#include "common/logging/log.h"
-#include "video_core/command_classes/nvdec.h"
-#include "video_core/command_classes/vic.h"
#include "video_core/engines/maxwell_3d.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/nvdec.h"
+#include "video_core/host1x/vic.h"
#include "video_core/memory_manager.h"
#include "video_core/textures/decoders.h"
namespace Tegra {
+
+namespace Host1x {
+
namespace {
enum class VideoPixelFormat : u64_le {
RGBA8 = 0x1f,
@@ -46,8 +49,8 @@ union VicConfig {
BitField<46, 14, u64_le> surface_height_minus1;
};
-Vic::Vic(GPU& gpu_, std::shared_ptr<Nvdec> nvdec_processor_)
- : gpu(gpu_),
+Vic::Vic(Host1x& host1x_, std::shared_ptr<Nvdec> nvdec_processor_)
+ : host1x(host1x_),
nvdec_processor(std::move(nvdec_processor_)), converted_frame_buffer{nullptr, av_free} {}
Vic::~Vic() = default;
@@ -78,7 +81,7 @@ void Vic::Execute() {
LOG_ERROR(Service_NVDRV, "VIC Luma address not set.");
return;
}
- const VicConfig config{gpu.MemoryManager().Read<u64>(config_struct_address + 0x20)};
+ const VicConfig config{host1x.MemoryManager().Read<u64>(config_struct_address + 0x20)};
const AVFramePtr frame_ptr = nvdec_processor->GetFrame();
const auto* frame = frame_ptr.get();
if (!frame) {
@@ -153,15 +156,16 @@ void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) {
const u32 block_height = static_cast<u32>(config.block_linear_height_log2);
const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0);
luma_buffer.resize(size);
- Texture::SwizzleSubrect(width, height, width * 4, width, 4, luma_buffer.data(),
- converted_frame_buf_addr, block_height, 0, 0);
+ std::span<const u8> frame_buff(converted_frame_buf_addr, 4 * width * height);
+ Texture::SwizzleSubrect(luma_buffer, frame_buff, 4, width, height, 1, 0, 0, width, height,
+ block_height, 0, width * 4);
- gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
+ host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
} else {
// send pitch linear frame
const size_t linear_size = width * height * 4;
- gpu.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
- linear_size);
+ host1x.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
+ linear_size);
}
}
@@ -189,8 +193,8 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
luma_buffer[dst + x] = luma_src[src + x];
}
}
- gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
- luma_buffer.size());
+ host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
+ luma_buffer.size());
// Chroma
const std::size_t half_height = frame_height / 2;
@@ -231,8 +235,10 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
ASSERT(false);
break;
}
- gpu.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
- chroma_buffer.size());
+ host1x.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
+ chroma_buffer.size());
}
+} // namespace Host1x
+
} // namespace Tegra
diff --git a/src/video_core/command_classes/vic.h b/src/video_core/host1x/vic.h
index 010daa6b6..2b78786e8 100644
--- a/src/video_core/command_classes/vic.h
+++ b/src/video_core/host1x/vic.h
@@ -10,7 +10,10 @@
struct SwsContext;
namespace Tegra {
-class GPU;
+
+namespace Host1x {
+
+class Host1x;
class Nvdec;
union VicConfig;
@@ -25,7 +28,7 @@ public:
SetOutputSurfaceChromaUnusedOffset = 0x1ca
};
- explicit Vic(GPU& gpu, std::shared_ptr<Nvdec> nvdec_processor);
+ explicit Vic(Host1x& host1x, std::shared_ptr<Nvdec> nvdec_processor);
~Vic();
@@ -39,8 +42,8 @@ private:
void WriteYUVFrame(const AVFrame* frame, const VicConfig& config);
- GPU& gpu;
- std::shared_ptr<Tegra::Nvdec> nvdec_processor;
+ Host1x& host1x;
+ std::shared_ptr<Tegra::Host1x::Nvdec> nvdec_processor;
/// Avoid reallocation of the following buffers every frame, as their
/// size does not change during a stream
@@ -58,4 +61,6 @@ private:
s32 scaler_height{};
};
+} // namespace Host1x
+
} // namespace Tegra
diff --git a/src/video_core/host_shaders/astc_decoder.comp b/src/video_core/host_shaders/astc_decoder.comp
index 3441a5fe5..d608678a3 100644
--- a/src/video_core/host_shaders/astc_decoder.comp
+++ b/src/video_core/host_shaders/astc_decoder.comp
@@ -1065,7 +1065,7 @@ TexelWeightParams DecodeBlockInfo() {
void FillError(ivec3 coord) {
for (uint j = 0; j < block_dims.y; j++) {
for (uint i = 0; i < block_dims.x; i++) {
- imageStore(dest_image, coord + ivec3(i, j, 0), vec4(1.0, 1.0, 0.0, 1.0));
+ imageStore(dest_image, coord + ivec3(i, j, 0), vec4(0.0, 0.0, 0.0, 0.0));
}
}
}
diff --git a/src/video_core/macro/macro.cpp b/src/video_core/macro/macro.cpp
index 43f8b5904..f61d5998e 100644
--- a/src/video_core/macro/macro.cpp
+++ b/src/video_core/macro/macro.cpp
@@ -8,6 +8,7 @@
#include <boost/container_hash/hash.hpp>
+#include <fstream>
#include "common/assert.h"
#include "common/fs/fs.h"
#include "common/fs/path_util.h"
diff --git a/src/video_core/macro/macro_hle.cpp b/src/video_core/macro/macro_hle.cpp
index 58382755b..f896591bf 100644
--- a/src/video_core/macro/macro_hle.cpp
+++ b/src/video_core/macro/macro_hle.cpp
@@ -3,6 +3,8 @@
#include <array>
#include <vector>
+#include "common/scope_exit.h"
+#include "video_core/dirty_flags.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/macro/macro.h"
#include "video_core/macro/macro_hle.h"
@@ -19,71 +21,116 @@ void HLE_771BB18C62444DA0(Engines::Maxwell3D& maxwell3d, const std::vector<u32>&
maxwell3d.regs.draw.topology.Assign(
static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[0] & 0x3ffffff));
- maxwell3d.regs.vb_base_instance = parameters[5];
- maxwell3d.mme_draw.instance_count = instance_count;
- maxwell3d.regs.vb_element_base = parameters[3];
- maxwell3d.regs.index_array.count = parameters[1];
- maxwell3d.regs.index_array.first = parameters[4];
+ maxwell3d.regs.global_base_instance_index = parameters[5];
+ maxwell3d.regs.global_base_vertex_index = parameters[3];
+ maxwell3d.regs.index_buffer.count = parameters[1];
+ maxwell3d.regs.index_buffer.first = parameters[4];
if (maxwell3d.ShouldExecute()) {
- maxwell3d.Rasterizer().Draw(true, true);
+ maxwell3d.Rasterizer().Draw(true, instance_count);
}
- maxwell3d.regs.index_array.count = 0;
- maxwell3d.mme_draw.instance_count = 0;
- maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
+ maxwell3d.regs.index_buffer.count = 0;
}
void HLE_0D61FC9FAAC9FCAD(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& parameters) {
- const u32 count = (maxwell3d.GetRegisterValue(0xD1B) & parameters[2]);
+ const u32 instance_count = (maxwell3d.GetRegisterValue(0xD1B) & parameters[2]);
maxwell3d.regs.vertex_buffer.first = parameters[3];
maxwell3d.regs.vertex_buffer.count = parameters[1];
- maxwell3d.regs.vb_base_instance = parameters[4];
+ maxwell3d.regs.global_base_instance_index = parameters[4];
maxwell3d.regs.draw.topology.Assign(
static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[0]));
- maxwell3d.mme_draw.instance_count = count;
if (maxwell3d.ShouldExecute()) {
- maxwell3d.Rasterizer().Draw(false, true);
+ maxwell3d.Rasterizer().Draw(false, instance_count);
}
maxwell3d.regs.vertex_buffer.count = 0;
- maxwell3d.mme_draw.instance_count = 0;
- maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
}
void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& parameters) {
const u32 instance_count = (maxwell3d.GetRegisterValue(0xD1B) & parameters[2]);
const u32 element_base = parameters[4];
const u32 base_instance = parameters[5];
- maxwell3d.regs.index_array.first = parameters[3];
- maxwell3d.regs.reg_array[0x446] = element_base; // vertex id base?
- maxwell3d.regs.index_array.count = parameters[1];
- maxwell3d.regs.vb_element_base = element_base;
- maxwell3d.regs.vb_base_instance = base_instance;
- maxwell3d.mme_draw.instance_count = instance_count;
- maxwell3d.CallMethodFromMME(0x8e3, 0x640);
- maxwell3d.CallMethodFromMME(0x8e4, element_base);
- maxwell3d.CallMethodFromMME(0x8e5, base_instance);
+ maxwell3d.regs.index_buffer.first = parameters[3];
+ maxwell3d.regs.vertex_id_base = element_base;
+ maxwell3d.regs.index_buffer.count = parameters[1];
+ maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
+ maxwell3d.regs.global_base_vertex_index = element_base;
+ maxwell3d.regs.global_base_instance_index = base_instance;
+ maxwell3d.CallMethod(0x8e3, 0x640, true);
+ maxwell3d.CallMethod(0x8e4, element_base, true);
+ maxwell3d.CallMethod(0x8e5, base_instance, true);
maxwell3d.regs.draw.topology.Assign(
static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[0]));
if (maxwell3d.ShouldExecute()) {
- maxwell3d.Rasterizer().Draw(true, true);
+ maxwell3d.Rasterizer().Draw(true, instance_count);
+ }
+ maxwell3d.regs.vertex_id_base = 0x0;
+ maxwell3d.regs.index_buffer.count = 0;
+ maxwell3d.regs.global_base_vertex_index = 0x0;
+ maxwell3d.regs.global_base_instance_index = 0x0;
+ maxwell3d.CallMethod(0x8e3, 0x640, true);
+ maxwell3d.CallMethod(0x8e4, 0x0, true);
+ maxwell3d.CallMethod(0x8e5, 0x0, true);
+}
+
+// Multidraw Indirect
+void HLE_3F5E74B9C9A50164(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& parameters) {
+ SCOPE_EXIT({
+ // Clean everything.
+ maxwell3d.regs.vertex_id_base = 0x0;
+ maxwell3d.regs.index_buffer.count = 0;
+ maxwell3d.regs.global_base_vertex_index = 0x0;
+ maxwell3d.regs.global_base_instance_index = 0x0;
+ maxwell3d.CallMethod(0x8e3, 0x640, true);
+ maxwell3d.CallMethod(0x8e4, 0x0, true);
+ maxwell3d.CallMethod(0x8e5, 0x0, true);
+ maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
+ });
+ const u32 start_indirect = parameters[0];
+ const u32 end_indirect = parameters[1];
+ if (start_indirect >= end_indirect) {
+ // Nothing to do.
+ return;
+ }
+ const auto topology =
+ static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[2]);
+ maxwell3d.regs.draw.topology.Assign(topology);
+ const u32 padding = parameters[3];
+ const std::size_t max_draws = parameters[4];
+
+ const u32 indirect_words = 5 + padding;
+ const std::size_t first_draw = start_indirect;
+ const std::size_t effective_draws = end_indirect - start_indirect;
+ const std::size_t last_draw = start_indirect + std::min(effective_draws, max_draws);
+
+ for (std::size_t index = first_draw; index < last_draw; index++) {
+ const std::size_t base = index * indirect_words + 5;
+ const u32 num_vertices = parameters[base];
+ const u32 instance_count = parameters[base + 1];
+ const u32 first_index = parameters[base + 2];
+ const u32 base_vertex = parameters[base + 3];
+ const u32 base_instance = parameters[base + 4];
+ maxwell3d.regs.index_buffer.first = first_index;
+ maxwell3d.regs.vertex_id_base = base_vertex;
+ maxwell3d.regs.index_buffer.count = num_vertices;
+ maxwell3d.regs.global_base_vertex_index = base_vertex;
+ maxwell3d.regs.global_base_instance_index = base_instance;
+ maxwell3d.CallMethod(0x8e3, 0x640, true);
+ maxwell3d.CallMethod(0x8e4, base_vertex, true);
+ maxwell3d.CallMethod(0x8e5, base_instance, true);
+ maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
+ if (maxwell3d.ShouldExecute()) {
+ maxwell3d.Rasterizer().Draw(true, instance_count);
+ }
}
- maxwell3d.regs.reg_array[0x446] = 0x0; // vertex id base?
- maxwell3d.regs.index_array.count = 0;
- maxwell3d.regs.vb_element_base = 0x0;
- maxwell3d.regs.vb_base_instance = 0x0;
- maxwell3d.mme_draw.instance_count = 0;
- maxwell3d.CallMethodFromMME(0x8e3, 0x640);
- maxwell3d.CallMethodFromMME(0x8e4, 0x0);
- maxwell3d.CallMethodFromMME(0x8e5, 0x0);
- maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
}
-constexpr std::array<std::pair<u64, HLEFunction>, 3> hle_funcs{{
+constexpr std::array<std::pair<u64, HLEFunction>, 4> hle_funcs{{
{0x771BB18C62444DA0, &HLE_771BB18C62444DA0},
{0x0D61FC9FAAC9FCAD, &HLE_0D61FC9FAAC9FCAD},
{0x0217920100488FF7, &HLE_0217920100488FF7},
+ {0x3F5E74B9C9A50164, &HLE_3F5E74B9C9A50164},
}};
class HLEMacroImpl final : public CachedMacro {
@@ -99,6 +146,7 @@ private:
Engines::Maxwell3D& maxwell3d;
HLEFunction func;
};
+
} // Anonymous namespace
HLEMacro::HLEMacro(Engines::Maxwell3D& maxwell3d_) : maxwell3d{maxwell3d_} {}
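
HLE_3F5E74B9C9A50164 above consumes a five-word header (start, end, topology, padding, max_draws) followed by one record per draw, where each record is 5 + padding words in the same order as VkDrawIndexedIndirectCommand. A standalone sketch of just the decoding arithmetic the macro performs:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct IndirectDraw {
        std::uint32_t num_vertices;
        std::uint32_t instance_count;
        std::uint32_t first_index;
        std::uint32_t base_vertex;
        std::uint32_t base_instance;
    };

    // Mirrors the indexing in HLE_3F5E74B9C9A50164: header words 0..4, then
    // records of (5 + padding) words each, record i starting at i * stride + 5.
    std::vector<IndirectDraw> DecodeDraws(const std::vector<std::uint32_t>& params) {
        const std::uint32_t start = params[0];
        const std::uint32_t end = params[1];
        const std::uint32_t padding = params[3];
        const std::size_t max_draws = params[4];
        const std::size_t stride = 5 + padding;

        std::vector<IndirectDraw> draws;
        if (start >= end) {
            return draws; // nothing to do
        }
        const std::size_t last = start + std::min<std::size_t>(end - start, max_draws);
        for (std::size_t i = start; i < last; ++i) {
            const std::size_t base = i * stride + 5;
            draws.push_back({params[base], params[base + 1], params[base + 2],
                             params[base + 3], params[base + 4]});
        }
        return draws;
    }
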
diff --git a/src/video_core/macro/macro_interpreter.cpp b/src/video_core/macro/macro_interpreter.cpp
index f670b1bca..c0d32c112 100644
--- a/src/video_core/macro/macro_interpreter.cpp
+++ b/src/video_core/macro/macro_interpreter.cpp
@@ -335,7 +335,7 @@ void MacroInterpreterImpl::SetMethodAddress(u32 address) {
}
void MacroInterpreterImpl::Send(u32 value) {
- maxwell3d.CallMethodFromMME(method_address.address, value);
+ maxwell3d.CallMethod(method_address.address, value, true);
// Increment the method address by the method increment.
method_address.address.Assign(method_address.address.Value() +
method_address.increment.Value());
diff --git a/src/video_core/macro/macro_jit_x64.cpp b/src/video_core/macro/macro_jit_x64.cpp
index aca25d902..25c1ce798 100644
--- a/src/video_core/macro/macro_jit_x64.cpp
+++ b/src/video_core/macro/macro_jit_x64.cpp
@@ -279,28 +279,13 @@ void MacroJITx64Impl::Compile_ExtractInsert(Macro::Opcode opcode) {
auto dst = Compile_GetRegister(opcode.src_a, RESULT);
auto src = Compile_GetRegister(opcode.src_b, eax);
- if (opcode.bf_src_bit != 0 && opcode.bf_src_bit != 31) {
- shr(src, opcode.bf_src_bit);
- } else if (opcode.bf_src_bit == 31) {
- xor_(src, src);
- }
- // Don't bother masking the whole register since we're using a 32 bit register
- if (opcode.bf_size != 31 && opcode.bf_size != 0) {
- and_(src, opcode.GetBitfieldMask());
- } else if (opcode.bf_size == 0) {
- xor_(src, src);
- }
- if (opcode.bf_dst_bit != 31 && opcode.bf_dst_bit != 0) {
- shl(src, opcode.bf_dst_bit);
- } else if (opcode.bf_dst_bit == 31) {
- xor_(src, src);
- }
-
const u32 mask = ~(opcode.GetBitfieldMask() << opcode.bf_dst_bit);
- if (mask != 0xffffffff) {
- and_(dst, mask);
- }
+ and_(dst, mask);
+ shr(src, opcode.bf_src_bit);
+ and_(src, opcode.GetBitfieldMask());
+ shl(src, opcode.bf_dst_bit);
or_(dst, src);
+
Compile_ProcessResult(opcode.result_operation, opcode.dst);
}
@@ -309,17 +294,9 @@ void MacroJITx64Impl::Compile_ExtractShiftLeftImmediate(Macro::Opcode opcode) {
const auto src = Compile_GetRegister(opcode.src_b, RESULT);
shr(src, dst.cvt8());
- if (opcode.bf_size != 0 && opcode.bf_size != 31) {
- and_(src, opcode.GetBitfieldMask());
- } else if (opcode.bf_size == 0) {
- xor_(src, src);
- }
+ and_(src, opcode.GetBitfieldMask());
+ shl(src, opcode.bf_dst_bit);
- if (opcode.bf_dst_bit != 0 && opcode.bf_dst_bit != 31) {
- shl(src, opcode.bf_dst_bit);
- } else if (opcode.bf_dst_bit == 31) {
- xor_(src, src);
- }
Compile_ProcessResult(opcode.result_operation, opcode.dst);
}
@@ -327,13 +304,8 @@ void MacroJITx64Impl::Compile_ExtractShiftLeftRegister(Macro::Opcode opcode) {
const auto dst = Compile_GetRegister(opcode.src_a, ecx);
const auto src = Compile_GetRegister(opcode.src_b, RESULT);
- if (opcode.bf_src_bit != 0) {
- shr(src, opcode.bf_src_bit);
- }
-
- if (opcode.bf_size != 31) {
- and_(src, opcode.GetBitfieldMask());
- }
+ shr(src, opcode.bf_src_bit);
+ and_(src, opcode.GetBitfieldMask());
shl(src, dst.cvt8());
Compile_ProcessResult(opcode.result_operation, opcode.dst);
@@ -374,7 +346,7 @@ void MacroJITx64Impl::Compile_Read(Macro::Opcode opcode) {
}
void Send(Engines::Maxwell3D* maxwell3d, Macro::MethodAddress method_address, u32 value) {
- maxwell3d->CallMethodFromMME(method_address.address, value);
+ maxwell3d->CallMethod(method_address.address, value, true);
}
void MacroJITx64Impl::Compile_Send(Xbyak::Reg32 value) {
@@ -429,17 +401,11 @@ void MacroJITx64Impl::Compile_Branch(Macro::Opcode opcode) {
Xbyak::Label handle_post_exit{};
Xbyak::Label skip{};
jmp(skip, T_NEAR);
- if (opcode.is_exit) {
- L(handle_post_exit);
- // Execute 1 instruction
- mov(BRANCH_HOLDER, end_of_code);
- // Jump to next instruction to skip delay slot check
- jmp(labels[jump_address], T_NEAR);
- } else {
- L(handle_post_exit);
- xor_(BRANCH_HOLDER, BRANCH_HOLDER);
- jmp(labels[jump_address], T_NEAR);
- }
+
+ L(handle_post_exit);
+ xor_(BRANCH_HOLDER, BRANCH_HOLDER);
+ jmp(labels[jump_address], T_NEAR);
+
L(skip);
mov(BRANCH_HOLDER, handle_post_exit);
jmp(delay_skip[pc], T_NEAR);
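
The rewrite above drops the special-cased zeroing at boundary bit positions in favor of an unconditional mask-shift-merge sequence. In scalar C++, the extract-insert operation being compiled is roughly the following (field names assumed from Macro::Opcode; shift counts are taken to be below 32):

    #include <cstdint>

    // Keeps everything in `dst` outside the destination bitfield, then inserts
    // `size` bits taken from `src` at src_bit into `dst` at dst_bit.
    std::uint32_t ExtractInsert(std::uint32_t dst, std::uint32_t src, unsigned src_bit,
                                unsigned dst_bit, unsigned size) {
        const std::uint32_t field_mask = (size >= 32) ? 0xFFFFFFFFu : ((1u << size) - 1u);
        dst &= ~(field_mask << dst_bit);
        const std::uint32_t field = (src >> src_bit) & field_mask;
        return dst | (field << dst_bit);
    }
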
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index bf9eb735d..384350dbd 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -7,6 +7,7 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
+#include "core/device_memory.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/memory.h"
@@ -16,172 +17,239 @@
namespace Tegra {
-MemoryManager::MemoryManager(Core::System& system_)
- : system{system_}, page_table(page_table_size) {}
+std::atomic<size_t> MemoryManager::unique_identifier_generator{};
+
+MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 big_page_bits_,
+ u64 page_bits_)
+ : system{system_}, memory{system.Memory()}, device_memory{system.DeviceMemory()},
+ address_space_bits{address_space_bits_}, page_bits{page_bits_}, big_page_bits{big_page_bits_},
+ entries{}, big_entries{}, page_table{address_space_bits, address_space_bits + page_bits - 38,
+ page_bits != big_page_bits ? page_bits : 0},
+ unique_identifier{unique_identifier_generator.fetch_add(1, std::memory_order_acq_rel)} {
+ address_space_size = 1ULL << address_space_bits;
+ page_size = 1ULL << page_bits;
+ page_mask = page_size - 1ULL;
+ big_page_size = 1ULL << big_page_bits;
+ big_page_mask = big_page_size - 1ULL;
+ const u64 page_table_bits = address_space_bits - page_bits;
+ const u64 big_page_table_bits = address_space_bits - big_page_bits;
+ const u64 page_table_size = 1ULL << page_table_bits;
+ const u64 big_page_table_size = 1ULL << big_page_table_bits;
+ page_table_mask = page_table_size - 1;
+ big_page_table_mask = big_page_table_size - 1;
+
+ big_entries.resize(big_page_table_size / 32, 0);
+ big_page_table_cpu.resize(big_page_table_size);
+ big_page_continous.resize(big_page_table_size / continous_bits, 0);
+    std::array<PTEKind, 32> kind_values;
+    kind_values.fill(PTEKind::INVALID);
+    big_kinds.resize(big_page_table_size / 32, kind_values);
+    entries.resize(page_table_size / 32, 0);
+    kinds.resize(big_page_table_size / 32, kind_values);
+}
MemoryManager::~MemoryManager() = default;
-void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
- rasterizer = rasterizer_;
-}
-
-GPUVAddr MemoryManager::UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) {
- u64 remaining_size{size};
- for (u64 offset{}; offset < size; offset += page_size) {
- if (remaining_size < page_size) {
- SetPageEntry(gpu_addr + offset, page_entry + offset, remaining_size);
- } else {
- SetPageEntry(gpu_addr + offset, page_entry + offset);
- }
- remaining_size -= page_size;
+template <bool is_big_page>
+MemoryManager::EntryType MemoryManager::GetEntry(size_t position) const {
+ if constexpr (is_big_page) {
+ position = position >> big_page_bits;
+ const u64 entry_mask = big_entries[position / 32];
+ const size_t sub_index = position % 32;
+ return static_cast<EntryType>((entry_mask >> (2 * sub_index)) & 0x03ULL);
+ } else {
+ position = position >> page_bits;
+ const u64 entry_mask = entries[position / 32];
+ const size_t sub_index = position % 32;
+ return static_cast<EntryType>((entry_mask >> (2 * sub_index)) & 0x03ULL);
}
- return gpu_addr;
}
-GPUVAddr MemoryManager::Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size) {
- const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first);
- if (it != map_ranges.end() && it->first == gpu_addr) {
- it->second = size;
+template <bool is_big_page>
+void MemoryManager::SetEntry(size_t position, MemoryManager::EntryType entry) {
+ if constexpr (is_big_page) {
+ position = position >> big_page_bits;
+ const u64 entry_mask = big_entries[position / 32];
+ const size_t sub_index = position % 32;
+ big_entries[position / 32] =
+ (~(3ULL << sub_index * 2) & entry_mask) | (static_cast<u64>(entry) << sub_index * 2);
} else {
- map_ranges.insert(it, MapRange{gpu_addr, size});
+ position = position >> page_bits;
+ const u64 entry_mask = entries[position / 32];
+ const size_t sub_index = position % 32;
+ entries[position / 32] =
+ (~(3ULL << sub_index * 2) & entry_mask) | (static_cast<u64>(entry) << sub_index * 2);
}
- return UpdateRange(gpu_addr, cpu_addr, size);
}
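
GetEntry and SetEntry above pack page state two bits per page, 32 pages per 64-bit word: word index = position / 32, bit offset = 2 * (position % 32). The same read/write arithmetic in isolation (the exact EntryType enumerators are assumed; only Mapped and Reserved appear in this diff):

    #include <cstdint>
    #include <vector>

    enum class EntryType : std::uint64_t { Free = 0, Reserved = 1, Mapped = 2 };

    EntryType GetPacked(const std::vector<std::uint64_t>& words, std::size_t index) {
        const std::uint64_t word = words[index / 32];
        const std::size_t shift = 2 * (index % 32);
        return static_cast<EntryType>((word >> shift) & 0x3ULL);
    }

    void SetPacked(std::vector<std::uint64_t>& words, std::size_t index, EntryType entry) {
        const std::size_t shift = 2 * (index % 32);
        std::uint64_t& word = words[index / 32];
        word = (word & ~(0x3ULL << shift)) | (static_cast<std::uint64_t>(entry) << shift);
    }
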
-GPUVAddr MemoryManager::MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align) {
- return Map(cpu_addr, *FindFreeRange(size, align), size);
+PTEKind MemoryManager::GetPageKind(GPUVAddr gpu_addr) const {
+ auto entry = GetEntry<true>(gpu_addr);
+ if (entry == EntryType::Mapped || entry == EntryType::Reserved) [[likely]] {
+ return GetKind<true>(gpu_addr);
+ } else {
+ return GetKind<false>(gpu_addr);
+ }
}
-GPUVAddr MemoryManager::MapAllocate32(VAddr cpu_addr, std::size_t size) {
- const std::optional<GPUVAddr> gpu_addr = FindFreeRange(size, 1, true);
- ASSERT(gpu_addr);
- return Map(cpu_addr, *gpu_addr, size);
+template <bool is_big_page>
+PTEKind MemoryManager::GetKind(size_t position) const {
+ if constexpr (is_big_page) {
+ position = position >> big_page_bits;
+ const size_t sub_index = position % 32;
+ return big_kinds[position / 32][sub_index];
+ } else {
+ position = position >> page_bits;
+ const size_t sub_index = position % 32;
+ return kinds[position / 32][sub_index];
+ }
}
-void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
- if (size == 0) {
- return;
- }
- const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first);
- if (it != map_ranges.end()) {
- ASSERT(it->first == gpu_addr);
- map_ranges.erase(it);
+template <bool is_big_page>
+void MemoryManager::SetKind(size_t position, PTEKind kind) {
+ if constexpr (is_big_page) {
+ position = position >> big_page_bits;
+ const size_t sub_index = position % 32;
+ big_kinds[position / 32][sub_index] = kind;
} else {
- ASSERT_MSG(false, "Unmapping non-existent GPU address=0x{:x}", gpu_addr);
+ position = position >> page_bits;
+ const size_t sub_index = position % 32;
+ kinds[position / 32][sub_index] = kind;
}
- const auto submapped_ranges = GetSubmappedRange(gpu_addr, size);
-
- for (const auto& [map_addr, map_size] : submapped_ranges) {
- // Flush and invalidate through the GPU interface, to be asynchronous if possible.
- const std::optional<VAddr> cpu_addr = GpuToCpuAddress(map_addr);
- ASSERT(cpu_addr);
+}
- rasterizer->UnmapMemory(*cpu_addr, map_size);
- }
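+// One bit per big page, set when the entire big page is backed by contiguous host memory.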
+inline bool MemoryManager::IsBigPageContinous(size_t big_page_index) const {
+ const u64 entry_mask = big_page_continous[big_page_index / continous_bits];
+ const size_t sub_index = big_page_index % continous_bits;
+ return ((entry_mask >> sub_index) & 0x1ULL) != 0;
+}
- UpdateRange(gpu_addr, PageEntry::State::Unmapped, size);
+inline void MemoryManager::SetBigPageContinous(size_t big_page_index, bool value) {
+ const u64 continous_mask = big_page_continous[big_page_index / continous_bits];
+ const size_t sub_index = big_page_index % continous_bits;
+ big_page_continous[big_page_index / continous_bits] =
+ (~(1ULL << sub_index) & continous_mask) | (value ? 1ULL << sub_index : 0);
}
-std::optional<GPUVAddr> MemoryManager::AllocateFixed(GPUVAddr gpu_addr, std::size_t size) {
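+// Applies entry_type to every small page in [gpu_addr, gpu_addr + size), recording each page's
+// PTE kind and, when mapping, its CPU backing address in the page table.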
+template <MemoryManager::EntryType entry_type>
+GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size,
+ PTEKind kind) {
+ [[maybe_unused]] u64 remaining_size{size};
+ if constexpr (entry_type == EntryType::Mapped) {
+ page_table.ReserveRange(gpu_addr, size);
+ }
for (u64 offset{}; offset < size; offset += page_size) {
- if (!GetPageEntry(gpu_addr + offset).IsUnmapped()) {
- return std::nullopt;
+ const GPUVAddr current_gpu_addr = gpu_addr + offset;
+        const auto current_entry_type = GetEntry<false>(current_gpu_addr);
+ SetEntry<false>(current_gpu_addr, entry_type);
+ SetKind<false>(current_gpu_addr, kind);
+ if (current_entry_type != entry_type) {
+            rasterizer->ModifyGPUMemory(unique_identifier, current_gpu_addr, page_size);
+ }
+ if constexpr (entry_type == EntryType::Mapped) {
+ const VAddr current_cpu_addr = cpu_addr + offset;
+ const auto index = PageEntryIndex<false>(current_gpu_addr);
+ const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits);
+ page_table[index] = sub_value;
}
+ remaining_size -= page_size;
}
-
- return UpdateRange(gpu_addr, PageEntry::State::Allocated, size);
-}
-
-GPUVAddr MemoryManager::Allocate(std::size_t size, std::size_t align) {
- return *AllocateFixed(*FindFreeRange(size, align), size);
+ return gpu_addr;
}
-void MemoryManager::TryLockPage(PageEntry page_entry, std::size_t size) {
- if (!page_entry.IsValid()) {
- return;
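+// Big-page variant of PageTableOp; additionally records whether each big page's host backing
+// is contiguous so block copies can take the memcpy fast path.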
+template <MemoryManager::EntryType entry_type>
+GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr,
+ size_t size, PTEKind kind) {
+ [[maybe_unused]] u64 remaining_size{size};
+ for (u64 offset{}; offset < size; offset += big_page_size) {
+ const GPUVAddr current_gpu_addr = gpu_addr + offset;
+        const auto current_entry_type = GetEntry<true>(current_gpu_addr);
+ SetEntry<true>(current_gpu_addr, entry_type);
+ SetKind<true>(current_gpu_addr, kind);
+ if (current_entry_type != entry_type) {
+            rasterizer->ModifyGPUMemory(unique_identifier, current_gpu_addr, big_page_size);
+ }
+ if constexpr (entry_type == EntryType::Mapped) {
+ const VAddr current_cpu_addr = cpu_addr + offset;
+ const auto index = PageEntryIndex<true>(current_gpu_addr);
+ const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits);
+ big_page_table_cpu[index] = sub_value;
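+            // Probe each small page in this big page; fully contiguous host backing enables
+            // the memcpy fast path in ReadBlock/WriteBlock (see IsBigPageContinous).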
+ const bool is_continous = ([&] {
+ uintptr_t base_ptr{
+ reinterpret_cast<uintptr_t>(memory.GetPointerSilent(current_cpu_addr))};
+ if (base_ptr == 0) {
+ return false;
+ }
+ for (VAddr start_cpu = current_cpu_addr + page_size;
+ start_cpu < current_cpu_addr + big_page_size; start_cpu += page_size) {
+ base_ptr += page_size;
+ auto next_ptr = reinterpret_cast<uintptr_t>(memory.GetPointerSilent(start_cpu));
+ if (next_ptr == 0 || base_ptr != next_ptr) {
+ return false;
+ }
+ }
+ return true;
+ })();
+ SetBigPageContinous(index, is_continous);
+ }
+ remaining_size -= big_page_size;
}
-
- ASSERT(system.CurrentProcess()
- ->PageTable()
- .LockForDeviceAddressSpace(page_entry.ToAddress(), size)
- .IsSuccess());
+ return gpu_addr;
}
-void MemoryManager::TryUnlockPage(PageEntry page_entry, std::size_t size) {
- if (!page_entry.IsValid()) {
- return;
- }
-
- ASSERT(system.CurrentProcess()
- ->PageTable()
- .UnlockForDeviceAddressSpace(page_entry.ToAddress(), size)
- .IsSuccess());
+void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
+ rasterizer = rasterizer_;
}
-PageEntry MemoryManager::GetPageEntry(GPUVAddr gpu_addr) const {
- return page_table[PageEntryIndex(gpu_addr)];
+GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, PTEKind kind,
+ bool is_big_pages) {
+ if (is_big_pages) [[likely]] {
+ return BigPageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size, kind);
+ }
+ return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size, kind);
}
-void MemoryManager::SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) {
- // TODO(bunnei): We should lock/unlock device regions. This currently causes issues due to
- // improper tracking, but should be fixed in the future.
-
- //// Unlock the old page
- // TryUnlockPage(page_table[PageEntryIndex(gpu_addr)], size);
-
- //// Lock the new page
- // TryLockPage(page_entry, size);
- auto& current_page = page_table[PageEntryIndex(gpu_addr)];
-
- if ((!current_page.IsValid() && page_entry.IsValid()) ||
- current_page.ToAddress() != page_entry.ToAddress()) {
- rasterizer->ModifyGPUMemory(gpu_addr, size);
+GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages) {
+ if (is_big_pages) [[likely]] {
+ return BigPageTableOp<EntryType::Reserved>(gpu_addr, 0, size, PTEKind::INVALID);
}
-
- current_page = page_entry;
+ return PageTableOp<EntryType::Reserved>(gpu_addr, 0, size, PTEKind::INVALID);
}
-std::optional<GPUVAddr> MemoryManager::FindFreeRange(std::size_t size, std::size_t align,
- bool start_32bit_address) const {
- if (!align) {
- align = page_size;
- } else {
- align = Common::AlignUp(align, page_size);
+void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
+ if (size == 0) {
+ return;
}
+ const auto submapped_ranges = GetSubmappedRange(gpu_addr, size);
- u64 available_size{};
- GPUVAddr gpu_addr{start_32bit_address ? address_space_start_low : address_space_start};
- while (gpu_addr + available_size < address_space_size) {
- if (GetPageEntry(gpu_addr + available_size).IsUnmapped()) {
- available_size += page_size;
-
- if (available_size >= size) {
- return gpu_addr;
- }
- } else {
- gpu_addr += available_size + page_size;
- available_size = 0;
+ for (const auto& [map_addr, map_size] : submapped_ranges) {
+ // Flush and invalidate through the GPU interface, to be asynchronous if possible.
+ const std::optional<VAddr> cpu_addr = GpuToCpuAddress(map_addr);
+ ASSERT(cpu_addr);
- const auto remainder{gpu_addr % align};
- if (remainder) {
- gpu_addr = (gpu_addr - remainder) + align;
- }
- }
+ rasterizer->UnmapMemory(*cpu_addr, map_size);
}
- return std::nullopt;
+ BigPageTableOp<EntryType::Free>(gpu_addr, 0, size, PTEKind::INVALID);
+ PageTableOp<EntryType::Free>(gpu_addr, 0, size, PTEKind::INVALID);
}
std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
- if (gpu_addr == 0) {
+ if (!IsWithinGPUAddressRange(gpu_addr)) [[unlikely]] {
return std::nullopt;
}
- const auto page_entry{GetPageEntry(gpu_addr)};
- if (!page_entry.IsValid()) {
- return std::nullopt;
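+    // Check the big-page table first; fall back to the small-page table on a miss.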
+ if (GetEntry<true>(gpu_addr) != EntryType::Mapped) [[unlikely]] {
+ if (GetEntry<false>(gpu_addr) != EntryType::Mapped) {
+ return std::nullopt;
+ }
+
+ const VAddr cpu_addr_base = static_cast<VAddr>(page_table[PageEntryIndex<false>(gpu_addr)])
+ << cpu_page_bits;
+ return cpu_addr_base + (gpu_addr & page_mask);
}
- return page_entry.ToAddress() + (gpu_addr & page_mask);
+ const VAddr cpu_addr_base =
+ static_cast<VAddr>(big_page_table_cpu[PageEntryIndex<true>(gpu_addr)]) << cpu_page_bits;
+ return cpu_addr_base + (gpu_addr & big_page_mask);
}
std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const {
@@ -189,7 +257,7 @@ std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t s
const size_t page_last{(addr + size + page_size - 1) >> page_bits};
while (page_index < page_last) {
const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
- if (page_addr && *page_addr != 0) {
+ if (page_addr) {
return page_addr;
}
++page_index;
@@ -232,126 +300,298 @@ template void MemoryManager::Write<u32>(GPUVAddr addr, u32 data);
template void MemoryManager::Write<u64>(GPUVAddr addr, u64 data);
u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) {
- if (!GetPageEntry(gpu_addr).IsValid()) {
- return {};
- }
-
const auto address{GpuToCpuAddress(gpu_addr)};
if (!address) {
return {};
}
- return system.Memory().GetPointer(*address);
+ return memory.GetPointer(*address);
}
const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
- if (!GetPageEntry(gpu_addr).IsValid()) {
- return {};
- }
-
const auto address{GpuToCpuAddress(gpu_addr)};
if (!address) {
return {};
}
- return system.Memory().GetPointer(*address);
-}
-
-size_t MemoryManager::BytesToMapEnd(GPUVAddr gpu_addr) const noexcept {
- auto it = std::ranges::upper_bound(map_ranges, gpu_addr, {}, &MapRange::first);
- --it;
- return it->second - (gpu_addr - it->first);
-}
-
-void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size,
- bool is_safe) const {
+ return memory.GetPointer(*address);
+}
+
+#ifdef _MSC_VER // Not needed for GCC/Clang, but MSVC is more conservative with inlining.
+#pragma inline_recursion(on)
+#endif
+
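+// Walks [gpu_src_addr, gpu_src_addr + size) one page at a time, dispatching each chunk to the
+// callback that matches its mapping state. A callback returning bool aborts the walk when it
+// returns true.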
+template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped>
+inline void MemoryManager::MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size,
+ FuncMapped&& func_mapped, FuncReserved&& func_reserved,
+ FuncUnmapped&& func_unmapped) const {
+    static constexpr bool BOOL_BREAK_MAPPED =
+        std::is_same_v<std::invoke_result_t<FuncMapped, std::size_t, std::size_t, std::size_t>,
+                       bool>;
+    static constexpr bool BOOL_BREAK_RESERVED =
+        std::is_same_v<std::invoke_result_t<FuncReserved, std::size_t, std::size_t, std::size_t>,
+                       bool>;
+    static constexpr bool BOOL_BREAK_UNMAPPED =
+        std::is_same_v<std::invoke_result_t<FuncUnmapped, std::size_t, std::size_t, std::size_t>,
+                       bool>;
+ u64 used_page_size;
+ u64 used_page_mask;
+ u64 used_page_bits;
+ if constexpr (is_big_pages) {
+ used_page_size = big_page_size;
+ used_page_mask = big_page_mask;
+ used_page_bits = big_page_bits;
+ } else {
+ used_page_size = page_size;
+ used_page_mask = page_mask;
+ used_page_bits = page_bits;
+ }
std::size_t remaining_size{size};
- std::size_t page_index{gpu_src_addr >> page_bits};
- std::size_t page_offset{gpu_src_addr & page_mask};
+ std::size_t page_index{gpu_src_addr >> used_page_bits};
+ std::size_t page_offset{gpu_src_addr & used_page_mask};
+ GPUVAddr current_address = gpu_src_addr;
while (remaining_size > 0) {
const std::size_t copy_amount{
- std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
- const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
- if (page_addr && *page_addr != 0) {
- const auto src_addr{*page_addr + page_offset};
- if (is_safe) {
- // Flush must happen on the rasterizer interface, such that memory is always
- // synchronous when it is read (even when in asynchronous GPU mode).
- // Fixes Dead Cells title menu.
- rasterizer->FlushRegion(src_addr, copy_amount);
+ std::min(static_cast<std::size_t>(used_page_size) - page_offset, remaining_size)};
+ auto entry = GetEntry<is_big_pages>(current_address);
+ if (entry == EntryType::Mapped) [[likely]] {
+ if constexpr (BOOL_BREAK_MAPPED) {
+ if (func_mapped(page_index, page_offset, copy_amount)) {
+ return;
+ }
+ } else {
+ func_mapped(page_index, page_offset, copy_amount);
}
- system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
- } else {
- std::memset(dest_buffer, 0, copy_amount);
- }
+ } else if (entry == EntryType::Reserved) {
+ if constexpr (BOOL_BREAK_RESERVED) {
+ if (func_reserved(page_index, page_offset, copy_amount)) {
+ return;
+ }
+ } else {
+ func_reserved(page_index, page_offset, copy_amount);
+ }
+
+ } else [[unlikely]] {
+ if constexpr (BOOL_BREAK_UNMAPPED) {
+ if (func_unmapped(page_index, page_offset, copy_amount)) {
+ return;
+ }
+ } else {
+ func_unmapped(page_index, page_offset, copy_amount);
+ }
+ }
page_index++;
page_offset = 0;
- dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
remaining_size -= copy_amount;
+ current_address += copy_amount;
}
}
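+// Reads dispatch on mapping state: mapped pages copy from host memory (flushing first when
+// is_safe), while reserved and unmapped ranges read back as zeros.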
+template <bool is_safe>
+void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer,
+ std::size_t size) const {
+ auto set_to_zero = [&]([[maybe_unused]] std::size_t page_index,
+ [[maybe_unused]] std::size_t offset, std::size_t copy_amount) {
+ std::memset(dest_buffer, 0, copy_amount);
+ dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
+ };
+ auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+ if constexpr (is_safe) {
+ rasterizer->FlushRegion(cpu_addr_base, copy_amount);
+ }
+ u8* physical = memory.GetPointer(cpu_addr_base);
+ std::memcpy(dest_buffer, physical, copy_amount);
+ dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
+ };
+ auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+ if constexpr (is_safe) {
+ rasterizer->FlushRegion(cpu_addr_base, copy_amount);
+ }
+ if (!IsBigPageContinous(page_index)) [[unlikely]] {
+ memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount);
+ } else {
+ u8* physical = memory.GetPointer(cpu_addr_base);
+ std::memcpy(dest_buffer, physical, copy_amount);
+ }
+ dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
+ };
+ auto read_short_pages = [&](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ GPUVAddr base = (page_index << big_page_bits) + offset;
+ MemoryOperation<false>(base, copy_amount, mapped_normal, set_to_zero, set_to_zero);
+ };
+ MemoryOperation<true>(gpu_src_addr, size, mapped_big, set_to_zero, read_short_pages);
+}
+
void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const {
- ReadBlockImpl(gpu_src_addr, dest_buffer, size, true);
+ ReadBlockImpl<true>(gpu_src_addr, dest_buffer, size);
}
void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer,
const std::size_t size) const {
- ReadBlockImpl(gpu_src_addr, dest_buffer, size, false);
+ ReadBlockImpl<false>(gpu_src_addr, dest_buffer, size);
}
-void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size,
- bool is_safe) {
- std::size_t remaining_size{size};
- std::size_t page_index{gpu_dest_addr >> page_bits};
- std::size_t page_offset{gpu_dest_addr & page_mask};
-
- while (remaining_size > 0) {
- const std::size_t copy_amount{
- std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
- const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
- if (page_addr && *page_addr != 0) {
- const auto dest_addr{*page_addr + page_offset};
-
- if (is_safe) {
- // Invalidate must happen on the rasterizer interface, such that memory is always
- // synchronous when it is written (even when in asynchronous GPU mode).
- rasterizer->InvalidateRegion(dest_addr, copy_amount);
- }
- system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
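+// Writes mirror ReadBlockImpl: mapped pages copy into host memory (invalidating caches first
+// when is_safe); reserved and unmapped ranges are skipped.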
+template <bool is_safe>
+void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer,
+ std::size_t size) {
+ auto just_advance = [&]([[maybe_unused]] std::size_t page_index,
+ [[maybe_unused]] std::size_t offset, std::size_t copy_amount) {
+ src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
+ };
+ auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+ if constexpr (is_safe) {
+ rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
}
-
- page_index++;
- page_offset = 0;
+ u8* physical = memory.GetPointer(cpu_addr_base);
+ std::memcpy(physical, src_buffer, copy_amount);
src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
- remaining_size -= copy_amount;
- }
+ };
+ auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+ if constexpr (is_safe) {
+ rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
+ }
+ if (!IsBigPageContinous(page_index)) [[unlikely]] {
+ memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount);
+ } else {
+ u8* physical = memory.GetPointer(cpu_addr_base);
+ std::memcpy(physical, src_buffer, copy_amount);
+ }
+ src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
+ };
+ auto write_short_pages = [&](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ GPUVAddr base = (page_index << big_page_bits) + offset;
+ MemoryOperation<false>(base, copy_amount, mapped_normal, just_advance, just_advance);
+ };
+ MemoryOperation<true>(gpu_dest_addr, size, mapped_big, just_advance, write_short_pages);
}
void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) {
- WriteBlockImpl(gpu_dest_addr, src_buffer, size, true);
+ WriteBlockImpl<true>(gpu_dest_addr, src_buffer, size);
}
void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer,
std::size_t size) {
- WriteBlockImpl(gpu_dest_addr, src_buffer, size, false);
+ WriteBlockImpl<false>(gpu_dest_addr, src_buffer, size);
}
void MemoryManager::FlushRegion(GPUVAddr gpu_addr, size_t size) const {
- size_t remaining_size{size};
- size_t page_index{gpu_addr >> page_bits};
- size_t page_offset{gpu_addr & page_mask};
- while (remaining_size > 0) {
- const size_t num_bytes{std::min(page_size - page_offset, remaining_size)};
- if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
- rasterizer->FlushRegion(*page_addr + page_offset, num_bytes);
+ auto do_nothing = [&]([[maybe_unused]] std::size_t page_index,
+ [[maybe_unused]] std::size_t offset,
+ [[maybe_unused]] std::size_t copy_amount) {};
+
+ auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+ rasterizer->FlushRegion(cpu_addr_base, copy_amount);
+ };
+ auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+ rasterizer->FlushRegion(cpu_addr_base, copy_amount);
+ };
+ auto flush_short_pages = [&](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ GPUVAddr base = (page_index << big_page_bits) + offset;
+ MemoryOperation<false>(base, copy_amount, mapped_normal, do_nothing, do_nothing);
+ };
+ MemoryOperation<true>(gpu_addr, size, mapped_big, do_nothing, flush_short_pages);
+}
+
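+// Returns true when any mapped page in the range still holds data the rasterizer must flush.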
+bool MemoryManager::IsMemoryDirty(GPUVAddr gpu_addr, size_t size) const {
+ bool result = false;
+ auto do_nothing = [&]([[maybe_unused]] std::size_t page_index,
+ [[maybe_unused]] std::size_t offset,
+ [[maybe_unused]] std::size_t copy_amount) { return false; };
+
+ auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+ result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount);
+ return result;
+ };
+ auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+ result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount);
+ return result;
+ };
+ auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ GPUVAddr base = (page_index << big_page_bits) + offset;
+ MemoryOperation<false>(base, copy_amount, mapped_normal, do_nothing, do_nothing);
+ return result;
+ };
+ MemoryOperation<true>(gpu_addr, size, mapped_big, do_nothing, check_short_pages);
+ return result;
+}
+
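+// Returns the length in bytes of the longest contiguous CPU-backed span starting at gpu_addr.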
+size_t MemoryManager::MaxContinousRange(GPUVAddr gpu_addr, size_t size) const {
+ std::optional<VAddr> old_page_addr{};
+ size_t range_so_far = 0;
+ bool result{false};
+ auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
+ std::size_t copy_amount) {
+ result = true;
+ return true;
+ };
+ auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+ if (old_page_addr && *old_page_addr != cpu_addr_base) {
+ result = true;
+ return true;
}
- ++page_index;
- page_offset = 0;
- remaining_size -= num_bytes;
- }
+ range_so_far += copy_amount;
+ old_page_addr = {cpu_addr_base + copy_amount};
+ return false;
+ };
+ auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+ if (old_page_addr && *old_page_addr != cpu_addr_base) {
+ return true;
+ }
+ range_so_far += copy_amount;
+ old_page_addr = {cpu_addr_base + copy_amount};
+ return false;
+ };
+ auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ GPUVAddr base = (page_index << big_page_bits) + offset;
+ MemoryOperation<false>(base, copy_amount, short_check, fail, fail);
+ return result;
+ };
+ MemoryOperation<true>(gpu_addr, size, big_check, fail, check_short_pages);
+ return range_so_far;
+}
+
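+// Invalidates rasterizer caches over every mapped page in the range without flushing them.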
+void MemoryManager::InvalidateRegion(GPUVAddr gpu_addr, size_t size) const {
+ auto do_nothing = [&]([[maybe_unused]] std::size_t page_index,
+ [[maybe_unused]] std::size_t offset,
+ [[maybe_unused]] std::size_t copy_amount) {};
+
+ auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+ rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
+ };
+ auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+ rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
+ };
+ auto invalidate_short_pages = [&](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ GPUVAddr base = (page_index << big_page_bits) + offset;
+ MemoryOperation<false>(base, copy_amount, mapped_normal, do_nothing, do_nothing);
+ };
+ MemoryOperation<true>(gpu_addr, size, mapped_big, do_nothing, invalidate_short_pages);
}
void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size) {
@@ -365,87 +605,134 @@ void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std
}
bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const {
- const auto cpu_addr{GpuToCpuAddress(gpu_addr)};
- if (!cpu_addr) {
+ if (GetEntry<true>(gpu_addr) == EntryType::Mapped) [[likely]] {
+ size_t page_index = gpu_addr >> big_page_bits;
+ if (IsBigPageContinous(page_index)) [[likely]] {
+            const std::size_t page{(gpu_addr & big_page_mask) + size};
+ return page <= big_page_size;
+ }
+ const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
+ return page <= Core::Memory::YUZU_PAGESIZE;
+ }
+ if (GetEntry<false>(gpu_addr) != EntryType::Mapped) {
return false;
}
- const std::size_t page{(*cpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
+ const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
return page <= Core::Memory::YUZU_PAGESIZE;
}
bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const {
- size_t page_index{gpu_addr >> page_bits};
- const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits};
std::optional<VAddr> old_page_addr{};
- while (page_index != page_last) {
- const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
- if (!page_addr || *page_addr == 0) {
- return false;
+ bool result{true};
+ auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
+ std::size_t copy_amount) {
+ result = false;
+ return true;
+ };
+ auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+ if (old_page_addr && *old_page_addr != cpu_addr_base) {
+ result = false;
+ return true;
}
- if (old_page_addr) {
- if (*old_page_addr + page_size != *page_addr) {
- return false;
- }
+ old_page_addr = {cpu_addr_base + copy_amount};
+ return false;
+ };
+ auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+ if (old_page_addr && *old_page_addr != cpu_addr_base) {
+ result = false;
+ return true;
}
- old_page_addr = page_addr;
- ++page_index;
- }
- return true;
+ old_page_addr = {cpu_addr_base + copy_amount};
+ return false;
+ };
+ auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ GPUVAddr base = (page_index << big_page_bits) + offset;
+ MemoryOperation<false>(base, copy_amount, short_check, fail, fail);
+ return !result;
+ };
+ MemoryOperation<true>(gpu_addr, size, big_check, fail, check_short_pages);
+ return result;
}
bool MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) const {
- size_t page_index{gpu_addr >> page_bits};
- const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits};
- while (page_index < page_last) {
- if (!page_table[page_index].IsValid() || page_table[page_index].ToAddress() == 0) {
- return false;
- }
- ++page_index;
- }
- return true;
+ bool result{true};
+ auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
+ [[maybe_unused]] std::size_t copy_amount) {
+ result = false;
+ return true;
+ };
+ auto pass = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
+ [[maybe_unused]] std::size_t copy_amount) { return false; };
+ auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ GPUVAddr base = (page_index << big_page_bits) + offset;
+ MemoryOperation<false>(base, copy_amount, pass, pass, fail);
+ return !result;
+ };
+ MemoryOperation<true>(gpu_addr, size, pass, fail, check_short_pages);
+ return result;
}
std::vector<std::pair<GPUVAddr, std::size_t>> MemoryManager::GetSubmappedRange(
GPUVAddr gpu_addr, std::size_t size) const {
std::vector<std::pair<GPUVAddr, std::size_t>> result{};
- size_t page_index{gpu_addr >> page_bits};
- size_t remaining_size{size};
- size_t page_offset{gpu_addr & page_mask};
std::optional<std::pair<GPUVAddr, std::size_t>> last_segment{};
std::optional<VAddr> old_page_addr{};
- const auto extend_size = [&last_segment, &page_index, &page_offset](std::size_t bytes) {
- if (!last_segment) {
- const GPUVAddr new_base_addr = (page_index << page_bits) + page_offset;
- last_segment = {new_base_addr, bytes};
- } else {
- last_segment->second += bytes;
- }
- };
- const auto split = [&last_segment, &result] {
+ const auto split = [&last_segment, &result]([[maybe_unused]] std::size_t page_index,
+ [[maybe_unused]] std::size_t offset,
+ [[maybe_unused]] std::size_t copy_amount) {
if (last_segment) {
result.push_back(*last_segment);
last_segment = std::nullopt;
}
};
- while (remaining_size > 0) {
- const size_t num_bytes{std::min(page_size - page_offset, remaining_size)};
- const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
- if (!page_addr || *page_addr == 0) {
- split();
- } else if (old_page_addr) {
- if (*old_page_addr + page_size != *page_addr) {
- split();
+ const auto extend_size_big = [this, &split, &old_page_addr,
+ &last_segment](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+ if (old_page_addr) {
+ if (*old_page_addr != cpu_addr_base) {
+ split(0, 0, 0);
+ }
+ }
+ old_page_addr = {cpu_addr_base + copy_amount};
+ if (!last_segment) {
+ const GPUVAddr new_base_addr = (page_index << big_page_bits) + offset;
+ last_segment = {new_base_addr, copy_amount};
+ } else {
+ last_segment->second += copy_amount;
+ }
+ };
+ const auto extend_size_short = [this, &split, &old_page_addr,
+ &last_segment](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+ if (old_page_addr) {
+ if (*old_page_addr != cpu_addr_base) {
+ split(0, 0, 0);
}
- extend_size(num_bytes);
+ }
+ old_page_addr = {cpu_addr_base + copy_amount};
+ if (!last_segment) {
+ const GPUVAddr new_base_addr = (page_index << page_bits) + offset;
+ last_segment = {new_base_addr, copy_amount};
} else {
- extend_size(num_bytes);
+ last_segment->second += copy_amount;
}
- ++page_index;
- page_offset = 0;
- remaining_size -= num_bytes;
- old_page_addr = page_addr;
- }
- split();
+ };
+ auto do_short_pages = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ GPUVAddr base = (page_index << big_page_bits) + offset;
+ MemoryOperation<false>(base, copy_amount, extend_size_short, split, split);
+ };
+ MemoryOperation<true>(gpu_addr, size, extend_size_big, split, do_short_pages);
+ split(0, 0, 0);
return result;
}
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 74f9ce175..ab4bc9ec6 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -3,73 +3,40 @@
#pragma once
+#include <atomic>
#include <map>
#include <optional>
#include <vector>
#include "common/common_types.h"
+#include "common/multi_level_page_table.h"
+#include "common/virtual_buffer.h"
+#include "video_core/pte_kind.h"
namespace VideoCore {
class RasterizerInterface;
}
namespace Core {
+class DeviceMemory;
+namespace Memory {
+class Memory;
+} // namespace Memory
class System;
-}
+} // namespace Core
namespace Tegra {
-class PageEntry final {
-public:
- enum class State : u32 {
- Unmapped = static_cast<u32>(-1),
- Allocated = static_cast<u32>(-2),
- };
-
- constexpr PageEntry() = default;
- constexpr PageEntry(State state_) : state{state_} {}
- constexpr PageEntry(VAddr addr) : state{static_cast<State>(addr >> ShiftBits)} {}
-
- [[nodiscard]] constexpr bool IsUnmapped() const {
- return state == State::Unmapped;
- }
-
- [[nodiscard]] constexpr bool IsAllocated() const {
- return state == State::Allocated;
- }
-
- [[nodiscard]] constexpr bool IsValid() const {
- return !IsUnmapped() && !IsAllocated();
- }
-
- [[nodiscard]] constexpr VAddr ToAddress() const {
- if (!IsValid()) {
- return {};
- }
-
- return static_cast<VAddr>(state) << ShiftBits;
- }
-
- [[nodiscard]] constexpr PageEntry operator+(u64 offset) const {
- // If this is a reserved value, offsets do not apply
- if (!IsValid()) {
- return *this;
- }
- return PageEntry{(static_cast<VAddr>(state) << ShiftBits) + offset};
- }
-
-private:
- static constexpr std::size_t ShiftBits{12};
-
- State state{State::Unmapped};
-};
-static_assert(sizeof(PageEntry) == 4, "PageEntry is too large");
-
class MemoryManager final {
public:
- explicit MemoryManager(Core::System& system_);
+ explicit MemoryManager(Core::System& system_, u64 address_space_bits_ = 40,
+ u64 big_page_bits_ = 16, u64 page_bits_ = 12);
~MemoryManager();
+ size_t GetID() const {
+ return unique_identifier;
+ }
+
/// Binds a renderer to the memory manager.
void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
@@ -86,9 +53,6 @@ public:
[[nodiscard]] u8* GetPointer(GPUVAddr addr);
[[nodiscard]] const u8* GetPointer(GPUVAddr addr) const;
- /// Returns the number of bytes until the end of the memory map containing the given GPU address
- [[nodiscard]] size_t BytesToMapEnd(GPUVAddr gpu_addr) const noexcept;
-
/**
* ReadBlock and WriteBlock are full read and write operations over virtual
* GPU Memory. It's important to use these when GPU memory may not be continuous
@@ -135,54 +99,109 @@ public:
std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr,
std::size_t size) const;
- [[nodiscard]] GPUVAddr Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size);
- [[nodiscard]] GPUVAddr MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align);
- [[nodiscard]] GPUVAddr MapAllocate32(VAddr cpu_addr, std::size_t size);
- [[nodiscard]] std::optional<GPUVAddr> AllocateFixed(GPUVAddr gpu_addr, std::size_t size);
- [[nodiscard]] GPUVAddr Allocate(std::size_t size, std::size_t align);
+ GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size,
+ PTEKind kind = PTEKind::INVALID, bool is_big_pages = true);
+ GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages = true);
void Unmap(GPUVAddr gpu_addr, std::size_t size);
void FlushRegion(GPUVAddr gpu_addr, size_t size) const;
+ void InvalidateRegion(GPUVAddr gpu_addr, size_t size) const;
+
+ bool IsMemoryDirty(GPUVAddr gpu_addr, size_t size) const;
+
+ size_t MaxContinousRange(GPUVAddr gpu_addr, size_t size) const;
+
+ bool IsWithinGPUAddressRange(GPUVAddr gpu_addr) const {
+ return gpu_addr < address_space_size;
+ }
+
+ PTEKind GetPageKind(GPUVAddr gpu_addr) const;
+
private:
- [[nodiscard]] PageEntry GetPageEntry(GPUVAddr gpu_addr) const;
- void SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size = page_size);
- GPUVAddr UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size);
- [[nodiscard]] std::optional<GPUVAddr> FindFreeRange(std::size_t size, std::size_t align,
- bool start_32bit_address = false) const;
-
- void TryLockPage(PageEntry page_entry, std::size_t size);
- void TryUnlockPage(PageEntry page_entry, std::size_t size);
-
- void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size,
- bool is_safe) const;
- void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size,
- bool is_safe);
-
- [[nodiscard]] static constexpr std::size_t PageEntryIndex(GPUVAddr gpu_addr) {
- return (gpu_addr >> page_bits) & page_table_mask;
+ template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped>
+ inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped,
+ FuncReserved&& func_reserved, FuncUnmapped&& func_unmapped) const;
+
+ template <bool is_safe>
+ void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const;
+
+ template <bool is_safe>
+ void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size);
+
+ template <bool is_big_page>
+ [[nodiscard]] std::size_t PageEntryIndex(GPUVAddr gpu_addr) const {
+ if constexpr (is_big_page) {
+ return (gpu_addr >> big_page_bits) & big_page_table_mask;
+ } else {
+ return (gpu_addr >> page_bits) & page_table_mask;
+ }
}
- static constexpr u64 address_space_size = 1ULL << 40;
- static constexpr u64 address_space_start = 1ULL << 32;
- static constexpr u64 address_space_start_low = 1ULL << 16;
- static constexpr u64 page_bits{16};
- static constexpr u64 page_size{1 << page_bits};
- static constexpr u64 page_mask{page_size - 1};
- static constexpr u64 page_table_bits{24};
- static constexpr u64 page_table_size{1 << page_table_bits};
- static constexpr u64 page_table_mask{page_table_size - 1};
+ inline bool IsBigPageContinous(size_t big_page_index) const;
+ inline void SetBigPageContinous(size_t big_page_index, bool value);
Core::System& system;
+ Core::Memory::Memory& memory;
+ Core::DeviceMemory& device_memory;
+
+ const u64 address_space_bits;
+ const u64 page_bits;
+ u64 address_space_size;
+ u64 page_size;
+ u64 page_mask;
+ u64 page_table_mask;
+ static constexpr u64 cpu_page_bits{12};
+
+ const u64 big_page_bits;
+ u64 big_page_size;
+ u64 big_page_mask;
+ u64 big_page_table_mask;
VideoCore::RasterizerInterface* rasterizer = nullptr;
- std::vector<PageEntry> page_table;
+ enum class EntryType : u64 {
+ Free = 0,
+ Reserved = 1,
+ Mapped = 2,
+ };
+
+ std::vector<u64> entries;
+ std::vector<u64> big_entries;
+
+ template <EntryType entry_type>
+ GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size,
+ PTEKind kind);
+
+ template <EntryType entry_type>
+ GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size,
+ PTEKind kind);
+
+ template <bool is_big_page>
+ inline EntryType GetEntry(size_t position) const;
+
+ template <bool is_big_page>
+ inline void SetEntry(size_t position, EntryType entry);
+
+ std::vector<std::array<PTEKind, 32>> kinds;
+ std::vector<std::array<PTEKind, 32>> big_kinds;
+
+ template <bool is_big_page>
+ inline PTEKind GetKind(size_t position) const;
+
+ template <bool is_big_page>
+ inline void SetKind(size_t position, PTEKind kind);
+
+ Common::MultiLevelPageTable<u32> page_table;
+ Common::VirtualBuffer<u32> big_page_table_cpu;
+
+ std::vector<u64> big_page_continous;
+
+ constexpr static size_t continous_bits = 64;
- using MapRange = std::pair<GPUVAddr, size_t>;
- std::vector<MapRange> map_ranges;
+ const size_t unique_identifier;
- std::vector<std::pair<VAddr, std::size_t>> cache_invalidate_queue;
+ static std::atomic<size_t> unique_identifier_generator;
};
} // namespace Tegra
diff --git a/src/video_core/pte_kind.h b/src/video_core/pte_kind.h
new file mode 100644
index 000000000..591d7214b
--- /dev/null
+++ b/src/video_core/pte_kind.h
@@ -0,0 +1,264 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Tegra {
+
+// https://github.com/NVIDIA/open-gpu-doc/blob/master/manuals/volta/gv100/dev_mmu.ref.txt
+enum class PTEKind : u8 {
+ INVALID = 0xff,
+ PITCH = 0x00,
+ Z16 = 0x01,
+ Z16_2C = 0x02,
+ Z16_MS2_2C = 0x03,
+ Z16_MS4_2C = 0x04,
+ Z16_MS8_2C = 0x05,
+ Z16_MS16_2C = 0x06,
+ Z16_2Z = 0x07,
+ Z16_MS2_2Z = 0x08,
+ Z16_MS4_2Z = 0x09,
+ Z16_MS8_2Z = 0x0a,
+ Z16_MS16_2Z = 0x0b,
+ Z16_2CZ = 0x36,
+ Z16_MS2_2CZ = 0x37,
+ Z16_MS4_2CZ = 0x38,
+ Z16_MS8_2CZ = 0x39,
+ Z16_MS16_2CZ = 0x5f,
+ Z16_4CZ = 0x0c,
+ Z16_MS2_4CZ = 0x0d,
+ Z16_MS4_4CZ = 0x0e,
+ Z16_MS8_4CZ = 0x0f,
+ Z16_MS16_4CZ = 0x10,
+ S8Z24 = 0x11,
+ S8Z24_1Z = 0x12,
+ S8Z24_MS2_1Z = 0x13,
+ S8Z24_MS4_1Z = 0x14,
+ S8Z24_MS8_1Z = 0x15,
+ S8Z24_MS16_1Z = 0x16,
+ S8Z24_2CZ = 0x17,
+ S8Z24_MS2_2CZ = 0x18,
+ S8Z24_MS4_2CZ = 0x19,
+ S8Z24_MS8_2CZ = 0x1a,
+ S8Z24_MS16_2CZ = 0x1b,
+ S8Z24_2CS = 0x1c,
+ S8Z24_MS2_2CS = 0x1d,
+ S8Z24_MS4_2CS = 0x1e,
+ S8Z24_MS8_2CS = 0x1f,
+ S8Z24_MS16_2CS = 0x20,
+ S8Z24_4CSZV = 0x21,
+ S8Z24_MS2_4CSZV = 0x22,
+ S8Z24_MS4_4CSZV = 0x23,
+ S8Z24_MS8_4CSZV = 0x24,
+ S8Z24_MS16_4CSZV = 0x25,
+ V8Z24_MS4_VC12 = 0x26,
+ V8Z24_MS4_VC4 = 0x27,
+ V8Z24_MS8_VC8 = 0x28,
+ V8Z24_MS8_VC24 = 0x29,
+ V8Z24_MS4_VC12_1ZV = 0x2e,
+ V8Z24_MS4_VC4_1ZV = 0x2f,
+ V8Z24_MS8_VC8_1ZV = 0x30,
+ V8Z24_MS8_VC24_1ZV = 0x31,
+ V8Z24_MS4_VC12_2CS = 0x32,
+ V8Z24_MS4_VC4_2CS = 0x33,
+ V8Z24_MS8_VC8_2CS = 0x34,
+ V8Z24_MS8_VC24_2CS = 0x35,
+ V8Z24_MS4_VC12_2CZV = 0x3a,
+ V8Z24_MS4_VC4_2CZV = 0x3b,
+ V8Z24_MS8_VC8_2CZV = 0x3c,
+ V8Z24_MS8_VC24_2CZV = 0x3d,
+ V8Z24_MS4_VC12_2ZV = 0x3e,
+ V8Z24_MS4_VC4_2ZV = 0x3f,
+ V8Z24_MS8_VC8_2ZV = 0x40,
+ V8Z24_MS8_VC24_2ZV = 0x41,
+ V8Z24_MS4_VC12_4CSZV = 0x42,
+ V8Z24_MS4_VC4_4CSZV = 0x43,
+ V8Z24_MS8_VC8_4CSZV = 0x44,
+ V8Z24_MS8_VC24_4CSZV = 0x45,
+ Z24S8 = 0x46,
+ Z24S8_1Z = 0x47,
+ Z24S8_MS2_1Z = 0x48,
+ Z24S8_MS4_1Z = 0x49,
+ Z24S8_MS8_1Z = 0x4a,
+ Z24S8_MS16_1Z = 0x4b,
+ Z24S8_2CS = 0x4c,
+ Z24S8_MS2_2CS = 0x4d,
+ Z24S8_MS4_2CS = 0x4e,
+ Z24S8_MS8_2CS = 0x4f,
+ Z24S8_MS16_2CS = 0x50,
+ Z24S8_2CZ = 0x51,
+ Z24S8_MS2_2CZ = 0x52,
+ Z24S8_MS4_2CZ = 0x53,
+ Z24S8_MS8_2CZ = 0x54,
+ Z24S8_MS16_2CZ = 0x55,
+ Z24S8_4CSZV = 0x56,
+ Z24S8_MS2_4CSZV = 0x57,
+ Z24S8_MS4_4CSZV = 0x58,
+ Z24S8_MS8_4CSZV = 0x59,
+ Z24S8_MS16_4CSZV = 0x5a,
+ Z24V8_MS4_VC12 = 0x5b,
+ Z24V8_MS4_VC4 = 0x5c,
+ Z24V8_MS8_VC8 = 0x5d,
+ Z24V8_MS8_VC24 = 0x5e,
+ YUV_B8C1_2Y = 0x60,
+ YUV_B8C2_2Y = 0x61,
+ YUV_B10C1_2Y = 0x62,
+ YUV_B10C2_2Y = 0x6b,
+ YUV_B12C1_2Y = 0x6c,
+ YUV_B12C2_2Y = 0x6d,
+ Z24V8_MS4_VC12_1ZV = 0x63,
+ Z24V8_MS4_VC4_1ZV = 0x64,
+ Z24V8_MS8_VC8_1ZV = 0x65,
+ Z24V8_MS8_VC24_1ZV = 0x66,
+ Z24V8_MS4_VC12_2CS = 0x67,
+ Z24V8_MS4_VC4_2CS = 0x68,
+ Z24V8_MS8_VC8_2CS = 0x69,
+ Z24V8_MS8_VC24_2CS = 0x6a,
+ Z24V8_MS4_VC12_2CZV = 0x6f,
+ Z24V8_MS4_VC4_2CZV = 0x70,
+ Z24V8_MS8_VC8_2CZV = 0x71,
+ Z24V8_MS8_VC24_2CZV = 0x72,
+ Z24V8_MS4_VC12_2ZV = 0x73,
+ Z24V8_MS4_VC4_2ZV = 0x74,
+ Z24V8_MS8_VC8_2ZV = 0x75,
+ Z24V8_MS8_VC24_2ZV = 0x76,
+ Z24V8_MS4_VC12_4CSZV = 0x77,
+ Z24V8_MS4_VC4_4CSZV = 0x78,
+ Z24V8_MS8_VC8_4CSZV = 0x79,
+ Z24V8_MS8_VC24_4CSZV = 0x7a,
+ ZF32 = 0x7b,
+ ZF32_1Z = 0x7c,
+ ZF32_MS2_1Z = 0x7d,
+ ZF32_MS4_1Z = 0x7e,
+ ZF32_MS8_1Z = 0x7f,
+ ZF32_MS16_1Z = 0x80,
+ ZF32_2CS = 0x81,
+ ZF32_MS2_2CS = 0x82,
+ ZF32_MS4_2CS = 0x83,
+ ZF32_MS8_2CS = 0x84,
+ ZF32_MS16_2CS = 0x85,
+ ZF32_2CZ = 0x86,
+ ZF32_MS2_2CZ = 0x87,
+ ZF32_MS4_2CZ = 0x88,
+ ZF32_MS8_2CZ = 0x89,
+ ZF32_MS16_2CZ = 0x8a,
+ X8Z24_X16V8S8_MS4_VC12 = 0x8b,
+ X8Z24_X16V8S8_MS4_VC4 = 0x8c,
+ X8Z24_X16V8S8_MS8_VC8 = 0x8d,
+ X8Z24_X16V8S8_MS8_VC24 = 0x8e,
+ X8Z24_X16V8S8_MS4_VC12_1CS = 0x8f,
+ X8Z24_X16V8S8_MS4_VC4_1CS = 0x90,
+ X8Z24_X16V8S8_MS8_VC8_1CS = 0x91,
+ X8Z24_X16V8S8_MS8_VC24_1CS = 0x92,
+ X8Z24_X16V8S8_MS4_VC12_1ZV = 0x97,
+ X8Z24_X16V8S8_MS4_VC4_1ZV = 0x98,
+ X8Z24_X16V8S8_MS8_VC8_1ZV = 0x99,
+ X8Z24_X16V8S8_MS8_VC24_1ZV = 0x9a,
+ X8Z24_X16V8S8_MS4_VC12_1CZV = 0x9b,
+ X8Z24_X16V8S8_MS4_VC4_1CZV = 0x9c,
+ X8Z24_X16V8S8_MS8_VC8_1CZV = 0x9d,
+ X8Z24_X16V8S8_MS8_VC24_1CZV = 0x9e,
+ X8Z24_X16V8S8_MS4_VC12_2CS = 0x9f,
+ X8Z24_X16V8S8_MS4_VC4_2CS = 0xa0,
+ X8Z24_X16V8S8_MS8_VC8_2CS = 0xa1,
+ X8Z24_X16V8S8_MS8_VC24_2CS = 0xa2,
+ X8Z24_X16V8S8_MS4_VC12_2CSZV = 0xa3,
+ X8Z24_X16V8S8_MS4_VC4_2CSZV = 0xa4,
+ X8Z24_X16V8S8_MS8_VC8_2CSZV = 0xa5,
+ X8Z24_X16V8S8_MS8_VC24_2CSZV = 0xa6,
+ ZF32_X16V8S8_MS4_VC12 = 0xa7,
+ ZF32_X16V8S8_MS4_VC4 = 0xa8,
+ ZF32_X16V8S8_MS8_VC8 = 0xa9,
+ ZF32_X16V8S8_MS8_VC24 = 0xaa,
+ ZF32_X16V8S8_MS4_VC12_1CS = 0xab,
+ ZF32_X16V8S8_MS4_VC4_1CS = 0xac,
+ ZF32_X16V8S8_MS8_VC8_1CS = 0xad,
+ ZF32_X16V8S8_MS8_VC24_1CS = 0xae,
+ ZF32_X16V8S8_MS4_VC12_1ZV = 0xb3,
+ ZF32_X16V8S8_MS4_VC4_1ZV = 0xb4,
+ ZF32_X16V8S8_MS8_VC8_1ZV = 0xb5,
+ ZF32_X16V8S8_MS8_VC24_1ZV = 0xb6,
+ ZF32_X16V8S8_MS4_VC12_1CZV = 0xb7,
+ ZF32_X16V8S8_MS4_VC4_1CZV = 0xb8,
+ ZF32_X16V8S8_MS8_VC8_1CZV = 0xb9,
+ ZF32_X16V8S8_MS8_VC24_1CZV = 0xba,
+ ZF32_X16V8S8_MS4_VC12_2CS = 0xbb,
+ ZF32_X16V8S8_MS4_VC4_2CS = 0xbc,
+ ZF32_X16V8S8_MS8_VC8_2CS = 0xbd,
+ ZF32_X16V8S8_MS8_VC24_2CS = 0xbe,
+ ZF32_X16V8S8_MS4_VC12_2CSZV = 0xbf,
+ ZF32_X16V8S8_MS4_VC4_2CSZV = 0xc0,
+ ZF32_X16V8S8_MS8_VC8_2CSZV = 0xc1,
+ ZF32_X16V8S8_MS8_VC24_2CSZV = 0xc2,
+ ZF32_X24S8 = 0xc3,
+ ZF32_X24S8_1CS = 0xc4,
+ ZF32_X24S8_MS2_1CS = 0xc5,
+ ZF32_X24S8_MS4_1CS = 0xc6,
+ ZF32_X24S8_MS8_1CS = 0xc7,
+ ZF32_X24S8_MS16_1CS = 0xc8,
+ ZF32_X24S8_2CSZV = 0xce,
+ ZF32_X24S8_MS2_2CSZV = 0xcf,
+ ZF32_X24S8_MS4_2CSZV = 0xd0,
+ ZF32_X24S8_MS8_2CSZV = 0xd1,
+ ZF32_X24S8_MS16_2CSZV = 0xd2,
+ ZF32_X24S8_2CS = 0xd3,
+ ZF32_X24S8_MS2_2CS = 0xd4,
+ ZF32_X24S8_MS4_2CS = 0xd5,
+ ZF32_X24S8_MS8_2CS = 0xd6,
+ ZF32_X24S8_MS16_2CS = 0xd7,
+ S8 = 0x2a,
+ S8_2S = 0x2b,
+ GENERIC_16BX2 = 0xfe,
+ C32_2C = 0xd8,
+ C32_2CBR = 0xd9,
+ C32_2CBA = 0xda,
+ C32_2CRA = 0xdb,
+ C32_2BRA = 0xdc,
+ C32_MS2_2C = 0xdd,
+ C32_MS2_2CBR = 0xde,
+ C32_MS2_4CBRA = 0xcc,
+ C32_MS4_2C = 0xdf,
+ C32_MS4_2CBR = 0xe0,
+ C32_MS4_2CBA = 0xe1,
+ C32_MS4_2CRA = 0xe2,
+ C32_MS4_2BRA = 0xe3,
+ C32_MS4_4CBRA = 0x2c,
+ C32_MS8_MS16_2C = 0xe4,
+ C32_MS8_MS16_2CRA = 0xe5,
+ C64_2C = 0xe6,
+ C64_2CBR = 0xe7,
+ C64_2CBA = 0xe8,
+ C64_2CRA = 0xe9,
+ C64_2BRA = 0xea,
+ C64_MS2_2C = 0xeb,
+ C64_MS2_2CBR = 0xec,
+ C64_MS2_4CBRA = 0xcd,
+ C64_MS4_2C = 0xed,
+ C64_MS4_2CBR = 0xee,
+ C64_MS4_2CBA = 0xef,
+ C64_MS4_2CRA = 0xf0,
+ C64_MS4_2BRA = 0xf1,
+ C64_MS4_4CBRA = 0x2d,
+ C64_MS8_MS16_2C = 0xf2,
+ C64_MS8_MS16_2CRA = 0xf3,
+ C128_2C = 0xf4,
+ C128_2CR = 0xf5,
+ C128_MS2_2C = 0xf6,
+ C128_MS2_2CR = 0xf7,
+ C128_MS4_2C = 0xf8,
+ C128_MS4_2CR = 0xf9,
+ C128_MS8_MS16_2C = 0xfa,
+ C128_MS8_MS16_2CR = 0xfb,
+ X8C24 = 0xfc,
+ PITCH_NO_SWIZZLE = 0xfd,
+ SMSKED_MESSAGE = 0xca,
+ SMHOST_MESSAGE = 0xcb,
+};
+
+constexpr bool IsPitchKind(PTEKind kind) {
+ return kind == PTEKind::PITCH || kind == PTEKind::PITCH_NO_SWIZZLE;
+}
+
+} // namespace Tegra
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h
index 889b606b3..00ce53e3e 100644
--- a/src/video_core/query_cache.h
+++ b/src/video_core/query_cache.h
@@ -17,6 +17,7 @@
#include "common/assert.h"
#include "common/settings.h"
+#include "video_core/control/channel_state_cache.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
@@ -90,13 +91,10 @@ private:
};
template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter>
-class QueryCacheBase {
+class QueryCacheBase : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
public:
- explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_,
- Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::MemoryManager& gpu_memory_)
- : rasterizer{rasterizer_}, maxwell3d{maxwell3d_},
- gpu_memory{gpu_memory_}, streams{{CounterStream{static_cast<QueryCache&>(*this),
+ explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_)
+ : rasterizer{rasterizer_}, streams{{CounterStream{static_cast<QueryCache&>(*this),
VideoCore::QueryType::SamplesPassed}}} {}
void InvalidateRegion(VAddr addr, std::size_t size) {
@@ -117,13 +115,13 @@ public:
*/
void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) {
std::unique_lock lock{mutex};
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+ const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
ASSERT(cpu_addr);
CachedQuery* query = TryGet(*cpu_addr);
if (!query) {
ASSERT_OR_EXECUTE(cpu_addr, return;);
- u8* const host_ptr = gpu_memory.GetPointer(gpu_addr);
+ u8* const host_ptr = gpu_memory->GetPointer(gpu_addr);
query = Register(type, *cpu_addr, host_ptr, timestamp.has_value());
}
@@ -137,8 +135,10 @@ public:
/// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch.
void UpdateCounters() {
std::unique_lock lock{mutex};
- const auto& regs = maxwell3d.regs;
- Stream(VideoCore::QueryType::SamplesPassed).Update(regs.samplecnt_enable);
+ if (maxwell3d) {
+ const auto& regs = maxwell3d->regs;
+ Stream(VideoCore::QueryType::SamplesPassed).Update(regs.zpass_pixel_count_enable);
+ }
}
/// Resets a counter to zero. It doesn't disable the query after resetting.
@@ -264,8 +264,6 @@ private:
static constexpr unsigned YUZU_PAGEBITS = 12;
VideoCore::RasterizerInterface& rasterizer;
- Tegra::Engines::Maxwell3D& maxwell3d;
- Tegra::MemoryManager& gpu_memory;
std::recursive_mutex mutex;
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index a04a76481..1cbfef090 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -16,6 +16,9 @@ class MemoryManager;
namespace Engines {
class AccelerateDMAInterface;
}
+namespace Control {
+struct ChannelState;
+}
} // namespace Tegra
namespace VideoCore {
@@ -37,7 +40,7 @@ public:
virtual ~RasterizerInterface() = default;
/// Dispatches a draw invocation
- virtual void Draw(bool is_indexed, bool is_instanced) = 0;
+ virtual void Draw(bool is_indexed, u32 instance_count) = 0;
/// Clear the current framebuffer
virtual void Clear() = 0;
@@ -59,7 +62,10 @@ public:
virtual void DisableGraphicsUniformBuffer(size_t stage, u32 index) = 0;
/// Signal a GPU based semaphore as a fence
- virtual void SignalSemaphore(GPUVAddr addr, u32 value) = 0;
+ virtual void SignalFence(std::function<void()>&& func) = 0;
+
+ /// Send an operation to be done after a certain amount of flushes.
+ virtual void SyncOperation(std::function<void()>&& func) = 0;
/// Signal a GPU based syncpoint as a fence
virtual void SignalSyncPoint(u32 value) = 0;
@@ -86,13 +92,13 @@ public:
virtual void OnCPUWrite(VAddr addr, u64 size) = 0;
/// Sync memory between guest and host.
- virtual void SyncGuestHost() = 0;
+ virtual void InvalidateGPUCache() = 0;
/// Unmap memory range
virtual void UnmapMemory(VAddr addr, u64 size) = 0;
/// Remap GPU memory range. This means underneath backing memory changed
- virtual void ModifyGPUMemory(GPUVAddr addr, u64 size) = 0;
+ virtual void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) = 0;
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
/// and invalidated
@@ -123,7 +129,7 @@ public:
[[nodiscard]] virtual Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() = 0;
virtual void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
- std::span<u8> memory) = 0;
+ std::span<const u8> memory) = 0;
/// Attempt to use a faster method to display the framebuffer to screen
[[nodiscard]] virtual bool AccelerateDisplay(const Tegra::FramebufferConfig& config,
@@ -137,5 +143,11 @@ public:
/// Initialize disk cached resources for the game being emulated
virtual void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
const DiskResourceLoadCallback& callback) {}
+
+ virtual void InitializeChannel(Tegra::Control::ChannelState& channel) {}
+
+ virtual void BindChannel(Tegra::Control::ChannelState& channel) {}
+
+ virtual void ReleaseChannel(s32 channel_id) {}
};
} // namespace VideoCore
diff --git a/src/video_core/renderer_base.cpp b/src/video_core/renderer_base.cpp
index 45791aa75..e8761a747 100644
--- a/src/video_core/renderer_base.cpp
+++ b/src/video_core/renderer_base.cpp
@@ -1,6 +1,8 @@
// SPDX-FileCopyrightText: 2015 Citra Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <thread>
+
#include "common/logging/log.h"
#include "core/frontend/emu_window.h"
#include "video_core/renderer_base.h"
@@ -35,8 +37,12 @@ void RendererBase::RequestScreenshot(void* data, std::function<void(bool)> callb
LOG_ERROR(Render, "A screenshot is already requested or in progress, ignoring the request");
return;
}
+ auto async_callback{[callback = std::move(callback)](bool invert_y) {
+ std::thread t{callback, invert_y};
+ t.detach();
+ }};
renderer_settings.screenshot_bits = data;
- renderer_settings.screenshot_complete_callback = std::move(callback);
+ renderer_settings.screenshot_complete_callback = async_callback;
renderer_settings.screenshot_framebuffer_layout = layout;
renderer_settings.screenshot_requested = true;
}
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index 32450ee1d..08f4d69ab 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -168,7 +168,7 @@ void BufferCacheRuntime::BindIndexBuffer(Buffer& buffer, u32 offset, u32 size) {
if (has_unified_vertex_buffers) {
buffer.MakeResident(GL_READ_ONLY);
glBufferAddressRangeNV(GL_ELEMENT_ARRAY_ADDRESS_NV, 0, buffer.HostGpuAddr() + offset,
- static_cast<GLsizeiptr>(size));
+ static_cast<GLsizeiptr>(Common::AlignUp(size, 4)));
} else {
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer.Handle());
index_buffer_offset = offset;
diff --git a/src/video_core/renderer_opengl/gl_compute_pipeline.cpp b/src/video_core/renderer_opengl/gl_compute_pipeline.cpp
index 1f0f156ed..26d066004 100644
--- a/src/video_core/renderer_opengl/gl_compute_pipeline.cpp
+++ b/src/video_core/renderer_opengl/gl_compute_pipeline.cpp
@@ -28,12 +28,11 @@ bool ComputePipelineKey::operator==(const ComputePipelineKey& rhs) const noexcep
}
ComputePipeline::ComputePipeline(const Device& device, TextureCache& texture_cache_,
- BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_,
- Tegra::Engines::KeplerCompute& kepler_compute_,
- ProgramManager& program_manager_, const Shader::Info& info_,
- std::string code, std::vector<u32> code_v)
- : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, gpu_memory{gpu_memory_},
- kepler_compute{kepler_compute_}, program_manager{program_manager_}, info{info_} {
+ BufferCache& buffer_cache_, ProgramManager& program_manager_,
+ const Shader::Info& info_, std::string code,
+ std::vector<u32> code_v)
+ : texture_cache{texture_cache_}, buffer_cache{buffer_cache_},
+ program_manager{program_manager_}, info{info_} {
switch (device.GetShaderBackend()) {
case Settings::ShaderBackend::GLSL:
source_program = CreateProgram(code, GL_COMPUTE_SHADER);
@@ -86,7 +85,7 @@ void ComputePipeline::Configure() {
GLsizei texture_binding{};
GLsizei image_binding{};
- const auto& qmd{kepler_compute.launch_description};
+ const auto& qmd{kepler_compute->launch_description};
const auto& cbufs{qmd.const_buffer_config};
const bool via_header_index{qmd.linked_tsc != 0};
const auto read_handle{[&](const auto& desc, u32 index) {
@@ -101,12 +100,13 @@ void ComputePipeline::Configure() {
const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset};
const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() +
secondary_offset};
- const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
- const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
+ const u32 lhs_raw{gpu_memory->Read<u32>(addr) << desc.shift_left};
+ const u32 rhs_raw{gpu_memory->Read<u32>(separate_addr)
+ << desc.secondary_shift_left};
return TexturePair(lhs_raw | rhs_raw, via_header_index);
}
}
- return TexturePair(gpu_memory.Read<u32>(addr), via_header_index);
+ return TexturePair(gpu_memory->Read<u32>(addr), via_header_index);
}};
const auto add_image{[&](const auto& desc, bool blacklist) {
for (u32 index = 0; index < desc.count; ++index) {
diff --git a/src/video_core/renderer_opengl/gl_compute_pipeline.h b/src/video_core/renderer_opengl/gl_compute_pipeline.h
index 723f27f11..6534dec32 100644
--- a/src/video_core/renderer_opengl/gl_compute_pipeline.h
+++ b/src/video_core/renderer_opengl/gl_compute_pipeline.h
@@ -49,10 +49,8 @@ static_assert(std::is_trivially_constructible_v<ComputePipelineKey>);
class ComputePipeline {
public:
explicit ComputePipeline(const Device& device, TextureCache& texture_cache_,
- BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_,
- Tegra::Engines::KeplerCompute& kepler_compute_,
- ProgramManager& program_manager_, const Shader::Info& info_,
- std::string code, std::vector<u32> code_v);
+ BufferCache& buffer_cache_, ProgramManager& program_manager_,
+ const Shader::Info& info_, std::string code, std::vector<u32> code_v);
void Configure();
@@ -60,11 +58,17 @@ public:
return writes_global_memory;
}
+ void SetEngine(Tegra::Engines::KeplerCompute* kepler_compute_,
+ Tegra::MemoryManager* gpu_memory_) {
+ kepler_compute = kepler_compute_;
+ gpu_memory = gpu_memory_;
+ }
+
private:
TextureCache& texture_cache;
BufferCache& buffer_cache;
- Tegra::MemoryManager& gpu_memory;
- Tegra::Engines::KeplerCompute& kepler_compute;
+ Tegra::MemoryManager* gpu_memory;
+ Tegra::Engines::KeplerCompute* kepler_compute;
ProgramManager& program_manager;
Shader::Info info;
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.cpp b/src/video_core/renderer_opengl/gl_fence_manager.cpp
index 6e82c2e28..91463f854 100644
--- a/src/video_core/renderer_opengl/gl_fence_manager.cpp
+++ b/src/video_core/renderer_opengl/gl_fence_manager.cpp
@@ -10,10 +10,7 @@
namespace OpenGL {
-GLInnerFence::GLInnerFence(u32 payload_, bool is_stubbed_) : FenceBase{payload_, is_stubbed_} {}
-
-GLInnerFence::GLInnerFence(GPUVAddr address_, u32 payload_, bool is_stubbed_)
- : FenceBase{address_, payload_, is_stubbed_} {}
+GLInnerFence::GLInnerFence(bool is_stubbed_) : FenceBase{is_stubbed_} {}
GLInnerFence::~GLInnerFence() = default;
@@ -48,12 +45,8 @@ FenceManagerOpenGL::FenceManagerOpenGL(VideoCore::RasterizerInterface& rasterize
BufferCache& buffer_cache_, QueryCache& query_cache_)
: GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_} {}
-Fence FenceManagerOpenGL::CreateFence(u32 value, bool is_stubbed) {
- return std::make_shared<GLInnerFence>(value, is_stubbed);
-}
-
-Fence FenceManagerOpenGL::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) {
- return std::make_shared<GLInnerFence>(addr, value, is_stubbed);
+Fence FenceManagerOpenGL::CreateFence(bool is_stubbed) {
+ return std::make_shared<GLInnerFence>(is_stubbed);
}
void FenceManagerOpenGL::QueueFence(Fence& fence) {
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.h b/src/video_core/renderer_opengl/gl_fence_manager.h
index 14ff00db2..f1446e732 100644
--- a/src/video_core/renderer_opengl/gl_fence_manager.h
+++ b/src/video_core/renderer_opengl/gl_fence_manager.h
@@ -16,8 +16,7 @@ namespace OpenGL {
class GLInnerFence : public VideoCommon::FenceBase {
public:
- explicit GLInnerFence(u32 payload_, bool is_stubbed_);
- explicit GLInnerFence(GPUVAddr address_, u32 payload_, bool is_stubbed_);
+ explicit GLInnerFence(bool is_stubbed_);
~GLInnerFence();
void Queue();
@@ -40,8 +39,7 @@ public:
QueryCache& query_cache);
protected:
- Fence CreateFence(u32 value, bool is_stubbed) override;
- Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override;
+ Fence CreateFence(bool is_stubbed) override;
void QueueFence(Fence& fence) override;
bool IsFenceSignaled(Fence& fence) const override;
void WaitFence(Fence& fence) override;
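
With the address/value pair dropped from the fence interface, semaphore writes travel as deferred callbacks instead (see SignalFence(std::function<void()>&&) in the rasterizer below) and run when the fence is released. A rough single-threaded sketch of a callback-carrying fence queue, under that assumption:

#include <deque>
#include <functional>
#include <iostream>

// Simplified: a fence owns the operation to run once the GPU work it
// guards has completed.
struct CallbackFence {
    std::function<void()> on_release;
};

class SimpleFenceManager {
public:
    void SignalFence(std::function<void()>&& func) {
        // A real implementation would also queue a host fence object here.
        pending.push_back(CallbackFence{std::move(func)});
    }

    void WaitPendingFences() {
        // Pretend every queued fence has signaled; run payloads in order.
        while (!pending.empty()) {
            pending.front().on_release();
            pending.pop_front();
        }
    }

private:
    std::deque<CallbackFence> pending;
};

int main() {
    SimpleFenceManager manager;
    manager.SignalFence([] { std::cout << "semaphore written\n"; });
    manager.WaitPendingFences();
}
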
diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
index c297f7121..daceb05f4 100644
--- a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
+++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
@@ -73,8 +73,8 @@ GLenum AssemblyStage(size_t stage_index) {
/// @param location Hardware location
/// @return Pair of ARB_transform_feedback3 token stream first and third arguments
/// @note Read https://www.khronos.org/registry/OpenGL/extensions/ARB/ARB_transform_feedback3.txt
-std::pair<GLint, GLint> TransformFeedbackEnum(u8 location) {
- const u8 index = location / 4;
+std::pair<GLint, GLint> TransformFeedbackEnum(u32 location) {
+ const auto index = location / 4;
if (index >= 8 && index <= 39) {
return {GL_GENERIC_ATTRIB_NV, index - 8};
}
@@ -169,15 +169,15 @@ ConfigureFuncPtr ConfigureFunc(const std::array<Shader::Info, 5>& infos, u32 ena
}
} // Anonymous namespace
-GraphicsPipeline::GraphicsPipeline(
- const Device& device, TextureCache& texture_cache_, BufferCache& buffer_cache_,
- Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_,
- ProgramManager& program_manager_, StateTracker& state_tracker_, ShaderWorker* thread_worker,
- VideoCore::ShaderNotify* shader_notify, std::array<std::string, 5> sources,
- std::array<std::vector<u32>, 5> sources_spirv, const std::array<const Shader::Info*, 5>& infos,
- const GraphicsPipelineKey& key_)
- : texture_cache{texture_cache_}, buffer_cache{buffer_cache_},
- gpu_memory{gpu_memory_}, maxwell3d{maxwell3d_}, program_manager{program_manager_},
+GraphicsPipeline::GraphicsPipeline(const Device& device, TextureCache& texture_cache_,
+ BufferCache& buffer_cache_, ProgramManager& program_manager_,
+ StateTracker& state_tracker_, ShaderWorker* thread_worker,
+ VideoCore::ShaderNotify* shader_notify,
+ std::array<std::string, 5> sources,
+ std::array<std::vector<u32>, 5> sources_spirv,
+ const std::array<const Shader::Info*, 5>& infos,
+ const GraphicsPipelineKey& key_)
+ : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, program_manager{program_manager_},
state_tracker{state_tracker_}, key{key_} {
if (shader_notify) {
shader_notify->MarkShaderBuilding();
@@ -285,8 +285,8 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
buffer_cache.runtime.SetBaseStorageBindings(base_storage_bindings);
buffer_cache.runtime.SetEnableStorageBuffers(use_storage_buffers);
- const auto& regs{maxwell3d.regs};
- const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex};
+ const auto& regs{maxwell3d->regs};
+ const bool via_header_index{regs.sampler_binding == Maxwell::SamplerBinding::ViaHeaderBinding};
const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE {
const Shader::Info& info{stage_infos[stage]};
buffer_cache.UnbindGraphicsStorageBuffers(stage);
@@ -299,7 +299,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
++ssbo_index;
}
}
- const auto& cbufs{maxwell3d.state.shader_stages[stage].const_buffers};
+ const auto& cbufs{maxwell3d->state.shader_stages[stage].const_buffers};
const auto read_handle{[&](const auto& desc, u32 index) {
ASSERT(cbufs[desc.cbuf_index].enabled);
const u32 index_offset{index << desc.size_shift};
@@ -312,13 +312,14 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
const u32 second_offset{desc.secondary_cbuf_offset + index_offset};
const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address +
second_offset};
- const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
- const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
+ const u32 lhs_raw{gpu_memory->Read<u32>(addr) << desc.shift_left};
+ const u32 rhs_raw{gpu_memory->Read<u32>(separate_addr)
+ << desc.secondary_shift_left};
const u32 raw{lhs_raw | rhs_raw};
return TexturePair(raw, via_header_index);
}
}
- return TexturePair(gpu_memory.Read<u32>(addr), via_header_index);
+ return TexturePair(gpu_memory->Read<u32>(addr), via_header_index);
}};
const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE {
for (u32 index = 0; index < desc.count; ++index) {
@@ -567,10 +568,25 @@ void GraphicsPipeline::GenerateTransformFeedbackState() {
++current_stream;
const auto& locations = key.xfb_state.varyings[feedback];
- std::optional<u8> current_index;
+ std::optional<u32> current_index;
for (u32 offset = 0; offset < layout.varying_count; ++offset) {
- const u8 location = locations[offset];
- const u8 index = location / 4;
+ const auto get_attribute = [&locations](u32 index) -> u32 {
+ switch (index % 4) {
+ case 0:
+ return locations[index / 4].attribute0.Value();
+ case 1:
+ return locations[index / 4].attribute1.Value();
+ case 2:
+ return locations[index / 4].attribute2.Value();
+ case 3:
+ return locations[index / 4].attribute3.Value();
+ }
+ UNREACHABLE();
+ return 0;
+ };
+
+ const auto attribute{get_attribute(offset)};
+ const auto index = attribute / 4U;
if (current_index == index) {
// Increase number of components of the previous attachment
@@ -579,7 +595,7 @@ void GraphicsPipeline::GenerateTransformFeedbackState() {
}
current_index = index;
- std::tie(cursor[0], cursor[2]) = TransformFeedbackEnum(location);
+ std::tie(cursor[0], cursor[2]) = TransformFeedbackEnum(attribute);
cursor[1] = 1;
cursor += XFB_ENTRY_STRIDE;
}
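
The varying table changed from one byte per location to four packed attribute selectors per 32-bit word, which is why get_attribute dispatches on index % 4 and why TransformFeedbackEnum now takes a u32. An equivalent plain-shift decode, assuming each attribute occupies 8 bits of its word (the diff reads them through BitField members attribute0..attribute3):

#include <array>
#include <cstdint>
#include <cstdio>

// Decode attribute `index` from a table packing four 8-bit selectors
// into each 32-bit word (assumed layout).
std::uint32_t GetAttribute(const std::array<std::uint32_t, 32>& locations,
                           std::uint32_t index) {
    const std::uint32_t word = locations[index / 4];
    return (word >> ((index % 4) * 8)) & 0xff;
}

int main() {
    std::array<std::uint32_t, 32> locations{};
    locations[0] = 0x281f1004;  // attributes 3..0, one byte each
    std::printf("%u %u %u %u\n", GetAttribute(locations, 0),
                GetAttribute(locations, 1), GetAttribute(locations, 2),
                GetAttribute(locations, 3));  // 4 16 31 40
}
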
diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.h b/src/video_core/renderer_opengl/gl_graphics_pipeline.h
index 4ec15b966..ea53ddb46 100644
--- a/src/video_core/renderer_opengl/gl_graphics_pipeline.h
+++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.h
@@ -37,8 +37,8 @@ struct GraphicsPipelineKey {
BitField<0, 1, u32> xfb_enabled;
BitField<1, 1, u32> early_z;
BitField<2, 4, Maxwell::PrimitiveTopology> gs_input_topology;
- BitField<6, 2, Maxwell::TessellationPrimitive> tessellation_primitive;
- BitField<8, 2, Maxwell::TessellationSpacing> tessellation_spacing;
+ BitField<6, 2, Maxwell::Tessellation::DomainType> tessellation_primitive;
+ BitField<8, 2, Maxwell::Tessellation::Spacing> tessellation_spacing;
BitField<10, 1, u32> tessellation_clockwise;
};
std::array<u32, 3> padding;
@@ -71,10 +71,9 @@ static_assert(std::is_trivially_constructible_v<GraphicsPipelineKey>);
class GraphicsPipeline {
public:
explicit GraphicsPipeline(const Device& device, TextureCache& texture_cache_,
- BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_,
- Tegra::Engines::Maxwell3D& maxwell3d_,
- ProgramManager& program_manager_, StateTracker& state_tracker_,
- ShaderWorker* thread_worker, VideoCore::ShaderNotify* shader_notify,
+ BufferCache& buffer_cache_, ProgramManager& program_manager_,
+ StateTracker& state_tracker_, ShaderWorker* thread_worker,
+ VideoCore::ShaderNotify* shader_notify,
std::array<std::string, 5> sources,
std::array<std::vector<u32>, 5> sources_spirv,
const std::array<const Shader::Info*, 5>& infos,
@@ -107,6 +106,11 @@ public:
};
}
+ void SetEngine(Tegra::Engines::Maxwell3D* maxwell3d_, Tegra::MemoryManager* gpu_memory_) {
+ maxwell3d = maxwell3d_;
+ gpu_memory = gpu_memory_;
+ }
+
private:
template <typename Spec>
void ConfigureImpl(bool is_indexed);
@@ -119,8 +123,8 @@ private:
TextureCache& texture_cache;
BufferCache& buffer_cache;
- Tegra::MemoryManager& gpu_memory;
- Tegra::Engines::Maxwell3D& maxwell3d;
+ Tegra::MemoryManager* gpu_memory;
+ Tegra::Engines::Maxwell3D* maxwell3d;
ProgramManager& program_manager;
StateTracker& state_tracker;
const GraphicsPipelineKey key;
diff --git a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp
index ed40f5791..5070db441 100644
--- a/src/video_core/renderer_opengl/gl_query_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_query_cache.cpp
@@ -26,9 +26,8 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) {
} // Anonymous namespace
-QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::MemoryManager& gpu_memory_)
- : QueryCacheBase(rasterizer_, maxwell3d_, gpu_memory_), gl_rasterizer{rasterizer_} {}
+QueryCache::QueryCache(RasterizerOpenGL& rasterizer_)
+ : QueryCacheBase(rasterizer_), gl_rasterizer{rasterizer_} {}
QueryCache::~QueryCache() = default;
diff --git a/src/video_core/renderer_opengl/gl_query_cache.h b/src/video_core/renderer_opengl/gl_query_cache.h
index 8a49f1ef0..14ce59990 100644
--- a/src/video_core/renderer_opengl/gl_query_cache.h
+++ b/src/video_core/renderer_opengl/gl_query_cache.h
@@ -28,8 +28,7 @@ using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>;
class QueryCache final
: public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> {
public:
- explicit QueryCache(RasterizerOpenGL& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::MemoryManager& gpu_memory_);
+ explicit QueryCache(RasterizerOpenGL& rasterizer_);
~QueryCache();
OGLQuery AllocateQuery(VideoCore::QueryType type);
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index f018333af..79d7908d4 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -16,7 +16,7 @@
#include "common/microprofile.h"
#include "common/scope_exit.h"
#include "common/settings.h"
-
+#include "video_core/control/channel_state.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
@@ -56,22 +56,20 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra
Core::Memory::Memory& cpu_memory_, const Device& device_,
ScreenInfo& screen_info_, ProgramManager& program_manager_,
StateTracker& state_tracker_)
- : RasterizerAccelerated(cpu_memory_), gpu(gpu_), maxwell3d(gpu.Maxwell3D()),
- kepler_compute(gpu.KeplerCompute()), gpu_memory(gpu.MemoryManager()), device(device_),
- screen_info(screen_info_), program_manager(program_manager_), state_tracker(state_tracker_),
+ : RasterizerAccelerated(cpu_memory_), gpu(gpu_), device(device_), screen_info(screen_info_),
+ program_manager(program_manager_), state_tracker(state_tracker_),
texture_cache_runtime(device, program_manager, state_tracker),
- texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory),
- buffer_cache_runtime(device),
- buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime),
- shader_cache(*this, emu_window_, maxwell3d, kepler_compute, gpu_memory, device, texture_cache,
- buffer_cache, program_manager, state_tracker, gpu.ShaderNotify()),
- query_cache(*this, maxwell3d, gpu_memory), accelerate_dma(buffer_cache),
+ texture_cache(texture_cache_runtime, *this), buffer_cache_runtime(device),
+ buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
+ shader_cache(*this, emu_window_, device, texture_cache, buffer_cache, program_manager,
+ state_tracker, gpu.ShaderNotify()),
+ query_cache(*this), accelerate_dma(buffer_cache),
fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache) {}
RasterizerOpenGL::~RasterizerOpenGL() = default;
void RasterizerOpenGL::SyncVertexFormats() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::VertexFormats]) {
return;
}
@@ -89,7 +87,7 @@ void RasterizerOpenGL::SyncVertexFormats() {
}
flags[Dirty::VertexFormat0 + index] = false;
- const auto attrib = maxwell3d.regs.vertex_attrib_format[index];
+ const auto& attrib = maxwell3d->regs.vertex_attrib_format[index];
const auto gl_index = static_cast<GLuint>(index);
// Disable constant attributes.
@@ -99,8 +97,8 @@ void RasterizerOpenGL::SyncVertexFormats() {
}
glEnableVertexAttribArray(gl_index);
- if (attrib.type == Maxwell::VertexAttribute::Type::SignedInt ||
- attrib.type == Maxwell::VertexAttribute::Type::UnsignedInt) {
+ if (attrib.type == Maxwell::VertexAttribute::Type::SInt ||
+ attrib.type == Maxwell::VertexAttribute::Type::UInt) {
glVertexAttribIFormat(gl_index, attrib.ComponentCount(),
MaxwellToGL::VertexFormat(attrib), attrib.offset);
} else {
@@ -113,13 +111,13 @@ void RasterizerOpenGL::SyncVertexFormats() {
}
void RasterizerOpenGL::SyncVertexInstances() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::VertexInstances]) {
return;
}
flags[Dirty::VertexInstances] = false;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
for (std::size_t index = 0; index < NUM_SUPPORTED_VERTEX_ATTRIBUTES; ++index) {
if (!flags[Dirty::VertexInstance0 + index]) {
continue;
@@ -127,8 +125,8 @@ void RasterizerOpenGL::SyncVertexInstances() {
flags[Dirty::VertexInstance0 + index] = false;
const auto gl_index = static_cast<GLuint>(index);
- const bool instancing_enabled = regs.instanced_arrays.IsInstancingEnabled(gl_index);
- const GLuint divisor = instancing_enabled ? regs.vertex_array[index].divisor : 0;
+ const bool instancing_enabled = regs.vertex_stream_instances.IsInstancingEnabled(gl_index);
+ const GLuint divisor = instancing_enabled ? regs.vertex_streams[index].frequency : 0;
glVertexBindingDivisor(gl_index, divisor);
}
}
@@ -140,36 +138,36 @@ void RasterizerOpenGL::LoadDiskResources(u64 title_id, std::stop_token stop_load
void RasterizerOpenGL::Clear() {
MICROPROFILE_SCOPE(OpenGL_Clears);
- if (!maxwell3d.ShouldExecute()) {
+ if (!maxwell3d->ShouldExecute()) {
return;
}
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
bool use_color{};
bool use_depth{};
bool use_stencil{};
- if (regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B ||
- regs.clear_buffers.A) {
+ if (regs.clear_surface.R || regs.clear_surface.G || regs.clear_surface.B ||
+ regs.clear_surface.A) {
use_color = true;
- const GLuint index = regs.clear_buffers.RT;
+ const GLuint index = regs.clear_surface.RT;
state_tracker.NotifyColorMask(index);
- glColorMaski(index, regs.clear_buffers.R != 0, regs.clear_buffers.G != 0,
- regs.clear_buffers.B != 0, regs.clear_buffers.A != 0);
+ glColorMaski(index, regs.clear_surface.R != 0, regs.clear_surface.G != 0,
+ regs.clear_surface.B != 0, regs.clear_surface.A != 0);
// TODO(Rodrigo): Determine if clamping is used on clears
SyncFragmentColorClampState();
SyncFramebufferSRGB();
}
- if (regs.clear_buffers.Z) {
+ if (regs.clear_surface.Z) {
ASSERT_MSG(regs.zeta_enable != 0, "Tried to clear Z but buffer is not enabled!");
use_depth = true;
state_tracker.NotifyDepthMask();
glDepthMask(GL_TRUE);
}
- if (regs.clear_buffers.S) {
+ if (regs.clear_surface.S) {
ASSERT_MSG(regs.zeta_enable, "Tried to clear stencil but buffer is not enabled!");
use_stencil = true;
}
@@ -186,16 +184,16 @@ void RasterizerOpenGL::Clear() {
texture_cache.UpdateRenderTargets(true);
state_tracker.BindFramebuffer(texture_cache.GetFramebuffer()->Handle());
SyncViewport();
- if (regs.clear_flags.scissor) {
+ if (regs.clear_control.use_scissor) {
SyncScissorTest();
} else {
state_tracker.NotifyScissor0();
glDisablei(GL_SCISSOR_TEST, 0);
}
- UNIMPLEMENTED_IF(regs.clear_flags.viewport);
+ UNIMPLEMENTED_IF(regs.clear_control.use_viewport_clip0);
if (use_color) {
- glClearBufferfv(GL_COLOR, regs.clear_buffers.RT, regs.clear_color);
+ glClearBufferfv(GL_COLOR, regs.clear_surface.RT, regs.clear_color.data());
}
if (use_depth && use_stencil) {
glClearBufferfi(GL_DEPTH_STENCIL, 0, regs.clear_depth, regs.clear_stencil);
@@ -207,7 +205,7 @@ void RasterizerOpenGL::Clear() {
++num_queued_commands;
}
-void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
+void RasterizerOpenGL::Draw(bool is_indexed, u32 instance_count) {
MICROPROFILE_SCOPE(OpenGL_Drawing);
SCOPE_EXIT({ gpu.TickWork(); });
@@ -217,22 +215,27 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
if (!pipeline) {
return;
}
+
+ gpu.TickWork();
+
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ pipeline->SetEngine(maxwell3d, gpu_memory);
pipeline->Configure(is_indexed);
+ BindInlineIndexBuffer();
+
SyncState();
- const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d.regs.draw.topology);
+ const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d->regs.draw.topology);
BeginTransformFeedback(pipeline, primitive_mode);
- const GLuint base_instance = static_cast<GLuint>(maxwell3d.regs.vb_base_instance);
- const GLsizei num_instances =
- static_cast<GLsizei>(is_instanced ? maxwell3d.mme_draw.instance_count : 1);
+ const GLuint base_instance = static_cast<GLuint>(maxwell3d->regs.global_base_instance_index);
+ const GLsizei num_instances = static_cast<GLsizei>(instance_count);
if (is_indexed) {
- const GLint base_vertex = static_cast<GLint>(maxwell3d.regs.vb_element_base);
- const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d.regs.index_array.count);
+ const GLint base_vertex = static_cast<GLint>(maxwell3d->regs.global_base_vertex_index);
+ const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d->regs.index_buffer.count);
const GLvoid* const offset = buffer_cache_runtime.IndexOffset();
- const GLenum format = MaxwellToGL::IndexFormat(maxwell3d.regs.index_array.format);
+ const GLenum format = MaxwellToGL::IndexFormat(maxwell3d->regs.index_buffer.format);
if (num_instances == 1 && base_instance == 0 && base_vertex == 0) {
glDrawElements(primitive_mode, num_vertices, format, offset);
} else if (num_instances == 1 && base_instance == 0) {
@@ -251,8 +254,8 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
base_instance);
}
} else {
- const GLint base_vertex = static_cast<GLint>(maxwell3d.regs.vertex_buffer.first);
- const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d.regs.vertex_buffer.count);
+ const GLint base_vertex = static_cast<GLint>(maxwell3d->regs.vertex_buffer.first);
+ const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d->regs.vertex_buffer.count);
if (num_instances == 1 && base_instance == 0) {
glDrawArrays(primitive_mode, base_vertex, num_vertices);
} else if (base_instance == 0) {
@@ -273,8 +276,9 @@ void RasterizerOpenGL::DispatchCompute() {
if (!pipeline) {
return;
}
+ pipeline->SetEngine(kepler_compute, gpu_memory);
pipeline->Configure();
- const auto& qmd{kepler_compute.launch_description};
+ const auto& qmd{kepler_compute->launch_description};
glDispatchCompute(qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z);
++num_queued_commands;
has_written_global_memory |= pipeline->WritesGlobalMemory();
@@ -359,7 +363,7 @@ void RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) {
}
}
-void RasterizerOpenGL::SyncGuestHost() {
+void RasterizerOpenGL::InvalidateGPUCache() {
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
shader_cache.SyncGuestHost();
{
@@ -380,40 +384,30 @@ void RasterizerOpenGL::UnmapMemory(VAddr addr, u64 size) {
shader_cache.OnCPUWrite(addr, size);
}
-void RasterizerOpenGL::ModifyGPUMemory(GPUVAddr addr, u64 size) {
+void RasterizerOpenGL::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) {
{
std::scoped_lock lock{texture_cache.mutex};
- texture_cache.UnmapGPUMemory(addr, size);
+ texture_cache.UnmapGPUMemory(as_id, addr, size);
}
}
-void RasterizerOpenGL::SignalSemaphore(GPUVAddr addr, u32 value) {
- if (!gpu.IsAsync()) {
- gpu_memory.Write<u32>(addr, value);
- return;
- }
- fence_manager.SignalSemaphore(addr, value);
+void RasterizerOpenGL::SignalFence(std::function<void()>&& func) {
+ fence_manager.SignalFence(std::move(func));
+}
+
+void RasterizerOpenGL::SyncOperation(std::function<void()>&& func) {
+ fence_manager.SyncOperation(std::move(func));
}
void RasterizerOpenGL::SignalSyncPoint(u32 value) {
- if (!gpu.IsAsync()) {
- gpu.IncrementSyncPoint(value);
- return;
- }
fence_manager.SignalSyncPoint(value);
}
void RasterizerOpenGL::SignalReference() {
- if (!gpu.IsAsync()) {
- return;
- }
fence_manager.SignalOrdering();
}
void RasterizerOpenGL::ReleaseFences() {
- if (!gpu.IsAsync()) {
- return;
- }
fence_manager.WaitPendingFences();
}
@@ -430,6 +424,7 @@ void RasterizerOpenGL::WaitForIdle() {
}
void RasterizerOpenGL::FragmentBarrier() {
+ glTextureBarrier();
glMemoryBarrier(GL_FRAMEBUFFER_BARRIER_BIT | GL_TEXTURE_FETCH_BARRIER_BIT);
}
@@ -482,13 +477,13 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerOpenGL::AccessAccelerateDMA()
}
void RasterizerOpenGL::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
- std::span<u8> memory) {
- auto cpu_addr = gpu_memory.GpuToCpuAddress(address);
+ std::span<const u8> memory) {
+ auto cpu_addr = gpu_memory->GpuToCpuAddress(address);
if (!cpu_addr) [[unlikely]] {
- gpu_memory.WriteBlock(address, memory.data(), copy_size);
+ gpu_memory->WriteBlock(address, memory.data(), copy_size);
return;
}
- gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size);
+ gpu_memory->WriteBlockUnsafe(address, memory.data(), copy_size);
{
std::unique_lock<std::mutex> lock{buffer_cache.mutex};
if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) {
@@ -551,8 +546,8 @@ void RasterizerOpenGL::SyncState() {
}
void RasterizerOpenGL::SyncViewport() {
- auto& flags = maxwell3d.dirty.flags;
- const auto& regs = maxwell3d.regs;
+ auto& flags = maxwell3d->dirty.flags;
+ const auto& regs = maxwell3d->regs;
const bool rescale_viewports = flags[VideoCommon::Dirty::RescaleViewports];
const bool dirty_viewport = flags[Dirty::Viewports] || rescale_viewports;
@@ -561,9 +556,9 @@ void RasterizerOpenGL::SyncViewport() {
if (dirty_viewport || dirty_clip_control || flags[Dirty::FrontFace]) {
flags[Dirty::FrontFace] = false;
- GLenum mode = MaxwellToGL::FrontFace(regs.front_face);
+ GLenum mode = MaxwellToGL::FrontFace(regs.gl_front_face);
bool flip_faces = true;
- if (regs.screen_y_control.triangle_rast_flip != 0) {
+ if (regs.window_origin.flip_y != 0) {
flip_faces = !flip_faces;
}
if (regs.viewport_transform[0].scale_y < 0.0f) {
@@ -588,14 +583,15 @@ void RasterizerOpenGL::SyncViewport() {
if (regs.viewport_transform[0].scale_y < 0.0f) {
flip_y = !flip_y;
}
- if (regs.screen_y_control.y_negate != 0) {
+ const bool lower_left{regs.window_origin.mode != Maxwell::WindowOrigin::Mode::UpperLeft};
+ if (lower_left) {
flip_y = !flip_y;
}
const bool is_zero_to_one = regs.depth_mode == Maxwell::DepthMode::ZeroToOne;
const GLenum origin = flip_y ? GL_UPPER_LEFT : GL_LOWER_LEFT;
const GLenum depth = is_zero_to_one ? GL_ZERO_TO_ONE : GL_NEGATIVE_ONE_TO_ONE;
state_tracker.ClipControl(origin, depth);
- state_tracker.SetYNegate(regs.screen_y_control.y_negate != 0);
+ state_tracker.SetYNegate(lower_left);
}
const bool is_rescaling{texture_cache.IsRescaling()};
const float scale = is_rescaling ? Settings::values.resolution_info.up_factor : 1.0f;
@@ -667,23 +663,29 @@ void RasterizerOpenGL::SyncViewport() {
}
void RasterizerOpenGL::SyncDepthClamp() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::DepthClampEnabled]) {
return;
}
flags[Dirty::DepthClampEnabled] = false;
- oglEnable(GL_DEPTH_CLAMP, maxwell3d.regs.view_volume_clip_control.depth_clamp_disabled == 0);
+ bool depth_clamp_disabled{maxwell3d->regs.viewport_clip_control.geometry_clip ==
+ Maxwell::ViewportClipControl::GeometryClip::Passthrough ||
+ maxwell3d->regs.viewport_clip_control.geometry_clip ==
+ Maxwell::ViewportClipControl::GeometryClip::FrustumXYZ ||
+ maxwell3d->regs.viewport_clip_control.geometry_clip ==
+ Maxwell::ViewportClipControl::GeometryClip::FrustumZ};
+ oglEnable(GL_DEPTH_CLAMP, !depth_clamp_disabled);
}
void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::ClipDistances] && !flags[VideoCommon::Dirty::Shaders]) {
return;
}
flags[Dirty::ClipDistances] = false;
- clip_mask &= maxwell3d.regs.clip_distance_enabled;
+ clip_mask &= maxwell3d->regs.user_clip_enable.raw;
if (clip_mask == last_clip_distance_mask) {
return;
}
@@ -699,15 +701,15 @@ void RasterizerOpenGL::SyncClipCoef() {
}
void RasterizerOpenGL::SyncCullMode() {
- auto& flags = maxwell3d.dirty.flags;
- const auto& regs = maxwell3d.regs;
+ auto& flags = maxwell3d->dirty.flags;
+ const auto& regs = maxwell3d->regs;
if (flags[Dirty::CullTest]) {
flags[Dirty::CullTest] = false;
- if (regs.cull_test_enabled) {
+ if (regs.gl_cull_test_enabled) {
glEnable(GL_CULL_FACE);
- glCullFace(MaxwellToGL::CullFace(regs.cull_face));
+ glCullFace(MaxwellToGL::CullFace(regs.gl_cull_face));
} else {
glDisable(GL_CULL_FACE);
}
@@ -715,23 +717,23 @@ void RasterizerOpenGL::SyncCullMode() {
}
void RasterizerOpenGL::SyncPrimitiveRestart() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::PrimitiveRestart]) {
return;
}
flags[Dirty::PrimitiveRestart] = false;
- if (maxwell3d.regs.primitive_restart.enabled) {
+ if (maxwell3d->regs.primitive_restart.enabled) {
glEnable(GL_PRIMITIVE_RESTART);
- glPrimitiveRestartIndex(maxwell3d.regs.primitive_restart.index);
+ glPrimitiveRestartIndex(maxwell3d->regs.primitive_restart.index);
} else {
glDisable(GL_PRIMITIVE_RESTART);
}
}
void RasterizerOpenGL::SyncDepthTestState() {
- auto& flags = maxwell3d.dirty.flags;
- const auto& regs = maxwell3d.regs;
+ auto& flags = maxwell3d->dirty.flags;
+ const auto& regs = maxwell3d->regs;
if (flags[Dirty::DepthMask]) {
flags[Dirty::DepthMask] = false;
@@ -750,28 +752,28 @@ void RasterizerOpenGL::SyncDepthTestState() {
}
void RasterizerOpenGL::SyncStencilTestState() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::StencilTest]) {
return;
}
flags[Dirty::StencilTest] = false;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
oglEnable(GL_STENCIL_TEST, regs.stencil_enable);
- glStencilFuncSeparate(GL_FRONT, MaxwellToGL::ComparisonOp(regs.stencil_front_func_func),
- regs.stencil_front_func_ref, regs.stencil_front_func_mask);
- glStencilOpSeparate(GL_FRONT, MaxwellToGL::StencilOp(regs.stencil_front_op_fail),
- MaxwellToGL::StencilOp(regs.stencil_front_op_zfail),
- MaxwellToGL::StencilOp(regs.stencil_front_op_zpass));
+ glStencilFuncSeparate(GL_FRONT, MaxwellToGL::ComparisonOp(regs.stencil_front_op.func),
+ regs.stencil_front_ref, regs.stencil_front_func_mask);
+ glStencilOpSeparate(GL_FRONT, MaxwellToGL::StencilOp(regs.stencil_front_op.fail),
+ MaxwellToGL::StencilOp(regs.stencil_front_op.zfail),
+ MaxwellToGL::StencilOp(regs.stencil_front_op.zpass));
glStencilMaskSeparate(GL_FRONT, regs.stencil_front_mask);
if (regs.stencil_two_side_enable) {
- glStencilFuncSeparate(GL_BACK, MaxwellToGL::ComparisonOp(regs.stencil_back_func_func),
- regs.stencil_back_func_ref, regs.stencil_back_func_mask);
- glStencilOpSeparate(GL_BACK, MaxwellToGL::StencilOp(regs.stencil_back_op_fail),
- MaxwellToGL::StencilOp(regs.stencil_back_op_zfail),
- MaxwellToGL::StencilOp(regs.stencil_back_op_zpass));
+ glStencilFuncSeparate(GL_BACK, MaxwellToGL::ComparisonOp(regs.stencil_back_op.func),
+ regs.stencil_back_ref, regs.stencil_back_mask);
+ glStencilOpSeparate(GL_BACK, MaxwellToGL::StencilOp(regs.stencil_back_op.fail),
+ MaxwellToGL::StencilOp(regs.stencil_back_op.zfail),
+ MaxwellToGL::StencilOp(regs.stencil_back_op.zpass));
glStencilMaskSeparate(GL_BACK, regs.stencil_back_mask);
} else {
glStencilFuncSeparate(GL_BACK, GL_ALWAYS, 0, 0xFFFFFFFF);
@@ -781,24 +783,24 @@ void RasterizerOpenGL::SyncStencilTestState() {
}
void RasterizerOpenGL::SyncRasterizeEnable() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::RasterizeEnable]) {
return;
}
flags[Dirty::RasterizeEnable] = false;
- oglEnable(GL_RASTERIZER_DISCARD, maxwell3d.regs.rasterize_enable == 0);
+ oglEnable(GL_RASTERIZER_DISCARD, maxwell3d->regs.rasterize_enable == 0);
}
void RasterizerOpenGL::SyncPolygonModes() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::PolygonModes]) {
return;
}
flags[Dirty::PolygonModes] = false;
- const auto& regs = maxwell3d.regs;
- if (regs.fill_rectangle) {
+ const auto& regs = maxwell3d->regs;
+ if (regs.fill_via_triangle_mode != Maxwell::FillViaTriangleMode::Disabled) {
if (!GLAD_GL_NV_fill_rectangle) {
LOG_ERROR(Render_OpenGL, "GL_NV_fill_rectangle used and not supported");
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
@@ -830,7 +832,7 @@ void RasterizerOpenGL::SyncPolygonModes() {
}
void RasterizerOpenGL::SyncColorMask() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::ColorMasks]) {
return;
}
@@ -839,7 +841,7 @@ void RasterizerOpenGL::SyncColorMask() {
const bool force = flags[Dirty::ColorMaskCommon];
flags[Dirty::ColorMaskCommon] = false;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
if (regs.color_mask_common) {
if (!force && !flags[Dirty::ColorMask0]) {
return;
@@ -864,30 +866,31 @@ void RasterizerOpenGL::SyncColorMask() {
}
void RasterizerOpenGL::SyncMultiSampleState() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::MultisampleControl]) {
return;
}
flags[Dirty::MultisampleControl] = false;
- const auto& regs = maxwell3d.regs;
- oglEnable(GL_SAMPLE_ALPHA_TO_COVERAGE, regs.multisample_control.alpha_to_coverage);
- oglEnable(GL_SAMPLE_ALPHA_TO_ONE, regs.multisample_control.alpha_to_one);
+ const auto& regs = maxwell3d->regs;
+ oglEnable(GL_SAMPLE_ALPHA_TO_COVERAGE, regs.anti_alias_alpha_control.alpha_to_coverage);
+ oglEnable(GL_SAMPLE_ALPHA_TO_ONE, regs.anti_alias_alpha_control.alpha_to_one);
}
void RasterizerOpenGL::SyncFragmentColorClampState() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::FragmentClampColor]) {
return;
}
flags[Dirty::FragmentClampColor] = false;
- glClampColor(GL_CLAMP_FRAGMENT_COLOR, maxwell3d.regs.frag_color_clamp ? GL_TRUE : GL_FALSE);
+ glClampColor(GL_CLAMP_FRAGMENT_COLOR,
+ maxwell3d->regs.frag_color_clamp.AnyEnabled() ? GL_TRUE : GL_FALSE);
}
void RasterizerOpenGL::SyncBlendState() {
- auto& flags = maxwell3d.dirty.flags;
- const auto& regs = maxwell3d.regs;
+ auto& flags = maxwell3d->dirty.flags;
+ const auto& regs = maxwell3d->regs;
if (flags[Dirty::BlendColor]) {
flags[Dirty::BlendColor] = false;
@@ -902,18 +905,18 @@ void RasterizerOpenGL::SyncBlendState() {
}
flags[Dirty::BlendStates] = false;
- if (!regs.independent_blend_enable) {
+ if (!regs.blend_per_target_enabled) {
if (!regs.blend.enable[0]) {
glDisable(GL_BLEND);
return;
}
glEnable(GL_BLEND);
- glBlendFuncSeparate(MaxwellToGL::BlendFunc(regs.blend.factor_source_rgb),
- MaxwellToGL::BlendFunc(regs.blend.factor_dest_rgb),
- MaxwellToGL::BlendFunc(regs.blend.factor_source_a),
- MaxwellToGL::BlendFunc(regs.blend.factor_dest_a));
- glBlendEquationSeparate(MaxwellToGL::BlendEquation(regs.blend.equation_rgb),
- MaxwellToGL::BlendEquation(regs.blend.equation_a));
+ glBlendFuncSeparate(MaxwellToGL::BlendFunc(regs.blend.color_source),
+ MaxwellToGL::BlendFunc(regs.blend.color_dest),
+ MaxwellToGL::BlendFunc(regs.blend.alpha_source),
+ MaxwellToGL::BlendFunc(regs.blend.alpha_dest));
+ glBlendEquationSeparate(MaxwellToGL::BlendEquation(regs.blend.color_op),
+ MaxwellToGL::BlendEquation(regs.blend.alpha_op));
return;
}
@@ -932,35 +935,34 @@ void RasterizerOpenGL::SyncBlendState() {
}
glEnablei(GL_BLEND, static_cast<GLuint>(i));
- const auto& src = regs.independent_blend[i];
- glBlendFuncSeparatei(static_cast<GLuint>(i), MaxwellToGL::BlendFunc(src.factor_source_rgb),
- MaxwellToGL::BlendFunc(src.factor_dest_rgb),
- MaxwellToGL::BlendFunc(src.factor_source_a),
- MaxwellToGL::BlendFunc(src.factor_dest_a));
- glBlendEquationSeparatei(static_cast<GLuint>(i),
- MaxwellToGL::BlendEquation(src.equation_rgb),
- MaxwellToGL::BlendEquation(src.equation_a));
+ const auto& src = regs.blend_per_target[i];
+ glBlendFuncSeparatei(static_cast<GLuint>(i), MaxwellToGL::BlendFunc(src.color_source),
+ MaxwellToGL::BlendFunc(src.color_dest),
+ MaxwellToGL::BlendFunc(src.alpha_source),
+ MaxwellToGL::BlendFunc(src.alpha_dest));
+ glBlendEquationSeparatei(static_cast<GLuint>(i), MaxwellToGL::BlendEquation(src.color_op),
+ MaxwellToGL::BlendEquation(src.alpha_op));
}
}
void RasterizerOpenGL::SyncLogicOpState() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::LogicOp]) {
return;
}
flags[Dirty::LogicOp] = false;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
if (regs.logic_op.enable) {
glEnable(GL_COLOR_LOGIC_OP);
- glLogicOp(MaxwellToGL::LogicOp(regs.logic_op.operation));
+ glLogicOp(MaxwellToGL::LogicOp(regs.logic_op.op));
} else {
glDisable(GL_COLOR_LOGIC_OP);
}
}
void RasterizerOpenGL::SyncScissorTest() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::Scissors] && !flags[VideoCommon::Dirty::RescaleScissors]) {
return;
}
@@ -969,7 +971,7 @@ void RasterizerOpenGL::SyncScissorTest() {
const bool force = flags[VideoCommon::Dirty::RescaleScissors];
flags[VideoCommon::Dirty::RescaleScissors] = false;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
const auto& resolution = Settings::values.resolution_info;
const bool is_rescaling{texture_cache.IsRescaling()};
@@ -1005,39 +1007,39 @@ void RasterizerOpenGL::SyncScissorTest() {
}
void RasterizerOpenGL::SyncPointState() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::PointSize]) {
return;
}
flags[Dirty::PointSize] = false;
- oglEnable(GL_POINT_SPRITE, maxwell3d.regs.point_sprite_enable);
- oglEnable(GL_PROGRAM_POINT_SIZE, maxwell3d.regs.vp_point_size.enable);
+ oglEnable(GL_POINT_SPRITE, maxwell3d->regs.point_sprite_enable);
+ oglEnable(GL_PROGRAM_POINT_SIZE, maxwell3d->regs.point_size_attribute.enabled);
const bool is_rescaling{texture_cache.IsRescaling()};
const float scale = is_rescaling ? Settings::values.resolution_info.up_factor : 1.0f;
- glPointSize(std::max(1.0f, maxwell3d.regs.point_size * scale));
+ glPointSize(std::max(1.0f, maxwell3d->regs.point_size * scale));
}
void RasterizerOpenGL::SyncLineState() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::LineWidth]) {
return;
}
flags[Dirty::LineWidth] = false;
- const auto& regs = maxwell3d.regs;
- oglEnable(GL_LINE_SMOOTH, regs.line_smooth_enable);
- glLineWidth(regs.line_smooth_enable ? regs.line_width_smooth : regs.line_width_aliased);
+ const auto& regs = maxwell3d->regs;
+ oglEnable(GL_LINE_SMOOTH, regs.line_anti_alias_enable);
+ glLineWidth(regs.line_anti_alias_enable ? regs.line_width_smooth : regs.line_width_aliased);
}
void RasterizerOpenGL::SyncPolygonOffset() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::PolygonOffset]) {
return;
}
flags[Dirty::PolygonOffset] = false;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
oglEnable(GL_POLYGON_OFFSET_FILL, regs.polygon_offset_fill_enable);
oglEnable(GL_POLYGON_OFFSET_LINE, regs.polygon_offset_line_enable);
oglEnable(GL_POLYGON_OFFSET_POINT, regs.polygon_offset_point_enable);
@@ -1045,19 +1047,19 @@ void RasterizerOpenGL::SyncPolygonOffset() {
if (regs.polygon_offset_fill_enable || regs.polygon_offset_line_enable ||
regs.polygon_offset_point_enable) {
// Hardware divides polygon offset units by two
- glPolygonOffsetClamp(regs.polygon_offset_factor, regs.polygon_offset_units / 2.0f,
- regs.polygon_offset_clamp);
+ glPolygonOffsetClamp(regs.slope_scale_depth_bias, regs.depth_bias / 2.0f,
+ regs.depth_bias_clamp);
}
}
void RasterizerOpenGL::SyncAlphaTest() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::AlphaTest]) {
return;
}
flags[Dirty::AlphaTest] = false;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
if (regs.alpha_test_enabled) {
glEnable(GL_ALPHA_TEST);
glAlphaFunc(MaxwellToGL::ComparisonOp(regs.alpha_test_func), regs.alpha_test_ref);
@@ -1067,25 +1069,25 @@ void RasterizerOpenGL::SyncAlphaTest() {
}
void RasterizerOpenGL::SyncFramebufferSRGB() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::FramebufferSRGB]) {
return;
}
flags[Dirty::FramebufferSRGB] = false;
- oglEnable(GL_FRAMEBUFFER_SRGB, maxwell3d.regs.framebuffer_srgb);
+ oglEnable(GL_FRAMEBUFFER_SRGB, maxwell3d->regs.framebuffer_srgb);
}
void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum primitive_mode) {
- const auto& regs = maxwell3d.regs;
- if (regs.tfb_enabled == 0) {
+ const auto& regs = maxwell3d->regs;
+ if (regs.transform_feedback_enabled == 0) {
return;
}
program->ConfigureTransformFeedback();
- UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationControl) ||
- regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationEval) ||
- regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::Geometry));
+ UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderType::TessellationInit) ||
+ regs.IsShaderConfigEnabled(Maxwell::ShaderType::Tessellation) ||
+ regs.IsShaderConfigEnabled(Maxwell::ShaderType::Geometry));
UNIMPLEMENTED_IF(primitive_mode != GL_POINTS);
// We may have to call BeginTransformFeedbackNV here since they seem to call different
@@ -1096,11 +1098,58 @@ void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum
}
void RasterizerOpenGL::EndTransformFeedback() {
- if (maxwell3d.regs.tfb_enabled != 0) {
+ if (maxwell3d->regs.transform_feedback_enabled != 0) {
glEndTransformFeedback();
}
}
+void RasterizerOpenGL::InitializeChannel(Tegra::Control::ChannelState& channel) {
+ CreateChannel(channel);
+ {
+ std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ texture_cache.CreateChannel(channel);
+ buffer_cache.CreateChannel(channel);
+ }
+ shader_cache.CreateChannel(channel);
+ query_cache.CreateChannel(channel);
+ state_tracker.SetupTables(channel);
+}
+
+void RasterizerOpenGL::BindChannel(Tegra::Control::ChannelState& channel) {
+ const s32 channel_id = channel.bind_id;
+ BindToChannel(channel_id);
+ {
+ std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ texture_cache.BindToChannel(channel_id);
+ buffer_cache.BindToChannel(channel_id);
+ }
+ shader_cache.BindToChannel(channel_id);
+ query_cache.BindToChannel(channel_id);
+ state_tracker.ChangeChannel(channel);
+ state_tracker.InvalidateState();
+}
+
+void RasterizerOpenGL::ReleaseChannel(s32 channel_id) {
+ EraseChannel(channel_id);
+ {
+ std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ texture_cache.EraseChannel(channel_id);
+ buffer_cache.EraseChannel(channel_id);
+ }
+ shader_cache.EraseChannel(channel_id);
+ query_cache.EraseChannel(channel_id);
+}
+
+void RasterizerOpenGL::BindInlineIndexBuffer() {
+ if (maxwell3d->inline_index_draw_indexes.empty()) {
+ return;
+ }
+ const auto data_count = static_cast<u32>(maxwell3d->inline_index_draw_indexes.size());
+ auto buffer = Buffer(buffer_cache_runtime, *this, 0, data_count);
+ buffer.ImmediateUpload(0, maxwell3d->inline_index_draw_indexes);
+ buffer_cache_runtime.BindIndexBuffer(buffer, 0, data_count);
+}
+
AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {}
bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
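
The new InitializeChannel/BindChannel/ReleaseChannel overrides fan each channel event out to every cache plus the state tracker. A compact sketch of that fan-out with the caches collapsed to one stub interface (the real caches implement CreateChannel/BindToChannel/EraseChannel via ChannelSetupCaches):

#include <cstdint>
#include <vector>

struct ChannelState { std::int32_t bind_id = 0; };  // stub

// Minimal per-channel cache interface the rasterizer drives.
class ChannelAwareCache {
public:
    void CreateChannel(const ChannelState& state) { known_ids.push_back(state.bind_id); }
    void BindToChannel(std::int32_t id) { bound_id = id; }
    void EraseChannel(std::int32_t id) { std::erase(known_ids, id); }

private:
    std::vector<std::int32_t> known_ids;
    std::int32_t bound_id = -1;
};

class Rasterizer {
public:
    void InitializeChannel(const ChannelState& channel) {
        for (auto* cache : caches) cache->CreateChannel(channel);
    }
    void BindChannel(const ChannelState& channel) {
        for (auto* cache : caches) cache->BindToChannel(channel.bind_id);
    }
    void ReleaseChannel(std::int32_t channel_id) {
        for (auto* cache : caches) cache->EraseChannel(channel_id);
    }

private:
    ChannelAwareCache texture_cache, buffer_cache, shader_cache, query_cache;
    std::vector<ChannelAwareCache*> caches{&texture_cache, &buffer_cache,
                                           &shader_cache, &query_cache};
};

int main() {
    Rasterizer rasterizer;
    const ChannelState channel{.bind_id = 7};
    rasterizer.InitializeChannel(channel);
    rasterizer.BindChannel(channel);
    rasterizer.ReleaseChannel(7);
}
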
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 31a16fcba..793e0d608 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -12,6 +12,7 @@
#include <glad/glad.h>
#include "common/common_types.h"
+#include "video_core/control/channel_state_cache.h"
#include "video_core/engines/maxwell_dma.h"
#include "video_core/rasterizer_accelerated.h"
#include "video_core/rasterizer_interface.h"
@@ -58,7 +59,8 @@ private:
BufferCache& buffer_cache;
};
-class RasterizerOpenGL : public VideoCore::RasterizerAccelerated {
+class RasterizerOpenGL : public VideoCore::RasterizerAccelerated,
+ protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
public:
explicit RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
Core::Memory::Memory& cpu_memory_, const Device& device_,
@@ -66,7 +68,7 @@ public:
StateTracker& state_tracker_);
~RasterizerOpenGL() override;
- void Draw(bool is_indexed, bool is_instanced) override;
+ void Draw(bool is_indexed, u32 instance_count) override;
void Clear() override;
void DispatchCompute() override;
void ResetCounter(VideoCore::QueryType type) override;
@@ -78,10 +80,11 @@ public:
bool MustFlushRegion(VAddr addr, u64 size) override;
void InvalidateRegion(VAddr addr, u64 size) override;
void OnCPUWrite(VAddr addr, u64 size) override;
- void SyncGuestHost() override;
+ void InvalidateGPUCache() override;
void UnmapMemory(VAddr addr, u64 size) override;
- void ModifyGPUMemory(GPUVAddr addr, u64 size) override;
- void SignalSemaphore(GPUVAddr addr, u32 value) override;
+ void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override;
+ void SignalFence(std::function<void()>&& func) override;
+ void SyncOperation(std::function<void()>&& func) override;
void SignalSyncPoint(u32 value) override;
void SignalReference() override;
void ReleaseFences() override;
@@ -96,7 +99,7 @@ public:
const Tegra::Engines::Fermi2D::Config& copy_config) override;
Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
- std::span<u8> memory) override;
+ std::span<const u8> memory) override;
bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
u32 pixel_stride) override;
void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
@@ -107,6 +110,12 @@ public:
return num_queued_commands > 0;
}
+ void InitializeChannel(Tegra::Control::ChannelState& channel) override;
+
+ void BindChannel(Tegra::Control::ChannelState& channel) override;
+
+ void ReleaseChannel(s32 channel_id) override;
+
private:
static constexpr size_t MAX_TEXTURES = 192;
static constexpr size_t MAX_IMAGES = 48;
@@ -190,10 +199,9 @@ private:
/// End a transform feedback
void EndTransformFeedback();
+ void BindInlineIndexBuffer();
+
Tegra::GPU& gpu;
- Tegra::Engines::Maxwell3D& maxwell3d;
- Tegra::Engines::KeplerCompute& kepler_compute;
- Tegra::MemoryManager& gpu_memory;
const Device& device;
ScreenInfo& screen_info;
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 7e76f679f..977709518 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -63,6 +63,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key,
Shader::RuntimeInfo info;
if (previous_program) {
info.previous_stage_stores = previous_program->info.stores;
+ info.previous_stage_legacy_stores_mapping = previous_program->info.legacy_stores_mapping;
} else {
// Mark all stores as available for vertex shaders
info.previous_stage_stores.mask.set();
@@ -78,11 +79,11 @@ Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key,
info.tess_clockwise = key.tessellation_clockwise != 0;
info.tess_primitive = [&key] {
switch (key.tessellation_primitive) {
- case Maxwell::TessellationPrimitive::Isolines:
+ case Maxwell::Tessellation::DomainType::Isolines:
return Shader::TessPrimitive::Isolines;
- case Maxwell::TessellationPrimitive::Triangles:
+ case Maxwell::Tessellation::DomainType::Triangles:
return Shader::TessPrimitive::Triangles;
- case Maxwell::TessellationPrimitive::Quads:
+ case Maxwell::Tessellation::DomainType::Quads:
return Shader::TessPrimitive::Quads;
}
ASSERT(false);
@@ -90,11 +91,11 @@ Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key,
}();
info.tess_spacing = [&] {
switch (key.tessellation_spacing) {
- case Maxwell::TessellationSpacing::Equal:
+ case Maxwell::Tessellation::Spacing::Integer:
return Shader::TessSpacing::Equal;
- case Maxwell::TessellationSpacing::FractionalOdd:
+ case Maxwell::Tessellation::Spacing::FractionalOdd:
return Shader::TessSpacing::FractionalOdd;
- case Maxwell::TessellationSpacing::FractionalEven:
+ case Maxwell::Tessellation::Spacing::FractionalEven:
return Shader::TessSpacing::FractionalEven;
}
ASSERT(false);
@@ -139,28 +140,26 @@ Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key,
}
void SetXfbState(VideoCommon::TransformFeedbackState& state, const Maxwell& regs) {
- std::ranges::transform(regs.tfb_layouts, state.layouts.begin(), [](const auto& layout) {
- return VideoCommon::TransformFeedbackState::Layout{
- .stream = layout.stream,
- .varying_count = layout.varying_count,
- .stride = layout.stride,
- };
- });
- state.varyings = regs.tfb_varying_locs;
+ std::ranges::transform(regs.transform_feedback.controls, state.layouts.begin(),
+ [](const auto& layout) {
+ return VideoCommon::TransformFeedbackState::Layout{
+ .stream = layout.stream,
+ .varying_count = layout.varying_count,
+ .stride = layout.stride,
+ };
+ });
+ state.varyings = regs.stream_out_layout;
}
} // Anonymous namespace
ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_,
- Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::Engines::KeplerCompute& kepler_compute_,
- Tegra::MemoryManager& gpu_memory_, const Device& device_,
- TextureCache& texture_cache_, BufferCache& buffer_cache_,
- ProgramManager& program_manager_, StateTracker& state_tracker_,
- VideoCore::ShaderNotify& shader_notify_)
- : VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_},
- emu_window{emu_window_}, device{device_}, texture_cache{texture_cache_},
- buffer_cache{buffer_cache_}, program_manager{program_manager_}, state_tracker{state_tracker_},
- shader_notify{shader_notify_}, use_asynchronous_shaders{device.UseAsynchronousShaders()},
+ const Device& device_, TextureCache& texture_cache_,
+ BufferCache& buffer_cache_, ProgramManager& program_manager_,
+ StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_)
+ : VideoCommon::ShaderCache{rasterizer_}, emu_window{emu_window_}, device{device_},
+ texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, program_manager{program_manager_},
+ state_tracker{state_tracker_}, shader_notify{shader_notify_},
+ use_asynchronous_shaders{device.UseAsynchronousShaders()},
profile{
.supported_spirv = 0x00010000,
@@ -310,16 +309,18 @@ GraphicsPipeline* ShaderCache::CurrentGraphicsPipeline() {
current_pipeline = nullptr;
return nullptr;
}
- const auto& regs{maxwell3d.regs};
+ const auto& regs{maxwell3d->regs};
graphics_key.raw = 0;
- graphics_key.early_z.Assign(regs.force_early_fragment_tests != 0 ? 1 : 0);
+ graphics_key.early_z.Assign(regs.mandated_early_z != 0 ? 1 : 0);
graphics_key.gs_input_topology.Assign(graphics_key.unique_hashes[4] != 0
? regs.draw.topology.Value()
: Maxwell::PrimitiveTopology{});
- graphics_key.tessellation_primitive.Assign(regs.tess_mode.prim.Value());
- graphics_key.tessellation_spacing.Assign(regs.tess_mode.spacing.Value());
- graphics_key.tessellation_clockwise.Assign(regs.tess_mode.cw.Value());
- graphics_key.xfb_enabled.Assign(regs.tfb_enabled != 0 ? 1 : 0);
+ graphics_key.tessellation_primitive.Assign(regs.tessellation.params.domain_type.Value());
+ graphics_key.tessellation_spacing.Assign(regs.tessellation.params.spacing.Value());
+ graphics_key.tessellation_clockwise.Assign(
+ regs.tessellation.params.output_primitives.Value() ==
+ Maxwell::Tessellation::OutputPrimitives::Triangles_CW);
+ graphics_key.xfb_enabled.Assign(regs.transform_feedback_enabled != 0 ? 1 : 0);
if (graphics_key.xfb_enabled) {
SetXfbState(graphics_key.xfb_state, regs);
}
@@ -351,13 +352,13 @@ GraphicsPipeline* ShaderCache::BuiltPipeline(GraphicsPipeline* pipeline) const n
}
// If something is using depth, we can assume that games are not rendering anything which
// will be used one time.
- if (maxwell3d.regs.zeta_enable) {
+ if (maxwell3d->regs.zeta_enable) {
return nullptr;
}
// If games are using a small index count, we can assume these are full screen quads.
// Usually these shaders are only used once for building textures so we can assume they
// can't be built async
- if (maxwell3d.regs.index_array.count <= 6 || maxwell3d.regs.vertex_buffer.count <= 6) {
+ if (maxwell3d->regs.index_buffer.count <= 6 || maxwell3d->regs.vertex_buffer.count <= 6) {
return pipeline;
}
return nullptr;
@@ -368,7 +369,7 @@ ComputePipeline* ShaderCache::CurrentComputePipeline() {
if (!shader) {
return nullptr;
}
- const auto& qmd{kepler_compute.launch_description};
+ const auto& qmd{kepler_compute->launch_description};
const ComputePipelineKey key{
.unique_hash = shader->unique_hash,
.shared_memory_size = qmd.shared_alloc,
@@ -480,9 +481,9 @@ std::unique_ptr<GraphicsPipeline> ShaderCache::CreateGraphicsPipeline(
previous_program = &program;
}
auto* const thread_worker{build_in_parallel ? workers.get() : nullptr};
- return std::make_unique<GraphicsPipeline>(
- device, texture_cache, buffer_cache, gpu_memory, maxwell3d, program_manager, state_tracker,
- thread_worker, &shader_notify, sources, sources_spirv, infos, key);
+ return std::make_unique<GraphicsPipeline>(device, texture_cache, buffer_cache, program_manager,
+ state_tracker, thread_worker, &shader_notify, sources,
+ sources_spirv, infos, key);
} catch (Shader::Exception& exception) {
LOG_ERROR(Render_OpenGL, "{}", exception.what());
@@ -491,9 +492,9 @@ std::unique_ptr<GraphicsPipeline> ShaderCache::CreateGraphicsPipeline(
std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline(
const ComputePipelineKey& key, const VideoCommon::ShaderInfo* shader) {
- const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()};
- const auto& qmd{kepler_compute.launch_description};
- ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start};
+ const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()};
+ const auto& qmd{kepler_compute->launch_description};
+ ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start};
env.SetCachedSize(shader->size_bytes);
main_pools.ReleaseContents();
@@ -536,9 +537,8 @@ std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline(
break;
}
- return std::make_unique<ComputePipeline>(device, texture_cache, buffer_cache, gpu_memory,
- kepler_compute, program_manager, program.info, code,
- code_spirv);
+ return std::make_unique<ComputePipeline>(device, texture_cache, buffer_cache, program_manager,
+ program.info, code, code_spirv);
} catch (Shader::Exception& exception) {
LOG_ERROR(Render_OpenGL, "{}", exception.what());
return nullptr;
@@ -546,7 +546,7 @@ std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline(
std::unique_ptr<ShaderWorker> ShaderCache::CreateWorkers() const {
return std::make_unique<ShaderWorker>(std::max(std::thread::hardware_concurrency(), 2U) - 1,
- "yuzu:ShaderBuilder",
+ "GlShaderBuilder",
[this] { return Context{emu_window}; });
}
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h
index a14269dea..89f181fe3 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_cache.h
@@ -30,12 +30,9 @@ using ShaderWorker = Common::StatefulThreadWorker<ShaderContext::Context>;
class ShaderCache : public VideoCommon::ShaderCache {
public:
explicit ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_,
- Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::Engines::KeplerCompute& kepler_compute_,
- Tegra::MemoryManager& gpu_memory_, const Device& device_,
- TextureCache& texture_cache_, BufferCache& buffer_cache_,
- ProgramManager& program_manager_, StateTracker& state_tracker_,
- VideoCore::ShaderNotify& shader_notify_);
+ const Device& device_, TextureCache& texture_cache_,
+ BufferCache& buffer_cache_, ProgramManager& program_manager_,
+ StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_);
~ShaderCache();
void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
diff --git a/src/video_core/renderer_opengl/gl_state_tracker.cpp b/src/video_core/renderer_opengl/gl_state_tracker.cpp
index 912725ef7..a359f96f1 100644
--- a/src/video_core/renderer_opengl/gl_state_tracker.cpp
+++ b/src/video_core/renderer_opengl/gl_state_tracker.cpp
@@ -7,8 +7,8 @@
#include "common/common_types.h"
#include "core/core.h"
+#include "video_core/control/channel_state.h"
#include "video_core/engines/maxwell_3d.h"
-#include "video_core/gpu.h"
#include "video_core/renderer_opengl/gl_state_tracker.h"
#define OFF(field_name) MAXWELL3D_REG_INDEX(field_name)
@@ -38,12 +38,12 @@ void SetupDirtyColorMasks(Tables& tables) {
void SetupDirtyVertexInstances(Tables& tables) {
static constexpr std::size_t instance_base_offset = 3;
for (std::size_t i = 0; i < Regs::NumVertexArrays; ++i) {
- const std::size_t array_offset = OFF(vertex_array) + i * NUM(vertex_array[0]);
+ const std::size_t array_offset = OFF(vertex_streams) + i * NUM(vertex_streams[0]);
const std::size_t instance_array_offset = array_offset + instance_base_offset;
tables[0][instance_array_offset] = static_cast<u8>(VertexInstance0 + i);
tables[1][instance_array_offset] = VertexInstances;
- const std::size_t instance_offset = OFF(instanced_arrays) + i;
+ const std::size_t instance_offset = OFF(vertex_stream_instances) + i;
tables[0][instance_offset] = static_cast<u8>(VertexInstance0 + i);
tables[1][instance_offset] = VertexInstances;
}
@@ -70,8 +70,8 @@ void SetupDirtyViewports(Tables& tables) {
FillBlock(tables[1], OFF(viewport_transform), NUM(viewport_transform), Viewports);
FillBlock(tables[1], OFF(viewports), NUM(viewports), Viewports);
- tables[0][OFF(viewport_transform_enabled)] = ViewportTransform;
- tables[1][OFF(viewport_transform_enabled)] = Viewports;
+ tables[0][OFF(viewport_scale_offset_enbled)] = ViewportTransform;
+ tables[1][OFF(viewport_scale_offset_enbled)] = Viewports;
}
void SetupDirtyScissors(Tables& tables) {
@@ -88,7 +88,7 @@ void SetupDirtyPolygonModes(Tables& tables) {
tables[1][OFF(polygon_mode_front)] = PolygonModes;
tables[1][OFF(polygon_mode_back)] = PolygonModes;
- tables[0][OFF(fill_rectangle)] = PolygonModes;
+ tables[0][OFF(fill_via_triangle_mode)] = PolygonModes;
}
void SetupDirtyDepthTest(Tables& tables) {
@@ -100,11 +100,11 @@ void SetupDirtyDepthTest(Tables& tables) {
void SetupDirtyStencilTest(Tables& tables) {
static constexpr std::array offsets = {
- OFF(stencil_enable), OFF(stencil_front_func_func), OFF(stencil_front_func_ref),
- OFF(stencil_front_func_mask), OFF(stencil_front_op_fail), OFF(stencil_front_op_zfail),
- OFF(stencil_front_op_zpass), OFF(stencil_front_mask), OFF(stencil_two_side_enable),
- OFF(stencil_back_func_func), OFF(stencil_back_func_ref), OFF(stencil_back_func_mask),
- OFF(stencil_back_op_fail), OFF(stencil_back_op_zfail), OFF(stencil_back_op_zpass),
+ OFF(stencil_enable), OFF(stencil_front_op.func), OFF(stencil_front_ref),
+ OFF(stencil_front_func_mask), OFF(stencil_front_op.fail), OFF(stencil_front_op.zfail),
+ OFF(stencil_front_op.zpass), OFF(stencil_front_mask), OFF(stencil_two_side_enable),
+ OFF(stencil_back_op.func), OFF(stencil_back_ref), OFF(stencil_back_func_mask),
+ OFF(stencil_back_op.fail), OFF(stencil_back_op.zfail), OFF(stencil_back_op.zpass),
OFF(stencil_back_mask)};
for (const auto offset : offsets) {
tables[0][offset] = StencilTest;
@@ -121,15 +121,15 @@ void SetupDirtyAlphaTest(Tables& tables) {
void SetupDirtyBlend(Tables& tables) {
FillBlock(tables[0], OFF(blend_color), NUM(blend_color), BlendColor);
- tables[0][OFF(independent_blend_enable)] = BlendIndependentEnabled;
+ tables[0][OFF(blend_per_target_enabled)] = BlendIndependentEnabled;
for (std::size_t i = 0; i < Regs::NumRenderTargets; ++i) {
- const std::size_t offset = OFF(independent_blend) + i * NUM(independent_blend[0]);
- FillBlock(tables[0], offset, NUM(independent_blend[0]), BlendState0 + i);
+ const std::size_t offset = OFF(blend_per_target) + i * NUM(blend_per_target[0]);
+ FillBlock(tables[0], offset, NUM(blend_per_target[0]), BlendState0 + i);
tables[0][OFF(blend.enable) + i] = static_cast<u8>(BlendState0 + i);
}
- FillBlock(tables[1], OFF(independent_blend), NUM(independent_blend), BlendStates);
+ FillBlock(tables[1], OFF(blend_per_target), NUM(blend_per_target), BlendStates);
FillBlock(tables[1], OFF(blend), NUM(blend), BlendStates);
}
@@ -142,13 +142,14 @@ void SetupDirtyPolygonOffset(Tables& tables) {
table[OFF(polygon_offset_fill_enable)] = PolygonOffset;
table[OFF(polygon_offset_line_enable)] = PolygonOffset;
table[OFF(polygon_offset_point_enable)] = PolygonOffset;
- table[OFF(polygon_offset_factor)] = PolygonOffset;
- table[OFF(polygon_offset_units)] = PolygonOffset;
- table[OFF(polygon_offset_clamp)] = PolygonOffset;
+ table[OFF(slope_scale_depth_bias)] = PolygonOffset;
+ table[OFF(depth_bias)] = PolygonOffset;
+ table[OFF(depth_bias_clamp)] = PolygonOffset;
}
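
The three renamed registers are the D3D spellings of the GL polygon-offset knobs, and all feed the same bias computation. Below is a minimal sketch of that computation, assuming the standard GL/D3D formulation (`r` is the smallest resolvable depth delta); it is an illustration, not the emulator's exact math:

```cpp
// Illustrative depth-bias math behind the renamed registers; the clamp
// semantics follow glPolygonOffsetClamp: a zero clamp means "no clamp".
struct DepthBias {
    float slope_scale_depth_bias; // formerly polygon_offset_factor
    float depth_bias;             // formerly polygon_offset_units
    float depth_bias_clamp;       // formerly polygon_offset_clamp
};

constexpr float ApplyBias(const DepthBias& b, float max_depth_slope, float r) {
    const float bias = b.depth_bias * r + b.slope_scale_depth_bias * max_depth_slope;
    if (b.depth_bias_clamp > 0.0f) {
        return bias < b.depth_bias_clamp ? bias : b.depth_bias_clamp;
    }
    if (b.depth_bias_clamp < 0.0f) {
        return bias > b.depth_bias_clamp ? bias : b.depth_bias_clamp;
    }
    return bias;
}

// bias = 4 * 0.25 + 2 * 1 = 3.0, clamped down to 0.5
static_assert(ApplyBias({.slope_scale_depth_bias = 2.0f, .depth_bias = 4.0f,
                         .depth_bias_clamp = 0.5f}, 1.0f, 0.25f) == 0.5f);

int main() {}
```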
void SetupDirtyMultisampleControl(Tables& tables) {
- FillBlock(tables[0], OFF(multisample_control), NUM(multisample_control), MultisampleControl);
+ FillBlock(tables[0], OFF(anti_alias_alpha_control), NUM(anti_alias_alpha_control),
+ MultisampleControl);
}
void SetupDirtyRasterizeEnable(Tables& tables) {
@@ -168,7 +169,7 @@ void SetupDirtyFragmentClampColor(Tables& tables) {
}
void SetupDirtyPointSize(Tables& tables) {
- tables[0][OFF(vp_point_size)] = PointSize;
+ tables[0][OFF(point_size_attribute)] = PointSize;
tables[0][OFF(point_size)] = PointSize;
tables[0][OFF(point_sprite_enable)] = PointSize;
}
@@ -176,35 +177,34 @@ void SetupDirtyPointSize(Tables& tables) {
void SetupDirtyLineWidth(Tables& tables) {
tables[0][OFF(line_width_smooth)] = LineWidth;
tables[0][OFF(line_width_aliased)] = LineWidth;
- tables[0][OFF(line_smooth_enable)] = LineWidth;
+ tables[0][OFF(line_anti_alias_enable)] = LineWidth;
}
void SetupDirtyClipControl(Tables& tables) {
auto& table = tables[0];
- table[OFF(screen_y_control)] = ClipControl;
+ table[OFF(window_origin)] = ClipControl;
table[OFF(depth_mode)] = ClipControl;
}
void SetupDirtyDepthClampEnabled(Tables& tables) {
- tables[0][OFF(view_volume_clip_control)] = DepthClampEnabled;
+ tables[0][OFF(viewport_clip_control)] = DepthClampEnabled;
}
void SetupDirtyMisc(Tables& tables) {
auto& table = tables[0];
- table[OFF(clip_distance_enabled)] = ClipDistances;
+ table[OFF(user_clip_enable)] = ClipDistances;
- table[OFF(front_face)] = FrontFace;
+ table[OFF(gl_front_face)] = FrontFace;
- table[OFF(cull_test_enabled)] = CullTest;
- table[OFF(cull_face)] = CullTest;
+ table[OFF(gl_cull_test_enabled)] = CullTest;
+ table[OFF(gl_cull_face)] = CullTest;
}
} // Anonymous namespace
-StateTracker::StateTracker(Tegra::GPU& gpu) : flags{gpu.Maxwell3D().dirty.flags} {
- auto& dirty = gpu.Maxwell3D().dirty;
- auto& tables = dirty.tables;
+void StateTracker::SetupTables(Tegra::Control::ChannelState& channel_state) {
+ auto& tables{channel_state.maxwell_3d->dirty.tables};
SetupDirtyFlags(tables);
SetupDirtyColorMasks(tables);
SetupDirtyViewports(tables);
@@ -230,4 +230,14 @@ StateTracker::StateTracker(Tegra::GPU& gpu) : flags{gpu.Maxwell3D().dirty.flags}
SetupDirtyMisc(tables);
}
+void StateTracker::ChangeChannel(Tegra::Control::ChannelState& channel_state) {
+ flags = &channel_state.maxwell_3d->dirty.flags;
+}
+
+void StateTracker::InvalidateState() {
+ flags->set();
+}
+
+StateTracker::StateTracker() : flags{&default_flags} {}
+
} // namespace OpenGL
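
The .cpp change above splits the old constructor into three pieces: dirty-table setup now takes a channel explicitly, retargeting happens in ChangeChannel(), and a default flag set keeps the pointer valid before any channel is bound. A reduced, self-contained sketch of that lifecycle, using stand-in types for Maxwell3D's DirtyState::Flags and Control::ChannelState:

```cpp
#include <bitset>

using Flags = std::bitset<256>; // stand-in for Maxwell3D::DirtyState::Flags

struct ChannelState {           // stand-in for Tegra::Control::ChannelState
    Flags dirty_flags;
};

class StateTracker {
public:
    StateTracker() : flags{&default_flags} {} // safe before any channel exists

    void ChangeChannel(ChannelState& ch) { flags = &ch.dirty_flags; }
    void InvalidateState() { flags->set(); } // mark every state as dirty

private:
    Flags* flags;        // points at the active channel's flags
    Flags default_flags; // placeholder target until ChangeChannel() runs
};

int main() {
    StateTracker tracker;      // writes land in default_flags, not a channel
    ChannelState channel;
    tracker.ChangeChannel(channel);
    tracker.InvalidateState(); // now channel.dirty_flags is fully set
}
```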
diff --git a/src/video_core/renderer_opengl/gl_state_tracker.h b/src/video_core/renderer_opengl/gl_state_tracker.h
index 04e024f08..19bcf3f35 100644
--- a/src/video_core/renderer_opengl/gl_state_tracker.h
+++ b/src/video_core/renderer_opengl/gl_state_tracker.h
@@ -12,8 +12,10 @@
#include "video_core/engines/maxwell_3d.h"
namespace Tegra {
-class GPU;
+namespace Control {
+struct ChannelState;
}
+} // namespace Tegra
namespace OpenGL {
@@ -83,7 +85,7 @@ static_assert(Last <= std::numeric_limits<u8>::max());
class StateTracker {
public:
- explicit StateTracker(Tegra::GPU& gpu);
+ explicit StateTracker();
void BindIndexBuffer(GLuint new_index_buffer) {
if (index_buffer == new_index_buffer) {
@@ -121,94 +123,107 @@ public:
}
void NotifyScreenDrawVertexArray() {
- flags[OpenGL::Dirty::VertexFormats] = true;
- flags[OpenGL::Dirty::VertexFormat0 + 0] = true;
- flags[OpenGL::Dirty::VertexFormat0 + 1] = true;
+ (*flags)[OpenGL::Dirty::VertexFormats] = true;
+ (*flags)[OpenGL::Dirty::VertexFormat0 + 0] = true;
+ (*flags)[OpenGL::Dirty::VertexFormat0 + 1] = true;
- flags[VideoCommon::Dirty::VertexBuffers] = true;
- flags[VideoCommon::Dirty::VertexBuffer0] = true;
+ (*flags)[VideoCommon::Dirty::VertexBuffers] = true;
+ (*flags)[VideoCommon::Dirty::VertexBuffer0] = true;
- flags[OpenGL::Dirty::VertexInstances] = true;
- flags[OpenGL::Dirty::VertexInstance0 + 0] = true;
- flags[OpenGL::Dirty::VertexInstance0 + 1] = true;
+ (*flags)[OpenGL::Dirty::VertexInstances] = true;
+ (*flags)[OpenGL::Dirty::VertexInstance0 + 0] = true;
+ (*flags)[OpenGL::Dirty::VertexInstance0 + 1] = true;
}
void NotifyPolygonModes() {
- flags[OpenGL::Dirty::PolygonModes] = true;
- flags[OpenGL::Dirty::PolygonModeFront] = true;
- flags[OpenGL::Dirty::PolygonModeBack] = true;
+ (*flags)[OpenGL::Dirty::PolygonModes] = true;
+ (*flags)[OpenGL::Dirty::PolygonModeFront] = true;
+ (*flags)[OpenGL::Dirty::PolygonModeBack] = true;
}
void NotifyViewport0() {
- flags[OpenGL::Dirty::Viewports] = true;
- flags[OpenGL::Dirty::Viewport0] = true;
+ (*flags)[OpenGL::Dirty::Viewports] = true;
+ (*flags)[OpenGL::Dirty::Viewport0] = true;
}
void NotifyScissor0() {
- flags[OpenGL::Dirty::Scissors] = true;
- flags[OpenGL::Dirty::Scissor0] = true;
+ (*flags)[OpenGL::Dirty::Scissors] = true;
+ (*flags)[OpenGL::Dirty::Scissor0] = true;
}
void NotifyColorMask(size_t index) {
- flags[OpenGL::Dirty::ColorMasks] = true;
- flags[OpenGL::Dirty::ColorMask0 + index] = true;
+ (*flags)[OpenGL::Dirty::ColorMasks] = true;
+ (*flags)[OpenGL::Dirty::ColorMask0 + index] = true;
}
void NotifyBlend0() {
- flags[OpenGL::Dirty::BlendStates] = true;
- flags[OpenGL::Dirty::BlendState0] = true;
+ (*flags)[OpenGL::Dirty::BlendStates] = true;
+ (*flags)[OpenGL::Dirty::BlendState0] = true;
}
void NotifyFramebuffer() {
- flags[VideoCommon::Dirty::RenderTargets] = true;
+ (*flags)[VideoCommon::Dirty::RenderTargets] = true;
}
void NotifyFrontFace() {
- flags[OpenGL::Dirty::FrontFace] = true;
+ (*flags)[OpenGL::Dirty::FrontFace] = true;
}
void NotifyCullTest() {
- flags[OpenGL::Dirty::CullTest] = true;
+ (*flags)[OpenGL::Dirty::CullTest] = true;
}
void NotifyDepthMask() {
- flags[OpenGL::Dirty::DepthMask] = true;
+ (*flags)[OpenGL::Dirty::DepthMask] = true;
}
void NotifyDepthTest() {
- flags[OpenGL::Dirty::DepthTest] = true;
+ (*flags)[OpenGL::Dirty::DepthTest] = true;
}
void NotifyStencilTest() {
- flags[OpenGL::Dirty::StencilTest] = true;
+ (*flags)[OpenGL::Dirty::StencilTest] = true;
}
void NotifyPolygonOffset() {
- flags[OpenGL::Dirty::PolygonOffset] = true;
+ (*flags)[OpenGL::Dirty::PolygonOffset] = true;
}
void NotifyRasterizeEnable() {
- flags[OpenGL::Dirty::RasterizeEnable] = true;
+ (*flags)[OpenGL::Dirty::RasterizeEnable] = true;
}
void NotifyFramebufferSRGB() {
- flags[OpenGL::Dirty::FramebufferSRGB] = true;
+ (*flags)[OpenGL::Dirty::FramebufferSRGB] = true;
}
void NotifyLogicOp() {
- flags[OpenGL::Dirty::LogicOp] = true;
+ (*flags)[OpenGL::Dirty::LogicOp] = true;
}
void NotifyClipControl() {
- flags[OpenGL::Dirty::ClipControl] = true;
+ (*flags)[OpenGL::Dirty::ClipControl] = true;
}
void NotifyAlphaTest() {
- flags[OpenGL::Dirty::AlphaTest] = true;
+ (*flags)[OpenGL::Dirty::AlphaTest] = true;
}
+ void NotifyRange(u8 start, u8 end) {
+ for (auto flag = start; flag <= end; flag++) {
+ (*flags)[flag] = true;
+ }
+ }
+
+ void SetupTables(Tegra::Control::ChannelState& channel_state);
+
+ void ChangeChannel(Tegra::Control::ChannelState& channel_state);
+
+ void InvalidateState();
+
private:
- Tegra::Engines::Maxwell3D::DirtyState::Flags& flags;
+ Tegra::Engines::Maxwell3D::DirtyState::Flags* flags;
+ Tegra::Engines::Maxwell3D::DirtyState::Flags default_flags{};
GLuint framebuffer = 0;
GLuint index_buffer = 0;
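
Because `flags` is now a pointer, every Notify* helper dereferences it, and the new NotifyRange() dirties a contiguous run of flag indices in one call. A sketch of that pattern over a plain bitset (the flag indices are hypothetical; the real ones come from the Dirty enums):

```cpp
#include <bitset>
#include <cstdint>

std::bitset<256> dirty_flags; // stand-in for DirtyState::Flags

// Same behaviour as NotifyRange() above; iterating over int keeps the loop
// finite even when end == 255, which would wrap a std::uint8_t counter.
void NotifyRange(std::uint8_t start, std::uint8_t end) {
    for (int flag = start; flag <= end; ++flag) {
        dirty_flags[static_cast<std::size_t>(flag)] = true;
    }
}

int main() {
    constexpr std::uint8_t Viewport0 = 24;  // hypothetical index
    constexpr std::uint8_t Viewport15 = 39; // hypothetical index
    NotifyRange(Viewport0, Viewport15);     // dirties all 16 viewport flags
    return dirty_flags.count() == 16 ? 0 : 1;
}
```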
diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h
index 9a72d0d6d..e14f9b2db 100644
--- a/src/video_core/renderer_opengl/maxwell_to_gl.h
+++ b/src/video_core/renderer_opengl/maxwell_to_gl.h
@@ -87,7 +87,7 @@ constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> FORMAT_TAB
{GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT}, // BC3_SRGB
{GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM}, // BC7_SRGB
{GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV}, // A4B4G4R4_UNORM
- {GL_R8, GL_RED, GL_UNSIGNED_BYTE}, // R4G4_UNORM
+ {GL_R8, GL_RED, GL_UNSIGNED_BYTE}, // G4R4_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR}, // ASTC_2D_4X4_SRGB
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR}, // ASTC_2D_8X8_SRGB
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR}, // ASTC_2D_8X5_SRGB
@@ -99,6 +99,8 @@ constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> FORMAT_TAB
{GL_COMPRESSED_RGBA_ASTC_6x6_KHR}, // ASTC_2D_6X6_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR}, // ASTC_2D_6X6_SRGB
{GL_COMPRESSED_RGBA_ASTC_10x6_KHR}, // ASTC_2D_10X6_UNORM
+ {GL_COMPRESSED_RGBA_ASTC_10x5_KHR}, // ASTC_2D_10X5_UNORM
+ {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR}, // ASTC_2D_10X5_SRGB
{GL_COMPRESSED_RGBA_ASTC_10x10_KHR}, // ASTC_2D_10X10_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR}, // ASTC_2D_10X10_SRGB
{GL_COMPRESSED_RGBA_ASTC_12x12_KHR}, // ASTC_2D_12X12_UNORM
@@ -124,51 +126,60 @@ inline const FormatTuple& GetFormatTuple(VideoCore::Surface::PixelFormat pixel_f
inline GLenum VertexFormat(Maxwell::VertexAttribute attrib) {
switch (attrib.type) {
- case Maxwell::VertexAttribute::Type::UnsignedNorm:
- case Maxwell::VertexAttribute::Type::UnsignedScaled:
- case Maxwell::VertexAttribute::Type::UnsignedInt:
+ case Maxwell::VertexAttribute::Type::UnusedEnumDoNotUseBecauseItWillGoAway:
+ ASSERT_MSG(false, "Invalid vertex attribute type!");
+ break;
+ case Maxwell::VertexAttribute::Type::UNorm:
+ case Maxwell::VertexAttribute::Type::UScaled:
+ case Maxwell::VertexAttribute::Type::UInt:
switch (attrib.size) {
- case Maxwell::VertexAttribute::Size::Size_8:
- case Maxwell::VertexAttribute::Size::Size_8_8:
- case Maxwell::VertexAttribute::Size::Size_8_8_8:
- case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8:
+ case Maxwell::VertexAttribute::Size::Size_A8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8:
+ case Maxwell::VertexAttribute::Size::Size_G8_R8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8_B8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8_B8_A8:
+ case Maxwell::VertexAttribute::Size::Size_X8_B8_G8_R8:
return GL_UNSIGNED_BYTE;
- case Maxwell::VertexAttribute::Size::Size_16:
- case Maxwell::VertexAttribute::Size::Size_16_16:
- case Maxwell::VertexAttribute::Size::Size_16_16_16:
- case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16:
return GL_UNSIGNED_SHORT;
- case Maxwell::VertexAttribute::Size::Size_32:
- case Maxwell::VertexAttribute::Size::Size_32_32:
- case Maxwell::VertexAttribute::Size::Size_32_32_32:
- case Maxwell::VertexAttribute::Size::Size_32_32_32_32:
+ case Maxwell::VertexAttribute::Size::Size_R32:
+ case Maxwell::VertexAttribute::Size::Size_R32_G32:
+ case Maxwell::VertexAttribute::Size::Size_R32_G32_B32:
+ case Maxwell::VertexAttribute::Size::Size_R32_G32_B32_A32:
return GL_UNSIGNED_INT;
- case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
+ case Maxwell::VertexAttribute::Size::Size_A2_B10_G10_R10:
return GL_UNSIGNED_INT_2_10_10_10_REV;
default:
break;
}
break;
- case Maxwell::VertexAttribute::Type::SignedNorm:
- case Maxwell::VertexAttribute::Type::SignedScaled:
- case Maxwell::VertexAttribute::Type::SignedInt:
+ case Maxwell::VertexAttribute::Type::SNorm:
+ case Maxwell::VertexAttribute::Type::SScaled:
+ case Maxwell::VertexAttribute::Type::SInt:
switch (attrib.size) {
- case Maxwell::VertexAttribute::Size::Size_8:
- case Maxwell::VertexAttribute::Size::Size_8_8:
- case Maxwell::VertexAttribute::Size::Size_8_8_8:
- case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8:
+ case Maxwell::VertexAttribute::Size::Size_A8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8:
+ case Maxwell::VertexAttribute::Size::Size_G8_R8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8_B8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8_B8_A8:
+ case Maxwell::VertexAttribute::Size::Size_X8_B8_G8_R8:
return GL_BYTE;
- case Maxwell::VertexAttribute::Size::Size_16:
- case Maxwell::VertexAttribute::Size::Size_16_16:
- case Maxwell::VertexAttribute::Size::Size_16_16_16:
- case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16:
return GL_SHORT;
- case Maxwell::VertexAttribute::Size::Size_32:
- case Maxwell::VertexAttribute::Size::Size_32_32:
- case Maxwell::VertexAttribute::Size::Size_32_32_32:
- case Maxwell::VertexAttribute::Size::Size_32_32_32_32:
+ case Maxwell::VertexAttribute::Size::Size_R32:
+ case Maxwell::VertexAttribute::Size::Size_R32_G32:
+ case Maxwell::VertexAttribute::Size::Size_R32_G32_B32:
+ case Maxwell::VertexAttribute::Size::Size_R32_G32_B32_A32:
return GL_INT;
- case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
+ case Maxwell::VertexAttribute::Size::Size_A2_B10_G10_R10:
return GL_INT_2_10_10_10_REV;
default:
break;
@@ -176,17 +187,17 @@ inline GLenum VertexFormat(Maxwell::VertexAttribute attrib) {
break;
case Maxwell::VertexAttribute::Type::Float:
switch (attrib.size) {
- case Maxwell::VertexAttribute::Size::Size_16:
- case Maxwell::VertexAttribute::Size::Size_16_16:
- case Maxwell::VertexAttribute::Size::Size_16_16_16:
- case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16:
return GL_HALF_FLOAT;
- case Maxwell::VertexAttribute::Size::Size_32:
- case Maxwell::VertexAttribute::Size::Size_32_32:
- case Maxwell::VertexAttribute::Size::Size_32_32_32:
- case Maxwell::VertexAttribute::Size::Size_32_32_32_32:
+ case Maxwell::VertexAttribute::Size::Size_R32:
+ case Maxwell::VertexAttribute::Size::Size_R32_G32:
+ case Maxwell::VertexAttribute::Size::Size_R32_G32_B32:
+ case Maxwell::VertexAttribute::Size::Size_R32_G32_B32_A32:
return GL_FLOAT;
- case Maxwell::VertexAttribute::Size::Size_11_11_10:
+ case Maxwell::VertexAttribute::Size::Size_B10_G11_R11:
return GL_UNSIGNED_INT_10F_11F_11F_REV;
default:
break;
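
The renamed Size_* enumerators spell out component order, and several distinct hardware encodings still resolve to one host component type, which is why the switches above fold them into shared case labels (Size_A8 with Size_R8, Size_G8_R8 with Size_R8_G8, Size_X8_B8_G8_R8 with Size_R8_G8_B8_A8). A reduced sketch of the fold; the enumerator values here are placeholders:

```cpp
// Distinct register encodings that share a component layout land on the
// same case label, exactly as in VertexFormat() above.
enum class Size { Size_R8, Size_A8, Size_R8_G8, Size_G8_R8 };

constexpr bool IsEightBit(Size size) {
    switch (size) {
    case Size::Size_R8:
    case Size::Size_A8:    // alpha-only alias, still an 8-bit component
    case Size::Size_R8_G8:
    case Size::Size_G8_R8: // swapped order, same 8-bit components
        return true;
    }
    return false;
}

static_assert(IsEightBit(Size::Size_A8) && IsEightBit(Size::Size_G8_R8));

int main() {}
```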
@@ -333,20 +344,20 @@ inline GLenum DepthCompareFunc(Tegra::Texture::DepthCompareFunc func) {
inline GLenum BlendEquation(Maxwell::Blend::Equation equation) {
switch (equation) {
- case Maxwell::Blend::Equation::Add:
- case Maxwell::Blend::Equation::AddGL:
+ case Maxwell::Blend::Equation::Add_D3D:
+ case Maxwell::Blend::Equation::Add_GL:
return GL_FUNC_ADD;
- case Maxwell::Blend::Equation::Subtract:
- case Maxwell::Blend::Equation::SubtractGL:
+ case Maxwell::Blend::Equation::Subtract_D3D:
+ case Maxwell::Blend::Equation::Subtract_GL:
return GL_FUNC_SUBTRACT;
- case Maxwell::Blend::Equation::ReverseSubtract:
- case Maxwell::Blend::Equation::ReverseSubtractGL:
+ case Maxwell::Blend::Equation::ReverseSubtract_D3D:
+ case Maxwell::Blend::Equation::ReverseSubtract_GL:
return GL_FUNC_REVERSE_SUBTRACT;
- case Maxwell::Blend::Equation::Min:
- case Maxwell::Blend::Equation::MinGL:
+ case Maxwell::Blend::Equation::Min_D3D:
+ case Maxwell::Blend::Equation::Min_GL:
return GL_MIN;
- case Maxwell::Blend::Equation::Max:
- case Maxwell::Blend::Equation::MaxGL:
+ case Maxwell::Blend::Equation::Max_D3D:
+ case Maxwell::Blend::Equation::Max_GL:
return GL_MAX;
}
UNIMPLEMENTED_MSG("Unimplemented blend equation={}", equation);
@@ -355,62 +366,62 @@ inline GLenum BlendEquation(Maxwell::Blend::Equation equation) {
inline GLenum BlendFunc(Maxwell::Blend::Factor factor) {
switch (factor) {
- case Maxwell::Blend::Factor::Zero:
- case Maxwell::Blend::Factor::ZeroGL:
+ case Maxwell::Blend::Factor::Zero_D3D:
+ case Maxwell::Blend::Factor::Zero_GL:
return GL_ZERO;
- case Maxwell::Blend::Factor::One:
- case Maxwell::Blend::Factor::OneGL:
+ case Maxwell::Blend::Factor::One_D3D:
+ case Maxwell::Blend::Factor::One_GL:
return GL_ONE;
- case Maxwell::Blend::Factor::SourceColor:
- case Maxwell::Blend::Factor::SourceColorGL:
+ case Maxwell::Blend::Factor::SourceColor_D3D:
+ case Maxwell::Blend::Factor::SourceColor_GL:
return GL_SRC_COLOR;
- case Maxwell::Blend::Factor::OneMinusSourceColor:
- case Maxwell::Blend::Factor::OneMinusSourceColorGL:
+ case Maxwell::Blend::Factor::OneMinusSourceColor_D3D:
+ case Maxwell::Blend::Factor::OneMinusSourceColor_GL:
return GL_ONE_MINUS_SRC_COLOR;
- case Maxwell::Blend::Factor::SourceAlpha:
- case Maxwell::Blend::Factor::SourceAlphaGL:
+ case Maxwell::Blend::Factor::SourceAlpha_D3D:
+ case Maxwell::Blend::Factor::SourceAlpha_GL:
return GL_SRC_ALPHA;
- case Maxwell::Blend::Factor::OneMinusSourceAlpha:
- case Maxwell::Blend::Factor::OneMinusSourceAlphaGL:
+ case Maxwell::Blend::Factor::OneMinusSourceAlpha_D3D:
+ case Maxwell::Blend::Factor::OneMinusSourceAlpha_GL:
return GL_ONE_MINUS_SRC_ALPHA;
- case Maxwell::Blend::Factor::DestAlpha:
- case Maxwell::Blend::Factor::DestAlphaGL:
+ case Maxwell::Blend::Factor::DestAlpha_D3D:
+ case Maxwell::Blend::Factor::DestAlpha_GL:
return GL_DST_ALPHA;
- case Maxwell::Blend::Factor::OneMinusDestAlpha:
- case Maxwell::Blend::Factor::OneMinusDestAlphaGL:
+ case Maxwell::Blend::Factor::OneMinusDestAlpha_D3D:
+ case Maxwell::Blend::Factor::OneMinusDestAlpha_GL:
return GL_ONE_MINUS_DST_ALPHA;
- case Maxwell::Blend::Factor::DestColor:
- case Maxwell::Blend::Factor::DestColorGL:
+ case Maxwell::Blend::Factor::DestColor_D3D:
+ case Maxwell::Blend::Factor::DestColor_GL:
return GL_DST_COLOR;
- case Maxwell::Blend::Factor::OneMinusDestColor:
- case Maxwell::Blend::Factor::OneMinusDestColorGL:
+ case Maxwell::Blend::Factor::OneMinusDestColor_D3D:
+ case Maxwell::Blend::Factor::OneMinusDestColor_GL:
return GL_ONE_MINUS_DST_COLOR;
- case Maxwell::Blend::Factor::SourceAlphaSaturate:
- case Maxwell::Blend::Factor::SourceAlphaSaturateGL:
+ case Maxwell::Blend::Factor::SourceAlphaSaturate_D3D:
+ case Maxwell::Blend::Factor::SourceAlphaSaturate_GL:
return GL_SRC_ALPHA_SATURATE;
- case Maxwell::Blend::Factor::Source1Color:
- case Maxwell::Blend::Factor::Source1ColorGL:
+ case Maxwell::Blend::Factor::Source1Color_D3D:
+ case Maxwell::Blend::Factor::Source1Color_GL:
return GL_SRC1_COLOR;
- case Maxwell::Blend::Factor::OneMinusSource1Color:
- case Maxwell::Blend::Factor::OneMinusSource1ColorGL:
+ case Maxwell::Blend::Factor::OneMinusSource1Color_D3D:
+ case Maxwell::Blend::Factor::OneMinusSource1Color_GL:
return GL_ONE_MINUS_SRC1_COLOR;
- case Maxwell::Blend::Factor::Source1Alpha:
- case Maxwell::Blend::Factor::Source1AlphaGL:
+ case Maxwell::Blend::Factor::Source1Alpha_D3D:
+ case Maxwell::Blend::Factor::Source1Alpha_GL:
return GL_SRC1_ALPHA;
- case Maxwell::Blend::Factor::OneMinusSource1Alpha:
- case Maxwell::Blend::Factor::OneMinusSource1AlphaGL:
+ case Maxwell::Blend::Factor::OneMinusSource1Alpha_D3D:
+ case Maxwell::Blend::Factor::OneMinusSource1Alpha_GL:
return GL_ONE_MINUS_SRC1_ALPHA;
- case Maxwell::Blend::Factor::ConstantColor:
- case Maxwell::Blend::Factor::ConstantColorGL:
+ case Maxwell::Blend::Factor::BlendFactor_D3D:
+ case Maxwell::Blend::Factor::ConstantColor_GL:
return GL_CONSTANT_COLOR;
- case Maxwell::Blend::Factor::OneMinusConstantColor:
- case Maxwell::Blend::Factor::OneMinusConstantColorGL:
+ case Maxwell::Blend::Factor::OneMinusBlendFactor_D3D:
+ case Maxwell::Blend::Factor::OneMinusConstantColor_GL:
return GL_ONE_MINUS_CONSTANT_COLOR;
- case Maxwell::Blend::Factor::ConstantAlpha:
- case Maxwell::Blend::Factor::ConstantAlphaGL:
+ case Maxwell::Blend::Factor::BothSourceAlpha_D3D:
+ case Maxwell::Blend::Factor::ConstantAlpha_GL:
return GL_CONSTANT_ALPHA;
- case Maxwell::Blend::Factor::OneMinusConstantAlpha:
- case Maxwell::Blend::Factor::OneMinusConstantAlphaGL:
+ case Maxwell::Blend::Factor::OneMinusBothSourceAlpha_D3D:
+ case Maxwell::Blend::Factor::OneMinusConstantAlpha_GL:
return GL_ONE_MINUS_CONSTANT_ALPHA;
}
UNIMPLEMENTED_MSG("Unimplemented blend factor={}", factor);
@@ -419,60 +430,60 @@ inline GLenum BlendFunc(Maxwell::Blend::Factor factor) {
inline GLenum ComparisonOp(Maxwell::ComparisonOp comparison) {
switch (comparison) {
- case Maxwell::ComparisonOp::Never:
- case Maxwell::ComparisonOp::NeverOld:
+ case Maxwell::ComparisonOp::Never_D3D:
+ case Maxwell::ComparisonOp::Never_GL:
return GL_NEVER;
- case Maxwell::ComparisonOp::Less:
- case Maxwell::ComparisonOp::LessOld:
+ case Maxwell::ComparisonOp::Less_D3D:
+ case Maxwell::ComparisonOp::Less_GL:
return GL_LESS;
- case Maxwell::ComparisonOp::Equal:
- case Maxwell::ComparisonOp::EqualOld:
+ case Maxwell::ComparisonOp::Equal_D3D:
+ case Maxwell::ComparisonOp::Equal_GL:
return GL_EQUAL;
- case Maxwell::ComparisonOp::LessEqual:
- case Maxwell::ComparisonOp::LessEqualOld:
+ case Maxwell::ComparisonOp::LessEqual_D3D:
+ case Maxwell::ComparisonOp::LessEqual_GL:
return GL_LEQUAL;
- case Maxwell::ComparisonOp::Greater:
- case Maxwell::ComparisonOp::GreaterOld:
+ case Maxwell::ComparisonOp::Greater_D3D:
+ case Maxwell::ComparisonOp::Greater_GL:
return GL_GREATER;
- case Maxwell::ComparisonOp::NotEqual:
- case Maxwell::ComparisonOp::NotEqualOld:
+ case Maxwell::ComparisonOp::NotEqual_D3D:
+ case Maxwell::ComparisonOp::NotEqual_GL:
return GL_NOTEQUAL;
- case Maxwell::ComparisonOp::GreaterEqual:
- case Maxwell::ComparisonOp::GreaterEqualOld:
+ case Maxwell::ComparisonOp::GreaterEqual_D3D:
+ case Maxwell::ComparisonOp::GreaterEqual_GL:
return GL_GEQUAL;
- case Maxwell::ComparisonOp::Always:
- case Maxwell::ComparisonOp::AlwaysOld:
+ case Maxwell::ComparisonOp::Always_D3D:
+ case Maxwell::ComparisonOp::Always_GL:
return GL_ALWAYS;
}
UNIMPLEMENTED_MSG("Unimplemented comparison op={}", comparison);
return GL_ALWAYS;
}
-inline GLenum StencilOp(Maxwell::StencilOp stencil) {
+inline GLenum StencilOp(Maxwell::StencilOp::Op stencil) {
switch (stencil) {
- case Maxwell::StencilOp::Keep:
- case Maxwell::StencilOp::KeepOGL:
+ case Maxwell::StencilOp::Op::Keep_D3D:
+ case Maxwell::StencilOp::Op::Keep_GL:
return GL_KEEP;
- case Maxwell::StencilOp::Zero:
- case Maxwell::StencilOp::ZeroOGL:
+ case Maxwell::StencilOp::Op::Zero_D3D:
+ case Maxwell::StencilOp::Op::Zero_GL:
return GL_ZERO;
- case Maxwell::StencilOp::Replace:
- case Maxwell::StencilOp::ReplaceOGL:
+ case Maxwell::StencilOp::Op::Replace_D3D:
+ case Maxwell::StencilOp::Op::Replace_GL:
return GL_REPLACE;
- case Maxwell::StencilOp::Incr:
- case Maxwell::StencilOp::IncrOGL:
+ case Maxwell::StencilOp::Op::IncrSaturate_D3D:
+ case Maxwell::StencilOp::Op::IncrSaturate_GL:
return GL_INCR;
- case Maxwell::StencilOp::Decr:
- case Maxwell::StencilOp::DecrOGL:
+ case Maxwell::StencilOp::Op::DecrSaturate_D3D:
+ case Maxwell::StencilOp::Op::DecrSaturate_GL:
return GL_DECR;
- case Maxwell::StencilOp::Invert:
- case Maxwell::StencilOp::InvertOGL:
+ case Maxwell::StencilOp::Op::Invert_D3D:
+ case Maxwell::StencilOp::Op::Invert_GL:
return GL_INVERT;
- case Maxwell::StencilOp::IncrWrap:
- case Maxwell::StencilOp::IncrWrapOGL:
+ case Maxwell::StencilOp::Op::Incr_D3D:
+ case Maxwell::StencilOp::Op::Incr_GL:
return GL_INCR_WRAP;
- case Maxwell::StencilOp::DecrWrap:
- case Maxwell::StencilOp::DecrWrapOGL:
+ case Maxwell::StencilOp::Op::Decr_D3D:
+ case Maxwell::StencilOp::Op::Decr_GL:
return GL_DECR_WRAP;
}
UNIMPLEMENTED_MSG("Unimplemented stencil op={}", stencil);
@@ -503,39 +514,39 @@ inline GLenum CullFace(Maxwell::CullFace cull_face) {
return GL_BACK;
}
-inline GLenum LogicOp(Maxwell::LogicOperation operation) {
+inline GLenum LogicOp(Maxwell::LogicOp::Op operation) {
switch (operation) {
- case Maxwell::LogicOperation::Clear:
+ case Maxwell::LogicOp::Op::Clear:
return GL_CLEAR;
- case Maxwell::LogicOperation::And:
+ case Maxwell::LogicOp::Op::And:
return GL_AND;
- case Maxwell::LogicOperation::AndReverse:
+ case Maxwell::LogicOp::Op::AndReverse:
return GL_AND_REVERSE;
- case Maxwell::LogicOperation::Copy:
+ case Maxwell::LogicOp::Op::Copy:
return GL_COPY;
- case Maxwell::LogicOperation::AndInverted:
+ case Maxwell::LogicOp::Op::AndInverted:
return GL_AND_INVERTED;
- case Maxwell::LogicOperation::NoOp:
+ case Maxwell::LogicOp::Op::NoOp:
return GL_NOOP;
- case Maxwell::LogicOperation::Xor:
+ case Maxwell::LogicOp::Op::Xor:
return GL_XOR;
- case Maxwell::LogicOperation::Or:
+ case Maxwell::LogicOp::Op::Or:
return GL_OR;
- case Maxwell::LogicOperation::Nor:
+ case Maxwell::LogicOp::Op::Nor:
return GL_NOR;
- case Maxwell::LogicOperation::Equiv:
+ case Maxwell::LogicOp::Op::Equiv:
return GL_EQUIV;
- case Maxwell::LogicOperation::Invert:
+ case Maxwell::LogicOp::Op::Invert:
return GL_INVERT;
- case Maxwell::LogicOperation::OrReverse:
+ case Maxwell::LogicOp::Op::OrReverse:
return GL_OR_REVERSE;
- case Maxwell::LogicOperation::CopyInverted:
+ case Maxwell::LogicOp::Op::CopyInverted:
return GL_COPY_INVERTED;
- case Maxwell::LogicOperation::OrInverted:
+ case Maxwell::LogicOp::Op::OrInverted:
return GL_OR_INVERTED;
- case Maxwell::LogicOperation::Nand:
+ case Maxwell::LogicOp::Op::Nand:
return GL_NAND;
- case Maxwell::LogicOperation::Set:
+ case Maxwell::LogicOp::Op::Set:
return GL_SET;
}
UNIMPLEMENTED_MSG("Unimplemented logic operation={}", operation);
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index 34f3f7a67..8bd5eba7e 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -131,7 +131,7 @@ RendererOpenGL::RendererOpenGL(Core::TelemetrySession& telemetry_session_,
Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_,
std::unique_ptr<Core::Frontend::GraphicsContext> context_)
: RendererBase{emu_window_, std::move(context_)}, telemetry_session{telemetry_session_},
- emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, state_tracker{gpu},
+ emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, state_tracker{},
program_manager{device},
rasterizer(emu_window, gpu, cpu_memory, device, screen_info, program_manager, state_tracker) {
if (Settings::values.renderer_debug && GLAD_GL_KHR_debug) {
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
index 733b454de..f85ed8e5b 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
@@ -34,14 +34,15 @@ constexpr std::array POLYGON_OFFSET_ENABLE_LUT = {
};
void RefreshXfbState(VideoCommon::TransformFeedbackState& state, const Maxwell& regs) {
- std::ranges::transform(regs.tfb_layouts, state.layouts.begin(), [](const auto& layout) {
- return VideoCommon::TransformFeedbackState::Layout{
- .stream = layout.stream,
- .varying_count = layout.varying_count,
- .stride = layout.stride,
- };
- });
- state.varyings = regs.tfb_varying_locs;
+ std::ranges::transform(regs.transform_feedback.controls, state.layouts.begin(),
+ [](const auto& layout) {
+ return VideoCommon::TransformFeedbackState::Layout{
+ .stream = layout.stream,
+ .varying_count = layout.varying_count,
+ .stride = layout.stride,
+ };
+ });
+ state.varyings = regs.stream_out_layout;
}
} // Anonymous namespace
@@ -58,33 +59,38 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
raw1 = 0;
extended_dynamic_state.Assign(has_extended_dynamic_state ? 1 : 0);
dynamic_vertex_input.Assign(has_dynamic_vertex_input ? 1 : 0);
- xfb_enabled.Assign(regs.tfb_enabled != 0);
+ xfb_enabled.Assign(regs.transform_feedback_enabled != 0);
primitive_restart_enable.Assign(regs.primitive_restart.enabled != 0 ? 1 : 0);
depth_bias_enable.Assign(enabled_lut[POLYGON_OFFSET_ENABLE_LUT[topology_index]] != 0 ? 1 : 0);
- depth_clamp_disabled.Assign(regs.view_volume_clip_control.depth_clamp_disabled.Value());
+ depth_clamp_disabled.Assign(regs.viewport_clip_control.geometry_clip ==
+ Maxwell::ViewportClipControl::GeometryClip::Passthrough ||
+ regs.viewport_clip_control.geometry_clip ==
+ Maxwell::ViewportClipControl::GeometryClip::FrustumXYZ ||
+ regs.viewport_clip_control.geometry_clip ==
+ Maxwell::ViewportClipControl::GeometryClip::FrustumZ);
ndc_minus_one_to_one.Assign(regs.depth_mode == Maxwell::DepthMode::MinusOneToOne ? 1 : 0);
polygon_mode.Assign(PackPolygonMode(regs.polygon_mode_front));
patch_control_points_minus_one.Assign(regs.patch_vertices - 1);
- tessellation_primitive.Assign(static_cast<u32>(regs.tess_mode.prim.Value()));
- tessellation_spacing.Assign(static_cast<u32>(regs.tess_mode.spacing.Value()));
- tessellation_clockwise.Assign(regs.tess_mode.cw.Value());
+ tessellation_primitive.Assign(static_cast<u32>(regs.tessellation.params.domain_type.Value()));
+ tessellation_spacing.Assign(static_cast<u32>(regs.tessellation.params.spacing.Value()));
+ tessellation_clockwise.Assign(regs.tessellation.params.output_primitives.Value() ==
+ Maxwell::Tessellation::OutputPrimitives::Triangles_CW);
logic_op_enable.Assign(regs.logic_op.enable != 0 ? 1 : 0);
- logic_op.Assign(PackLogicOp(regs.logic_op.operation));
+ logic_op.Assign(PackLogicOp(regs.logic_op.op));
topology.Assign(regs.draw.topology);
- msaa_mode.Assign(regs.multisample_mode);
+ msaa_mode.Assign(regs.anti_alias_samples_mode);
raw2 = 0;
rasterize_enable.Assign(regs.rasterize_enable != 0 ? 1 : 0);
const auto test_func =
- regs.alpha_test_enabled != 0 ? regs.alpha_test_func : Maxwell::ComparisonOp::Always;
+ regs.alpha_test_enabled != 0 ? regs.alpha_test_func : Maxwell::ComparisonOp::Always_GL;
alpha_test_func.Assign(PackComparisonOp(test_func));
- early_z.Assign(regs.force_early_fragment_tests != 0 ? 1 : 0);
+ early_z.Assign(regs.mandated_early_z != 0 ? 1 : 0);
depth_enabled.Assign(regs.zeta_enable != 0 ? 1 : 0);
depth_format.Assign(static_cast<u32>(regs.zeta.format));
- y_negate.Assign(regs.screen_y_control.y_negate != 0 ? 1 : 0);
- provoking_vertex_last.Assign(regs.provoking_vertex_last != 0 ? 1 : 0);
- conservative_raster_enable.Assign(regs.conservative_raster_enable != 0 ? 1 : 0);
- smooth_lines.Assign(regs.line_smooth_enable != 0 ? 1 : 0);
+ y_negate.Assign(regs.window_origin.mode != Maxwell::WindowOrigin::Mode::UpperLeft ? 1 : 0);
+ provoking_vertex_last.Assign(regs.provoking_vertex == Maxwell::ProvokingVertex::Last ? 1 : 0);
+ smooth_lines.Assign(regs.line_anti_alias_enable != 0 ? 1 : 0);
for (size_t i = 0; i < regs.rt.size(); ++i) {
color_formats[i] = static_cast<u8>(regs.rt[i].format);
@@ -116,8 +122,8 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
maxwell3d.dirty.flags[Dirty::VertexInput] = false;
enabled_divisors = 0;
for (size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
- const bool is_enabled = regs.instanced_arrays.IsInstancingEnabled(index);
- binding_divisors[index] = is_enabled ? regs.vertex_array[index].divisor : 0;
+ const bool is_enabled = regs.vertex_stream_instances.IsInstancingEnabled(index);
+ binding_divisors[index] = is_enabled ? regs.vertex_streams[index].frequency : 0;
enabled_divisors |= (is_enabled ? u64{1} : 0) << index;
}
for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
@@ -164,17 +170,17 @@ void FixedPipelineState::BlendingAttachment::Refresh(const Maxwell& regs, size_t
// TODO: C++20 Use templated lambda to deduplicate code
- if (!regs.independent_blend_enable) {
- const auto& src = regs.blend;
- if (!src.enable[index]) {
+ if (!regs.blend_per_target_enabled) {
+ if (!regs.blend.enable[index]) {
return;
}
- equation_rgb.Assign(PackBlendEquation(src.equation_rgb));
- equation_a.Assign(PackBlendEquation(src.equation_a));
- factor_source_rgb.Assign(PackBlendFactor(src.factor_source_rgb));
- factor_dest_rgb.Assign(PackBlendFactor(src.factor_dest_rgb));
- factor_source_a.Assign(PackBlendFactor(src.factor_source_a));
- factor_dest_a.Assign(PackBlendFactor(src.factor_dest_a));
+ const auto& src = regs.blend;
+ equation_rgb.Assign(PackBlendEquation(src.color_op));
+ equation_a.Assign(PackBlendEquation(src.alpha_op));
+ factor_source_rgb.Assign(PackBlendFactor(src.color_source));
+ factor_dest_rgb.Assign(PackBlendFactor(src.color_dest));
+ factor_source_a.Assign(PackBlendFactor(src.alpha_source));
+ factor_dest_a.Assign(PackBlendFactor(src.alpha_dest));
enable.Assign(1);
return;
}
@@ -182,34 +188,34 @@ void FixedPipelineState::BlendingAttachment::Refresh(const Maxwell& regs, size_t
if (!regs.blend.enable[index]) {
return;
}
- const auto& src = regs.independent_blend[index];
- equation_rgb.Assign(PackBlendEquation(src.equation_rgb));
- equation_a.Assign(PackBlendEquation(src.equation_a));
- factor_source_rgb.Assign(PackBlendFactor(src.factor_source_rgb));
- factor_dest_rgb.Assign(PackBlendFactor(src.factor_dest_rgb));
- factor_source_a.Assign(PackBlendFactor(src.factor_source_a));
- factor_dest_a.Assign(PackBlendFactor(src.factor_dest_a));
+ const auto& src = regs.blend_per_target[index];
+ equation_rgb.Assign(PackBlendEquation(src.color_op));
+ equation_a.Assign(PackBlendEquation(src.alpha_op));
+ factor_source_rgb.Assign(PackBlendFactor(src.color_source));
+ factor_dest_rgb.Assign(PackBlendFactor(src.color_dest));
+ factor_source_a.Assign(PackBlendFactor(src.alpha_source));
+ factor_dest_a.Assign(PackBlendFactor(src.alpha_dest));
enable.Assign(1);
}
void FixedPipelineState::DynamicState::Refresh(const Maxwell& regs) {
- u32 packed_front_face = PackFrontFace(regs.front_face);
- if (regs.screen_y_control.triangle_rast_flip != 0) {
+ u32 packed_front_face = PackFrontFace(regs.gl_front_face);
+ if (regs.window_origin.flip_y != 0) {
// Flip front face
packed_front_face = 1 - packed_front_face;
}
raw1 = 0;
raw2 = 0;
- front.action_stencil_fail.Assign(PackStencilOp(regs.stencil_front_op_fail));
- front.action_depth_fail.Assign(PackStencilOp(regs.stencil_front_op_zfail));
- front.action_depth_pass.Assign(PackStencilOp(regs.stencil_front_op_zpass));
- front.test_func.Assign(PackComparisonOp(regs.stencil_front_func_func));
+ front.action_stencil_fail.Assign(PackStencilOp(regs.stencil_front_op.fail));
+ front.action_depth_fail.Assign(PackStencilOp(regs.stencil_front_op.zfail));
+ front.action_depth_pass.Assign(PackStencilOp(regs.stencil_front_op.zpass));
+ front.test_func.Assign(PackComparisonOp(regs.stencil_front_op.func));
if (regs.stencil_two_side_enable) {
- back.action_stencil_fail.Assign(PackStencilOp(regs.stencil_back_op_fail));
- back.action_depth_fail.Assign(PackStencilOp(regs.stencil_back_op_zfail));
- back.action_depth_pass.Assign(PackStencilOp(regs.stencil_back_op_zpass));
- back.test_func.Assign(PackComparisonOp(regs.stencil_back_func_func));
+ back.action_stencil_fail.Assign(PackStencilOp(regs.stencil_back_op.fail));
+ back.action_depth_fail.Assign(PackStencilOp(regs.stencil_back_op.zfail));
+ back.action_depth_pass.Assign(PackStencilOp(regs.stencil_back_op.zpass));
+ back.test_func.Assign(PackComparisonOp(regs.stencil_back_op.func));
} else {
back.action_stencil_fail.Assign(front.action_stencil_fail);
back.action_depth_fail.Assign(front.action_depth_fail);
@@ -222,9 +228,9 @@ void FixedPipelineState::DynamicState::Refresh(const Maxwell& regs) {
depth_test_enable.Assign(regs.depth_test_enable);
front_face.Assign(packed_front_face);
depth_test_func.Assign(PackComparisonOp(regs.depth_test_func));
- cull_face.Assign(PackCullFace(regs.cull_face));
- cull_enable.Assign(regs.cull_test_enabled != 0 ? 1 : 0);
- std::ranges::transform(regs.vertex_array, vertex_strides.begin(), [](const auto& array) {
+ cull_face.Assign(PackCullFace(regs.gl_cull_face));
+ cull_enable.Assign(regs.gl_cull_test_enabled != 0 ? 1 : 0);
+ std::ranges::transform(regs.vertex_streams, vertex_strides.begin(), [](const auto& array) {
return static_cast<u16>(array.stride.Value());
});
}
@@ -251,41 +257,42 @@ Maxwell::ComparisonOp FixedPipelineState::UnpackComparisonOp(u32 packed) noexcep
return static_cast<Maxwell::ComparisonOp>(packed + 1);
}
-u32 FixedPipelineState::PackStencilOp(Maxwell::StencilOp op) noexcept {
+u32 FixedPipelineState::PackStencilOp(Maxwell::StencilOp::Op op) noexcept {
switch (op) {
- case Maxwell::StencilOp::Keep:
- case Maxwell::StencilOp::KeepOGL:
+ case Maxwell::StencilOp::Op::Keep_D3D:
+ case Maxwell::StencilOp::Op::Keep_GL:
return 0;
- case Maxwell::StencilOp::Zero:
- case Maxwell::StencilOp::ZeroOGL:
+ case Maxwell::StencilOp::Op::Zero_D3D:
+ case Maxwell::StencilOp::Op::Zero_GL:
return 1;
- case Maxwell::StencilOp::Replace:
- case Maxwell::StencilOp::ReplaceOGL:
+ case Maxwell::StencilOp::Op::Replace_D3D:
+ case Maxwell::StencilOp::Op::Replace_GL:
return 2;
- case Maxwell::StencilOp::Incr:
- case Maxwell::StencilOp::IncrOGL:
+ case Maxwell::StencilOp::Op::IncrSaturate_D3D:
+ case Maxwell::StencilOp::Op::IncrSaturate_GL:
return 3;
- case Maxwell::StencilOp::Decr:
- case Maxwell::StencilOp::DecrOGL:
+ case Maxwell::StencilOp::Op::DecrSaturate_D3D:
+ case Maxwell::StencilOp::Op::DecrSaturate_GL:
return 4;
- case Maxwell::StencilOp::Invert:
- case Maxwell::StencilOp::InvertOGL:
+ case Maxwell::StencilOp::Op::Invert_D3D:
+ case Maxwell::StencilOp::Op::Invert_GL:
return 5;
- case Maxwell::StencilOp::IncrWrap:
- case Maxwell::StencilOp::IncrWrapOGL:
+ case Maxwell::StencilOp::Op::Incr_D3D:
+ case Maxwell::StencilOp::Op::Incr_GL:
return 6;
- case Maxwell::StencilOp::DecrWrap:
- case Maxwell::StencilOp::DecrWrapOGL:
+ case Maxwell::StencilOp::Op::Decr_D3D:
+ case Maxwell::StencilOp::Op::Decr_GL:
return 7;
}
return 0;
}
-Maxwell::StencilOp FixedPipelineState::UnpackStencilOp(u32 packed) noexcept {
- static constexpr std::array LUT = {Maxwell::StencilOp::Keep, Maxwell::StencilOp::Zero,
- Maxwell::StencilOp::Replace, Maxwell::StencilOp::Incr,
- Maxwell::StencilOp::Decr, Maxwell::StencilOp::Invert,
- Maxwell::StencilOp::IncrWrap, Maxwell::StencilOp::DecrWrap};
+Maxwell::StencilOp::Op FixedPipelineState::UnpackStencilOp(u32 packed) noexcept {
+ static constexpr std::array LUT = {
+ Maxwell::StencilOp::Op::Keep_D3D, Maxwell::StencilOp::Op::Zero_D3D,
+ Maxwell::StencilOp::Op::Replace_D3D, Maxwell::StencilOp::Op::IncrSaturate_D3D,
+ Maxwell::StencilOp::Op::DecrSaturate_D3D, Maxwell::StencilOp::Op::Invert_D3D,
+ Maxwell::StencilOp::Op::Incr_D3D, Maxwell::StencilOp::Op::Decr_D3D};
return LUT[packed];
}
@@ -318,30 +325,30 @@ Maxwell::PolygonMode FixedPipelineState::UnpackPolygonMode(u32 packed) noexcept
return static_cast<Maxwell::PolygonMode>(packed + 0x1B00);
}
-u32 FixedPipelineState::PackLogicOp(Maxwell::LogicOperation op) noexcept {
+u32 FixedPipelineState::PackLogicOp(Maxwell::LogicOp::Op op) noexcept {
return static_cast<u32>(op) - 0x1500;
}
-Maxwell::LogicOperation FixedPipelineState::UnpackLogicOp(u32 packed) noexcept {
- return static_cast<Maxwell::LogicOperation>(packed + 0x1500);
+Maxwell::LogicOp::Op FixedPipelineState::UnpackLogicOp(u32 packed) noexcept {
+ return static_cast<Maxwell::LogicOp::Op>(packed + 0x1500);
}
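
The 0x1500 bias in PackLogicOp()/UnpackLogicOp() works because the hardware uses the GL logic-op token values, which run contiguously from GL_CLEAR (0x1500) to GL_SET (0x150F); subtracting the base therefore packs the op into four bits losslessly. A sketch verifying the round-trip:

```cpp
#include <cassert>

// GL logic-op tokens are contiguous: GL_CLEAR == 0x1500 ... GL_SET == 0x150F.
enum class Op : unsigned { Clear = 0x1500, And = 0x1501, Set = 0x150F };

constexpr unsigned Pack(Op op) { return static_cast<unsigned>(op) - 0x1500; }
constexpr Op Unpack(unsigned packed) { return static_cast<Op>(packed + 0x1500); }

static_assert(Pack(Op::Set) == 0xF, "all sixteen ops fit in four bits");
static_assert(Unpack(Pack(Op::And)) == Op::And, "lossless round-trip");

int main() {
    assert(Unpack(Pack(Op::Clear)) == Op::Clear);
}
```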
u32 FixedPipelineState::PackBlendEquation(Maxwell::Blend::Equation equation) noexcept {
switch (equation) {
- case Maxwell::Blend::Equation::Add:
- case Maxwell::Blend::Equation::AddGL:
+ case Maxwell::Blend::Equation::Add_D3D:
+ case Maxwell::Blend::Equation::Add_GL:
return 0;
- case Maxwell::Blend::Equation::Subtract:
- case Maxwell::Blend::Equation::SubtractGL:
+ case Maxwell::Blend::Equation::Subtract_D3D:
+ case Maxwell::Blend::Equation::Subtract_GL:
return 1;
- case Maxwell::Blend::Equation::ReverseSubtract:
- case Maxwell::Blend::Equation::ReverseSubtractGL:
+ case Maxwell::Blend::Equation::ReverseSubtract_D3D:
+ case Maxwell::Blend::Equation::ReverseSubtract_GL:
return 2;
- case Maxwell::Blend::Equation::Min:
- case Maxwell::Blend::Equation::MinGL:
+ case Maxwell::Blend::Equation::Min_D3D:
+ case Maxwell::Blend::Equation::Min_GL:
return 3;
- case Maxwell::Blend::Equation::Max:
- case Maxwell::Blend::Equation::MaxGL:
+ case Maxwell::Blend::Equation::Max_D3D:
+ case Maxwell::Blend::Equation::Max_GL:
return 4;
}
return 0;
@@ -349,97 +356,99 @@ u32 FixedPipelineState::PackBlendEquation(Maxwell::Blend::Equation equation) noe
Maxwell::Blend::Equation FixedPipelineState::UnpackBlendEquation(u32 packed) noexcept {
static constexpr std::array LUT = {
- Maxwell::Blend::Equation::Add, Maxwell::Blend::Equation::Subtract,
- Maxwell::Blend::Equation::ReverseSubtract, Maxwell::Blend::Equation::Min,
- Maxwell::Blend::Equation::Max};
+ Maxwell::Blend::Equation::Add_D3D, Maxwell::Blend::Equation::Subtract_D3D,
+ Maxwell::Blend::Equation::ReverseSubtract_D3D, Maxwell::Blend::Equation::Min_D3D,
+ Maxwell::Blend::Equation::Max_D3D};
return LUT[packed];
}
u32 FixedPipelineState::PackBlendFactor(Maxwell::Blend::Factor factor) noexcept {
switch (factor) {
- case Maxwell::Blend::Factor::Zero:
- case Maxwell::Blend::Factor::ZeroGL:
+ case Maxwell::Blend::Factor::Zero_D3D:
+ case Maxwell::Blend::Factor::Zero_GL:
return 0;
- case Maxwell::Blend::Factor::One:
- case Maxwell::Blend::Factor::OneGL:
+ case Maxwell::Blend::Factor::One_D3D:
+ case Maxwell::Blend::Factor::One_GL:
return 1;
- case Maxwell::Blend::Factor::SourceColor:
- case Maxwell::Blend::Factor::SourceColorGL:
+ case Maxwell::Blend::Factor::SourceColor_D3D:
+ case Maxwell::Blend::Factor::SourceColor_GL:
return 2;
- case Maxwell::Blend::Factor::OneMinusSourceColor:
- case Maxwell::Blend::Factor::OneMinusSourceColorGL:
+ case Maxwell::Blend::Factor::OneMinusSourceColor_D3D:
+ case Maxwell::Blend::Factor::OneMinusSourceColor_GL:
return 3;
- case Maxwell::Blend::Factor::SourceAlpha:
- case Maxwell::Blend::Factor::SourceAlphaGL:
+ case Maxwell::Blend::Factor::SourceAlpha_D3D:
+ case Maxwell::Blend::Factor::SourceAlpha_GL:
return 4;
- case Maxwell::Blend::Factor::OneMinusSourceAlpha:
- case Maxwell::Blend::Factor::OneMinusSourceAlphaGL:
+ case Maxwell::Blend::Factor::OneMinusSourceAlpha_D3D:
+ case Maxwell::Blend::Factor::OneMinusSourceAlpha_GL:
return 5;
- case Maxwell::Blend::Factor::DestAlpha:
- case Maxwell::Blend::Factor::DestAlphaGL:
+ case Maxwell::Blend::Factor::DestAlpha_D3D:
+ case Maxwell::Blend::Factor::DestAlpha_GL:
return 6;
- case Maxwell::Blend::Factor::OneMinusDestAlpha:
- case Maxwell::Blend::Factor::OneMinusDestAlphaGL:
+ case Maxwell::Blend::Factor::OneMinusDestAlpha_D3D:
+ case Maxwell::Blend::Factor::OneMinusDestAlpha_GL:
return 7;
- case Maxwell::Blend::Factor::DestColor:
- case Maxwell::Blend::Factor::DestColorGL:
+ case Maxwell::Blend::Factor::DestColor_D3D:
+ case Maxwell::Blend::Factor::DestColor_GL:
return 8;
- case Maxwell::Blend::Factor::OneMinusDestColor:
- case Maxwell::Blend::Factor::OneMinusDestColorGL:
+ case Maxwell::Blend::Factor::OneMinusDestColor_D3D:
+ case Maxwell::Blend::Factor::OneMinusDestColor_GL:
return 9;
- case Maxwell::Blend::Factor::SourceAlphaSaturate:
- case Maxwell::Blend::Factor::SourceAlphaSaturateGL:
+ case Maxwell::Blend::Factor::SourceAlphaSaturate_D3D:
+ case Maxwell::Blend::Factor::SourceAlphaSaturate_GL:
return 10;
- case Maxwell::Blend::Factor::Source1Color:
- case Maxwell::Blend::Factor::Source1ColorGL:
+ case Maxwell::Blend::Factor::Source1Color_D3D:
+ case Maxwell::Blend::Factor::Source1Color_GL:
return 11;
- case Maxwell::Blend::Factor::OneMinusSource1Color:
- case Maxwell::Blend::Factor::OneMinusSource1ColorGL:
+ case Maxwell::Blend::Factor::OneMinusSource1Color_D3D:
+ case Maxwell::Blend::Factor::OneMinusSource1Color_GL:
return 12;
- case Maxwell::Blend::Factor::Source1Alpha:
- case Maxwell::Blend::Factor::Source1AlphaGL:
+ case Maxwell::Blend::Factor::Source1Alpha_D3D:
+ case Maxwell::Blend::Factor::Source1Alpha_GL:
return 13;
- case Maxwell::Blend::Factor::OneMinusSource1Alpha:
- case Maxwell::Blend::Factor::OneMinusSource1AlphaGL:
+ case Maxwell::Blend::Factor::OneMinusSource1Alpha_D3D:
+ case Maxwell::Blend::Factor::OneMinusSource1Alpha_GL:
return 14;
- case Maxwell::Blend::Factor::ConstantColor:
- case Maxwell::Blend::Factor::ConstantColorGL:
+ case Maxwell::Blend::Factor::BlendFactor_D3D:
+ case Maxwell::Blend::Factor::ConstantColor_GL:
return 15;
- case Maxwell::Blend::Factor::OneMinusConstantColor:
- case Maxwell::Blend::Factor::OneMinusConstantColorGL:
+ case Maxwell::Blend::Factor::OneMinusBlendFactor_D3D:
+ case Maxwell::Blend::Factor::OneMinusConstantColor_GL:
return 16;
- case Maxwell::Blend::Factor::ConstantAlpha:
- case Maxwell::Blend::Factor::ConstantAlphaGL:
+ case Maxwell::Blend::Factor::BothSourceAlpha_D3D:
+ case Maxwell::Blend::Factor::ConstantAlpha_GL:
return 17;
- case Maxwell::Blend::Factor::OneMinusConstantAlpha:
- case Maxwell::Blend::Factor::OneMinusConstantAlphaGL:
+ case Maxwell::Blend::Factor::OneMinusBothSourceAlpha_D3D:
+ case Maxwell::Blend::Factor::OneMinusConstantAlpha_GL:
return 18;
}
+ UNIMPLEMENTED_MSG("Unknown blend factor {}", static_cast<u32>(factor));
return 0;
}
Maxwell::Blend::Factor FixedPipelineState::UnpackBlendFactor(u32 packed) noexcept {
static constexpr std::array LUT = {
- Maxwell::Blend::Factor::Zero,
- Maxwell::Blend::Factor::One,
- Maxwell::Blend::Factor::SourceColor,
- Maxwell::Blend::Factor::OneMinusSourceColor,
- Maxwell::Blend::Factor::SourceAlpha,
- Maxwell::Blend::Factor::OneMinusSourceAlpha,
- Maxwell::Blend::Factor::DestAlpha,
- Maxwell::Blend::Factor::OneMinusDestAlpha,
- Maxwell::Blend::Factor::DestColor,
- Maxwell::Blend::Factor::OneMinusDestColor,
- Maxwell::Blend::Factor::SourceAlphaSaturate,
- Maxwell::Blend::Factor::Source1Color,
- Maxwell::Blend::Factor::OneMinusSource1Color,
- Maxwell::Blend::Factor::Source1Alpha,
- Maxwell::Blend::Factor::OneMinusSource1Alpha,
- Maxwell::Blend::Factor::ConstantColor,
- Maxwell::Blend::Factor::OneMinusConstantColor,
- Maxwell::Blend::Factor::ConstantAlpha,
- Maxwell::Blend::Factor::OneMinusConstantAlpha,
+ Maxwell::Blend::Factor::Zero_D3D,
+ Maxwell::Blend::Factor::One_D3D,
+ Maxwell::Blend::Factor::SourceColor_D3D,
+ Maxwell::Blend::Factor::OneMinusSourceColor_D3D,
+ Maxwell::Blend::Factor::SourceAlpha_D3D,
+ Maxwell::Blend::Factor::OneMinusSourceAlpha_D3D,
+ Maxwell::Blend::Factor::DestAlpha_D3D,
+ Maxwell::Blend::Factor::OneMinusDestAlpha_D3D,
+ Maxwell::Blend::Factor::DestColor_D3D,
+ Maxwell::Blend::Factor::OneMinusDestColor_D3D,
+ Maxwell::Blend::Factor::SourceAlphaSaturate_D3D,
+ Maxwell::Blend::Factor::Source1Color_D3D,
+ Maxwell::Blend::Factor::OneMinusSource1Color_D3D,
+ Maxwell::Blend::Factor::Source1Alpha_D3D,
+ Maxwell::Blend::Factor::OneMinusSource1Alpha_D3D,
+ Maxwell::Blend::Factor::BlendFactor_D3D,
+ Maxwell::Blend::Factor::OneMinusBlendFactor_D3D,
+ Maxwell::Blend::Factor::BothSourceAlpha_D3D,
+ Maxwell::Blend::Factor::OneMinusBothSourceAlpha_D3D,
};
+ ASSERT(packed < LUT.size());
return LUT[packed];
}
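
Note the asymmetry these LUTs introduce: packing accepts either encoding, but unpacking always returns the _D3D spelling, so a GL-encoded register value round-trips to its canonical D3D twin; the new ASSERT guards the LUT bound for out-of-range packed values. A reduced sketch of that canonicalisation:

```cpp
#include <array>

// Packing folds both encodings onto one index; unpacking yields the *_D3D
// spelling, mirroring PackBlendFactor()/UnpackBlendFactor() above.
enum class Factor { Zero_D3D, Zero_GL, One_D3D, One_GL };

constexpr unsigned Pack(Factor factor) {
    switch (factor) {
    case Factor::Zero_D3D:
    case Factor::Zero_GL:
        return 0;
    case Factor::One_D3D:
    case Factor::One_GL:
        return 1;
    }
    return 0;
}

constexpr Factor Unpack(unsigned packed) {
    constexpr std::array lut{Factor::Zero_D3D, Factor::One_D3D};
    return lut[packed]; // caller guarantees packed < lut.size(), as ASSERTed above
}

static_assert(Unpack(Pack(Factor::One_GL)) == Factor::One_D3D); // canonical twin

int main() {}
```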
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.h b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
index 9d60756e5..43441209c 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.h
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
@@ -21,8 +21,8 @@ struct FixedPipelineState {
static u32 PackComparisonOp(Maxwell::ComparisonOp op) noexcept;
static Maxwell::ComparisonOp UnpackComparisonOp(u32 packed) noexcept;
- static u32 PackStencilOp(Maxwell::StencilOp op) noexcept;
- static Maxwell::StencilOp UnpackStencilOp(u32 packed) noexcept;
+ static u32 PackStencilOp(Maxwell::StencilOp::Op op) noexcept;
+ static Maxwell::StencilOp::Op UnpackStencilOp(u32 packed) noexcept;
static u32 PackCullFace(Maxwell::CullFace cull) noexcept;
static Maxwell::CullFace UnpackCullFace(u32 packed) noexcept;
@@ -33,8 +33,8 @@ struct FixedPipelineState {
static u32 PackPolygonMode(Maxwell::PolygonMode mode) noexcept;
static Maxwell::PolygonMode UnpackPolygonMode(u32 packed) noexcept;
- static u32 PackLogicOp(Maxwell::LogicOperation op) noexcept;
- static Maxwell::LogicOperation UnpackLogicOp(u32 packed) noexcept;
+ static u32 PackLogicOp(Maxwell::LogicOp::Op op) noexcept;
+ static Maxwell::LogicOp::Op UnpackLogicOp(u32 packed) noexcept;
static u32 PackBlendEquation(Maxwell::Blend::Equation equation) noexcept;
static Maxwell::Blend::Equation UnpackBlendEquation(u32 packed) noexcept;
@@ -113,15 +113,15 @@ struct FixedPipelineState {
BitField<Position + 6, 3, u32> action_depth_pass;
BitField<Position + 9, 3, u32> test_func;
- Maxwell::StencilOp ActionStencilFail() const noexcept {
+ Maxwell::StencilOp::Op ActionStencilFail() const noexcept {
return UnpackStencilOp(action_stencil_fail);
}
- Maxwell::StencilOp ActionDepthFail() const noexcept {
+ Maxwell::StencilOp::Op ActionDepthFail() const noexcept {
return UnpackStencilOp(action_depth_fail);
}
- Maxwell::StencilOp ActionDepthPass() const noexcept {
+ Maxwell::StencilOp::Op ActionDepthPass() const noexcept {
return UnpackStencilOp(action_depth_pass);
}
@@ -193,7 +193,6 @@ struct FixedPipelineState {
BitField<6, 5, u32> depth_format;
BitField<11, 1, u32> y_negate;
BitField<12, 1, u32> provoking_vertex_last;
- BitField<13, 1, u32> conservative_raster_enable;
BitField<14, 1, u32> smooth_lines;
};
std::array<u8, Maxwell::NumRenderTargets> color_formats;
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index bdb71dc53..5c156087b 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -184,7 +184,7 @@ struct FormatTuple {
{VK_FORMAT_BC3_SRGB_BLOCK}, // BC3_SRGB
{VK_FORMAT_BC7_SRGB_BLOCK}, // BC7_SRGB
{VK_FORMAT_R4G4B4A4_UNORM_PACK16, Attachable}, // A4B4G4R4_UNORM
- {VK_FORMAT_R4G4_UNORM_PACK8}, // R4G4_UNORM
+ {VK_FORMAT_R4G4_UNORM_PACK8}, // G4R4_UNORM
{VK_FORMAT_ASTC_4x4_SRGB_BLOCK}, // ASTC_2D_4X4_SRGB
{VK_FORMAT_ASTC_8x8_SRGB_BLOCK}, // ASTC_2D_8X8_SRGB
{VK_FORMAT_ASTC_8x5_SRGB_BLOCK}, // ASTC_2D_8X5_SRGB
@@ -196,6 +196,8 @@ struct FormatTuple {
{VK_FORMAT_ASTC_6x6_UNORM_BLOCK}, // ASTC_2D_6X6_UNORM
{VK_FORMAT_ASTC_6x6_SRGB_BLOCK}, // ASTC_2D_6X6_SRGB
{VK_FORMAT_ASTC_10x6_UNORM_BLOCK}, // ASTC_2D_10X6_UNORM
+ {VK_FORMAT_ASTC_10x5_UNORM_BLOCK}, // ASTC_2D_10X5_UNORM
+ {VK_FORMAT_ASTC_10x5_SRGB_BLOCK}, // ASTC_2D_10X5_SRGB
{VK_FORMAT_ASTC_10x10_UNORM_BLOCK}, // ASTC_2D_10X10_UNORM
{VK_FORMAT_ASTC_10x10_SRGB_BLOCK}, // ASTC_2D_10X10_SRGB
{VK_FORMAT_ASTC_12x12_UNORM_BLOCK}, // ASTC_2D_12X12_UNORM
@@ -321,161 +323,182 @@ VkFormat VertexFormat(const Device& device, Maxwell::VertexAttribute::Type type,
Maxwell::VertexAttribute::Size size) {
const VkFormat format{([&]() {
switch (type) {
- case Maxwell::VertexAttribute::Type::UnsignedNorm:
+ case Maxwell::VertexAttribute::Type::UnusedEnumDoNotUseBecauseItWillGoAway:
+ ASSERT_MSG(false, "Invalid vertex attribute type!");
+ break;
+ case Maxwell::VertexAttribute::Type::UNorm:
switch (size) {
- case Maxwell::VertexAttribute::Size::Size_8:
+ case Maxwell::VertexAttribute::Size::Size_R8:
+ case Maxwell::VertexAttribute::Size::Size_A8:
return VK_FORMAT_R8_UNORM;
- case Maxwell::VertexAttribute::Size::Size_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8:
+ case Maxwell::VertexAttribute::Size::Size_G8_R8:
return VK_FORMAT_R8G8_UNORM;
- case Maxwell::VertexAttribute::Size::Size_8_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8_B8:
return VK_FORMAT_R8G8B8_UNORM;
- case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8_B8_A8:
+ case Maxwell::VertexAttribute::Size::Size_X8_B8_G8_R8:
return VK_FORMAT_R8G8B8A8_UNORM;
- case Maxwell::VertexAttribute::Size::Size_16:
+ case Maxwell::VertexAttribute::Size::Size_R16:
return VK_FORMAT_R16_UNORM;
- case Maxwell::VertexAttribute::Size::Size_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16:
return VK_FORMAT_R16G16_UNORM;
- case Maxwell::VertexAttribute::Size::Size_16_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16:
return VK_FORMAT_R16G16B16_UNORM;
- case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16:
return VK_FORMAT_R16G16B16A16_UNORM;
- case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
+ case Maxwell::VertexAttribute::Size::Size_A2_B10_G10_R10:
return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
default:
break;
}
break;
- case Maxwell::VertexAttribute::Type::SignedNorm:
+ case Maxwell::VertexAttribute::Type::SNorm:
switch (size) {
- case Maxwell::VertexAttribute::Size::Size_8:
+ case Maxwell::VertexAttribute::Size::Size_R8:
+ case Maxwell::VertexAttribute::Size::Size_A8:
return VK_FORMAT_R8_SNORM;
- case Maxwell::VertexAttribute::Size::Size_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8:
+ case Maxwell::VertexAttribute::Size::Size_G8_R8:
return VK_FORMAT_R8G8_SNORM;
- case Maxwell::VertexAttribute::Size::Size_8_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8_B8:
return VK_FORMAT_R8G8B8_SNORM;
- case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8_B8_A8:
+ case Maxwell::VertexAttribute::Size::Size_X8_B8_G8_R8:
return VK_FORMAT_R8G8B8A8_SNORM;
- case Maxwell::VertexAttribute::Size::Size_16:
+ case Maxwell::VertexAttribute::Size::Size_R16:
return VK_FORMAT_R16_SNORM;
- case Maxwell::VertexAttribute::Size::Size_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16:
return VK_FORMAT_R16G16_SNORM;
- case Maxwell::VertexAttribute::Size::Size_16_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16:
return VK_FORMAT_R16G16B16_SNORM;
- case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16:
return VK_FORMAT_R16G16B16A16_SNORM;
- case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
+ case Maxwell::VertexAttribute::Size::Size_A2_B10_G10_R10:
return VK_FORMAT_A2B10G10R10_SNORM_PACK32;
default:
break;
}
break;
- case Maxwell::VertexAttribute::Type::UnsignedScaled:
+ case Maxwell::VertexAttribute::Type::UScaled:
switch (size) {
- case Maxwell::VertexAttribute::Size::Size_8:
+ case Maxwell::VertexAttribute::Size::Size_R8:
+ case Maxwell::VertexAttribute::Size::Size_A8:
return VK_FORMAT_R8_USCALED;
- case Maxwell::VertexAttribute::Size::Size_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8:
+ case Maxwell::VertexAttribute::Size::Size_G8_R8:
return VK_FORMAT_R8G8_USCALED;
- case Maxwell::VertexAttribute::Size::Size_8_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8_B8:
return VK_FORMAT_R8G8B8_USCALED;
- case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8_B8_A8:
+ case Maxwell::VertexAttribute::Size::Size_X8_B8_G8_R8:
return VK_FORMAT_R8G8B8A8_USCALED;
- case Maxwell::VertexAttribute::Size::Size_16:
+ case Maxwell::VertexAttribute::Size::Size_R16:
return VK_FORMAT_R16_USCALED;
- case Maxwell::VertexAttribute::Size::Size_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16:
return VK_FORMAT_R16G16_USCALED;
- case Maxwell::VertexAttribute::Size::Size_16_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16:
return VK_FORMAT_R16G16B16_USCALED;
- case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16:
return VK_FORMAT_R16G16B16A16_USCALED;
- case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
+ case Maxwell::VertexAttribute::Size::Size_A2_B10_G10_R10:
return VK_FORMAT_A2B10G10R10_USCALED_PACK32;
default:
break;
}
break;
- case Maxwell::VertexAttribute::Type::SignedScaled:
+ case Maxwell::VertexAttribute::Type::SScaled:
switch (size) {
- case Maxwell::VertexAttribute::Size::Size_8:
+ case Maxwell::VertexAttribute::Size::Size_R8:
+ case Maxwell::VertexAttribute::Size::Size_A8:
return VK_FORMAT_R8_SSCALED;
- case Maxwell::VertexAttribute::Size::Size_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8:
+ case Maxwell::VertexAttribute::Size::Size_G8_R8:
return VK_FORMAT_R8G8_SSCALED;
- case Maxwell::VertexAttribute::Size::Size_8_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8_B8:
return VK_FORMAT_R8G8B8_SSCALED;
- case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8_B8_A8:
+ case Maxwell::VertexAttribute::Size::Size_X8_B8_G8_R8:
return VK_FORMAT_R8G8B8A8_SSCALED;
- case Maxwell::VertexAttribute::Size::Size_16:
+ case Maxwell::VertexAttribute::Size::Size_R16:
return VK_FORMAT_R16_SSCALED;
- case Maxwell::VertexAttribute::Size::Size_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16:
return VK_FORMAT_R16G16_SSCALED;
- case Maxwell::VertexAttribute::Size::Size_16_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16:
return VK_FORMAT_R16G16B16_SSCALED;
- case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16:
return VK_FORMAT_R16G16B16A16_SSCALED;
- case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
+ case Maxwell::VertexAttribute::Size::Size_A2_B10_G10_R10:
return VK_FORMAT_A2B10G10R10_SSCALED_PACK32;
default:
break;
}
break;
- case Maxwell::VertexAttribute::Type::UnsignedInt:
+ case Maxwell::VertexAttribute::Type::UInt:
switch (size) {
- case Maxwell::VertexAttribute::Size::Size_8:
+ case Maxwell::VertexAttribute::Size::Size_R8:
+ case Maxwell::VertexAttribute::Size::Size_A8:
return VK_FORMAT_R8_UINT;
- case Maxwell::VertexAttribute::Size::Size_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8:
+ case Maxwell::VertexAttribute::Size::Size_G8_R8:
return VK_FORMAT_R8G8_UINT;
- case Maxwell::VertexAttribute::Size::Size_8_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8_B8:
return VK_FORMAT_R8G8B8_UINT;
- case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8_B8_A8:
+ case Maxwell::VertexAttribute::Size::Size_X8_B8_G8_R8:
return VK_FORMAT_R8G8B8A8_UINT;
- case Maxwell::VertexAttribute::Size::Size_16:
+ case Maxwell::VertexAttribute::Size::Size_R16:
return VK_FORMAT_R16_UINT;
- case Maxwell::VertexAttribute::Size::Size_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16:
return VK_FORMAT_R16G16_UINT;
- case Maxwell::VertexAttribute::Size::Size_16_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16:
return VK_FORMAT_R16G16B16_UINT;
- case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16:
return VK_FORMAT_R16G16B16A16_UINT;
- case Maxwell::VertexAttribute::Size::Size_32:
+ case Maxwell::VertexAttribute::Size::Size_R32:
return VK_FORMAT_R32_UINT;
- case Maxwell::VertexAttribute::Size::Size_32_32:
+ case Maxwell::VertexAttribute::Size::Size_R32_G32:
return VK_FORMAT_R32G32_UINT;
- case Maxwell::VertexAttribute::Size::Size_32_32_32:
+ case Maxwell::VertexAttribute::Size::Size_R32_G32_B32:
return VK_FORMAT_R32G32B32_UINT;
- case Maxwell::VertexAttribute::Size::Size_32_32_32_32:
+ case Maxwell::VertexAttribute::Size::Size_R32_G32_B32_A32:
return VK_FORMAT_R32G32B32A32_UINT;
- case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
+ case Maxwell::VertexAttribute::Size::Size_A2_B10_G10_R10:
return VK_FORMAT_A2B10G10R10_UINT_PACK32;
default:
break;
}
break;
- case Maxwell::VertexAttribute::Type::SignedInt:
+ case Maxwell::VertexAttribute::Type::SInt:
switch (size) {
- case Maxwell::VertexAttribute::Size::Size_8:
+ case Maxwell::VertexAttribute::Size::Size_R8:
+ case Maxwell::VertexAttribute::Size::Size_A8:
return VK_FORMAT_R8_SINT;
- case Maxwell::VertexAttribute::Size::Size_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8:
+ case Maxwell::VertexAttribute::Size::Size_G8_R8:
return VK_FORMAT_R8G8_SINT;
- case Maxwell::VertexAttribute::Size::Size_8_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8_B8:
return VK_FORMAT_R8G8B8_SINT;
- case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
+ case Maxwell::VertexAttribute::Size::Size_R8_G8_B8_A8:
+ case Maxwell::VertexAttribute::Size::Size_X8_B8_G8_R8:
return VK_FORMAT_R8G8B8A8_SINT;
- case Maxwell::VertexAttribute::Size::Size_16:
+ case Maxwell::VertexAttribute::Size::Size_R16:
return VK_FORMAT_R16_SINT;
- case Maxwell::VertexAttribute::Size::Size_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16:
return VK_FORMAT_R16G16_SINT;
- case Maxwell::VertexAttribute::Size::Size_16_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16:
return VK_FORMAT_R16G16B16_SINT;
- case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16:
return VK_FORMAT_R16G16B16A16_SINT;
- case Maxwell::VertexAttribute::Size::Size_32:
+ case Maxwell::VertexAttribute::Size::Size_R32:
return VK_FORMAT_R32_SINT;
- case Maxwell::VertexAttribute::Size::Size_32_32:
+ case Maxwell::VertexAttribute::Size::Size_R32_G32:
return VK_FORMAT_R32G32_SINT;
- case Maxwell::VertexAttribute::Size::Size_32_32_32:
+ case Maxwell::VertexAttribute::Size::Size_R32_G32_B32:
return VK_FORMAT_R32G32B32_SINT;
- case Maxwell::VertexAttribute::Size::Size_32_32_32_32:
+ case Maxwell::VertexAttribute::Size::Size_R32_G32_B32_A32:
return VK_FORMAT_R32G32B32A32_SINT;
- case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
+ case Maxwell::VertexAttribute::Size::Size_A2_B10_G10_R10:
return VK_FORMAT_A2B10G10R10_SINT_PACK32;
default:
break;
@@ -483,23 +506,23 @@ VkFormat VertexFormat(const Device& device, Maxwell::VertexAttribute::Type type,
break;
case Maxwell::VertexAttribute::Type::Float:
switch (size) {
- case Maxwell::VertexAttribute::Size::Size_16:
+ case Maxwell::VertexAttribute::Size::Size_R16:
return VK_FORMAT_R16_SFLOAT;
- case Maxwell::VertexAttribute::Size::Size_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16:
return VK_FORMAT_R16G16_SFLOAT;
- case Maxwell::VertexAttribute::Size::Size_16_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16:
return VK_FORMAT_R16G16B16_SFLOAT;
- case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
+ case Maxwell::VertexAttribute::Size::Size_R16_G16_B16_A16:
return VK_FORMAT_R16G16B16A16_SFLOAT;
- case Maxwell::VertexAttribute::Size::Size_32:
+ case Maxwell::VertexAttribute::Size::Size_R32:
return VK_FORMAT_R32_SFLOAT;
- case Maxwell::VertexAttribute::Size::Size_32_32:
+ case Maxwell::VertexAttribute::Size::Size_R32_G32:
return VK_FORMAT_R32G32_SFLOAT;
- case Maxwell::VertexAttribute::Size::Size_32_32_32:
+ case Maxwell::VertexAttribute::Size::Size_R32_G32_B32:
return VK_FORMAT_R32G32B32_SFLOAT;
- case Maxwell::VertexAttribute::Size::Size_32_32_32_32:
+ case Maxwell::VertexAttribute::Size::Size_R32_G32_B32_A32:
return VK_FORMAT_R32G32B32A32_SFLOAT;
- case Maxwell::VertexAttribute::Size::Size_11_11_10:
+ case Maxwell::VertexAttribute::Size::Size_B10_G11_R11:
return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
default:
break;
@@ -519,29 +542,29 @@ VkFormat VertexFormat(const Device& device, Maxwell::VertexAttribute::Type type,
VkCompareOp ComparisonOp(Maxwell::ComparisonOp comparison) {
switch (comparison) {
- case Maxwell::ComparisonOp::Never:
- case Maxwell::ComparisonOp::NeverOld:
+ case Maxwell::ComparisonOp::Never_D3D:
+ case Maxwell::ComparisonOp::Never_GL:
return VK_COMPARE_OP_NEVER;
- case Maxwell::ComparisonOp::Less:
- case Maxwell::ComparisonOp::LessOld:
+ case Maxwell::ComparisonOp::Less_D3D:
+ case Maxwell::ComparisonOp::Less_GL:
return VK_COMPARE_OP_LESS;
- case Maxwell::ComparisonOp::Equal:
- case Maxwell::ComparisonOp::EqualOld:
+ case Maxwell::ComparisonOp::Equal_D3D:
+ case Maxwell::ComparisonOp::Equal_GL:
return VK_COMPARE_OP_EQUAL;
- case Maxwell::ComparisonOp::LessEqual:
- case Maxwell::ComparisonOp::LessEqualOld:
+ case Maxwell::ComparisonOp::LessEqual_D3D:
+ case Maxwell::ComparisonOp::LessEqual_GL:
return VK_COMPARE_OP_LESS_OR_EQUAL;
- case Maxwell::ComparisonOp::Greater:
- case Maxwell::ComparisonOp::GreaterOld:
+ case Maxwell::ComparisonOp::Greater_D3D:
+ case Maxwell::ComparisonOp::Greater_GL:
return VK_COMPARE_OP_GREATER;
- case Maxwell::ComparisonOp::NotEqual:
- case Maxwell::ComparisonOp::NotEqualOld:
+ case Maxwell::ComparisonOp::NotEqual_D3D:
+ case Maxwell::ComparisonOp::NotEqual_GL:
return VK_COMPARE_OP_NOT_EQUAL;
- case Maxwell::ComparisonOp::GreaterEqual:
- case Maxwell::ComparisonOp::GreaterEqualOld:
+ case Maxwell::ComparisonOp::GreaterEqual_D3D:
+ case Maxwell::ComparisonOp::GreaterEqual_GL:
return VK_COMPARE_OP_GREATER_OR_EQUAL;
- case Maxwell::ComparisonOp::Always:
- case Maxwell::ComparisonOp::AlwaysOld:
+ case Maxwell::ComparisonOp::Always_D3D:
+ case Maxwell::ComparisonOp::Always_GL:
return VK_COMPARE_OP_ALWAYS;
}
UNIMPLEMENTED_MSG("Unimplemented comparison op={}", comparison);
@@ -561,31 +584,31 @@ VkIndexType IndexFormat(Maxwell::IndexFormat index_format) {
return {};
}
-VkStencilOp StencilOp(Maxwell::StencilOp stencil_op) {
+VkStencilOp StencilOp(Maxwell::StencilOp::Op stencil_op) {
switch (stencil_op) {
- case Maxwell::StencilOp::Keep:
- case Maxwell::StencilOp::KeepOGL:
+ case Maxwell::StencilOp::Op::Keep_D3D:
+ case Maxwell::StencilOp::Op::Keep_GL:
return VK_STENCIL_OP_KEEP;
- case Maxwell::StencilOp::Zero:
- case Maxwell::StencilOp::ZeroOGL:
+ case Maxwell::StencilOp::Op::Zero_D3D:
+ case Maxwell::StencilOp::Op::Zero_GL:
return VK_STENCIL_OP_ZERO;
- case Maxwell::StencilOp::Replace:
- case Maxwell::StencilOp::ReplaceOGL:
+ case Maxwell::StencilOp::Op::Replace_D3D:
+ case Maxwell::StencilOp::Op::Replace_GL:
return VK_STENCIL_OP_REPLACE;
- case Maxwell::StencilOp::Incr:
- case Maxwell::StencilOp::IncrOGL:
+ case Maxwell::StencilOp::Op::IncrSaturate_D3D:
+ case Maxwell::StencilOp::Op::IncrSaturate_GL:
return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
- case Maxwell::StencilOp::Decr:
- case Maxwell::StencilOp::DecrOGL:
+ case Maxwell::StencilOp::Op::DecrSaturate_D3D:
+ case Maxwell::StencilOp::Op::DecrSaturate_GL:
return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
- case Maxwell::StencilOp::Invert:
- case Maxwell::StencilOp::InvertOGL:
+ case Maxwell::StencilOp::Op::Invert_D3D:
+ case Maxwell::StencilOp::Op::Invert_GL:
return VK_STENCIL_OP_INVERT;
- case Maxwell::StencilOp::IncrWrap:
- case Maxwell::StencilOp::IncrWrapOGL:
+ case Maxwell::StencilOp::Op::Incr_D3D:
+ case Maxwell::StencilOp::Op::Incr_GL:
return VK_STENCIL_OP_INCREMENT_AND_WRAP;
- case Maxwell::StencilOp::DecrWrap:
- case Maxwell::StencilOp::DecrWrapOGL:
+ case Maxwell::StencilOp::Op::Decr_D3D:
+ case Maxwell::StencilOp::Op::Decr_GL:
return VK_STENCIL_OP_DECREMENT_AND_WRAP;
}
UNIMPLEMENTED_MSG("Unimplemented stencil op={}", stencil_op);
@@ -594,20 +617,20 @@ VkStencilOp StencilOp(Maxwell::StencilOp stencil_op) {
VkBlendOp BlendEquation(Maxwell::Blend::Equation equation) {
switch (equation) {
- case Maxwell::Blend::Equation::Add:
- case Maxwell::Blend::Equation::AddGL:
+ case Maxwell::Blend::Equation::Add_D3D:
+ case Maxwell::Blend::Equation::Add_GL:
return VK_BLEND_OP_ADD;
- case Maxwell::Blend::Equation::Subtract:
- case Maxwell::Blend::Equation::SubtractGL:
+ case Maxwell::Blend::Equation::Subtract_D3D:
+ case Maxwell::Blend::Equation::Subtract_GL:
return VK_BLEND_OP_SUBTRACT;
- case Maxwell::Blend::Equation::ReverseSubtract:
- case Maxwell::Blend::Equation::ReverseSubtractGL:
+ case Maxwell::Blend::Equation::ReverseSubtract_D3D:
+ case Maxwell::Blend::Equation::ReverseSubtract_GL:
return VK_BLEND_OP_REVERSE_SUBTRACT;
- case Maxwell::Blend::Equation::Min:
- case Maxwell::Blend::Equation::MinGL:
+ case Maxwell::Blend::Equation::Min_D3D:
+ case Maxwell::Blend::Equation::Min_GL:
return VK_BLEND_OP_MIN;
- case Maxwell::Blend::Equation::Max:
- case Maxwell::Blend::Equation::MaxGL:
+ case Maxwell::Blend::Equation::Max_D3D:
+ case Maxwell::Blend::Equation::Max_GL:
return VK_BLEND_OP_MAX;
}
UNIMPLEMENTED_MSG("Unimplemented blend equation={}", equation);
@@ -616,62 +639,62 @@ VkBlendOp BlendEquation(Maxwell::Blend::Equation equation) {
VkBlendFactor BlendFactor(Maxwell::Blend::Factor factor) {
switch (factor) {
- case Maxwell::Blend::Factor::Zero:
- case Maxwell::Blend::Factor::ZeroGL:
+ case Maxwell::Blend::Factor::Zero_D3D:
+ case Maxwell::Blend::Factor::Zero_GL:
return VK_BLEND_FACTOR_ZERO;
- case Maxwell::Blend::Factor::One:
- case Maxwell::Blend::Factor::OneGL:
+ case Maxwell::Blend::Factor::One_D3D:
+ case Maxwell::Blend::Factor::One_GL:
return VK_BLEND_FACTOR_ONE;
- case Maxwell::Blend::Factor::SourceColor:
- case Maxwell::Blend::Factor::SourceColorGL:
+ case Maxwell::Blend::Factor::SourceColor_D3D:
+ case Maxwell::Blend::Factor::SourceColor_GL:
return VK_BLEND_FACTOR_SRC_COLOR;
- case Maxwell::Blend::Factor::OneMinusSourceColor:
- case Maxwell::Blend::Factor::OneMinusSourceColorGL:
+ case Maxwell::Blend::Factor::OneMinusSourceColor_D3D:
+ case Maxwell::Blend::Factor::OneMinusSourceColor_GL:
return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
- case Maxwell::Blend::Factor::SourceAlpha:
- case Maxwell::Blend::Factor::SourceAlphaGL:
+ case Maxwell::Blend::Factor::SourceAlpha_D3D:
+ case Maxwell::Blend::Factor::SourceAlpha_GL:
return VK_BLEND_FACTOR_SRC_ALPHA;
- case Maxwell::Blend::Factor::OneMinusSourceAlpha:
- case Maxwell::Blend::Factor::OneMinusSourceAlphaGL:
+ case Maxwell::Blend::Factor::OneMinusSourceAlpha_D3D:
+ case Maxwell::Blend::Factor::OneMinusSourceAlpha_GL:
return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
- case Maxwell::Blend::Factor::DestAlpha:
- case Maxwell::Blend::Factor::DestAlphaGL:
+ case Maxwell::Blend::Factor::DestAlpha_D3D:
+ case Maxwell::Blend::Factor::DestAlpha_GL:
return VK_BLEND_FACTOR_DST_ALPHA;
- case Maxwell::Blend::Factor::OneMinusDestAlpha:
- case Maxwell::Blend::Factor::OneMinusDestAlphaGL:
+ case Maxwell::Blend::Factor::OneMinusDestAlpha_D3D:
+ case Maxwell::Blend::Factor::OneMinusDestAlpha_GL:
return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
- case Maxwell::Blend::Factor::DestColor:
- case Maxwell::Blend::Factor::DestColorGL:
+ case Maxwell::Blend::Factor::DestColor_D3D:
+ case Maxwell::Blend::Factor::DestColor_GL:
return VK_BLEND_FACTOR_DST_COLOR;
- case Maxwell::Blend::Factor::OneMinusDestColor:
- case Maxwell::Blend::Factor::OneMinusDestColorGL:
+ case Maxwell::Blend::Factor::OneMinusDestColor_D3D:
+ case Maxwell::Blend::Factor::OneMinusDestColor_GL:
return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
- case Maxwell::Blend::Factor::SourceAlphaSaturate:
- case Maxwell::Blend::Factor::SourceAlphaSaturateGL:
+ case Maxwell::Blend::Factor::SourceAlphaSaturate_D3D:
+ case Maxwell::Blend::Factor::SourceAlphaSaturate_GL:
return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
- case Maxwell::Blend::Factor::Source1Color:
- case Maxwell::Blend::Factor::Source1ColorGL:
+ case Maxwell::Blend::Factor::Source1Color_D3D:
+ case Maxwell::Blend::Factor::Source1Color_GL:
return VK_BLEND_FACTOR_SRC1_COLOR;
- case Maxwell::Blend::Factor::OneMinusSource1Color:
- case Maxwell::Blend::Factor::OneMinusSource1ColorGL:
+ case Maxwell::Blend::Factor::OneMinusSource1Color_D3D:
+ case Maxwell::Blend::Factor::OneMinusSource1Color_GL:
return VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR;
- case Maxwell::Blend::Factor::Source1Alpha:
- case Maxwell::Blend::Factor::Source1AlphaGL:
+ case Maxwell::Blend::Factor::Source1Alpha_D3D:
+ case Maxwell::Blend::Factor::Source1Alpha_GL:
return VK_BLEND_FACTOR_SRC1_ALPHA;
- case Maxwell::Blend::Factor::OneMinusSource1Alpha:
- case Maxwell::Blend::Factor::OneMinusSource1AlphaGL:
+ case Maxwell::Blend::Factor::OneMinusSource1Alpha_D3D:
+ case Maxwell::Blend::Factor::OneMinusSource1Alpha_GL:
return VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA;
- case Maxwell::Blend::Factor::ConstantColor:
- case Maxwell::Blend::Factor::ConstantColorGL:
+ case Maxwell::Blend::Factor::BlendFactor_D3D:
+ case Maxwell::Blend::Factor::ConstantColor_GL:
return VK_BLEND_FACTOR_CONSTANT_COLOR;
- case Maxwell::Blend::Factor::OneMinusConstantColor:
- case Maxwell::Blend::Factor::OneMinusConstantColorGL:
+ case Maxwell::Blend::Factor::OneMinusBlendFactor_D3D:
+ case Maxwell::Blend::Factor::OneMinusConstantColor_GL:
return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
- case Maxwell::Blend::Factor::ConstantAlpha:
- case Maxwell::Blend::Factor::ConstantAlphaGL:
+ case Maxwell::Blend::Factor::BothSourceAlpha_D3D:
+ case Maxwell::Blend::Factor::ConstantAlpha_GL:
return VK_BLEND_FACTOR_CONSTANT_ALPHA;
- case Maxwell::Blend::Factor::OneMinusConstantAlpha:
- case Maxwell::Blend::Factor::OneMinusConstantAlphaGL:
+ case Maxwell::Blend::Factor::OneMinusBothSourceAlpha_D3D:
+ case Maxwell::Blend::Factor::OneMinusConstantAlpha_GL:
return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA;
}
UNIMPLEMENTED_MSG("Unimplemented blend factor={}", factor);
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.h b/src/video_core/renderer_vulkan/maxwell_to_vk.h
index 356d46292..6f65502d6 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.h
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.h
@@ -55,7 +55,7 @@ VkCompareOp ComparisonOp(Maxwell::ComparisonOp comparison);
VkIndexType IndexFormat(Maxwell::IndexFormat index_format);
-VkStencilOp StencilOp(Maxwell::StencilOp stencil_op);
+VkStencilOp StencilOp(Maxwell::StencilOp::Op stencil_op);
VkBlendOp BlendEquation(Maxwell::Blend::Equation equation);
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index 7c78d0299..d8131232a 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -102,13 +102,13 @@ RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_,
debug_callback(Settings::values.renderer_debug ? CreateDebugCallback(instance) : nullptr),
surface(CreateSurface(instance, render_window)),
device(CreateDevice(instance, dld, *surface)), memory_allocator(device, false),
- state_tracker(gpu), scheduler(device, state_tracker),
+ state_tracker(), scheduler(device, state_tracker),
swapchain(*surface, device, scheduler, render_window.GetFramebufferLayout().width,
render_window.GetFramebufferLayout().height, false),
blit_screen(cpu_memory, render_window, device, memory_allocator, swapchain, scheduler,
screen_info),
- rasterizer(render_window, gpu, gpu.MemoryManager(), cpu_memory, screen_info, device,
- memory_allocator, state_tracker, scheduler) {
+ rasterizer(render_window, gpu, cpu_memory, screen_info, device, memory_allocator,
+ state_tracker, scheduler) {
Report();
} catch (const vk::Exception& exception) {
LOG_ERROR(Render_Vulkan, "Vulkan initialization failed with error: {}", exception.what());
@@ -142,7 +142,7 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
const auto recreate_swapchain = [&] {
if (!has_been_recreated) {
has_been_recreated = true;
- scheduler.WaitWorker();
+ scheduler.Finish();
}
const Layout::FramebufferLayout layout = render_window.GetFramebufferLayout();
swapchain.Create(layout.width, layout.height, is_srgb);
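
The swapchain-recreation path now calls scheduler.Finish() instead of scheduler.WaitWorker(): draining the CPU-side recording worker is not enough before tearing down swapchain-backed resources, the GPU must also have finished with them. A simplified sketch of the distinction, assuming a worker-thread scheduler with a GPU tick counter (all names here are illustrative, not the actual Scheduler API):

    #include <condition_variable>
    #include <mutex>

    class MiniScheduler {
    public:
        // Blocks until every queued recording task has been consumed.
        void WaitWorker() {
            std::unique_lock lock{mutex};
            cv.wait(lock, [this] { return queued_tasks == 0; });
        }

        // WaitWorker() plus a wait for the GPU to reach the last submitted tick.
        void Finish() {
            WaitWorker();
            std::unique_lock lock{mutex};
            cv.wait(lock, [this] { return gpu_tick >= submitted_tick; });
        }

        // Called by the worker thread when it finishes a task.
        void OnTaskDone() {
            std::scoped_lock lock{mutex};
            --queued_tasks;
            cv.notify_all();
        }

        // Called when the driver reports the GPU reached a tick.
        void OnGpuTick(unsigned long long tick) {
            std::scoped_lock lock{mutex};
            gpu_tick = tick;
            cv.notify_all();
        }

    private:
        std::mutex mutex;
        std::condition_variable cv;
        int queued_tasks = 0;
        unsigned long long gpu_tick = 0;
        unsigned long long submitted_tick = 0;
    };
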
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
index 444c29f68..89426121f 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
@@ -145,6 +145,11 @@ VkSemaphore BlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer,
// Finish any pending renderpass
scheduler.RequestOutsideRenderPassOperationContext();
+ if (const auto swapchain_images = swapchain.GetImageCount(); swapchain_images != image_count) {
+ image_count = swapchain_images;
+ Recreate();
+ }
+
const std::size_t image_index = swapchain.GetImageIndex();
scheduler.Wait(resource_ticks[image_index]);
@@ -448,15 +453,15 @@ vk::Framebuffer BlitScreen::CreateFramebuffer(const VkImageView& image_view, VkE
void BlitScreen::CreateStaticResources() {
CreateShaders();
+ CreateSampler();
+}
+
+void BlitScreen::CreateDynamicResources() {
CreateSemaphores();
CreateDescriptorPool();
CreateDescriptorSetLayout();
CreateDescriptorSets();
CreatePipelineLayout();
- CreateSampler();
-}
-
-void BlitScreen::CreateDynamicResources() {
CreateRenderPass();
CreateFramebuffers();
CreateGraphicsPipeline();
@@ -475,11 +480,15 @@ void BlitScreen::RefreshResources(const Tegra::FramebufferConfig& framebuffer) {
fsr.reset();
}
- if (framebuffer.width == raw_width && framebuffer.height == raw_height && !raw_images.empty()) {
+ if (framebuffer.width == raw_width && framebuffer.height == raw_height &&
+ framebuffer.pixel_format == pixel_format && !raw_images.empty()) {
return;
}
+
raw_width = framebuffer.width;
raw_height = framebuffer.height;
+ pixel_format = framebuffer.pixel_format;
+
ReleaseRawImages();
CreateStagingBuffer(framebuffer);
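
RefreshResources now keys the cached raw images on the framebuffer pixel format as well as its dimensions, and Draw recreates the per-image resources whenever the swapchain image count changes. A small sketch of that invalidation guard, with stand-in names:

    #include <cstdint>

    // Stand-in for Service::android::PixelFormat; values are illustrative.
    enum class PixelFormat : std::uint32_t { Rgba8888 = 1, Rgb565 = 4 };

    struct RawImageCache {
        std::uint32_t width = 0;
        std::uint32_t height = 0;
        PixelFormat format{};
        bool valid = false;

        // True when the cached raw images can be reused as-is.
        bool Matches(std::uint32_t w, std::uint32_t h, PixelFormat fmt) const {
            return valid && w == width && h == height && fmt == format;
        }

        void Refresh(std::uint32_t w, std::uint32_t h, PixelFormat fmt) {
            if (Matches(w, h, fmt)) {
                return; // nothing changed, keep the existing images
            }
            width = w;
            height = h;
            format = fmt;
            valid = true;
            // ...release the old raw images and recreate the staging buffer...
        }
    };
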
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h
index b8c67bef0..a2b73ec54 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.h
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.h
@@ -28,6 +28,10 @@ namespace VideoCore {
class RasterizerInterface;
}
+namespace Service::android {
+enum class PixelFormat : u32;
+}
+
namespace Vulkan {
struct ScreenInfo;
@@ -109,7 +113,7 @@ private:
MemoryAllocator& memory_allocator;
Swapchain& swapchain;
Scheduler& scheduler;
- const std::size_t image_count;
+ std::size_t image_count;
const ScreenInfo& screen_info;
vk::ShaderModule vertex_shader;
@@ -156,6 +160,7 @@ private:
u32 raw_width = 0;
u32 raw_height = 0;
+ Service::android::PixelFormat pixel_format{};
std::unique_ptr<FSR> fsr;
};
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index f17a5ccd6..241d7573e 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -26,8 +26,6 @@
namespace Vulkan {
-using Tegra::Texture::SWIZZLE_TABLE;
-
namespace {
constexpr u32 ASTC_BINDING_INPUT_BUFFER = 0;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 6447210e2..7906e11a8 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -126,8 +126,8 @@ void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset};
const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() +
secondary_offset};
- const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
- const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
+ const u32 lhs_raw{gpu_memory.Read<u32>(addr) << desc.shift_left};
+ const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr) << desc.secondary_shift_left};
return TexturePair(lhs_raw | rhs_raw, via_header_index);
}
}
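
The const-buffer read for separate image/sampler handles now applies per-descriptor shifts before combining the two words. A sketch of the combination, assuming shift_left and secondary_shift_left come from the shader's texture descriptor as in the diff:

    #include <cstdint>

    // lhs_raw/rhs_raw are the two words read from the const buffers;
    // the shift amounts come from the shader's texture descriptor.
    constexpr std::uint32_t CombineHandle(std::uint32_t lhs_raw, std::uint32_t rhs_raw,
                                          std::uint32_t shift_left,
                                          std::uint32_t secondary_shift_left) {
        return (lhs_raw << shift_left) | (rhs_raw << secondary_shift_left);
    }

    // For example, CombineHandle(tic_index, tsc_index, 0, 20) would place a
    // sampler index in the upper bits of the combined handle word.
    static_assert(CombineHandle(0x12, 0x3, 0, 20) == 0x00300012);
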
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
index c249b34d4..0214b103a 100644
--- a/src/video_core/renderer_vulkan/vk_fence_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
@@ -11,11 +11,8 @@
namespace Vulkan {
-InnerFence::InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_)
- : FenceBase{payload_, is_stubbed_}, scheduler{scheduler_} {}
-
-InnerFence::InnerFence(Scheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_)
- : FenceBase{address_, payload_, is_stubbed_}, scheduler{scheduler_} {}
+InnerFence::InnerFence(Scheduler& scheduler_, bool is_stubbed_)
+ : FenceBase{is_stubbed_}, scheduler{scheduler_} {}
InnerFence::~InnerFence() = default;
@@ -48,12 +45,8 @@ FenceManager::FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::G
: GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_},
scheduler{scheduler_} {}
-Fence FenceManager::CreateFence(u32 value, bool is_stubbed) {
- return std::make_shared<InnerFence>(scheduler, value, is_stubbed);
-}
-
-Fence FenceManager::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) {
- return std::make_shared<InnerFence>(scheduler, addr, value, is_stubbed);
+Fence FenceManager::CreateFence(bool is_stubbed) {
+ return std::make_shared<InnerFence>(scheduler, is_stubbed);
}
void FenceManager::QueueFence(Fence& fence) {
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h
index 7c0bbd80a..7fe2afcd9 100644
--- a/src/video_core/renderer_vulkan/vk_fence_manager.h
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.h
@@ -25,8 +25,7 @@ class Scheduler;
class InnerFence : public VideoCommon::FenceBase {
public:
- explicit InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_);
- explicit InnerFence(Scheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_);
+ explicit InnerFence(Scheduler& scheduler_, bool is_stubbed_);
~InnerFence();
void Queue();
@@ -50,8 +49,7 @@ public:
QueryCache& query_cache, const Device& device, Scheduler& scheduler);
protected:
- Fence CreateFence(u32 value, bool is_stubbed) override;
- Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override;
+ Fence CreateFence(bool is_stubbed) override;
void QueueFence(Fence& fence) override;
bool IsFenceSignaled(Fence& fence) const override;
void WaitFence(Fence& fence) override;
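
CreateFence loses its address/payload overloads because the memory write formerly tied to a fence now arrives as a host callback (see the SignalFence(std::function<void()>) path in the rasterizer changes below). A rough single-threaded sketch of that callback flow, with stand-in names:

    #include <functional>
    #include <queue>
    #include <utility>

    struct PendingFence {
        std::function<void()> on_signal; // e.g. writes a semaphore or syncpoint value
    };

    class MiniFenceManager {
    public:
        void SignalFence(std::function<void()>&& func) {
            pending.push(PendingFence{std::move(func)});
        }

        // Called once the GPU work tied to the oldest fence is known done.
        void PopFirstReached() {
            if (pending.empty()) {
                return;
            }
            pending.front().on_signal(); // perform the deferred memory write
            pending.pop();
        }

    private:
        std::queue<PendingFence> pending;
    };
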
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 7eb623a7c..b4372a839 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -216,15 +216,14 @@ ConfigureFuncPtr ConfigureFunc(const std::array<vk::ShaderModule, NUM_STAGES>& m
} // Anonymous namespace
GraphicsPipeline::GraphicsPipeline(
- Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, Scheduler& scheduler_,
- BufferCache& buffer_cache_, TextureCache& texture_cache_,
+ Scheduler& scheduler_, BufferCache& buffer_cache_, TextureCache& texture_cache_,
VideoCore::ShaderNotify* shader_notify, const Device& device_, DescriptorPool& descriptor_pool,
UpdateDescriptorQueue& update_descriptor_queue_, Common::ThreadWorker* worker_thread,
PipelineStatistics* pipeline_statistics, RenderPassCache& render_pass_cache,
const GraphicsPipelineCacheKey& key_, std::array<vk::ShaderModule, NUM_STAGES> stages,
const std::array<const Shader::Info*, NUM_STAGES>& infos)
- : key{key_}, maxwell3d{maxwell3d_}, gpu_memory{gpu_memory_}, device{device_},
- texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, scheduler{scheduler_},
+ : key{key_}, device{device_}, texture_cache{texture_cache_},
+ buffer_cache{buffer_cache_}, scheduler{scheduler_},
update_descriptor_queue{update_descriptor_queue_}, spv_modules{std::move(stages)} {
if (shader_notify) {
shader_notify->MarkShaderBuilding();
@@ -289,8 +288,8 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
buffer_cache.SetUniformBuffersState(enabled_uniform_buffer_masks, &uniform_buffer_sizes);
- const auto& regs{maxwell3d.regs};
- const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex};
+ const auto& regs{maxwell3d->regs};
+ const bool via_header_index{regs.sampler_binding == Maxwell::SamplerBinding::ViaHeaderBinding};
const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE {
const Shader::Info& info{stage_infos[stage]};
buffer_cache.UnbindGraphicsStorageBuffers(stage);
@@ -303,7 +302,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
++ssbo_index;
}
}
- const auto& cbufs{maxwell3d.state.shader_stages[stage].const_buffers};
+ const auto& cbufs{maxwell3d->state.shader_stages[stage].const_buffers};
const auto read_handle{[&](const auto& desc, u32 index) {
ASSERT(cbufs[desc.cbuf_index].enabled);
const u32 index_offset{index << desc.size_shift};
@@ -316,13 +315,14 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
const u32 second_offset{desc.secondary_cbuf_offset + index_offset};
const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address +
second_offset};
- const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
- const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
+ const u32 lhs_raw{gpu_memory->Read<u32>(addr) << desc.shift_left};
+ const u32 rhs_raw{gpu_memory->Read<u32>(separate_addr)
+ << desc.secondary_shift_left};
const u32 raw{lhs_raw | rhs_raw};
return TexturePair(raw, via_header_index);
}
}
- return TexturePair(gpu_memory.Read<u32>(addr), via_header_index);
+ return TexturePair(gpu_memory->Read<u32>(addr), via_header_index);
}};
const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE {
for (u32 index = 0; index < desc.count; ++index) {
@@ -680,15 +680,6 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
.lineStippleFactor = 0,
.lineStipplePattern = 0,
};
- VkPipelineRasterizationConservativeStateCreateInfoEXT conservative_raster{
- .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT,
- .pNext = nullptr,
- .flags = 0,
- .conservativeRasterizationMode = key.state.conservative_raster_enable != 0
- ? VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT
- : VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT,
- .extraPrimitiveOverestimationSize = 0.0f,
- };
VkPipelineRasterizationProvokingVertexStateCreateInfoEXT provoking_vertex{
.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT,
.pNext = nullptr,
@@ -699,9 +690,6 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
if (IsLine(input_assembly_topology) && device.IsExtLineRasterizationSupported()) {
line_state.pNext = std::exchange(rasterization_ci.pNext, &line_state);
}
- if (device.IsExtConservativeRasterizationSupported()) {
- conservative_raster.pNext = std::exchange(rasterization_ci.pNext, &conservative_raster);
- }
if (device.IsExtProvokingVertexSupported()) {
provoking_vertex.pNext = std::exchange(rasterization_ci.pNext, &provoking_vertex);
}
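
Dropping the conservative-rasterization struct leaves the remaining extension structs spliced into rasterization_ci.pNext with the same std::exchange idiom. A tiny sketch of that chaining trick, with stand-in struct types:

    #include <utility>

    struct BaseCI { const void* pNext = nullptr; };
    struct ExtCI { const void* pNext = nullptr; };

    void Chain(BaseCI& base, ExtCI& ext) {
        // ext inherits whatever base pointed at, and base now points at ext,
        // so repeated calls build a singly linked pNext chain in one line each.
        ext.pNext = std::exchange(base.pNext, &ext);
    }
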
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
index 8842f62d7..6bf577d25 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
@@ -70,15 +70,16 @@ class GraphicsPipeline {
static constexpr size_t NUM_STAGES = Tegra::Engines::Maxwell3D::Regs::MaxShaderStage;
public:
- explicit GraphicsPipeline(
- Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory,
- Scheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache,
- VideoCore::ShaderNotify* shader_notify, const Device& device,
- DescriptorPool& descriptor_pool, UpdateDescriptorQueue& update_descriptor_queue,
- Common::ThreadWorker* worker_thread, PipelineStatistics* pipeline_statistics,
- RenderPassCache& render_pass_cache, const GraphicsPipelineCacheKey& key,
- std::array<vk::ShaderModule, NUM_STAGES> stages,
- const std::array<const Shader::Info*, NUM_STAGES>& infos);
+ explicit GraphicsPipeline(Scheduler& scheduler, BufferCache& buffer_cache,
+ TextureCache& texture_cache, VideoCore::ShaderNotify* shader_notify,
+ const Device& device, DescriptorPool& descriptor_pool,
+ UpdateDescriptorQueue& update_descriptor_queue,
+ Common::ThreadWorker* worker_thread,
+ PipelineStatistics* pipeline_statistics,
+ RenderPassCache& render_pass_cache,
+ const GraphicsPipelineCacheKey& key,
+ std::array<vk::ShaderModule, NUM_STAGES> stages,
+ const std::array<const Shader::Info*, NUM_STAGES>& infos);
GraphicsPipeline& operator=(GraphicsPipeline&&) noexcept = delete;
GraphicsPipeline(GraphicsPipeline&&) noexcept = delete;
@@ -110,6 +111,11 @@ public:
return [](GraphicsPipeline* pl, bool is_indexed) { pl->ConfigureImpl<Spec>(is_indexed); };
}
+ void SetEngine(Tegra::Engines::Maxwell3D* maxwell3d_, Tegra::MemoryManager* gpu_memory_) {
+ maxwell3d = maxwell3d_;
+ gpu_memory = gpu_memory_;
+ }
+
private:
template <typename Spec>
void ConfigureImpl(bool is_indexed);
@@ -122,8 +128,8 @@ private:
void Validate();
const GraphicsPipelineCacheKey key;
- Tegra::Engines::Maxwell3D& maxwell3d;
- Tegra::MemoryManager& gpu_memory;
+ Tegra::Engines::Maxwell3D* maxwell3d;
+ Tegra::MemoryManager* gpu_memory;
const Device& device;
TextureCache& texture_cache;
BufferCache& buffer_cache;
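
With channel support, a graphics pipeline can no longer bind references to one channel's Maxwell3D and MemoryManager at construction; it keeps raw pointers that the rasterizer rebinds through SetEngine() before every use. A reduced sketch of the pattern (all types here are stand-ins):

    // Stand-in engine types.
    struct Maxwell3D {};
    struct MemoryManager {};

    class MiniPipeline {
    public:
        // Rebinds the pipeline to the active channel's engines.
        void SetEngine(Maxwell3D* maxwell3d_, MemoryManager* gpu_memory_) {
            maxwell3d = maxwell3d_;
            gpu_memory = gpu_memory_;
        }

    private:
        Maxwell3D* maxwell3d = nullptr;  // rebound whenever the channel changes
        MemoryManager* gpu_memory = nullptr;
    };

    // Usage at a draw site: hand over the current channel's engines first,
    // then configure and record the draw.
    void Draw(MiniPipeline& pipeline, Maxwell3D* maxwell3d, MemoryManager* gpu_memory) {
        pipeline.SetEngine(maxwell3d, gpu_memory);
        // pipeline.Configure(is_indexed); scheduler.Record(...);
    }
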
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 9708dc45e..13d5a1f67 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -62,29 +62,29 @@ auto MakeSpan(Container& container) {
Shader::CompareFunction MaxwellToCompareFunction(Maxwell::ComparisonOp comparison) {
switch (comparison) {
- case Maxwell::ComparisonOp::Never:
- case Maxwell::ComparisonOp::NeverOld:
+ case Maxwell::ComparisonOp::Never_D3D:
+ case Maxwell::ComparisonOp::Never_GL:
return Shader::CompareFunction::Never;
- case Maxwell::ComparisonOp::Less:
- case Maxwell::ComparisonOp::LessOld:
+ case Maxwell::ComparisonOp::Less_D3D:
+ case Maxwell::ComparisonOp::Less_GL:
return Shader::CompareFunction::Less;
- case Maxwell::ComparisonOp::Equal:
- case Maxwell::ComparisonOp::EqualOld:
+ case Maxwell::ComparisonOp::Equal_D3D:
+ case Maxwell::ComparisonOp::Equal_GL:
return Shader::CompareFunction::Equal;
- case Maxwell::ComparisonOp::LessEqual:
- case Maxwell::ComparisonOp::LessEqualOld:
+ case Maxwell::ComparisonOp::LessEqual_D3D:
+ case Maxwell::ComparisonOp::LessEqual_GL:
return Shader::CompareFunction::LessThanEqual;
- case Maxwell::ComparisonOp::Greater:
- case Maxwell::ComparisonOp::GreaterOld:
+ case Maxwell::ComparisonOp::Greater_D3D:
+ case Maxwell::ComparisonOp::Greater_GL:
return Shader::CompareFunction::Greater;
- case Maxwell::ComparisonOp::NotEqual:
- case Maxwell::ComparisonOp::NotEqualOld:
+ case Maxwell::ComparisonOp::NotEqual_D3D:
+ case Maxwell::ComparisonOp::NotEqual_GL:
return Shader::CompareFunction::NotEqual;
- case Maxwell::ComparisonOp::GreaterEqual:
- case Maxwell::ComparisonOp::GreaterEqualOld:
+ case Maxwell::ComparisonOp::GreaterEqual_D3D:
+ case Maxwell::ComparisonOp::GreaterEqual_GL:
return Shader::CompareFunction::GreaterThanEqual;
- case Maxwell::ComparisonOp::Always:
- case Maxwell::ComparisonOp::AlwaysOld:
+ case Maxwell::ComparisonOp::Always_D3D:
+ case Maxwell::ComparisonOp::Always_GL:
return Shader::CompareFunction::Always;
}
UNIMPLEMENTED_MSG("Unimplemented comparison op={}", comparison);
@@ -96,15 +96,18 @@ Shader::AttributeType CastAttributeType(const FixedPipelineState::VertexAttribut
return Shader::AttributeType::Disabled;
}
switch (attr.Type()) {
- case Maxwell::VertexAttribute::Type::SignedNorm:
- case Maxwell::VertexAttribute::Type::UnsignedNorm:
- case Maxwell::VertexAttribute::Type::UnsignedScaled:
- case Maxwell::VertexAttribute::Type::SignedScaled:
+ case Maxwell::VertexAttribute::Type::UnusedEnumDoNotUseBecauseItWillGoAway:
+ ASSERT_MSG(false, "Invalid vertex attribute type!");
+ return Shader::AttributeType::Disabled;
+ case Maxwell::VertexAttribute::Type::SNorm:
+ case Maxwell::VertexAttribute::Type::UNorm:
+ case Maxwell::VertexAttribute::Type::UScaled:
+ case Maxwell::VertexAttribute::Type::SScaled:
case Maxwell::VertexAttribute::Type::Float:
return Shader::AttributeType::Float;
- case Maxwell::VertexAttribute::Type::SignedInt:
+ case Maxwell::VertexAttribute::Type::SInt:
return Shader::AttributeType::SignedInt;
- case Maxwell::VertexAttribute::Type::UnsignedInt:
+ case Maxwell::VertexAttribute::Type::UInt:
return Shader::AttributeType::UnsignedInt;
}
return Shader::AttributeType::Float;
@@ -131,6 +134,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program
Shader::RuntimeInfo info;
if (previous_program) {
info.previous_stage_stores = previous_program->info.stores;
+ info.previous_stage_legacy_stores_mapping = previous_program->info.legacy_stores_mapping;
if (previous_program->is_geometry_passthrough) {
info.previous_stage_stores.mask |= previous_program->info.passthrough.mask;
}
@@ -162,16 +166,14 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program
}
break;
case Shader::Stage::TessellationEval:
- // We have to flip tessellation clockwise for some reason...
- info.tess_clockwise = key.state.tessellation_clockwise == 0;
info.tess_primitive = [&key] {
const u32 raw{key.state.tessellation_primitive.Value()};
- switch (static_cast<Maxwell::TessellationPrimitive>(raw)) {
- case Maxwell::TessellationPrimitive::Isolines:
+ switch (static_cast<Maxwell::Tessellation::DomainType>(raw)) {
+ case Maxwell::Tessellation::DomainType::Isolines:
return Shader::TessPrimitive::Isolines;
- case Maxwell::TessellationPrimitive::Triangles:
+ case Maxwell::Tessellation::DomainType::Triangles:
return Shader::TessPrimitive::Triangles;
- case Maxwell::TessellationPrimitive::Quads:
+ case Maxwell::Tessellation::DomainType::Quads:
return Shader::TessPrimitive::Quads;
}
ASSERT(false);
@@ -179,12 +181,12 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program
}();
info.tess_spacing = [&] {
const u32 raw{key.state.tessellation_spacing};
- switch (static_cast<Maxwell::TessellationSpacing>(raw)) {
- case Maxwell::TessellationSpacing::Equal:
+ switch (static_cast<Maxwell::Tessellation::Spacing>(raw)) {
+ case Maxwell::Tessellation::Spacing::Integer:
return Shader::TessSpacing::Equal;
- case Maxwell::TessellationSpacing::FractionalOdd:
+ case Maxwell::Tessellation::Spacing::FractionalOdd:
return Shader::TessSpacing::FractionalOdd;
- case Maxwell::TessellationSpacing::FractionalEven:
+ case Maxwell::Tessellation::Spacing::FractionalEven:
return Shader::TessSpacing::FractionalEven;
}
ASSERT(false);
@@ -259,20 +261,18 @@ bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) c
return std::memcmp(&rhs, this, Size()) == 0;
}
-PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::Engines::KeplerCompute& kepler_compute_,
- Tegra::MemoryManager& gpu_memory_, const Device& device_,
+PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, const Device& device_,
Scheduler& scheduler_, DescriptorPool& descriptor_pool_,
UpdateDescriptorQueue& update_descriptor_queue_,
RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_,
TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_)
- : VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_},
- device{device_}, scheduler{scheduler_}, descriptor_pool{descriptor_pool_},
- update_descriptor_queue{update_descriptor_queue_}, render_pass_cache{render_pass_cache_},
- buffer_cache{buffer_cache_}, texture_cache{texture_cache_}, shader_notify{shader_notify_},
+ : VideoCommon::ShaderCache{rasterizer_}, device{device_}, scheduler{scheduler_},
+ descriptor_pool{descriptor_pool_}, update_descriptor_queue{update_descriptor_queue_},
+ render_pass_cache{render_pass_cache_}, buffer_cache{buffer_cache_},
+ texture_cache{texture_cache_}, shader_notify{shader_notify_},
use_asynchronous_shaders{Settings::values.use_asynchronous_shaders.GetValue()},
- workers(std::max(std::thread::hardware_concurrency(), 2U) - 1, "yuzu:PipelineBuilder"),
- serialization_thread(1, "yuzu:PipelineSerialization") {
+ workers(std::max(std::thread::hardware_concurrency(), 2U) - 1, "VkPipelineBuilder"),
+ serialization_thread(1, "VkPipelineSerialization") {
const auto& float_control{device.FloatControlProperties()};
const VkDriverIdKHR driver_id{device.GetDriverID()};
profile = Shader::Profile{
@@ -337,7 +337,7 @@ GraphicsPipeline* PipelineCache::CurrentGraphicsPipeline() {
current_pipeline = nullptr;
return nullptr;
}
- graphics_key.state.Refresh(maxwell3d, device.IsExtExtendedDynamicStateSupported(),
+ graphics_key.state.Refresh(*maxwell3d, device.IsExtExtendedDynamicStateSupported(),
device.IsExtVertexInputDynamicStateSupported());
if (current_pipeline) {
@@ -357,7 +357,7 @@ ComputePipeline* PipelineCache::CurrentComputePipeline() {
if (!shader) {
return nullptr;
}
- const auto& qmd{kepler_compute.launch_description};
+ const auto& qmd{kepler_compute->launch_description};
const ComputePipelineCacheKey key{
.unique_hash = shader->unique_hash,
.shared_memory_size = qmd.shared_alloc,
@@ -486,13 +486,13 @@ GraphicsPipeline* PipelineCache::BuiltPipeline(GraphicsPipeline* pipeline) const
}
// If something is using depth, we can assume that games are not rendering anything that
// will be used only once.
- if (maxwell3d.regs.zeta_enable) {
+ if (maxwell3d->regs.zeta_enable) {
return nullptr;
}
// If games are using a small index count, we can assume these are full-screen quads.
// Usually these shaders are only used once for building textures, so we can assume they
// can't be built asynchronously.
- if (maxwell3d.regs.index_array.count <= 6 || maxwell3d.regs.vertex_buffer.count <= 6) {
+ if (maxwell3d->regs.index_buffer.count <= 6 || maxwell3d->regs.vertex_buffer.count <= 6) {
return pipeline;
}
return nullptr;
@@ -557,10 +557,10 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
previous_stage = &program;
}
Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr};
- return std::make_unique<GraphicsPipeline>(
- maxwell3d, gpu_memory, scheduler, buffer_cache, texture_cache, &shader_notify, device,
- descriptor_pool, update_descriptor_queue, thread_worker, statistics, render_pass_cache, key,
- std::move(modules), infos);
+ return std::make_unique<GraphicsPipeline>(scheduler, buffer_cache, texture_cache,
+ &shader_notify, device, descriptor_pool,
+ update_descriptor_queue, thread_worker, statistics,
+ render_pass_cache, key, std::move(modules), infos);
} catch (const Shader::Exception& exception) {
LOG_ERROR(Render_Vulkan, "{}", exception.what());
@@ -592,9 +592,9 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline() {
std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline(
const ComputePipelineCacheKey& key, const ShaderInfo* shader) {
- const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()};
- const auto& qmd{kepler_compute.launch_description};
- ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start};
+ const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()};
+ const auto& qmd{kepler_compute->launch_description};
+ ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start};
env.SetCachedSize(shader->size_bytes);
main_pools.ReleaseContents();
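
The tessellation enums moved under Maxwell::Tessellation, and the register's Integer spacing now maps explicitly onto the shader notion of equal spacing. A self-contained sketch of that translation, with stand-in enums and illustrative raw values:

    // Stand-ins for Maxwell::Tessellation::Spacing and Shader::TessSpacing.
    enum class Spacing : unsigned { Integer = 0, FractionalOdd = 1, FractionalEven = 2 };
    enum class TessSpacing { Equal, FractionalOdd, FractionalEven };

    constexpr TessSpacing Translate(Spacing spacing) {
        switch (spacing) {
        case Spacing::Integer:
            return TessSpacing::Equal; // hardware "integer" == shader "equal" spacing
        case Spacing::FractionalOdd:
            return TessSpacing::FractionalOdd;
        case Spacing::FractionalEven:
            return TessSpacing::FractionalEven;
        }
        return TessSpacing::Equal; // unreachable for valid input
    }
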
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index 127957dbf..61f9e9366 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -100,10 +100,8 @@ struct ShaderPools {
class PipelineCache : public VideoCommon::ShaderCache {
public:
- explicit PipelineCache(RasterizerVulkan& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d,
- Tegra::Engines::KeplerCompute& kepler_compute,
- Tegra::MemoryManager& gpu_memory, const Device& device,
- Scheduler& scheduler, DescriptorPool& descriptor_pool,
+ explicit PipelineCache(RasterizerVulkan& rasterizer, const Device& device, Scheduler& scheduler,
+ DescriptorPool& descriptor_pool,
UpdateDescriptorQueue& update_descriptor_queue,
RenderPassCache& render_pass_cache, BufferCache& buffer_cache,
TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_);
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 2b859c6b8..4b15c0f85 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -59,16 +59,16 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) {
return query_pool == *pool;
});
- ASSERT(it != std::end(pools));
- const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
- usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
+ if (it != std::end(pools)) {
+ const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
+ usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
+ }
}
-QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_,
- Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_,
- const Device& device_, Scheduler& scheduler_)
- : QueryCacheBase{rasterizer_, maxwell3d_, gpu_memory_}, device{device_}, scheduler{scheduler_},
+QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
+ Scheduler& scheduler_)
+ : QueryCacheBase{rasterizer_}, device{device_}, scheduler{scheduler_},
query_pools{
QueryPool{device_, scheduler_, QueryType::SamplesPassed},
} {}
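
QueryPool::Reserve above swaps an ASSERT for a tolerant lookup: with channels coming and going, a slot can be returned after its pool was destroyed. A reduced sketch of that behavior, with stand-in types:

    #include <algorithm>
    #include <cstddef>
    #include <iterator>
    #include <vector>

    struct Slot {
        int pool_id;
        int index;
    };

    class MiniUsageTracker {
    public:
        // Releases a query slot only if its pool is still alive, instead of
        // asserting; the pool may have been destroyed before the slot returns.
        void Reserve(Slot slot) {
            const auto it = std::find(pool_ids.begin(), pool_ids.end(), slot.pool_id);
            if (it == pool_ids.end()) {
                return; // the pool is gone, nothing to release
            }
            const auto pool_index = std::distance(pool_ids.begin(), it);
            usage[static_cast<std::size_t>(pool_index) * GROW_STEP + slot.index] = false;
        }

    private:
        static constexpr std::size_t GROW_STEP = 512;
        std::vector<int> pool_ids{0, 1};
        std::vector<bool> usage = std::vector<bool>(2 * GROW_STEP, true);
    };
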
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h
index b0d86c4f8..26762ee09 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.h
+++ b/src/video_core/renderer_vulkan/vk_query_cache.h
@@ -52,9 +52,8 @@ private:
class QueryCache final
: public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> {
public:
- explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_,
- Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_,
- const Device& device_, Scheduler& scheduler_);
+ explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
+ Scheduler& scheduler_);
~QueryCache();
std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type);
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index bf9f825f4..5af3c930b 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -11,6 +11,7 @@
#include "common/microprofile.h"
#include "common/scope_exit.h"
#include "common/settings.h"
+#include "video_core/control/channel_state.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/blit_image.h"
@@ -69,7 +70,7 @@ VkViewport GetViewportState(const Device& device, const Maxwell& regs, size_t in
const float width = conv(src.scale_x * 2.0f);
float y = conv(src.translate_y - src.scale_y);
float height = conv(src.scale_y * 2.0f);
- bool y_negate = regs.screen_y_control.y_negate;
+ bool y_negate = regs.window_origin.mode != Maxwell::WindowOrigin::Mode::UpperLeft;
if (!device.IsNvViewportSwizzleSupported()) {
y_negate = y_negate != (src.swizzle.y == Maxwell::ViewportSwizzle::NegativeY);
@@ -126,14 +127,13 @@ VkRect2D GetScissorState(const Maxwell& regs, size_t index, u32 up_scale = 1, u3
return scissor;
}
-DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_instanced,
- bool is_indexed) {
+DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_indexed) {
DrawParams params{
- .base_instance = regs.vb_base_instance,
- .num_instances = is_instanced ? num_instances : 1,
- .base_vertex = is_indexed ? regs.vb_element_base : regs.vertex_buffer.first,
- .num_vertices = is_indexed ? regs.index_array.count : regs.vertex_buffer.count,
- .first_index = is_indexed ? regs.index_array.first : 0,
+ .base_instance = regs.global_base_instance_index,
+ .num_instances = num_instances,
+ .base_vertex = is_indexed ? regs.global_base_vertex_index : regs.vertex_buffer.first,
+ .num_vertices = is_indexed ? regs.index_buffer.count : regs.vertex_buffer.count,
+ .first_index = is_indexed ? regs.index_buffer.first : 0,
.is_indexed = is_indexed,
};
if (regs.draw.topology == Maxwell::PrimitiveTopology::Quads) {
@@ -148,31 +148,25 @@ DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_instan
} // Anonymous namespace
RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
- Tegra::MemoryManager& gpu_memory_,
Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_,
const Device& device_, MemoryAllocator& memory_allocator_,
StateTracker& state_tracker_, Scheduler& scheduler_)
- : RasterizerAccelerated{cpu_memory_}, gpu{gpu_},
- gpu_memory{gpu_memory_}, maxwell3d{gpu.Maxwell3D()}, kepler_compute{gpu.KeplerCompute()},
- screen_info{screen_info_}, device{device_}, memory_allocator{memory_allocator_},
- state_tracker{state_tracker_}, scheduler{scheduler_},
+ : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, screen_info{screen_info_}, device{device_},
+ memory_allocator{memory_allocator_}, state_tracker{state_tracker_}, scheduler{scheduler_},
staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler),
update_descriptor_queue(device, scheduler),
blit_image(device, scheduler, state_tracker, descriptor_pool),
- astc_decoder_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue,
- memory_allocator),
render_pass_cache(device), texture_cache_runtime{device, scheduler,
memory_allocator, staging_pool,
- blit_image, astc_decoder_pass,
- render_pass_cache},
- texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory),
+ blit_image, render_pass_cache,
+ descriptor_pool, update_descriptor_queue},
+ texture_cache(texture_cache_runtime, *this),
buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool,
update_descriptor_queue, descriptor_pool),
- buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime),
- pipeline_cache(*this, maxwell3d, kepler_compute, gpu_memory, device, scheduler,
- descriptor_pool, update_descriptor_queue, render_pass_cache, buffer_cache,
- texture_cache, gpu.ShaderNotify()),
- query_cache{*this, maxwell3d, gpu_memory, device, scheduler}, accelerate_dma{buffer_cache},
+ buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
+ pipeline_cache(*this, device, scheduler, descriptor_pool, update_descriptor_queue,
+ render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()),
+ query_cache{*this, device, scheduler}, accelerate_dma{buffer_cache},
fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler),
wfi_event(device.GetLogical().CreateEvent()) {
scheduler.SetQueryCache(query_cache);
@@ -180,7 +174,7 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
RasterizerVulkan::~RasterizerVulkan() = default;
-void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
+void RasterizerVulkan::Draw(bool is_indexed, u32 instance_count) {
MICROPROFILE_SCOPE(Vulkan_Drawing);
SCOPE_EXIT({ gpu.TickWork(); });
@@ -193,15 +187,19 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
return;
}
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ // Update the engine pointers, as the active channel may have changed.
+ pipeline->SetEngine(maxwell3d, gpu_memory);
pipeline->Configure(is_indexed);
+ BindInlineIndexBuffer();
+
BeginTransformFeedback();
UpdateDynamicStates();
- const auto& regs{maxwell3d.regs};
- const u32 num_instances{maxwell3d.mme_draw.instance_count};
- const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_instanced, is_indexed)};
+ const auto& regs{maxwell3d->regs};
+ const u32 num_instances{instance_count};
+ const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_indexed)};
scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) {
if (draw_params.is_indexed) {
cmdbuf.DrawIndexed(draw_params.num_vertices, draw_params.num_instances,
@@ -218,18 +216,18 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
void RasterizerVulkan::Clear() {
MICROPROFILE_SCOPE(Vulkan_Clearing);
- if (!maxwell3d.ShouldExecute()) {
+ if (!maxwell3d->ShouldExecute()) {
return;
}
FlushWork();
query_cache.UpdateCounters();
- auto& regs = maxwell3d.regs;
- const bool use_color = regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B ||
- regs.clear_buffers.A;
- const bool use_depth = regs.clear_buffers.Z;
- const bool use_stencil = regs.clear_buffers.S;
+ auto& regs = maxwell3d->regs;
+ const bool use_color = regs.clear_surface.R || regs.clear_surface.G || regs.clear_surface.B ||
+ regs.clear_surface.A;
+ const bool use_depth = regs.clear_surface.Z;
+ const bool use_stencil = regs.clear_surface.S;
if (!use_color && !use_depth && !use_stencil) {
return;
}
@@ -248,9 +246,16 @@ void RasterizerVulkan::Clear() {
}
UpdateViewportsState(regs);
+ VkRect2D default_scissor;
+ default_scissor.offset.x = 0;
+ default_scissor.offset.y = 0;
+ default_scissor.extent.width = std::numeric_limits<s32>::max();
+ default_scissor.extent.height = std::numeric_limits<s32>::max();
+
VkClearRect clear_rect{
- .rect = GetScissorState(regs, 0, up_scale, down_shift),
- .baseArrayLayer = regs.clear_buffers.layer,
+ .rect = regs.clear_control.use_scissor ? GetScissorState(regs, 0, up_scale, down_shift)
+ : default_scissor,
+ .baseArrayLayer = regs.clear_surface.layer,
.layerCount = 1,
};
if (clear_rect.rect.extent.width == 0 || clear_rect.rect.extent.height == 0) {
@@ -261,7 +266,7 @@ void RasterizerVulkan::Clear() {
.height = std::min(clear_rect.rect.extent.height, render_area.height),
};
- const u32 color_attachment = regs.clear_buffers.RT;
+ const u32 color_attachment = regs.clear_surface.RT;
if (use_color && framebuffer->HasAspectColorBit(color_attachment)) {
VkClearValue clear_value;
bool is_integer = false;
@@ -283,7 +288,8 @@ void RasterizerVulkan::Clear() {
break;
}
if (!is_integer) {
- std::memcpy(clear_value.color.float32, regs.clear_color, sizeof(regs.clear_color));
+ std::memcpy(clear_value.color.float32, regs.clear_color.data(),
+ regs.clear_color.size() * sizeof(f32));
} else if (!is_signed) {
for (size_t i = 0; i < 4; i++) {
clear_value.color.uint32[i] = static_cast<u32>(
@@ -297,14 +303,19 @@ void RasterizerVulkan::Clear() {
}
}
- scheduler.Record([color_attachment, clear_value, clear_rect](vk::CommandBuffer cmdbuf) {
- const VkClearAttachment attachment{
- .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
- .colorAttachment = color_attachment,
- .clearValue = clear_value,
- };
- cmdbuf.ClearAttachments(attachment, clear_rect);
- });
+ if (regs.clear_surface.R && regs.clear_surface.G && regs.clear_surface.B &&
+ regs.clear_surface.A) {
+ scheduler.Record([color_attachment, clear_value, clear_rect](vk::CommandBuffer cmdbuf) {
+ const VkClearAttachment attachment{
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .colorAttachment = color_attachment,
+ .clearValue = clear_value,
+ };
+ cmdbuf.ClearAttachments(attachment, clear_rect);
+ });
+ } else {
+ UNIMPLEMENTED_MSG("Unimplemented Clear only the specified channel");
+ }
}
if (!use_depth && !use_stencil) {
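
Clear() now honors regs.clear_control.use_scissor: an unscissored clear gets an effectively unbounded rectangle that is then clamped to the render area, as in the hunk above. A sketch of that selection, with a Rect2D stand-in for VkRect2D:

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    struct Rect2D {
        std::int32_t x, y;
        std::uint32_t width, height;
    };

    Rect2D MakeClearRect(bool use_scissor, const Rect2D& scissor,
                         std::uint32_t render_width, std::uint32_t render_height) {
        constexpr auto max_extent =
            static_cast<std::uint32_t>(std::numeric_limits<std::int32_t>::max());
        Rect2D rect = use_scissor ? scissor : Rect2D{0, 0, max_extent, max_extent};
        rect.width = std::min(rect.width, render_width);   // clamp to the render area
        rect.height = std::min(rect.height, render_height);
        return rect;
    }
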
@@ -339,9 +350,9 @@ void RasterizerVulkan::DispatchCompute() {
return;
}
std::scoped_lock lock{texture_cache.mutex, buffer_cache.mutex};
- pipeline->Configure(kepler_compute, gpu_memory, scheduler, buffer_cache, texture_cache);
+ pipeline->Configure(*kepler_compute, *gpu_memory, scheduler, buffer_cache, texture_cache);
- const auto& qmd{kepler_compute.launch_description};
+ const auto& qmd{kepler_compute->launch_description};
const std::array<u32, 3> dim{qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z};
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([dim](vk::CommandBuffer cmdbuf) { cmdbuf.Dispatch(dim[0], dim[1], dim[2]); });
@@ -422,7 +433,7 @@ void RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) {
}
}
-void RasterizerVulkan::SyncGuestHost() {
+void RasterizerVulkan::InvalidateGPUCache() {
pipeline_cache.SyncGuestHost();
{
std::scoped_lock lock{buffer_cache.mutex};
@@ -442,40 +453,30 @@ void RasterizerVulkan::UnmapMemory(VAddr addr, u64 size) {
pipeline_cache.OnCPUWrite(addr, size);
}
-void RasterizerVulkan::ModifyGPUMemory(GPUVAddr addr, u64 size) {
+void RasterizerVulkan::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) {
{
std::scoped_lock lock{texture_cache.mutex};
- texture_cache.UnmapGPUMemory(addr, size);
+ texture_cache.UnmapGPUMemory(as_id, addr, size);
}
}
-void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) {
- if (!gpu.IsAsync()) {
- gpu_memory.Write<u32>(addr, value);
- return;
- }
- fence_manager.SignalSemaphore(addr, value);
+void RasterizerVulkan::SignalFence(std::function<void()>&& func) {
+ fence_manager.SignalFence(std::move(func));
+}
+
+void RasterizerVulkan::SyncOperation(std::function<void()>&& func) {
+ fence_manager.SyncOperation(std::move(func));
}
void RasterizerVulkan::SignalSyncPoint(u32 value) {
- if (!gpu.IsAsync()) {
- gpu.IncrementSyncPoint(value);
- return;
- }
fence_manager.SignalSyncPoint(value);
}
void RasterizerVulkan::SignalReference() {
- if (!gpu.IsAsync()) {
- return;
- }
fence_manager.SignalOrdering();
}
void RasterizerVulkan::ReleaseFences() {
- if (!gpu.IsAsync()) {
- return;
- }
fence_manager.WaitPendingFences();
}
@@ -552,13 +553,13 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerVulkan::AccessAccelerateDMA()
}
void RasterizerVulkan::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
- std::span<u8> memory) {
- auto cpu_addr = gpu_memory.GpuToCpuAddress(address);
+ std::span<const u8> memory) {
+ auto cpu_addr = gpu_memory->GpuToCpuAddress(address);
if (!cpu_addr) [[unlikely]] {
- gpu_memory.WriteBlock(address, memory.data(), copy_size);
+ gpu_memory->WriteBlock(address, memory.data(), copy_size);
return;
}
- gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size);
+ gpu_memory->WriteBlockUnsafe(address, memory.data(), copy_size);
{
std::unique_lock<std::mutex> lock{buffer_cache.mutex};
if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) {
@@ -627,7 +628,7 @@ bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64
}
void RasterizerVulkan::UpdateDynamicStates() {
- auto& regs = maxwell3d.regs;
+ auto& regs = maxwell3d->regs;
UpdateViewportsState(regs);
UpdateScissorsState(regs);
UpdateDepthBias(regs);
@@ -651,24 +652,24 @@ void RasterizerVulkan::UpdateDynamicStates() {
}
void RasterizerVulkan::BeginTransformFeedback() {
- const auto& regs = maxwell3d.regs;
- if (regs.tfb_enabled == 0) {
+ const auto& regs = maxwell3d->regs;
+ if (regs.transform_feedback_enabled == 0) {
return;
}
if (!device.IsExtTransformFeedbackSupported()) {
LOG_ERROR(Render_Vulkan, "Transform feedbacks used but not supported");
return;
}
- UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationControl) ||
- regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationEval) ||
- regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::Geometry));
+ UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderType::TessellationInit) ||
+ regs.IsShaderConfigEnabled(Maxwell::ShaderType::Tessellation) ||
+ regs.IsShaderConfigEnabled(Maxwell::ShaderType::Geometry));
scheduler.Record(
[](vk::CommandBuffer cmdbuf) { cmdbuf.BeginTransformFeedbackEXT(0, 0, nullptr, nullptr); });
}
void RasterizerVulkan::EndTransformFeedback() {
- const auto& regs = maxwell3d.regs;
- if (regs.tfb_enabled == 0) {
+ const auto& regs = maxwell3d->regs;
+ if (regs.transform_feedback_enabled == 0) {
return;
}
if (!device.IsExtTransformFeedbackSupported()) {
@@ -748,11 +749,11 @@ void RasterizerVulkan::UpdateDepthBias(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchDepthBias()) {
return;
}
- float units = regs.polygon_offset_units / 2.0f;
- const bool is_d24 = regs.zeta.format == Tegra::DepthFormat::S8_UINT_Z24_UNORM ||
- regs.zeta.format == Tegra::DepthFormat::D24X8_UNORM ||
- regs.zeta.format == Tegra::DepthFormat::D24S8_UNORM ||
- regs.zeta.format == Tegra::DepthFormat::D24C8_UNORM;
+ float units = regs.depth_bias / 2.0f;
+ const bool is_d24 = regs.zeta.format == Tegra::DepthFormat::Z24_UNORM_S8_UINT ||
+ regs.zeta.format == Tegra::DepthFormat::X8Z24_UNORM ||
+ regs.zeta.format == Tegra::DepthFormat::S8Z24_UNORM ||
+ regs.zeta.format == Tegra::DepthFormat::V8Z24_UNORM;
if (is_d24 && !device.SupportsD24DepthBuffer()) {
// the base formulas can be obtained from here:
// https://docs.microsoft.com/en-us/windows/win32/direct3d11/d3d10-graphics-programming-guide-output-merger-stage-depth-bias
@@ -760,8 +761,8 @@ void RasterizerVulkan::UpdateDepthBias(Tegra::Engines::Maxwell3D::Regs& regs) {
static_cast<double>(1ULL << (32 - 24)) / (static_cast<double>(0x1.ep+127));
units = static_cast<float>(static_cast<double>(units) * rescale_factor);
}
- scheduler.Record([constant = units, clamp = regs.polygon_offset_clamp,
- factor = regs.polygon_offset_factor](vk::CommandBuffer cmdbuf) {
+ scheduler.Record([constant = units, clamp = regs.depth_bias_clamp,
+ factor = regs.slope_scale_depth_bias](vk::CommandBuffer cmdbuf) {
cmdbuf.SetDepthBias(constant, clamp, factor);
});
}
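
For reference, the documentation linked in the comment above defines the constant part of the bias for a UNORM depth buffer as DepthBias * r, where r = 2^-n for an n-bit format, so r = 2^-24 for the D24 variants being special-cased here. A small illustrative helper (hypothetical, not part of the patch) that spells out that formula; note also that the hex-float constant in the code, 0x1.ep+127, is 1.875 * 2^127, and 1ULL << (32 - 24) is 256:

    #include <cmath>

    // Bias = DepthBias * r + SlopeScaledDepthBias * MaxDepthSlope (D3D, UNORM)
    double UnormDepthBias(double depth_bias, double slope_scaled_bias,
                          double max_depth_slope, int unorm_bits) {
        const double r = std::ldexp(1.0, -unorm_bits); // 2^-n, 2^-24 for D24
        return depth_bias * r + slope_scaled_bias * max_depth_slope;
    }
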
@@ -791,8 +792,8 @@ void RasterizerVulkan::UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs)
if (regs.stencil_two_side_enable) {
// Separate values per face
scheduler.Record(
- [front_ref = regs.stencil_front_func_ref, front_write_mask = regs.stencil_front_mask,
- front_test_mask = regs.stencil_front_func_mask, back_ref = regs.stencil_back_func_ref,
+ [front_ref = regs.stencil_front_ref, front_write_mask = regs.stencil_front_mask,
+ front_test_mask = regs.stencil_front_func_mask, back_ref = regs.stencil_back_ref,
back_write_mask = regs.stencil_back_mask,
back_test_mask = regs.stencil_back_func_mask](vk::CommandBuffer cmdbuf) {
// Front face
@@ -807,7 +808,7 @@ void RasterizerVulkan::UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs)
});
} else {
// Front face defines both faces
- scheduler.Record([ref = regs.stencil_front_func_ref, write_mask = regs.stencil_front_mask,
+ scheduler.Record([ref = regs.stencil_front_ref, write_mask = regs.stencil_front_mask,
test_mask = regs.stencil_front_func_mask](vk::CommandBuffer cmdbuf) {
cmdbuf.SetStencilReference(VK_STENCIL_FACE_FRONT_AND_BACK, ref);
cmdbuf.SetStencilWriteMask(VK_STENCIL_FACE_FRONT_AND_BACK, write_mask);
@@ -820,7 +821,8 @@ void RasterizerVulkan::UpdateLineWidth(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchLineWidth()) {
return;
}
- const float width = regs.line_smooth_enable ? regs.line_width_smooth : regs.line_width_aliased;
+ const float width =
+ regs.line_anti_alias_enable ? regs.line_width_smooth : regs.line_width_aliased;
scheduler.Record([width](vk::CommandBuffer cmdbuf) { cmdbuf.SetLineWidth(width); });
}
@@ -828,10 +830,10 @@ void RasterizerVulkan::UpdateCullMode(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchCullMode()) {
return;
}
- scheduler.Record(
- [enabled = regs.cull_test_enabled, cull_face = regs.cull_face](vk::CommandBuffer cmdbuf) {
- cmdbuf.SetCullModeEXT(enabled ? MaxwellToVK::CullFace(cull_face) : VK_CULL_MODE_NONE);
- });
+ scheduler.Record([enabled = regs.gl_cull_test_enabled,
+ cull_face = regs.gl_cull_face](vk::CommandBuffer cmdbuf) {
+ cmdbuf.SetCullModeEXT(enabled ? MaxwellToVK::CullFace(cull_face) : VK_CULL_MODE_NONE);
+ });
}
void RasterizerVulkan::UpdateDepthBoundsTestEnable(Tegra::Engines::Maxwell3D::Regs& regs) {
@@ -880,8 +882,8 @@ void RasterizerVulkan::UpdateFrontFace(Tegra::Engines::Maxwell3D::Regs& regs) {
return;
}
- VkFrontFace front_face = MaxwellToVK::FrontFace(regs.front_face);
- if (regs.screen_y_control.triangle_rast_flip != 0) {
+ VkFrontFace front_face = MaxwellToVK::FrontFace(regs.gl_front_face);
+ if (regs.window_origin.flip_y != 0) {
front_face = front_face == VK_FRONT_FACE_CLOCKWISE ? VK_FRONT_FACE_COUNTER_CLOCKWISE
: VK_FRONT_FACE_CLOCKWISE;
}
@@ -893,16 +895,16 @@ void RasterizerVulkan::UpdateStencilOp(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchStencilOp()) {
return;
}
- const Maxwell::StencilOp fail = regs.stencil_front_op_fail;
- const Maxwell::StencilOp zfail = regs.stencil_front_op_zfail;
- const Maxwell::StencilOp zpass = regs.stencil_front_op_zpass;
- const Maxwell::ComparisonOp compare = regs.stencil_front_func_func;
+ const Maxwell::StencilOp::Op fail = regs.stencil_front_op.fail;
+ const Maxwell::StencilOp::Op zfail = regs.stencil_front_op.zfail;
+ const Maxwell::StencilOp::Op zpass = regs.stencil_front_op.zpass;
+ const Maxwell::ComparisonOp compare = regs.stencil_front_op.func;
if (regs.stencil_two_side_enable) {
// Separate stencil op per face
- const Maxwell::StencilOp back_fail = regs.stencil_back_op_fail;
- const Maxwell::StencilOp back_zfail = regs.stencil_back_op_zfail;
- const Maxwell::StencilOp back_zpass = regs.stencil_back_op_zpass;
- const Maxwell::ComparisonOp back_compare = regs.stencil_back_func_func;
+ const Maxwell::StencilOp::Op back_fail = regs.stencil_back_op.fail;
+ const Maxwell::StencilOp::Op back_zfail = regs.stencil_back_op.zfail;
+ const Maxwell::StencilOp::Op back_zpass = regs.stencil_back_op.zpass;
+ const Maxwell::ComparisonOp back_compare = regs.stencil_back_op.func;
scheduler.Record([fail, zfail, zpass, compare, back_fail, back_zfail, back_zpass,
back_compare](vk::CommandBuffer cmdbuf) {
cmdbuf.SetStencilOpEXT(VK_STENCIL_FACE_FRONT_BIT, MaxwellToVK::StencilOp(fail),
@@ -933,7 +935,7 @@ void RasterizerVulkan::UpdateStencilTestEnable(Tegra::Engines::Maxwell3D::Regs&
}
void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) {
- auto& dirty{maxwell3d.dirty.flags};
+ auto& dirty{maxwell3d->dirty.flags};
if (!dirty[Dirty::VertexInput]) {
return;
}
@@ -974,15 +976,15 @@ void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs)
dirty[Dirty::VertexBinding0 + index] = false;
const u32 binding{static_cast<u32>(index)};
- const auto& input_binding{regs.vertex_array[binding]};
- const bool is_instanced{regs.instanced_arrays.IsInstancingEnabled(binding)};
+ const auto& input_binding{regs.vertex_streams[binding]};
+ const bool is_instanced{regs.vertex_stream_instances.IsInstancingEnabled(binding)};
bindings.push_back({
.sType = VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT,
.pNext = nullptr,
.binding = binding,
.stride = input_binding.stride,
.inputRate = is_instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX,
- .divisor = is_instanced ? input_binding.divisor : 1,
+ .divisor = is_instanced ? input_binding.frequency : 1,
});
}
scheduler.Record([bindings, attributes](vk::CommandBuffer cmdbuf) {
@@ -990,4 +992,54 @@ void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs)
});
}
+void RasterizerVulkan::InitializeChannel(Tegra::Control::ChannelState& channel) {
+ CreateChannel(channel);
+ {
+ std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ texture_cache.CreateChannel(channel);
+ buffer_cache.CreateChannel(channel);
+ }
+ pipeline_cache.CreateChannel(channel);
+ query_cache.CreateChannel(channel);
+ state_tracker.SetupTables(channel);
+}
+
+void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) {
+ const s32 channel_id = channel.bind_id;
+ BindToChannel(channel_id);
+ {
+ std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ texture_cache.BindToChannel(channel_id);
+ buffer_cache.BindToChannel(channel_id);
+ }
+ pipeline_cache.BindToChannel(channel_id);
+ query_cache.BindToChannel(channel_id);
+ state_tracker.ChangeChannel(channel);
+ state_tracker.InvalidateState();
+}
+
+void RasterizerVulkan::ReleaseChannel(s32 channel_id) {
+ EraseChannel(channel_id);
+ {
+ std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ texture_cache.EraseChannel(channel_id);
+ buffer_cache.EraseChannel(channel_id);
+ }
+ pipeline_cache.EraseChannel(channel_id);
+ query_cache.EraseChannel(channel_id);
+}
+
+void RasterizerVulkan::BindInlineIndexBuffer() {
+ if (maxwell3d->inline_index_draw_indexes.empty()) {
+ return;
+ }
+ const auto data_count = static_cast<u32>(maxwell3d->inline_index_draw_indexes.size());
+ auto buffer = buffer_cache_runtime.UploadStagingBuffer(data_count);
+ std::memcpy(buffer.mapped_span.data(), maxwell3d->inline_index_draw_indexes.data(), data_count);
+ buffer_cache_runtime.BindIndexBuffer(
+ maxwell3d->regs.draw.topology, maxwell3d->regs.index_buffer.format,
+ maxwell3d->regs.index_buffer.first, maxwell3d->regs.index_buffer.count, buffer.buffer,
+ static_cast<u32>(buffer.offset), data_count);
+}
+
} // namespace Vulkan
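
The three channel entry points added above follow one pattern: fan the operation out to every cache while holding buffer_cache.mutex and texture_cache.mutex together, then let the state tracker retarget its dirty flags on a bind. From a caller's perspective the lifecycle is roughly the following; the driver function itself is hypothetical, but the methods and channel.bind_id come from the code above (required includes omitted):

    void RunOnChannel(VideoCore::RasterizerInterface& rasterizer,
                      Tegra::Control::ChannelState& channel) {
        rasterizer.InitializeChannel(channel);      // allocate per-channel caches
        rasterizer.BindChannel(channel);            // make current, invalidate state
        // ... submit GPU work owned by this channel ...
        rasterizer.ReleaseChannel(channel.bind_id); // tear per-channel state down
    }
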
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 0370ea39b..b0bc306f5 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -8,6 +8,7 @@
#include <boost/container/static_vector.hpp>
#include "common/common_types.h"
+#include "video_core/control/channel_state_cache.h"
#include "video_core/engines/maxwell_dma.h"
#include "video_core/rasterizer_accelerated.h"
#include "video_core/rasterizer_interface.h"
@@ -54,16 +55,16 @@ private:
BufferCache& buffer_cache;
};
-class RasterizerVulkan final : public VideoCore::RasterizerAccelerated {
+class RasterizerVulkan final : public VideoCore::RasterizerAccelerated,
+ protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
public:
explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
- Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
- ScreenInfo& screen_info_, const Device& device_,
- MemoryAllocator& memory_allocator_, StateTracker& state_tracker_,
- Scheduler& scheduler_);
+ Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_,
+ const Device& device_, MemoryAllocator& memory_allocator_,
+ StateTracker& state_tracker_, Scheduler& scheduler_);
~RasterizerVulkan() override;
- void Draw(bool is_indexed, bool is_instanced) override;
+ void Draw(bool is_indexed, u32 instance_count) override;
void Clear() override;
void DispatchCompute() override;
void ResetCounter(VideoCore::QueryType type) override;
@@ -75,10 +76,11 @@ public:
bool MustFlushRegion(VAddr addr, u64 size) override;
void InvalidateRegion(VAddr addr, u64 size) override;
void OnCPUWrite(VAddr addr, u64 size) override;
- void SyncGuestHost() override;
+ void InvalidateGPUCache() override;
void UnmapMemory(VAddr addr, u64 size) override;
- void ModifyGPUMemory(GPUVAddr addr, u64 size) override;
- void SignalSemaphore(GPUVAddr addr, u32 value) override;
+ void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override;
+ void SignalFence(std::function<void()>&& func) override;
+ void SyncOperation(std::function<void()>&& func) override;
void SignalSyncPoint(u32 value) override;
void SignalReference() override;
void ReleaseFences() override;
@@ -93,12 +95,18 @@ public:
const Tegra::Engines::Fermi2D::Config& copy_config) override;
Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
- std::span<u8> memory) override;
+ std::span<const u8> memory) override;
bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
u32 pixel_stride) override;
void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
const VideoCore::DiskResourceLoadCallback& callback) override;
+ void InitializeChannel(Tegra::Control::ChannelState& channel) override;
+
+ void BindChannel(Tegra::Control::ChannelState& channel) override;
+
+ void ReleaseChannel(s32 channel_id) override;
+
private:
static constexpr size_t MAX_TEXTURES = 192;
static constexpr size_t MAX_IMAGES = 48;
@@ -133,10 +141,9 @@ private:
void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs);
+ void BindInlineIndexBuffer();
+
Tegra::GPU& gpu;
- Tegra::MemoryManager& gpu_memory;
- Tegra::Engines::Maxwell3D& maxwell3d;
- Tegra::Engines::KeplerCompute& kepler_compute;
ScreenInfo& screen_info;
const Device& device;
@@ -148,7 +155,6 @@ private:
DescriptorPool descriptor_pool;
UpdateDescriptorQueue update_descriptor_queue;
BlitImageHelper blit_image;
- ASTCDecoderPass astc_decoder_pass;
RenderPassCache render_pass_cache;
TextureCacheRuntime texture_cache_runtime;
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index a331ff37e..7934f2a51 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -136,9 +136,10 @@ bool Scheduler::UpdateRescaling(bool is_rescaling) {
}
void Scheduler::WorkerThread(std::stop_token stop_token) {
- Common::SetCurrentThreadName("yuzu:VulkanWorker");
+ Common::SetCurrentThreadName("VulkanWorker");
do {
std::unique_ptr<CommandChunk> work;
+ bool has_submit{false};
{
std::unique_lock lock{work_mutex};
if (work_queue.empty()) {
@@ -150,9 +151,10 @@ void Scheduler::WorkerThread(std::stop_token stop_token) {
}
work = std::move(work_queue.front());
work_queue.pop();
+
+ has_submit = work->HasSubmit();
+ work->ExecuteAll(current_cmdbuf);
}
- const bool has_submit = work->HasSubmit();
- work->ExecuteAll(current_cmdbuf);
if (has_submit) {
AllocateWorkerCommandBuffer();
}
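
The reordering above is subtle: has_submit is now read before the chunk is executed, and execution happens while work_mutex is held, which (judging from the diff alone) means any thread synchronizing on that mutex observes fully replayed chunks. Reduced to its locking shape, with generic stand-in names:

    #include <memory>
    #include <mutex>
    #include <queue>
    #include <utility>

    struct Chunk {
        bool HasSubmit() const { return submit; }
        void ExecuteAll() { /* replay recorded commands */ }
        bool submit = false;
    };

    std::mutex work_mutex;
    std::queue<std::unique_ptr<Chunk>> work_queue;

    void DrainOne() {
        bool has_submit = false;
        {
            std::unique_lock lock{work_mutex};
            if (work_queue.empty()) {
                return;
            }
            std::unique_ptr<Chunk> work = std::move(work_queue.front());
            work_queue.pop();
            has_submit = work->HasSubmit(); // read before the chunk goes away
            work->ExecuteAll();             // executed under the lock
        }
        if (has_submit) {
            /* allocate the next command buffer outside the lock */
        }
    }
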
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
index c04aad08f..929216749 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.h
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -144,7 +144,6 @@ private:
using FuncType = TypedCommand<T>;
static_assert(sizeof(FuncType) < sizeof(data), "Lambda is too large");
- recorded_counts++;
command_offset = Common::AlignUp(command_offset, alignof(FuncType));
if (command_offset > sizeof(data) - sizeof(FuncType)) {
return false;
@@ -166,7 +165,7 @@ private:
}
bool Empty() const {
- return recorded_counts == 0;
+ return command_offset == 0;
}
bool HasSubmit() const {
@@ -177,7 +176,6 @@ private:
Command* first = nullptr;
Command* last = nullptr;
- size_t recorded_counts = 0;
size_t command_offset = 0;
bool submit = false;
alignas(std::max_align_t) std::array<u8, 0x8000> data{};
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
index 9ad096431..b87c3be66 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
@@ -7,9 +7,9 @@
#include "common/common_types.h"
#include "core/core.h"
+#include "video_core/control/channel_state.h"
#include "video_core/dirty_flags.h"
#include "video_core/engines/maxwell_3d.h"
-#include "video_core/gpu.h"
#include "video_core/renderer_vulkan/vk_state_tracker.h"
#define OFF(field_name) MAXWELL3D_REG_INDEX(field_name)
@@ -51,8 +51,8 @@ Flags MakeInvalidationFlags() {
void SetupDirtyViewports(Tables& tables) {
FillBlock(tables[0], OFF(viewport_transform), NUM(viewport_transform), Viewports);
FillBlock(tables[0], OFF(viewports), NUM(viewports), Viewports);
- tables[0][OFF(viewport_transform_enabled)] = Viewports;
- tables[1][OFF(screen_y_control)] = Viewports;
+ tables[0][OFF(viewport_scale_offset_enbled)] = Viewports;
+ tables[1][OFF(window_origin)] = Viewports;
}
void SetupDirtyScissors(Tables& tables) {
@@ -61,9 +61,9 @@ void SetupDirtyScissors(Tables& tables) {
void SetupDirtyDepthBias(Tables& tables) {
auto& table = tables[0];
- table[OFF(polygon_offset_units)] = DepthBias;
- table[OFF(polygon_offset_clamp)] = DepthBias;
- table[OFF(polygon_offset_factor)] = DepthBias;
+ table[OFF(depth_bias)] = DepthBias;
+ table[OFF(depth_bias_clamp)] = DepthBias;
+ table[OFF(slope_scale_depth_bias)] = DepthBias;
}
void SetupDirtyBlendConstants(Tables& tables) {
@@ -77,10 +77,10 @@ void SetupDirtyDepthBounds(Tables& tables) {
void SetupDirtyStencilProperties(Tables& tables) {
auto& table = tables[0];
table[OFF(stencil_two_side_enable)] = StencilProperties;
- table[OFF(stencil_front_func_ref)] = StencilProperties;
+ table[OFF(stencil_front_ref)] = StencilProperties;
table[OFF(stencil_front_mask)] = StencilProperties;
table[OFF(stencil_front_func_mask)] = StencilProperties;
- table[OFF(stencil_back_func_ref)] = StencilProperties;
+ table[OFF(stencil_back_ref)] = StencilProperties;
table[OFF(stencil_back_mask)] = StencilProperties;
table[OFF(stencil_back_func_mask)] = StencilProperties;
}
@@ -92,8 +92,8 @@ void SetupDirtyLineWidth(Tables& tables) {
void SetupDirtyCullMode(Tables& tables) {
auto& table = tables[0];
- table[OFF(cull_face)] = CullMode;
- table[OFF(cull_test_enabled)] = CullMode;
+ table[OFF(gl_cull_face)] = CullMode;
+ table[OFF(gl_cull_test_enabled)] = CullMode;
}
void SetupDirtyDepthBoundsEnable(Tables& tables) {
@@ -114,20 +114,20 @@ void SetupDirtyDepthCompareOp(Tables& tables) {
void SetupDirtyFrontFace(Tables& tables) {
auto& table = tables[0];
- table[OFF(front_face)] = FrontFace;
- table[OFF(screen_y_control)] = FrontFace;
+ table[OFF(gl_front_face)] = FrontFace;
+ table[OFF(window_origin)] = FrontFace;
}
void SetupDirtyStencilOp(Tables& tables) {
auto& table = tables[0];
- table[OFF(stencil_front_op_fail)] = StencilOp;
- table[OFF(stencil_front_op_zfail)] = StencilOp;
- table[OFF(stencil_front_op_zpass)] = StencilOp;
- table[OFF(stencil_front_func_func)] = StencilOp;
- table[OFF(stencil_back_op_fail)] = StencilOp;
- table[OFF(stencil_back_op_zfail)] = StencilOp;
- table[OFF(stencil_back_op_zpass)] = StencilOp;
- table[OFF(stencil_back_func_func)] = StencilOp;
+ table[OFF(stencil_front_op.fail)] = StencilOp;
+ table[OFF(stencil_front_op.zfail)] = StencilOp;
+ table[OFF(stencil_front_op.zpass)] = StencilOp;
+ table[OFF(stencil_front_op.func)] = StencilOp;
+ table[OFF(stencil_back_op.fail)] = StencilOp;
+ table[OFF(stencil_back_op.zfail)] = StencilOp;
+ table[OFF(stencil_back_op.zpass)] = StencilOp;
+ table[OFF(stencil_back_op.func)] = StencilOp;
// Table 0 is used by StencilProperties
tables[1][OFF(stencil_two_side_enable)] = StencilOp;
@@ -139,10 +139,10 @@ void SetupDirtyStencilTestEnable(Tables& tables) {
void SetupDirtyBlending(Tables& tables) {
tables[0][OFF(color_mask_common)] = Blending;
- tables[0][OFF(independent_blend_enable)] = Blending;
+ tables[0][OFF(blend_per_target_enabled)] = Blending;
FillBlock(tables[0], OFF(color_mask), NUM(color_mask), Blending);
FillBlock(tables[0], OFF(blend), NUM(blend), Blending);
- FillBlock(tables[0], OFF(independent_blend), NUM(independent_blend), Blending);
+ FillBlock(tables[0], OFF(blend_per_target), NUM(blend_per_target), Blending);
}
void SetupDirtyViewportSwizzles(Tables& tables) {
@@ -166,17 +166,16 @@ void SetupDirtyVertexBindings(Tables& tables) {
static constexpr size_t divisor_offset = 3;
for (size_t i = 0; i < Regs::NumVertexArrays; ++i) {
const u8 flag = static_cast<u8>(VertexBinding0 + i);
- tables[0][OFF(instanced_arrays) + i] = VertexInput;
- tables[1][OFF(instanced_arrays) + i] = flag;
- tables[0][OFF(vertex_array) + i * NUM(vertex_array[0]) + divisor_offset] = VertexInput;
- tables[1][OFF(vertex_array) + i * NUM(vertex_array[0]) + divisor_offset] = flag;
+ tables[0][OFF(vertex_stream_instances) + i] = VertexInput;
+ tables[1][OFF(vertex_stream_instances) + i] = flag;
+ tables[0][OFF(vertex_streams) + i * NUM(vertex_streams[0]) + divisor_offset] = VertexInput;
+ tables[1][OFF(vertex_streams) + i * NUM(vertex_streams[0]) + divisor_offset] = flag;
}
}
} // Anonymous namespace
-StateTracker::StateTracker(Tegra::GPU& gpu)
- : flags{gpu.Maxwell3D().dirty.flags}, invalidation_flags{MakeInvalidationFlags()} {
- auto& tables{gpu.Maxwell3D().dirty.tables};
+void StateTracker::SetupTables(Tegra::Control::ChannelState& channel_state) {
+ auto& tables{channel_state.maxwell_3d->dirty.tables};
SetupDirtyFlags(tables);
SetupDirtyViewports(tables);
SetupDirtyScissors(tables);
@@ -199,4 +198,15 @@ StateTracker::StateTracker(Tegra::GPU& gpu)
SetupDirtyVertexBindings(tables);
}
+void StateTracker::ChangeChannel(Tegra::Control::ChannelState& channel_state) {
+ flags = &channel_state.maxwell_3d->dirty.flags;
+}
+
+void StateTracker::InvalidateState() {
+ flags->set();
+}
+
+StateTracker::StateTracker()
+ : flags{&default_flags}, default_flags{}, invalidation_flags{MakeInvalidationFlags()} {}
+
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.h b/src/video_core/renderer_vulkan/vk_state_tracker.h
index a85bc1c10..2296dea60 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.h
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.h
@@ -10,6 +10,12 @@
#include "video_core/dirty_flags.h"
#include "video_core/engines/maxwell_3d.h"
+namespace Tegra {
+namespace Control {
+struct ChannelState;
+}
+} // namespace Tegra
+
namespace Vulkan {
namespace Dirty {
@@ -53,19 +59,19 @@ class StateTracker {
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
public:
- explicit StateTracker(Tegra::GPU& gpu);
+ explicit StateTracker();
void InvalidateCommandBufferState() {
- flags |= invalidation_flags;
+ (*flags) |= invalidation_flags;
current_topology = INVALID_TOPOLOGY;
}
void InvalidateViewports() {
- flags[Dirty::Viewports] = true;
+ (*flags)[Dirty::Viewports] = true;
}
void InvalidateScissors() {
- flags[Dirty::Scissors] = true;
+ (*flags)[Dirty::Scissors] = true;
}
bool TouchViewports() {
@@ -139,16 +145,23 @@ public:
return has_changed;
}
+ void SetupTables(Tegra::Control::ChannelState& channel_state);
+
+ void ChangeChannel(Tegra::Control::ChannelState& channel_state);
+
+ void InvalidateState();
+
private:
static constexpr auto INVALID_TOPOLOGY = static_cast<Maxwell::PrimitiveTopology>(~0u);
bool Exchange(std::size_t id, bool new_value) const noexcept {
- const bool is_dirty = flags[id];
- flags[id] = new_value;
+ const bool is_dirty = (*flags)[id];
+ (*flags)[id] = new_value;
return is_dirty;
}
- Tegra::Engines::Maxwell3D::DirtyState::Flags& flags;
+ Tegra::Engines::Maxwell3D::DirtyState::Flags* flags;
+ Tegra::Engines::Maxwell3D::DirtyState::Flags default_flags;
Tegra::Engines::Maxwell3D::DirtyState::Flags invalidation_flags;
Maxwell::PrimitiveTopology current_topology = INVALID_TOPOLOGY;
};
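
Because the tracker can now outlive any single channel, flags becomes a pointer retargeted by ChangeChannel, with default_flags as a safe target before the first channel exists, and InvalidateState marks everything dirty after a switch. The mechanism in miniature (bitset width arbitrary, names generic):

    #include <bitset>

    class MiniStateTracker {
    public:
        MiniStateTracker() : flags{&default_flags} {}

        void ChangeChannel(std::bitset<64>& channel_flags) {
            flags = &channel_flags; // follow the newly bound channel
        }

        void InvalidateState() {
            flags->set(); // every state is dirty after a channel switch
        }

    private:
        std::bitset<64>* flags;
        std::bitset<64> default_flags;
    };
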
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp
index a69ae7725..706d9ba74 100644
--- a/src/video_core/renderer_vulkan/vk_swapchain.cpp
+++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp
@@ -36,7 +36,8 @@ VkPresentModeKHR ChooseSwapPresentMode(vk::Span<VkPresentModeKHR> modes) {
// Mailbox (triple buffering) doesn't lock the application like fifo (vsync),
// prefer it if vsync option is not selected
const auto found_mailbox = std::find(modes.begin(), modes.end(), VK_PRESENT_MODE_MAILBOX_KHR);
- if (found_mailbox != modes.end() && !Settings::values.use_vsync.GetValue()) {
+ if (Settings::values.fullscreen_mode.GetValue() == Settings::FullscreenMode::Borderless &&
+ found_mailbox != modes.end() && !Settings::values.use_vsync.GetValue()) {
return VK_PRESENT_MODE_MAILBOX_KHR;
}
if (!Settings::values.use_speed_limit.GetValue()) {
@@ -156,8 +157,16 @@ void Swapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u3
present_mode = ChooseSwapPresentMode(present_modes);
u32 requested_image_count{capabilities.minImageCount + 1};
- if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) {
- requested_image_count = capabilities.maxImageCount;
+ // Ensure triple buffering if possible.
+ if (capabilities.maxImageCount > 0) {
+ if (requested_image_count > capabilities.maxImageCount) {
+ requested_image_count = capabilities.maxImageCount;
+ } else {
+ requested_image_count =
+ std::max(requested_image_count, std::min(3U, capabilities.maxImageCount));
+ }
+ } else {
+ requested_image_count = std::max(requested_image_count, 3U);
}
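
The clamping logic reads more clearly as a pure function: 3 is the triple-buffer target, and a maxImageCount of 0 means the surface imposes no upper bound. For example, minImageCount = 2 yields a request of 3, while a hard cap of 2 still degrades gracefully to 2. A behavior-equivalent sketch:

    #include <algorithm>
    #include <cstdint>

    std::uint32_t ChooseImageCount(std::uint32_t min_count, std::uint32_t max_count) {
        std::uint32_t requested = min_count + 1;
        if (max_count == 0) { // 0 = no upper bound
            return std::max(requested, 3u);
        }
        if (requested > max_count) {
            return max_count;
        }
        return std::max(requested, std::min(3u, max_count));
    }
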
VkSwapchainCreateInfoKHR swapchain_ci{
.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index caca79d79..853b80d8a 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -592,7 +592,7 @@ void TryTransformSwizzleIfNeeded(PixelFormat format, std::array<SwizzleSource, 4
case PixelFormat::A5B5G5R1_UNORM:
std::ranges::transform(swizzle, swizzle.begin(), SwapSpecial);
break;
- case PixelFormat::R4G4_UNORM:
+ case PixelFormat::G4R4_UNORM:
std::ranges::transform(swizzle, swizzle.begin(), SwapGreenRed);
break;
default:
@@ -791,12 +791,17 @@ TextureCacheRuntime::TextureCacheRuntime(const Device& device_, Scheduler& sched
MemoryAllocator& memory_allocator_,
StagingBufferPool& staging_buffer_pool_,
BlitImageHelper& blit_image_helper_,
- ASTCDecoderPass& astc_decoder_pass_,
- RenderPassCache& render_pass_cache_)
+ RenderPassCache& render_pass_cache_,
+ DescriptorPool& descriptor_pool,
+ UpdateDescriptorQueue& update_descriptor_queue)
: device{device_}, scheduler{scheduler_}, memory_allocator{memory_allocator_},
staging_buffer_pool{staging_buffer_pool_}, blit_image_helper{blit_image_helper_},
- astc_decoder_pass{astc_decoder_pass_}, render_pass_cache{render_pass_cache_},
- resolution{Settings::values.resolution_info} {}
+ render_pass_cache{render_pass_cache_}, resolution{Settings::values.resolution_info} {
+ if (Settings::values.accelerate_astc) {
+ astc_decoder_pass.emplace(device, scheduler, descriptor_pool, staging_buffer_pool,
+ update_descriptor_queue, memory_allocator);
+ }
+}
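
The runtime now owns the decode pass instead of borrowing it, constructing it only when accelerate_astc is enabled; std::optional::emplace builds the member in place, so the pass type needs no copy or move constructor. The ownership pattern in isolation, with generic names:

    #include <optional>

    struct Pass {
        explicit Pass(int device) {} // imagine an expensive, immovable resource
    };

    struct Runtime {
        explicit Runtime(bool accelerate, int device) {
            if (accelerate) {
                pass.emplace(device); // constructed in place, only when wanted
            }
        }
        std::optional<Pass> pass; // callers must check presence and use pass->...
    };
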
void TextureCacheRuntime::Finish() {
scheduler.Finish();
@@ -1474,13 +1479,14 @@ bool Image::BlitScaleHelper(bool scale_up) {
};
const VkExtent2D extent{
.width = std::max(scaled_width, info.size.width),
- .height = std::max(scaled_height, info.size.width),
+ .height = std::max(scaled_height, info.size.height),
};
auto* view_ptr = blit_view.get();
if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) {
if (!blit_framebuffer) {
- blit_framebuffer = std::make_unique<Framebuffer>(*runtime, view_ptr, nullptr, extent);
+ blit_framebuffer =
+ std::make_unique<Framebuffer>(*runtime, view_ptr, nullptr, extent, scale_up);
}
const auto color_view = blit_view->Handle(Shader::TextureType::Color2D);
@@ -1488,7 +1494,8 @@ bool Image::BlitScaleHelper(bool scale_up) {
src_region, operation, BLIT_OPERATION);
} else if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
if (!blit_framebuffer) {
- blit_framebuffer = std::make_unique<Framebuffer>(*runtime, nullptr, view_ptr, extent);
+ blit_framebuffer =
+ std::make_unique<Framebuffer>(*runtime, nullptr, view_ptr, extent, scale_up);
}
runtime->blit_image_helper.BlitDepthStencil(blit_framebuffer.get(), blit_view->DepthView(),
blit_view->StencilView(), dst_region,
@@ -1756,34 +1763,42 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
.width = key.size.width,
.height = key.size.height,
}} {
- CreateFramebuffer(runtime, color_buffers, depth_buffer);
+ CreateFramebuffer(runtime, color_buffers, depth_buffer, key.is_rescaled);
if (runtime.device.HasDebuggingToolAttached()) {
framebuffer.SetObjectNameEXT(VideoCommon::Name(key).c_str());
}
}
Framebuffer::Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer,
- ImageView* depth_buffer, VkExtent2D extent)
+ ImageView* depth_buffer, VkExtent2D extent, bool is_rescaled)
: render_area{extent} {
std::array<ImageView*, NUM_RT> color_buffers{color_buffer};
- CreateFramebuffer(runtime, color_buffers, depth_buffer);
+ CreateFramebuffer(runtime, color_buffers, depth_buffer, is_rescaled);
}
Framebuffer::~Framebuffer() = default;
void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime,
std::span<ImageView*, NUM_RT> color_buffers,
- ImageView* depth_buffer) {
+ ImageView* depth_buffer, bool is_rescaled) {
std::vector<VkImageView> attachments;
RenderPassKey renderpass_key{};
s32 num_layers = 1;
+ const auto& resolution = runtime.resolution;
+
+ u32 width = std::numeric_limits<u32>::max();
+ u32 height = std::numeric_limits<u32>::max();
for (size_t index = 0; index < NUM_RT; ++index) {
const ImageView* const color_buffer = color_buffers[index];
if (!color_buffer) {
renderpass_key.color_formats[index] = PixelFormat::Invalid;
continue;
}
+ width = std::min(width, is_rescaled ? resolution.ScaleUp(color_buffer->size.width)
+ : color_buffer->size.width);
+ height = std::min(height, is_rescaled ? resolution.ScaleUp(color_buffer->size.height)
+ : color_buffer->size.height);
attachments.push_back(color_buffer->RenderTarget());
renderpass_key.color_formats[index] = color_buffer->format;
num_layers = std::max(num_layers, color_buffer->range.extent.layers);
@@ -1794,6 +1809,10 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime,
}
const size_t num_colors = attachments.size();
if (depth_buffer) {
+ width = std::min(width, is_rescaled ? resolution.ScaleUp(depth_buffer->size.width)
+ : depth_buffer->size.width);
+ height = std::min(height, is_rescaled ? resolution.ScaleUp(depth_buffer->size.height)
+ : depth_buffer->size.height);
attachments.push_back(depth_buffer->RenderTarget());
renderpass_key.depth_format = depth_buffer->format;
num_layers = std::max(num_layers, depth_buffer->range.extent.layers);
@@ -1810,6 +1829,8 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime,
renderpass_key.samples = samples;
renderpass = runtime.render_pass_cache.Get(renderpass_key);
+ render_area.width = std::min(render_area.width, width);
+ render_area.height = std::min(render_area.height, height);
num_color_buffers = static_cast<u32>(num_colors);
framebuffer = runtime.device.GetLogical().CreateFramebuffer({
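
The net effect of the additions in this function is that the render area can never exceed the smallest attachment, with each attachment's extent scaled up first when the framebuffer is rescaled. Stripped of the Vulkan plumbing (and with ScaleUp omitted), the computation is a running minimum; names here are generic:

    #include <algorithm>
    #include <cstdint>
    #include <limits>
    #include <vector>

    struct Extent {
        std::uint32_t width;
        std::uint32_t height;
    };

    Extent ClampRenderArea(Extent requested, const std::vector<Extent>& attachments) {
        std::uint32_t width = std::numeric_limits<std::uint32_t>::max();
        std::uint32_t height = std::numeric_limits<std::uint32_t>::max();
        for (const Extent& attachment : attachments) {
            width = std::min(width, attachment.width);
            height = std::min(height, attachment.height);
        }
        return {std::min(requested.width, width), std::min(requested.height, height)};
    }
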
@@ -1829,7 +1850,7 @@ void TextureCacheRuntime::AccelerateImageUpload(
Image& image, const StagingBufferRef& map,
std::span<const VideoCommon::SwizzleParameters> swizzles) {
if (IsPixelFormatASTC(image.info.format)) {
- return astc_decoder_pass.Assemble(image, map, swizzles);
+ return astc_decoder_pass->Assemble(image, map, swizzles);
}
ASSERT(false);
}
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 69f06ee7b..7ec0df134 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -6,6 +6,7 @@
#include <span>
#include "shader_recompiler/shader_info.h"
+#include "video_core/renderer_vulkan/vk_compute_pass.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/texture_cache/image_view_base.h"
#include "video_core/texture_cache/texture_cache_base.h"
@@ -25,14 +26,15 @@ using VideoCommon::RenderTargets;
using VideoCommon::SlotVector;
using VideoCore::Surface::PixelFormat;
-class ASTCDecoderPass;
class BlitImageHelper;
+class DescriptorPool;
class Device;
class Image;
class ImageView;
class Framebuffer;
class RenderPassCache;
class StagingBufferPool;
+class UpdateDescriptorQueue;
class Scheduler;
class TextureCacheRuntime {
@@ -41,8 +43,9 @@ public:
MemoryAllocator& memory_allocator_,
StagingBufferPool& staging_buffer_pool_,
BlitImageHelper& blit_image_helper_,
- ASTCDecoderPass& astc_decoder_pass_,
- RenderPassCache& render_pass_cache_);
+ RenderPassCache& render_pass_cache_,
+ DescriptorPool& descriptor_pool,
+ UpdateDescriptorQueue& update_descriptor_queue);
void Finish();
@@ -97,8 +100,8 @@ public:
MemoryAllocator& memory_allocator;
StagingBufferPool& staging_buffer_pool;
BlitImageHelper& blit_image_helper;
- ASTCDecoderPass& astc_decoder_pass;
RenderPassCache& render_pass_cache;
+ std::optional<ASTCDecoderPass> astc_decoder_pass;
const Settings::ResolutionScalingInfo& resolution;
constexpr static size_t indexing_slots = 8 * sizeof(size_t);
@@ -268,7 +271,7 @@ public:
ImageView* depth_buffer, const VideoCommon::RenderTargets& key);
explicit Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer,
- ImageView* depth_buffer, VkExtent2D extent);
+ ImageView* depth_buffer, VkExtent2D extent, bool is_rescaled);
~Framebuffer();
@@ -279,7 +282,8 @@ public:
Framebuffer& operator=(Framebuffer&&) = default;
void CreateFramebuffer(TextureCacheRuntime& runtime,
- std::span<ImageView*, NUM_RT> color_buffers, ImageView* depth_buffer);
+ std::span<ImageView*, NUM_RT> color_buffers, ImageView* depth_buffer,
+ bool is_rescaled = false);
[[nodiscard]] VkFramebuffer Handle() const noexcept {
return *framebuffer;
diff --git a/src/video_core/shader_cache.cpp b/src/video_core/shader_cache.cpp
index 164e4ee0e..d9482371b 100644
--- a/src/video_core/shader_cache.cpp
+++ b/src/video_core/shader_cache.cpp
@@ -8,6 +8,7 @@
#include "common/assert.h"
#include "shader_recompiler/frontend/maxwell/control_flow.h"
#include "shader_recompiler/object_pool.h"
+#include "video_core/control/channel_state.h"
#include "video_core/dirty_flags.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
@@ -33,29 +34,25 @@ void ShaderCache::SyncGuestHost() {
RemovePendingShaders();
}
-ShaderCache::ShaderCache(VideoCore::RasterizerInterface& rasterizer_,
- Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::Engines::KeplerCompute& kepler_compute_)
- : gpu_memory{gpu_memory_}, maxwell3d{maxwell3d_}, kepler_compute{kepler_compute_},
- rasterizer{rasterizer_} {}
+ShaderCache::ShaderCache(VideoCore::RasterizerInterface& rasterizer_) : rasterizer{rasterizer_} {}
bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) {
- auto& dirty{maxwell3d.dirty.flags};
+ auto& dirty{maxwell3d->dirty.flags};
if (!dirty[VideoCommon::Dirty::Shaders]) {
return last_shaders_valid;
}
dirty[VideoCommon::Dirty::Shaders] = false;
- const GPUVAddr base_addr{maxwell3d.regs.code_address.CodeAddress()};
+ const GPUVAddr base_addr{maxwell3d->regs.program_region.Address()};
for (size_t index = 0; index < Tegra::Engines::Maxwell3D::Regs::MaxShaderProgram; ++index) {
- if (!maxwell3d.regs.IsShaderConfigEnabled(index)) {
+ if (!maxwell3d->regs.IsShaderConfigEnabled(index)) {
unique_hashes[index] = 0;
continue;
}
- const auto& shader_config{maxwell3d.regs.shader_config[index]};
- const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderProgram>(index)};
+ const auto& shader_config{maxwell3d->regs.pipelines[index]};
+ const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderType>(index)};
const GPUVAddr shader_addr{base_addr + shader_config.offset};
- const std::optional<VAddr> cpu_shader_addr{gpu_memory.GpuToCpuAddress(shader_addr)};
+ const std::optional<VAddr> cpu_shader_addr{gpu_memory->GpuToCpuAddress(shader_addr)};
if (!cpu_shader_addr) {
LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr);
last_shaders_valid = false;
@@ -64,7 +61,7 @@ bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) {
const ShaderInfo* shader_info{TryGet(*cpu_shader_addr)};
if (!shader_info) {
const u32 start_address{shader_config.offset};
- GraphicsEnvironment env{maxwell3d, gpu_memory, program, base_addr, start_address};
+ GraphicsEnvironment env{*maxwell3d, *gpu_memory, program, base_addr, start_address};
shader_info = MakeShaderInfo(env, *cpu_shader_addr);
}
shader_infos[index] = shader_info;
@@ -75,10 +72,10 @@ bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) {
}
const ShaderInfo* ShaderCache::ComputeShader() {
- const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()};
- const auto& qmd{kepler_compute.launch_description};
+ const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()};
+ const auto& qmd{kepler_compute->launch_description};
const GPUVAddr shader_addr{program_base + qmd.program_start};
- const std::optional<VAddr> cpu_shader_addr{gpu_memory.GpuToCpuAddress(shader_addr)};
+ const std::optional<VAddr> cpu_shader_addr{gpu_memory->GpuToCpuAddress(shader_addr)};
if (!cpu_shader_addr) {
LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr);
return nullptr;
@@ -86,22 +83,22 @@ const ShaderInfo* ShaderCache::ComputeShader() {
if (const ShaderInfo* const shader = TryGet(*cpu_shader_addr)) {
return shader;
}
- ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start};
+ ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start};
return MakeShaderInfo(env, *cpu_shader_addr);
}
void ShaderCache::GetGraphicsEnvironments(GraphicsEnvironments& result,
const std::array<u64, NUM_PROGRAMS>& unique_hashes) {
size_t env_index{};
- const GPUVAddr base_addr{maxwell3d.regs.code_address.CodeAddress()};
+ const GPUVAddr base_addr{maxwell3d->regs.program_region.Address()};
for (size_t index = 0; index < NUM_PROGRAMS; ++index) {
if (unique_hashes[index] == 0) {
continue;
}
- const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderProgram>(index)};
+ const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderType>(index)};
auto& env{result.envs[index]};
- const u32 start_address{maxwell3d.regs.shader_config[index].offset};
- env = GraphicsEnvironment{maxwell3d, gpu_memory, program, base_addr, start_address};
+ const u32 start_address{maxwell3d->regs.pipelines[index].offset};
+ env = GraphicsEnvironment{*maxwell3d, *gpu_memory, program, base_addr, start_address};
env.SetCachedSize(shader_infos[index]->size_bytes);
result.env_ptrs[env_index++] = &env;
}
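
With the engine references removed from the constructor yet maxwell3d->, kepler_compute-> and gpu_memory-> still compiling, those names must now come from the ChannelSetupCaches base as pointers that track the bound channel, which is exactly why every access site in this file flips from '.' to '->'. The general shape of such a channel-following cache, as a generic stand-in (not the real channel_state_cache API):

    #include <unordered_map>
    #include <utility>

    template <typename Info>
    class ChannelCache {
    public:
        void CreateChannel(int id, Info info) {
            channels.emplace(id, std::move(info));
        }
        void BindToChannel(int id) {
            current = &channels.at(id); // derived caches read through 'current'
        }

    protected:
        Info* current = nullptr;

    private:
        std::unordered_map<int, Info> channels;
    };
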
diff --git a/src/video_core/shader_cache.h b/src/video_core/shader_cache.h
index f67cea8c4..a4391202d 100644
--- a/src/video_core/shader_cache.h
+++ b/src/video_core/shader_cache.h
@@ -12,6 +12,7 @@
#include <vector>
#include "common/common_types.h"
+#include "video_core/control/channel_state_cache.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/shader_environment.h"
@@ -19,6 +20,10 @@ namespace Tegra {
class MemoryManager;
}
+namespace Tegra::Control {
+struct ChannelState;
+}
+
namespace VideoCommon {
class GenericEnvironment;
@@ -28,7 +33,7 @@ struct ShaderInfo {
size_t size_bytes{};
};
-class ShaderCache {
+class ShaderCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
static constexpr u64 YUZU_PAGEBITS = 14;
static constexpr u64 YUZU_PAGESIZE = u64(1) << YUZU_PAGEBITS;
@@ -71,9 +76,7 @@ protected:
}
};
- explicit ShaderCache(VideoCore::RasterizerInterface& rasterizer_,
- Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::Engines::KeplerCompute& kepler_compute_);
+ explicit ShaderCache(VideoCore::RasterizerInterface& rasterizer_);
/// @brief Update the hashes and information of shader stages
/// @param unique_hashes Shader hashes to store into when a stage is enabled
@@ -88,10 +91,6 @@ protected:
void GetGraphicsEnvironments(GraphicsEnvironments& result,
const std::array<u64, NUM_PROGRAMS>& unique_hashes);
- Tegra::MemoryManager& gpu_memory;
- Tegra::Engines::Maxwell3D& maxwell3d;
- Tegra::Engines::KeplerCompute& kepler_compute;
-
std::array<const ShaderInfo*, NUM_PROGRAMS> shader_infos{};
bool last_shaders_valid = false;
diff --git a/src/video_core/shader_environment.cpp b/src/video_core/shader_environment.cpp
index c903975fc..63bcf9337 100644
--- a/src/video_core/shader_environment.cpp
+++ b/src/video_core/shader_environment.cpp
@@ -252,34 +252,34 @@ Shader::TextureType GenericEnvironment::ReadTextureTypeImpl(GPUVAddr tic_addr, u
GraphicsEnvironment::GraphicsEnvironment(Tegra::Engines::Maxwell3D& maxwell3d_,
Tegra::MemoryManager& gpu_memory_,
- Maxwell::ShaderProgram program, GPUVAddr program_base_,
+ Maxwell::ShaderType program, GPUVAddr program_base_,
u32 start_address_)
: GenericEnvironment{gpu_memory_, program_base_, start_address_}, maxwell3d{&maxwell3d_} {
gpu_memory->ReadBlock(program_base + start_address, &sph, sizeof(sph));
initial_offset = sizeof(sph);
- gp_passthrough_mask = maxwell3d->regs.gp_passthrough_mask;
+ gp_passthrough_mask = maxwell3d->regs.post_vtg_shader_attrib_skip_mask;
switch (program) {
- case Maxwell::ShaderProgram::VertexA:
+ case Maxwell::ShaderType::VertexA:
stage = Shader::Stage::VertexA;
stage_index = 0;
break;
- case Maxwell::ShaderProgram::VertexB:
+ case Maxwell::ShaderType::VertexB:
stage = Shader::Stage::VertexB;
stage_index = 0;
break;
- case Maxwell::ShaderProgram::TesselationControl:
+ case Maxwell::ShaderType::TessellationInit:
stage = Shader::Stage::TessellationControl;
stage_index = 1;
break;
- case Maxwell::ShaderProgram::TesselationEval:
+ case Maxwell::ShaderType::Tessellation:
stage = Shader::Stage::TessellationEval;
stage_index = 2;
break;
- case Maxwell::ShaderProgram::Geometry:
+ case Maxwell::ShaderType::Geometry:
stage = Shader::Stage::Geometry;
stage_index = 3;
break;
- case Maxwell::ShaderProgram::Fragment:
+ case Maxwell::ShaderType::Pixel:
stage = Shader::Stage::Fragment;
stage_index = 4;
break;
@@ -290,7 +290,7 @@ GraphicsEnvironment::GraphicsEnvironment(Tegra::Engines::Maxwell3D& maxwell3d_,
const u64 local_size{sph.LocalMemorySize()};
ASSERT(local_size <= std::numeric_limits<u32>::max());
local_memory_size = static_cast<u32>(local_size) + sph.common3.shader_local_memory_crs_size;
- texture_bound = maxwell3d->regs.tex_cb_index;
+ texture_bound = maxwell3d->regs.bindless_texture_const_buffer_slot;
}
u32 GraphicsEnvironment::ReadCbufValue(u32 cbuf_index, u32 cbuf_offset) {
@@ -306,8 +306,9 @@ u32 GraphicsEnvironment::ReadCbufValue(u32 cbuf_index, u32 cbuf_offset) {
Shader::TextureType GraphicsEnvironment::ReadTextureType(u32 handle) {
const auto& regs{maxwell3d->regs};
- const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex};
- return ReadTextureTypeImpl(regs.tic.Address(), regs.tic.limit, via_header_index, handle);
+ const bool via_header_index{regs.sampler_binding == Maxwell::SamplerBinding::ViaHeaderBinding};
+ return ReadTextureTypeImpl(regs.tex_header.Address(), regs.tex_header.limit, via_header_index,
+ handle);
}
u32 GraphicsEnvironment::ReadViewportTransformState() {
diff --git a/src/video_core/shader_environment.h b/src/video_core/shader_environment.h
index a0659fd7c..a05833f38 100644
--- a/src/video_core/shader_environment.h
+++ b/src/video_core/shader_environment.h
@@ -95,7 +95,7 @@ public:
explicit GraphicsEnvironment() = default;
explicit GraphicsEnvironment(Tegra::Engines::Maxwell3D& maxwell3d_,
Tegra::MemoryManager& gpu_memory_,
- Tegra::Engines::Maxwell3D::Regs::ShaderProgram program,
+ Tegra::Engines::Maxwell3D::Regs::ShaderType program,
GPUVAddr program_base_, u32 start_address_);
~GraphicsEnvironment() override = default;
diff --git a/src/video_core/surface.cpp b/src/video_core/surface.cpp
index 079d5f028..6bd133d10 100644
--- a/src/video_core/surface.cpp
+++ b/src/video_core/surface.cpp
@@ -73,17 +73,17 @@ bool SurfaceTargetIsArray(SurfaceTarget target) {
PixelFormat PixelFormatFromDepthFormat(Tegra::DepthFormat format) {
switch (format) {
- case Tegra::DepthFormat::S8_UINT_Z24_UNORM:
+ case Tegra::DepthFormat::Z24_UNORM_S8_UINT:
return PixelFormat::S8_UINT_D24_UNORM;
- case Tegra::DepthFormat::D24S8_UNORM:
+ case Tegra::DepthFormat::S8Z24_UNORM:
return PixelFormat::D24_UNORM_S8_UINT;
- case Tegra::DepthFormat::D32_FLOAT:
+ case Tegra::DepthFormat::Z32_FLOAT:
return PixelFormat::D32_FLOAT;
- case Tegra::DepthFormat::D16_UNORM:
+ case Tegra::DepthFormat::Z16_UNORM:
return PixelFormat::D16_UNORM;
case Tegra::DepthFormat::S8_UINT:
return PixelFormat::S8_UINT;
- case Tegra::DepthFormat::D32_FLOAT_S8X24_UINT:
+ case Tegra::DepthFormat::Z32_FLOAT_X24S8_UINT:
return PixelFormat::D32_FLOAT_S8_UINT;
default:
UNIMPLEMENTED_MSG("Unimplemented format={}", format);
@@ -117,9 +117,9 @@ PixelFormat PixelFormatFromRenderTargetFormat(Tegra::RenderTargetFormat format)
return PixelFormat::R32G32_UINT;
case Tegra::RenderTargetFormat::R16G16B16X16_FLOAT:
return PixelFormat::R16G16B16X16_FLOAT;
- case Tegra::RenderTargetFormat::B8G8R8A8_UNORM:
+ case Tegra::RenderTargetFormat::A8R8G8B8_UNORM:
return PixelFormat::B8G8R8A8_UNORM;
- case Tegra::RenderTargetFormat::B8G8R8A8_SRGB:
+ case Tegra::RenderTargetFormat::A8R8G8B8_SRGB:
return PixelFormat::B8G8R8A8_SRGB;
case Tegra::RenderTargetFormat::A2B10G10R10_UNORM:
return PixelFormat::A2B10G10R10_UNORM;
@@ -247,6 +247,8 @@ bool IsPixelFormatASTC(PixelFormat format) {
case PixelFormat::ASTC_2D_6X6_UNORM:
case PixelFormat::ASTC_2D_6X6_SRGB:
case PixelFormat::ASTC_2D_10X6_UNORM:
+ case PixelFormat::ASTC_2D_10X5_UNORM:
+ case PixelFormat::ASTC_2D_10X5_SRGB:
case PixelFormat::ASTC_2D_10X10_UNORM:
case PixelFormat::ASTC_2D_10X10_SRGB:
case PixelFormat::ASTC_2D_12X12_UNORM:
@@ -276,6 +278,7 @@ bool IsPixelFormatSRGB(PixelFormat format) {
case PixelFormat::ASTC_2D_5X5_SRGB:
case PixelFormat::ASTC_2D_10X8_SRGB:
case PixelFormat::ASTC_2D_6X6_SRGB:
+ case PixelFormat::ASTC_2D_10X5_SRGB:
case PixelFormat::ASTC_2D_10X10_SRGB:
case PixelFormat::ASTC_2D_12X12_SRGB:
case PixelFormat::ASTC_2D_8X6_SRGB:
diff --git a/src/video_core/surface.h b/src/video_core/surface.h
index 16273f185..57ca7f597 100644
--- a/src/video_core/surface.h
+++ b/src/video_core/surface.h
@@ -82,7 +82,7 @@ enum class PixelFormat {
BC3_SRGB,
BC7_SRGB,
A4B4G4R4_UNORM,
- R4G4_UNORM,
+ G4R4_UNORM,
ASTC_2D_4X4_SRGB,
ASTC_2D_8X8_SRGB,
ASTC_2D_8X5_SRGB,
@@ -94,6 +94,8 @@ enum class PixelFormat {
ASTC_2D_6X6_UNORM,
ASTC_2D_6X6_SRGB,
ASTC_2D_10X6_UNORM,
+ ASTC_2D_10X5_UNORM,
+ ASTC_2D_10X5_SRGB,
ASTC_2D_10X10_UNORM,
ASTC_2D_10X10_SRGB,
ASTC_2D_12X12_UNORM,
@@ -216,7 +218,7 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_WIDTH_TABLE = {{
4, // BC3_SRGB
4, // BC7_SRGB
1, // A4B4G4R4_UNORM
- 1, // R4G4_UNORM
+ 1, // G4R4_UNORM
4, // ASTC_2D_4X4_SRGB
8, // ASTC_2D_8X8_SRGB
8, // ASTC_2D_8X5_SRGB
@@ -228,6 +230,8 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_WIDTH_TABLE = {{
6, // ASTC_2D_6X6_UNORM
6, // ASTC_2D_6X6_SRGB
10, // ASTC_2D_10X6_UNORM
+ 10, // ASTC_2D_10X5_UNORM
+ 10, // ASTC_2D_10X5_SRGB
10, // ASTC_2D_10X10_UNORM
10, // ASTC_2D_10X10_SRGB
12, // ASTC_2D_12X12_UNORM
@@ -319,7 +323,7 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_HEIGHT_TABLE = {{
4, // BC3_SRGB
4, // BC7_SRGB
1, // A4B4G4R4_UNORM
- 1, // R4G4_UNORM
+ 1, // G4R4_UNORM
4, // ASTC_2D_4X4_SRGB
8, // ASTC_2D_8X8_SRGB
5, // ASTC_2D_8X5_SRGB
@@ -331,6 +335,8 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_HEIGHT_TABLE = {{
6, // ASTC_2D_6X6_UNORM
6, // ASTC_2D_6X6_SRGB
6, // ASTC_2D_10X6_UNORM
+ 5, // ASTC_2D_10X5_UNORM
+ 5, // ASTC_2D_10X5_SRGB
10, // ASTC_2D_10X10_UNORM
10, // ASTC_2D_10X10_SRGB
12, // ASTC_2D_12X12_UNORM
@@ -422,7 +428,7 @@ constexpr std::array<u8, MaxPixelFormat> BITS_PER_BLOCK_TABLE = {{
128, // BC3_SRGB
128, // BC7_UNORM
16, // A4B4G4R4_UNORM
- 8, // R4G4_UNORM
+ 8, // G4R4_UNORM
128, // ASTC_2D_4X4_SRGB
128, // ASTC_2D_8X8_SRGB
128, // ASTC_2D_8X5_SRGB
@@ -434,6 +440,8 @@ constexpr std::array<u8, MaxPixelFormat> BITS_PER_BLOCK_TABLE = {{
128, // ASTC_2D_6X6_UNORM
128, // ASTC_2D_6X6_SRGB
128, // ASTC_2D_10X6_UNORM
+ 128, // ASTC_2D_10X5_UNORM
+ 128, // ASTC_2D_10X5_SRGB
128, // ASTC_2D_10X10_UNORM
128, // ASTC_2D_10X10_SRGB
128, // ASTC_2D_12X12_UNORM
diff --git a/src/video_core/texture_cache/descriptor_table.h b/src/video_core/texture_cache/descriptor_table.h
index b18e3838f..ee4240288 100644
--- a/src/video_core/texture_cache/descriptor_table.h
+++ b/src/video_core/texture_cache/descriptor_table.h
@@ -18,7 +18,7 @@ class DescriptorTable {
public:
explicit DescriptorTable(Tegra::MemoryManager& gpu_memory_) : gpu_memory{gpu_memory_} {}
- [[nodiscard]] bool Synchornize(GPUVAddr gpu_addr, u32 limit) {
+ [[nodiscard]] bool Synchronize(GPUVAddr gpu_addr, u32 limit) {
[[likely]] if (current_gpu_addr == gpu_addr && current_limit == limit) {
return false;
}
diff --git a/src/video_core/texture_cache/format_lookup_table.cpp b/src/video_core/texture_cache/format_lookup_table.cpp
index 1412aa076..08aa8ca33 100644
--- a/src/video_core/texture_cache/format_lookup_table.cpp
+++ b/src/video_core/texture_cache/format_lookup_table.cpp
@@ -63,7 +63,7 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red,
case Hash(TextureFormat::A4B4G4R4, UNORM):
return PixelFormat::A4B4G4R4_UNORM;
case Hash(TextureFormat::G4R4, UNORM):
- return PixelFormat::R4G4_UNORM;
+ return PixelFormat::G4R4_UNORM;
case Hash(TextureFormat::A5B5G5R1, UNORM):
return PixelFormat::A5B5G5R1_UNORM;
case Hash(TextureFormat::R8, UNORM):
@@ -150,6 +150,8 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red,
return PixelFormat::D24_UNORM_S8_UINT;
case Hash(TextureFormat::D32S8, FLOAT, UINT, UNORM, UNORM, LINEAR):
return PixelFormat::D32_FLOAT_S8_UINT;
+ case Hash(TextureFormat::R32_B24G8, FLOAT, UINT, UNORM, UNORM, LINEAR):
+ return PixelFormat::D32_FLOAT_S8_UINT;
case Hash(TextureFormat::BC1_RGBA, UNORM, LINEAR):
return PixelFormat::BC1_RGBA_UNORM;
case Hash(TextureFormat::BC1_RGBA, UNORM, SRGB):
@@ -208,6 +210,10 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red,
return PixelFormat::ASTC_2D_6X6_SRGB;
case Hash(TextureFormat::ASTC_2D_10X6, UNORM, LINEAR):
return PixelFormat::ASTC_2D_10X6_UNORM;
+ case Hash(TextureFormat::ASTC_2D_10X5, UNORM, LINEAR):
+ return PixelFormat::ASTC_2D_10X5_UNORM;
+ case Hash(TextureFormat::ASTC_2D_10X5, UNORM, SRGB):
+ return PixelFormat::ASTC_2D_10X5_SRGB;
case Hash(TextureFormat::ASTC_2D_10X10, UNORM, LINEAR):
return PixelFormat::ASTC_2D_10X10_UNORM;
case Hash(TextureFormat::ASTC_2D_10X10, UNORM, SRGB):
diff --git a/src/video_core/texture_cache/formatter.h b/src/video_core/texture_cache/formatter.h
index 95a572604..acc854715 100644
--- a/src/video_core/texture_cache/formatter.h
+++ b/src/video_core/texture_cache/formatter.h
@@ -153,8 +153,8 @@ struct fmt::formatter<VideoCore::Surface::PixelFormat> : fmt::formatter<fmt::str
return "BC7_SRGB";
case PixelFormat::A4B4G4R4_UNORM:
return "A4B4G4R4_UNORM";
- case PixelFormat::R4G4_UNORM:
- return "R4G4_UNORM";
+ case PixelFormat::G4R4_UNORM:
+ return "G4R4_UNORM";
case PixelFormat::ASTC_2D_4X4_SRGB:
return "ASTC_2D_4X4_SRGB";
case PixelFormat::ASTC_2D_8X8_SRGB:
@@ -177,6 +177,10 @@ struct fmt::formatter<VideoCore::Surface::PixelFormat> : fmt::formatter<fmt::str
return "ASTC_2D_6X6_SRGB";
case PixelFormat::ASTC_2D_10X6_UNORM:
return "ASTC_2D_10X6_UNORM";
+ case PixelFormat::ASTC_2D_10X5_UNORM:
+ return "ASTC_2D_10X5_UNORM";
+ case PixelFormat::ASTC_2D_10X5_SRGB:
+ return "ASTC_2D_10X5_SRGB";
case PixelFormat::ASTC_2D_10X10_UNORM:
return "ASTC_2D_10X10_UNORM";
case PixelFormat::ASTC_2D_10X10_SRGB:
diff --git a/src/video_core/texture_cache/image_base.cpp b/src/video_core/texture_cache/image_base.cpp
index f61e09ac7..91512022f 100644
--- a/src/video_core/texture_cache/image_base.cpp
+++ b/src/video_core/texture_cache/image_base.cpp
@@ -7,6 +7,7 @@
#include <vector>
#include "common/common_types.h"
+#include "common/div_ceil.h"
#include "video_core/surface.h"
#include "video_core/texture_cache/formatter.h"
#include "video_core/texture_cache/image_base.h"
@@ -182,10 +183,6 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i
};
const bool is_lhs_compressed = lhs_block.width > 1 || lhs_block.height > 1;
const bool is_rhs_compressed = rhs_block.width > 1 || rhs_block.height > 1;
- if (is_lhs_compressed && is_rhs_compressed) {
- LOG_ERROR(HW_GPU, "Compressed to compressed image aliasing is not implemented");
- return;
- }
const s32 lhs_mips = lhs.info.resources.levels;
const s32 rhs_mips = rhs.info.resources.levels;
const s32 num_mips = std::min(lhs_mips - base->level, rhs_mips);
@@ -199,12 +196,12 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i
Extent3D lhs_size = MipSize(lhs.info.size, base->level + mip_level);
Extent3D rhs_size = MipSize(rhs.info.size, mip_level);
if (is_lhs_compressed) {
- lhs_size.width /= lhs_block.width;
- lhs_size.height /= lhs_block.height;
+ lhs_size.width = Common::DivCeil(lhs_size.width, lhs_block.width);
+ lhs_size.height = Common::DivCeil(lhs_size.height, lhs_block.height);
}
if (is_rhs_compressed) {
- rhs_size.width /= rhs_block.width;
- rhs_size.height /= rhs_block.height;
+ rhs_size.width = Common::DivCeil(rhs_size.width, rhs_block.width);
+ rhs_size.height = Common::DivCeil(rhs_size.height, rhs_block.height);
}
const Extent3D copy_size{
.width = std::min(lhs_size.width, rhs_size.width),
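
Plain division truncates, which under-counts blocks whenever a mip dimension is not a multiple of the block footprint; DivCeil rounds up so the trailing partial block is copied too. A worked case, assuming an 8x8 compressed block and a 10-texel-wide mip (Common::DivCeil modeled locally):

    #include <cassert>
    #include <cstdint>

    constexpr std::uint32_t DivCeil(std::uint32_t n, std::uint32_t d) {
        return (n + d - 1) / d;
    }

    int main() {
        assert(10u / 8u == 1);         // old: the trailing partial block is lost
        assert(DivCeil(10u, 8u) == 2); // new: the partial block is counted
        return 0;
    }
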
diff --git a/src/video_core/texture_cache/image_base.h b/src/video_core/texture_cache/image_base.h
index 1f85ec9da..620565684 100644
--- a/src/video_core/texture_cache/image_base.h
+++ b/src/video_core/texture_cache/image_base.h
@@ -88,6 +88,9 @@ struct ImageBase {
u32 scale_rating = 0;
u64 scale_tick = 0;
bool has_scaled = false;
+
+ size_t channel = 0;
+
ImageFlagBits flags = ImageFlagBits::CpuModified;
GPUVAddr gpu_addr = 0;
diff --git a/src/video_core/texture_cache/image_info.cpp b/src/video_core/texture_cache/image_info.cpp
index 6c073ee57..852ec2519 100644
--- a/src/video_core/texture_cache/image_info.cpp
+++ b/src/video_core/texture_cache/image_info.cpp
@@ -1,6 +1,8 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <fmt/format.h>
+
#include "common/assert.h"
#include "video_core/surface.h"
#include "video_core/texture_cache/format_lookup_table.h"
@@ -12,6 +14,7 @@
namespace VideoCommon {
+using Tegra::Engines::Maxwell3D;
using Tegra::Texture::TextureType;
using Tegra::Texture::TICEntry;
using VideoCore::Surface::PixelFormat;
@@ -107,12 +110,13 @@ ImageInfo::ImageInfo(const TICEntry& config) noexcept {
}
}
-ImageInfo::ImageInfo(const Tegra::Engines::Maxwell3D::Regs& regs, size_t index) noexcept {
+ImageInfo::ImageInfo(const Maxwell3D::Regs& regs, size_t index) noexcept {
const auto& rt = regs.rt[index];
format = VideoCore::Surface::PixelFormatFromRenderTargetFormat(rt.format);
rescaleable = false;
if (rt.tile_mode.is_pitch_linear) {
- ASSERT(rt.tile_mode.is_3d == 0);
+ ASSERT(rt.tile_mode.dim_control ==
+ Maxwell3D::Regs::TileMode::DimensionControl::DepthDefinesArray);
type = ImageType::Linear;
pitch = rt.width;
size = Extent3D{
@@ -124,15 +128,16 @@ ImageInfo::ImageInfo(const Tegra::Engines::Maxwell3D::Regs& regs, size_t index)
}
size.width = rt.width;
size.height = rt.height;
- layer_stride = rt.layer_stride * 4;
+ layer_stride = rt.array_pitch * 4;
maybe_unaligned_layer_stride = layer_stride;
- num_samples = NumSamples(regs.multisample_mode);
+ num_samples = NumSamples(regs.anti_alias_samples_mode);
block = Extent3D{
.width = rt.tile_mode.block_width,
.height = rt.tile_mode.block_height,
.depth = rt.tile_mode.block_depth,
};
- if (rt.tile_mode.is_3d) {
+ if (rt.tile_mode.dim_control ==
+ Maxwell3D::Regs::TileMode::DimensionControl::DepthDefinesDepth) {
type = ImageType::e3D;
size.depth = rt.depth;
} else {
@@ -146,31 +151,37 @@ ImageInfo::ImageInfo(const Tegra::Engines::Maxwell3D::Regs& regs, size_t index)
ImageInfo::ImageInfo(const Tegra::Engines::Maxwell3D::Regs& regs) noexcept {
format = VideoCore::Surface::PixelFormatFromDepthFormat(regs.zeta.format);
- size.width = regs.zeta_width;
- size.height = regs.zeta_height;
+ size.width = regs.zeta_size.width;
+ size.height = regs.zeta_size.height;
rescaleable = false;
resources.levels = 1;
- layer_stride = regs.zeta.layer_stride * 4;
+ layer_stride = regs.zeta.array_pitch * 4;
maybe_unaligned_layer_stride = layer_stride;
- num_samples = NumSamples(regs.multisample_mode);
+ num_samples = NumSamples(regs.anti_alias_samples_mode);
block = Extent3D{
.width = regs.zeta.tile_mode.block_width,
.height = regs.zeta.tile_mode.block_height,
.depth = regs.zeta.tile_mode.block_depth,
};
if (regs.zeta.tile_mode.is_pitch_linear) {
- ASSERT(regs.zeta.tile_mode.is_3d == 0);
+ ASSERT(regs.zeta.tile_mode.dim_control ==
+ Maxwell3D::Regs::TileMode::DimensionControl::DepthDefinesArray);
type = ImageType::Linear;
pitch = size.width * BytesPerBlock(format);
- } else if (regs.zeta.tile_mode.is_3d) {
+ } else if (regs.zeta.tile_mode.dim_control ==
+ Maxwell3D::Regs::TileMode::DimensionControl::DepthDefinesDepth) {
ASSERT(regs.zeta.tile_mode.is_pitch_linear == 0);
+ ASSERT(regs.zeta_size.dim_control ==
+ Maxwell3D::Regs::ZetaSize::DimensionControl::ArraySizeOne);
type = ImageType::e3D;
- size.depth = regs.zeta_depth;
+ size.depth = regs.zeta_size.depth;
} else {
+ ASSERT(regs.zeta_size.dim_control ==
+ Maxwell3D::Regs::ZetaSize::DimensionControl::DepthDefinesArray);
rescaleable = block.depth == 0;
downscaleable = size.height > 512;
type = ImageType::e2D;
- resources.layers = regs.zeta_depth;
+ resources.layers = regs.zeta_size.depth;
}
}
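
Note: the hunks above replace the old boolean `is_3d` with a `DimensionControl` field that states what the third dimension of a render target or zeta buffer means. A minimal sketch of that classification, assuming the enum encoding implied by the asserts (DepthDefinesArray for layered 2D, DepthDefinesDepth for true 3D):

    #include <cassert>
    #include <cstdint>

    // Sketch of the dimension-control decoding; the 0/1 encoding is an
    // assumption inferred from how the asserts above test the field.
    enum class DimensionControl : std::uint32_t {
        DepthDefinesArray = 0, // third dimension counts 2D array layers
        DepthDefinesDepth = 1, // third dimension is real 3D depth
    };

    enum class ImageType { e2D, e3D, Linear };

    ImageType ClassifyTarget(bool is_pitch_linear, DimensionControl dim_control) {
        if (is_pitch_linear) {
            // Pitch-linear targets are never 3D, mirroring the ASSERTs above.
            assert(dim_control == DimensionControl::DepthDefinesArray);
            return ImageType::Linear;
        }
        return dim_control == DimensionControl::DepthDefinesDepth ? ImageType::e3D
                                                                  : ImageType::e2D;
    }
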
diff --git a/src/video_core/texture_cache/render_targets.h b/src/video_core/texture_cache/render_targets.h
index da8ffe9ec..1efbd6507 100644
--- a/src/video_core/texture_cache/render_targets.h
+++ b/src/video_core/texture_cache/render_targets.h
@@ -26,6 +26,7 @@ struct RenderTargets {
ImageViewId depth_buffer_id{};
std::array<u8, NUM_RT> draw_buffers{};
Extent2D size{};
+ bool is_rescaled{};
};
} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/texture_cache.cpp b/src/video_core/texture_cache/texture_cache.cpp
new file mode 100644
index 000000000..8a9a32f44
--- /dev/null
+++ b/src/video_core/texture_cache/texture_cache.cpp
@@ -0,0 +1,15 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "video_core/control/channel_state_cache.inc"
+#include "video_core/texture_cache/texture_cache_base.h"
+
+namespace VideoCommon {
+
+TextureCacheChannelInfo::TextureCacheChannelInfo(Tegra::Control::ChannelState& state) noexcept
+ : ChannelInfo(state), graphics_image_table{gpu_memory}, graphics_sampler_table{gpu_memory},
+ compute_image_table{gpu_memory}, compute_sampler_table{gpu_memory} {}
+
+template class VideoCommon::ChannelSetupCaches<VideoCommon::TextureCacheChannelInfo>;
+
+} // namespace VideoCommon
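
Note: the new texture_cache.cpp exists to include channel_state_cache.inc once and emit a single explicit instantiation of ChannelSetupCaches for TextureCacheChannelInfo, keeping the template's member definitions out of every translation unit that includes the header. A one-file sketch of the pattern, with illustrative names rather than yuzu's:

    // Header part: the template is declared, definitions are kept elsewhere.
    template <class Info>
    class SetupCaches {
    public:
        void Register(int id);

    private:
        int last_id = -1;
    };

    // .inc part: member definitions, included only by instantiating .cpp files.
    template <class Info>
    void SetupCaches<Info>::Register(int id) {
        last_id = id;
    }

    // .cpp part: emit the member code exactly once for the concrete Info type.
    struct TextureInfo {};
    template class SetupCaches<TextureInfo>;
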
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 1dbe01bc0..8ef75fe73 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -1,5 +1,5 @@
-// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
#pragma once
@@ -7,6 +7,7 @@
#include "common/alignment.h"
#include "common/settings.h"
+#include "video_core/control/channel_state.h"
#include "video_core/dirty_flags.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/texture_cache/image_view_base.h"
@@ -29,12 +30,8 @@ using VideoCore::Surface::SurfaceType;
using namespace Common::Literals;
template <class P>
-TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& rasterizer_,
- Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::Engines::KeplerCompute& kepler_compute_,
- Tegra::MemoryManager& gpu_memory_)
- : runtime{runtime_}, rasterizer{rasterizer_}, maxwell3d{maxwell3d_},
- kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_} {
+TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& rasterizer_)
+ : runtime{runtime_}, rasterizer{rasterizer_} {
// Configure null sampler
TSCEntry sampler_descriptor{};
sampler_descriptor.min_filter.Assign(Tegra::Texture::TextureFilter::Linear);
@@ -93,7 +90,7 @@ void TextureCache<P>::RunGarbageCollector() {
const auto copies = FullDownloadCopies(image.info);
image.DownloadMemory(map, copies);
runtime.Finish();
- SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span);
+ SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span);
}
if (True(image.flags & ImageFlagBits::Tracked)) {
UntrackImage(image, image_id);
@@ -152,22 +149,24 @@ void TextureCache<P>::MarkModification(ImageId id) noexcept {
template <class P>
template <bool has_blacklists>
void TextureCache<P>::FillGraphicsImageViews(std::span<ImageViewInOut> views) {
- FillImageViews<has_blacklists>(graphics_image_table, graphics_image_view_ids, views);
+ FillImageViews<has_blacklists>(channel_state->graphics_image_table,
+ channel_state->graphics_image_view_ids, views);
}
template <class P>
void TextureCache<P>::FillComputeImageViews(std::span<ImageViewInOut> views) {
- FillImageViews<true>(compute_image_table, compute_image_view_ids, views);
+ FillImageViews<true>(channel_state->compute_image_table, channel_state->compute_image_view_ids,
+ views);
}
template <class P>
typename P::Sampler* TextureCache<P>::GetGraphicsSampler(u32 index) {
- if (index > graphics_sampler_table.Limit()) {
+ if (index > channel_state->graphics_sampler_table.Limit()) {
LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index);
return &slot_samplers[NULL_SAMPLER_ID];
}
- const auto [descriptor, is_new] = graphics_sampler_table.Read(index);
- SamplerId& id = graphics_sampler_ids[index];
+ const auto [descriptor, is_new] = channel_state->graphics_sampler_table.Read(index);
+ SamplerId& id = channel_state->graphics_sampler_ids[index];
if (is_new) {
id = FindSampler(descriptor);
}
@@ -176,12 +175,12 @@ typename P::Sampler* TextureCache<P>::GetGraphicsSampler(u32 index) {
template <class P>
typename P::Sampler* TextureCache<P>::GetComputeSampler(u32 index) {
- if (index > compute_sampler_table.Limit()) {
+ if (index > channel_state->compute_sampler_table.Limit()) {
LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index);
return &slot_samplers[NULL_SAMPLER_ID];
}
- const auto [descriptor, is_new] = compute_sampler_table.Read(index);
- SamplerId& id = compute_sampler_ids[index];
+ const auto [descriptor, is_new] = channel_state->compute_sampler_table.Read(index);
+ SamplerId& id = channel_state->compute_sampler_ids[index];
if (is_new) {
id = FindSampler(descriptor);
}
@@ -190,35 +189,38 @@ typename P::Sampler* TextureCache<P>::GetComputeSampler(u32 index) {
template <class P>
void TextureCache<P>::SynchronizeGraphicsDescriptors() {
- using SamplerIndex = Tegra::Engines::Maxwell3D::Regs::SamplerIndex;
- const bool linked_tsc = maxwell3d.regs.sampler_index == SamplerIndex::ViaHeaderIndex;
- const u32 tic_limit = maxwell3d.regs.tic.limit;
- const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d.regs.tsc.limit;
- if (graphics_sampler_table.Synchornize(maxwell3d.regs.tsc.Address(), tsc_limit)) {
- graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID);
+ using SamplerBinding = Tegra::Engines::Maxwell3D::Regs::SamplerBinding;
+ const bool linked_tsc = maxwell3d->regs.sampler_binding == SamplerBinding::ViaHeaderBinding;
+ const u32 tic_limit = maxwell3d->regs.tex_header.limit;
+ const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d->regs.tex_sampler.limit;
+ if (channel_state->graphics_sampler_table.Synchronize(maxwell3d->regs.tex_sampler.Address(),
+ tsc_limit)) {
+ channel_state->graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID);
}
- if (graphics_image_table.Synchornize(maxwell3d.regs.tic.Address(), tic_limit)) {
- graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID);
+ if (channel_state->graphics_image_table.Synchronize(maxwell3d->regs.tex_header.Address(),
+ tic_limit)) {
+ channel_state->graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID);
}
}
template <class P>
void TextureCache<P>::SynchronizeComputeDescriptors() {
- const bool linked_tsc = kepler_compute.launch_description.linked_tsc;
- const u32 tic_limit = kepler_compute.regs.tic.limit;
- const u32 tsc_limit = linked_tsc ? tic_limit : kepler_compute.regs.tsc.limit;
- const GPUVAddr tsc_gpu_addr = kepler_compute.regs.tsc.Address();
- if (compute_sampler_table.Synchornize(tsc_gpu_addr, tsc_limit)) {
- compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID);
+ const bool linked_tsc = kepler_compute->launch_description.linked_tsc;
+ const u32 tic_limit = kepler_compute->regs.tic.limit;
+ const u32 tsc_limit = linked_tsc ? tic_limit : kepler_compute->regs.tsc.limit;
+ const GPUVAddr tsc_gpu_addr = kepler_compute->regs.tsc.Address();
+ if (channel_state->compute_sampler_table.Synchronize(tsc_gpu_addr, tsc_limit)) {
+ channel_state->compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID);
}
- if (compute_image_table.Synchornize(kepler_compute.regs.tic.Address(), tic_limit)) {
- compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID);
+ if (channel_state->compute_image_table.Synchronize(kepler_compute->regs.tic.Address(),
+ tic_limit)) {
+ channel_state->compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID);
}
}
template <class P>
bool TextureCache<P>::RescaleRenderTargets(bool is_clear) {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
u32 scale_rating = 0;
bool rescaled = false;
std::array<ImageId, NUM_RT> tmp_color_images{};
@@ -315,7 +317,7 @@ bool TextureCache<P>::RescaleRenderTargets(bool is_clear) {
template <class P>
void TextureCache<P>::UpdateRenderTargets(bool is_clear) {
using namespace VideoCommon::Dirty;
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::RenderTargets]) {
for (size_t index = 0; index < NUM_RT; ++index) {
ImageViewId& color_buffer_id = render_targets.color_buffer_ids[index];
@@ -342,7 +344,7 @@ void TextureCache<P>::UpdateRenderTargets(bool is_clear) {
PrepareImageView(depth_buffer_id, true, is_clear && IsFullClear(depth_buffer_id));
for (size_t index = 0; index < NUM_RT; ++index) {
- render_targets.draw_buffers[index] = static_cast<u8>(maxwell3d.regs.rt_control.Map(index));
+ render_targets.draw_buffers[index] = static_cast<u8>(maxwell3d->regs.rt_control.Map(index));
}
u32 up_scale = 1;
u32 down_shift = 0;
@@ -351,9 +353,10 @@ void TextureCache<P>::UpdateRenderTargets(bool is_clear) {
down_shift = Settings::values.resolution_info.down_shift;
}
render_targets.size = Extent2D{
- (maxwell3d.regs.render_area.width * up_scale) >> down_shift,
- (maxwell3d.regs.render_area.height * up_scale) >> down_shift,
+ (maxwell3d->regs.surface_clip.width * up_scale) >> down_shift,
+ (maxwell3d->regs.surface_clip.height * up_scale) >> down_shift,
};
+ render_targets.is_rescaled = is_rescaling;
flags[Dirty::DepthBiasGlobal] = true;
}
@@ -439,7 +442,7 @@ void TextureCache<P>::WriteMemory(VAddr cpu_addr, size_t size) {
template <class P>
void TextureCache<P>::DownloadMemory(VAddr cpu_addr, size_t size) {
std::vector<ImageId> images;
- ForEachImageInRegion(cpu_addr, size, [this, &images](ImageId image_id, ImageBase& image) {
+ ForEachImageInRegion(cpu_addr, size, [&images](ImageId image_id, ImageBase& image) {
if (!image.IsSafeDownload()) {
return;
}
@@ -458,7 +461,7 @@ void TextureCache<P>::DownloadMemory(VAddr cpu_addr, size_t size) {
const auto copies = FullDownloadCopies(image.info);
image.DownloadMemory(map, copies);
runtime.Finish();
- SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span);
+ SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span);
}
}
@@ -477,12 +480,20 @@ void TextureCache<P>::UnmapMemory(VAddr cpu_addr, size_t size) {
}
template <class P>
-void TextureCache<P>::UnmapGPUMemory(GPUVAddr gpu_addr, size_t size) {
+void TextureCache<P>::UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t size) {
std::vector<ImageId> deleted_images;
- ForEachImageInRegionGPU(gpu_addr, size,
+ ForEachImageInRegionGPU(as_id, gpu_addr, size,
[&](ImageId id, Image&) { deleted_images.push_back(id); });
for (const ImageId id : deleted_images) {
Image& image = slot_images[id];
+ if (True(image.flags & ImageFlagBits::CpuModified)) {
+            continue;
+ }
+ image.flags |= ImageFlagBits::CpuModified;
+ if (True(image.flags & ImageFlagBits::Tracked)) {
+ UntrackImage(image, id);
+ }
+ /*
if (True(image.flags & ImageFlagBits::Remapped)) {
continue;
}
@@ -490,6 +501,7 @@ void TextureCache<P>::UnmapGPUMemory(GPUVAddr gpu_addr, size_t size) {
if (True(image.flags & ImageFlagBits::Tracked)) {
UntrackImage(image, id);
}
+ */
}
}
@@ -655,7 +667,7 @@ void TextureCache<P>::PopAsyncFlushes() {
for (const ImageId image_id : download_ids) {
const ImageBase& image = slot_images[image_id];
const auto copies = FullDownloadCopies(image.info);
- SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, download_span);
+ SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, download_span);
download_map.offset += image.unswizzled_size_bytes;
download_span = download_span.subspan(image.unswizzled_size_bytes);
}
@@ -714,26 +726,26 @@ void TextureCache<P>::UploadImageContents(Image& image, StagingBuffer& staging)
const GPUVAddr gpu_addr = image.gpu_addr;
if (True(image.flags & ImageFlagBits::AcceleratedUpload)) {
- gpu_memory.ReadBlockUnsafe(gpu_addr, mapped_span.data(), mapped_span.size_bytes());
+ gpu_memory->ReadBlockUnsafe(gpu_addr, mapped_span.data(), mapped_span.size_bytes());
const auto uploads = FullUploadSwizzles(image.info);
runtime.AccelerateImageUpload(image, staging, uploads);
} else if (True(image.flags & ImageFlagBits::Converted)) {
std::vector<u8> unswizzled_data(image.unswizzled_size_bytes);
- auto copies = UnswizzleImage(gpu_memory, gpu_addr, image.info, unswizzled_data);
+ auto copies = UnswizzleImage(*gpu_memory, gpu_addr, image.info, unswizzled_data);
ConvertImage(unswizzled_data, image.info, mapped_span, copies);
image.UploadMemory(staging, copies);
} else {
- const auto copies = UnswizzleImage(gpu_memory, gpu_addr, image.info, mapped_span);
+ const auto copies = UnswizzleImage(*gpu_memory, gpu_addr, image.info, mapped_span);
image.UploadMemory(staging, copies);
}
}
template <class P>
ImageViewId TextureCache<P>::FindImageView(const TICEntry& config) {
- if (!IsValidEntry(gpu_memory, config)) {
+ if (!IsValidEntry(*gpu_memory, config)) {
return NULL_IMAGE_VIEW_ID;
}
- const auto [pair, is_new] = image_views.try_emplace(config);
+ const auto [pair, is_new] = channel_state->image_views.try_emplace(config);
ImageViewId& image_view_id = pair->second;
if (is_new) {
image_view_id = CreateImageView(config);
@@ -777,9 +789,9 @@ ImageId TextureCache<P>::FindOrInsertImage(const ImageInfo& info, GPUVAddr gpu_a
template <class P>
ImageId TextureCache<P>::FindImage(const ImageInfo& info, GPUVAddr gpu_addr,
RelaxedOptions options) {
- std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+ std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
if (!cpu_addr) {
- cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info));
+ cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info));
if (!cpu_addr) {
return ImageId{};
}
@@ -860,7 +872,7 @@ void TextureCache<P>::InvalidateScale(Image& image) {
image.scale_tick = frame_tick + 1;
}
const std::span<const ImageViewId> image_view_ids = image.image_view_ids;
- auto& dirty = maxwell3d.dirty.flags;
+ auto& dirty = maxwell3d->dirty.flags;
dirty[Dirty::RenderTargets] = true;
dirty[Dirty::ZetaBuffer] = true;
for (size_t rt = 0; rt < NUM_RT; ++rt) {
@@ -880,12 +892,15 @@ void TextureCache<P>::InvalidateScale(Image& image) {
}
image.image_view_ids.clear();
image.image_view_infos.clear();
- if constexpr (ENABLE_VALIDATION) {
- std::ranges::fill(graphics_image_view_ids, CORRUPT_ID);
- std::ranges::fill(compute_image_view_ids, CORRUPT_ID);
+ for (size_t c : active_channel_ids) {
+ auto& channel_info = channel_storage[c];
+ if constexpr (ENABLE_VALIDATION) {
+ std::ranges::fill(channel_info.graphics_image_view_ids, CORRUPT_ID);
+ std::ranges::fill(channel_info.compute_image_view_ids, CORRUPT_ID);
+ }
+ channel_info.graphics_image_table.Invalidate();
+ channel_info.compute_image_table.Invalidate();
}
- graphics_image_table.Invalidate();
- compute_image_table.Invalidate();
has_deleted_images = true;
}
@@ -929,10 +944,10 @@ bool TextureCache<P>::ScaleDown(Image& image) {
template <class P>
ImageId TextureCache<P>::InsertImage(const ImageInfo& info, GPUVAddr gpu_addr,
RelaxedOptions options) {
- std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+ std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
if (!cpu_addr) {
const auto size = CalculateGuestSizeInBytes(info);
- cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr, size);
+ cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, size);
if (!cpu_addr) {
const VAddr fake_addr = ~(1ULL << 40ULL) + virtual_invalid_space;
virtual_invalid_space += Common::AlignUp(size, 32);
@@ -1050,7 +1065,7 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
const ImageId new_image_id = slot_images.insert(runtime, new_info, gpu_addr, cpu_addr);
Image& new_image = slot_images[new_image_id];
- if (!gpu_memory.IsContinousRange(new_image.gpu_addr, new_image.guest_size_bytes)) {
+ if (!gpu_memory->IsContinousRange(new_image.gpu_addr, new_image.guest_size_bytes)) {
new_image.flags |= ImageFlagBits::Sparse;
}
@@ -1192,7 +1207,7 @@ SamplerId TextureCache<P>::FindSampler(const TSCEntry& config) {
if (std::ranges::all_of(config.raw, [](u64 value) { return value == 0; })) {
return NULL_SAMPLER_ID;
}
- const auto [pair, is_new] = samplers.try_emplace(config);
+ const auto [pair, is_new] = channel_state->samplers.try_emplace(config);
if (is_new) {
pair->second = slot_samplers.insert(runtime, config);
}
@@ -1201,7 +1216,7 @@ SamplerId TextureCache<P>::FindSampler(const TSCEntry& config) {
template <class P>
ImageViewId TextureCache<P>::FindColorBuffer(size_t index, bool is_clear) {
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
if (index >= regs.rt_control.count) {
return ImageViewId{};
}
@@ -1219,7 +1234,7 @@ ImageViewId TextureCache<P>::FindColorBuffer(size_t index, bool is_clear) {
template <class P>
ImageViewId TextureCache<P>::FindDepthBuffer(bool is_clear) {
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
if (!regs.zeta_enable) {
return ImageViewId{};
}
@@ -1316,11 +1331,17 @@ void TextureCache<P>::ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& f
template <class P>
template <typename Func>
-void TextureCache<P>::ForEachImageInRegionGPU(GPUVAddr gpu_addr, size_t size, Func&& func) {
+void TextureCache<P>::ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, size_t size,
+ Func&& func) {
using FuncReturn = typename std::invoke_result<Func, ImageId, Image&>::type;
static constexpr bool BOOL_BREAK = std::is_same_v<FuncReturn, bool>;
boost::container::small_vector<ImageId, 8> images;
- ForEachGPUPage(gpu_addr, size, [this, &images, gpu_addr, size, func](u64 page) {
+ auto storage_id = getStorageID(as_id);
+ if (!storage_id) {
+ return;
+ }
+ auto& gpu_page_table = gpu_page_table_storage[*storage_id];
+    ForEachGPUPage(gpu_addr, size, [this, &gpu_page_table, &images, gpu_addr, size, func](u64 page) {
const auto it = gpu_page_table.find(page);
if (it == gpu_page_table.end()) {
if constexpr (BOOL_BREAK) {
@@ -1403,9 +1424,9 @@ template <typename Func>
void TextureCache<P>::ForEachSparseSegment(ImageBase& image, Func&& func) {
using FuncReturn = typename std::invoke_result<Func, GPUVAddr, VAddr, size_t>::type;
static constexpr bool RETURNS_BOOL = std::is_same_v<FuncReturn, bool>;
- const auto segments = gpu_memory.GetSubmappedRange(image.gpu_addr, image.guest_size_bytes);
+ const auto segments = gpu_memory->GetSubmappedRange(image.gpu_addr, image.guest_size_bytes);
for (const auto& [gpu_addr, size] : segments) {
- std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+ std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
ASSERT(cpu_addr);
if constexpr (RETURNS_BOOL) {
if (func(gpu_addr, *cpu_addr, size)) {
@@ -1448,8 +1469,9 @@ void TextureCache<P>::RegisterImage(ImageId image_id) {
}
image.lru_index = lru_cache.Insert(image_id, frame_tick);
- ForEachGPUPage(image.gpu_addr, image.guest_size_bytes,
- [this, image_id](u64 page) { gpu_page_table[page].push_back(image_id); });
+ ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, image_id](u64 page) {
+ (*channel_state->gpu_page_table)[page].push_back(image_id);
+ });
if (False(image.flags & ImageFlagBits::Sparse)) {
auto map_id =
slot_map_views.insert(image.gpu_addr, image.cpu_addr, image.guest_size_bytes, image_id);
@@ -1480,9 +1502,9 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
image.flags &= ~ImageFlagBits::BadOverlap;
lru_cache.Free(image.lru_index);
const auto& clear_page_table =
- [this, image_id](
- u64 page,
- std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>>& selected_page_table) {
+ [image_id](u64 page,
+ std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>>&
+ selected_page_table) {
const auto page_it = selected_page_table.find(page);
if (page_it == selected_page_table.end()) {
ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS);
@@ -1497,8 +1519,9 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
}
image_ids.erase(vector_it);
};
- ForEachGPUPage(image.gpu_addr, image.guest_size_bytes,
- [this, &clear_page_table](u64 page) { clear_page_table(page, gpu_page_table); });
+ ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, &clear_page_table](u64 page) {
+ clear_page_table(page, (*channel_state->gpu_page_table));
+ });
if (False(image.flags & ImageFlagBits::Sparse)) {
const auto map_id = image.map_view_id;
ForEachCPUPage(image.cpu_addr, image.guest_size_bytes, [this, map_id](u64 page) {
@@ -1631,7 +1654,7 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) {
ASSERT_MSG(False(image.flags & ImageFlagBits::Registered), "Image was not unregistered");
// Mark render targets as dirty
- auto& dirty = maxwell3d.dirty.flags;
+ auto& dirty = maxwell3d->dirty.flags;
dirty[Dirty::RenderTargets] = true;
dirty[Dirty::ZetaBuffer] = true;
for (size_t rt = 0; rt < NUM_RT; ++rt) {
@@ -1681,24 +1704,30 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) {
if (alloc_images.empty()) {
image_allocs_table.erase(alloc_it);
}
- if constexpr (ENABLE_VALIDATION) {
- std::ranges::fill(graphics_image_view_ids, CORRUPT_ID);
- std::ranges::fill(compute_image_view_ids, CORRUPT_ID);
+ for (size_t c : active_channel_ids) {
+ auto& channel_info = channel_storage[c];
+ if constexpr (ENABLE_VALIDATION) {
+ std::ranges::fill(channel_info.graphics_image_view_ids, CORRUPT_ID);
+ std::ranges::fill(channel_info.compute_image_view_ids, CORRUPT_ID);
+ }
+ channel_info.graphics_image_table.Invalidate();
+ channel_info.compute_image_table.Invalidate();
}
- graphics_image_table.Invalidate();
- compute_image_table.Invalidate();
has_deleted_images = true;
}
template <class P>
void TextureCache<P>::RemoveImageViewReferences(std::span<const ImageViewId> removed_views) {
- auto it = image_views.begin();
- while (it != image_views.end()) {
- const auto found = std::ranges::find(removed_views, it->second);
- if (found != removed_views.end()) {
- it = image_views.erase(it);
- } else {
- ++it;
+ for (size_t c : active_channel_ids) {
+ auto& channel_info = channel_storage[c];
+ auto it = channel_info.image_views.begin();
+ while (it != channel_info.image_views.end()) {
+ const auto found = std::ranges::find(removed_views, it->second);
+ if (found != removed_views.end()) {
+ it = channel_info.image_views.erase(it);
+ } else {
+ ++it;
+ }
}
}
}
@@ -1729,6 +1758,7 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
boost::container::small_vector<const AliasedImage*, 1> aliased_images;
Image& image = slot_images[image_id];
bool any_rescaled = True(image.flags & ImageFlagBits::Rescaled);
+ bool any_modified = True(image.flags & ImageFlagBits::GpuModified);
u64 most_recent_tick = image.modification_tick;
for (const AliasedImage& aliased : image.aliased_images) {
ImageBase& aliased_image = slot_images[aliased.id];
@@ -1736,9 +1766,7 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
most_recent_tick = std::max(most_recent_tick, aliased_image.modification_tick);
aliased_images.push_back(&aliased);
any_rescaled |= True(aliased_image.flags & ImageFlagBits::Rescaled);
- if (True(aliased_image.flags & ImageFlagBits::GpuModified)) {
- image.flags |= ImageFlagBits::GpuModified;
- }
+ any_modified |= True(aliased_image.flags & ImageFlagBits::GpuModified);
}
}
if (aliased_images.empty()) {
@@ -1753,6 +1781,9 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
}
}
image.modification_tick = most_recent_tick;
+ if (any_modified) {
+ image.flags |= ImageFlagBits::GpuModified;
+ }
std::ranges::sort(aliased_images, [this](const AliasedImage* lhs, const AliasedImage* rhs) {
const ImageBase& lhs_image = slot_images[lhs->id];
const ImageBase& rhs_image = slot_images[rhs->id];
@@ -1931,6 +1962,7 @@ std::pair<FramebufferId, ImageViewId> TextureCache<P>::RenderTargetFromImage(
.color_buffer_ids = {color_view_id},
.depth_buffer_id = depth_view_id,
.size = {extent.width >> samples_x, extent.height >> samples_y},
+ .is_rescaled = is_rescaled,
});
return {framebuffer_id, view_id};
}
@@ -1943,13 +1975,13 @@ bool TextureCache<P>::IsFullClear(ImageViewId id) {
const ImageViewBase& image_view = slot_image_views[id];
const ImageBase& image = slot_images[image_view.image_id];
const Extent3D size = image_view.size;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
const auto& scissor = regs.scissor_test[0];
if (image.info.resources.levels > 1 || image.info.resources.layers > 1) {
// Images with multiple resources can't be cleared in a single call
return false;
}
- if (regs.clear_flags.scissor == 0) {
+ if (regs.clear_control.use_scissor == 0) {
// If scissor testing is disabled, the clear is always full
return true;
}
@@ -1958,4 +1990,19 @@ bool TextureCache<P>::IsFullClear(ImageViewId id) {
scissor.max_y >= size.height;
}
+template <class P>
+void TextureCache<P>::CreateChannel(struct Tegra::Control::ChannelState& channel) {
+ VideoCommon::ChannelSetupCaches<TextureCacheChannelInfo>::CreateChannel(channel);
+ const auto it = channel_map.find(channel.bind_id);
+ auto* this_state = &channel_storage[it->second];
+ const auto& this_as_ref = address_spaces[channel.memory_manager->GetID()];
+ this_state->gpu_page_table = &gpu_page_table_storage[this_as_ref.storage_id];
+}
+
+/// Allocate a GPU page table for a newly registered address space.
+template <class P>
+void TextureCache<P>::OnGPUASRegister([[maybe_unused]] size_t map_id) {
+ gpu_page_table_storage.emplace_back();
+}
+
} // namespace VideoCommon
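
Note: taken together, these texture_cache.h hunks move the descriptor tables, sampler/view maps and the GPU page table behind `channel_state`, which the base class rebinds when the active channel changes, so a channel switch becomes a pointer swap instead of a cache rebuild. A reduced model of that shape, with simplified types standing in for the real classes:

    #include <cstddef>
    #include <cstdint>
    #include <deque>
    #include <unordered_map>
    #include <vector>

    // Reduced model of the channel-swap pattern; the real classes carry far more state.
    struct ChannelInfo {
        std::unordered_map<std::uint64_t, std::vector<int>>* gpu_page_table = nullptr;
    };

    class ChannelAwareCache {
    public:
        std::size_t CreateChannel(std::size_t as_id) {
            if (as_id >= page_tables.size()) {
                // One page table per GPU address space; std::deque keeps element
                // addresses stable as new tables are appended at the back.
                page_tables.resize(as_id + 1);
            }
            channels.emplace_back();
            channels.back().gpu_page_table = &page_tables[as_id];
            return channels.size() - 1;
        }

        // Binding a channel only repoints channel_state; cached entries survive.
        void BindChannel(std::size_t id) {
            channel_state = &channels[id];
        }

    private:
        std::deque<ChannelInfo> channels;
        std::deque<std::unordered_map<std::uint64_t, std::vector<int>>> page_tables;
        ChannelInfo* channel_state = nullptr;
    };
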
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index 7e6c6cef2..2fa8445eb 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -1,8 +1,10 @@
-// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
#pragma once
+#include <deque>
+#include <limits>
#include <mutex>
#include <span>
#include <type_traits>
@@ -11,9 +13,11 @@
#include <queue>
#include "common/common_types.h"
+#include "common/hash.h"
#include "common/literals.h"
#include "common/lru_cache.h"
#include "video_core/compatible_formats.h"
+#include "video_core/control/channel_state_cache.h"
#include "video_core/delayed_destruction_ring.h"
#include "video_core/engines/fermi_2d.h"
#include "video_core/surface.h"
@@ -26,6 +30,10 @@
#include "video_core/texture_cache/types.h"
#include "video_core/textures/texture.h"
+namespace Tegra::Control {
+struct ChannelState;
+}
+
namespace VideoCommon {
using Tegra::Texture::SwizzleSource;
@@ -44,8 +52,35 @@ struct ImageViewInOut {
ImageViewId id{};
};
+using TextureCacheGPUMap = std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>>;
+
+class TextureCacheChannelInfo : public ChannelInfo {
+public:
+ TextureCacheChannelInfo() = delete;
+ TextureCacheChannelInfo(Tegra::Control::ChannelState& state) noexcept;
+ TextureCacheChannelInfo(const TextureCacheChannelInfo& state) = delete;
+ TextureCacheChannelInfo& operator=(const TextureCacheChannelInfo&) = delete;
+ TextureCacheChannelInfo(TextureCacheChannelInfo&& other) noexcept = default;
+ TextureCacheChannelInfo& operator=(TextureCacheChannelInfo&& other) noexcept = default;
+
+ DescriptorTable<TICEntry> graphics_image_table{gpu_memory};
+ DescriptorTable<TSCEntry> graphics_sampler_table{gpu_memory};
+ std::vector<SamplerId> graphics_sampler_ids;
+ std::vector<ImageViewId> graphics_image_view_ids;
+
+ DescriptorTable<TICEntry> compute_image_table{gpu_memory};
+ DescriptorTable<TSCEntry> compute_sampler_table{gpu_memory};
+ std::vector<SamplerId> compute_sampler_ids;
+ std::vector<ImageViewId> compute_image_view_ids;
+
+ std::unordered_map<TICEntry, ImageViewId> image_views;
+ std::unordered_map<TSCEntry, SamplerId> samplers;
+
+ TextureCacheGPUMap* gpu_page_table;
+};
+
template <class P>
-class TextureCache {
+class TextureCache : public VideoCommon::ChannelSetupCaches<TextureCacheChannelInfo> {
/// Address shift for caching images into a hash table
static constexpr u64 YUZU_PAGEBITS = 20;
@@ -58,6 +93,8 @@ class TextureCache {
/// True when the API can provide info about the memory of the device.
static constexpr bool HAS_DEVICE_MEMORY_INFO = P::HAS_DEVICE_MEMORY_INFO;
+ static constexpr size_t UNSET_CHANNEL{std::numeric_limits<size_t>::max()};
+
static constexpr s64 TARGET_THRESHOLD = 4_GiB;
static constexpr s64 DEFAULT_EXPECTED_MEMORY = 1_GiB + 125_MiB;
static constexpr s64 DEFAULT_CRITICAL_MEMORY = 1_GiB + 625_MiB;
@@ -77,16 +114,8 @@ class TextureCache {
PixelFormat src_format;
};
- template <typename T>
- struct IdentityHash {
- [[nodiscard]] size_t operator()(T value) const noexcept {
- return static_cast<size_t>(value);
- }
- };
-
public:
- explicit TextureCache(Runtime&, VideoCore::RasterizerInterface&, Tegra::Engines::Maxwell3D&,
- Tegra::Engines::KeplerCompute&, Tegra::MemoryManager&);
+ explicit TextureCache(Runtime&, VideoCore::RasterizerInterface&);
/// Notify the cache that a new frame has been queued
void TickFrame();
@@ -142,7 +171,7 @@ public:
void UnmapMemory(VAddr cpu_addr, size_t size);
/// Remove images in a region
- void UnmapGPUMemory(GPUVAddr gpu_addr, size_t size);
+ void UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t size);
/// Blit an image with the given parameters
void BlitImage(const Tegra::Engines::Fermi2D::Surface& dst,
@@ -171,6 +200,9 @@ public:
[[nodiscard]] bool IsRescaling(const ImageViewBase& image_view) const noexcept;
+ /// Create channel state.
+ void CreateChannel(Tegra::Control::ChannelState& channel) final override;
+
std::mutex mutex;
private:
@@ -205,6 +237,8 @@ private:
}
}
+ void OnGPUASRegister(size_t map_id) final override;
+
/// Runs the Garbage Collector.
void RunGarbageCollector();
@@ -273,7 +307,7 @@ private:
void ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& func);
template <typename Func>
- void ForEachImageInRegionGPU(GPUVAddr gpu_addr, size_t size, Func&& func);
+ void ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, size_t size, Func&& func);
template <typename Func>
void ForEachSparseImageInRegion(GPUVAddr gpu_addr, size_t size, Func&& func);
@@ -338,31 +372,16 @@ private:
u64 GetScaledImageSizeBytes(ImageBase& image);
Runtime& runtime;
- VideoCore::RasterizerInterface& rasterizer;
- Tegra::Engines::Maxwell3D& maxwell3d;
- Tegra::Engines::KeplerCompute& kepler_compute;
- Tegra::MemoryManager& gpu_memory;
- DescriptorTable<TICEntry> graphics_image_table{gpu_memory};
- DescriptorTable<TSCEntry> graphics_sampler_table{gpu_memory};
- std::vector<SamplerId> graphics_sampler_ids;
- std::vector<ImageViewId> graphics_image_view_ids;
-
- DescriptorTable<TICEntry> compute_image_table{gpu_memory};
- DescriptorTable<TSCEntry> compute_sampler_table{gpu_memory};
- std::vector<SamplerId> compute_sampler_ids;
- std::vector<ImageViewId> compute_image_view_ids;
+ VideoCore::RasterizerInterface& rasterizer;
+ std::deque<TextureCacheGPUMap> gpu_page_table_storage;
RenderTargets render_targets;
- std::unordered_map<TICEntry, ImageViewId> image_views;
- std::unordered_map<TSCEntry, SamplerId> samplers;
std::unordered_map<RenderTargets, FramebufferId> framebuffers;
- std::unordered_map<u64, std::vector<ImageMapId>, IdentityHash<u64>> page_table;
- std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>> gpu_page_table;
- std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>> sparse_page_table;
-
+ std::unordered_map<u64, std::vector<ImageMapId>, Common::IdentityHash<u64>> page_table;
+ std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>> sparse_page_table;
std::unordered_map<ImageId, std::vector<ImageViewId>> sparse_views;
VAddr virtual_invalid_space{};
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp
index 1820823b2..1223df5a0 100644
--- a/src/video_core/texture_cache/util.cpp
+++ b/src/video_core/texture_cache/util.cpp
@@ -517,7 +517,6 @@ void SwizzleBlockLinearImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr
const u32 host_bytes_per_layer = num_blocks_per_layer * bytes_per_block;
UNIMPLEMENTED_IF(info.tile_width_spacing > 0);
-
UNIMPLEMENTED_IF(copy.image_offset.x != 0);
UNIMPLEMENTED_IF(copy.image_offset.y != 0);
UNIMPLEMENTED_IF(copy.image_offset.z != 0);
@@ -755,7 +754,7 @@ bool IsValidEntry(const Tegra::MemoryManager& gpu_memory, const TICEntry& config
if (address == 0) {
return false;
}
- if (address > (1ULL << 48)) {
+ if (address >= (1ULL << 40)) {
return false;
}
if (gpu_memory.GpuToCpuAddress(address).has_value()) {
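
Note: the tightened bound reflects that Tegra GPU virtual addresses are 40 bits wide, so any descriptor address at or above 1 << 40 can be rejected outright; the old 1 << 48 test let invalid addresses through. A standalone restatement of the check:

    #include <cstdint>

    // Tegra GPU virtual addresses are 40 bits wide, so the last mappable byte is
    // (1ULL << 40) - 1; a null address is also rejected, as in IsValidEntry above.
    constexpr bool IsValidGpuAddress(std::uint64_t address) {
        return address != 0 && address < (std::uint64_t{1} << 40);
    }

    static_assert(IsValidGpuAddress((std::uint64_t{1} << 40) - 1)); // last valid byte
    static_assert(!IsValidGpuAddress(std::uint64_t{1} << 40));      // first invalid
    static_assert(!IsValidGpuAddress(0));
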
diff --git a/src/video_core/textures/astc.cpp b/src/video_core/textures/astc.cpp
index b159494c5..69a32819a 100644
--- a/src/video_core/textures/astc.cpp
+++ b/src/video_core/textures/astc.cpp
@@ -1413,7 +1413,7 @@ static void FillVoidExtentLDR(InputBitStream& strm, std::span<u32> outBuf, u32 b
static void FillError(std::span<u32> outBuf, u32 blockWidth, u32 blockHeight) {
for (u32 j = 0; j < blockHeight; j++) {
for (u32 i = 0; i < blockWidth; i++) {
- outBuf[j * blockWidth + i] = 0xFFFF00FF;
+ outBuf[j * blockWidth + i] = 0x00000000;
}
}
}
@@ -1656,13 +1656,13 @@ void Decompress(std::span<const uint8_t> data, uint32_t width, uint32_t height,
const u32 cols = Common::DivideUp(width, block_width);
Common::ThreadWorker workers{std::max(std::thread::hardware_concurrency(), 2U) / 2,
- "yuzu:ASTCDecompress"};
+ "ASTCDecompress"};
for (u32 z = 0; z < depth; ++z) {
const u32 depth_offset = z * height * width * 4;
for (u32 y_index = 0; y_index < rows; ++y_index) {
- auto decompress_stride = [data, width, height, depth, block_width, block_height, output,
- rows, cols, z, depth_offset, y_index] {
+ auto decompress_stride = [data, width, height, block_width, block_height, output, rows,
+ cols, z, depth_offset, y_index] {
const u32 y = y_index * block_height;
for (u32 x_index = 0; x_index < cols; ++x_index) {
const u32 block_index = (z * rows * cols) + (y_index * cols) + x_index;
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp
index 913f8ebcb..fd1a4b987 100644
--- a/src/video_core/textures/decoders.cpp
+++ b/src/video_core/textures/decoders.cpp
@@ -21,7 +21,7 @@ constexpr u32 pdep(u32 value) {
u32 m = mask;
for (u32 bit = 1; m; bit += bit) {
if (value & bit)
- result |= m & -m;
+ result |= m & (~m + 1);
m &= m - 1;
}
return result;
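
Note: `m & -m` and `m & (~m + 1)` both isolate the lowest set bit of `m`; the rewrite only avoids applying unary minus to an unsigned operand, which some compilers warn about. A worked check of the identity:

    #include <cstdint>

    // For m = 0b0110'1000, ~m + 1 ends in 0b1001'1000, so the AND keeps only
    // 0b0000'1000, the lowest set bit; 'm &= m - 1' then clears that bit.
    constexpr std::uint32_t LowestSetBit(std::uint32_t m) {
        return m & (~m + 1u);
    }

    static_assert(LowestSetBit(0b0110'1000u) == 0b0000'1000u);
    static_assert(LowestSetBit(0x8000'0000u) == 0x8000'0000u);
    static_assert(LowestSetBit(0u) == 0u); // the loop guard 'for (...; m; ...)' skips this case
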
@@ -35,7 +35,7 @@ void incrpdep(u32& value) {
template <bool TO_LINEAR, u32 BYTES_PER_PIXEL>
void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32 height, u32 depth,
- u32 block_height, u32 block_depth, u32 stride_alignment) {
+ u32 block_height, u32 block_depth, u32 stride) {
// The origin of the transformation can be configured here, leave it as zero as the current API
// doesn't expose it.
static constexpr u32 origin_x = 0;
@@ -45,7 +45,6 @@ void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32
// We can configure here a custom pitch
// As it's not exposed 'width * BYTES_PER_PIXEL' will be the expected pitch.
const u32 pitch = width * BYTES_PER_PIXEL;
- const u32 stride = Common::AlignUpLog2(width, stride_alignment) * BYTES_PER_PIXEL;
const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT);
const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth);
@@ -89,6 +88,69 @@ void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32
}
}
+template <bool TO_LINEAR, u32 BYTES_PER_PIXEL>
+void SwizzleSubrectImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32 height,
+ u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 num_lines,
+ u32 block_height, u32 block_depth, u32 pitch_linear) {
+    // The Z origin of the transformation could be configured here; it is left as zero
+    // because the current API doesn't expose it.
+ static constexpr u32 origin_z = 0;
+
+    // Unlike SwizzleImpl, the linear pitch is supplied by the caller through
+    // 'pitch_linear' rather than derived from 'width * BYTES_PER_PIXEL'.
+ const u32 pitch = pitch_linear;
+ const u32 stride = Common::AlignUpLog2(width * BYTES_PER_PIXEL, GOB_SIZE_X_SHIFT);
+
+ const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT);
+ const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth);
+ const u32 slice_size =
+ Common::DivCeilLog2(height, block_height + GOB_SIZE_Y_SHIFT) * block_size;
+
+ const u32 block_height_mask = (1U << block_height) - 1;
+ const u32 block_depth_mask = (1U << block_depth) - 1;
+ const u32 x_shift = GOB_SIZE_SHIFT + block_height + block_depth;
+
+ u32 unprocessed_lines = num_lines;
+ u32 extent_y = std::min(num_lines, height - origin_y);
+
+ for (u32 slice = 0; slice < depth; ++slice) {
+ const u32 z = slice + origin_z;
+ const u32 offset_z = (z >> block_depth) * slice_size +
+ ((z & block_depth_mask) << (GOB_SIZE_SHIFT + block_height));
+ const u32 lines_in_y = std::min(unprocessed_lines, extent_y);
+ for (u32 line = 0; line < lines_in_y; ++line) {
+ const u32 y = line + origin_y;
+ const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(y);
+
+ const u32 block_y = y >> GOB_SIZE_Y_SHIFT;
+ const u32 offset_y = (block_y >> block_height) * block_size +
+ ((block_y & block_height_mask) << GOB_SIZE_SHIFT);
+
+ u32 swizzled_x = pdep<SWIZZLE_X_BITS>(origin_x * BYTES_PER_PIXEL);
+ for (u32 column = 0; column < extent_x;
+ ++column, incrpdep<SWIZZLE_X_BITS, BYTES_PER_PIXEL>(swizzled_x)) {
+ const u32 x = (column + origin_x) * BYTES_PER_PIXEL;
+ const u32 offset_x = (x >> GOB_SIZE_X_SHIFT) << x_shift;
+
+ const u32 base_swizzled_offset = offset_z + offset_y + offset_x;
+ const u32 swizzled_offset = base_swizzled_offset + (swizzled_x | swizzled_y);
+
+ const u32 unswizzled_offset =
+ slice * pitch * height + line * pitch + column * BYTES_PER_PIXEL;
+
+ u8* const dst = &output[TO_LINEAR ? swizzled_offset : unswizzled_offset];
+ const u8* const src = &input[TO_LINEAR ? unswizzled_offset : swizzled_offset];
+
+ std::memcpy(dst, src, BYTES_PER_PIXEL);
+ }
+ }
+ unprocessed_lines -= lines_in_y;
+ if (unprocessed_lines == 0) {
+ return;
+ }
+ }
+}
+
template <bool TO_LINEAR>
void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
u32 height, u32 depth, u32 block_height, u32 block_depth, u32 stride_alignment) {
@@ -111,122 +173,39 @@ void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixe
}
}
-template <u32 BYTES_PER_PIXEL>
-void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
- u8* swizzled_data, const u8* unswizzled_data, u32 block_height_bit,
- u32 offset_x, u32 offset_y) {
- const u32 block_height = 1U << block_height_bit;
- const u32 image_width_in_gobs =
- (swizzled_width * BYTES_PER_PIXEL + (GOB_SIZE_X - 1)) / GOB_SIZE_X;
- for (u32 line = 0; line < subrect_height; ++line) {
- const u32 dst_y = line + offset_y;
- const u32 gob_address_y =
- (dst_y / (GOB_SIZE_Y * block_height)) * GOB_SIZE * block_height * image_width_in_gobs +
- ((dst_y % (GOB_SIZE_Y * block_height)) / GOB_SIZE_Y) * GOB_SIZE;
-
- const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(dst_y);
- u32 swizzled_x = pdep<SWIZZLE_X_BITS>(offset_x * BYTES_PER_PIXEL);
- for (u32 x = 0; x < subrect_width;
- ++x, incrpdep<SWIZZLE_X_BITS, BYTES_PER_PIXEL>(swizzled_x)) {
- const u32 dst_x = x + offset_x;
- const u32 gob_address =
- gob_address_y + (dst_x * BYTES_PER_PIXEL / GOB_SIZE_X) * GOB_SIZE * block_height;
- const u32 swizzled_offset = gob_address + (swizzled_x | swizzled_y);
- const u32 unswizzled_offset = line * source_pitch + x * BYTES_PER_PIXEL;
-
- const u8* const source_line = unswizzled_data + unswizzled_offset;
- u8* const dest_addr = swizzled_data + swizzled_offset;
- std::memcpy(dest_addr, source_line, BYTES_PER_PIXEL);
- }
- }
-}
-
-template <u32 BYTES_PER_PIXEL>
-void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 block_height,
- u32 origin_x, u32 origin_y, u8* output, const u8* input) {
- const u32 stride = width * BYTES_PER_PIXEL;
- const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) / GOB_SIZE_X;
- const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height);
-
- const u32 block_height_mask = (1U << block_height) - 1;
- const u32 x_shift = GOB_SIZE_SHIFT + block_height;
-
- for (u32 line = 0; line < line_count; ++line) {
- const u32 src_y = line + origin_y;
- const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(src_y);
-
- const u32 block_y = src_y >> GOB_SIZE_Y_SHIFT;
- const u32 src_offset_y = (block_y >> block_height) * block_size +
- ((block_y & block_height_mask) << GOB_SIZE_SHIFT);
-
- u32 swizzled_x = pdep<SWIZZLE_X_BITS>(origin_x * BYTES_PER_PIXEL);
- for (u32 column = 0; column < line_length_in;
- ++column, incrpdep<SWIZZLE_X_BITS, BYTES_PER_PIXEL>(swizzled_x)) {
- const u32 src_x = (column + origin_x) * BYTES_PER_PIXEL;
- const u32 src_offset_x = (src_x >> GOB_SIZE_X_SHIFT) << x_shift;
-
- const u32 swizzled_offset = src_offset_y + src_offset_x + (swizzled_x | swizzled_y);
- const u32 unswizzled_offset = line * pitch + column * BYTES_PER_PIXEL;
-
- std::memcpy(output + unswizzled_offset, input + swizzled_offset, BYTES_PER_PIXEL);
- }
- }
-}
-
-template <u32 BYTES_PER_PIXEL>
-void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height,
- u32 block_height, u32 block_depth, u32 origin_x, u32 origin_y, u8* output,
- const u8* input) {
- UNIMPLEMENTED_IF(origin_x > 0);
- UNIMPLEMENTED_IF(origin_y > 0);
-
- const u32 stride = width * BYTES_PER_PIXEL;
- const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) / GOB_SIZE_X;
- const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth);
-
- const u32 block_height_mask = (1U << block_height) - 1;
- const u32 x_shift = static_cast<u32>(GOB_SIZE_SHIFT) + block_height + block_depth;
-
- for (u32 line = 0; line < line_count; ++line) {
- const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(line);
- const u32 block_y = line / GOB_SIZE_Y;
- const u32 dst_offset_y =
- (block_y >> block_height) * block_size + (block_y & block_height_mask) * GOB_SIZE;
-
- u32 swizzled_x = 0;
- for (u32 x = 0; x < line_length_in; ++x, incrpdep<SWIZZLE_X_BITS, 1>(swizzled_x)) {
- const u32 dst_offset =
- ((x / GOB_SIZE_X) << x_shift) + dst_offset_y + (swizzled_x | swizzled_y);
- const u32 src_offset = x * BYTES_PER_PIXEL + line * pitch;
- std::memcpy(output + dst_offset, input + src_offset, BYTES_PER_PIXEL);
- }
- }
-}
} // Anonymous namespace
void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
u32 width, u32 height, u32 depth, u32 block_height, u32 block_depth,
u32 stride_alignment) {
+ const u32 stride = Common::AlignUpLog2(width, stride_alignment) * bytes_per_pixel;
+ const u32 new_bpp = std::min(4U, static_cast<u32>(std::countr_zero(width * bytes_per_pixel)));
+ width = (width * bytes_per_pixel) >> new_bpp;
+ bytes_per_pixel = 1U << new_bpp;
Swizzle<false>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth,
- stride_alignment);
+ stride);
}
void SwizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
u32 height, u32 depth, u32 block_height, u32 block_depth,
u32 stride_alignment) {
+ const u32 stride = Common::AlignUpLog2(width, stride_alignment) * bytes_per_pixel;
+ const u32 new_bpp = std::min(4U, static_cast<u32>(std::countr_zero(width * bytes_per_pixel)));
+ width = (width * bytes_per_pixel) >> new_bpp;
+ bytes_per_pixel = 1U << new_bpp;
Swizzle<true>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth,
- stride_alignment);
+ stride);
}
-void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
- u32 bytes_per_pixel, u8* swizzled_data, const u8* unswizzled_data,
- u32 block_height_bit, u32 offset_x, u32 offset_y) {
+void SwizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
+ u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 extent_y,
+ u32 block_height, u32 block_depth, u32 pitch_linear) {
switch (bytes_per_pixel) {
#define BPP_CASE(x) \
case x: \
- return SwizzleSubrect<x>(subrect_width, subrect_height, source_pitch, swizzled_width, \
- swizzled_data, unswizzled_data, block_height_bit, offset_x, \
- offset_y);
+ return SwizzleSubrectImpl<true, x>(output, input, width, height, depth, origin_x, \
+ origin_y, extent_x, extent_y, block_height, \
+ block_depth, pitch_linear);
BPP_CASE(1)
BPP_CASE(2)
BPP_CASE(3)
@@ -241,13 +220,15 @@ void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32
}
}
-void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 bytes_per_pixel,
- u32 block_height, u32 origin_x, u32 origin_y, u8* output, const u8* input) {
+void UnswizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
+ u32 width, u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x,
+ u32 extent_y, u32 block_height, u32 block_depth, u32 pitch_linear) {
switch (bytes_per_pixel) {
#define BPP_CASE(x) \
case x: \
- return UnswizzleSubrect<x>(line_length_in, line_count, pitch, width, block_height, \
- origin_x, origin_y, output, input);
+ return SwizzleSubrectImpl<false, x>(output, input, width, height, depth, origin_x, \
+ origin_y, extent_x, extent_y, block_height, \
+ block_depth, pitch_linear);
BPP_CASE(1)
BPP_CASE(2)
BPP_CASE(3)
@@ -262,55 +243,6 @@ void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width,
}
}
-void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height,
- u32 bytes_per_pixel, u32 block_height, u32 block_depth, u32 origin_x,
- u32 origin_y, u8* output, const u8* input) {
- switch (bytes_per_pixel) {
-#define BPP_CASE(x) \
- case x: \
- return SwizzleSliceToVoxel<x>(line_length_in, line_count, pitch, width, height, \
- block_height, block_depth, origin_x, origin_y, output, \
- input);
- BPP_CASE(1)
- BPP_CASE(2)
- BPP_CASE(3)
- BPP_CASE(4)
- BPP_CASE(6)
- BPP_CASE(8)
- BPP_CASE(12)
- BPP_CASE(16)
-#undef BPP_CASE
- default:
- ASSERT_MSG(false, "Invalid bytes_per_pixel={}", bytes_per_pixel);
- }
-}
-
-void SwizzleKepler(const u32 width, const u32 height, const u32 dst_x, const u32 dst_y,
- const u32 block_height_bit, const std::size_t copy_size, const u8* source_data,
- u8* swizzle_data) {
- const u32 block_height = 1U << block_height_bit;
- const u32 image_width_in_gobs{(width + GOB_SIZE_X - 1) / GOB_SIZE_X};
- std::size_t count = 0;
- for (std::size_t y = dst_y; y < height && count < copy_size; ++y) {
- const std::size_t gob_address_y =
- (y / (GOB_SIZE_Y * block_height)) * GOB_SIZE * block_height * image_width_in_gobs +
- ((y % (GOB_SIZE_Y * block_height)) / GOB_SIZE_Y) * GOB_SIZE;
- const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(static_cast<u32>(y));
- u32 swizzled_x = pdep<SWIZZLE_X_BITS>(dst_x);
- for (std::size_t x = dst_x; x < width && count < copy_size;
- ++x, incrpdep<SWIZZLE_X_BITS, 1>(swizzled_x)) {
- const std::size_t gob_address =
- gob_address_y + (x / GOB_SIZE_X) * GOB_SIZE * block_height;
- const std::size_t swizzled_offset = gob_address + (swizzled_x | swizzled_y);
- const u8* source_line = source_data + count;
- u8* dest_addr = swizzle_data + swizzled_offset;
- count++;
-
- *dest_addr = *source_line;
- }
- }
-}
-
std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth,
u32 block_height, u32 block_depth) {
if (tiled) {
diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h
index 31a11708f..e70407692 100644
--- a/src/video_core/textures/decoders.h
+++ b/src/video_core/textures/decoders.h
@@ -40,7 +40,6 @@ constexpr SwizzleTable MakeSwizzleTable() {
}
return table;
}
-constexpr SwizzleTable SWIZZLE_TABLE = MakeSwizzleTable();
/// Unswizzles a block linear texture into linear memory.
void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
@@ -57,34 +56,14 @@ std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height
u32 block_height, u32 block_depth);
/// Copies an untiled subrectangle into a tiled surface.
-void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
- u32 bytes_per_pixel, u8* swizzled_data, const u8* unswizzled_data,
- u32 block_height_bit, u32 offset_x, u32 offset_y);
+void SwizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
+ u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 extent_y,
+ u32 block_height, u32 block_depth, u32 pitch_linear);
/// Copies a tiled subrectangle into a linear surface.
-void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 bytes_per_pixel,
- u32 block_height, u32 origin_x, u32 origin_y, u8* output, const u8* input);
-
-/// @brief Swizzles a 2D array of pixels into a 3D texture
-/// @param line_length_in Number of pixels per line
-/// @param line_count Number of lines
-/// @param pitch Number of bytes per line
-/// @param width Width of the swizzled texture
-/// @param height Height of the swizzled texture
-/// @param bytes_per_pixel Number of bytes used per pixel
-/// @param block_height Block height shift
-/// @param block_depth Block depth shift
-/// @param origin_x Column offset in pixels of the swizzled texture
-/// @param origin_y Row offset in pixels of the swizzled texture
-/// @param output Pointer to the pixels of the swizzled texture
-/// @param input Pointer to the 2D array of pixels used as input
-/// @pre input and output points to an array large enough to hold the number of bytes used
-void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height,
- u32 bytes_per_pixel, u32 block_height, u32 block_depth, u32 origin_x,
- u32 origin_y, u8* output, const u8* input);
-
-void SwizzleKepler(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height,
- std::size_t copy_size, const u8* source_data, u8* swizzle_data);
+void UnswizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
+ u32 width, u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x,
+ u32 extent_y, u32 block_height, u32 block_depth, u32 pitch_linear);
/// Obtains the offset of the gob for positions 'dst_x' & 'dst_y'
u64 GetGOBOffset(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height,
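
Note: with the subrect helpers rewritten over spans, callers now pass the swizzled surface dimensions plus an explicit origin/extent window and the linear pitch in one consistent unit system. A declaration-only usage sketch against the new prototype; the wrapper function and all dimensions below are illustrative, and the real entry point lives in this header:

    #include <cstddef>
    #include <cstdint>
    #include <span>
    #include <vector>

    // Declaration-only stand-in mirroring the new prototype above, shown purely to
    // illustrate parameter order and units (texels for origin/extent, bytes for pitch).
    void UnswizzleSubrect(std::span<std::uint8_t> output, std::span<const std::uint8_t> input,
                          std::uint32_t bytes_per_pixel, std::uint32_t width,
                          std::uint32_t height, std::uint32_t depth, std::uint32_t origin_x,
                          std::uint32_t origin_y, std::uint32_t extent_x, std::uint32_t extent_y,
                          std::uint32_t block_height, std::uint32_t block_depth,
                          std::uint32_t pitch_linear);

    // Hypothetical caller: pull a 64x32 texel window at (16, 8) out of a swizzled
    // 256x128 surface with 4 bytes per pixel into a tightly packed linear buffer.
    void CopyWindow(std::span<const std::uint8_t> swizzled) {
        constexpr std::uint32_t bpp = 4, extent_x = 64, extent_y = 32;
        constexpr std::uint32_t pitch = extent_x * bpp;
        std::vector<std::uint8_t> linear(static_cast<std::size_t>(extent_y) * pitch);
        UnswizzleSubrect(linear, swizzled, bpp, /*width=*/256, /*height=*/128, /*depth=*/1,
                         /*origin_x=*/16, /*origin_y=*/8, extent_x, extent_y,
                         /*block_height=*/4, /*block_depth=*/0, pitch);
    }
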
diff --git a/src/video_core/transform_feedback.cpp b/src/video_core/transform_feedback.cpp
index 7e605981c..45071185a 100644
--- a/src/video_core/transform_feedback.cpp
+++ b/src/video_core/transform_feedback.cpp
@@ -15,51 +15,51 @@ namespace VideoCommon {
std::vector<Shader::TransformFeedbackVarying> MakeTransformFeedbackVaryings(
const TransformFeedbackState& state) {
static constexpr std::array VECTORS{
- 28, // gl_Position
- 32, // Generic 0
- 36, // Generic 1
- 40, // Generic 2
- 44, // Generic 3
- 48, // Generic 4
- 52, // Generic 5
- 56, // Generic 6
- 60, // Generic 7
- 64, // Generic 8
- 68, // Generic 9
- 72, // Generic 10
- 76, // Generic 11
- 80, // Generic 12
- 84, // Generic 13
- 88, // Generic 14
- 92, // Generic 15
- 96, // Generic 16
- 100, // Generic 17
- 104, // Generic 18
- 108, // Generic 19
- 112, // Generic 20
- 116, // Generic 21
- 120, // Generic 22
- 124, // Generic 23
- 128, // Generic 24
- 132, // Generic 25
- 136, // Generic 26
- 140, // Generic 27
- 144, // Generic 28
- 148, // Generic 29
- 152, // Generic 30
- 156, // Generic 31
- 160, // gl_FrontColor
- 164, // gl_FrontSecondaryColor
- 160, // gl_BackColor
- 164, // gl_BackSecondaryColor
- 192, // gl_TexCoord[0]
- 196, // gl_TexCoord[1]
- 200, // gl_TexCoord[2]
- 204, // gl_TexCoord[3]
- 208, // gl_TexCoord[4]
- 212, // gl_TexCoord[5]
- 216, // gl_TexCoord[6]
- 220, // gl_TexCoord[7]
+ 28U, // gl_Position
+ 32U, // Generic 0
+ 36U, // Generic 1
+ 40U, // Generic 2
+ 44U, // Generic 3
+ 48U, // Generic 4
+ 52U, // Generic 5
+ 56U, // Generic 6
+ 60U, // Generic 7
+ 64U, // Generic 8
+ 68U, // Generic 9
+ 72U, // Generic 10
+ 76U, // Generic 11
+ 80U, // Generic 12
+ 84U, // Generic 13
+ 88U, // Generic 14
+ 92U, // Generic 15
+ 96U, // Generic 16
+ 100U, // Generic 17
+ 104U, // Generic 18
+ 108U, // Generic 19
+ 112U, // Generic 20
+ 116U, // Generic 21
+ 120U, // Generic 22
+ 124U, // Generic 23
+ 128U, // Generic 24
+ 132U, // Generic 25
+ 136U, // Generic 26
+ 140U, // Generic 27
+ 144U, // Generic 28
+ 148U, // Generic 29
+ 152U, // Generic 30
+ 156U, // Generic 31
+ 160U, // gl_FrontColor
+ 164U, // gl_FrontSecondaryColor
+ 160U, // gl_BackColor
+ 164U, // gl_BackSecondaryColor
+ 192U, // gl_TexCoord[0]
+ 196U, // gl_TexCoord[1]
+ 200U, // gl_TexCoord[2]
+ 204U, // gl_TexCoord[3]
+ 208U, // gl_TexCoord[4]
+ 212U, // gl_TexCoord[5]
+ 216U, // gl_TexCoord[6]
+ 220U, // gl_TexCoord[7]
};
std::vector<Shader::TransformFeedbackVarying> xfb(256);
for (size_t buffer = 0; buffer < state.layouts.size(); ++buffer) {
@@ -68,8 +68,20 @@ std::vector<Shader::TransformFeedbackVarying> MakeTransformFeedbackVaryings(
const u32 varying_count = layout.varying_count;
u32 highest = 0;
for (u32 offset = 0; offset < varying_count; ++offset) {
- const u32 base_offset = offset;
- const u8 location = locations[offset];
+ const auto get_attribute = [&locations](u32 index) -> u32 {
+ switch (index % 4) {
+ case 0:
+ return locations[index / 4].attribute0.Value();
+ case 1:
+ return locations[index / 4].attribute1.Value();
+ case 2:
+ return locations[index / 4].attribute2.Value();
+ case 3:
+ return locations[index / 4].attribute3.Value();
+ }
+ UNREACHABLE();
+ return 0;
+ };
UNIMPLEMENTED_IF_MSG(layout.stream != 0, "Stream is not zero: {}", layout.stream);
Shader::TransformFeedbackVarying varying{
@@ -78,16 +90,18 @@ std::vector<Shader::TransformFeedbackVarying> MakeTransformFeedbackVaryings(
.offset = offset * 4,
.components = 1,
};
- if (std::ranges::find(VECTORS, Common::AlignDown(location, 4)) != VECTORS.end()) {
- UNIMPLEMENTED_IF_MSG(location % 4 != 0, "Unaligned TFB");
+ const u32 base_offset = offset;
+ const auto attribute{get_attribute(offset)};
+ if (std::ranges::find(VECTORS, Common::AlignDown(attribute, 4)) != VECTORS.end()) {
+ UNIMPLEMENTED_IF_MSG(attribute % 4 != 0, "Unaligned TFB {}", attribute);
- const u8 base_index = location / 4;
- while (offset + 1 < varying_count && base_index == locations[offset + 1] / 4) {
+ const auto base_index = attribute / 4;
+ while (offset + 1 < varying_count && base_index == get_attribute(offset + 1) / 4) {
++offset;
++varying.components;
}
}
- xfb[location] = varying;
+ xfb[attribute] = varying;
highest = std::max(highest, (base_offset + varying.components) * 4);
}
UNIMPLEMENTED_IF(highest != layout.stride);
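
Note: the varying table is no longer a flat byte array; each 32-bit StreamOutLayout word packs four attribute selectors, and `get_attribute` extracts one by index. An equivalent standalone unpacking with plain shifts in place of the BitField accessors (the 8-bit field width is an assumption matching the selectors' value range):

    #include <array>
    #include <cstdint>

    // Extract the index-th attribute selector from packed stream-out layout words,
    // mirroring the get_attribute lambda above.
    constexpr std::uint32_t GetAttribute(const std::array<std::uint32_t, 32>& locations,
                                         std::uint32_t index) {
        const std::uint32_t word = locations[index / 4];
        const std::uint32_t shift = (index % 4) * 8;
        return (word >> shift) & 0xff;
    }

    // One word selecting gl_Position.xyzw: attributes 28..31 (0x1c..0x1f).
    constexpr std::array<std::uint32_t, 32> kLocations{0x1f1e'1d1cu};
    static_assert(GetAttribute(kLocations, 0) == 28); // matches VECTORS via AlignDown(28, 4)
    static_assert(GetAttribute(kLocations, 3) == 31);
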
diff --git a/src/video_core/transform_feedback.h b/src/video_core/transform_feedback.h
index a519adb59..d13eb16c3 100644
--- a/src/video_core/transform_feedback.h
+++ b/src/video_core/transform_feedback.h
@@ -19,7 +19,8 @@ struct TransformFeedbackState {
u32 stride;
};
std::array<Layout, Tegra::Engines::Maxwell3D::Regs::NumTransformFeedbackBuffers> layouts;
- std::array<std::array<u8, 128>, Tegra::Engines::Maxwell3D::Regs::NumTransformFeedbackBuffers>
+ std::array<std::array<Tegra::Engines::Maxwell3D::Regs::StreamOutLayout, 32>,
+ Tegra::Engines::Maxwell3D::Regs::NumTransformFeedbackBuffers>
varyings;
};
diff --git a/src/video_core/vulkan_common/vulkan_wrapper.h b/src/video_core/vulkan_common/vulkan_wrapper.h
index 795f16bfb..1b3f493bd 100644
--- a/src/video_core/vulkan_common/vulkan_wrapper.h
+++ b/src/video_core/vulkan_common/vulkan_wrapper.h
@@ -519,9 +519,7 @@ public:
dld{rhs.dld} {}
/// Assign an allocation transfering ownership from another allocation.
- /// Releases any previously held allocation.
PoolAllocations& operator=(PoolAllocations&& rhs) noexcept {
- Release();
allocations = std::move(rhs.allocations);
num = rhs.num;
device = rhs.device;
@@ -530,11 +528,6 @@ public:
return *this;
}
- /// Destroys any held allocation.
- ~PoolAllocations() {
- Release();
- }
-
/// Returns the number of allocations.
std::size_t size() const noexcept {
return num;
@@ -557,19 +550,6 @@ public:
}
private:
- /// Destroys the held allocations if they exist.
- void Release() noexcept {
- if (!allocations) {
- return;
- }
- const Span<AllocationType> span(allocations.get(), num);
- const VkResult result = Free(device, pool, span, *dld);
- // There's no way to report errors from a destructor.
- if (result != VK_SUCCESS) {
- std::terminate();
- }
- }
-
std::unique_ptr<AllocationType[]> allocations;
std::size_t num = 0;
VkDevice device = nullptr;