author     ameerj <52414509+ameerj@users.noreply.github.com>    2021-02-13 22:49:24 +0100
committer  ameerj <52414509+ameerj@users.noreply.github.com>    2021-03-13 18:16:03 +0100
commit     20eb368e147e1c27f05d6923c51596f8dfe24e89 (patch)
tree       a8b1c8eb79eb55e189a10dfd43b8b6bb1449220f /src
parent     host_shaders: Modify shader cmake integration to allow for larger shaders (diff)
Diffstat (limited to 'src')
-rw-r--r--   src/video_core/host_shaders/astc_decoder.comp        |  43
-rw-r--r--   src/video_core/renderer_vulkan/maxwell_to_vk.cpp     |   2
-rw-r--r--   src/video_core/renderer_vulkan/vk_compute_pass.cpp   | 298
-rw-r--r--   src/video_core/renderer_vulkan/vk_compute_pass.h     |  32
-rw-r--r--   src/video_core/renderer_vulkan/vk_rasterizer.cpp     |   5
-rw-r--r--   src/video_core/renderer_vulkan/vk_rasterizer.h       |   1
-rw-r--r--   src/video_core/renderer_vulkan/vk_texture_cache.cpp  |  45
-rw-r--r--   src/video_core/renderer_vulkan/vk_texture_cache.h    |  12
-rw-r--r--   src/video_core/texture_cache/accelerated_swizzle.h   |   4
-rw-r--r--   src/video_core/textures/decoders.cpp                 |  23
-rw-r--r--   src/video_core/textures/decoders.h                   |  18

11 files changed, 426 insertions(+), 57 deletions(-)
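
The new ASTCDecoderPass below records one compute dispatch per mip level and layer, sizing the grid with Common::DivCeil(num_tiles, 32) in each dimension. As a rough, self-contained illustration of that dispatch math only (DivCeil here is a local stand-in for common/div_ceil.h, not the project's header):

#include <cstdint>

using u32 = std::uint32_t;

// Stand-in for Common::DivCeil: integer division rounded up.
constexpr u32 DivCeil(u32 number, u32 divisor) {
    return (number + divisor - 1) / divisor;
}

// A 1024x1024 image with 8x8 ASTC blocks has 128x128 tiles, so the pass
// would record a 4x4 grid of workgroups for that level.
static_assert(DivCeil(1024u / 8u, 32u) == 4);
// Partial tiles still get a workgroup rather than being dropped.
static_assert(DivCeil(100u, 32u) == 4);
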
diff --git a/src/video_core/host_shaders/astc_decoder.comp b/src/video_core/host_shaders/astc_decoder.comp
index 070190a5c..2ddac2e1d 100644
--- a/src/video_core/host_shaders/astc_decoder.comp
+++ b/src/video_core/host_shaders/astc_decoder.comp
@@ -16,7 +16,7 @@
 #define BINDING_7_TO_8_BUFFER 4
 #define BINDING_8_TO_8_BUFFER 5
 #define BINDING_BYTE_TO_16_BUFFER 6
-#define BINDING_OUTPUT_IMAGE 3
+#define BINDING_OUTPUT_IMAGE 7
 
 #else // ^^^ Vulkan ^^^
 // vvv OpenGL vvv
@@ -85,7 +85,26 @@ layout(binding = BINDING_SWIZZLE_BUFFER, std430) readonly buffer SwizzleTable {
 layout(binding = BINDING_INPUT_BUFFER, std430) buffer InputBufferU32 {
     uint astc_data[];
 };
-layout(binding = BINDING_OUTPUT_IMAGE) uniform writeonly image2D dest_image;
+
+// ASTC Encodings data
+layout(binding = BINDING_ENC_BUFFER, std430) readonly buffer EncodingsValues {
+    EncodingData encoding_values[];
+};
+// ASTC Precompiled tables
+layout(binding = BINDING_6_TO_8_BUFFER, std430) readonly buffer REPLICATE_6_BIT_TO_8 {
+    uint REPLICATE_6_BIT_TO_8_TABLE[];
+};
+layout(binding = BINDING_7_TO_8_BUFFER, std430) readonly buffer REPLICATE_7_BIT_TO_8 {
+    uint REPLICATE_7_BIT_TO_8_TABLE[];
+};
+layout(binding = BINDING_8_TO_8_BUFFER, std430) readonly buffer REPLICATE_8_BIT_TO_8 {
+    uint REPLICATE_8_BIT_TO_8_TABLE[];
+};
+layout(binding = BINDING_BYTE_TO_16_BUFFER, std430) readonly buffer REPLICATE_BYTE_TO_16 {
+    uint REPLICATE_BYTE_TO_16_TABLE[];
+};
+
+layout(binding = BINDING_OUTPUT_IMAGE, rgba8) uniform writeonly image2D dest_image;
 
 const uint GOB_SIZE_X = 64;
 const uint GOB_SIZE_Y = 8;
@@ -109,23 +128,6 @@ uint ReadTexel(uint offset) {
     return bitfieldExtract(astc_data[offset / 4], int((offset * 8) & 24), 8);
 }
 
-// ASTC Encodings data
-layout(binding = BINDING_ENC_BUFFER, std430) readonly buffer EncodingsValues {
-    EncodingData encoding_values[256];
-};
-// ASTC Precompiled tables
-layout(binding = BINDING_6_TO_8_BUFFER, std430) readonly buffer REPLICATE_6_BIT_TO_8 {
-    uint REPLICATE_6_BIT_TO_8_TABLE[];
-};
-layout(binding = BINDING_7_TO_8_BUFFER, std430) readonly buffer REPLICATE_7_BIT_TO_8 {
-    uint REPLICATE_7_BIT_TO_8_TABLE[];
-};
-layout(binding = BINDING_8_TO_8_BUFFER, std430) readonly buffer REPLICATE_8_BIT_TO_8 {
-    uint REPLICATE_8_BIT_TO_8_TABLE[];
-};
-layout(binding = BINDING_BYTE_TO_16_BUFFER, std430) readonly buffer REPLICATE_BYTE_TO_16 {
-    uint REPLICATE_BYTE_TO_16_TABLE[];
-};
 
 const int BLOCK_SIZE_IN_BYTES = 16;
 
@@ -1275,8 +1277,7 @@ void main() {
     offset += (pos.x >> GOB_SIZE_X_SHIFT) << x_shift;
     offset += swizzle;
-    const ivec3 invocation_destination = ivec3(gl_GlobalInvocationID + destination);
-    const ivec3 coord = ivec3(invocation_destination * uvec3(block_dims, 1.0));
+    const ivec3 coord = ivec3(gl_GlobalInvocationID * uvec3(block_dims, 1.0));
     uint block_index =
         layer * num_image_blocks.x * num_image_blocks.y + pos.y * num_image_blocks.x + pos.x;
 
     current_index = 0;
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index 19aaf034f..f088447e9 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -166,7 +166,7 @@ struct FormatTuple {
     {VK_FORMAT_R16G16_SINT, Attachable | Storage},            // R16G16_SINT
     {VK_FORMAT_R16G16_SNORM, Attachable | Storage},           // R16G16_SNORM
     {VK_FORMAT_UNDEFINED},                                    // R32G32B32_FLOAT
-    {VK_FORMAT_R8G8B8A8_SRGB, Attachable},                    // A8B8G8R8_SRGB
+    {VK_FORMAT_A8B8G8R8_SRGB_PACK32, Attachable},             // A8B8G8R8_SRGB
     {VK_FORMAT_R8G8_UNORM, Attachable | Storage},             // R8G8_UNORM
     {VK_FORMAT_R8G8_SNORM, Attachable | Storage},             // R8G8_SNORM
     {VK_FORMAT_R8G8_SINT, Attachable | Storage},              // R8G8_SINT
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 2f9a7b028..7587ab1e0 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -11,18 +11,38 @@
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/div_ceil.h"
+#include "video_core/host_shaders/astc_decoder_comp_spv.h"
 #include "video_core/host_shaders/vulkan_quad_indexed_comp_spv.h"
 #include "video_core/host_shaders/vulkan_uint8_comp_spv.h"
 #include "video_core/renderer_vulkan/vk_compute_pass.h"
 #include "video_core/renderer_vulkan/vk_descriptor_pool.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
+#include "video_core/renderer_vulkan/vk_texture_cache.h"
 #include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/texture_cache/accelerated_swizzle.h"
+#include "video_core/texture_cache/types.h"
+#include "video_core/textures/astc.h"
+#include "video_core/textures/decoders.h"
 #include "video_core/vulkan_common/vulkan_device.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"
 
 namespace Vulkan {
+
+using Tegra::Texture::SWIZZLE_TABLE;
+using Tegra::Texture::ASTC::EncodingsValues;
+
 namespace {
+
+constexpr u32 ASTC_BINDING_SWIZZLE_BUFFER = 0;
+constexpr u32 ASTC_BINDING_INPUT_BUFFER = 1;
+constexpr u32 ASTC_BINDING_ENC_BUFFER = 2;
+constexpr u32 ASTC_BINDING_6_TO_8_BUFFER = 3;
+constexpr u32 ASTC_BINDING_7_TO_8_BUFFER = 4;
+constexpr u32 ASTC_BINDING_8_TO_8_BUFFER = 5;
+constexpr u32 ASTC_BINDING_BYTE_TO_16_BUFFER = 6;
+constexpr u32 ASTC_BINDING_OUTPUT_IMAGE = 7;
+
 VkPushConstantRange BuildComputePushConstantRange(std::size_t size) {
     return {
         .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
@@ -50,6 +70,67 @@ std::array<VkDescriptorSetLayoutBinding, 2> BuildInputOutputDescriptorSetBinding
     }};
 }
 
+std::array<VkDescriptorSetLayoutBinding, 8> BuildASTCDescriptorSetBindings() {
+    return {{
+        {
+            .binding = ASTC_BINDING_SWIZZLE_BUFFER, // Swizzle buffer
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .descriptorCount = 1,
+            .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+            .pImmutableSamplers = nullptr,
+        },
+        {
+            .binding = ASTC_BINDING_INPUT_BUFFER, // ASTC Img data buffer
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .descriptorCount = 1,
+            .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+            .pImmutableSamplers = nullptr,
+        },
+        {
+            .binding = ASTC_BINDING_ENC_BUFFER, // Encodings buffer
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .descriptorCount = 1,
+            .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+            .pImmutableSamplers = nullptr,
+        },
+        {
+            .binding = ASTC_BINDING_6_TO_8_BUFFER, // BINDING_6_TO_8_BUFFER
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .descriptorCount = 1,
+            .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+            .pImmutableSamplers = nullptr,
+        },
+        {
+            .binding = ASTC_BINDING_7_TO_8_BUFFER, // BINDING_7_TO_8_BUFFER
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .descriptorCount = 1,
+            .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+            .pImmutableSamplers = nullptr,
+        },
+        {
+            .binding = ASTC_BINDING_8_TO_8_BUFFER, // BINDING_8_TO_8_BUFFER
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .descriptorCount = 1,
+            .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+            .pImmutableSamplers = nullptr,
+        },
+        {
+            .binding = ASTC_BINDING_BYTE_TO_16_BUFFER, // BINDING_BYTE_TO_16_BUFFER
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .descriptorCount = 1,
+            .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+            .pImmutableSamplers = nullptr,
+        },
+        {
+            .binding = ASTC_BINDING_OUTPUT_IMAGE, // Output image
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
+            .descriptorCount = 1,
+            .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+            .pImmutableSamplers = nullptr,
+        },
+    }};
+}
+
 VkDescriptorUpdateTemplateEntryKHR BuildInputOutputDescriptorUpdateTemplate() {
     return {
         .dstBinding = 0,
@@ -61,6 +142,90 @@ VkDescriptorUpdateTemplateEntryKHR BuildInputOutputDescriptorUpdateTemplate() {
     };
 }
 
+std::array<VkDescriptorUpdateTemplateEntryKHR, 8> BuildASTCPassDescriptorUpdateTemplateEntry() {
+    return {{
+        {
+            .dstBinding = ASTC_BINDING_SWIZZLE_BUFFER,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .offset = 0 * sizeof(DescriptorUpdateEntry),
+            .stride = sizeof(DescriptorUpdateEntry),
+        },
+        {
+            .dstBinding = ASTC_BINDING_INPUT_BUFFER,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .offset = 1 * sizeof(DescriptorUpdateEntry),
+            .stride = sizeof(DescriptorUpdateEntry),
+        },
+        {
+            .dstBinding = ASTC_BINDING_ENC_BUFFER,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .offset = 2 * sizeof(DescriptorUpdateEntry),
+            .stride = sizeof(DescriptorUpdateEntry),
+        },
+        {
+            .dstBinding = ASTC_BINDING_6_TO_8_BUFFER,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .offset = 3 * sizeof(DescriptorUpdateEntry),
+            .stride = sizeof(DescriptorUpdateEntry),
+        },
+        {
+            .dstBinding = ASTC_BINDING_7_TO_8_BUFFER,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .offset = 4 * sizeof(DescriptorUpdateEntry),
+            .stride = sizeof(DescriptorUpdateEntry),
+        },
+        {
+            .dstBinding = ASTC_BINDING_8_TO_8_BUFFER,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .offset = 5 * sizeof(DescriptorUpdateEntry),
+            .stride = sizeof(DescriptorUpdateEntry),
+        },
+        {
+            .dstBinding = ASTC_BINDING_BYTE_TO_16_BUFFER,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .offset = 6 * sizeof(DescriptorUpdateEntry),
+            .stride = sizeof(DescriptorUpdateEntry),
+        },
+        {
+            .dstBinding = ASTC_BINDING_OUTPUT_IMAGE,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
+            .offset = 7 * sizeof(DescriptorUpdateEntry),
+            .stride = sizeof(DescriptorUpdateEntry),
+        },
+    }};
+}
+
+struct AstcPushConstants {
+    std::array<u32, 2> num_image_blocks;
+    std::array<u32, 2> blocks_dims;
+    u32 layer;
+    VideoCommon::Accelerated::BlockLinearSwizzle2DParams params;
+};
+
+struct AstcBufferData {
+    decltype(SWIZZLE_TABLE) swizzle_table_buffer = SWIZZLE_TABLE;
+    decltype(EncodingsValues) encoding_values = EncodingsValues;
+    decltype(REPLICATE_6_BIT_TO_8_TABLE) replicate_6_to_8 = REPLICATE_6_BIT_TO_8_TABLE;
+    decltype(REPLICATE_7_BIT_TO_8_TABLE) replicate_7_to_8 = REPLICATE_7_BIT_TO_8_TABLE;
+    decltype(REPLICATE_8_BIT_TO_8_TABLE) replicate_8_to_8 = REPLICATE_8_BIT_TO_8_TABLE;
+    decltype(REPLICATE_BYTE_TO_16_TABLE) replicate_byte_to_16 = REPLICATE_BYTE_TO_16_TABLE;
+} constexpr ASTC_BUFFER_DATA;
 } // Anonymous namespace
 
 VKComputePass::VKComputePass(const Device& device, VKDescriptorPool& descriptor_pool,
@@ -238,4 +403,137 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
     return {staging.buffer, staging.offset};
 }
 
+using namespace Tegra::Texture::ASTC;
+ASTCDecoderPass::ASTCDecoderPass(const Device& device_, VKScheduler& scheduler_,
+                                 VKDescriptorPool& descriptor_pool_,
+                                 StagingBufferPool& staging_buffer_pool_,
+                                 VKUpdateDescriptorQueue& update_descriptor_queue_,
+                                 MemoryAllocator& memory_allocator_)
+    : VKComputePass(device_, descriptor_pool_, BuildASTCDescriptorSetBindings(),
+                    BuildASTCPassDescriptorUpdateTemplateEntry(),
+                    BuildComputePushConstantRange(sizeof(AstcPushConstants)),
+                    ASTC_DECODER_COMP_SPV),
+      device{device_}, scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
+      update_descriptor_queue{update_descriptor_queue_}, memory_allocator{memory_allocator_} {}
+
+ASTCDecoderPass::~ASTCDecoderPass() = default;
+
+void ASTCDecoderPass::MakeDataBuffer() {
+    data_buffer = device.GetLogical().CreateBuffer(VkBufferCreateInfo{
+        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .size = sizeof(ASTC_BUFFER_DATA),
+        .usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
+        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+        .queueFamilyIndexCount = 0,
+        .pQueueFamilyIndices = nullptr,
+    });
+    data_buffer_commit = memory_allocator.Commit(data_buffer, MemoryUsage::Upload);
+
+    const auto staging_ref =
+        staging_buffer_pool.Request(sizeof(ASTC_BUFFER_DATA), MemoryUsage::Upload);
+    std::memcpy(staging_ref.mapped_span.data(), &ASTC_BUFFER_DATA, sizeof(ASTC_BUFFER_DATA));
+    scheduler.Record([src = staging_ref.buffer, dst = *data_buffer](vk::CommandBuffer cmdbuf) {
+        cmdbuf.CopyBuffer(src, dst,
+                          VkBufferCopy{
+                              .srcOffset = 0,
+                              .dstOffset = 0,
+                              .size = sizeof(ASTC_BUFFER_DATA),
+                          });
+        cmdbuf.PipelineBarrier(
+            VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0,
+            VkMemoryBarrier{
+                .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+                .pNext = nullptr,
+                .srcAccessMask = 0,
+                .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
+            },
+            {}, {});
+    });
+}
+
+void ASTCDecoderPass::Assemble(Image& image, const StagingBufferRef& map,
+                               std::span<const VideoCommon::SwizzleParameters> swizzles) {
+    using namespace VideoCommon::Accelerated;
+    const VideoCommon::Extent2D tile_size{
+        .width = VideoCore::Surface::DefaultBlockWidth(image.info.format),
+        .height = VideoCore::Surface::DefaultBlockHeight(image.info.format),
+    };
+    scheduler.RequestOutsideRenderPassOperationContext();
+    if (!data_buffer) {
+        MakeDataBuffer();
+    }
+    const std::array<u32, 2> block_dims{tile_size.width, tile_size.height};
+    for (s32 layer = 0; layer < image.info.resources.layers; layer++) {
+        for (const VideoCommon::SwizzleParameters& swizzle : swizzles) {
+            const size_t input_offset = swizzle.buffer_offset + map.offset;
+            const auto num_dispatches_x = Common::DivCeil(swizzle.num_tiles.width, 32U);
+            const auto num_dispatches_y = Common::DivCeil(swizzle.num_tiles.height, 32U);
+            const std::array num_image_blocks{swizzle.num_tiles.width, swizzle.num_tiles.height};
+            const u32 layer_image_size =
+                image.guest_size_bytes - static_cast<u32>(swizzle.buffer_offset);
+
+            update_descriptor_queue.Acquire();
+            update_descriptor_queue.AddBuffer(*data_buffer,
+                                              offsetof(AstcBufferData, swizzle_table_buffer),
+                                              sizeof(AstcBufferData::swizzle_table_buffer));
+            update_descriptor_queue.AddBuffer(map.buffer, input_offset, image.guest_size_bytes);
+            update_descriptor_queue.AddBuffer(*data_buffer,
+                                              offsetof(AstcBufferData, encoding_values),
+                                              sizeof(AstcBufferData::encoding_values));
+            update_descriptor_queue.AddBuffer(*data_buffer,
+                                              offsetof(AstcBufferData, replicate_6_to_8),
+                                              sizeof(AstcBufferData::replicate_6_to_8));
+            update_descriptor_queue.AddBuffer(*data_buffer,
+                                              offsetof(AstcBufferData, replicate_7_to_8),
+                                              sizeof(AstcBufferData::replicate_7_to_8));
+            update_descriptor_queue.AddBuffer(*data_buffer,
+                                              offsetof(AstcBufferData, replicate_8_to_8),
+                                              sizeof(AstcBufferData::replicate_8_to_8));
+            update_descriptor_queue.AddBuffer(*data_buffer,
+                                              offsetof(AstcBufferData, replicate_byte_to_16),
+                                              sizeof(AstcBufferData::replicate_byte_to_16));
+            update_descriptor_queue.AddImage(image.StorageImageView());
+
+            const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
+            // To unswizzle the ASTC data
+            const auto params = MakeBlockLinearSwizzle2DParams(swizzle, image.info);
+            scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = map.buffer,
+                              num_dispatches_x, num_dispatches_y, layer_image_size,
+                              num_image_blocks, block_dims, layer, params, set,
+                              image = image.Handle(), input_offset,
+                              aspect_mask = image.AspectMask()](vk::CommandBuffer cmdbuf) {
+                const AstcPushConstants uniforms{num_image_blocks, block_dims, layer, params};
+
+                cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
+                cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {});
+                cmdbuf.PushConstants(layout, VK_SHADER_STAGE_COMPUTE_BIT, uniforms);
+                cmdbuf.Dispatch(num_dispatches_x, num_dispatches_y, 1);
+
+                const VkImageMemoryBarrier image_barrier{
+                    .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+                    .pNext = nullptr,
+                    .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
+                    .dstAccessMask = VK_ACCESS_SHADER_READ_BIT,
+                    .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
+                    .newLayout = VK_IMAGE_LAYOUT_GENERAL,
+                    .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                    .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                    .image = image,
+                    .subresourceRange{
+                        .aspectMask = aspect_mask,
+                        .baseMipLevel = 0,
+                        .levelCount = VK_REMAINING_MIP_LEVELS,
+                        .baseArrayLayer = 0,
+                        .layerCount = VK_REMAINING_ARRAY_LAYERS,
+                    },
+                };
+                cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+                                       VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, image_barrier);
+            });
+        }
+    }
+}
+
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index 17d781d99..5ea187c30 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -11,14 +11,21 @@
 #include "common/common_types.h"
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/renderer_vulkan/vk_descriptor_pool.h"
+#include "video_core/vulkan_common/vulkan_memory_allocator.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"
 
+namespace VideoCommon {
+struct SwizzleParameters;
+}
+
 namespace Vulkan {
 
 class Device;
 class StagingBufferPool;
 class VKScheduler;
 class VKUpdateDescriptorQueue;
+class Image;
+struct StagingBufferRef;
 
 class VKComputePass {
 public:
@@ -77,4 +84,29 @@ private:
     VKUpdateDescriptorQueue& update_descriptor_queue;
 };
 
+class ASTCDecoderPass final : public VKComputePass {
+public:
+    explicit ASTCDecoderPass(const Device& device_, VKScheduler& scheduler_,
+                             VKDescriptorPool& descriptor_pool_,
+                             StagingBufferPool& staging_buffer_pool_,
+                             VKUpdateDescriptorQueue& update_descriptor_queue_,
+                             MemoryAllocator& memory_allocator_);
+    ~ASTCDecoderPass();
+
+    void Assemble(Image& image, const StagingBufferRef& map,
+                  std::span<const VideoCommon::SwizzleParameters> swizzles);
+
+private:
+    void MakeDataBuffer();
+
+    const Device& device;
+    VKScheduler& scheduler;
+    StagingBufferPool& staging_buffer_pool;
+    VKUpdateDescriptorQueue& update_descriptor_queue;
+    MemoryAllocator& memory_allocator;
+
+    vk::Buffer data_buffer;
+    MemoryCommit data_buffer_commit;
+};
+
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index dfd38f575..df5b7b172 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -241,7 +241,10 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
       staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler),
       update_descriptor_queue(device, scheduler),
       blit_image(device, scheduler, state_tracker, descriptor_pool),
-      texture_cache_runtime{device, scheduler, memory_allocator, staging_pool, blit_image},
+      astc_decoder_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue,
+                        memory_allocator),
+      texture_cache_runtime{device, scheduler, memory_allocator,
+                            staging_pool, blit_image, astc_decoder_pass},
       texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory),
       buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool,
                            update_descriptor_queue, descriptor_pool),
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index acea1ba2d..235afc6f3 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -173,6 +173,7 @@ private:
     VKDescriptorPool descriptor_pool;
     VKUpdateDescriptorQueue update_descriptor_queue;
     BlitImageHelper blit_image;
+    ASTCDecoderPass astc_decoder_pass;
 
     GraphicsPipelineCacheKey graphics_key;
 
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 22a1014a9..f7f744587 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -10,6 +10,7 @@
 #include "video_core/engines/fermi_2d.h"
 #include "video_core/renderer_vulkan/blit_image.h"
 #include "video_core/renderer_vulkan/maxwell_to_vk.h"
+#include "video_core/renderer_vulkan/vk_compute_pass.h"
 #include "video_core/renderer_vulkan/vk_rasterizer.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
@@ -807,7 +808,7 @@ Image::Image(TextureCacheRuntime& runtime, const ImageInfo& info_, GPUVAddr gpu_
         commit = runtime.memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal);
     }
     if (IsPixelFormatASTC(info.format) && !runtime.device.IsOptimalAstcSupported()) {
-        flags |= VideoCommon::ImageFlagBits::Converted;
+        flags |= VideoCommon::ImageFlagBits::AcceleratedUpload;
     }
     if (runtime.device.HasDebuggingToolAttached()) {
         if (image) {
@@ -816,6 +817,34 @@ Image::Image(TextureCacheRuntime& runtime, const ImageInfo& info_, GPUVAddr gpu_
             buffer.SetObjectNameEXT(VideoCommon::Name(*this).c_str());
         }
     }
+    static constexpr VkImageViewUsageCreateInfo storage_image_view_usage_create_info{
+        .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO,
+        .pNext = nullptr,
+        .usage = VK_IMAGE_USAGE_STORAGE_BIT,
+    };
+    if (IsPixelFormatASTC(info.format) && !runtime.device.IsOptimalAstcSupported()) {
+        storage_image_view = runtime.device.GetLogical().CreateImageView(VkImageViewCreateInfo{
+            .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+            .pNext = &storage_image_view_usage_create_info,
+            .flags = 0,
+            .image = *image,
+            .viewType = VK_IMAGE_VIEW_TYPE_2D,
+            .format = VK_FORMAT_A8B8G8R8_UNORM_PACK32,
+            .components{
+                .r = VK_COMPONENT_SWIZZLE_IDENTITY,
+                .g = VK_COMPONENT_SWIZZLE_IDENTITY,
+                .b = VK_COMPONENT_SWIZZLE_IDENTITY,
+                .a = VK_COMPONENT_SWIZZLE_IDENTITY,
+            },
+            .subresourceRange{
+                .aspectMask = aspect_mask,
+                .baseMipLevel = 0,
+                .levelCount = VK_REMAINING_MIP_LEVELS,
+                .baseArrayLayer = 0,
+                .layerCount = VK_REMAINING_ARRAY_LAYERS,
+            },
+        });
+    }
 }
 
 void Image::UploadMemory(const StagingBufferRef& map, std::span<const BufferImageCopy> copies) {
@@ -918,7 +947,6 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
         }
     }
     const auto format_info = MaxwellToVK::SurfaceFormat(*device, FormatType::Optimal, true, format);
-    const VkFormat vk_format = format_info.format;
     const VkImageViewUsageCreateInfo image_view_usage{
         .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO,
         .pNext = nullptr,
@@ -930,7 +958,7 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
         .flags = 0,
         .image = image.Handle(),
         .viewType = VkImageViewType{},
-        .format = vk_format,
+        .format = format_info.format,
         .components{
             .r = ComponentSwizzle(swizzle[0]),
             .g = ComponentSwizzle(swizzle[1]),
@@ -982,7 +1010,7 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
         .pNext = nullptr,
         .flags = 0,
         .buffer = image.Buffer(),
-        .format = vk_format,
+        .format = format_info.format,
         .offset = 0, // TODO: Redesign buffer cache to support this
         .range = image.guest_size_bytes,
     });
@@ -1167,4 +1195,13 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
     }
 }
 
+void TextureCacheRuntime::AccelerateImageUpload(
+    Image& image, const StagingBufferRef& map,
+    std::span<const VideoCommon::SwizzleParameters> swizzles) {
+    if (IsPixelFormatASTC(image.info.format)) {
+        return astc_decoder_pass.Assemble(image, map, swizzles);
+    }
+    UNREACHABLE();
+}
+
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 3aee27ce0..51705eccb 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -20,6 +20,7 @@ using VideoCommon::Offset2D;
 using VideoCommon::RenderTargets;
 using VideoCore::Surface::PixelFormat;
 
+class ASTCDecoderPass;
 class BlitImageHelper;
 class Device;
 class Image;
@@ -60,6 +61,7 @@ struct TextureCacheRuntime {
     MemoryAllocator& memory_allocator;
     StagingBufferPool& staging_buffer_pool;
     BlitImageHelper& blit_image_helper;
+    ASTCDecoderPass& astc_decoder_pass;
     std::unordered_map<RenderPassKey, vk::RenderPass> renderpass_cache{};
 
     void Finish();
@@ -83,9 +85,7 @@ struct TextureCacheRuntime {
     }
 
     void AccelerateImageUpload(Image&, const StagingBufferRef&,
-                               std::span<const VideoCommon::SwizzleParameters>) {
-        UNREACHABLE();
-    }
+                               std::span<const VideoCommon::SwizzleParameters>);
 
     void InsertUploadMemoryBarrier() {}
 
@@ -125,11 +125,17 @@ public:
         return aspect_mask;
     }
 
+    [[nodiscard]] VkImageView StorageImageView() const noexcept {
+        return *storage_image_view;
+    }
+
 private:
     VKScheduler* scheduler;
     vk::Image image;
    vk::Buffer buffer;
     MemoryCommit commit;
+    vk::ImageView image_view;
+    vk::ImageView storage_image_view;
     VkImageAspectFlags aspect_mask = 0;
     bool initialized = false;
 };
diff --git a/src/video_core/texture_cache/accelerated_swizzle.h b/src/video_core/texture_cache/accelerated_swizzle.h
index 6ec5c78c4..a11c924e1 100644
--- a/src/video_core/texture_cache/accelerated_swizzle.h
+++ b/src/video_core/texture_cache/accelerated_swizzle.h
@@ -13,8 +13,8 @@
 namespace VideoCommon::Accelerated {
 
 struct BlockLinearSwizzle2DParams {
-    std::array<u32, 3> origin;
-    std::array<s32, 3> destination;
+    alignas(16) std::array<u32, 3> origin;
+    alignas(16) std::array<s32, 3> destination;
     u32 bytes_per_block_log2;
     u32 layer_stride;
     u32 block_size;
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp
index 62685a183..3a463d5db 100644
--- a/src/video_core/textures/decoders.cpp
+++ b/src/video_core/textures/decoders.cpp
@@ -17,26 +17,7 @@
 #include "video_core/textures/texture.h"
 
 namespace Tegra::Texture {
-
 namespace {
-/**
- * This table represents the internal swizzle of a gob, in format 16 bytes x 2 sector packing.
- * Calculates the offset of an (x, y) position within a swizzled texture.
- * Taken from the Tegra X1 Technical Reference Manual. pages 1187-1188
- */
-constexpr SwizzleTable MakeSwizzleTableConst() {
-    SwizzleTable table{};
-    for (u32 y = 0; y < table.size(); ++y) {
-        for (u32 x = 0; x < table[0].size(); ++x) {
-            table[y][x] = ((x % 64) / 32) * 256 + ((y % 8) / 2) * 64 + ((x % 32) / 16) * 32 +
-                          (y % 2) * 16 + (x % 16);
-        }
-    }
-    return table;
-}
-
-constexpr SwizzleTable SWIZZLE_TABLE = MakeSwizzleTableConst();
-
 template <bool TO_LINEAR>
 void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
              u32 height, u32 depth, u32 block_height, u32 block_depth, u32 stride_alignment) {
@@ -91,10 +72,6 @@ void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixe
 }
 } // Anonymous namespace
 
-SwizzleTable MakeSwizzleTable() {
-    return SWIZZLE_TABLE;
-}
-
 void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
                       u32 width, u32 height, u32 depth, u32 block_height, u32 block_depth,
                       u32 stride_alignment) {
diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h
index d7cdc81e8..4c14cefbf 100644
--- a/src/video_core/textures/decoders.h
+++ b/src/video_core/textures/decoders.h
@@ -23,8 +23,22 @@ constexpr u32 GOB_SIZE_SHIFT = GOB_SIZE_X_SHIFT + GOB_SIZE_Y_SHIFT + GOB_SIZE_Z_
 
 using SwizzleTable = std::array<std::array<u32, GOB_SIZE_X>, GOB_SIZE_Y>;
 
-/// Returns a z-order swizzle table
-SwizzleTable MakeSwizzleTable();
+/**
+ * This table represents the internal swizzle of a gob, in format 16 bytes x 2 sector packing.
+ * Calculates the offset of an (x, y) position within a swizzled texture.
+ * Taken from the Tegra X1 Technical Reference Manual. pages 1187-1188
+ */
+constexpr SwizzleTable MakeSwizzleTable() {
+    SwizzleTable table{};
+    for (u32 y = 0; y < table.size(); ++y) {
+        for (u32 x = 0; x < table[0].size(); ++x) {
+            table[y][x] = ((x % 64) / 32) * 256 + ((y % 8) / 2) * 64 + ((x % 32) / 16) * 32 +
+                          (y % 2) * 16 + (x % 16);
+        }
+    }
+    return table;
+}
+constexpr SwizzleTable SWIZZLE_TABLE = MakeSwizzleTable();
 
 /// Unswizzles a block linear texture into linear memory.
 void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
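
For reference, the swizzle-table construction that decoders.h now evaluates at compile time can be exercised on its own. The sketch below duplicates the formula from the patch (minus the Tegra::Texture namespace and the project's type aliases) and spot-checks a few offsets; the expected values follow directly from the formula rather than from any additional source:

#include <array>
#include <cstdint>

using u32 = std::uint32_t;

constexpr u32 GOB_SIZE_X = 64;
constexpr u32 GOB_SIZE_Y = 8;

using SwizzleTable = std::array<std::array<u32, GOB_SIZE_X>, GOB_SIZE_Y>;

// Same construction the header performs at compile time: byte offset of an
// (x, y) position inside a 64x8 GOB, per the Tegra X1 TRM sector packing.
constexpr SwizzleTable MakeSwizzleTable() {
    SwizzleTable table{};
    for (u32 y = 0; y < table.size(); ++y) {
        for (u32 x = 0; x < table[0].size(); ++x) {
            table[y][x] = ((x % 64) / 32) * 256 + ((y % 8) / 2) * 64 + ((x % 32) / 16) * 32 +
                          (y % 2) * 16 + (x % 16);
        }
    }
    return table;
}

constexpr SwizzleTable SWIZZLE_TABLE = MakeSwizzleTable();

// Spot checks: the first 16 bytes of a GOB row are linear, byte (16, 0) jumps
// to offset 32, and (0, 1) lands at 16, reflecting the 16 bytes x 2 packing.
static_assert(SWIZZLE_TABLE[0][0] == 0);
static_assert(SWIZZLE_TABLE[0][15] == 15);
static_assert(SWIZZLE_TABLE[0][16] == 32);
static_assert(SWIZZLE_TABLE[1][0] == 16);

Making the table a constexpr value in the header lets the Vulkan compute pass, which uploads SWIZZLE_TABLE into its data buffer in MakeDataBuffer, and the CPU swizzling code share a single compile-time definition instead of calling a runtime builder.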