Diffstat (limited to 'src/video_core/renderer_vulkan/vk_compute_pipeline.cpp')
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.cpp  296
1 file changed, 171 insertions(+), 125 deletions(-)
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 3a48219b7..70b84c7a6 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -2,152 +2,198 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <algorithm>
#include <vector>
+#include <boost/container/small_vector.hpp>
+
+#include "video_core/renderer_vulkan/pipeline_helper.h"
+#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
-#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/shader_notify.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace Vulkan {
-VKComputePipeline::VKComputePipeline(const Device& device_, VKScheduler& scheduler_,
- VKDescriptorPool& descriptor_pool_,
- VKUpdateDescriptorQueue& update_descriptor_queue_,
- const SPIRVShader& shader_)
- : device{device_}, scheduler{scheduler_}, entries{shader_.entries},
- descriptor_set_layout{CreateDescriptorSetLayout()},
- descriptor_allocator{descriptor_pool_, *descriptor_set_layout},
- update_descriptor_queue{update_descriptor_queue_}, layout{CreatePipelineLayout()},
- descriptor_template{CreateDescriptorUpdateTemplate()},
- shader_module{CreateShaderModule(shader_.code)}, pipeline{CreatePipeline()} {}
-
-VKComputePipeline::~VKComputePipeline() = default;
-
-VkDescriptorSet VKComputePipeline::CommitDescriptorSet() {
- if (!descriptor_template) {
- return {};
- }
- const VkDescriptorSet set = descriptor_allocator.Commit();
- update_descriptor_queue.Send(*descriptor_template, set);
- return set;
-}
-
-vk::DescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
- std::vector<VkDescriptorSetLayoutBinding> bindings;
- u32 binding = 0;
- const auto add_bindings = [&](VkDescriptorType descriptor_type, std::size_t num_entries) {
- // TODO(Rodrigo): Maybe make individual bindings here?
- for (u32 bindpoint = 0; bindpoint < static_cast<u32>(num_entries); ++bindpoint) {
- bindings.push_back({
- .binding = binding++,
- .descriptorType = descriptor_type,
- .descriptorCount = 1,
- .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
- .pImmutableSamplers = nullptr,
- });
- }
- };
- add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, entries.const_buffers.size());
- add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, entries.global_buffers.size());
- add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, entries.uniform_texels.size());
- add_bindings(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, entries.samplers.size());
- add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, entries.storage_texels.size());
- add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, entries.images.size());
-
- return device.GetLogical().CreateDescriptorSetLayout({
- .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
- .pNext = nullptr,
- .flags = 0,
- .bindingCount = static_cast<u32>(bindings.size()),
- .pBindings = bindings.data(),
- });
-}
-
-vk::PipelineLayout VKComputePipeline::CreatePipelineLayout() const {
- return device.GetLogical().CreatePipelineLayout({
- .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
- .pNext = nullptr,
- .flags = 0,
- .setLayoutCount = 1,
- .pSetLayouts = descriptor_set_layout.address(),
- .pushConstantRangeCount = 0,
- .pPushConstantRanges = nullptr,
- });
-}
-
-vk::DescriptorUpdateTemplateKHR VKComputePipeline::CreateDescriptorUpdateTemplate() const {
- std::vector<VkDescriptorUpdateTemplateEntryKHR> template_entries;
- u32 binding = 0;
- u32 offset = 0;
- FillDescriptorUpdateTemplateEntries(entries, binding, offset, template_entries);
- if (template_entries.empty()) {
- // If the shader doesn't use descriptor sets, skip template creation.
- return {};
+using Shader::ImageBufferDescriptor;
+using Tegra::Texture::TexturePair;
+
+ComputePipeline::ComputePipeline(const Device& device_, DescriptorPool& descriptor_pool,
+ VKUpdateDescriptorQueue& update_descriptor_queue_,
+ Common::ThreadWorker* thread_worker,
+ VideoCore::ShaderNotify* shader_notify, const Shader::Info& info_,
+ vk::ShaderModule spv_module_)
+ : device{device_}, update_descriptor_queue{update_descriptor_queue_}, info{info_},
+ spv_module(std::move(spv_module_)) {
+ if (shader_notify) {
+ shader_notify->MarkShaderBuilding();
}
-
- return device.GetLogical().CreateDescriptorUpdateTemplateKHR({
- .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR,
- .pNext = nullptr,
- .flags = 0,
- .descriptorUpdateEntryCount = static_cast<u32>(template_entries.size()),
- .pDescriptorUpdateEntries = template_entries.data(),
- .templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR,
- .descriptorSetLayout = *descriptor_set_layout,
- .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
- .pipelineLayout = *layout,
- .set = DESCRIPTOR_SET,
- });
-}
-
-vk::ShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const {
- device.SaveShader(code);
-
- return device.GetLogical().CreateShaderModule({
- .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
- .pNext = nullptr,
- .flags = 0,
- .codeSize = code.size() * sizeof(u32),
- .pCode = code.data(),
- });
-}
-
-vk::Pipeline VKComputePipeline::CreatePipeline() const {
-
- VkComputePipelineCreateInfo ci{
- .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
- .pNext = nullptr,
- .flags = 0,
- .stage =
- {
+ std::copy_n(info.constant_buffer_used_sizes.begin(), uniform_buffer_sizes.size(),
+ uniform_buffer_sizes.begin());
+
+ auto func{[this, &descriptor_pool, shader_notify] {
+ DescriptorLayoutBuilder builder{device};
+ builder.Add(info, VK_SHADER_STAGE_COMPUTE_BIT);
+
+ descriptor_set_layout = builder.CreateDescriptorSetLayout(false);
+ pipeline_layout = builder.CreatePipelineLayout(*descriptor_set_layout);
+ descriptor_update_template =
+ builder.CreateTemplate(*descriptor_set_layout, *pipeline_layout, false);
+ descriptor_allocator = descriptor_pool.Allocator(*descriptor_set_layout, info);
+ const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
+ .pNext = nullptr,
+ .requiredSubgroupSize = GuestWarpSize,
+ };
+ pipeline = device.GetLogical().CreateComputePipeline({
+ .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
+ .pNext = nullptr,
+ .flags = 0,
+ .stage{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
- .pNext = nullptr,
+ .pNext = device.IsExtSubgroupSizeControlSupported() ? &subgroup_size_ci : nullptr,
.flags = 0,
.stage = VK_SHADER_STAGE_COMPUTE_BIT,
- .module = *shader_module,
+ .module = *spv_module,
.pName = "main",
.pSpecializationInfo = nullptr,
},
- .layout = *layout,
- .basePipelineHandle = nullptr,
- .basePipelineIndex = 0,
- };
-
- const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
- .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
- .pNext = nullptr,
- .requiredSubgroupSize = GuestWarpSize,
- };
-
- if (entries.uses_warps && device.IsGuestWarpSizeSupported(VK_SHADER_STAGE_COMPUTE_BIT)) {
- ci.stage.pNext = &subgroup_size_ci;
+ .layout = *pipeline_layout,
+ .basePipelineHandle = 0,
+ .basePipelineIndex = 0,
+ });
+ std::lock_guard lock{build_mutex};
+ is_built = true;
+ build_condvar.notify_one();
+ if (shader_notify) {
+ shader_notify->MarkShaderComplete();
+ }
+ }};
+ if (thread_worker) {
+ thread_worker->QueueWork(std::move(func));
+ } else {
+ func();
+ }
+}
+
+void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
+ Tegra::MemoryManager& gpu_memory, VKScheduler& scheduler,
+ BufferCache& buffer_cache, TextureCache& texture_cache) {
+ update_descriptor_queue.Acquire();
+
+ buffer_cache.SetComputeUniformBufferState(info.constant_buffer_mask, &uniform_buffer_sizes);
+ buffer_cache.UnbindComputeStorageBuffers();
+ size_t ssbo_index{};
+ for (const auto& desc : info.storage_buffers_descriptors) {
+ ASSERT(desc.count == 1);
+ buffer_cache.BindComputeStorageBuffer(ssbo_index, desc.cbuf_index, desc.cbuf_offset,
+ desc.is_written);
+ ++ssbo_index;
}
- return device.GetLogical().CreateComputePipeline(ci);
+ texture_cache.SynchronizeComputeDescriptors();
+
+ static constexpr size_t max_elements = 64;
+ std::array<ImageId, max_elements> image_view_ids;
+ boost::container::static_vector<u32, max_elements> image_view_indices;
+ boost::container::static_vector<VkSampler, max_elements> samplers;
+
+ const auto& qmd{kepler_compute.launch_description};
+ const auto& cbufs{qmd.const_buffer_config};
+ const bool via_header_index{qmd.linked_tsc != 0};
+ const auto read_handle{[&](const auto& desc, u32 index) {
+ ASSERT(((qmd.const_buffer_enable_mask >> desc.cbuf_index) & 1) != 0);
+ const u32 index_offset{index << desc.size_shift};
+ const u32 offset{desc.cbuf_offset + index_offset};
+ const GPUVAddr addr{cbufs[desc.cbuf_index].Address() + offset};
+ if constexpr (std::is_same_v<decltype(desc), const Shader::TextureDescriptor&> ||
+ std::is_same_v<decltype(desc), const Shader::TextureBufferDescriptor&>) {
+ if (desc.has_secondary) {
+ ASSERT(((qmd.const_buffer_enable_mask >> desc.secondary_cbuf_index) & 1) != 0);
+ const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset};
+ const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() +
+ secondary_offset};
+ const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
+ const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
+ return TexturePair(lhs_raw | rhs_raw, via_header_index);
+ }
+ }
+ return TexturePair(gpu_memory.Read<u32>(addr), via_header_index);
+ }};
+ const auto add_image{[&](const auto& desc) {
+ for (u32 index = 0; index < desc.count; ++index) {
+ const auto handle{read_handle(desc, index)};
+ image_view_indices.push_back(handle.first);
+ }
+ }};
+ std::ranges::for_each(info.texture_buffer_descriptors, add_image);
+ std::ranges::for_each(info.image_buffer_descriptors, add_image);
+ for (const auto& desc : info.texture_descriptors) {
+ for (u32 index = 0; index < desc.count; ++index) {
+ const auto handle{read_handle(desc, index)};
+ image_view_indices.push_back(handle.first);
+
+ Sampler* const sampler = texture_cache.GetComputeSampler(handle.second);
+ samplers.push_back(sampler->Handle());
+ }
+ }
+ std::ranges::for_each(info.image_descriptors, add_image);
+
+ const std::span indices_span(image_view_indices.data(), image_view_indices.size());
+ texture_cache.FillComputeImageViews(indices_span, image_view_ids);
+
+ buffer_cache.UnbindComputeTextureBuffers();
+ ImageId* texture_buffer_ids{image_view_ids.data()};
+ size_t index{};
+ const auto add_buffer{[&](const auto& desc) {
+ constexpr bool is_image = std::is_same_v<decltype(desc), const ImageBufferDescriptor&>;
+ for (u32 i = 0; i < desc.count; ++i) {
+ bool is_written{false};
+ if constexpr (is_image) {
+ is_written = desc.is_written;
+ }
+ ImageView& image_view = texture_cache.GetImageView(*texture_buffer_ids);
+ buffer_cache.BindComputeTextureBuffer(index, image_view.GpuAddr(),
+ image_view.BufferSize(), image_view.format,
+ is_written, is_image);
+ ++texture_buffer_ids;
+ ++index;
+ }
+ }};
+ std::ranges::for_each(info.texture_buffer_descriptors, add_buffer);
+ std::ranges::for_each(info.image_buffer_descriptors, add_buffer);
+
+ buffer_cache.UpdateComputeBuffers();
+ buffer_cache.BindHostComputeBuffers();
+
+ const VkSampler* samplers_it{samplers.data()};
+ const ImageId* views_it{image_view_ids.data()};
+ PushImageDescriptors(info, samplers_it, views_it, texture_cache, update_descriptor_queue);
+
+ if (!is_built.load(std::memory_order::relaxed)) {
+ // Wait for the pipeline to be built
+ scheduler.Record([this](vk::CommandBuffer) {
+ std::unique_lock lock{build_mutex};
+ build_condvar.wait(lock, [this] { return is_built.load(std::memory_order::relaxed); });
+ });
+ }
+ const void* const descriptor_data{update_descriptor_queue.UpdateData()};
+ scheduler.Record([this, descriptor_data](vk::CommandBuffer cmdbuf) {
+ cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
+ if (!descriptor_set_layout) {
+ return;
+ }
+ const VkDescriptorSet descriptor_set{descriptor_allocator.Commit()};
+ const vk::Device& dev{device.GetLogical()};
+ dev.UpdateDescriptorSet(descriptor_set, *descriptor_update_template, descriptor_data);
+ cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline_layout, 0,
+ descriptor_set, nullptr);
+ });
}
} // namespace Vulkan