-rw-r--r--  src/video_core/CMakeLists.txt                           |   2
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.cpp  | 112
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.h    |  66
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.h      |  39
4 files changed, 219 insertions(+), 0 deletions(-)
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index efdd2c902..61ac0f23a 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -155,6 +155,8 @@ if (ENABLE_VULKAN)
renderer_vulkan/maxwell_to_vk.h
renderer_vulkan/vk_buffer_cache.cpp
renderer_vulkan/vk_buffer_cache.h
+ renderer_vulkan/vk_compute_pipeline.cpp
+ renderer_vulkan/vk_compute_pipeline.h
renderer_vulkan/vk_descriptor_pool.cpp
renderer_vulkan/vk_descriptor_pool.h
renderer_vulkan/vk_device.cpp
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
new file mode 100644
index 000000000..9d5b8de7a
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -0,0 +1,112 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <memory>
+#include <vector>
+
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
+#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
+#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/vk_scheduler.h"
+#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
+#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+
+namespace Vulkan {
+
+VKComputePipeline::VKComputePipeline(const VKDevice& device, VKScheduler& scheduler,
+ VKDescriptorPool& descriptor_pool,
+ VKUpdateDescriptorQueue& update_descriptor_queue,
+ const SPIRVShader& shader)
+ : device{device}, scheduler{scheduler}, entries{shader.entries},
+ descriptor_set_layout{CreateDescriptorSetLayout()},
+ descriptor_allocator{descriptor_pool, *descriptor_set_layout},
+ update_descriptor_queue{update_descriptor_queue}, layout{CreatePipelineLayout()},
+ descriptor_template{CreateDescriptorUpdateTemplate()},
+ shader_module{CreateShaderModule(shader.code)}, pipeline{CreatePipeline()} {}
+
+VKComputePipeline::~VKComputePipeline() = default;
+
+vk::DescriptorSet VKComputePipeline::CommitDescriptorSet() {
+ if (!descriptor_template) {
+ return {};
+ }
+ const auto set = descriptor_allocator.Commit(scheduler.GetFence());
+ update_descriptor_queue.Send(*descriptor_template, set);
+ return set;
+}
+
+UniqueDescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
+ std::vector<vk::DescriptorSetLayoutBinding> bindings;
+ u32 binding = 0;
+ const auto AddBindings = [&](vk::DescriptorType descriptor_type, std::size_t num_entries) {
+ // TODO(Rodrigo): Maybe make individual bindings here?
+ for (u32 bindpoint = 0; bindpoint < static_cast<u32>(num_entries); ++bindpoint) {
+ bindings.emplace_back(binding++, descriptor_type, 1, vk::ShaderStageFlagBits::eCompute,
+ nullptr);
+ }
+ };
+ AddBindings(vk::DescriptorType::eUniformBuffer, entries.const_buffers.size());
+ AddBindings(vk::DescriptorType::eStorageBuffer, entries.global_buffers.size());
+ AddBindings(vk::DescriptorType::eUniformTexelBuffer, entries.texel_buffers.size());
+ AddBindings(vk::DescriptorType::eCombinedImageSampler, entries.samplers.size());
+ AddBindings(vk::DescriptorType::eStorageImage, entries.images.size());
+
+ const vk::DescriptorSetLayoutCreateInfo descriptor_set_layout_ci(
+ {}, static_cast<u32>(bindings.size()), bindings.data());
+
+ const auto dev = device.GetLogical();
+ const auto& dld = device.GetDispatchLoader();
+ return dev.createDescriptorSetLayoutUnique(descriptor_set_layout_ci, nullptr, dld);
+}
+
+UniquePipelineLayout VKComputePipeline::CreatePipelineLayout() const {
+ const vk::PipelineLayoutCreateInfo layout_ci({}, 1, &*descriptor_set_layout, 0, nullptr);
+ const auto dev = device.GetLogical();
+ return dev.createPipelineLayoutUnique(layout_ci, nullptr, device.GetDispatchLoader());
+}
+
+UniqueDescriptorUpdateTemplate VKComputePipeline::CreateDescriptorUpdateTemplate() const {
+ std::vector<vk::DescriptorUpdateTemplateEntry> template_entries;
+ u32 binding = 0;
+ u32 offset = 0;
+ FillDescriptorUpdateTemplateEntries(device, entries, binding, offset, template_entries);
+ if (template_entries.empty()) {
+ // If the shader doesn't use descriptor sets, skip template creation.
+ return UniqueDescriptorUpdateTemplate{};
+ }
+
+ const vk::DescriptorUpdateTemplateCreateInfo template_ci(
+ {}, static_cast<u32>(template_entries.size()), template_entries.data(),
+ vk::DescriptorUpdateTemplateType::eDescriptorSet, *descriptor_set_layout,
+ vk::PipelineBindPoint::eGraphics, *layout, DESCRIPTOR_SET);
+
+ const auto dev = device.GetLogical();
+ const auto& dld = device.GetDispatchLoader();
+ return dev.createDescriptorUpdateTemplateUnique(template_ci, nullptr, dld);
+}
+
+UniqueShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const {
+ const vk::ShaderModuleCreateInfo module_ci({}, code.size() * sizeof(u32), code.data());
+ const auto dev = device.GetLogical();
+ return dev.createShaderModuleUnique(module_ci, nullptr, device.GetDispatchLoader());
+}
+
+UniquePipeline VKComputePipeline::CreatePipeline() const {
+ vk::PipelineShaderStageCreateInfo shader_stage_ci({}, vk::ShaderStageFlagBits::eCompute,
+ *shader_module, "main", nullptr);
+ vk::PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci;
+ subgroup_size_ci.requiredSubgroupSize = GuestWarpSize;
+ if (entries.uses_warps && device.IsGuestWarpSizeSupported(vk::ShaderStageFlagBits::eCompute)) {
+ shader_stage_ci.pNext = &subgroup_size_ci;
+ }
+
+ const vk::ComputePipelineCreateInfo create_info({}, shader_stage_ci, *layout, {}, 0);
+ const auto dev = device.GetLogical();
+ return dev.createComputePipelineUnique({}, create_info, nullptr, device.GetDispatchLoader());
+}
+
+} // namespace Vulkan
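
For context, a minimal usage sketch of the new class (not part of this change): it assumes a vk::CommandBuffer being recorded, the backend's dynamic dispatch loader, and the grid dimensions are available from the surrounding renderer, and that DESCRIPTOR_SET is the set index already used by CreateDescriptorUpdateTemplate above.

// Hypothetical sketch, not from this commit: binding and dispatching a
// VKComputePipeline. `cmdbuf`, `dld` and the grid dimensions are assumed to
// be provided by the surrounding renderer.
void DispatchExample(vk::CommandBuffer cmdbuf, const vk::DispatchLoaderDynamic& dld,
                     VKComputePipeline& pipeline, u32 grid_x, u32 grid_y, u32 grid_z) {
    cmdbuf.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline.GetHandle(), dld);

    // CommitDescriptorSet returns a null handle when the shader uses no
    // descriptors (see the early-out in CreateDescriptorUpdateTemplate).
    if (const vk::DescriptorSet set = pipeline.CommitDescriptorSet(); set) {
        cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eCompute, pipeline.GetLayout(),
                                  DESCRIPTOR_SET, 1, &set, 0, nullptr, dld);
    }
    cmdbuf.dispatch(grid_x, grid_y, grid_z, dld);
}
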
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.h b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
new file mode 100644
index 000000000..22235c6c9
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
@@ -0,0 +1,66 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+
+#include "common/common_types.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
+#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
+
+namespace Vulkan {
+
+class VKDevice;
+class VKScheduler;
+class VKUpdateDescriptorQueue;
+
+class VKComputePipeline final {
+public:
+ explicit VKComputePipeline(const VKDevice& device, VKScheduler& scheduler,
+ VKDescriptorPool& descriptor_pool,
+ VKUpdateDescriptorQueue& update_descriptor_queue,
+ const SPIRVShader& shader);
+ ~VKComputePipeline();
+
+ vk::DescriptorSet CommitDescriptorSet();
+
+ vk::Pipeline GetHandle() const {
+ return *pipeline;
+ }
+
+ vk::PipelineLayout GetLayout() const {
+ return *layout;
+ }
+
+ const ShaderEntries& GetEntries() {
+ return entries;
+ }
+
+private:
+ UniqueDescriptorSetLayout CreateDescriptorSetLayout() const;
+
+ UniquePipelineLayout CreatePipelineLayout() const;
+
+ UniqueDescriptorUpdateTemplate CreateDescriptorUpdateTemplate() const;
+
+ UniqueShaderModule CreateShaderModule(const std::vector<u32>& code) const;
+
+ UniquePipeline CreatePipeline() const;
+
+ const VKDevice& device;
+ VKScheduler& scheduler;
+ ShaderEntries entries;
+
+ UniqueDescriptorSetLayout descriptor_set_layout;
+ DescriptorAllocator descriptor_allocator;
+ VKUpdateDescriptorQueue& update_descriptor_queue;
+ UniquePipelineLayout layout;
+ UniqueDescriptorUpdateTemplate descriptor_template;
+ UniqueShaderModule shader_module;
+ UniquePipeline pipeline;
+};
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index 532ee45cc..33b1a1d23 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -4,9 +4,12 @@
#pragma once

+#include <array>
+#include <cstddef>
#include <vector>

#include "common/common_types.h"
+#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
#include "video_core/shader/shader_ir.h"

@@ -15,6 +18,42 @@ namespace Vulkan {
class VKDevice;

+struct ComputePipelineCacheKey {
+ GPUVAddr shader{};
+ u32 shared_memory_size{};
+ std::array<u32, 3> workgroup_size{};
+
+ std::size_t Hash() const noexcept {
+ return static_cast<std::size_t>(shader) ^
+ ((static_cast<std::size_t>(shared_memory_size) >> 7) << 40) ^
+ static_cast<std::size_t>(workgroup_size[0]) ^
+ (static_cast<std::size_t>(workgroup_size[1]) << 16) ^
+ (static_cast<std::size_t>(workgroup_size[2]) << 24);
+ }
+
+ bool operator==(const ComputePipelineCacheKey& rhs) const noexcept {
+ return std::tie(shader, shared_memory_size, workgroup_size) ==
+ std::tie(rhs.shader, rhs.shared_memory_size, rhs.workgroup_size);
+ }
+};
+
+} // namespace Vulkan
+
+namespace std {
+
+template <>
+struct hash<Vulkan::ComputePipelineCacheKey> {
+ std::size_t operator()(const Vulkan::ComputePipelineCacheKey& k) const noexcept {
+ return k.Hash();
+ }
+};
+
+} // namespace std
+
+namespace Vulkan {
+
+class VKDevice;
+
void FillDescriptorUpdateTemplateEntries(
const VKDevice& device, const ShaderEntries& entries, u32& binding, u32& offset,
std::vector<vk::DescriptorUpdateTemplateEntry>& template_entries);
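
For reference, an illustrative sketch of how the new ComputePipelineCacheKey and its std::hash specialization could back a pipeline lookup table. The container and the GetComputePipeline/CreateComputePipeline names below are hypothetical and not part of this change.

// Hypothetical sketch, not from this commit: the std::hash specialization
// above lets ComputePipelineCacheKey key a map of compiled pipelines.
#include <memory>
#include <unordered_map>

std::unordered_map<ComputePipelineCacheKey, std::unique_ptr<VKComputePipeline>> compute_cache;

VKComputePipeline& GetComputePipeline(const ComputePipelineCacheKey& key) {
    auto [it, is_cache_miss] = compute_cache.try_emplace(key);
    if (is_cache_miss) {
        // On a miss, decompile the shader at key.shader and build the pipeline.
        it->second = CreateComputePipeline(key); // hypothetical helper
    }
    return *it->second;
}
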