From 6ac97405df021d5d2bd9a529253bd5c5a418c1a9 Mon Sep 17 00:00:00 2001 From: ameerj Date: Tue, 28 Jul 2020 00:08:02 -0400 Subject: Vk Async pipeline compilation --- src/video_core/shader/async_shaders.cpp | 59 ++++++++++++++++++++++++++++++--- src/video_core/shader/async_shaders.h | 31 +++++++++++++++-- 2 files changed, 84 insertions(+), 6 deletions(-) (limited to 'src/video_core/shader') diff --git a/src/video_core/shader/async_shaders.cpp b/src/video_core/shader/async_shaders.cpp index b7f66d7ee..335a0d05b 100644 --- a/src/video_core/shader/async_shaders.cpp +++ b/src/video_core/shader/async_shaders.cpp @@ -113,15 +113,38 @@ void AsyncShaders::QueueOpenGLShader(const OpenGL::Device& device, VAddr cpu_addr) { WorkerParams params{device.UseAssemblyShaders() ? AsyncShaders::Backend::GLASM : AsyncShaders::Backend::OpenGL, - device, + &device, shader_type, uid, std::move(code), std::move(code_b), main_offset, compiler_settings, - registry, + ®istry, cpu_addr}; + + std::unique_lock lock(queue_mutex); + pending_queue.push_back(std::move(params)); + cv.notify_one(); +} + +void AsyncShaders::QueueVulkanShader( + Vulkan::VKPipelineCache* pp_cache, std::vector bindings, + Vulkan::SPIRVProgram program, Vulkan::RenderPassParams renderpass_params, u32 padding, + std::array shaders, + Vulkan::FixedPipelineState fixed_state) { + + WorkerParams params{ + .backend = AsyncShaders::Backend::Vulkan, + .pp_cache = pp_cache, + .bindings = bindings, + .program = program, + .renderpass_params = renderpass_params, + .padding = padding, + .shaders = shaders, + .fixed_state = fixed_state, + }; + std::unique_lock lock(queue_mutex); pending_queue.push_back(std::move(params)); cv.notify_one(); @@ -140,6 +163,7 @@ void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context if (!HasWorkQueued()) { continue; } + // Another thread beat us, just unlock and wait for the next load if (pending_queue.empty()) { continue; @@ -152,10 +176,11 @@ void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context if (work.backend == AsyncShaders::Backend::OpenGL || work.backend == AsyncShaders::Backend::GLASM) { - const ShaderIR ir(work.code, work.main_offset, work.compiler_settings, work.registry); + VideoCommon::Shader::Registry registry = *work.registry; + const ShaderIR ir(work.code, work.main_offset, work.compiler_settings, registry); const auto scope = context->Acquire(); auto program = - OpenGL::BuildShader(work.device, work.shader_type, work.uid, ir, work.registry); + OpenGL::BuildShader(*work.device, work.shader_type, work.uid, ir, registry); Result result{}; result.backend = work.backend; result.cpu_address = work.cpu_address; @@ -174,6 +199,32 @@ void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context std::unique_lock complete_lock(completed_mutex); finished_work.push_back(std::move(result)); } + + } else if (work.backend == AsyncShaders::Backend::Vulkan) { + Vulkan::GraphicsPipelineCacheKey params_key{ + work.renderpass_params, + work.padding, + work.shaders, + work.fixed_state, + }; + { + std::unique_lock complete_lock(completed_mutex); + + // Duplicate creation of pipelines leads to instability and crashing, caused by a + // race condition but band-aid solution is locking the making of the pipeline + // results in only one pipeline created at a time. 
+ Result result{ + .backend = work.backend, + .pipeline = std::make_unique( + work.pp_cache->GetDevice(), work.pp_cache->GetScheduler(), + work.pp_cache->GetDescriptorPool(), + work.pp_cache->GetUpdateDescriptorQueue(), + work.pp_cache->GetRenderpassCache(), params_key, work.bindings, + work.program), + }; + + finished_work.push_back(std::move(result)); + } } } } diff --git a/src/video_core/shader/async_shaders.h b/src/video_core/shader/async_shaders.h index 2f5ee94ad..702026ce2 100644 --- a/src/video_core/shader/async_shaders.h +++ b/src/video_core/shader/async_shaders.h @@ -14,6 +14,10 @@ #include "video_core/renderer_opengl/gl_device.h" #include "video_core/renderer_opengl/gl_resource_manager.h" #include "video_core/renderer_opengl/gl_shader_decompiler.h" +#include "video_core/renderer_vulkan/vk_device.h" +#include "video_core/renderer_vulkan/vk_pipeline_cache.h" +#include "video_core/renderer_vulkan/vk_scheduler.h" +#include "video_core/renderer_vulkan/vk_update_descriptor.h" namespace Core::Frontend { class EmuWindow; @@ -24,6 +28,10 @@ namespace Tegra { class GPU; } +namespace Vulkan { +class VKPipelineCache; +} + namespace VideoCommon::Shader { class AsyncShaders { @@ -31,6 +39,7 @@ public: enum class Backend { OpenGL, GLASM, + Vulkan, }; struct ResultPrograms { @@ -46,6 +55,7 @@ public: std::vector code; std::vector code_b; Tegra::Engines::ShaderType shader_type; + std::unique_ptr pipeline; }; explicit AsyncShaders(Core::Frontend::EmuWindow& emu_window); @@ -76,6 +86,13 @@ public: VideoCommon::Shader::CompilerSettings compiler_settings, const VideoCommon::Shader::Registry& registry, VAddr cpu_addr); + void QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache, + std::vector bindings, + Vulkan::SPIRVProgram program, Vulkan::RenderPassParams renderpass_params, + u32 padding, + std::array shaders, + Vulkan::FixedPipelineState fixed_state); + private: void ShaderCompilerThread(Core::Frontend::GraphicsContext* context); @@ -84,15 +101,25 @@ private: struct WorkerParams { AsyncShaders::Backend backend; - OpenGL::Device device; + // For OGL + const OpenGL::Device* device; Tegra::Engines::ShaderType shader_type; u64 uid; std::vector code; std::vector code_b; u32 main_offset; VideoCommon::Shader::CompilerSettings compiler_settings; - VideoCommon::Shader::Registry registry; + const VideoCommon::Shader::Registry* registry; VAddr cpu_address; + + // For Vulkan + Vulkan::VKPipelineCache* pp_cache; + std::vector bindings; + Vulkan::SPIRVProgram program; + Vulkan::RenderPassParams renderpass_params; + u32 padding; + std::array shaders; + Vulkan::FixedPipelineState fixed_state; }; std::condition_variable cv; -- cgit v1.2.3 From 4539073ce1d8fd6df03263e826d3805b4909e055 Mon Sep 17 00:00:00 2001 From: ameerj Date: Thu, 30 Jul 2020 15:41:11 -0400 Subject: Address feedback. Bruteforce delete duplicates --- src/video_core/shader/async_shaders.cpp | 135 ++++++++++++++++++-------------- src/video_core/shader/async_shaders.h | 4 +- 2 files changed, 78 insertions(+), 61 deletions(-) (limited to 'src/video_core/shader') diff --git a/src/video_core/shader/async_shaders.cpp b/src/video_core/shader/async_shaders.cpp index 335a0d05b..c536b025b 100644 --- a/src/video_core/shader/async_shaders.cpp +++ b/src/video_core/shader/async_shaders.cpp @@ -111,20 +111,19 @@ void AsyncShaders::QueueOpenGLShader(const OpenGL::Device& device, VideoCommon::Shader::CompilerSettings compiler_settings, const VideoCommon::Shader::Registry& registry, VAddr cpu_addr) { - WorkerParams params{device.UseAssemblyShaders() ? 
AsyncShaders::Backend::GLASM - : AsyncShaders::Backend::OpenGL, - &device, - shader_type, - uid, - std::move(code), - std::move(code_b), - main_offset, - compiler_settings, - ®istry, - cpu_addr}; - + auto p = std::make_unique(); + p->backend = device.UseAssemblyShaders() ? Backend::GLASM : Backend::OpenGL; + p->device = &device; + p->shader_type = shader_type; + p->uid = uid; + p->code = std::move(code); + p->code_b = std::move(code_b); + p->main_offset = main_offset; + p->compiler_settings = compiler_settings; + p->registry = ®istry; + p->cpu_address = cpu_addr; std::unique_lock lock(queue_mutex); - pending_queue.push_back(std::move(params)); + pending_queue.push(std::move(p)); cv.notify_one(); } @@ -134,19 +133,19 @@ void AsyncShaders::QueueVulkanShader( std::array shaders, Vulkan::FixedPipelineState fixed_state) { - WorkerParams params{ - .backend = AsyncShaders::Backend::Vulkan, - .pp_cache = pp_cache, - .bindings = bindings, - .program = program, - .renderpass_params = renderpass_params, - .padding = padding, - .shaders = shaders, - .fixed_state = fixed_state, - }; + auto p = std::make_unique(); + + p->backend = Backend::Vulkan; + p->pp_cache = pp_cache; + p->bindings = bindings; + p->program = program; + p->renderpass_params = renderpass_params; + p->padding = padding; + p->shaders = shaders; + p->fixed_state = fixed_state; std::unique_lock lock(queue_mutex); - pending_queue.push_back(std::move(params)); + pending_queue.push(std::move(p)); cv.notify_one(); } @@ -168,64 +167,82 @@ void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context if (pending_queue.empty()) { continue; } - // Pull work from queue - WorkerParams work = std::move(pending_queue.front()); - pending_queue.pop_front(); + // Pull work from queue + auto work = std::move(pending_queue.front()); + pending_queue.pop(); lock.unlock(); - if (work.backend == AsyncShaders::Backend::OpenGL || - work.backend == AsyncShaders::Backend::GLASM) { - VideoCommon::Shader::Registry registry = *work.registry; - const ShaderIR ir(work.code, work.main_offset, work.compiler_settings, registry); + if (work->backend == Backend::OpenGL || work->backend == Backend::GLASM) { + VideoCommon::Shader::Registry registry = *work->registry; + const ShaderIR ir(work->code, work->main_offset, work->compiler_settings, registry); const auto scope = context->Acquire(); auto program = - OpenGL::BuildShader(*work.device, work.shader_type, work.uid, ir, registry); + OpenGL::BuildShader(*work->device, work->shader_type, work->uid, ir, registry); Result result{}; - result.backend = work.backend; - result.cpu_address = work.cpu_address; - result.uid = work.uid; - result.code = std::move(work.code); - result.code_b = std::move(work.code_b); - result.shader_type = work.shader_type; - - if (work.backend == AsyncShaders::Backend::OpenGL) { + result.backend = work->backend; + result.cpu_address = work->cpu_address; + result.uid = work->uid; + result.code = std::move(work->code); + result.code_b = std::move(work->code_b); + result.shader_type = work->shader_type; + // LOG_CRITICAL(Render_Vulkan, "Shader hast been Compiled \t0x{:016X} id {}", + // result.uid, id); + + if (work->backend == Backend::OpenGL) { result.program.opengl = std::move(program->source_program); - } else if (work.backend == AsyncShaders::Backend::GLASM) { + } else if (work->backend == Backend::GLASM) { result.program.glasm = std::move(program->assembly_program); } + work.reset(); { std::unique_lock complete_lock(completed_mutex); finished_work.push_back(std::move(result)); } 
- - } else if (work.backend == AsyncShaders::Backend::Vulkan) { + } else if (work->backend == Backend::Vulkan) { Vulkan::GraphicsPipelineCacheKey params_key{ - work.renderpass_params, - work.padding, - work.shaders, - work.fixed_state, + .renderpass_params = work->renderpass_params, + .padding = work->padding, + .shaders = work->shaders, + .fixed_state = work->fixed_state, }; + { - std::unique_lock complete_lock(completed_mutex); + std::unique_lock find_lock{completed_mutex}; + for (size_t i = 0; i < finished_work.size(); ++i) { + // This loop deletes duplicate pipelines in finished_work + // in favor of the pipeline about to be created + + if (finished_work[i].pipeline && + finished_work[i].pipeline->GetCacheKey().Hash() == params_key.Hash()) { + LOG_CRITICAL(Render_Vulkan, + "Pipeliene was already here \t0x{:016X} matches 0x{:016X} ", + params_key.Hash(), + finished_work[i].pipeline->GetCacheKey().Hash()); + finished_work.erase(finished_work.begin() + i); + } + } + find_lock.unlock(); + } + + auto pipeline = std::make_unique( + work->pp_cache->GetDevice(), work->pp_cache->GetScheduler(), + work->pp_cache->GetDescriptorPool(), work->pp_cache->GetUpdateDescriptorQueue(), + work->pp_cache->GetRenderpassCache(), params_key, work->bindings, work->program); - // Duplicate creation of pipelines leads to instability and crashing, caused by a - // race condition but band-aid solution is locking the making of the pipeline - // results in only one pipeline created at a time. + { + std::unique_lock complete_lock(completed_mutex); Result result{ - .backend = work.backend, - .pipeline = std::make_unique( - work.pp_cache->GetDevice(), work.pp_cache->GetScheduler(), - work.pp_cache->GetDescriptorPool(), - work.pp_cache->GetUpdateDescriptorQueue(), - work.pp_cache->GetRenderpassCache(), params_key, work.bindings, - work.program), + .backend = Backend::Vulkan, + .pipeline = std::move(pipeline), }; - finished_work.push_back(std::move(result)); + complete_lock.unlock(); } } + // Give a chance for another thread to get work. 
Lessens duplicates + std::this_thread::yield(); } } diff --git a/src/video_core/shader/async_shaders.h b/src/video_core/shader/async_shaders.h index 702026ce2..d4eeb8fb6 100644 --- a/src/video_core/shader/async_shaders.h +++ b/src/video_core/shader/async_shaders.h @@ -100,7 +100,7 @@ private: bool HasWorkQueued(); struct WorkerParams { - AsyncShaders::Backend backend; + Backend backend; // For OGL const OpenGL::Device* device; Tegra::Engines::ShaderType shader_type; @@ -128,7 +128,7 @@ private: std::atomic is_thread_exiting{}; std::vector> context_list; std::vector worker_threads; - std::deque pending_queue; + std::queue> pending_queue; std::vector finished_work; Core::Frontend::EmuWindow& emu_window; }; -- cgit v1.2.3 From c02464f64e302b8c9ea0f310e6fd85834d26cca5 Mon Sep 17 00:00:00 2001 From: ameerj Date: Fri, 31 Jul 2020 17:30:05 -0400 Subject: Vk Async Worker directly emplace in cache --- src/video_core/shader/async_shaders.cpp | 78 +++++++++++---------------------- 1 file changed, 25 insertions(+), 53 deletions(-) (limited to 'src/video_core/shader') diff --git a/src/video_core/shader/async_shaders.cpp b/src/video_core/shader/async_shaders.cpp index c536b025b..54a81460b 100644 --- a/src/video_core/shader/async_shaders.cpp +++ b/src/video_core/shader/async_shaders.cpp @@ -111,19 +111,19 @@ void AsyncShaders::QueueOpenGLShader(const OpenGL::Device& device, VideoCommon::Shader::CompilerSettings compiler_settings, const VideoCommon::Shader::Registry& registry, VAddr cpu_addr) { - auto p = std::make_unique(); - p->backend = device.UseAssemblyShaders() ? Backend::GLASM : Backend::OpenGL; - p->device = &device; - p->shader_type = shader_type; - p->uid = uid; - p->code = std::move(code); - p->code_b = std::move(code_b); - p->main_offset = main_offset; - p->compiler_settings = compiler_settings; - p->registry = ®istry; - p->cpu_address = cpu_addr; + auto params = std::make_unique(); + params->backend = device.UseAssemblyShaders() ? 
Backend::GLASM : Backend::OpenGL; + params->device = &device; + params->shader_type = shader_type; + params->uid = uid; + params->code = std::move(code); + params->code_b = std::move(code_b); + params->main_offset = main_offset; + params->compiler_settings = compiler_settings; + params->registry = ®istry; + params->cpu_address = cpu_addr; std::unique_lock lock(queue_mutex); - pending_queue.push(std::move(p)); + pending_queue.push(std::move(params)); cv.notify_one(); } @@ -133,19 +133,19 @@ void AsyncShaders::QueueVulkanShader( std::array shaders, Vulkan::FixedPipelineState fixed_state) { - auto p = std::make_unique(); + auto params = std::make_unique(); - p->backend = Backend::Vulkan; - p->pp_cache = pp_cache; - p->bindings = bindings; - p->program = program; - p->renderpass_params = renderpass_params; - p->padding = padding; - p->shaders = shaders; - p->fixed_state = fixed_state; + params->backend = Backend::Vulkan; + params->pp_cache = pp_cache; + params->bindings = bindings; + params->program = program; + params->renderpass_params = renderpass_params; + params->padding = padding; + params->shaders = shaders; + params->fixed_state = fixed_state; std::unique_lock lock(queue_mutex); - pending_queue.push(std::move(p)); + pending_queue.push(std::move(params)); cv.notify_one(); } @@ -162,7 +162,6 @@ void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context if (!HasWorkQueued()) { continue; } - // Another thread beat us, just unlock and wait for the next load if (pending_queue.empty()) { continue; @@ -186,8 +185,6 @@ void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context result.code = std::move(work->code); result.code_b = std::move(work->code_b); result.shader_type = work->shader_type; - // LOG_CRITICAL(Render_Vulkan, "Shader hast been Compiled \t0x{:016X} id {}", - // result.uid, id); if (work->backend == Backend::OpenGL) { result.program.opengl = std::move(program->source_program); @@ -208,40 +205,15 @@ void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context .fixed_state = work->fixed_state, }; - { - std::unique_lock find_lock{completed_mutex}; - for (size_t i = 0; i < finished_work.size(); ++i) { - // This loop deletes duplicate pipelines in finished_work - // in favor of the pipeline about to be created - - if (finished_work[i].pipeline && - finished_work[i].pipeline->GetCacheKey().Hash() == params_key.Hash()) { - LOG_CRITICAL(Render_Vulkan, - "Pipeliene was already here \t0x{:016X} matches 0x{:016X} ", - params_key.Hash(), - finished_work[i].pipeline->GetCacheKey().Hash()); - finished_work.erase(finished_work.begin() + i); - } - } - find_lock.unlock(); - } - auto pipeline = std::make_unique( work->pp_cache->GetDevice(), work->pp_cache->GetScheduler(), work->pp_cache->GetDescriptorPool(), work->pp_cache->GetUpdateDescriptorQueue(), work->pp_cache->GetRenderpassCache(), params_key, work->bindings, work->program); - { - std::unique_lock complete_lock(completed_mutex); - Result result{ - .backend = Backend::Vulkan, - .pipeline = std::move(pipeline), - }; - finished_work.push_back(std::move(result)); - complete_lock.unlock(); - } + work->pp_cache->EmplacePipeline(std::move(pipeline)); + work.reset(); } - // Give a chance for another thread to get work. Lessens duplicates + // Give a chance for another thread to get work. 
std::this_thread::yield(); } } -- cgit v1.2.3 From 31a76410e8fa09462d960c10148c075125dc385a Mon Sep 17 00:00:00 2001 From: ameerj Date: Sun, 2 Aug 2020 13:05:41 -0400 Subject: Address feedback, add shader compile notifier, update setting text --- src/video_core/shader/async_shaders.cpp | 110 +++++++++++++++----------------- src/video_core/shader/async_shaders.h | 23 ++++--- 2 files changed, 65 insertions(+), 68 deletions(-) (limited to 'src/video_core/shader') diff --git a/src/video_core/shader/async_shaders.cpp b/src/video_core/shader/async_shaders.cpp index 54a81460b..ea813d506 100644 --- a/src/video_core/shader/async_shaders.cpp +++ b/src/video_core/shader/async_shaders.cpp @@ -2,7 +2,6 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. -#include #include #include #include @@ -111,38 +110,44 @@ void AsyncShaders::QueueOpenGLShader(const OpenGL::Device& device, VideoCommon::Shader::CompilerSettings compiler_settings, const VideoCommon::Shader::Registry& registry, VAddr cpu_addr) { - auto params = std::make_unique(); - params->backend = device.UseAssemblyShaders() ? Backend::GLASM : Backend::OpenGL; - params->device = &device; - params->shader_type = shader_type; - params->uid = uid; - params->code = std::move(code); - params->code_b = std::move(code_b); - params->main_offset = main_offset; - params->compiler_settings = compiler_settings; - params->registry = ®istry; - params->cpu_address = cpu_addr; + WorkerParams params{ + .backend = device.UseAssemblyShaders() ? Backend::GLASM : Backend::OpenGL, + .device = &device, + .shader_type = shader_type, + .uid = uid, + .code = std::move(code), + .code_b = std::move(code_b), + .main_offset = main_offset, + .compiler_settings = compiler_settings, + .registry = ®istry, + .cpu_address = cpu_addr, + }; std::unique_lock lock(queue_mutex); pending_queue.push(std::move(params)); cv.notify_one(); } -void AsyncShaders::QueueVulkanShader( - Vulkan::VKPipelineCache* pp_cache, std::vector bindings, - Vulkan::SPIRVProgram program, Vulkan::RenderPassParams renderpass_params, u32 padding, - std::array shaders, - Vulkan::FixedPipelineState fixed_state) { - - auto params = std::make_unique(); - - params->backend = Backend::Vulkan; - params->pp_cache = pp_cache; - params->bindings = bindings; - params->program = program; - params->renderpass_params = renderpass_params; - params->padding = padding; - params->shaders = shaders; - params->fixed_state = fixed_state; +void AsyncShaders::QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache, + const Vulkan::VKDevice& device, Vulkan::VKScheduler& scheduler, + Vulkan::VKDescriptorPool& descriptor_pool, + Vulkan::VKUpdateDescriptorQueue& update_descriptor_queue, + Vulkan::VKRenderPassCache& renderpass_cache, + std::vector bindings, + Vulkan::SPIRVProgram program, + Vulkan::GraphicsPipelineCacheKey key) { + + WorkerParams params{ + .backend = Backend::Vulkan, + .pp_cache = pp_cache, + .vk_device = &device, + .scheduler = &scheduler, + .descriptor_pool = &descriptor_pool, + .update_descriptor_queue = &update_descriptor_queue, + .renderpass_cache = &renderpass_cache, + .bindings = bindings, + .program = program, + .key = key, + }; std::unique_lock lock(queue_mutex); pending_queue.push(std::move(params)); @@ -150,7 +155,6 @@ void AsyncShaders::QueueVulkanShader( } void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context) { - using namespace std::chrono_literals; while (!is_thread_exiting.load(std::memory_order_relaxed)) { std::unique_lock lock{queue_mutex}; 
cv.wait(lock, [this] { return HasWorkQueued() || is_thread_exiting; }); @@ -168,53 +172,43 @@ void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context } // Pull work from queue - auto work = std::move(pending_queue.front()); + WorkerParams work = std::move(pending_queue.front()); pending_queue.pop(); lock.unlock(); - if (work->backend == Backend::OpenGL || work->backend == Backend::GLASM) { - VideoCommon::Shader::Registry registry = *work->registry; - const ShaderIR ir(work->code, work->main_offset, work->compiler_settings, registry); + if (work.backend == Backend::OpenGL || work.backend == Backend::GLASM) { + VideoCommon::Shader::Registry registry = *work.registry; + const ShaderIR ir(work.code, work.main_offset, work.compiler_settings, registry); const auto scope = context->Acquire(); auto program = - OpenGL::BuildShader(*work->device, work->shader_type, work->uid, ir, registry); + OpenGL::BuildShader(*work.device, work.shader_type, work.uid, ir, registry); Result result{}; - result.backend = work->backend; - result.cpu_address = work->cpu_address; - result.uid = work->uid; - result.code = std::move(work->code); - result.code_b = std::move(work->code_b); - result.shader_type = work->shader_type; - - if (work->backend == Backend::OpenGL) { + result.backend = work.backend; + result.cpu_address = work.cpu_address; + result.uid = work.uid; + result.code = std::move(work.code); + result.code_b = std::move(work.code_b); + result.shader_type = work.shader_type; + + if (work.backend == Backend::OpenGL) { result.program.opengl = std::move(program->source_program); - } else if (work->backend == Backend::GLASM) { + } else if (work.backend == Backend::GLASM) { result.program.glasm = std::move(program->assembly_program); } - work.reset(); { std::unique_lock complete_lock(completed_mutex); finished_work.push_back(std::move(result)); } - } else if (work->backend == Backend::Vulkan) { - Vulkan::GraphicsPipelineCacheKey params_key{ - .renderpass_params = work->renderpass_params, - .padding = work->padding, - .shaders = work->shaders, - .fixed_state = work->fixed_state, - }; + } else if (work.backend == Backend::Vulkan) { auto pipeline = std::make_unique( - work->pp_cache->GetDevice(), work->pp_cache->GetScheduler(), - work->pp_cache->GetDescriptorPool(), work->pp_cache->GetUpdateDescriptorQueue(), - work->pp_cache->GetRenderpassCache(), params_key, work->bindings, work->program); + *work.vk_device, *work.scheduler, *work.descriptor_pool, + *work.update_descriptor_queue, *work.renderpass_cache, work.key, work.bindings, + work.program); - work->pp_cache->EmplacePipeline(std::move(pipeline)); - work.reset(); + work.pp_cache->EmplacePipeline(std::move(pipeline)); } - // Give a chance for another thread to get work. 
- std::this_thread::yield(); } } diff --git a/src/video_core/shader/async_shaders.h b/src/video_core/shader/async_shaders.h index d4eeb8fb6..7c10bd63f 100644 --- a/src/video_core/shader/async_shaders.h +++ b/src/video_core/shader/async_shaders.h @@ -86,12 +86,13 @@ public: VideoCommon::Shader::CompilerSettings compiler_settings, const VideoCommon::Shader::Registry& registry, VAddr cpu_addr); - void QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache, + void QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache, const Vulkan::VKDevice& device, + Vulkan::VKScheduler& scheduler, + Vulkan::VKDescriptorPool& descriptor_pool, + Vulkan::VKUpdateDescriptorQueue& update_descriptor_queue, + Vulkan::VKRenderPassCache& renderpass_cache, std::vector bindings, - Vulkan::SPIRVProgram program, Vulkan::RenderPassParams renderpass_params, - u32 padding, - std::array shaders, - Vulkan::FixedPipelineState fixed_state); + Vulkan::SPIRVProgram program, Vulkan::GraphicsPipelineCacheKey key); private: void ShaderCompilerThread(Core::Frontend::GraphicsContext* context); @@ -114,12 +115,14 @@ private: // For Vulkan Vulkan::VKPipelineCache* pp_cache; + const Vulkan::VKDevice* vk_device; + Vulkan::VKScheduler* scheduler; + Vulkan::VKDescriptorPool* descriptor_pool; + Vulkan::VKUpdateDescriptorQueue* update_descriptor_queue; + Vulkan::VKRenderPassCache* renderpass_cache; std::vector bindings; Vulkan::SPIRVProgram program; - Vulkan::RenderPassParams renderpass_params; - u32 padding; - std::array shaders; - Vulkan::FixedPipelineState fixed_state; + Vulkan::GraphicsPipelineCacheKey key; }; std::condition_variable cv; @@ -128,7 +131,7 @@ private: std::atomic is_thread_exiting{}; std::vector> context_list; std::vector worker_threads; - std::queue> pending_queue; + std::queue pending_queue; std::vector finished_work; Core::Frontend::EmuWindow& emu_window; }; -- cgit v1.2.3 From 1b829fbd7a36f9c2b553b04aa39bdf8135d30458 Mon Sep 17 00:00:00 2001 From: ameerj Date: Wed, 5 Aug 2020 12:53:26 -0400 Subject: move thread 1/4 count computation into allocate workers method --- src/video_core/shader/async_shaders.cpp | 13 +++++++++++-- src/video_core/shader/async_shaders.h | 2 +- 2 files changed, 12 insertions(+), 3 deletions(-) (limited to 'src/video_core/shader') diff --git a/src/video_core/shader/async_shaders.cpp b/src/video_core/shader/async_shaders.cpp index ea813d506..6a1b8999c 100644 --- a/src/video_core/shader/async_shaders.cpp +++ b/src/video_core/shader/async_shaders.cpp @@ -19,9 +19,18 @@ AsyncShaders::~AsyncShaders() { KillWorkers(); } -void AsyncShaders::AllocateWorkers(std::size_t num_workers) { +void AsyncShaders::AllocateWorkers() { + // Max worker threads we should allow + constexpr u32 MAX_THREADS = 4; + // Deduce how many threads we can use + const u32 threads_used = std::thread::hardware_concurrency() / 4; + // Always allow at least 1 thread regardless of our settings + const auto max_worker_count = std::max(1U, threads_used); + // Don't use more than MAX_THREADS + const auto num_workers = std::min(max_worker_count, MAX_THREADS); + // If we're already have workers queued or don't want to queue workers, ignore - if (num_workers == worker_threads.size() || num_workers == 0) { + if (num_workers == worker_threads.size()) { return; } diff --git a/src/video_core/shader/async_shaders.h b/src/video_core/shader/async_shaders.h index 7c10bd63f..5b58dd9bd 100644 --- a/src/video_core/shader/async_shaders.h +++ b/src/video_core/shader/async_shaders.h @@ -62,7 +62,7 @@ public: ~AsyncShaders(); /// Start up shader worker threads - 
void AllocateWorkers(std::size_t num_workers); + void AllocateWorkers(); /// Clear the shader queue and kill all worker threads void FreeWorkers(); -- cgit v1.2.3 From f49ffdd6488e74591ed66d911c13c3684762d425 Mon Sep 17 00:00:00 2001 From: Ameer J <52414509+ameerj@users.noreply.github.com> Date: Wed, 5 Aug 2020 16:41:22 -0400 Subject: Morph: Update worker allocation comment Co-authored-by: Morph <39850852+Morph1984@users.noreply.github.com> --- src/video_core/shader/async_shaders.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/video_core/shader') diff --git a/src/video_core/shader/async_shaders.cpp b/src/video_core/shader/async_shaders.cpp index 6a1b8999c..91d1b6bbd 100644 --- a/src/video_core/shader/async_shaders.cpp +++ b/src/video_core/shader/async_shaders.cpp @@ -29,7 +29,7 @@ void AsyncShaders::AllocateWorkers() { // Don't use more than MAX_THREADS const auto num_workers = std::min(max_worker_count, MAX_THREADS); - // If we're already have workers queued or don't want to queue workers, ignore + // If we already have workers queued, ignore if (num_workers == worker_threads.size()) { return; } -- cgit v1.2.3 From fde8102a415c546e88346258bf42de2a248113b1 Mon Sep 17 00:00:00 2001 From: ameerj Date: Sun, 16 Aug 2020 16:33:21 -0400 Subject: Remove unneeded newlines, optional Registry in shader params Addressing feedback from Rodrigo --- src/video_core/shader/async_shaders.cpp | 9 +++------ src/video_core/shader/async_shaders.h | 3 +-- 2 files changed, 4 insertions(+), 8 deletions(-) (limited to 'src/video_core/shader') diff --git a/src/video_core/shader/async_shaders.cpp b/src/video_core/shader/async_shaders.cpp index 91d1b6bbd..6c19eaf07 100644 --- a/src/video_core/shader/async_shaders.cpp +++ b/src/video_core/shader/async_shaders.cpp @@ -128,7 +128,7 @@ void AsyncShaders::QueueOpenGLShader(const OpenGL::Device& device, .code_b = std::move(code_b), .main_offset = main_offset, .compiler_settings = compiler_settings, - .registry = ®istry, + .registry = registry, .cpu_address = cpu_addr, }; std::unique_lock lock(queue_mutex); @@ -144,7 +144,6 @@ void AsyncShaders::QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache, std::vector bindings, Vulkan::SPIRVProgram program, Vulkan::GraphicsPipelineCacheKey key) { - WorkerParams params{ .backend = Backend::Vulkan, .pp_cache = pp_cache, @@ -186,11 +185,10 @@ void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context lock.unlock(); if (work.backend == Backend::OpenGL || work.backend == Backend::GLASM) { - VideoCommon::Shader::Registry registry = *work.registry; - const ShaderIR ir(work.code, work.main_offset, work.compiler_settings, registry); + const ShaderIR ir(work.code, work.main_offset, work.compiler_settings, *work.registry); const auto scope = context->Acquire(); auto program = - OpenGL::BuildShader(*work.device, work.shader_type, work.uid, ir, registry); + OpenGL::BuildShader(*work.device, work.shader_type, work.uid, ir, *work.registry); Result result{}; result.backend = work.backend; result.cpu_address = work.cpu_address; @@ -210,7 +208,6 @@ void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context finished_work.push_back(std::move(result)); } } else if (work.backend == Backend::Vulkan) { - auto pipeline = std::make_unique( *work.vk_device, *work.scheduler, *work.descriptor_pool, *work.update_descriptor_queue, *work.renderpass_cache, work.key, work.bindings, diff --git a/src/video_core/shader/async_shaders.h b/src/video_core/shader/async_shaders.h index 5b58dd9bd..d5ae814d5 
100644
--- a/src/video_core/shader/async_shaders.h
+++ b/src/video_core/shader/async_shaders.h
@@ -55,7 +55,6 @@ public:
         std::vector code;
         std::vector code_b;
         Tegra::Engines::ShaderType shader_type;
-        std::unique_ptr pipeline;
     };
 
     explicit AsyncShaders(Core::Frontend::EmuWindow& emu_window);
@@ -110,7 +109,7 @@ private:
         std::vector code_b;
         u32 main_offset;
         VideoCommon::Shader::CompilerSettings compiler_settings;
-        const VideoCommon::Shader::Registry* registry;
+        std::optional<VideoCommon::Shader::Registry> registry;
         VAddr cpu_address;
 
         // For Vulkan
--
cgit v1.2.3
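
For readers who want to try the worker-count heuristic introduced by the "move thread 1/4 count computation into allocate workers method" patch in isolation, the following standalone sketch reproduces it with only the standard library. It is illustrative and not part of the patch series; the constants mirror AsyncShaders::AllocateWorkers() above.

#include <algorithm>
#include <iostream>
#include <thread>

int main() {
    // Mirror of AsyncShaders::AllocateWorkers(): use a quarter of the reported
    // hardware threads, but never fewer than 1 and never more than 4 workers.
    constexpr unsigned MAX_THREADS = 4;
    const unsigned threads_used = std::thread::hardware_concurrency() / 4;
    const unsigned num_workers = std::min(std::max(1u, threads_used), MAX_THREADS);
    std::cout << "Would spawn " << num_workers << " shader worker thread(s)\n";
    return 0;
}

Dividing hardware_concurrency() by four keeps shader compilation from starving the emulator's own threads, while the clamp guarantees at least one worker even when the processor count cannot be detected (hardware_concurrency() may return 0) and caps the pool at four workers.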