Diffstat (limited to 'src/video_core')
56 files changed, 625 insertions, 639 deletions
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index d85f1e9d1..f52b55ef3 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -269,5 +269,5 @@ endif() if (MSVC) target_compile_options(video_core PRIVATE /we4267) else() - target_compile_options(video_core PRIVATE -Werror=conversion -Wno-error=sign-conversion) + target_compile_options(video_core PRIVATE -Werror=conversion -Wno-error=sign-conversion -Werror=switch) endif() diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index b5dc68902..e7edd733f 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -51,46 +51,43 @@ public: bool is_written = false, bool use_fast_cbuf = false) { std::lock_guard lock{mutex}; - auto& memory_manager = system.GPU().MemoryManager(); - const std::optional<VAddr> cpu_addr_opt = memory_manager.GpuToCpuAddress(gpu_addr); - if (!cpu_addr_opt) { + const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); + if (!cpu_addr) { return GetEmptyBuffer(size); } - const VAddr cpu_addr = *cpu_addr_opt; // Cache management is a big overhead, so only cache entries with a given size. // TODO: Figure out which size is the best for given games. constexpr std::size_t max_stream_size = 0x800; if (use_fast_cbuf || size < max_stream_size) { - if (!is_written && !IsRegionWritten(cpu_addr, cpu_addr + size - 1)) { - const bool is_granular = memory_manager.IsGranularRange(gpu_addr, size); + if (!is_written && !IsRegionWritten(*cpu_addr, *cpu_addr + size - 1)) { + const bool is_granular = gpu_memory.IsGranularRange(gpu_addr, size); if (use_fast_cbuf) { u8* dest; if (is_granular) { - dest = memory_manager.GetPointer(gpu_addr); + dest = gpu_memory.GetPointer(gpu_addr); } else { staging_buffer.resize(size); dest = staging_buffer.data(); - memory_manager.ReadBlockUnsafe(gpu_addr, dest, size); + gpu_memory.ReadBlockUnsafe(gpu_addr, dest, size); } return ConstBufferUpload(dest, size); } if (is_granular) { - u8* const host_ptr = memory_manager.GetPointer(gpu_addr); + u8* const host_ptr = gpu_memory.GetPointer(gpu_addr); return StreamBufferUpload(size, alignment, [host_ptr, size](u8* dest) { std::memcpy(dest, host_ptr, size); }); } else { - return StreamBufferUpload( - size, alignment, [&memory_manager, gpu_addr, size](u8* dest) { - memory_manager.ReadBlockUnsafe(gpu_addr, dest, size); - }); + return StreamBufferUpload(size, alignment, [this, gpu_addr, size](u8* dest) { + gpu_memory.ReadBlockUnsafe(gpu_addr, dest, size); + }); } } } - Buffer* const block = GetBlock(cpu_addr, size); - MapInterval* const map = MapAddress(block, gpu_addr, cpu_addr, size); + Buffer* const block = GetBlock(*cpu_addr, size); + MapInterval* const map = MapAddress(block, gpu_addr, *cpu_addr, size); if (!map) { return GetEmptyBuffer(size); } @@ -106,7 +103,7 @@ public: } } - return BufferInfo{block->Handle(), block->Offset(cpu_addr), block->Address()}; + return BufferInfo{block->Handle(), block->Offset(*cpu_addr), block->Address()}; } /// Uploads from a host memory. Returns the OpenGL buffer where it's located and its offset. 
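The buffer-cache hunk above is representative of the whole series: instead of fetching system.GPU().MemoryManager() on every call, the cache receives Tegra::MemoryManager& (and Core::Memory::Memory&) once at construction and stores plain references, and the GpuToCpuAddress() optional is kept as an optional and dereferenced where needed. A minimal, self-contained sketch of that shape follows; GpuMemory, CpuMemory and ExampleCache are illustrative stand-ins, not the real yuzu types.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <optional>

// Illustrative stand-ins for Tegra::MemoryManager and Core::Memory::Memory.
struct GpuMemory {
    std::optional<std::uintptr_t> GpuToCpuAddress(std::uint64_t gpu_addr) const {
        return static_cast<std::uintptr_t>(gpu_addr); // identity mapping, for the sketch only
    }
    void ReadBlockUnsafe(std::uint64_t, void* dest, std::size_t size) const {
        std::memset(dest, 0, size); // placeholder read
    }
};

struct CpuMemory {
    void WriteBlockUnsafe(std::uintptr_t, const void*, std::size_t) {}
};

class ExampleCache {
public:
    // Dependencies are injected once at construction time instead of being
    // looked up through Core::System on every call.
    explicit ExampleCache(GpuMemory& gpu_memory_, CpuMemory& cpu_memory_)
        : gpu_memory{gpu_memory_}, cpu_memory{cpu_memory_} {}

    bool CopyBlock(std::uint64_t gpu_addr, std::size_t size, void* staging) {
        // The optional stays an optional and is dereferenced where needed,
        // mirroring the *cpu_addr usage in the buffer cache hunk above.
        const std::optional<std::uintptr_t> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
        if (!cpu_addr) {
            return false;
        }
        gpu_memory.ReadBlockUnsafe(gpu_addr, staging, size);
        cpu_memory.WriteBlockUnsafe(*cpu_addr, staging, size);
        return true;
    }

private:
    GpuMemory& gpu_memory;
    CpuMemory& cpu_memory;
};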
@@ -262,9 +259,11 @@ public: virtual BufferInfo GetEmptyBuffer(std::size_t size) = 0; protected: - explicit BufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system, - std::unique_ptr<StreamBuffer> stream_buffer) - : rasterizer{rasterizer}, system{system}, stream_buffer{std::move(stream_buffer)} {} + explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_, + Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, + std::unique_ptr<StreamBuffer> stream_buffer_) + : rasterizer{rasterizer_}, gpu_memory{gpu_memory_}, cpu_memory{cpu_memory_}, + stream_buffer{std::move(stream_buffer_)}, stream_buffer_handle{stream_buffer->Handle()} {} ~BufferCache() = default; @@ -326,14 +325,13 @@ private: MapInterval* MapAddress(Buffer* block, GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size) { const VectorMapInterval overlaps = GetMapsInRange(cpu_addr, size); if (overlaps.empty()) { - auto& memory_manager = system.GPU().MemoryManager(); const VAddr cpu_addr_end = cpu_addr + size; - if (memory_manager.IsGranularRange(gpu_addr, size)) { - u8* host_ptr = memory_manager.GetPointer(gpu_addr); + if (gpu_memory.IsGranularRange(gpu_addr, size)) { + u8* const host_ptr = gpu_memory.GetPointer(gpu_addr); block->Upload(block->Offset(cpu_addr), size, host_ptr); } else { staging_buffer.resize(size); - memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size); + gpu_memory.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size); block->Upload(block->Offset(cpu_addr), size, staging_buffer.data()); } return Register(MapInterval(cpu_addr, cpu_addr_end, gpu_addr)); @@ -392,7 +390,7 @@ private: continue; } staging_buffer.resize(size); - system.Memory().ReadBlockUnsafe(interval.lower(), staging_buffer.data(), size); + cpu_memory.ReadBlockUnsafe(interval.lower(), staging_buffer.data(), size); block->Upload(block->Offset(interval.lower()), size, staging_buffer.data()); } } @@ -431,7 +429,7 @@ private: const std::size_t size = map->end - map->start; staging_buffer.resize(size); block->Download(block->Offset(map->start), size, staging_buffer.data()); - system.Memory().WriteBlockUnsafe(map->start, staging_buffer.data(), size); + cpu_memory.WriteBlockUnsafe(map->start, staging_buffer.data(), size); map->MarkAsModified(false, 0); } @@ -567,7 +565,8 @@ private: } VideoCore::RasterizerInterface& rasterizer; - Core::System& system; + Tegra::MemoryManager& gpu_memory; + Core::Memory::Memory& cpu_memory; std::unique_ptr<StreamBuffer> stream_buffer; BufferType stream_buffer_handle; diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h index 06cc12d5a..de6991ef6 100644 --- a/src/video_core/fence_manager.h +++ b/src/video_core/fence_manager.h @@ -74,8 +74,6 @@ public: } void WaitPendingFences() { - auto& gpu{system.GPU()}; - auto& memory_manager{gpu.MemoryManager()}; while (!fences.empty()) { TFence& current_fence = fences.front(); if (ShouldWait()) { @@ -83,8 +81,8 @@ public: } PopAsyncFlushes(); if (current_fence->IsSemaphore()) { - memory_manager.template Write<u32>(current_fence->GetAddress(), - current_fence->GetPayload()); + gpu_memory.template Write<u32>(current_fence->GetAddress(), + current_fence->GetPayload()); } else { gpu.IncrementSyncPoint(current_fence->GetPayload()); } @@ -93,13 +91,13 @@ public: } protected: - FenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer, - TTextureCache& texture_cache, TTBufferCache& buffer_cache, - TQueryCache& query_cache) - : system{system}, rasterizer{rasterizer}, texture_cache{texture_cache}, - 
buffer_cache{buffer_cache}, query_cache{query_cache} {} + explicit FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_, + TTextureCache& texture_cache_, TTBufferCache& buffer_cache_, + TQueryCache& query_cache_) + : rasterizer{rasterizer_}, gpu{gpu_}, gpu_memory{gpu.MemoryManager()}, + texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, query_cache{query_cache_} {} - virtual ~FenceManager() {} + virtual ~FenceManager() = default; /// Creates a Sync Point Fence Interface, does not create a backend fence if 'is_stubbed' is /// true @@ -113,16 +111,15 @@ protected: /// Waits until a fence has been signalled by the host GPU. virtual void WaitFence(TFence& fence) = 0; - Core::System& system; VideoCore::RasterizerInterface& rasterizer; + Tegra::GPU& gpu; + Tegra::MemoryManager& gpu_memory; TTextureCache& texture_cache; TTBufferCache& buffer_cache; TQueryCache& query_cache; private: void TryReleasePendingFences() { - auto& gpu{system.GPU()}; - auto& memory_manager{gpu.MemoryManager()}; while (!fences.empty()) { TFence& current_fence = fences.front(); if (ShouldWait() && !IsFenceSignaled(current_fence)) { @@ -130,8 +127,8 @@ private: } PopAsyncFlushes(); if (current_fence->IsSemaphore()) { - memory_manager.template Write<u32>(current_fence->GetAddress(), - current_fence->GetPayload()); + gpu_memory.template Write<u32>(current_fence->GetAddress(), + current_fence->GetPayload()); } else { gpu.IncrementSyncPoint(current_fence->GetPayload()); } diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index acb6e6d46..4bb9256e9 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -28,8 +28,8 @@ namespace Tegra { MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192)); GPU::GPU(Core::System& system_, bool is_async_) - : system{system_}, dma_pusher{std::make_unique<Tegra::DmaPusher>(system, *this)}, - memory_manager{std::make_unique<Tegra::MemoryManager>(system)}, + : system{system_}, memory_manager{std::make_unique<Tegra::MemoryManager>(system)}, + dma_pusher{std::make_unique<Tegra::DmaPusher>(system, *this)}, maxwell_3d{std::make_unique<Engines::Maxwell3D>(system, *memory_manager)}, fermi_2d{std::make_unique<Engines::Fermi2D>()}, kepler_compute{std::make_unique<Engines::KeplerCompute>(system, *memory_manager)}, diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index c7d11deb2..2d15d1c6f 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h @@ -347,12 +347,11 @@ private: protected: Core::System& system; + std::unique_ptr<Tegra::MemoryManager> memory_manager; std::unique_ptr<Tegra::DmaPusher> dma_pusher; std::unique_ptr<VideoCore::RendererBase> renderer; private: - std::unique_ptr<Tegra::MemoryManager> memory_manager; - /// Mapping of command subchannels to their bound engine ids std::array<EngineID, 8> bound_engines = {}; /// 3D engine diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h index 0d3a88765..d13a66bb6 100644 --- a/src/video_core/query_cache.h +++ b/src/video_core/query_cache.h @@ -95,10 +95,12 @@ template <class QueryCache, class CachedQuery, class CounterStream, class HostCo class QueryPool> class QueryCacheBase { public: - explicit QueryCacheBase(Core::System& system, VideoCore::RasterizerInterface& rasterizer) - : system{system}, rasterizer{rasterizer}, streams{{CounterStream{ - static_cast<QueryCache&>(*this), - VideoCore::QueryType::SamplesPassed}}} {} + explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_, + Tegra::Engines::Maxwell3D& maxwell3d_, + 
Tegra::MemoryManager& gpu_memory_) + : rasterizer{rasterizer_}, maxwell3d{maxwell3d_}, + gpu_memory{gpu_memory_}, streams{{CounterStream{static_cast<QueryCache&>(*this), + VideoCore::QueryType::SamplesPassed}}} {} void InvalidateRegion(VAddr addr, std::size_t size) { std::unique_lock lock{mutex}; @@ -118,29 +120,27 @@ public: */ void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) { std::unique_lock lock{mutex}; - auto& memory_manager = system.GPU().MemoryManager(); - const std::optional<VAddr> cpu_addr_opt = memory_manager.GpuToCpuAddress(gpu_addr); - ASSERT(cpu_addr_opt); - VAddr cpu_addr = *cpu_addr_opt; + const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); + ASSERT(cpu_addr); - CachedQuery* query = TryGet(cpu_addr); + CachedQuery* query = TryGet(*cpu_addr); if (!query) { - ASSERT_OR_EXECUTE(cpu_addr_opt, return;); - const auto host_ptr = memory_manager.GetPointer(gpu_addr); + ASSERT_OR_EXECUTE(cpu_addr, return;); + u8* const host_ptr = gpu_memory.GetPointer(gpu_addr); - query = Register(type, cpu_addr, host_ptr, timestamp.has_value()); + query = Register(type, *cpu_addr, host_ptr, timestamp.has_value()); } query->BindCounter(Stream(type).Current(), timestamp); if (Settings::values.use_asynchronous_gpu_emulation.GetValue()) { - AsyncFlushQuery(cpu_addr); + AsyncFlushQuery(*cpu_addr); } } /// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch. void UpdateCounters() { std::unique_lock lock{mutex}; - const auto& regs = system.GPU().Maxwell3D().regs; + const auto& regs = maxwell3d.regs; Stream(VideoCore::QueryType::SamplesPassed).Update(regs.samplecnt_enable); } @@ -270,8 +270,9 @@ private: static constexpr std::uintptr_t PAGE_SIZE = 4096; static constexpr unsigned PAGE_BITS = 12; - Core::System& system; VideoCore::RasterizerInterface& rasterizer; + Tegra::Engines::Maxwell3D& maxwell3d; + Tegra::MemoryManager& gpu_memory; std::recursive_mutex mutex; diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h index 3cbdac8e7..b3e0919f8 100644 --- a/src/video_core/rasterizer_interface.h +++ b/src/video_core/rasterizer_interface.h @@ -106,11 +106,8 @@ public: virtual void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {} /// Initialize disk cached resources for the game being emulated - virtual void LoadDiskResources(const std::atomic_bool& stop_loading = false, - const DiskResourceLoadCallback& callback = {}) {} - - /// Initializes renderer dirty flags - virtual void SetupDirtyFlags() {} + virtual void LoadDiskResources(u64 title_id, const std::atomic_bool& stop_loading, + const DiskResourceLoadCallback& callback) {} /// Grant access to the Guest Driver Profile for recording/obtaining info on the guest driver. 
GuestDriverProfile& AccessGuestDriverProfile() { diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp index e866d8f2f..b1c4cd62f 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp @@ -59,9 +59,10 @@ void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst static_cast<GLintptr>(dst_offset), static_cast<GLsizeiptr>(size)); } -OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, Core::System& system, +OGLBufferCache::OGLBufferCache(VideoCore::RasterizerInterface& rasterizer, + Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory, const Device& device_, std::size_t stream_size) - : GenericBufferCache{rasterizer, system, + : GenericBufferCache{rasterizer, gpu_memory, cpu_memory, std::make_unique<OGLStreamBuffer>(device_, stream_size, true)}, device{device_} { if (!device.HasFastBufferSubData()) { diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h index 88fdc0536..f75b32e31 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.h +++ b/src/video_core/renderer_opengl/gl_buffer_cache.h @@ -52,7 +52,8 @@ private: using GenericBufferCache = VideoCommon::BufferCache<Buffer, GLuint, OGLStreamBuffer>; class OGLBufferCache final : public GenericBufferCache { public: - explicit OGLBufferCache(RasterizerOpenGL& rasterizer, Core::System& system, + explicit OGLBufferCache(VideoCore::RasterizerInterface& rasterizer, + Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory, const Device& device, std::size_t stream_size); ~OGLBufferCache(); diff --git a/src/video_core/renderer_opengl/gl_fence_manager.cpp b/src/video_core/renderer_opengl/gl_fence_manager.cpp index 3d2588dd2..b532fdcc2 100644 --- a/src/video_core/renderer_opengl/gl_fence_manager.cpp +++ b/src/video_core/renderer_opengl/gl_fence_manager.cpp @@ -45,11 +45,10 @@ void GLInnerFence::Wait() { glClientWaitSync(sync_object.handle, 0, GL_TIMEOUT_IGNORED); } -FenceManagerOpenGL::FenceManagerOpenGL(Core::System& system, - VideoCore::RasterizerInterface& rasterizer, +FenceManagerOpenGL::FenceManagerOpenGL(VideoCore::RasterizerInterface& rasterizer, Tegra::GPU& gpu, TextureCacheOpenGL& texture_cache, OGLBufferCache& buffer_cache, QueryCache& query_cache) - : GenericFenceManager(system, rasterizer, texture_cache, buffer_cache, query_cache) {} + : GenericFenceManager{rasterizer, gpu, texture_cache, buffer_cache, query_cache} {} Fence FenceManagerOpenGL::CreateFence(u32 value, bool is_stubbed) { return std::make_shared<GLInnerFence>(value, is_stubbed); diff --git a/src/video_core/renderer_opengl/gl_fence_manager.h b/src/video_core/renderer_opengl/gl_fence_manager.h index 1686cf5c8..da1dcdace 100644 --- a/src/video_core/renderer_opengl/gl_fence_manager.h +++ b/src/video_core/renderer_opengl/gl_fence_manager.h @@ -37,9 +37,9 @@ using GenericFenceManager = class FenceManagerOpenGL final : public GenericFenceManager { public: - FenceManagerOpenGL(Core::System& system, VideoCore::RasterizerInterface& rasterizer, - TextureCacheOpenGL& texture_cache, OGLBufferCache& buffer_cache, - QueryCache& query_cache); + explicit FenceManagerOpenGL(VideoCore::RasterizerInterface& rasterizer, Tegra::GPU& gpu, + TextureCacheOpenGL& texture_cache, OGLBufferCache& buffer_cache, + QueryCache& query_cache); protected: Fence CreateFence(u32 value, bool is_stubbed) override; diff --git 
a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp index d7ba57aca..2bb8ec2b8 100644 --- a/src/video_core/renderer_opengl/gl_query_cache.cpp +++ b/src/video_core/renderer_opengl/gl_query_cache.cpp @@ -30,12 +30,13 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) { } // Anonymous namespace -QueryCache::QueryCache(Core::System& system, RasterizerOpenGL& gl_rasterizer) +QueryCache::QueryCache(RasterizerOpenGL& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d, + Tegra::MemoryManager& gpu_memory) : VideoCommon::QueryCacheBase< QueryCache, CachedQuery, CounterStream, HostCounter, - std::vector<OGLQuery>>{system, - static_cast<VideoCore::RasterizerInterface&>(gl_rasterizer)}, - gl_rasterizer{gl_rasterizer} {} + std::vector<OGLQuery>>{static_cast<VideoCore::RasterizerInterface&>(rasterizer), + maxwell3d, gpu_memory}, + gl_rasterizer{rasterizer} {} QueryCache::~QueryCache() = default; diff --git a/src/video_core/renderer_opengl/gl_query_cache.h b/src/video_core/renderer_opengl/gl_query_cache.h index d8e7052a1..dd626b66b 100644 --- a/src/video_core/renderer_opengl/gl_query_cache.h +++ b/src/video_core/renderer_opengl/gl_query_cache.h @@ -29,7 +29,8 @@ using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>; class QueryCache final : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter, std::vector<OGLQuery>> { public: - explicit QueryCache(Core::System& system, RasterizerOpenGL& rasterizer); + explicit QueryCache(RasterizerOpenGL& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d, + Tegra::MemoryManager& gpu_memory); ~QueryCache(); OGLQuery AllocateQuery(VideoCore::QueryType type); diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 4af5824cd..bbb2eb17c 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -153,16 +153,19 @@ void UpdateBindlessPointers(GLenum target, GLuint64EXT* pointers, std::size_t nu } // Anonymous namespace -RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWindow& emu_window, - const Device& device, ScreenInfo& info, - ProgramManager& program_manager, StateTracker& state_tracker) - : RasterizerAccelerated{system.Memory()}, device{device}, texture_cache{system, *this, device, - state_tracker}, - shader_cache{*this, system, emu_window, device}, query_cache{system, *this}, - buffer_cache{*this, system, device, STREAM_BUFFER_SIZE}, - fence_manager{system, *this, texture_cache, buffer_cache, query_cache}, system{system}, - screen_info{info}, program_manager{program_manager}, state_tracker{state_tracker}, - async_shaders{emu_window} { +RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu_, + Core::Memory::Memory& cpu_memory, const Device& device_, + ScreenInfo& screen_info_, ProgramManager& program_manager_, + StateTracker& state_tracker_) + : RasterizerAccelerated{cpu_memory}, gpu(gpu_), maxwell3d(gpu.Maxwell3D()), + kepler_compute(gpu.KeplerCompute()), gpu_memory(gpu.MemoryManager()), device(device_), + screen_info(screen_info_), program_manager(program_manager_), state_tracker(state_tracker_), + texture_cache(*this, maxwell3d, gpu_memory, device, state_tracker), + shader_cache(*this, emu_window, gpu, maxwell3d, kepler_compute, gpu_memory, device), + query_cache(*this, maxwell3d, gpu_memory), + buffer_cache(*this, gpu_memory, cpu_memory, device, STREAM_BUFFER_SIZE), + 
fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache), + async_shaders(emu_window) { CheckExtensions(); unified_uniform_buffer.Create(); @@ -196,8 +199,7 @@ void RasterizerOpenGL::CheckExtensions() { } void RasterizerOpenGL::SetupVertexFormat() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::VertexFormats]) { return; } @@ -217,7 +219,7 @@ void RasterizerOpenGL::SetupVertexFormat() { } flags[Dirty::VertexFormat0 + index] = false; - const auto attrib = gpu.regs.vertex_attrib_format[index]; + const auto attrib = maxwell3d.regs.vertex_attrib_format[index]; const auto gl_index = static_cast<GLuint>(index); // Disable constant attributes. @@ -241,8 +243,7 @@ void RasterizerOpenGL::SetupVertexFormat() { } void RasterizerOpenGL::SetupVertexBuffer() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::VertexBuffers]) { return; } @@ -253,7 +254,7 @@ void RasterizerOpenGL::SetupVertexBuffer() { const bool use_unified_memory = device.HasVertexBufferUnifiedMemory(); // Upload all guest vertex arrays sequentially to our buffer - const auto& regs = gpu.regs; + const auto& regs = maxwell3d.regs; for (std::size_t index = 0; index < NUM_SUPPORTED_VERTEX_BINDINGS; ++index) { if (!flags[Dirty::VertexBuffer0 + index]) { continue; @@ -290,14 +291,13 @@ void RasterizerOpenGL::SetupVertexBuffer() { } void RasterizerOpenGL::SetupVertexInstances() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::VertexInstances]) { return; } flags[Dirty::VertexInstances] = false; - const auto& regs = gpu.regs; + const auto& regs = maxwell3d.regs; for (std::size_t index = 0; index < NUM_SUPPORTED_VERTEX_ATTRIBUTES; ++index) { if (!flags[Dirty::VertexInstance0 + index]) { continue; @@ -313,7 +313,7 @@ void RasterizerOpenGL::SetupVertexInstances() { GLintptr RasterizerOpenGL::SetupIndexBuffer() { MICROPROFILE_SCOPE(OpenGL_Index); - const auto& regs = system.GPU().Maxwell3D().regs; + const auto& regs = maxwell3d.regs; const std::size_t size = CalculateIndexBufferSize(); const auto info = buffer_cache.UploadMemory(regs.index_array.IndexStart(), size); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, info.handle); @@ -322,15 +322,14 @@ GLintptr RasterizerOpenGL::SetupIndexBuffer() { void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) { MICROPROFILE_SCOPE(OpenGL_Shader); - auto& gpu = system.GPU().Maxwell3D(); u32 clip_distances = 0; for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) { - const auto& shader_config = gpu.regs.shader_config[index]; + const auto& shader_config = maxwell3d.regs.shader_config[index]; const auto program{static_cast<Maxwell::ShaderProgram>(index)}; // Skip stages that are not enabled - if (!gpu.regs.IsShaderConfigEnabled(index)) { + if (!maxwell3d.regs.IsShaderConfigEnabled(index)) { switch (program) { case Maxwell::ShaderProgram::Geometry: program_manager.UseGeometryShader(0); @@ -391,11 +390,11 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) { } SyncClipEnabled(clip_distances); - gpu.dirty.flags[Dirty::Shaders] = false; + maxwell3d.dirty.flags[Dirty::Shaders] = false; } std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const { - const auto& regs = system.GPU().Maxwell3D().regs; + const auto& regs = maxwell3d.regs; std::size_t size = 0; for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) { @@ -413,34 +412,27 
@@ std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const { } std::size_t RasterizerOpenGL::CalculateIndexBufferSize() const { - const auto& regs = system.GPU().Maxwell3D().regs; - - return static_cast<std::size_t>(regs.index_array.count) * - static_cast<std::size_t>(regs.index_array.FormatSizeInBytes()); + return static_cast<std::size_t>(maxwell3d.regs.index_array.count) * + static_cast<std::size_t>(maxwell3d.regs.index_array.FormatSizeInBytes()); } -void RasterizerOpenGL::LoadDiskResources(const std::atomic_bool& stop_loading, +void RasterizerOpenGL::LoadDiskResources(u64 title_id, const std::atomic_bool& stop_loading, const VideoCore::DiskResourceLoadCallback& callback) { - shader_cache.LoadDiskCache(stop_loading, callback); -} - -void RasterizerOpenGL::SetupDirtyFlags() { - state_tracker.Initialize(); + shader_cache.LoadDiskCache(title_id, stop_loading, callback); } void RasterizerOpenGL::ConfigureFramebuffers() { MICROPROFILE_SCOPE(OpenGL_Framebuffer); - auto& gpu = system.GPU().Maxwell3D(); - if (!gpu.dirty.flags[VideoCommon::Dirty::RenderTargets]) { + if (!maxwell3d.dirty.flags[VideoCommon::Dirty::RenderTargets]) { return; } - gpu.dirty.flags[VideoCommon::Dirty::RenderTargets] = false; + maxwell3d.dirty.flags[VideoCommon::Dirty::RenderTargets] = false; texture_cache.GuardRenderTargets(true); View depth_surface = texture_cache.GetDepthBufferSurface(true); - const auto& regs = gpu.regs; + const auto& regs = maxwell3d.regs; UNIMPLEMENTED_IF(regs.rt_separate_frag_data == 0); // Bind the framebuffer surfaces @@ -472,8 +464,7 @@ void RasterizerOpenGL::ConfigureFramebuffers() { } void RasterizerOpenGL::ConfigureClearFramebuffer(bool using_color, bool using_depth_stencil) { - auto& gpu = system.GPU().Maxwell3D(); - const auto& regs = gpu.regs; + const auto& regs = maxwell3d.regs; texture_cache.GuardRenderTargets(true); View color_surface; @@ -523,12 +514,11 @@ void RasterizerOpenGL::ConfigureClearFramebuffer(bool using_color, bool using_de } void RasterizerOpenGL::Clear() { - const auto& gpu = system.GPU().Maxwell3D(); - if (!gpu.ShouldExecute()) { + if (!maxwell3d.ShouldExecute()) { return; } - const auto& regs = gpu.regs; + const auto& regs = maxwell3d.regs; bool use_color{}; bool use_depth{}; bool use_stencil{}; @@ -593,7 +583,6 @@ void RasterizerOpenGL::Clear() { void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { MICROPROFILE_SCOPE(OpenGL_Drawing); - auto& gpu = system.GPU().Maxwell3D(); query_cache.UpdateCounters(); @@ -641,7 +630,7 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { if (invalidated) { // When the stream buffer has been invalidated, we have to consider vertex buffers as dirty - auto& dirty = gpu.dirty.flags; + auto& dirty = maxwell3d.dirty.flags; dirty[Dirty::VertexBuffers] = true; for (int index = Dirty::VertexBuffer0; index <= Dirty::VertexBuffer31; ++index) { dirty[index] = true; @@ -662,7 +651,7 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { // Setup emulation uniform buffer. if (!device.UseAssemblyShaders()) { MaxwellUniformData ubo; - ubo.SetFromRegs(gpu); + ubo.SetFromRegs(maxwell3d); const auto info = buffer_cache.UploadHostMemory(&ubo, sizeof(ubo), device.GetUniformBufferAlignment()); glBindBufferRange(GL_UNIFORM_BUFFER, EmulationUniformBlockBinding, info.handle, info.offset, @@ -671,7 +660,7 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { // Setup shaders and their used resources. 
texture_cache.GuardSamplers(true); - const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(gpu.regs.draw.topology); + const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d.regs.draw.topology); SetupShaders(primitive_mode); texture_cache.GuardSamplers(false); @@ -688,14 +677,14 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { BeginTransformFeedback(primitive_mode); - const GLuint base_instance = static_cast<GLuint>(gpu.regs.vb_base_instance); + const GLuint base_instance = static_cast<GLuint>(maxwell3d.regs.vb_base_instance); const GLsizei num_instances = - static_cast<GLsizei>(is_instanced ? gpu.mme_draw.instance_count : 1); + static_cast<GLsizei>(is_instanced ? maxwell3d.mme_draw.instance_count : 1); if (is_indexed) { - const GLint base_vertex = static_cast<GLint>(gpu.regs.vb_element_base); - const GLsizei num_vertices = static_cast<GLsizei>(gpu.regs.index_array.count); + const GLint base_vertex = static_cast<GLint>(maxwell3d.regs.vb_element_base); + const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d.regs.index_array.count); const GLvoid* offset = reinterpret_cast<const GLvoid*>(index_buffer_offset); - const GLenum format = MaxwellToGL::IndexFormat(gpu.regs.index_array.format); + const GLenum format = MaxwellToGL::IndexFormat(maxwell3d.regs.index_array.format); if (num_instances == 1 && base_instance == 0 && base_vertex == 0) { glDrawElements(primitive_mode, num_vertices, format, offset); } else if (num_instances == 1 && base_instance == 0) { @@ -714,8 +703,8 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { base_instance); } } else { - const GLint base_vertex = static_cast<GLint>(gpu.regs.vertex_buffer.first); - const GLsizei num_vertices = static_cast<GLsizei>(gpu.regs.vertex_buffer.count); + const GLint base_vertex = static_cast<GLint>(maxwell3d.regs.vertex_buffer.first); + const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d.regs.vertex_buffer.count); if (num_instances == 1 && base_instance == 0) { glDrawArrays(primitive_mode, base_vertex, num_vertices); } else if (base_instance == 0) { @@ -730,7 +719,7 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { ++num_queued_commands; - system.GPU().TickWork(); + gpu.TickWork(); } void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) { @@ -753,7 +742,8 @@ void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) { buffer_cache.Unmap(); - const auto& launch_desc = system.GPU().KeplerCompute().launch_description; + const auto& launch_desc = kepler_compute.launch_description; + program_manager.BindCompute(kernel->GetHandle()); glDispatchCompute(launch_desc.grid_dim_x, launch_desc.grid_dim_y, launch_desc.grid_dim_z); ++num_queued_commands; } @@ -815,17 +805,14 @@ void RasterizerOpenGL::SyncGuestHost() { } void RasterizerOpenGL::SignalSemaphore(GPUVAddr addr, u32 value) { - auto& gpu{system.GPU()}; if (!gpu.IsAsync()) { - auto& memory_manager{gpu.MemoryManager()}; - memory_manager.Write<u32>(addr, value); + gpu_memory.Write<u32>(addr, value); return; } fence_manager.SignalSemaphore(addr, value); } void RasterizerOpenGL::SignalSyncPoint(u32 value) { - auto& gpu{system.GPU()}; if (!gpu.IsAsync()) { gpu.IncrementSyncPoint(value); return; @@ -834,7 +821,6 @@ void RasterizerOpenGL::SignalSyncPoint(u32 value) { } void RasterizerOpenGL::ReleaseFences() { - auto& gpu{system.GPU()}; if (!gpu.IsAsync()) { return; } @@ -920,7 +906,7 @@ void RasterizerOpenGL::SetupDrawConstBuffers(std::size_t stage_index, Shader* sh GL_FRAGMENT_PROGRAM_PARAMETER_BUFFER_NV}; 
MICROPROFILE_SCOPE(OpenGL_UBO); - const auto& stages = system.GPU().Maxwell3D().state.shader_stages; + const auto& stages = maxwell3d.state.shader_stages; const auto& shader_stage = stages[stage_index]; const auto& entries = shader->GetEntries(); const bool use_unified = entries.use_unified_uniforms; @@ -945,7 +931,7 @@ void RasterizerOpenGL::SetupDrawConstBuffers(std::size_t stage_index, Shader* sh void RasterizerOpenGL::SetupComputeConstBuffers(Shader* kernel) { MICROPROFILE_SCOPE(OpenGL_UBO); - const auto& launch_desc = system.GPU().KeplerCompute().launch_description; + const auto& launch_desc = kepler_compute.launch_description; const auto& entries = kernel->GetEntries(); const bool use_unified = entries.use_unified_uniforms; @@ -1018,9 +1004,7 @@ void RasterizerOpenGL::SetupDrawGlobalMemory(std::size_t stage_index, Shader* sh GL_GEOMETRY_PROGRAM_NV, GL_FRAGMENT_PROGRAM_NV, }; - auto& gpu{system.GPU()}; - auto& memory_manager{gpu.MemoryManager()}; - const auto& cbufs{gpu.Maxwell3D().state.shader_stages[stage_index]}; + const auto& cbufs{maxwell3d.state.shader_stages[stage_index]}; const auto& entries{shader->GetEntries().global_memory_entries}; std::array<GLuint64EXT, 32> pointers; @@ -1030,8 +1014,8 @@ void RasterizerOpenGL::SetupDrawGlobalMemory(std::size_t stage_index, Shader* sh u32 binding = assembly_shaders ? 0 : device.GetBaseBindings(stage_index).shader_storage_buffer; for (const auto& entry : entries) { const GPUVAddr addr{cbufs.const_buffers[entry.cbuf_index].address + entry.cbuf_offset}; - const GPUVAddr gpu_addr{memory_manager.Read<u64>(addr)}; - const u32 size{memory_manager.Read<u32>(addr + 8)}; + const GPUVAddr gpu_addr{gpu_memory.Read<u64>(addr)}; + const u32 size{gpu_memory.Read<u32>(addr + 8)}; SetupGlobalMemory(binding, entry, gpu_addr, size, &pointers[binding]); ++binding; } @@ -1041,9 +1025,7 @@ void RasterizerOpenGL::SetupDrawGlobalMemory(std::size_t stage_index, Shader* sh } void RasterizerOpenGL::SetupComputeGlobalMemory(Shader* kernel) { - auto& gpu{system.GPU()}; - auto& memory_manager{gpu.MemoryManager()}; - const auto& cbufs{gpu.KeplerCompute().launch_description.const_buffer_config}; + const auto& cbufs{kepler_compute.launch_description.const_buffer_config}; const auto& entries{kernel->GetEntries().global_memory_entries}; std::array<GLuint64EXT, 32> pointers; @@ -1052,8 +1034,8 @@ void RasterizerOpenGL::SetupComputeGlobalMemory(Shader* kernel) { u32 binding = 0; for (const auto& entry : entries) { const GPUVAddr addr{cbufs[entry.cbuf_index].Address() + entry.cbuf_offset}; - const GPUVAddr gpu_addr{memory_manager.Read<u64>(addr)}; - const u32 size{memory_manager.Read<u32>(addr + 8)}; + const GPUVAddr gpu_addr{gpu_memory.Read<u64>(addr)}; + const u32 size{gpu_memory.Read<u32>(addr + 8)}; SetupGlobalMemory(binding, entry, gpu_addr, size, &pointers[binding]); ++binding; } @@ -1077,7 +1059,6 @@ void RasterizerOpenGL::SetupGlobalMemory(u32 binding, const GlobalMemoryEntry& e void RasterizerOpenGL::SetupDrawTextures(std::size_t stage_index, Shader* shader) { MICROPROFILE_SCOPE(OpenGL_Texture); - const auto& maxwell3d = system.GPU().Maxwell3D(); u32 binding = device.GetBaseBindings(stage_index).sampler; for (const auto& entry : shader->GetEntries().samplers) { const auto shader_type = static_cast<ShaderType>(stage_index); @@ -1090,11 +1071,10 @@ void RasterizerOpenGL::SetupDrawTextures(std::size_t stage_index, Shader* shader void RasterizerOpenGL::SetupComputeTextures(Shader* kernel) { MICROPROFILE_SCOPE(OpenGL_Texture); - const auto& compute = 
system.GPU().KeplerCompute(); u32 binding = 0; for (const auto& entry : kernel->GetEntries().samplers) { for (std::size_t i = 0; i < entry.size; ++i) { - const auto texture = GetTextureInfo(compute, entry, ShaderType::Compute, i); + const auto texture = GetTextureInfo(kepler_compute, entry, ShaderType::Compute, i); SetupTexture(binding++, texture, entry); } } @@ -1118,20 +1098,18 @@ void RasterizerOpenGL::SetupTexture(u32 binding, const Tegra::Texture::FullTextu } void RasterizerOpenGL::SetupDrawImages(std::size_t stage_index, Shader* shader) { - const auto& maxwell3d = system.GPU().Maxwell3D(); u32 binding = device.GetBaseBindings(stage_index).image; for (const auto& entry : shader->GetEntries().images) { - const auto shader_type = static_cast<Tegra::Engines::ShaderType>(stage_index); + const auto shader_type = static_cast<ShaderType>(stage_index); const auto tic = GetTextureInfo(maxwell3d, entry, shader_type).tic; SetupImage(binding++, tic, entry); } } void RasterizerOpenGL::SetupComputeImages(Shader* shader) { - const auto& compute = system.GPU().KeplerCompute(); u32 binding = 0; for (const auto& entry : shader->GetEntries().images) { - const auto tic = GetTextureInfo(compute, entry, Tegra::Engines::ShaderType::Compute).tic; + const auto tic = GetTextureInfo(kepler_compute, entry, ShaderType::Compute).tic; SetupImage(binding++, tic, entry); } } @@ -1151,9 +1129,8 @@ void RasterizerOpenGL::SetupImage(u32 binding, const Tegra::Texture::TICEntry& t } void RasterizerOpenGL::SyncViewport() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; - const auto& regs = gpu.regs; + auto& flags = maxwell3d.dirty.flags; + const auto& regs = maxwell3d.regs; const bool dirty_viewport = flags[Dirty::Viewports]; const bool dirty_clip_control = flags[Dirty::ClipControl]; @@ -1225,25 +1202,23 @@ void RasterizerOpenGL::SyncViewport() { } void RasterizerOpenGL::SyncDepthClamp() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::DepthClampEnabled]) { return; } flags[Dirty::DepthClampEnabled] = false; - oglEnable(GL_DEPTH_CLAMP, gpu.regs.view_volume_clip_control.depth_clamp_disabled == 0); + oglEnable(GL_DEPTH_CLAMP, maxwell3d.regs.view_volume_clip_control.depth_clamp_disabled == 0); } void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::ClipDistances] && !flags[Dirty::Shaders]) { return; } flags[Dirty::ClipDistances] = false; - clip_mask &= gpu.regs.clip_distance_enabled; + clip_mask &= maxwell3d.regs.clip_distance_enabled; if (clip_mask == last_clip_distance_mask) { return; } @@ -1259,9 +1234,8 @@ void RasterizerOpenGL::SyncClipCoef() { } void RasterizerOpenGL::SyncCullMode() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; - const auto& regs = gpu.regs; + auto& flags = maxwell3d.dirty.flags; + const auto& regs = maxwell3d.regs; if (flags[Dirty::CullTest]) { flags[Dirty::CullTest] = false; @@ -1276,26 +1250,24 @@ void RasterizerOpenGL::SyncCullMode() { } void RasterizerOpenGL::SyncPrimitiveRestart() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::PrimitiveRestart]) { return; } flags[Dirty::PrimitiveRestart] = false; - if (gpu.regs.primitive_restart.enabled) { + if (maxwell3d.regs.primitive_restart.enabled) { glEnable(GL_PRIMITIVE_RESTART); - 
glPrimitiveRestartIndex(gpu.regs.primitive_restart.index); + glPrimitiveRestartIndex(maxwell3d.regs.primitive_restart.index); } else { glDisable(GL_PRIMITIVE_RESTART); } } void RasterizerOpenGL::SyncDepthTestState() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; + const auto& regs = maxwell3d.regs; - const auto& regs = gpu.regs; if (flags[Dirty::DepthMask]) { flags[Dirty::DepthMask] = false; glDepthMask(regs.depth_write_enabled ? GL_TRUE : GL_FALSE); @@ -1313,14 +1285,13 @@ void RasterizerOpenGL::SyncDepthTestState() { } void RasterizerOpenGL::SyncStencilTestState() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::StencilTest]) { return; } flags[Dirty::StencilTest] = false; - const auto& regs = gpu.regs; + const auto& regs = maxwell3d.regs; oglEnable(GL_STENCIL_TEST, regs.stencil_enable); glStencilFuncSeparate(GL_FRONT, MaxwellToGL::ComparisonOp(regs.stencil_front_func_func), @@ -1345,25 +1316,24 @@ void RasterizerOpenGL::SyncStencilTestState() { } void RasterizerOpenGL::SyncRasterizeEnable() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::RasterizeEnable]) { return; } flags[Dirty::RasterizeEnable] = false; - oglEnable(GL_RASTERIZER_DISCARD, gpu.regs.rasterize_enable == 0); + oglEnable(GL_RASTERIZER_DISCARD, maxwell3d.regs.rasterize_enable == 0); } void RasterizerOpenGL::SyncPolygonModes() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::PolygonModes]) { return; } flags[Dirty::PolygonModes] = false; - if (gpu.regs.fill_rectangle) { + const auto& regs = maxwell3d.regs; + if (regs.fill_rectangle) { if (!GLAD_GL_NV_fill_rectangle) { LOG_ERROR(Render_OpenGL, "GL_NV_fill_rectangle used and not supported"); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); @@ -1376,27 +1346,26 @@ void RasterizerOpenGL::SyncPolygonModes() { return; } - if (gpu.regs.polygon_mode_front == gpu.regs.polygon_mode_back) { + if (regs.polygon_mode_front == regs.polygon_mode_back) { flags[Dirty::PolygonModeFront] = false; flags[Dirty::PolygonModeBack] = false; - glPolygonMode(GL_FRONT_AND_BACK, MaxwellToGL::PolygonMode(gpu.regs.polygon_mode_front)); + glPolygonMode(GL_FRONT_AND_BACK, MaxwellToGL::PolygonMode(regs.polygon_mode_front)); return; } if (flags[Dirty::PolygonModeFront]) { flags[Dirty::PolygonModeFront] = false; - glPolygonMode(GL_FRONT, MaxwellToGL::PolygonMode(gpu.regs.polygon_mode_front)); + glPolygonMode(GL_FRONT, MaxwellToGL::PolygonMode(regs.polygon_mode_front)); } if (flags[Dirty::PolygonModeBack]) { flags[Dirty::PolygonModeBack] = false; - glPolygonMode(GL_BACK, MaxwellToGL::PolygonMode(gpu.regs.polygon_mode_back)); + glPolygonMode(GL_BACK, MaxwellToGL::PolygonMode(regs.polygon_mode_back)); } } void RasterizerOpenGL::SyncColorMask() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::ColorMasks]) { return; } @@ -1405,7 +1374,7 @@ void RasterizerOpenGL::SyncColorMask() { const bool force = flags[Dirty::ColorMaskCommon]; flags[Dirty::ColorMaskCommon] = false; - const auto& regs = gpu.regs; + const auto& regs = maxwell3d.regs; if (regs.color_mask_common) { if (!force && !flags[Dirty::ColorMask0]) { return; @@ -1430,33 +1399,30 @@ void RasterizerOpenGL::SyncColorMask() { } void RasterizerOpenGL::SyncMultiSampleState() { - 
auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::MultisampleControl]) { return; } flags[Dirty::MultisampleControl] = false; - const auto& regs = system.GPU().Maxwell3D().regs; + const auto& regs = maxwell3d.regs; oglEnable(GL_SAMPLE_ALPHA_TO_COVERAGE, regs.multisample_control.alpha_to_coverage); oglEnable(GL_SAMPLE_ALPHA_TO_ONE, regs.multisample_control.alpha_to_one); } void RasterizerOpenGL::SyncFragmentColorClampState() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::FragmentClampColor]) { return; } flags[Dirty::FragmentClampColor] = false; - glClampColor(GL_CLAMP_FRAGMENT_COLOR, gpu.regs.frag_color_clamp ? GL_TRUE : GL_FALSE); + glClampColor(GL_CLAMP_FRAGMENT_COLOR, maxwell3d.regs.frag_color_clamp ? GL_TRUE : GL_FALSE); } void RasterizerOpenGL::SyncBlendState() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; - const auto& regs = gpu.regs; + auto& flags = maxwell3d.dirty.flags; + const auto& regs = maxwell3d.regs; if (flags[Dirty::BlendColor]) { flags[Dirty::BlendColor] = false; @@ -1513,14 +1479,13 @@ void RasterizerOpenGL::SyncBlendState() { } void RasterizerOpenGL::SyncLogicOpState() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::LogicOp]) { return; } flags[Dirty::LogicOp] = false; - const auto& regs = gpu.regs; + const auto& regs = maxwell3d.regs; if (regs.logic_op.enable) { glEnable(GL_COLOR_LOGIC_OP); glLogicOp(MaxwellToGL::LogicOp(regs.logic_op.operation)); @@ -1530,14 +1495,13 @@ void RasterizerOpenGL::SyncLogicOpState() { } void RasterizerOpenGL::SyncScissorTest() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::Scissors]) { return; } flags[Dirty::Scissors] = false; - const auto& regs = gpu.regs; + const auto& regs = maxwell3d.regs; for (std::size_t index = 0; index < Maxwell::NumViewports; ++index) { if (!flags[Dirty::Scissor0 + index]) { continue; @@ -1556,16 +1520,15 @@ void RasterizerOpenGL::SyncScissorTest() { } void RasterizerOpenGL::SyncPointState() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::PointSize]) { return; } flags[Dirty::PointSize] = false; - oglEnable(GL_POINT_SPRITE, gpu.regs.point_sprite_enable); + oglEnable(GL_POINT_SPRITE, maxwell3d.regs.point_sprite_enable); - if (gpu.regs.vp_point_size.enable) { + if (maxwell3d.regs.vp_point_size.enable) { // By definition of GL_POINT_SIZE, it only matters if GL_PROGRAM_POINT_SIZE is disabled. glEnable(GL_PROGRAM_POINT_SIZE); return; @@ -1573,32 +1536,30 @@ void RasterizerOpenGL::SyncPointState() { // Limit the point size to 1 since nouveau sometimes sets a point size of 0 (and that's invalid // in OpenGL). - glPointSize(std::max(1.0f, gpu.regs.point_size)); + glPointSize(std::max(1.0f, maxwell3d.regs.point_size)); glDisable(GL_PROGRAM_POINT_SIZE); } void RasterizerOpenGL::SyncLineState() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::LineWidth]) { return; } flags[Dirty::LineWidth] = false; - const auto& regs = gpu.regs; + const auto& regs = maxwell3d.regs; oglEnable(GL_LINE_SMOOTH, regs.line_smooth_enable); glLineWidth(regs.line_smooth_enable ? 
regs.line_width_smooth : regs.line_width_aliased); } void RasterizerOpenGL::SyncPolygonOffset() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::PolygonOffset]) { return; } flags[Dirty::PolygonOffset] = false; - const auto& regs = gpu.regs; + const auto& regs = maxwell3d.regs; oglEnable(GL_POLYGON_OFFSET_FILL, regs.polygon_offset_fill_enable); oglEnable(GL_POLYGON_OFFSET_LINE, regs.polygon_offset_line_enable); oglEnable(GL_POLYGON_OFFSET_POINT, regs.polygon_offset_point_enable); @@ -1612,14 +1573,13 @@ void RasterizerOpenGL::SyncPolygonOffset() { } void RasterizerOpenGL::SyncAlphaTest() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::AlphaTest]) { return; } flags[Dirty::AlphaTest] = false; - const auto& regs = gpu.regs; + const auto& regs = maxwell3d.regs; if (regs.alpha_test_enabled && regs.rt_control.count > 1) { LOG_WARNING(Render_OpenGL, "Alpha testing with more than one render target is not tested"); } @@ -1633,20 +1593,19 @@ void RasterizerOpenGL::SyncAlphaTest() { } void RasterizerOpenGL::SyncFramebufferSRGB() { - auto& gpu = system.GPU().Maxwell3D(); - auto& flags = gpu.dirty.flags; + auto& flags = maxwell3d.dirty.flags; if (!flags[Dirty::FramebufferSRGB]) { return; } flags[Dirty::FramebufferSRGB] = false; - oglEnable(GL_FRAMEBUFFER_SRGB, gpu.regs.framebuffer_srgb); + oglEnable(GL_FRAMEBUFFER_SRGB, maxwell3d.regs.framebuffer_srgb); } void RasterizerOpenGL::SyncTransformFeedback() { // TODO(Rodrigo): Inject SKIP_COMPONENTS*_NV when required. An unimplemented message will signal // when this is required. - const auto& regs = system.GPU().Maxwell3D().regs; + const auto& regs = maxwell3d.regs; static constexpr std::size_t STRIDE = 3; std::array<GLint, 128 * STRIDE * Maxwell::NumTransformFeedbackBuffers> attribs; @@ -1698,7 +1657,7 @@ void RasterizerOpenGL::SyncTransformFeedback() { } void RasterizerOpenGL::BeginTransformFeedback(GLenum primitive_mode) { - const auto& regs = system.GPU().Maxwell3D().regs; + const auto& regs = maxwell3d.regs; if (regs.tfb_enabled == 0) { return; } @@ -1741,7 +1700,7 @@ void RasterizerOpenGL::BeginTransformFeedback(GLenum primitive_mode) { } void RasterizerOpenGL::EndTransformFeedback() { - const auto& regs = system.GPU().Maxwell3D().regs; + const auto& regs = maxwell3d.regs; if (regs.tfb_enabled == 0) { return; } diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index ccc6f50f6..f451404b2 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -36,8 +36,8 @@ #include "video_core/shader/async_shaders.h" #include "video_core/textures/texture.h" -namespace Core { -class System; +namespace Core::Memory { +class Memory; } namespace Core::Frontend { @@ -55,9 +55,10 @@ struct DrawParameters; class RasterizerOpenGL : public VideoCore::RasterizerAccelerated { public: - explicit RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWindow& emu_window, - const Device& device, ScreenInfo& info, - ProgramManager& program_manager, StateTracker& state_tracker); + explicit RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu, + Core::Memory::Memory& cpu_memory, const Device& device, + ScreenInfo& screen_info, ProgramManager& program_manager, + StateTracker& state_tracker); ~RasterizerOpenGL() override; void Draw(bool is_indexed, bool is_instanced) override; @@ -83,9 
+84,8 @@ public: const Tegra::Engines::Fermi2D::Config& copy_config) override; bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, u32 pixel_stride) override; - void LoadDiskResources(const std::atomic_bool& stop_loading, + void LoadDiskResources(u64 title_id, const std::atomic_bool& stop_loading, const VideoCore::DiskResourceLoadCallback& callback) override; - void SetupDirtyFlags() override; /// Returns true when there are commands queued to the OpenGL server. bool AnyCommandQueued() const { @@ -237,7 +237,15 @@ private: void SetupShaders(GLenum primitive_mode); + Tegra::GPU& gpu; + Tegra::Engines::Maxwell3D& maxwell3d; + Tegra::Engines::KeplerCompute& kepler_compute; + Tegra::MemoryManager& gpu_memory; + const Device& device; + ScreenInfo& screen_info; + ProgramManager& program_manager; + StateTracker& state_tracker; TextureCacheOpenGL texture_cache; ShaderCacheOpenGL shader_cache; @@ -247,10 +255,6 @@ private: OGLBufferCache buffer_cache; FenceManagerOpenGL fence_manager; - Core::System& system; - ScreenInfo& screen_info; - ProgramManager& program_manager; - StateTracker& state_tracker; VideoCommon::Shader::AsyncShaders async_shaders; static constexpr std::size_t STREAM_BUFFER_SIZE = 128 * 1024 * 1024; diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index a07d56ef0..bd56bed0c 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp @@ -239,12 +239,11 @@ std::unique_ptr<Shader> Shader::CreateStageFromMemory( ProgramCode code_b, VideoCommon::Shader::AsyncShaders& async_shaders, VAddr cpu_addr) { const auto shader_type = GetShaderType(program_type); - auto& gpu = params.system.GPU(); + auto& gpu = params.gpu; gpu.ShaderNotify().MarkSharderBuilding(); auto registry = std::make_shared<Registry>(shader_type, gpu.Maxwell3D()); - if (!async_shaders.IsShaderAsync(params.system.GPU()) || - !params.device.UseAsynchronousShaders()) { + if (!async_shaders.IsShaderAsync(gpu) || !params.device.UseAsynchronousShaders()) { const ShaderIR ir(code, STAGE_MAIN_OFFSET, COMPILER_SETTINGS, *registry); // TODO(Rodrigo): Handle VertexA shaders // std::optional<ShaderIR> ir_b; @@ -287,11 +286,10 @@ std::unique_ptr<Shader> Shader::CreateStageFromMemory( std::unique_ptr<Shader> Shader::CreateKernelFromMemory(const ShaderParameters& params, ProgramCode code) { - auto& gpu = params.system.GPU(); + auto& gpu = params.gpu; gpu.ShaderNotify().MarkSharderBuilding(); - auto& engine = gpu.KeplerCompute(); - auto registry = std::make_shared<Registry>(ShaderType::Compute, engine); + auto registry = std::make_shared<Registry>(ShaderType::Compute, params.engine); const ShaderIR ir(code, KERNEL_MAIN_OFFSET, COMPILER_SETTINGS, *registry); const u64 uid = params.unique_identifier; auto program = BuildShader(params.device, ShaderType::Compute, uid, ir, *registry); @@ -320,15 +318,20 @@ std::unique_ptr<Shader> Shader::CreateFromCache(const ShaderParameters& params, precompiled_shader.registry, precompiled_shader.entries, precompiled_shader.program)); } -ShaderCacheOpenGL::ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, Core::System& system, - Core::Frontend::EmuWindow& emu_window, const Device& device) - : VideoCommon::ShaderCache<Shader>{rasterizer}, system{system}, - emu_window{emu_window}, device{device}, disk_cache{system} {} +ShaderCacheOpenGL::ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, + Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, + 
Tegra::Engines::Maxwell3D& maxwell3d_, + Tegra::Engines::KeplerCompute& kepler_compute_, + Tegra::MemoryManager& gpu_memory_, const Device& device_) + : VideoCommon::ShaderCache<Shader>{rasterizer}, emu_window{emu_window_}, gpu{gpu_}, + gpu_memory{gpu_memory_}, maxwell3d{maxwell3d_}, + kepler_compute{kepler_compute_}, device{device_} {} ShaderCacheOpenGL::~ShaderCacheOpenGL() = default; -void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading, +void ShaderCacheOpenGL::LoadDiskCache(u64 title_id, const std::atomic_bool& stop_loading, const VideoCore::DiskResourceLoadCallback& callback) { + disk_cache.BindTitleID(title_id); const std::optional transferable = disk_cache.LoadTransferable(); if (!transferable) { return; @@ -481,21 +484,19 @@ ProgramSharedPtr ShaderCacheOpenGL::GeneratePrecompiledProgram( Shader* ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program, VideoCommon::Shader::AsyncShaders& async_shaders) { - if (!system.GPU().Maxwell3D().dirty.flags[Dirty::Shaders]) { + if (!maxwell3d.dirty.flags[Dirty::Shaders]) { auto* last_shader = last_shaders[static_cast<std::size_t>(program)]; if (last_shader->IsBuilt()) { return last_shader; } } - auto& memory_manager{system.GPU().MemoryManager()}; - const GPUVAddr address{GetShaderAddress(system, program)}; + const GPUVAddr address{GetShaderAddress(maxwell3d, program)}; if (device.UseAsynchronousShaders() && async_shaders.HasCompletedWork()) { auto completed_work = async_shaders.GetCompletedWork(); for (auto& work : completed_work) { Shader* shader = TryGet(work.cpu_address); - auto& gpu = system.GPU(); gpu.ShaderNotify().MarkShaderComplete(); if (shader == nullptr) { continue; @@ -507,14 +508,13 @@ Shader* ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program, shader->AsyncGLASMBuilt(std::move(work.program.glasm)); } + auto& registry = shader->GetRegistry(); + ShaderDiskCacheEntry entry; entry.type = work.shader_type; entry.code = std::move(work.code); entry.code_b = std::move(work.code_b); entry.unique_identifier = work.uid; - - auto& registry = shader->GetRegistry(); - entry.bound_buffer = registry.GetBoundBuffer(); entry.graphics_info = registry.GetGraphicsInfo(); entry.keys = registry.GetKeys(); @@ -525,28 +525,28 @@ Shader* ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program, } // Look up shader in the cache based on address - const auto cpu_addr{memory_manager.GpuToCpuAddress(address)}; + const std::optional<VAddr> cpu_addr{gpu_memory.GpuToCpuAddress(address)}; if (Shader* const shader{cpu_addr ? 
TryGet(*cpu_addr) : null_shader.get()}) { return last_shaders[static_cast<std::size_t>(program)] = shader; } - const auto host_ptr{memory_manager.GetPointer(address)}; + const u8* const host_ptr{gpu_memory.GetPointer(address)}; // No shader found - create a new one - ProgramCode code{GetShaderCode(memory_manager, address, host_ptr, false)}; + ProgramCode code{GetShaderCode(gpu_memory, address, host_ptr, false)}; ProgramCode code_b; if (program == Maxwell::ShaderProgram::VertexA) { - const GPUVAddr address_b{GetShaderAddress(system, Maxwell::ShaderProgram::VertexB)}; - const u8* host_ptr_b = memory_manager.GetPointer(address_b); - code_b = GetShaderCode(memory_manager, address_b, host_ptr_b, false); + const GPUVAddr address_b{GetShaderAddress(maxwell3d, Maxwell::ShaderProgram::VertexB)}; + const u8* host_ptr_b = gpu_memory.GetPointer(address_b); + code_b = GetShaderCode(gpu_memory, address_b, host_ptr_b, false); } const std::size_t code_size = code.size() * sizeof(u64); const u64 unique_identifier = GetUniqueIdentifier( GetShaderType(program), program == Maxwell::ShaderProgram::VertexA, code, code_b); - const ShaderParameters params{system, disk_cache, device, - *cpu_addr, host_ptr, unique_identifier}; + const ShaderParameters params{gpu, maxwell3d, disk_cache, device, + *cpu_addr, host_ptr, unique_identifier}; std::unique_ptr<Shader> shader; const auto found = runtime_cache.find(unique_identifier); @@ -568,21 +568,20 @@ Shader* ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program, } Shader* ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) { - auto& memory_manager{system.GPU().MemoryManager()}; - const auto cpu_addr{memory_manager.GpuToCpuAddress(code_addr)}; + const std::optional<VAddr> cpu_addr{gpu_memory.GpuToCpuAddress(code_addr)}; if (Shader* const kernel = cpu_addr ? 
TryGet(*cpu_addr) : null_kernel.get()) { return kernel; } - const auto host_ptr{memory_manager.GetPointer(code_addr)}; // No kernel found, create a new one - ProgramCode code{GetShaderCode(memory_manager, code_addr, host_ptr, true)}; + const u8* host_ptr{gpu_memory.GetPointer(code_addr)}; + ProgramCode code{GetShaderCode(gpu_memory, code_addr, host_ptr, true)}; const std::size_t code_size{code.size() * sizeof(u64)}; const u64 unique_identifier{GetUniqueIdentifier(ShaderType::Compute, false, code)}; - const ShaderParameters params{system, disk_cache, device, - *cpu_addr, host_ptr, unique_identifier}; + const ShaderParameters params{gpu, kepler_compute, disk_cache, device, + *cpu_addr, host_ptr, unique_identifier}; std::unique_ptr<Shader> kernel; const auto found = runtime_cache.find(unique_identifier); diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h index 7528ac686..1708af06a 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_cache.h @@ -25,8 +25,8 @@ #include "video_core/shader/shader_ir.h" #include "video_core/shader_cache.h" -namespace Core { -class System; +namespace Tegra { +class MemoryManager; } namespace Core::Frontend { @@ -57,11 +57,12 @@ struct PrecompiledShader { }; struct ShaderParameters { - Core::System& system; + Tegra::GPU& gpu; + Tegra::Engines::ConstBufferEngineInterface& engine; ShaderDiskCacheOpenGL& disk_cache; const Device& device; VAddr cpu_addr; - u8* host_ptr; + const u8* host_ptr; u64 unique_identifier; }; @@ -118,12 +119,14 @@ private: class ShaderCacheOpenGL final : public VideoCommon::ShaderCache<Shader> { public: - explicit ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, Core::System& system, - Core::Frontend::EmuWindow& emu_window, const Device& device); + explicit ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, Core::Frontend::EmuWindow& emu_window, + Tegra::GPU& gpu, Tegra::Engines::Maxwell3D& maxwell3d, + Tegra::Engines::KeplerCompute& kepler_compute, + Tegra::MemoryManager& gpu_memory, const Device& device); ~ShaderCacheOpenGL() override; /// Loads disk cache for the current game - void LoadDiskCache(const std::atomic_bool& stop_loading, + void LoadDiskCache(u64 title_id, const std::atomic_bool& stop_loading, const VideoCore::DiskResourceLoadCallback& callback); /// Gets the current specified shader stage program @@ -138,9 +141,13 @@ private: const ShaderDiskCacheEntry& entry, const ShaderDiskCachePrecompiled& precompiled_entry, const std::unordered_set<GLenum>& supported_formats); - Core::System& system; Core::Frontend::EmuWindow& emu_window; + Tegra::GPU& gpu; + Tegra::MemoryManager& gpu_memory; + Tegra::Engines::Maxwell3D& maxwell3d; + Tegra::Engines::KeplerCompute& kepler_compute; const Device& device; + ShaderDiskCacheOpenGL disk_cache; std::unordered_map<u64, PrecompiledShader> runtime_cache; diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp index 3f75fcd2b..ce3a65122 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp @@ -1443,8 +1443,10 @@ private: return expr + ", vec2(0.0), vec2(0.0))"; case TextureType::TextureCube: return expr + ", vec3(0.0), vec3(0.0))"; + default: + UNREACHABLE(); + break; } - UNREACHABLE(); } for (const auto& variant : extras) { diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp 
b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp index 40c0877c1..166ee34e1 100644 --- a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp @@ -206,13 +206,17 @@ bool ShaderDiskCacheEntry::Save(Common::FS::IOFile& file) const { flat_bindless_samplers.size(); } -ShaderDiskCacheOpenGL::ShaderDiskCacheOpenGL(Core::System& system) : system{system} {} +ShaderDiskCacheOpenGL::ShaderDiskCacheOpenGL() = default; ShaderDiskCacheOpenGL::~ShaderDiskCacheOpenGL() = default; +void ShaderDiskCacheOpenGL::BindTitleID(u64 title_id_) { + title_id = title_id_; +} + std::optional<std::vector<ShaderDiskCacheEntry>> ShaderDiskCacheOpenGL::LoadTransferable() { // Skip games without title id - const bool has_title_id = system.CurrentProcess()->GetTitleID() != 0; + const bool has_title_id = title_id != 0; if (!Settings::values.use_disk_shader_cache.GetValue() || !has_title_id) { return std::nullopt; } @@ -474,7 +478,7 @@ std::string ShaderDiskCacheOpenGL::GetBaseDir() const { } std::string ShaderDiskCacheOpenGL::GetTitleID() const { - return fmt::format("{:016X}", system.CurrentProcess()->GetTitleID()); + return fmt::format("{:016X}", title_id); } } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.h b/src/video_core/renderer_opengl/gl_shader_disk_cache.h index db2bb73bc..aef841c1d 100644 --- a/src/video_core/renderer_opengl/gl_shader_disk_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.h @@ -21,10 +21,6 @@ #include "video_core/engines/shader_type.h" #include "video_core/shader/registry.h" -namespace Core { -class System; -} - namespace Common::FS { class IOFile; } @@ -70,9 +66,12 @@ struct ShaderDiskCachePrecompiled { class ShaderDiskCacheOpenGL { public: - explicit ShaderDiskCacheOpenGL(Core::System& system); + explicit ShaderDiskCacheOpenGL(); ~ShaderDiskCacheOpenGL(); + /// Binds a title ID for all future operations. + void BindTitleID(u64 title_id); + /// Loads transferable cache. If file has a old version or on failure, it deletes the file. 
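// Usage sketch (illustrative only, not part of this diff): with the Core::System dependency
// removed from ShaderDiskCacheOpenGL, the owner is expected to bind the title ID once before
// any load; the LoadDiskCache(u64 title_id, ...) signature above presumably forwards it along
// these lines. The `title_id` value below is an assumption standing in for whatever the
// frontend resolves for the running game.
//
//     ShaderDiskCacheOpenGL disk_cache;
//     disk_cache.BindTitleID(title_id);   // replaces the old system.CurrentProcess()->GetTitleID() lookup
//     if (const auto entries = disk_cache.LoadTransferable()) {
//         // entries->size() transferable shader entries are available for precompilation
//     }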
std::optional<std::vector<ShaderDiskCacheEntry>> LoadTransferable(); @@ -157,8 +156,6 @@ private: return LoadArrayFromPrecompiled(&object, 1); } - Core::System& system; - // Stores whole precompiled cache which will be read from or saved to the precompiled chache // file FileSys::VectorVfsFile precompiled_cache_virtual_file; @@ -168,8 +165,11 @@ private: // Stored transferable shaders std::unordered_set<u64> stored_transferable; + /// Title ID to operate on + u64 title_id = 0; + // The cache has been loaded at boot - bool is_usable{}; + bool is_usable = false; }; } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_state_tracker.cpp b/src/video_core/renderer_opengl/gl_state_tracker.cpp index d24fad3de..6bcf831f2 100644 --- a/src/video_core/renderer_opengl/gl_state_tracker.cpp +++ b/src/video_core/renderer_opengl/gl_state_tracker.cpp @@ -214,10 +214,8 @@ void SetupDirtyMisc(Tables& tables) { } // Anonymous namespace -StateTracker::StateTracker(Core::System& system) : system{system} {} - -void StateTracker::Initialize() { - auto& dirty = system.GPU().Maxwell3D().dirty; +StateTracker::StateTracker(Tegra::GPU& gpu) : flags{gpu.Maxwell3D().dirty.flags} { + auto& dirty = gpu.Maxwell3D().dirty; auto& tables = dirty.tables; SetupDirtyRenderTargets(tables); SetupDirtyColorMasks(tables); diff --git a/src/video_core/renderer_opengl/gl_state_tracker.h b/src/video_core/renderer_opengl/gl_state_tracker.h index 0f823288e..9d127548f 100644 --- a/src/video_core/renderer_opengl/gl_state_tracker.h +++ b/src/video_core/renderer_opengl/gl_state_tracker.h @@ -13,8 +13,8 @@ #include "video_core/dirty_flags.h" #include "video_core/engines/maxwell_3d.h" -namespace Core { -class System; +namespace Tegra { +class GPU; } namespace OpenGL { @@ -90,9 +90,7 @@ static_assert(Last <= std::numeric_limits<u8>::max()); class StateTracker { public: - explicit StateTracker(Core::System& system); - - void Initialize(); + explicit StateTracker(Tegra::GPU& gpu); void BindIndexBuffer(GLuint new_index_buffer) { if (index_buffer == new_index_buffer) { @@ -103,7 +101,6 @@ public: } void NotifyScreenDrawVertexArray() { - auto& flags = system.GPU().Maxwell3D().dirty.flags; flags[OpenGL::Dirty::VertexFormats] = true; flags[OpenGL::Dirty::VertexFormat0 + 0] = true; flags[OpenGL::Dirty::VertexFormat0 + 1] = true; @@ -117,98 +114,81 @@ public: } void NotifyPolygonModes() { - auto& flags = system.GPU().Maxwell3D().dirty.flags; flags[OpenGL::Dirty::PolygonModes] = true; flags[OpenGL::Dirty::PolygonModeFront] = true; flags[OpenGL::Dirty::PolygonModeBack] = true; } void NotifyViewport0() { - auto& flags = system.GPU().Maxwell3D().dirty.flags; flags[OpenGL::Dirty::Viewports] = true; flags[OpenGL::Dirty::Viewport0] = true; } void NotifyScissor0() { - auto& flags = system.GPU().Maxwell3D().dirty.flags; flags[OpenGL::Dirty::Scissors] = true; flags[OpenGL::Dirty::Scissor0] = true; } void NotifyColorMask0() { - auto& flags = system.GPU().Maxwell3D().dirty.flags; flags[OpenGL::Dirty::ColorMasks] = true; flags[OpenGL::Dirty::ColorMask0] = true; } void NotifyBlend0() { - auto& flags = system.GPU().Maxwell3D().dirty.flags; flags[OpenGL::Dirty::BlendStates] = true; flags[OpenGL::Dirty::BlendState0] = true; } void NotifyFramebuffer() { - auto& flags = system.GPU().Maxwell3D().dirty.flags; flags[VideoCommon::Dirty::RenderTargets] = true; } void NotifyFrontFace() { - auto& flags = system.GPU().Maxwell3D().dirty.flags; flags[OpenGL::Dirty::FrontFace] = true; } void NotifyCullTest() { - auto& flags = system.GPU().Maxwell3D().dirty.flags; 
flags[OpenGL::Dirty::CullTest] = true; } void NotifyDepthMask() { - auto& flags = system.GPU().Maxwell3D().dirty.flags; flags[OpenGL::Dirty::DepthMask] = true; } void NotifyDepthTest() { - auto& flags = system.GPU().Maxwell3D().dirty.flags; flags[OpenGL::Dirty::DepthTest] = true; } void NotifyStencilTest() { - auto& flags = system.GPU().Maxwell3D().dirty.flags; flags[OpenGL::Dirty::StencilTest] = true; } void NotifyPolygonOffset() { - auto& flags = system.GPU().Maxwell3D().dirty.flags; flags[OpenGL::Dirty::PolygonOffset] = true; } void NotifyRasterizeEnable() { - auto& flags = system.GPU().Maxwell3D().dirty.flags; flags[OpenGL::Dirty::RasterizeEnable] = true; } void NotifyFramebufferSRGB() { - auto& flags = system.GPU().Maxwell3D().dirty.flags; flags[OpenGL::Dirty::FramebufferSRGB] = true; } void NotifyLogicOp() { - auto& flags = system.GPU().Maxwell3D().dirty.flags; flags[OpenGL::Dirty::LogicOp] = true; } void NotifyClipControl() { - auto& flags = system.GPU().Maxwell3D().dirty.flags; flags[OpenGL::Dirty::ClipControl] = true; } void NotifyAlphaTest() { - auto& flags = system.GPU().Maxwell3D().dirty.flags; flags[OpenGL::Dirty::AlphaTest] = true; } private: - Core::System& system; + Tegra::Engines::Maxwell3D::DirtyState::Flags& flags; GLuint index_buffer = 0; }; diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp index f403f388a..a863ef218 100644 --- a/src/video_core/renderer_opengl/gl_texture_cache.cpp +++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp @@ -532,10 +532,12 @@ OGLTextureView CachedSurfaceView::CreateTextureView() const { return texture_view; } -TextureCacheOpenGL::TextureCacheOpenGL(Core::System& system, - VideoCore::RasterizerInterface& rasterizer, - const Device& device, StateTracker& state_tracker) - : TextureCacheBase{system, rasterizer, device.HasASTC()}, state_tracker{state_tracker} { +TextureCacheOpenGL::TextureCacheOpenGL(VideoCore::RasterizerInterface& rasterizer, + Tegra::Engines::Maxwell3D& maxwell3d, + Tegra::MemoryManager& gpu_memory, const Device& device, + StateTracker& state_tracker_) + : TextureCacheBase{rasterizer, maxwell3d, gpu_memory, device.HasASTC()}, state_tracker{ + state_tracker_} { src_framebuffer.Create(); dst_framebuffer.Create(); } diff --git a/src/video_core/renderer_opengl/gl_texture_cache.h b/src/video_core/renderer_opengl/gl_texture_cache.h index de8f18489..7787134fc 100644 --- a/src/video_core/renderer_opengl/gl_texture_cache.h +++ b/src/video_core/renderer_opengl/gl_texture_cache.h @@ -129,8 +129,10 @@ private: class TextureCacheOpenGL final : public TextureCacheBase { public: - explicit TextureCacheOpenGL(Core::System& system, VideoCore::RasterizerInterface& rasterizer, - const Device& device, StateTracker& state_tracker); + explicit TextureCacheOpenGL(VideoCore::RasterizerInterface& rasterizer, + Tegra::Engines::Maxwell3D& maxwell3d, + Tegra::MemoryManager& gpu_memory, const Device& device, + StateTracker& state_tracker); ~TextureCacheOpenGL(); protected: diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h index fe9bd4b5a..a8be2aa37 100644 --- a/src/video_core/renderer_opengl/maxwell_to_gl.h +++ b/src/video_core/renderer_opengl/maxwell_to_gl.h @@ -47,6 +47,8 @@ inline GLenum VertexFormat(Maxwell::VertexAttribute attrib) { return GL_UNSIGNED_INT; case Maxwell::VertexAttribute::Size::Size_10_10_10_2: return GL_UNSIGNED_INT_2_10_10_10_REV; + default: + break; } break; case 
Maxwell::VertexAttribute::Type::SignedNorm: @@ -70,6 +72,8 @@ inline GLenum VertexFormat(Maxwell::VertexAttribute attrib) { return GL_INT; case Maxwell::VertexAttribute::Size::Size_10_10_10_2: return GL_INT_2_10_10_10_REV; + default: + break; } break; case Maxwell::VertexAttribute::Type::Float: @@ -84,6 +88,8 @@ inline GLenum VertexFormat(Maxwell::VertexAttribute attrib) { case Maxwell::VertexAttribute::Size::Size_32_32_32: case Maxwell::VertexAttribute::Size::Size_32_32_32_32: return GL_FLOAT; + default: + break; } break; } diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index b759c2dba..a4c5b8f74 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp @@ -275,11 +275,13 @@ public: } }; -RendererOpenGL::RendererOpenGL(Core::System& system_, Core::Frontend::EmuWindow& emu_window_, - Tegra::GPU& gpu_, - std::unique_ptr<Core::Frontend::GraphicsContext> context_) - : RendererBase{emu_window_, std::move(context_)}, system{system_}, - emu_window{emu_window_}, gpu{gpu_}, program_manager{device}, has_debug_tool{HasDebugTool()} {} +RendererOpenGL::RendererOpenGL(Core::TelemetrySession& telemetry_session_, + Core::Frontend::EmuWindow& emu_window_, + Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_, + std::unique_ptr<Core::Frontend::GraphicsContext> context) + : RendererBase{emu_window_, std::move(context)}, telemetry_session{telemetry_session_}, + emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, program_manager{device}, + has_debug_tool{HasDebugTool()} {} RendererOpenGL::~RendererOpenGL() = default; @@ -386,7 +388,7 @@ void RendererOpenGL::LoadFBToScreenInfo(const Tegra::FramebufferConfig& framebuf VideoCore::Surface::PixelFormatFromGPUPixelFormat(framebuffer.pixel_format)}; const u32 bytes_per_pixel{VideoCore::Surface::GetBytesPerPixel(pixel_format)}; const u64 size_in_bytes{framebuffer.stride * framebuffer.height * bytes_per_pixel}; - u8* const host_ptr{system.Memory().GetPointer(framebuffer_addr)}; + u8* const host_ptr{cpu_memory.GetPointer(framebuffer_addr)}; rasterizer->FlushRegion(ToCacheAddr(host_ptr), size_in_bytes); // TODO(Rodrigo): Read this from HLE @@ -471,7 +473,6 @@ void RendererOpenGL::AddTelemetryFields() { LOG_INFO(Render_OpenGL, "GL_VENDOR: {}", gpu_vendor); LOG_INFO(Render_OpenGL, "GL_RENDERER: {}", gpu_model); - auto& telemetry_session = system.TelemetrySession(); constexpr auto user_system = Common::Telemetry::FieldType::UserSystem; telemetry_session.AddField(user_system, "GPU_Vendor", gpu_vendor); telemetry_session.AddField(user_system, "GPU_Model", gpu_model); @@ -482,8 +483,8 @@ void RendererOpenGL::CreateRasterizer() { if (rasterizer) { return; } - rasterizer = std::make_unique<RasterizerOpenGL>(system, emu_window, device, screen_info, - program_manager, state_tracker); + rasterizer = std::make_unique<RasterizerOpenGL>(emu_window, gpu, cpu_memory, device, + screen_info, program_manager, state_tracker); } void RendererOpenGL::ConfigureFramebufferTexture(TextureInfo& texture, diff --git a/src/video_core/renderer_opengl/renderer_opengl.h b/src/video_core/renderer_opengl/renderer_opengl.h index 52ea76b7d..5329577fb 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.h +++ b/src/video_core/renderer_opengl/renderer_opengl.h @@ -16,16 +16,25 @@ namespace Core { class System; -} +class TelemetrySession; +} // namespace Core namespace Core::Frontend { class EmuWindow; } +namespace Core::Memory { +class Memory; +} + namespace 
Layout { struct FramebufferLayout; } +namespace Tegra { +class GPU; +} + namespace OpenGL { /// Structure used for storing information about the textures for the Switch screen @@ -56,7 +65,8 @@ class FrameMailbox; class RendererOpenGL final : public VideoCore::RendererBase { public: - explicit RendererOpenGL(Core::System& system, Core::Frontend::EmuWindow& emu_window, + explicit RendererOpenGL(Core::TelemetrySession& telemetry_session, + Core::Frontend::EmuWindow& emu_window, Core::Memory::Memory& cpu_memory, Tegra::GPU& gpu, std::unique_ptr<Core::Frontend::GraphicsContext> context); ~RendererOpenGL() override; @@ -94,12 +104,13 @@ private: bool Present(int timeout_ms); - Core::System& system; + Core::TelemetrySession& telemetry_session; Core::Frontend::EmuWindow& emu_window; + Core::Memory::Memory& cpu_memory; Tegra::GPU& gpu; - const Device device; - StateTracker state_tracker{system}; + const Device device; + StateTracker state_tracker{gpu}; // OpenGL object IDs OGLBuffer vertex_buffer; diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp index f8c77f4fa..d22de1d81 100644 --- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp +++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp @@ -78,9 +78,10 @@ VkSamplerAddressMode WrapMode(const VKDevice& device, Tegra::Texture::WrapMode w case Tegra::Texture::WrapMode::MirrorOnceBorder: UNIMPLEMENTED(); return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; + default: + UNIMPLEMENTED_MSG("Unimplemented wrap mode={}", static_cast<u32>(wrap_mode)); + return {}; } - UNIMPLEMENTED_MSG("Unimplemented wrap mode={}", static_cast<u32>(wrap_mode)); - return {}; } VkCompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func) { @@ -298,9 +299,10 @@ VkPrimitiveTopology PrimitiveTopology([[maybe_unused]] const VKDevice& device, return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; case Maxwell::PrimitiveTopology::Patches: return VK_PRIMITIVE_TOPOLOGY_PATCH_LIST; + default: + UNIMPLEMENTED_MSG("Unimplemented topology={}", static_cast<u32>(topology)); + return {}; } - UNIMPLEMENTED_MSG("Unimplemented topology={}", static_cast<u32>(topology)); - return {}; } VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size) { @@ -325,6 +327,8 @@ VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttrib return VK_FORMAT_R16G16B16A16_UNORM; case Maxwell::VertexAttribute::Size::Size_10_10_10_2: return VK_FORMAT_A2B10G10R10_UNORM_PACK32; + default: + break; } break; case Maxwell::VertexAttribute::Type::SignedNorm: @@ -347,6 +351,8 @@ VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttrib return VK_FORMAT_R16G16B16A16_SNORM; case Maxwell::VertexAttribute::Size::Size_10_10_10_2: return VK_FORMAT_A2B10G10R10_SNORM_PACK32; + default: + break; } break; case Maxwell::VertexAttribute::Type::UnsignedScaled: @@ -369,6 +375,8 @@ VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttrib return VK_FORMAT_R16G16B16A16_USCALED; case Maxwell::VertexAttribute::Size::Size_10_10_10_2: return VK_FORMAT_A2B10G10R10_USCALED_PACK32; + default: + break; } break; case Maxwell::VertexAttribute::Type::SignedScaled: @@ -391,6 +399,8 @@ VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttrib return VK_FORMAT_R16G16B16A16_SSCALED; case Maxwell::VertexAttribute::Size::Size_10_10_10_2: return VK_FORMAT_A2B10G10R10_SSCALED_PACK32; + default: + break; } break; case 
Maxwell::VertexAttribute::Type::UnsignedInt: @@ -421,6 +431,8 @@ VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttrib return VK_FORMAT_R32G32B32A32_UINT; case Maxwell::VertexAttribute::Size::Size_10_10_10_2: return VK_FORMAT_A2B10G10R10_UINT_PACK32; + default: + break; } break; case Maxwell::VertexAttribute::Type::SignedInt: @@ -451,6 +463,8 @@ VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttrib return VK_FORMAT_R32G32B32A32_SINT; case Maxwell::VertexAttribute::Size::Size_10_10_10_2: return VK_FORMAT_A2B10G10R10_SINT_PACK32; + default: + break; } break; case Maxwell::VertexAttribute::Type::Float: @@ -471,6 +485,8 @@ VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttrib return VK_FORMAT_R32G32B32_SFLOAT; case Maxwell::VertexAttribute::Size::Size_32_32_32_32: return VK_FORMAT_R32G32B32A32_SFLOAT; + default: + break; } break; } diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp index ae46e0444..0e4583986 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp +++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp @@ -86,7 +86,7 @@ Common::DynamicLibrary OpenVulkanLibrary() { if (!library.Open(filename.c_str())) { // Android devices may not have libvulkan.so.1, only libvulkan.so. filename = Common::DynamicLibrary::GetVersionedFilename("vulkan"); - library.Open(filename.c_str()); + (void)library.Open(filename.c_str()); } #endif return library; @@ -237,10 +237,12 @@ std::string BuildCommaSeparatedExtensions(std::vector<std::string> available_ext } // Anonymous namespace -RendererVulkan::RendererVulkan(Core::System& system_, Core::Frontend::EmuWindow& emu_window, - Tegra::GPU& gpu_, +RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_, + Core::Frontend::EmuWindow& emu_window, + Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_, std::unique_ptr<Core::Frontend::GraphicsContext> context) - : RendererBase{emu_window, std::move(context)}, system{system_}, gpu{gpu_} {} + : RendererBase{emu_window, std::move(context)}, telemetry_session{telemetry_session_}, + cpu_memory{cpu_memory_}, gpu{gpu_} {} RendererVulkan::~RendererVulkan() { ShutDown(); @@ -304,15 +306,15 @@ bool RendererVulkan::Init() { swapchain = std::make_unique<VKSwapchain>(*surface, *device); swapchain->Create(framebuffer.width, framebuffer.height, false); - state_tracker = std::make_unique<StateTracker>(system); + state_tracker = std::make_unique<StateTracker>(gpu); scheduler = std::make_unique<VKScheduler>(*device, *resource_manager, *state_tracker); - rasterizer = std::make_unique<RasterizerVulkan>(system, render_window, screen_info, *device, - *resource_manager, *memory_manager, - *state_tracker, *scheduler); + rasterizer = std::make_unique<RasterizerVulkan>( + render_window, gpu, gpu.MemoryManager(), cpu_memory, screen_info, *device, + *resource_manager, *memory_manager, *state_tracker, *scheduler); - blit_screen = std::make_unique<VKBlitScreen>(system, render_window, *rasterizer, *device, + blit_screen = std::make_unique<VKBlitScreen>(cpu_memory, render_window, *rasterizer, *device, *resource_manager, *memory_manager, *swapchain, *scheduler, screen_info); @@ -440,8 +442,7 @@ void RendererVulkan::Report() const { LOG_INFO(Render_Vulkan, "Device: {}", model_name); LOG_INFO(Render_Vulkan, "Vulkan: {}", api_version); - auto& telemetry_session = system.TelemetrySession(); - constexpr auto field = Common::Telemetry::FieldType::UserSystem; + static constexpr 
auto field = Common::Telemetry::FieldType::UserSystem; telemetry_session.AddField(field, "GPU_Vendor", vendor_name); telemetry_session.AddField(field, "GPU_Model", model_name); telemetry_session.AddField(field, "GPU_Vulkan_Driver", driver_name); diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.h b/src/video_core/renderer_vulkan/renderer_vulkan.h index 13debbbc0..ddff77942 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.h +++ b/src/video_core/renderer_vulkan/renderer_vulkan.h @@ -14,7 +14,15 @@ #include "video_core/renderer_vulkan/wrapper.h" namespace Core { -class System; +class TelemetrySession; +} + +namespace Core::Memory { +class Memory; +} + +namespace Tegra { +class GPU; } namespace Vulkan { @@ -38,7 +46,8 @@ struct VKScreenInfo { class RendererVulkan final : public VideoCore::RendererBase { public: - explicit RendererVulkan(Core::System& system, Core::Frontend::EmuWindow& emu_window, + explicit RendererVulkan(Core::TelemetrySession& telemtry_session, + Core::Frontend::EmuWindow& emu_window, Core::Memory::Memory& cpu_memory, Tegra::GPU& gpu, std::unique_ptr<Core::Frontend::GraphicsContext> context); ~RendererVulkan() override; @@ -59,7 +68,8 @@ private: void Report() const; - Core::System& system; + Core::TelemetrySession& telemetry_session; + Core::Memory::Memory& cpu_memory; Tegra::GPU& gpu; Common::DynamicLibrary library; diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp index a551e3de8..2bea7b24d 100644 --- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp +++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp @@ -210,14 +210,16 @@ struct VKBlitScreen::BufferData { // Unaligned image data goes here }; -VKBlitScreen::VKBlitScreen(Core::System& system, Core::Frontend::EmuWindow& render_window, - VideoCore::RasterizerInterface& rasterizer, const VKDevice& device, - VKResourceManager& resource_manager, VKMemoryManager& memory_manager, - VKSwapchain& swapchain, VKScheduler& scheduler, - const VKScreenInfo& screen_info) - : system{system}, render_window{render_window}, rasterizer{rasterizer}, device{device}, - resource_manager{resource_manager}, memory_manager{memory_manager}, swapchain{swapchain}, - scheduler{scheduler}, image_count{swapchain.GetImageCount()}, screen_info{screen_info} { +VKBlitScreen::VKBlitScreen(Core::Memory::Memory& cpu_memory_, + Core::Frontend::EmuWindow& render_window_, + VideoCore::RasterizerInterface& rasterizer_, const VKDevice& device_, + VKResourceManager& resource_manager_, VKMemoryManager& memory_manager_, + VKSwapchain& swapchain_, VKScheduler& scheduler_, + const VKScreenInfo& screen_info_) + : cpu_memory{cpu_memory_}, render_window{render_window_}, + rasterizer{rasterizer_}, device{device_}, resource_manager{resource_manager_}, + memory_manager{memory_manager_}, swapchain{swapchain_}, scheduler{scheduler_}, + image_count{swapchain.GetImageCount()}, screen_info{screen_info_} { watches.resize(image_count); std::generate(watches.begin(), watches.end(), []() { return std::make_unique<VKFenceWatch>(); }); @@ -259,7 +261,7 @@ std::tuple<VKFence&, VkSemaphore> VKBlitScreen::Draw(const Tegra::FramebufferCon const auto pixel_format = VideoCore::Surface::PixelFormatFromGPUPixelFormat(framebuffer.pixel_format); const VAddr framebuffer_addr = framebuffer.address + framebuffer.offset; - const auto host_ptr = system.Memory().GetPointer(framebuffer_addr); + const auto host_ptr = cpu_memory.GetPointer(framebuffer_addr); rasterizer.FlushRegion(ToCacheAddr(host_ptr), 
GetSizeInBytes(framebuffer)); // TODO(Rodrigo): Read this from HLE diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h index 243640fab..838d38f69 100644 --- a/src/video_core/renderer_vulkan/vk_blit_screen.h +++ b/src/video_core/renderer_vulkan/vk_blit_screen.h @@ -15,6 +15,10 @@ namespace Core { class System; } +namespace Core::Memory { +class Memory; +} + namespace Core::Frontend { class EmuWindow; } @@ -39,7 +43,8 @@ class VKSwapchain; class VKBlitScreen final { public: - explicit VKBlitScreen(Core::System& system, Core::Frontend::EmuWindow& render_window, + explicit VKBlitScreen(Core::Memory::Memory& cpu_memory, + Core::Frontend::EmuWindow& render_window, VideoCore::RasterizerInterface& rasterizer, const VKDevice& device, VKResourceManager& resource_manager, VKMemoryManager& memory_manager, VKSwapchain& swapchain, VKScheduler& scheduler, @@ -81,7 +86,7 @@ private: u64 GetRawImageOffset(const Tegra::FramebufferConfig& framebuffer, std::size_t image_index) const; - Core::System& system; + Core::Memory::Memory& cpu_memory; Core::Frontend::EmuWindow& render_window; VideoCore::RasterizerInterface& rasterizer; const VKDevice& device; diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp index 1d2f8b557..d9d3da9ea 100644 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp @@ -145,14 +145,15 @@ void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst }); } -VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system, - const VKDevice& device, VKMemoryManager& memory_manager, - VKScheduler& scheduler, VKStagingBufferPool& staging_pool) - : VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer>{rasterizer, system, - CreateStreamBuffer(device, - scheduler)}, - device{device}, memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{ - staging_pool} {} +VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer, + Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory, + const VKDevice& device_, VKMemoryManager& memory_manager_, + VKScheduler& scheduler_, VKStagingBufferPool& staging_pool_) + : VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer>{rasterizer, gpu_memory, cpu_memory, + CreateStreamBuffer(device_, + scheduler_)}, + device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_}, staging_pool{ + staging_pool_} {} VKBufferCache::~VKBufferCache() = default; diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h index 991ee451c..7fb5ceedf 100644 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.h +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h @@ -13,10 +13,6 @@ #include "video_core/renderer_vulkan/vk_stream_buffer.h" #include "video_core/renderer_vulkan/wrapper.h" -namespace Core { -class System; -} - namespace Vulkan { class VKDevice; @@ -53,7 +49,8 @@ private: class VKBufferCache final : public VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer> { public: - explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system, + explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer, + Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory, const VKDevice& device, VKMemoryManager& memory_manager, VKScheduler& scheduler, VKStagingBufferPool& staging_pool); ~VKBufferCache(); diff 
--git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp index d7f65d435..55a8348fc 100644 --- a/src/video_core/renderer_vulkan/vk_fence_manager.cpp +++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp @@ -71,12 +71,12 @@ bool InnerFence::IsEventSignalled() const { } } -VKFenceManager::VKFenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer, - const VKDevice& device, VKScheduler& scheduler, - VKTextureCache& texture_cache, VKBufferCache& buffer_cache, - VKQueryCache& query_cache) - : GenericFenceManager(system, rasterizer, texture_cache, buffer_cache, query_cache), - device{device}, scheduler{scheduler} {} +VKFenceManager::VKFenceManager(VideoCore::RasterizerInterface& rasterizer, Tegra::GPU& gpu, + Tegra::MemoryManager& memory_manager, VKTextureCache& texture_cache, + VKBufferCache& buffer_cache, VKQueryCache& query_cache, + const VKDevice& device_, VKScheduler& scheduler_) + : GenericFenceManager(rasterizer, gpu, texture_cache, buffer_cache, query_cache), + device{device_}, scheduler{scheduler_} {} Fence VKFenceManager::CreateFence(u32 value, bool is_stubbed) { return std::make_shared<InnerFence>(device, scheduler, value, is_stubbed); diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h index 043fe7947..1547d6d30 100644 --- a/src/video_core/renderer_vulkan/vk_fence_manager.h +++ b/src/video_core/renderer_vulkan/vk_fence_manager.h @@ -55,10 +55,10 @@ using GenericFenceManager = class VKFenceManager final : public GenericFenceManager { public: - explicit VKFenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer, - const VKDevice& device, VKScheduler& scheduler, - VKTextureCache& texture_cache, VKBufferCache& buffer_cache, - VKQueryCache& query_cache); + explicit VKFenceManager(VideoCore::RasterizerInterface& rasterizer, Tegra::GPU& gpu, + Tegra::MemoryManager& memory_manager, VKTextureCache& texture_cache, + VKBufferCache& buffer_cache, VKQueryCache& query_cache, + const VKDevice& device, VKScheduler& scheduler); protected: Fence CreateFence(u32 value, bool is_stubbed) override; diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp index cfdcdd6ab..5c038f4bc 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp @@ -135,64 +135,56 @@ bool ComputePipelineCacheKey::operator==(const ComputePipelineCacheKey& rhs) con return std::memcmp(&rhs, this, sizeof *this) == 0; } -Shader::Shader(Core::System& system, Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr, - VideoCommon::Shader::ProgramCode program_code, u32 main_offset) - : gpu_addr{gpu_addr}, program_code{std::move(program_code)}, - registry{stage, GetEngine(system, stage)}, shader_ir{this->program_code, main_offset, - compiler_settings, registry}, - entries{GenerateShaderEntries(shader_ir)} {} +Shader::Shader(Tegra::Engines::ConstBufferEngineInterface& engine, Tegra::Engines::ShaderType stage, + GPUVAddr gpu_addr_, VAddr cpu_addr, VideoCommon::Shader::ProgramCode program_code_, + u32 main_offset) + : gpu_addr(gpu_addr_), program_code(std::move(program_code_)), registry(stage, engine), + shader_ir(program_code, main_offset, compiler_settings, registry), + entries(GenerateShaderEntries(shader_ir)) {} Shader::~Shader() = default; -Tegra::Engines::ConstBufferEngineInterface& Shader::GetEngine(Core::System& system, - 
Tegra::Engines::ShaderType stage) { - if (stage == ShaderType::Compute) { - return system.GPU().KeplerCompute(); - } else { - return system.GPU().Maxwell3D(); - } -} - -VKPipelineCache::VKPipelineCache(Core::System& system, RasterizerVulkan& rasterizer, - const VKDevice& device, VKScheduler& scheduler, - VKDescriptorPool& descriptor_pool, - VKUpdateDescriptorQueue& update_descriptor_queue, - VKRenderPassCache& renderpass_cache) - : VideoCommon::ShaderCache<Shader>{rasterizer}, system{system}, device{device}, - scheduler{scheduler}, descriptor_pool{descriptor_pool}, - update_descriptor_queue{update_descriptor_queue}, renderpass_cache{renderpass_cache} {} +VKPipelineCache::VKPipelineCache(RasterizerVulkan& rasterizer, Tegra::GPU& gpu_, + Tegra::Engines::Maxwell3D& maxwell3d_, + Tegra::Engines::KeplerCompute& kepler_compute_, + Tegra::MemoryManager& gpu_memory_, const VKDevice& device_, + VKScheduler& scheduler_, VKDescriptorPool& descriptor_pool_, + VKUpdateDescriptorQueue& update_descriptor_queue_, + VKRenderPassCache& renderpass_cache_) + : VideoCommon::ShaderCache<Shader>{rasterizer}, gpu{gpu_}, maxwell3d{maxwell3d_}, + kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, device{device_}, + scheduler{scheduler_}, descriptor_pool{descriptor_pool_}, + update_descriptor_queue{update_descriptor_queue_}, renderpass_cache{renderpass_cache_} {} VKPipelineCache::~VKPipelineCache() = default; std::array<Shader*, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() { - const auto& gpu = system.GPU().Maxwell3D(); - std::array<Shader*, Maxwell::MaxShaderProgram> shaders{}; + for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) { const auto program{static_cast<Maxwell::ShaderProgram>(index)}; // Skip stages that are not enabled - if (!gpu.regs.IsShaderConfigEnabled(index)) { + if (!maxwell3d.regs.IsShaderConfigEnabled(index)) { continue; } - auto& memory_manager{system.GPU().MemoryManager()}; - const GPUVAddr program_addr{GetShaderAddress(system, program)}; - const std::optional cpu_addr = memory_manager.GpuToCpuAddress(program_addr); + const GPUVAddr gpu_addr{GetShaderAddress(maxwell3d, program)}; + const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); ASSERT(cpu_addr); Shader* result = cpu_addr ? TryGet(*cpu_addr) : null_shader.get(); if (!result) { - const auto host_ptr{memory_manager.GetPointer(program_addr)}; + const u8* const host_ptr{gpu_memory.GetPointer(gpu_addr)}; // No shader found - create a new one - constexpr u32 stage_offset = STAGE_MAIN_OFFSET; + static constexpr u32 stage_offset = STAGE_MAIN_OFFSET; const auto stage = static_cast<ShaderType>(index == 0 ? 
0 : index - 1); - ProgramCode code = GetShaderCode(memory_manager, program_addr, host_ptr, false); + ProgramCode code = GetShaderCode(gpu_memory, gpu_addr, host_ptr, false); const std::size_t size_in_bytes = code.size() * sizeof(u64); - auto shader = std::make_unique<Shader>(system, stage, program_addr, std::move(code), - stage_offset); + auto shader = std::make_unique<Shader>(maxwell3d, stage, gpu_addr, *cpu_addr, + std::move(code), stage_offset); result = shader.get(); if (cpu_addr) { @@ -215,11 +207,11 @@ VKGraphicsPipeline* VKPipelineCache::GetGraphicsPipeline( } last_graphics_key = key; - if (device.UseAsynchronousShaders() && async_shaders.IsShaderAsync(system.GPU())) { + if (device.UseAsynchronousShaders() && async_shaders.IsShaderAsync(gpu)) { std::unique_lock lock{pipeline_cache}; const auto [pair, is_cache_miss] = graphics_cache.try_emplace(key); if (is_cache_miss) { - system.GPU().ShaderNotify().MarkSharderBuilding(); + gpu.ShaderNotify().MarkSharderBuilding(); LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash()); const auto [program, bindings] = DecompileShaders(key.fixed_state); async_shaders.QueueVulkanShader(this, device, scheduler, descriptor_pool, @@ -233,13 +225,13 @@ VKGraphicsPipeline* VKPipelineCache::GetGraphicsPipeline( const auto [pair, is_cache_miss] = graphics_cache.try_emplace(key); auto& entry = pair->second; if (is_cache_miss) { - system.GPU().ShaderNotify().MarkSharderBuilding(); + gpu.ShaderNotify().MarkSharderBuilding(); LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash()); const auto [program, bindings] = DecompileShaders(key.fixed_state); entry = std::make_unique<VKGraphicsPipeline>(device, scheduler, descriptor_pool, update_descriptor_queue, renderpass_cache, key, bindings, program); - system.GPU().ShaderNotify().MarkShaderComplete(); + gpu.ShaderNotify().MarkShaderComplete(); } last_graphics_pipeline = entry.get(); return last_graphics_pipeline; @@ -255,22 +247,21 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach } LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash()); - auto& memory_manager = system.GPU().MemoryManager(); - const auto program_addr = key.shader; + const GPUVAddr gpu_addr = key.shader; - const auto cpu_addr = memory_manager.GpuToCpuAddress(program_addr); + const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); ASSERT(cpu_addr); Shader* shader = cpu_addr ? 
TryGet(*cpu_addr) : null_kernel.get(); if (!shader) { // No shader found - create a new one - const auto host_ptr = memory_manager.GetPointer(program_addr); + const auto host_ptr = gpu_memory.GetPointer(gpu_addr); - ProgramCode code = GetShaderCode(memory_manager, program_addr, host_ptr, true); + ProgramCode code = GetShaderCode(gpu_memory, gpu_addr, host_ptr, true); const std::size_t size_in_bytes = code.size() * sizeof(u64); - auto shader_info = std::make_unique<Shader>(system, ShaderType::Compute, program_addr, - std::move(code), KERNEL_MAIN_OFFSET); + auto shader_info = std::make_unique<Shader>(kepler_compute, ShaderType::Compute, gpu_addr, + *cpu_addr, std::move(code), KERNEL_MAIN_OFFSET); shader = shader_info.get(); if (cpu_addr) { @@ -298,7 +289,7 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach } void VKPipelineCache::EmplacePipeline(std::unique_ptr<VKGraphicsPipeline> pipeline) { - system.GPU().ShaderNotify().MarkShaderComplete(); + gpu.ShaderNotify().MarkShaderComplete(); std::unique_lock lock{pipeline_cache}; graphics_cache.at(pipeline->GetCacheKey()) = std::move(pipeline); } @@ -339,9 +330,6 @@ void VKPipelineCache::OnShaderRemoval(Shader* shader) { std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>> VKPipelineCache::DecompileShaders(const FixedPipelineState& fixed_state) { - auto& memory_manager = system.GPU().MemoryManager(); - const auto& gpu = system.GPU().Maxwell3D(); - Specialization specialization; if (fixed_state.dynamic_state.Topology() == Maxwell::PrimitiveTopology::Points || device.IsExtExtendedDynamicStateSupported()) { @@ -364,12 +352,12 @@ VKPipelineCache::DecompileShaders(const FixedPipelineState& fixed_state) { const auto program_enum = static_cast<Maxwell::ShaderProgram>(index); // Skip stages that are not enabled - if (!gpu.regs.IsShaderConfigEnabled(index)) { + if (!maxwell3d.regs.IsShaderConfigEnabled(index)) { continue; } - const GPUVAddr gpu_addr = GetShaderAddress(system, program_enum); - const std::optional<VAddr> cpu_addr = memory_manager.GpuToCpuAddress(gpu_addr); + const GPUVAddr gpu_addr = GetShaderAddress(maxwell3d, program_enum); + const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); Shader* const shader = cpu_addr ? TryGet(*cpu_addr) : null_shader.get(); const std::size_t stage = index == 0 ? 
0 : index - 1; // Stage indices are 0 - 5 diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h index c04829e77..1a31fd9f6 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h @@ -85,7 +85,8 @@ namespace Vulkan { class Shader { public: - explicit Shader(Core::System& system, Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr, + explicit Shader(Tegra::Engines::ConstBufferEngineInterface& engine, + Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr, VAddr cpu_addr, VideoCommon::Shader::ProgramCode program_code, u32 main_offset); ~Shader(); @@ -97,22 +98,19 @@ public: return shader_ir; } - const VideoCommon::Shader::Registry& GetRegistry() const { - return registry; - } - const VideoCommon::Shader::ShaderIR& GetIR() const { return shader_ir; } + const VideoCommon::Shader::Registry& GetRegistry() const { + return registry; + } + const ShaderEntries& GetEntries() const { return entries; } private: - static Tegra::Engines::ConstBufferEngineInterface& GetEngine(Core::System& system, - Tegra::Engines::ShaderType stage); - GPUVAddr gpu_addr{}; VideoCommon::Shader::ProgramCode program_code; VideoCommon::Shader::Registry registry; @@ -122,9 +120,11 @@ private: class VKPipelineCache final : public VideoCommon::ShaderCache<Shader> { public: - explicit VKPipelineCache(Core::System& system, RasterizerVulkan& rasterizer, - const VKDevice& device, VKScheduler& scheduler, - VKDescriptorPool& descriptor_pool, + explicit VKPipelineCache(RasterizerVulkan& rasterizer, Tegra::GPU& gpu, + Tegra::Engines::Maxwell3D& maxwell3d, + Tegra::Engines::KeplerCompute& kepler_compute, + Tegra::MemoryManager& gpu_memory, const VKDevice& device, + VKScheduler& scheduler, VKDescriptorPool& descriptor_pool, VKUpdateDescriptorQueue& update_descriptor_queue, VKRenderPassCache& renderpass_cache); ~VKPipelineCache() override; @@ -145,7 +145,11 @@ private: std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>> DecompileShaders( const FixedPipelineState& fixed_state); - Core::System& system; + Tegra::GPU& gpu; + Tegra::Engines::Maxwell3D& maxwell3d; + Tegra::Engines::KeplerCompute& kepler_compute; + Tegra::MemoryManager& gpu_memory; + const VKDevice& device; VKScheduler& scheduler; VKDescriptorPool& descriptor_pool; diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp index 6cd63d090..5a97c959d 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp @@ -68,10 +68,11 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) { usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false; } -VKQueryCache::VKQueryCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer, +VKQueryCache::VKQueryCache(VideoCore::RasterizerInterface& rasterizer, + Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory, const VKDevice& device, VKScheduler& scheduler) : VideoCommon::QueryCacheBase<VKQueryCache, CachedQuery, CounterStream, HostCounter, - QueryPool>{system, rasterizer}, + QueryPool>{rasterizer, maxwell3d, gpu_memory}, device{device}, scheduler{scheduler} { for (std::size_t i = 0; i < static_cast<std::size_t>(VideoCore::NumQueryTypes); ++i) { query_pools[i].Initialize(device, static_cast<VideoCore::QueryType>(i)); diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h index 
40119e6d3..9be996e55 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.h +++ b/src/video_core/renderer_vulkan/vk_query_cache.h @@ -56,7 +56,8 @@ class VKQueryCache final : public VideoCommon::QueryCacheBase<VKQueryCache, CachedQuery, CounterStream, HostCounter, QueryPool> { public: - explicit VKQueryCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer, + explicit VKQueryCache(VideoCore::RasterizerInterface& rasterizer, + Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory, const VKDevice& device, VKScheduler& scheduler); ~VKQueryCache(); diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index ff1b52eab..bafebe294 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -381,28 +381,30 @@ void RasterizerVulkan::DrawParameters::Draw(vk::CommandBuffer cmdbuf) const { } } -RasterizerVulkan::RasterizerVulkan(Core::System& system, Core::Frontend::EmuWindow& renderer, - VKScreenInfo& screen_info, const VKDevice& device, - VKResourceManager& resource_manager, - VKMemoryManager& memory_manager, StateTracker& state_tracker, - VKScheduler& scheduler) - : RasterizerAccelerated{system.Memory()}, system{system}, render_window{renderer}, - screen_info{screen_info}, device{device}, resource_manager{resource_manager}, - memory_manager{memory_manager}, state_tracker{state_tracker}, scheduler{scheduler}, +RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu_, + Tegra::MemoryManager& gpu_memory_, + Core::Memory::Memory& cpu_memory, VKScreenInfo& screen_info_, + const VKDevice& device_, VKResourceManager& resource_manager_, + VKMemoryManager& memory_manager_, StateTracker& state_tracker_, + VKScheduler& scheduler_) + : RasterizerAccelerated(cpu_memory), gpu(gpu_), gpu_memory(gpu_memory_), + maxwell3d(gpu.Maxwell3D()), kepler_compute(gpu.KeplerCompute()), screen_info(screen_info_), + device(device_), resource_manager(resource_manager_), memory_manager(memory_manager_), + state_tracker(state_tracker_), scheduler(scheduler_), staging_pool(device, memory_manager, scheduler), descriptor_pool(device), update_descriptor_queue(device, scheduler), renderpass_cache(device), quad_array_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), quad_indexed_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), - texture_cache(system, *this, device, resource_manager, memory_manager, scheduler, - staging_pool), - pipeline_cache(system, *this, device, scheduler, descriptor_pool, update_descriptor_queue, - renderpass_cache), - buffer_cache(*this, system, device, memory_manager, scheduler, staging_pool), - sampler_cache(device), - fence_manager(system, *this, device, scheduler, texture_cache, buffer_cache, query_cache), - query_cache(system, *this, device, scheduler), - wfi_event{device.GetLogical().CreateNewEvent()}, async_shaders{renderer} { + texture_cache(*this, maxwell3d, gpu_memory, device, resource_manager, memory_manager, + scheduler, staging_pool), + pipeline_cache(*this, gpu, maxwell3d, kepler_compute, gpu_memory, device, scheduler, + descriptor_pool, update_descriptor_queue, renderpass_cache), + buffer_cache(*this, gpu_memory, cpu_memory, device, memory_manager, scheduler, staging_pool), + sampler_cache(device), query_cache(*this, maxwell3d, gpu_memory, device, scheduler), + 
fence_manager(*this, gpu, gpu_memory, texture_cache, buffer_cache, query_cache, device, + scheduler), + wfi_event(device.GetLogical().CreateNewEvent()), async_shaders(emu_window) { scheduler.SetQueryCache(query_cache); if (device.UseAsynchronousShaders()) { async_shaders.AllocateWorkers(); @@ -414,15 +416,13 @@ RasterizerVulkan::~RasterizerVulkan() = default; void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { MICROPROFILE_SCOPE(Vulkan_Drawing); + SCOPE_EXIT({ gpu.TickWork(); }); FlushWork(); query_cache.UpdateCounters(); - SCOPE_EXIT({ system.GPU().TickWork(); }); - - const auto& gpu = system.GPU().Maxwell3D(); GraphicsPipelineCacheKey key; - key.fixed_state.Fill(gpu.regs, device.IsExtExtendedDynamicStateSupported()); + key.fixed_state.Fill(maxwell3d.regs, device.IsExtExtendedDynamicStateSupported()); buffer_cache.Map(CalculateGraphicsStreamBufferSize(is_indexed)); @@ -480,8 +480,7 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { void RasterizerVulkan::Clear() { MICROPROFILE_SCOPE(Vulkan_Clearing); - const auto& gpu = system.GPU().Maxwell3D(); - if (!system.GPU().Maxwell3D().ShouldExecute()) { + if (!maxwell3d.ShouldExecute()) { return; } @@ -490,7 +489,7 @@ void RasterizerVulkan::Clear() { query_cache.UpdateCounters(); - const auto& regs = gpu.regs; + const auto& regs = maxwell3d.regs; const bool use_color = regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B || regs.clear_buffers.A; const bool use_depth = regs.clear_buffers.Z; @@ -559,7 +558,7 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) { query_cache.UpdateCounters(); - const auto& launch_desc = system.GPU().KeplerCompute().launch_description; + const auto& launch_desc = kepler_compute.launch_description; auto& pipeline = pipeline_cache.GetComputePipeline({ .shader = code_addr, .shared_memory_size = launch_desc.shared_alloc, @@ -655,16 +654,14 @@ void RasterizerVulkan::SyncGuestHost() { } void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) { - auto& gpu{system.GPU()}; if (!gpu.IsAsync()) { - gpu.MemoryManager().Write<u32>(addr, value); + gpu_memory.Write<u32>(addr, value); return; } fence_manager.SignalSemaphore(addr, value); } void RasterizerVulkan::SignalSyncPoint(u32 value) { - auto& gpu{system.GPU()}; if (!gpu.IsAsync()) { gpu.IncrementSyncPoint(value); return; @@ -673,7 +670,6 @@ void RasterizerVulkan::SignalSyncPoint(u32 value) { } void RasterizerVulkan::ReleaseFences() { - auto& gpu{system.GPU()}; if (!gpu.IsAsync()) { return; } @@ -751,10 +747,6 @@ bool RasterizerVulkan::AccelerateDisplay(const Tegra::FramebufferConfig& config, return true; } -void RasterizerVulkan::SetupDirtyFlags() { - state_tracker.Initialize(); -} - void RasterizerVulkan::FlushWork() { static constexpr u32 DRAWS_TO_DISPATCH = 4096; @@ -778,10 +770,9 @@ void RasterizerVulkan::FlushWork() { RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments(bool is_clear) { MICROPROFILE_SCOPE(Vulkan_RenderTargets); - auto& maxwell3d = system.GPU().Maxwell3D(); - auto& dirty = maxwell3d.dirty.flags; - auto& regs = maxwell3d.regs; + const auto& regs = maxwell3d.regs; + auto& dirty = maxwell3d.dirty.flags; const bool update_rendertargets = dirty[VideoCommon::Dirty::RenderTargets]; dirty[VideoCommon::Dirty::RenderTargets] = false; @@ -844,7 +835,7 @@ std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers( return true; }; - const auto& regs = system.GPU().Maxwell3D().regs; + const auto& regs = maxwell3d.regs; const std::size_t num_attachments = 
static_cast<std::size_t>(regs.rt_control.count); for (std::size_t index = 0; index < num_attachments; ++index) { if (try_push(color_attachments[index])) { @@ -880,13 +871,12 @@ RasterizerVulkan::DrawParameters RasterizerVulkan::SetupGeometry(FixedPipelineSt bool is_instanced) { MICROPROFILE_SCOPE(Vulkan_Geometry); - const auto& gpu = system.GPU().Maxwell3D(); - const auto& regs = gpu.regs; + const auto& regs = maxwell3d.regs; SetupVertexArrays(buffer_bindings); const u32 base_instance = regs.vb_base_instance; - const u32 num_instances = is_instanced ? gpu.mme_draw.instance_count : 1; + const u32 num_instances = is_instanced ? maxwell3d.mme_draw.instance_count : 1; const u32 base_vertex = is_indexed ? regs.vb_element_base : regs.vertex_buffer.first; const u32 num_vertices = is_indexed ? regs.index_array.count : regs.vertex_buffer.count; @@ -947,7 +937,7 @@ void RasterizerVulkan::SetupImageTransitions( } void RasterizerVulkan::UpdateDynamicStates() { - auto& regs = system.GPU().Maxwell3D().regs; + auto& regs = maxwell3d.regs; UpdateViewportsState(regs); UpdateScissorsState(regs); UpdateDepthBias(regs); @@ -968,7 +958,7 @@ void RasterizerVulkan::UpdateDynamicStates() { } void RasterizerVulkan::BeginTransformFeedback() { - const auto& regs = system.GPU().Maxwell3D().regs; + const auto& regs = maxwell3d.regs; if (regs.tfb_enabled == 0) { return; } @@ -1000,7 +990,7 @@ void RasterizerVulkan::BeginTransformFeedback() { } void RasterizerVulkan::EndTransformFeedback() { - const auto& regs = system.GPU().Maxwell3D().regs; + const auto& regs = maxwell3d.regs; if (regs.tfb_enabled == 0) { return; } @@ -1013,7 +1003,7 @@ void RasterizerVulkan::EndTransformFeedback() { } void RasterizerVulkan::SetupVertexArrays(BufferBindings& buffer_bindings) { - const auto& regs = system.GPU().Maxwell3D().regs; + const auto& regs = maxwell3d.regs; for (std::size_t index = 0; index < Maxwell::NumVertexArrays; ++index) { const auto& vertex_array = regs.vertex_array[index]; @@ -1039,7 +1029,7 @@ void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawPar if (params.num_vertices == 0) { return; } - const auto& regs = system.GPU().Maxwell3D().regs; + const auto& regs = maxwell3d.regs; switch (regs.draw.topology) { case Maxwell::PrimitiveTopology::Quads: { if (!params.is_indexed) { @@ -1087,8 +1077,7 @@ void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawPar void RasterizerVulkan::SetupGraphicsConstBuffers(const ShaderEntries& entries, std::size_t stage) { MICROPROFILE_SCOPE(Vulkan_ConstBuffers); - const auto& gpu = system.GPU().Maxwell3D(); - const auto& shader_stage = gpu.state.shader_stages[stage]; + const auto& shader_stage = maxwell3d.state.shader_stages[stage]; for (const auto& entry : entries.const_buffers) { SetupConstBuffer(entry, shader_stage.const_buffers[entry.GetIndex()]); } @@ -1096,8 +1085,7 @@ void RasterizerVulkan::SetupGraphicsConstBuffers(const ShaderEntries& entries, s void RasterizerVulkan::SetupGraphicsGlobalBuffers(const ShaderEntries& entries, std::size_t stage) { MICROPROFILE_SCOPE(Vulkan_GlobalBuffers); - auto& gpu{system.GPU()}; - const auto cbufs{gpu.Maxwell3D().state.shader_stages[stage]}; + const auto& cbufs{maxwell3d.state.shader_stages[stage]}; for (const auto& entry : entries.global_buffers) { const auto addr = cbufs.const_buffers[entry.GetCbufIndex()].address + entry.GetCbufOffset(); @@ -1107,19 +1095,17 @@ void RasterizerVulkan::SetupGraphicsGlobalBuffers(const ShaderEntries& entries, void RasterizerVulkan::SetupGraphicsUniformTexels(const 
                                              ShaderEntries& entries, std::size_t stage) {
     MICROPROFILE_SCOPE(Vulkan_Textures);
-    const auto& gpu = system.GPU().Maxwell3D();
     for (const auto& entry : entries.uniform_texels) {
-        const auto image = GetTextureInfo(gpu, entry, stage).tic;
+        const auto image = GetTextureInfo(maxwell3d, entry, stage).tic;
         SetupUniformTexels(image, entry);
     }
 }
 
 void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, std::size_t stage) {
     MICROPROFILE_SCOPE(Vulkan_Textures);
-    const auto& gpu = system.GPU().Maxwell3D();
     for (const auto& entry : entries.samplers) {
         for (std::size_t i = 0; i < entry.size; ++i) {
-            const auto texture = GetTextureInfo(gpu, entry, stage, i);
+            const auto texture = GetTextureInfo(maxwell3d, entry, stage, i);
             SetupTexture(texture, entry);
         }
     }
@@ -1127,25 +1113,23 @@ void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, std::
 void RasterizerVulkan::SetupGraphicsStorageTexels(const ShaderEntries& entries, std::size_t stage) {
     MICROPROFILE_SCOPE(Vulkan_Textures);
-    const auto& gpu = system.GPU().Maxwell3D();
     for (const auto& entry : entries.storage_texels) {
-        const auto image = GetTextureInfo(gpu, entry, stage).tic;
+        const auto image = GetTextureInfo(maxwell3d, entry, stage).tic;
         SetupStorageTexel(image, entry);
     }
 }
 
 void RasterizerVulkan::SetupGraphicsImages(const ShaderEntries& entries, std::size_t stage) {
     MICROPROFILE_SCOPE(Vulkan_Images);
-    const auto& gpu = system.GPU().Maxwell3D();
     for (const auto& entry : entries.images) {
-        const auto tic = GetTextureInfo(gpu, entry, stage).tic;
+        const auto tic = GetTextureInfo(maxwell3d, entry, stage).tic;
         SetupImage(tic, entry);
     }
 }
 
 void RasterizerVulkan::SetupComputeConstBuffers(const ShaderEntries& entries) {
     MICROPROFILE_SCOPE(Vulkan_ConstBuffers);
-    const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
+    const auto& launch_desc = kepler_compute.launch_description;
     for (const auto& entry : entries.const_buffers) {
         const auto& config = launch_desc.const_buffer_config[entry.GetIndex()];
         const std::bitset<8> mask = launch_desc.const_buffer_enable_mask.Value();
@@ -1159,7 +1143,7 @@ void RasterizerVulkan::SetupComputeConstBuffers(const ShaderEntries& entries) {
 
 void RasterizerVulkan::SetupComputeGlobalBuffers(const ShaderEntries& entries) {
     MICROPROFILE_SCOPE(Vulkan_GlobalBuffers);
-    const auto cbufs{system.GPU().KeplerCompute().launch_description.const_buffer_config};
+    const auto& cbufs{kepler_compute.launch_description.const_buffer_config};
     for (const auto& entry : entries.global_buffers) {
         const auto addr{cbufs[entry.GetCbufIndex()].Address() + entry.GetCbufOffset()};
         SetupGlobalBuffer(entry, addr);
@@ -1168,19 +1152,17 @@ void RasterizerVulkan::SetupComputeGlobalBuffers(const ShaderEntries& entries) {
 
 void RasterizerVulkan::SetupComputeUniformTexels(const ShaderEntries& entries) {
     MICROPROFILE_SCOPE(Vulkan_Textures);
-    const auto& gpu = system.GPU().KeplerCompute();
     for (const auto& entry : entries.uniform_texels) {
-        const auto image = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic;
+        const auto image = GetTextureInfo(kepler_compute, entry, ComputeShaderIndex).tic;
         SetupUniformTexels(image, entry);
     }
 }
 
 void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
     MICROPROFILE_SCOPE(Vulkan_Textures);
-    const auto& gpu = system.GPU().KeplerCompute();
     for (const auto& entry : entries.samplers) {
         for (std::size_t i = 0; i < entry.size; ++i) {
-            const auto texture = GetTextureInfo(gpu, entry, ComputeShaderIndex, i);
+            const auto texture = GetTextureInfo(kepler_compute, entry, ComputeShaderIndex, i);
             SetupTexture(texture, entry);
         }
     }
@@ -1188,18 +1170,16 @@ void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
 
 void RasterizerVulkan::SetupComputeStorageTexels(const ShaderEntries& entries) {
     MICROPROFILE_SCOPE(Vulkan_Textures);
-    const auto& gpu = system.GPU().KeplerCompute();
     for (const auto& entry : entries.storage_texels) {
-        const auto image = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic;
+        const auto image = GetTextureInfo(kepler_compute, entry, ComputeShaderIndex).tic;
         SetupStorageTexel(image, entry);
     }
 }
 
 void RasterizerVulkan::SetupComputeImages(const ShaderEntries& entries) {
     MICROPROFILE_SCOPE(Vulkan_Images);
-    const auto& gpu = system.GPU().KeplerCompute();
     for (const auto& entry : entries.images) {
-        const auto tic = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic;
+        const auto tic = GetTextureInfo(kepler_compute, entry, ComputeShaderIndex).tic;
         SetupImage(tic, entry);
     }
 }
@@ -1223,9 +1203,8 @@ void RasterizerVulkan::SetupComputeImages(const ShaderEntries& entries) {
 }
 
 void RasterizerVulkan::SetupConstBuffer(const ConstBufferEntry& entry,
 }
 
 void RasterizerVulkan::SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAddr address) {
-    auto& memory_manager{system.GPU().MemoryManager()};
-    const auto actual_addr = memory_manager.Read<u64>(address);
-    const auto size = memory_manager.Read<u32>(address + 8);
+    const u64 actual_addr = gpu_memory.Read<u64>(address);
+    const u32 size = gpu_memory.Read<u32>(address + 8);
 
     if (size == 0) {
         // Sometimes global memory pointers don't have a proper size. Upload a dummy entry
@@ -1508,7 +1487,7 @@ std::size_t RasterizerVulkan::CalculateComputeStreamBufferSize() const {
 }
 
 std::size_t RasterizerVulkan::CalculateVertexArraysSize() const {
-    const auto& regs = system.GPU().Maxwell3D().regs;
+    const auto& regs = maxwell3d.regs;
 
     std::size_t size = 0;
     for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) {
@@ -1523,9 +1502,8 @@ std::size_t RasterizerVulkan::CalculateVertexArraysSize() const {
 }
 
 std::size_t RasterizerVulkan::CalculateIndexBufferSize() const {
-    const auto& regs = system.GPU().Maxwell3D().regs;
-    return static_cast<std::size_t>(regs.index_array.count) *
-           static_cast<std::size_t>(regs.index_array.FormatSizeInBytes());
+    return static_cast<std::size_t>(maxwell3d.regs.index_array.count) *
+           static_cast<std::size_t>(maxwell3d.regs.index_array.FormatSizeInBytes());
 }
 
 std::size_t RasterizerVulkan::CalculateConstBufferSize(
@@ -1540,7 +1518,7 @@ std::size_t RasterizerVulkan::CalculateConstBufferSize(
 }
 
 RenderPassParams RasterizerVulkan::GetRenderPassParams(Texceptions texceptions) const {
-    const auto& regs = system.GPU().Maxwell3D().regs;
+    const auto& regs = maxwell3d.regs;
     const std::size_t num_attachments = static_cast<std::size_t>(regs.rt_control.count);
 
     RenderPassParams params;
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index f640ba649..16251d0f6 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -106,7 +106,8 @@ struct ImageView {
 
 class RasterizerVulkan final : public VideoCore::RasterizerAccelerated {
 public:
-    explicit RasterizerVulkan(Core::System& system, Core::Frontend::EmuWindow& render_window,
+    explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu,
+                              Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory,
                               VKScreenInfo& screen_info, const VKDevice& device,
                               VKResourceManager& resource_manager, VKMemoryManager& memory_manager,
                               StateTracker& state_tracker, VKScheduler& scheduler);
@@ -135,7 +136,6 @@ public:
                        const Tegra::Engines::Fermi2D::Config& copy_config) override;
     bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
                            u32 pixel_stride) override;
-    void SetupDirtyFlags() override;
 
     VideoCommon::Shader::AsyncShaders& GetAsyncShaders() {
         return async_shaders;
     }
@@ -279,8 +279,11 @@ private:
     VkBuffer DefaultBuffer();
 
-    Core::System& system;
-    Core::Frontend::EmuWindow& render_window;
+    Tegra::GPU& gpu;
+    Tegra::MemoryManager& gpu_memory;
+    Tegra::Engines::Maxwell3D& maxwell3d;
+    Tegra::Engines::KeplerCompute& kepler_compute;
+
     VKScreenInfo& screen_info;
     const VKDevice& device;
     VKResourceManager& resource_manager;
@@ -300,8 +303,8 @@ private:
     VKPipelineCache pipeline_cache;
     VKBufferCache buffer_cache;
     VKSamplerCache sampler_cache;
-    VKFenceManager fence_manager;
     VKQueryCache query_cache;
+    VKFenceManager fence_manager;
 
     vk::Buffer default_buffer;
     VKMemoryCommit default_buffer_commit;
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
index 4bd1009f9..5d2c4a796 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
@@ -132,12 +132,9 @@ void SetupDirtyStencilTestEnable(Tables& tables) {
 
 } // Anonymous namespace
 
-StateTracker::StateTracker(Core::System& system)
-    : system{system}, invalidation_flags{MakeInvalidationFlags()} {}
-
-void StateTracker::Initialize() {
-    auto& dirty = system.GPU().Maxwell3D().dirty;
-    auto& tables = dirty.tables;
+StateTracker::StateTracker(Tegra::GPU& gpu)
+    : flags{gpu.Maxwell3D().dirty.flags}, invalidation_flags{MakeInvalidationFlags()} {
+    auto& tables = gpu.Maxwell3D().dirty.tables;
     SetupDirtyRenderTargets(tables);
     SetupDirtyViewports(tables);
     SetupDirtyScissors(tables);
@@ -155,9 +152,4 @@ void StateTracker::Initialize() {
     SetupDirtyStencilTestEnable(tables);
 }
 
-void StateTracker::InvalidateCommandBufferState() {
-    system.GPU().Maxwell3D().dirty.flags |= invalidation_flags;
-    current_topology = INVALID_TOPOLOGY;
-}
-
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.h b/src/video_core/renderer_vulkan/vk_state_tracker.h
index 13a6ce786..1de789e57 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.h
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.h
@@ -45,11 +45,12 @@ class StateTracker {
     using Maxwell = Tegra::Engines::Maxwell3D::Regs;
 
 public:
-    explicit StateTracker(Core::System& system);
+    explicit StateTracker(Tegra::GPU& gpu);
 
-    void Initialize();
-
-    void InvalidateCommandBufferState();
+    void InvalidateCommandBufferState() {
+        flags |= invalidation_flags;
+        current_topology = INVALID_TOPOLOGY;
+    }
 
     bool TouchViewports() {
         return Exchange(Dirty::Viewports, false);
@@ -121,13 +122,12 @@ private:
     static constexpr auto INVALID_TOPOLOGY = static_cast<Maxwell::PrimitiveTopology>(~0u);
 
     bool Exchange(std::size_t id, bool new_value) const noexcept {
-        auto& flags = system.GPU().Maxwell3D().dirty.flags;
         const bool is_dirty = flags[id];
         flags[id] = new_value;
         return is_dirty;
     }
 
-    Core::System& system;
+    Tegra::Engines::Maxwell3D::DirtyState::Flags& flags;
     Tegra::Engines::Maxwell3D::DirtyState::Flags invalidation_flags;
     Maxwell::PrimitiveTopology current_topology = INVALID_TOPOLOGY;
 };
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
index a5526a3f5..3c9171a5e 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
@@ -57,9 +57,9 @@ u32 GetMemoryType(const VkPhysicalDeviceMemoryProperties& properties,
 
 } // Anonymous namespace
 
-VKStreamBuffer::VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler,
+VKStreamBuffer::VKStreamBuffer(const VKDevice& device_, VKScheduler& scheduler_,
                                VkBufferUsageFlags usage)
-    : device{device}, scheduler{scheduler} {
+    : device{device_}, scheduler{scheduler_} {
     CreateBuffers(usage);
     ReserveWatches(current_watches, WATCHES_INITIAL_RESERVE);
     ReserveWatches(previous_watches, WATCHES_INITIAL_RESERVE);
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 2c6f54101..06182d909 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -188,13 +188,13 @@ u32 EncodeSwizzle(Tegra::Texture::SwizzleSource x_source, Tegra::Texture::Swizzl
 
 } // Anonymous namespace
 
-CachedSurface::CachedSurface(Core::System& system, const VKDevice& device,
-                             VKResourceManager& resource_manager, VKMemoryManager& memory_manager,
-                             VKScheduler& scheduler, VKStagingBufferPool& staging_pool,
-                             GPUVAddr gpu_addr, const SurfaceParams& params)
-    : SurfaceBase<View>{gpu_addr, params, device.IsOptimalAstcSupported()}, system{system},
-      device{device}, resource_manager{resource_manager},
-      memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{staging_pool} {
+CachedSurface::CachedSurface(const VKDevice& device, VKResourceManager& resource_manager,
+                             VKMemoryManager& memory_manager, VKScheduler& scheduler,
+                             VKStagingBufferPool& staging_pool, GPUVAddr gpu_addr,
+                             const SurfaceParams& params)
+    : SurfaceBase<View>{gpu_addr, params, device.IsOptimalAstcSupported()}, device{device},
+      resource_manager{resource_manager}, memory_manager{memory_manager}, scheduler{scheduler},
+      staging_pool{staging_pool} {
     if (params.IsBuffer()) {
         buffer = CreateBuffer(device, params, host_memory_size);
         commit = memory_manager.Commit(buffer, false);
@@ -490,19 +490,21 @@ VkImageView CachedSurfaceView::GetAttachment() {
     return *render_target;
 }
 
-VKTextureCache::VKTextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
-                               const VKDevice& device, VKResourceManager& resource_manager,
-                               VKMemoryManager& memory_manager, VKScheduler& scheduler,
-                               VKStagingBufferPool& staging_pool)
-    : TextureCache(system, rasterizer, device.IsOptimalAstcSupported()), device{device},
-      resource_manager{resource_manager}, memory_manager{memory_manager}, scheduler{scheduler},
-      staging_pool{staging_pool} {}
+VKTextureCache::VKTextureCache(VideoCore::RasterizerInterface& rasterizer,
+                               Tegra::Engines::Maxwell3D& maxwell3d,
+                               Tegra::MemoryManager& gpu_memory, const VKDevice& device_,
+                               VKResourceManager& resource_manager_,
+                               VKMemoryManager& memory_manager_, VKScheduler& scheduler_,
+                               VKStagingBufferPool& staging_pool_)
+    : TextureCache(rasterizer, maxwell3d, gpu_memory, device_.IsOptimalAstcSupported()),
+      device{device_}, resource_manager{resource_manager_},
+      memory_manager{memory_manager_}, scheduler{scheduler_}, staging_pool{staging_pool_} {}
 
 VKTextureCache::~VKTextureCache() = default;
 
 Surface VKTextureCache::CreateSurface(GPUVAddr gpu_addr, const SurfaceParams& params) {
-    return std::make_shared<CachedSurface>(system, device, resource_manager, memory_manager,
-                                           scheduler, staging_pool, gpu_addr, params);
+    return std::make_shared<CachedSurface>(device, resource_manager, memory_manager, scheduler,
+                                           staging_pool, gpu_addr, params);
 }
 
 void VKTextureCache::ImageCopy(Surface& src_surface, Surface& dst_surface,
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 807e26c8a..e47d02c41 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -15,10 +15,6 @@
 #include "video_core/texture_cache/surface_base.h"
 #include "video_core/texture_cache/texture_cache.h"
 
-namespace Core {
-class System;
-}
-
 namespace VideoCore {
 class RasterizerInterface;
 }
@@ -45,10 +41,10 @@ class CachedSurface final : public VideoCommon::SurfaceBase<View> {
     friend CachedSurfaceView;
 
 public:
-    explicit CachedSurface(Core::System& system, const VKDevice& device,
-                           VKResourceManager& resource_manager, VKMemoryManager& memory_manager,
-                           VKScheduler& scheduler, VKStagingBufferPool& staging_pool,
-                           GPUVAddr gpu_addr, const SurfaceParams& params);
+    explicit CachedSurface(const VKDevice& device, VKResourceManager& resource_manager,
+                           VKMemoryManager& memory_manager, VKScheduler& scheduler,
+                           VKStagingBufferPool& staging_pool, GPUVAddr gpu_addr,
+                           const SurfaceParams& params);
     ~CachedSurface();
 
     void UploadTexture(const std::vector<u8>& staging_buffer) override;
@@ -101,7 +97,6 @@ private:
 
     VkImageSubresourceRange GetImageSubresourceRange() const;
 
-    Core::System& system;
     const VKDevice& device;
     VKResourceManager& resource_manager;
     VKMemoryManager& memory_manager;
@@ -201,7 +196,8 @@ private:
 
 class VKTextureCache final : public TextureCacheBase {
 public:
-    explicit VKTextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+    explicit VKTextureCache(VideoCore::RasterizerInterface& rasterizer,
+                            Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory,
                             const VKDevice& device, VKResourceManager& resource_manager,
                             VKMemoryManager& memory_manager, VKScheduler& scheduler,
                             VKStagingBufferPool& staging_pool);
diff --git a/src/video_core/renderer_vulkan/wrapper.cpp b/src/video_core/renderer_vulkan/wrapper.cpp
index 013865aa4..fe291a148 100644
--- a/src/video_core/renderer_vulkan/wrapper.cpp
+++ b/src/video_core/renderer_vulkan/wrapper.cpp
@@ -262,6 +262,22 @@ const char* ToString(VkResult result) noexcept {
         return "VK_ERROR_INVALID_DEVICE_ADDRESS_EXT";
     case VkResult::VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT:
         return "VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT";
+    case VkResult::VK_ERROR_UNKNOWN:
+        return "VK_ERROR_UNKNOWN";
+    case VkResult::VK_ERROR_INCOMPATIBLE_VERSION_KHR:
+        return "VK_ERROR_INCOMPATIBLE_VERSION_KHR";
+    case VkResult::VK_THREAD_IDLE_KHR:
+        return "VK_THREAD_IDLE_KHR";
+    case VkResult::VK_THREAD_DONE_KHR:
+        return "VK_THREAD_DONE_KHR";
+    case VkResult::VK_OPERATION_DEFERRED_KHR:
+        return "VK_OPERATION_DEFERRED_KHR";
+    case VkResult::VK_OPERATION_NOT_DEFERRED_KHR:
+        return "VK_OPERATION_NOT_DEFERRED_KHR";
+    case VkResult::VK_PIPELINE_COMPILE_REQUIRED_EXT:
+        return "VK_PIPELINE_COMPILE_REQUIRED_EXT";
+    case VkResult::VK_RESULT_MAX_ENUM:
+        return "VK_RESULT_MAX_ENUM";
     }
     return "Unknown";
 }
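For illustration only (hypothetical enum, not code from this change): the ToString() hunk above follows the pattern of an exhaustive switch over the enum with the fallback return kept after the switch, so a compiler building with switch-coverage warnings can point out any enumerator that is not handled.

    // Hypothetical example; Color and ToString are made up to show the pattern.
    #include <cstdio>

    enum class Color { Red, Green, Blue };

    const char* ToString(Color color) noexcept {
        switch (color) {
        case Color::Red:
            return "Red";
        case Color::Green:
            return "Green";
        case Color::Blue:
            return "Blue";
        }
        // No default case: with -Wswitch (or equivalent) enabled, adding a new Color
        // enumerator without a case above produces a compile-time warning instead of
        // silently reaching this fallback.
        return "Unknown";
    }

    int main() {
        std::puts(ToString(Color::Blue));
        return 0;
    }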
diff --git a/src/video_core/shader/decode/arithmetic_half.cpp b/src/video_core/shader/decode/arithmetic_half.cpp
index a276aee44..88103fede 100644
--- a/src/video_core/shader/decode/arithmetic_half.cpp
+++ b/src/video_core/shader/decode/arithmetic_half.cpp
@@ -53,6 +53,9 @@ u32 ShaderIR::DecodeArithmeticHalf(NodeBlock& bb, u32 pc) {
         absolute_a = ((instr.value >> 44) & 1) != 0;
         absolute_b = ((instr.value >> 54) & 1) != 0;
         break;
+    default:
+        UNREACHABLE();
+        break;
     }
 
     Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.alu_half.type_a);
diff --git a/src/video_core/shader/decode/image.cpp b/src/video_core/shader/decode/image.cpp
index e75ca4fdb..cd424aa91 100644
--- a/src/video_core/shader/decode/image.cpp
+++ b/src/video_core/shader/decode/image.cpp
@@ -119,6 +119,8 @@ ComponentType GetComponentType(Tegra::Engines::SamplerDescriptor descriptor,
             return descriptor.r_type;
         }
         break;
+    default:
+        break;
     }
     UNIMPLEMENTED_MSG("Texture format not implemented={}", format);
     return ComponentType::FLOAT;
@@ -220,9 +222,10 @@ u32 GetComponentSize(TextureFormat format, std::size_t component) {
         return (component == 0 || component == 1) ? 8 : 0;
     case TextureFormat::G4R4:
         return (component == 0 || component == 1) ? 4 : 0;
+    default:
+        UNIMPLEMENTED_MSG("Texture format not implemented={}", format);
+        return 0;
     }
-    UNIMPLEMENTED_MSG("Texture format not implemented={}", format);
-    return 0;
 }
 
 std::size_t GetImageComponentMask(TextureFormat format) {
@@ -257,9 +260,10 @@ std::size_t GetImageComponentMask(TextureFormat format) {
     case TextureFormat::R8:
     case TextureFormat::R1:
         return std::size_t{R};
+    default:
+        UNIMPLEMENTED_MSG("Texture format not implemented={}", format);
+        return std::size_t{R | G | B | A};
     }
-    UNIMPLEMENTED_MSG("Texture format not implemented={}", format);
-    return std::size_t{R | G | B | A};
 }
 
 std::size_t GetImageTypeNumCoordinates(Tegra::Shader::ImageType image_type) {
@@ -463,6 +467,8 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) {
             return OperationCode::AtomicImageXor;
         case Tegra::Shader::ImageAtomicOperation::Exch:
             return OperationCode::AtomicImageExchange;
+        default:
+            break;
         }
     default:
         break;
diff --git a/src/video_core/shader/memory_util.cpp b/src/video_core/shader/memory_util.cpp
index 5071c83ca..e18ccba8e 100644
--- a/src/video_core/shader/memory_util.cpp
+++ b/src/video_core/shader/memory_util.cpp
@@ -16,11 +16,10 @@
 
 namespace VideoCommon::Shader {
 
-GPUVAddr GetShaderAddress(Core::System& system,
+GPUVAddr GetShaderAddress(Tegra::Engines::Maxwell3D& maxwell3d,
                           Tegra::Engines::Maxwell3D::Regs::ShaderProgram program) {
-    const auto& gpu{system.GPU().Maxwell3D()};
-    const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]};
-    return gpu.regs.code_address.CodeAddress() + shader_config.offset;
+    const auto& shader_config{maxwell3d.regs.shader_config[static_cast<std::size_t>(program)]};
+    return maxwell3d.regs.code_address.CodeAddress() + shader_config.offset;
 }
 
 bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) {
diff --git a/src/video_core/shader/memory_util.h b/src/video_core/shader/memory_util.h
index be90d24fd..4624d38e6 100644
--- a/src/video_core/shader/memory_util.h
+++ b/src/video_core/shader/memory_util.h
@@ -11,10 +11,6 @@
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/engines/shader_type.h"
 
-namespace Core {
-class System;
-}
-
 namespace Tegra {
 class MemoryManager;
 }
@@ -27,7 +23,7 @@ constexpr u32 STAGE_MAIN_OFFSET = 10;
 constexpr u32 KERNEL_MAIN_OFFSET = 0;
 
 /// Gets the address for the specified shader stage program
-GPUVAddr GetShaderAddress(Core::System& system,
+GPUVAddr GetShaderAddress(Tegra::Engines::Maxwell3D& maxwell3d,
                           Tegra::Engines::Maxwell3D::Regs::ShaderProgram program);
 
 /// Gets if the current instruction offset is a scheduler instruction
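For illustration only (a hypothetical caller, not part of this change): after the memory_util changes above, GetShaderAddress takes the Maxwell3D engine reference directly instead of Core::System, so a call site looks roughly like this.

    // Hypothetical usage sketch; the wrapper function and its name are assumptions.
    #include "common/common_types.h"
    #include "video_core/engines/maxwell_3d.h"
    #include "video_core/shader/memory_util.h"

    GPUVAddr LookUpVertexShaderAddress(Tegra::Engines::Maxwell3D& maxwell3d) {
        using Tegra::Engines::Maxwell3D;
        // The engine reference is passed straight through; no Core::System lookup is needed.
        return VideoCommon::Shader::GetShaderAddress(maxwell3d,
                                                     Maxwell3D::Regs::ShaderProgram::VertexB);
    }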
diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp
index e614a92df..e8515321b 100644
--- a/src/video_core/texture_cache/surface_params.cpp
+++ b/src/video_core/texture_cache/surface_params.cpp
@@ -163,13 +163,11 @@ SurfaceParams SurfaceParams::CreateForImage(const FormatLookupTable& lookup_tabl
     return params;
 }
 
-SurfaceParams SurfaceParams::CreateForDepthBuffer(Core::System& system) {
-    const auto& regs = system.GPU().Maxwell3D().regs;
-
+SurfaceParams SurfaceParams::CreateForDepthBuffer(Tegra::Engines::Maxwell3D& maxwell3d) {
+    const auto& regs = maxwell3d.regs;
     const auto block_depth = std::min(regs.zeta.memory_layout.block_depth.Value(), 5U);
     const bool is_layered = regs.zeta_layers > 1 && block_depth == 0;
     const auto pixel_format = PixelFormatFromDepthFormat(regs.zeta.format);
-
     return {
         .is_tiled = regs.zeta.memory_layout.type ==
                     Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear,
@@ -191,8 +189,9 @@ SurfaceParams SurfaceParams::CreateForDepthBuffer(Core::System& system) {
     };
 }
 
-SurfaceParams SurfaceParams::CreateForFramebuffer(Core::System& system, std::size_t index) {
-    const auto& config{system.GPU().Maxwell3D().regs.rt[index]};
+SurfaceParams SurfaceParams::CreateForFramebuffer(Tegra::Engines::Maxwell3D& maxwell3d,
+                                                  std::size_t index) {
+    const auto& config{maxwell3d.regs.rt[index]};
     SurfaceParams params;
     params.is_tiled =
         config.memory_layout.type == Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
diff --git a/src/video_core/texture_cache/surface_params.h b/src/video_core/texture_cache/surface_params.h
index 118aa689e..4466c3c34 100644
--- a/src/video_core/texture_cache/surface_params.h
+++ b/src/video_core/texture_cache/surface_params.h
@@ -33,10 +33,11 @@ public:
                                         const VideoCommon::Shader::Image& entry);
 
     /// Creates SurfaceCachedParams for a depth buffer configuration.
-    static SurfaceParams CreateForDepthBuffer(Core::System& system);
+    static SurfaceParams CreateForDepthBuffer(Tegra::Engines::Maxwell3D& maxwell3d);
 
     /// Creates SurfaceCachedParams from a framebuffer configuration.
-    static SurfaceParams CreateForFramebuffer(Core::System& system, std::size_t index);
+    static SurfaceParams CreateForFramebuffer(Tegra::Engines::Maxwell3D& maxwell3d,
+                                              std::size_t index);
 
     /// Creates SurfaceCachedParams from a Fermi2D surface configuration.
     static SurfaceParams CreateForFermiCopySurface(
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 96c4e4cc2..ea835c59f 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -135,8 +135,7 @@ public:
             return GetNullSurface(SurfaceParams::ExpectedTarget(entry));
         }
 
-        const std::optional<VAddr> cpu_addr =
-            system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
+        const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
         if (!cpu_addr) {
             return GetNullSurface(SurfaceParams::ExpectedTarget(entry));
         }
@@ -160,8 +159,7 @@ public:
         if (!gpu_addr) {
             return GetNullSurface(SurfaceParams::ExpectedTarget(entry));
         }
-        const std::optional<VAddr> cpu_addr =
-            system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
+        const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
         if (!cpu_addr) {
             return GetNullSurface(SurfaceParams::ExpectedTarget(entry));
         }
@@ -183,11 +181,11 @@ public:
     TView GetDepthBufferSurface(bool preserve_contents) {
         std::lock_guard lock{mutex};
-        auto& maxwell3d = system.GPU().Maxwell3D();
-        if (!maxwell3d.dirty.flags[VideoCommon::Dirty::ZetaBuffer]) {
+        auto& dirty = maxwell3d.dirty;
+        if (!dirty.flags[VideoCommon::Dirty::ZetaBuffer]) {
             return depth_buffer.view;
         }
-        maxwell3d.dirty.flags[VideoCommon::Dirty::ZetaBuffer] = false;
+        dirty.flags[VideoCommon::Dirty::ZetaBuffer] = false;
 
         const auto& regs{maxwell3d.regs};
         const auto gpu_addr{regs.zeta.Address()};
@@ -195,13 +193,12 @@ public:
             SetEmptyDepthBuffer();
             return {};
         }
-        const std::optional<VAddr> cpu_addr =
-            system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
+        const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
         if (!cpu_addr) {
             SetEmptyDepthBuffer();
             return {};
         }
-        const auto depth_params{SurfaceParams::CreateForDepthBuffer(system)};
+        const auto depth_params{SurfaceParams::CreateForDepthBuffer(maxwell3d)};
         auto surface_view = GetSurface(gpu_addr, *cpu_addr, depth_params, preserve_contents, true);
         if (depth_buffer.target)
             depth_buffer.target->MarkAsRenderTarget(false, NO_RT);
@@ -215,7 +212,6 @@ public:
     TView GetColorBufferSurface(std::size_t index, bool preserve_contents) {
         std::lock_guard lock{mutex};
         ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets);
-        auto& maxwell3d = system.GPU().Maxwell3D();
         if (!maxwell3d.dirty.flags[VideoCommon::Dirty::ColorBuffer0 + index]) {
             return render_targets[index].view;
         }
@@ -235,15 +231,14 @@ public:
             return {};
         }
 
-        const std::optional<VAddr> cpu_addr =
-            system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
+        const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
         if (!cpu_addr) {
             SetEmptyColorBuffer(index);
             return {};
         }
 
         auto surface_view =
-            GetSurface(gpu_addr, *cpu_addr, SurfaceParams::CreateForFramebuffer(system, index),
+            GetSurface(gpu_addr, *cpu_addr, SurfaceParams::CreateForFramebuffer(maxwell3d, index),
                        preserve_contents, true);
         if (render_targets[index].target) {
             auto& surface = render_targets[index].target;
@@ -300,9 +295,8 @@ public:
         const GPUVAddr dst_gpu_addr = dst_config.Address();
         DeduceBestBlit(src_params, dst_params, src_gpu_addr, dst_gpu_addr);
 
-        const auto& memory_manager = system.GPU().MemoryManager();
-        const std::optional<VAddr> dst_cpu_addr = memory_manager.GpuToCpuAddress(dst_gpu_addr);
-        const std::optional<VAddr> src_cpu_addr = memory_manager.GpuToCpuAddress(src_gpu_addr);
+        const std::optional<VAddr> dst_cpu_addr = gpu_memory.GpuToCpuAddress(dst_gpu_addr);
+        const std::optional<VAddr> src_cpu_addr = gpu_memory.GpuToCpuAddress(src_gpu_addr);
         std::pair dst_surface = GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, true, false);
         TView src_surface = GetSurface(src_gpu_addr, *src_cpu_addr, src_params, true, false).second;
         ImageBlit(src_surface, dst_surface.second, copy_config);
@@ -358,9 +352,11 @@ public:
     }
 
 protected:
-    explicit TextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
-                          bool is_astc_supported)
-        : system{system}, is_astc_supported{is_astc_supported}, rasterizer{rasterizer} {
+    explicit TextureCache(VideoCore::RasterizerInterface& rasterizer_,
+                          Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_,
+                          bool is_astc_supported_)
+        : is_astc_supported{is_astc_supported_}, rasterizer{rasterizer_}, maxwell3d{maxwell3d_},
+          gpu_memory{gpu_memory_} {
         for (std::size_t i = 0; i < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets; i++) {
             SetEmptyColorBuffer(i);
         }
@@ -395,7 +391,7 @@ protected:
     virtual void BufferCopy(TSurface& src_surface, TSurface& dst_surface) = 0;
 
     void ManageRenderTargetUnregister(TSurface& surface) {
-        auto& dirty = system.GPU().Maxwell3D().dirty;
+        auto& dirty = maxwell3d.dirty;
         const u32 index = surface->GetRenderTarget();
         if (index == DEPTH_RT) {
             dirty.flags[VideoCommon::Dirty::ZetaBuffer] = true;
@@ -408,8 +404,7 @@ protected:
     void Register(TSurface surface) {
         const GPUVAddr gpu_addr = surface->GetGpuAddr();
         const std::size_t size = surface->GetSizeInBytes();
-        const std::optional<VAddr> cpu_addr =
-            system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
+        const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
         if (!cpu_addr) {
             LOG_CRITICAL(HW_GPU, "Failed to register surface with unmapped gpu_address 0x{:016x}",
                          gpu_addr);
@@ -459,7 +454,6 @@ protected:
         return new_surface;
     }
 
-    Core::System& system;
     const bool is_astc_supported;
 
private:
@@ -954,8 +948,7 @@ private:
      * @param params The parameters on the candidate surface.
      **/
    Deduction DeduceSurface(const GPUVAddr gpu_addr, const SurfaceParams& params) {
-        const std::optional<VAddr> cpu_addr =
-            system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
+        const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
 
         if (!cpu_addr) {
             Deduction result{};
@@ -1112,7 +1105,7 @@ private:
     void LoadSurface(const TSurface& surface) {
         staging_cache.GetBuffer(0).resize(surface->GetHostSizeInBytes());
-        surface->LoadBuffer(system.GPU().MemoryManager(), staging_cache);
+        surface->LoadBuffer(gpu_memory, staging_cache);
         surface->UploadTexture(staging_cache.GetBuffer(0));
         surface->MarkAsModified(false, Tick());
     }
@@ -1123,7 +1116,7 @@ private:
         }
         staging_cache.GetBuffer(0).resize(surface->GetHostSizeInBytes());
         surface->DownloadTexture(staging_cache.GetBuffer(0));
-        surface->FlushBuffer(system.GPU().MemoryManager(), staging_cache);
+        surface->FlushBuffer(gpu_memory, staging_cache);
         surface->MarkAsModified(false, Tick());
     }
@@ -1253,6 +1246,8 @@ private:
     }
 
     VideoCore::RasterizerInterface& rasterizer;
+    Tegra::Engines::Maxwell3D& maxwell3d;
+    Tegra::MemoryManager& gpu_memory;
 
     FormatLookupTable format_lookup_table;
     FormatCompatibility format_compatibility;
diff --git a/src/video_core/video_core.cpp b/src/video_core/video_core.cpp
index 4e3a092c7..a14df06a3 100644
--- a/src/video_core/video_core.cpp
+++ b/src/video_core/video_core.cpp
@@ -21,14 +21,17 @@ namespace {
 std::unique_ptr<VideoCore::RendererBase> CreateRenderer(
     Core::System& system, Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu,
     std::unique_ptr<Core::Frontend::GraphicsContext> context) {
+    auto& telemetry_session = system.TelemetrySession();
+    auto& cpu_memory = system.Memory();
+
     switch (Settings::values.renderer_backend.GetValue()) {
     case Settings::RendererBackend::OpenGL:
-        return std::make_unique<OpenGL::RendererOpenGL>(system, emu_window, gpu,
-                                                        std::move(context));
+        return std::make_unique<OpenGL::RendererOpenGL>(telemetry_session, emu_window, cpu_memory,
+                                                        gpu, std::move(context));
#ifdef HAS_VULKAN
     case Settings::RendererBackend::Vulkan:
-        return std::make_unique<Vulkan::RendererVulkan>(system, emu_window, gpu,
-                                                        std::move(context));
+        return std::make_unique<Vulkan::RendererVulkan>(telemetry_session, emu_window, cpu_memory,
+                                                        gpu, std::move(context));
#endif
     default:
         return nullptr;
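Taken together, the hunks above replace the Core::System indirection with explicitly injected references (Tegra::MemoryManager, Maxwell3D, KeplerCompute, Core::Memory::Memory). For illustration only, a minimal sketch of the resulting shape of a cache-style class; ExampleCache and Resolve are invented names, not from the repository.

    // Hypothetical sketch of the dependency-injection pattern used above.
    #include <optional>

    #include "common/common_types.h"
    #include "video_core/memory_manager.h"

    class ExampleCache {
    public:
        explicit ExampleCache(Tegra::MemoryManager& gpu_memory_) : gpu_memory{gpu_memory_} {}

        // Queries the injected GPU memory manager directly, as the caches above now do,
        // instead of reaching it through Core::System at every call site.
        std::optional<VAddr> Resolve(GPUVAddr gpu_addr) const {
            return gpu_memory.GpuToCpuAddress(gpu_addr);
        }

    private:
        Tegra::MemoryManager& gpu_memory;
    };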