From 7e665c2721863fe9784dd3de4aa430434fb10cff Mon Sep 17 00:00:00 2001
From: bunnei
Date: Wed, 17 Oct 2018 21:29:10 -0400
Subject: GPU: Improved implementation of maxwell DMA (Subv).

---
 src/video_core/engines/maxwell_dma.cpp | 73 +++++++++++++++++++++++++++-------
 src/video_core/engines/maxwell_dma.h   |  8 +++-
 2 files changed, 65 insertions(+), 16 deletions(-)

(limited to 'src/video_core/engines')

diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index bf2a21bb6..103cd110e 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -4,12 +4,14 @@
 
 #include "core/memory.h"
 #include "video_core/engines/maxwell_dma.h"
+#include "video_core/rasterizer_interface.h"
 #include "video_core/textures/decoders.h"
 
 namespace Tegra {
 namespace Engines {
 
-MaxwellDMA::MaxwellDMA(MemoryManager& memory_manager) : memory_manager(memory_manager) {}
+MaxwellDMA::MaxwellDMA(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager)
+    : memory_manager(memory_manager), rasterizer{rasterizer} {}
 
 void MaxwellDMA::WriteReg(u32 method, u32 value) {
     ASSERT_MSG(method < Regs::NUM_REGS,
@@ -44,38 +46,79 @@ void MaxwellDMA::HandleCopy() {
     ASSERT(regs.exec.query_mode == Regs::QueryMode::None);
     ASSERT(regs.exec.query_intr == Regs::QueryIntr::None);
     ASSERT(regs.exec.copy_mode == Regs::CopyMode::Unk2);
-    ASSERT(regs.src_params.pos_x == 0);
-    ASSERT(regs.src_params.pos_y == 0);
     ASSERT(regs.dst_params.pos_x == 0);
     ASSERT(regs.dst_params.pos_y == 0);
 
-    if (regs.exec.is_dst_linear == regs.exec.is_src_linear) {
-        std::size_t copy_size = regs.x_count;
+    if (!regs.exec.is_dst_linear && !regs.exec.is_src_linear) {
+        // If both the source and the destination are in block layout, assert.
+        UNREACHABLE_MSG("Tiled->Tiled DMA transfers are not yet implemented");
+        return;
+    }
 
+    if (regs.exec.is_dst_linear && regs.exec.is_src_linear) {
         // When the enable_2d bit is disabled, the copy is performed as if we were copying a 1D
-        // buffer of length `x_count`, otherwise we copy a 2D buffer of size (x_count, y_count).
-        if (regs.exec.enable_2d) {
-            copy_size = copy_size * regs.y_count;
+        // buffer of length `x_count`, otherwise we copy a 2D image of dimensions (x_count,
+        // y_count).
+        if (!regs.exec.enable_2d) {
+            Memory::CopyBlock(dest_cpu, source_cpu, regs.x_count);
+            return;
         }
 
-        Memory::CopyBlock(dest_cpu, source_cpu, copy_size);
+        // If both the source and the destination are in linear layout, perform a line-by-line
+        // copy. We're going to take a subrect of size (x_count, y_count) from the source
+        // rectangle. There is no need to manually flush/invalidate the regions because
+        // CopyBlock does that for us.
+        for (u32 line = 0; line < regs.y_count; ++line) {
+            const VAddr source_line = source_cpu + line * regs.src_pitch;
+            const VAddr dest_line = dest_cpu + line * regs.dst_pitch;
+            Memory::CopyBlock(dest_line, source_line, regs.x_count);
+        }
         return;
     }
 
     ASSERT(regs.exec.enable_2d == 1);
+
+    std::size_t copy_size = regs.x_count * regs.y_count;
+
+    const auto FlushAndInvalidate = [&](u32 src_size, u32 dst_size) {
+        // TODO(Subv): For now, manually flush the regions until we implement GPU-accelerated
+        // copying.
+        rasterizer.FlushRegion(source_cpu, src_size);
+
+        // We have to invalidate the destination region to evict any outdated surfaces from the
+        // cache. We do this before actually writing the new data because the destination address
+        // might contain a dirty surface that will have to be written back to memory.
+        rasterizer.InvalidateRegion(dest_cpu, dst_size);
+    };
+
     u8* src_buffer = Memory::GetPointer(source_cpu);
     u8* dst_buffer = Memory::GetPointer(dest_cpu);
 
     if (regs.exec.is_dst_linear && !regs.exec.is_src_linear) {
+        ASSERT(regs.src_params.size_z == 1);
         // If the input is tiled and the output is linear, deswizzle the input and copy it over.
-        Texture::CopySwizzledData(regs.src_params.size_x, regs.src_params.size_y,
-                                  regs.src_params.size_z, 1, 1, src_buffer, dst_buffer, true,
-                                  regs.src_params.BlockHeight(), regs.src_params.BlockDepth());
+
+        u32 src_bytes_per_pixel = regs.src_pitch / regs.src_params.size_x;
+
+        FlushAndInvalidate(regs.src_pitch * regs.src_params.size_y,
+                           copy_size * src_bytes_per_pixel);
+
+        Texture::UnswizzleSubrect(regs.x_count, regs.y_count, regs.dst_pitch,
+                                  regs.src_params.size_x, src_bytes_per_pixel, source_cpu, dest_cpu,
+                                  regs.src_params.BlockHeight(), regs.src_params.pos_x,
+                                  regs.src_params.pos_y);
     } else {
+        ASSERT(regs.dst_params.size_z == 1);
+        ASSERT(regs.src_pitch == regs.x_count);
+
+        u32 src_bpp = regs.src_pitch / regs.x_count;
+
+        FlushAndInvalidate(regs.src_pitch * regs.y_count,
+                           regs.dst_params.size_x * regs.dst_params.size_y * src_bpp);
+
         // If the input is linear and the output is tiled, swizzle the input and copy it over.
-        Texture::CopySwizzledData(regs.dst_params.size_x, regs.dst_params.size_y,
-                                  regs.dst_params.size_z, 1, 1, dst_buffer, src_buffer, false,
-                                  regs.dst_params.BlockHeight(), regs.dst_params.BlockDepth());
+        Texture::SwizzleSubrect(regs.x_count, regs.y_count, regs.src_pitch, regs.dst_params.size_x,
+                                src_bpp, dest_cpu, source_cpu, regs.dst_params.BlockHeight());
     }
 }
 
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index df19e02e2..5f3704f05 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -12,11 +12,15 @@
 #include "video_core/gpu.h"
 #include "video_core/memory_manager.h"
 
+namespace VideoCore {
+class RasterizerInterface;
+}
+
 namespace Tegra::Engines {
 
 class MaxwellDMA final {
 public:
-    explicit MaxwellDMA(MemoryManager& memory_manager);
+    explicit MaxwellDMA(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager);
     ~MaxwellDMA() = default;
 
     /// Write the value to the register identified by method.
@@ -133,6 +137,8 @@ public:
     MemoryManager& memory_manager;
 
 private:
+    VideoCore::RasterizerInterface& rasterizer;
+
     /// Performs the copy from the source buffer to the destination buffer as configured in the
     /// registers.
     void HandleCopy();
-- 
cgit v1.2.3
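
The linear->linear path added above is just a pitched, line-by-line copy: each of the y_count lines starts src_pitch (or dst_pitch) bytes after the previous one, and only x_count bytes of each line belong to the copied subrect. The following is a minimal standalone C++ sketch of that access pattern, using raw pointers and memcpy in place of the engine's Memory::CopyBlock on guest virtual addresses; the function name and signature are illustrative only and are not part of the patch.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Copies an x_count by y_count byte subrectangle between two linear buffers.
    void CopyLinearSubrect(std::uint8_t* dst, std::size_t dst_pitch, const std::uint8_t* src,
                           std::size_t src_pitch, std::size_t x_count, std::size_t y_count) {
        for (std::size_t line = 0; line < y_count; ++line) {
            // Advance each buffer by its own pitch; the pitches may differ and may be wider
            // than x_count, in which case the bytes between subrect lines are left untouched.
            std::memcpy(dst + line * dst_pitch, src + line * src_pitch, x_count);
        }
    }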