Diffstat (limited to 'src/video_core')
-rw-r--r--  src/video_core/engines/maxwell_3d.cpp                   |   6
-rw-r--r--  src/video_core/engines/maxwell_3d.h                     |   4
-rw-r--r--  src/video_core/engines/maxwell_compute.cpp              |   6
-rw-r--r--  src/video_core/engines/maxwell_dma.cpp                  |  17
-rw-r--r--  src/video_core/engines/shader_bytecode.h                |  34
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp        |  25
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h          |   6
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_decompiler.cpp | 300
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_gen.cpp        |  29
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_manager.cpp    |  11
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_manager.h      |   8
-rw-r--r--  src/video_core/renderer_opengl/maxwell_to_gl.h          |  16
12 files changed, 287 insertions, 175 deletions
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 8afd26fe9..bca014a4a 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -13,8 +13,7 @@
#include "video_core/renderer_base.h"
#include "video_core/textures/texture.h"
-namespace Tegra {
-namespace Engines {
+namespace Tegra::Engines {
/// First register id that is actually a Macro call.
constexpr u32 MacroRegistersStart = 0xE00;
@@ -408,5 +407,4 @@ void Maxwell3D::ProcessClearBuffers() {
rasterizer.Clear();
}
-} // namespace Engines
-} // namespace Tegra
+} // namespace Tegra::Engines
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index c8af1c6b6..0e09a7ee5 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -643,8 +643,10 @@ public:
u32 d3d_cull_mode;
ComparisonOp depth_test_func;
+ float alpha_test_ref;
+ ComparisonOp alpha_test_func;
- INSERT_PADDING_WORDS(0xB);
+ INSERT_PADDING_WORDS(0x9);
struct {
u32 separate_alpha;
diff --git a/src/video_core/engines/maxwell_compute.cpp b/src/video_core/engines/maxwell_compute.cpp
index 59e28b22d..8b5f08351 100644
--- a/src/video_core/engines/maxwell_compute.cpp
+++ b/src/video_core/engines/maxwell_compute.cpp
@@ -6,8 +6,7 @@
#include "core/core.h"
#include "video_core/engines/maxwell_compute.h"
-namespace Tegra {
-namespace Engines {
+namespace Tegra::Engines {
void MaxwellCompute::WriteReg(u32 method, u32 value) {
ASSERT_MSG(method < Regs::NUM_REGS,
@@ -26,5 +25,4 @@ void MaxwellCompute::WriteReg(u32 method, u32 value) {
}
}
-} // namespace Engines
-} // namespace Tegra
+} // namespace Tegra::Engines
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 103cd110e..b8a78cf82 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -7,8 +7,7 @@
#include "video_core/rasterizer_interface.h"
#include "video_core/textures/decoders.h"
-namespace Tegra {
-namespace Engines {
+namespace Tegra::Engines {
MaxwellDMA::MaxwellDMA(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager)
: memory_manager(memory_manager), rasterizer{rasterizer} {}
@@ -78,9 +77,9 @@ void MaxwellDMA::HandleCopy() {
ASSERT(regs.exec.enable_2d == 1);
- std::size_t copy_size = regs.x_count * regs.y_count;
+ const std::size_t copy_size = regs.x_count * regs.y_count;
- const auto FlushAndInvalidate = [&](u32 src_size, u32 dst_size) {
+ const auto FlushAndInvalidate = [&](u32 src_size, u64 dst_size) {
// TODO(Subv): For now, manually flush the regions until we implement GPU-accelerated
// copying.
rasterizer.FlushRegion(source_cpu, src_size);
@@ -91,14 +90,11 @@ void MaxwellDMA::HandleCopy() {
rasterizer.InvalidateRegion(dest_cpu, dst_size);
};
- u8* src_buffer = Memory::GetPointer(source_cpu);
- u8* dst_buffer = Memory::GetPointer(dest_cpu);
-
if (regs.exec.is_dst_linear && !regs.exec.is_src_linear) {
ASSERT(regs.src_params.size_z == 1);
// If the input is tiled and the output is linear, deswizzle the input and copy it over.
- u32 src_bytes_per_pixel = regs.src_pitch / regs.src_params.size_x;
+ const u32 src_bytes_per_pixel = regs.src_pitch / regs.src_params.size_x;
FlushAndInvalidate(regs.src_pitch * regs.src_params.size_y,
copy_size * src_bytes_per_pixel);
@@ -111,7 +107,7 @@ void MaxwellDMA::HandleCopy() {
ASSERT(regs.dst_params.size_z == 1);
ASSERT(regs.src_pitch == regs.x_count);
- u32 src_bpp = regs.src_pitch / regs.x_count;
+ const u32 src_bpp = regs.src_pitch / regs.x_count;
FlushAndInvalidate(regs.src_pitch * regs.y_count,
regs.dst_params.size_x * regs.dst_params.size_y * src_bpp);
@@ -122,5 +118,4 @@ void MaxwellDMA::HandleCopy() {
}
}
-} // namespace Engines
-} // namespace Tegra
+} // namespace Tegra::Engines
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index e3d67ff87..6cd08d28b 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -214,7 +214,7 @@ enum class IMinMaxExchange : u64 {
XHi = 3,
};
-enum class VmadType : u64 {
+enum class VideoType : u64 {
Size16_Low = 0,
Size16_High = 1,
Size32 = 2,
@@ -564,6 +564,10 @@ union Instruction {
} fmul;
union {
+ BitField<55, 1, u64> saturate;
+ } fmul32;
+
+ union {
BitField<48, 1, u64> is_signed;
} shift;
@@ -753,7 +757,6 @@ union Instruction {
BitField<45, 2, PredOperation> op;
BitField<47, 1, u64> ftz;
BitField<48, 4, PredCondition> cond;
- BitField<56, 1, u64> neg_b;
} fsetp;
union {
@@ -780,6 +783,14 @@ union Instruction {
} psetp;
union {
+ BitField<43, 4, PredCondition> cond;
+ BitField<45, 2, PredOperation> op;
+ BitField<3, 3, u64> pred3;
+ BitField<0, 3, u64> pred0;
+ BitField<39, 3, u64> pred39;
+ } vsetp;
+
+ union {
BitField<12, 3, u64> pred12;
BitField<15, 1, u64> neg_pred12;
BitField<24, 2, PredOperation> cond;
@@ -828,7 +839,6 @@ union Instruction {
BitField<53, 1, u64> neg_b;
BitField<54, 1, u64> abs_a;
BitField<55, 1, u64> ftz;
- BitField<56, 1, u64> neg_imm;
} fset;
union {
@@ -1152,15 +1162,17 @@ union Instruction {
union {
BitField<48, 1, u64> signed_a;
BitField<38, 1, u64> is_byte_chunk_a;
- BitField<36, 2, VmadType> type_a;
+ BitField<36, 2, VideoType> type_a;
BitField<36, 2, u64> byte_height_a;
BitField<49, 1, u64> signed_b;
BitField<50, 1, u64> use_register_b;
BitField<30, 1, u64> is_byte_chunk_b;
- BitField<28, 2, VmadType> type_b;
+ BitField<28, 2, VideoType> type_b;
BitField<28, 2, u64> byte_height_b;
+ } video;
+ union {
BitField<51, 2, VmadShr> shr;
BitField<55, 1, u64> saturate; // Saturates the result (a * b + c)
BitField<47, 1, u64> cc;
@@ -1211,11 +1223,13 @@ public:
KIL,
SSY,
SYNC,
+ BRK,
DEPBAR,
BFE_C,
BFE_R,
BFE_IMM,
BRA,
+ PBK,
LD_A,
LD_C,
ST_A,
@@ -1234,6 +1248,7 @@ public:
OUT_R, // Emit vertex/primitive
ISBERD,
VMAD,
+ VSETP,
FFMA_IMM, // Fused Multiply and Add
FFMA_CR,
FFMA_RC,
@@ -1372,7 +1387,7 @@ public:
/// conditionally executed).
static bool IsPredicatedInstruction(Id opcode) {
// TODO(Subv): Add the rest of unpredicated instructions.
- return opcode != Id::SSY;
+ return opcode != Id::SSY && opcode != Id::PBK;
}
class Matcher {
@@ -1468,9 +1483,11 @@ private:
#define INST(bitstring, op, type, name) Detail::GetMatcher(bitstring, op, type, name)
INST("111000110011----", Id::KIL, Type::Flow, "KIL"),
INST("111000101001----", Id::SSY, Type::Flow, "SSY"),
+ INST("111000101010----", Id::PBK, Type::Flow, "PBK"),
INST("111000100100----", Id::BRA, Type::Flow, "BRA"),
+ INST("1111000011111---", Id::SYNC, Type::Flow, "SYNC"),
+ INST("111000110100---", Id::BRK, Type::Flow, "BRK"),
INST("1111000011110---", Id::DEPBAR, Type::Synch, "DEPBAR"),
- INST("1111000011111---", Id::SYNC, Type::Synch, "SYNC"),
INST("1110111111011---", Id::LD_A, Type::Memory, "LD_A"),
INST("1110111110010---", Id::LD_C, Type::Memory, "LD_C"),
INST("1110111111110---", Id::ST_A, Type::Memory, "ST_A"),
@@ -1489,6 +1506,7 @@ private:
INST("1111101111100---", Id::OUT_R, Type::Trivial, "OUT_R"),
INST("1110111111010---", Id::ISBERD, Type::Trivial, "ISBERD"),
INST("01011111--------", Id::VMAD, Type::Trivial, "VMAD"),
+ INST("0101000011110---", Id::VSETP, Type::Trivial, "VSETP"),
INST("0011001-1-------", Id::FFMA_IMM, Type::Ffma, "FFMA_IMM"),
INST("010010011-------", Id::FFMA_CR, Type::Ffma, "FFMA_CR"),
INST("010100011-------", Id::FFMA_RC, Type::Ffma, "FFMA_RC"),
@@ -1608,4 +1626,4 @@ private:
}
};
-} // namespace Tegra::Shader
+} // namespace Tegra::Shader
\ No newline at end of file
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 3daccf82f..be51c5215 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -570,10 +570,11 @@ void RasterizerOpenGL::DrawArrays() {
SyncBlendState();
SyncLogicOpState();
SyncCullMode();
- SyncAlphaTest();
SyncScissorTest();
+ // Alpha testing is now synced in the shaders.
SyncTransformFeedback();
SyncPointState();
+ CheckAlphaTests();
// TODO(bunnei): Sync framebuffer_scale uniform here
// TODO(bunnei): Sync scissorbox uniform(s) here
@@ -1007,17 +1008,6 @@ void RasterizerOpenGL::SyncLogicOpState() {
state.logic_op.operation = MaxwellToGL::LogicOp(regs.logic_op.operation);
}
-void RasterizerOpenGL::SyncAlphaTest() {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
-
- // TODO(Rodrigo): Alpha testing is a legacy OpenGL feature, but it can be
- // implemented with a test+discard in fragment shaders.
- if (regs.alpha_test_enabled != 0) {
- LOG_CRITICAL(Render_OpenGL, "Alpha testing is not implemented");
- UNREACHABLE();
- }
-}
-
void RasterizerOpenGL::SyncScissorTest() {
const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
@@ -1052,4 +1042,15 @@ void RasterizerOpenGL::SyncPointState() {
state.point.size = regs.point_size == 0 ? 1 : regs.point_size;
}
+void RasterizerOpenGL::CheckAlphaTests() {
+ const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+
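+ // The interaction of alpha testing with multiple render targets has not been verified on
+ // hardware, so assert if a title relies on it.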
+ if (regs.alpha_test_enabled != 0 && regs.rt_control.count > 1) {
+ LOG_CRITICAL(
+ Render_OpenGL,
+ "Alpha Testing is enabled with Multiple Render Targets, this behavior is undefined.");
+ UNREACHABLE();
+ }
+}
+
} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index b1f7ccc7e..0e90a31f5 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -162,9 +162,6 @@ private:
/// Syncs the LogicOp state to match the guest state
void SyncLogicOpState();
- /// Syncs the alpha test state to match the guest state
- void SyncAlphaTest();
-
/// Syncs the scissor test state to match the guest state
void SyncScissorTest();
@@ -174,6 +171,9 @@ private:
/// Syncs the point state to match the guest state
void SyncPointState();
+ /// Checks asserts related to alpha testing.
+ void CheckAlphaTests();
+
bool has_ARB_direct_state_access = false;
bool has_ARB_multi_bind = false;
bool has_ARB_separate_shader_objects = false;
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index a427353e9..fe4d1bd83 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -163,10 +163,11 @@ private:
const ExitMethod jmp = Scan(target, end, labels);
return exit_method = ParallelExit(no_jmp, jmp);
}
- case OpCode::Id::SSY: {
- // The SSY instruction uses a similar encoding as the BRA instruction.
+ case OpCode::Id::SSY:
+ case OpCode::Id::PBK: {
+ // The SSY and PBK instructions use an encoding similar to the BRA instruction.
ASSERT_MSG(instr.bra.constant_buffer == 0,
- "Constant buffer SSY is not supported");
+ "Constant buffer branching is not supported");
const u32 target = offset + instr.bra.GetBranchTarget();
labels.insert(target);
// Continue scanning for an exit method.
@@ -378,8 +379,8 @@ public:
* @param reg The destination register to use.
* @param elem The element to use for the operation.
* @param value The code representing the value to assign. Type has to be half float.
- * @param type Half float kind of assignment.
- * @param dest_num_components Number of components in the destionation.
+ * @param merge The kind of half float merge to perform in the assignment.
+ * @param dest_num_components Number of components in the destination.
* @param value_num_components Number of components in the value.
* @param is_saturated Optional, when True, saturates the provided value.
* @param dest_elem Optional, the destination element to use for the operation.
@@ -422,6 +423,7 @@ public:
* @param reg The destination register to use.
* @param elem The element to use for the operation.
* @param attribute The input attribute to use as the source value.
+ * @param input_mode The input mode.
* @param vertex The register that decides which vertex to read from (used in GS).
*/
void SetRegisterToInputAttibute(const Register& reg, u64 elem, Attribute::Index attribute,
@@ -951,7 +953,7 @@ private:
// Can't assign to the constant predicate.
ASSERT(pred != static_cast<u64>(Pred::UnusedIndex));
- const std::string variable = 'p' + std::to_string(pred) + '_' + suffix;
+ std::string variable = 'p' + std::to_string(pred) + '_' + suffix;
shader.AddLine(variable + " = " + value + ';');
declr_predicates.insert(std::move(variable));
}
@@ -1058,7 +1060,7 @@ private:
/*
* Transforms the input string GLSL operand into an unpacked half float pair.
* @note This function returns a float type pair instead of a half float pair. This is because
- * real half floats are not standarized in GLSL but unpackHalf2x16 (which returns a vec2) is.
+ * real half floats are not standardized in GLSL but unpackHalf2x16 (which returns a vec2) is.
* @param operand Input operand. It has to be an unsigned integer.
* @param type How to unpack the unsigned integer to a half float pair.
* @param abs Get the absolute value of unpacked half floats.
@@ -1232,27 +1234,27 @@ private:
}
/*
- * Emits code to push the input target address to the SSY address stack, incrementing the stack
+ * Emits code to push the input target address to the flow address stack, incrementing the stack
* top.
*/
- void EmitPushToSSYStack(u32 target) {
+ void EmitPushToFlowStack(u32 target) {
shader.AddLine('{');
++shader.scope;
- shader.AddLine("ssy_stack[ssy_stack_top] = " + std::to_string(target) + "u;");
- shader.AddLine("ssy_stack_top++;");
+ shader.AddLine("flow_stack[flow_stack_top] = " + std::to_string(target) + "u;");
+ shader.AddLine("flow_stack_top++;");
--shader.scope;
shader.AddLine('}');
}
/*
- * Emits code to pop an address from the SSY address stack, setting the jump address to the
+ * Emits code to pop an address from the flow address stack, setting the jump address to the
* popped address and decrementing the stack top.
*/
- void EmitPopFromSSYStack() {
+ void EmitPopFromFlowStack() {
shader.AddLine('{');
++shader.scope;
- shader.AddLine("ssy_stack_top--;");
- shader.AddLine("jmp_to = ssy_stack[ssy_stack_top];");
+ shader.AddLine("flow_stack_top--;");
+ shader.AddLine("jmp_to = flow_stack[flow_stack_top];");
shader.AddLine("break;");
--shader.scope;
shader.AddLine('}');
@@ -1264,9 +1266,29 @@ private:
ASSERT_MSG(header.ps.omap.sample_mask == 0, "Samplemask write is unimplemented");
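+ // alpha_test is the uvec4 uploaded in MaxwellUniformData: x = enabled, y = comparison func,
+ // z = floatBitsToUint(ref).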
+ shader.AddLine("if (alpha_test[0] != 0) {");
+ ++shader.scope;
+ // We start on the register containing the alpha value in the first RT.
+ u32 current_reg = 3;
+ for (u32 render_target = 0; render_target < Maxwell3D::Regs::NumRenderTargets;
+ ++render_target) {
+ // TODO(Blinkhawk): verify the behavior of alpha testing on hardware when
+ // multiple render targets are used.
+ if (header.ps.IsColorComponentOutputEnabled(render_target, 0) ||
+ header.ps.IsColorComponentOutputEnabled(render_target, 1) ||
+ header.ps.IsColorComponentOutputEnabled(render_target, 2) ||
+ header.ps.IsColorComponentOutputEnabled(render_target, 3)) {
+ shader.AddLine(fmt::format("if (!AlphaFunc({})) discard;",
+ regs.GetRegisterAsFloat(current_reg)));
+ current_reg += 4;
+ }
+ }
+ --shader.scope;
+ shader.AddLine('}');
+
// Write the color outputs using the data in the shader registers, disabled
// rendertargets/components are skipped in the register assignment.
- u32 current_reg = 0;
+ current_reg = 0;
for (u32 render_target = 0; render_target < Maxwell3D::Regs::NumRenderTargets;
++render_target) {
// TODO(Subv): Figure out how dual-source blending is configured in the Switch.
@@ -1290,6 +1312,63 @@ private:
}
}
+ /// Unpacks a video instruction operand (e.g. VMAD).
+ std::string GetVideoOperand(const std::string& op, bool is_chunk, bool is_signed,
+ Tegra::Shader::VideoType type, u64 byte_height) {
+ const std::string value = [&]() {
+ if (!is_chunk) {
+ const auto offset = static_cast<u32>(byte_height * 8);
+ return "((" + op + " >> " + std::to_string(offset) + ") & 0xff)";
+ }
+ const std::string zero = "0";
+
+ switch (type) {
+ case Tegra::Shader::VideoType::Size16_Low:
+ return '(' + op + " & 0xffff)";
+ case Tegra::Shader::VideoType::Size16_High:
+ return '(' + op + " >> 16)";
+ case Tegra::Shader::VideoType::Size32:
+ // TODO(Rodrigo): From my hardware tests it becomes a bit "mad" when
+ // this type is used (1 * 1 + 0 == 0x5b800000). Until a better
+ // explanation is found: assert.
+ UNIMPLEMENTED();
+ return zero;
+ case Tegra::Shader::VideoType::Invalid:
+ UNREACHABLE_MSG("Invalid instruction encoding");
+ return zero;
+ default:
+ UNREACHABLE();
+ return zero;
+ }
+ }();
+
+ if (is_signed) {
+ return "int(" + value + ')';
+ }
+ return value;
+ };
+
+ /// Gets the A operand for a video instruction.
+ std::string GetVideoOperandA(Instruction instr) {
+ return GetVideoOperand(regs.GetRegisterAsInteger(instr.gpr8, 0, false),
+ instr.video.is_byte_chunk_a != 0, instr.video.signed_a,
+ instr.video.type_a, instr.video.byte_height_a);
+ }
+
+ /// Gets the B operand for a video instruction.
+ std::string GetVideoOperandB(Instruction instr) {
+ if (instr.video.use_register_b) {
+ return GetVideoOperand(regs.GetRegisterAsInteger(instr.gpr20, 0, false),
+ instr.video.is_byte_chunk_b != 0, instr.video.signed_b,
+ instr.video.type_b, instr.video.byte_height_b);
+ } else {
+ return '(' +
+ std::to_string(instr.video.signed_b ? static_cast<s16>(instr.alu.GetImm20_16())
+ : instr.alu.GetImm20_16()) +
+ ')';
+ }
+ }
+
/**
* Compiles a single instruction from Tegra to GLSL.
* @param offset the offset of the Tegra shader instruction.
@@ -1459,9 +1538,10 @@ private:
break;
}
case OpCode::Id::FMUL32_IMM: {
- regs.SetRegisterToFloat(
- instr.gpr0, 0,
- regs.GetRegisterAsFloat(instr.gpr8) + " * " + GetImmediate32(instr), 1, 1);
+ regs.SetRegisterToFloat(instr.gpr0, 0,
+ regs.GetRegisterAsFloat(instr.gpr8) + " * " +
+ GetImmediate32(instr),
+ 1, 1, instr.fmul32.saturate);
break;
}
case OpCode::Id::FADD32I: {
@@ -2736,20 +2816,13 @@ private:
break;
}
case OpCode::Type::FloatSetPredicate: {
- std::string op_a = instr.fsetp.neg_a ? "-" : "";
- op_a += regs.GetRegisterAsFloat(instr.gpr8);
-
- if (instr.fsetp.abs_a) {
- op_a = "abs(" + op_a + ')';
- }
+ const std::string op_a =
+ GetOperandAbsNeg(regs.GetRegisterAsFloat(instr.gpr8), instr.fsetp.abs_a != 0,
+ instr.fsetp.neg_a != 0);
- std::string op_b{};
+ std::string op_b;
if (instr.is_b_imm) {
- if (instr.fsetp.neg_b) {
- // Only the immediate version of fsetp has a neg_b bit.
- op_b += '-';
- }
op_b += '(' + GetImmediate19(instr) + ')';
} else {
if (instr.is_b_gpr) {
@@ -2945,33 +3018,24 @@ private:
break;
}
case OpCode::Type::FloatSet: {
- std::string op_a = instr.fset.neg_a ? "-" : "";
- op_a += regs.GetRegisterAsFloat(instr.gpr8);
-
- if (instr.fset.abs_a) {
- op_a = "abs(" + op_a + ')';
- }
+ const std::string op_a = GetOperandAbsNeg(regs.GetRegisterAsFloat(instr.gpr8),
+ instr.fset.abs_a != 0, instr.fset.neg_a != 0);
- std::string op_b = instr.fset.neg_b ? "-" : "";
+ std::string op_b;
if (instr.is_b_imm) {
const std::string imm = GetImmediate19(instr);
- if (instr.fset.neg_imm)
- op_b += "(-" + imm + ')';
- else
- op_b += imm;
+ op_b = imm;
} else {
if (instr.is_b_gpr) {
- op_b += regs.GetRegisterAsFloat(instr.gpr20);
+ op_b = regs.GetRegisterAsFloat(instr.gpr20);
} else {
- op_b += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset,
- GLSLRegister::Type::Float);
+ op_b = regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset,
+ GLSLRegister::Type::Float);
}
}
- if (instr.fset.abs_b) {
- op_b = "abs(" + op_b + ')';
- }
+ op_b = GetOperandAbsNeg(op_b, instr.fset.abs_b != 0, instr.fset.neg_b != 0);
// The fset instruction sets a register to 1.0 or -1 (depending on the bf bit) if the
// condition is true, and to 0 otherwise.
@@ -3279,16 +3343,32 @@ private:
// The SSY opcode tells the GPU where to re-converge divergent execution paths, it
// sets the target of the jump that the SYNC instruction will make. The SSY opcode
// has a similar structure to the BRA opcode.
- ASSERT_MSG(instr.bra.constant_buffer == 0, "Constant buffer SSY is not supported");
+ ASSERT_MSG(instr.bra.constant_buffer == 0, "Constant buffer flow is not supported");
+
+ const u32 target = offset + instr.bra.GetBranchTarget();
+ EmitPushToFlowStack(target);
+ break;
+ }
+ case OpCode::Id::PBK: {
+ // PBK pushes to a stack the address that BRK will jump to. This stack is shared with
+ // SSY, but using SYNC on a PBK address kills the shader execution. We don't emulate
+ // this because it's very unlikely that a driver will emit such an invalid shader.
+ ASSERT_MSG(instr.bra.constant_buffer == 0, "Constant buffer PBK is not supported");
const u32 target = offset + instr.bra.GetBranchTarget();
- EmitPushToSSYStack(target);
+ EmitPushToFlowStack(target);
break;
}
case OpCode::Id::SYNC: {
// The SYNC opcode jumps to the address previously set by the SSY opcode
ASSERT(instr.flow.cond == Tegra::Shader::FlowCondition::Always);
- EmitPopFromSSYStack();
+ EmitPopFromFlowStack();
+ break;
+ }
+ case OpCode::Id::BRK: {
+ // The BRK opcode jumps to the address previously set by the PBK opcode
+ ASSERT(instr.flow.cond == Tegra::Shader::FlowCondition::Always);
+ EmitPopFromFlowStack();
break;
}
case OpCode::Id::DEPBAR: {
@@ -3298,87 +3378,51 @@ private:
break;
}
case OpCode::Id::VMAD: {
- const bool signed_a = instr.vmad.signed_a == 1;
- const bool signed_b = instr.vmad.signed_b == 1;
- const bool result_signed = signed_a || signed_b;
- boost::optional<std::string> forced_result;
-
- auto Unpack = [&](const std::string& op, bool is_chunk, bool is_signed,
- Tegra::Shader::VmadType type, u64 byte_height) {
- const std::string value = [&]() {
- if (!is_chunk) {
- const auto offset = static_cast<u32>(byte_height * 8);
- return "((" + op + " >> " + std::to_string(offset) + ") & 0xff)";
- }
- const std::string zero = "0";
-
- switch (type) {
- case Tegra::Shader::VmadType::Size16_Low:
- return '(' + op + " & 0xffff)";
- case Tegra::Shader::VmadType::Size16_High:
- return '(' + op + " >> 16)";
- case Tegra::Shader::VmadType::Size32:
- // TODO(Rodrigo): From my hardware tests it becomes a bit "mad" when
- // this type is used (1 * 1 + 0 == 0x5b800000). Until a better
- // explanation is found: assert.
- UNREACHABLE_MSG("Unimplemented");
- return zero;
- case Tegra::Shader::VmadType::Invalid:
- // Note(Rodrigo): This flag is invalid according to nvdisasm. From my
- // testing (even though it's invalid) this makes the whole instruction
- // assign zero to target register.
- forced_result = boost::make_optional(zero);
- return zero;
- default:
- UNREACHABLE();
- return zero;
- }
- }();
-
- if (is_signed) {
- return "int(" + value + ')';
- }
- return value;
- };
-
- const std::string op_a = Unpack(regs.GetRegisterAsInteger(instr.gpr8, 0, false),
- instr.vmad.is_byte_chunk_a != 0, signed_a,
- instr.vmad.type_a, instr.vmad.byte_height_a);
-
- std::string op_b;
- if (instr.vmad.use_register_b) {
- op_b = Unpack(regs.GetRegisterAsInteger(instr.gpr20, 0, false),
- instr.vmad.is_byte_chunk_b != 0, signed_b, instr.vmad.type_b,
- instr.vmad.byte_height_b);
- } else {
- op_b = '(' +
- std::to_string(signed_b ? static_cast<s16>(instr.alu.GetImm20_16())
- : instr.alu.GetImm20_16()) +
- ')';
- }
-
+ const bool result_signed = instr.video.signed_a == 1 || instr.video.signed_b == 1;
+ const std::string op_a = GetVideoOperandA(instr);
+ const std::string op_b = GetVideoOperandB(instr);
const std::string op_c = regs.GetRegisterAsInteger(instr.gpr39, 0, result_signed);
- std::string result;
- if (forced_result) {
- result = *forced_result;
- } else {
- result = '(' + op_a + " * " + op_b + " + " + op_c + ')';
+ std::string result = '(' + op_a + " * " + op_b + " + " + op_c + ')';
- switch (instr.vmad.shr) {
- case Tegra::Shader::VmadShr::Shr7:
- result = '(' + result + " >> 7)";
- break;
- case Tegra::Shader::VmadShr::Shr15:
- result = '(' + result + " >> 15)";
- break;
- }
+ switch (instr.vmad.shr) {
+ case Tegra::Shader::VmadShr::Shr7:
+ result = '(' + result + " >> 7)";
+ break;
+ case Tegra::Shader::VmadShr::Shr15:
+ result = '(' + result + " >> 15)";
+ break;
}
+
regs.SetRegisterToInteger(instr.gpr0, result_signed, 1, result, 1, 1,
instr.vmad.saturate == 1, 0, Register::Size::Word,
instr.vmad.cc);
break;
}
+ case OpCode::Id::VSETP: {
+ const std::string op_a = GetVideoOperandA(instr);
+ const std::string op_b = GetVideoOperandB(instr);
+
+ // We can't use the constant predicate as a destination.
+ ASSERT(instr.vsetp.pred3 != static_cast<u64>(Pred::UnusedIndex));
+
+ const std::string second_pred = GetPredicateCondition(instr.vsetp.pred39, false);
+
+ const std::string combiner = GetPredicateCombiner(instr.vsetp.op);
+
+ const std::string predicate = GetPredicateComparison(instr.vsetp.cond, op_a, op_b);
+ // Set the primary predicate to the result of Predicate OP SecondPredicate
+ SetPredicate(instr.vsetp.pred3,
+ '(' + predicate + ") " + combiner + " (" + second_pred + ')');
+
+ if (instr.vsetp.pred0 != static_cast<u64>(Pred::UnusedIndex)) {
+ // Set the secondary predicate to the result of !Predicate OP SecondPredicate,
+ // if enabled
+ SetPredicate(instr.vsetp.pred0,
+ "!(" + predicate + ") " + combiner + " (" + second_pred + ')');
+ }
+ break;
+ }
default: {
LOG_CRITICAL(HW_GPU, "Unhandled instruction: {}", opcode->GetName());
UNREACHABLE();
@@ -3442,11 +3486,11 @@ private:
labels.insert(subroutine.begin);
shader.AddLine("uint jmp_to = " + std::to_string(subroutine.begin) + "u;");
- // TODO(Subv): Figure out the actual depth of the SSY stack, for now it seems
- // unlikely that shaders will use 20 nested SSYs.
- constexpr u32 SSY_STACK_SIZE = 20;
- shader.AddLine("uint ssy_stack[" + std::to_string(SSY_STACK_SIZE) + "];");
- shader.AddLine("uint ssy_stack_top = 0u;");
+ // TODO(Subv): Figure out the actual depth of the flow stack, for now it seems
+ // unlikely that shaders will use 20 nested SSYs and PBKs.
+ constexpr u32 FLOW_STACK_SIZE = 20;
+ shader.AddLine("uint flow_stack[" + std::to_string(FLOW_STACK_SIZE) + "];");
+ shader.AddLine("uint flow_stack_top = 0u;");
shader.AddLine("while (true) {");
++shader.scope;
@@ -3513,7 +3557,7 @@ private:
// Declarations
std::set<std::string> declr_predicates;
-}; // namespace Decompiler
+}; // namespace OpenGL::GLShader::Decompiler
std::string GetCommonDeclarations() {
return fmt::format("#define MAX_CONSTBUFFER_ELEMENTS {}\n",
diff --git a/src/video_core/renderer_opengl/gl_shader_gen.cpp b/src/video_core/renderer_opengl/gl_shader_gen.cpp
index ecbc9d8ed..e883ffb1d 100644
--- a/src/video_core/renderer_opengl/gl_shader_gen.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_gen.cpp
@@ -29,6 +29,7 @@ layout(std140) uniform vs_config {
vec4 viewport_flip;
uvec4 instance_id;
uvec4 flip_stage;
+ uvec4 alpha_test;
};
)";
@@ -105,6 +106,7 @@ layout (std140) uniform gs_config {
vec4 viewport_flip;
uvec4 instance_id;
uvec4 flip_stage;
+ uvec4 alpha_test;
};
void main() {
@@ -142,8 +144,33 @@ layout (std140) uniform fs_config {
vec4 viewport_flip;
uvec4 instance_id;
uvec4 flip_stage;
+ uvec4 alpha_test;
};
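+// alpha_test packs (enabled, func, floatBitsToUint(ref), 0). After the normalization done on the
+// CPU side, func follows the comparison-op order: 1 = never, 2 = less, 3 = equal, 4 = lequal,
+// 5 = greater, 6 = notequal, 7 = gequal, 8 = always.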
+bool AlphaFunc(in float value) {
+ float ref = uintBitsToFloat(alpha_test[2]);
+ switch (alpha_test[1]) {
+ case 1:
+ return false;
+ case 2:
+ return value < ref;
+ case 3:
+ return value == ref;
+ case 4:
+ return value <= ref;
+ case 5:
+ return value > ref;
+ case 6:
+ return value != ref;
+ case 7:
+ return value >= ref;
+ case 8:
+ return true;
+ default:
+ return false;
+ }
+}
+
void main() {
exec_fragment();
}
@@ -152,4 +179,4 @@ void main() {
out += program.first;
return {out, program.second};
}
-} // namespace OpenGL::GLShader
\ No newline at end of file
+} // namespace OpenGL::GLShader
diff --git a/src/video_core/renderer_opengl/gl_shader_manager.cpp b/src/video_core/renderer_opengl/gl_shader_manager.cpp
index 010857ec6..8b8869ecb 100644
--- a/src/video_core/renderer_opengl/gl_shader_manager.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_manager.cpp
@@ -16,6 +16,17 @@ void MaxwellUniformData::SetFromRegs(const Maxwell3D::State::ShaderStageInfo& sh
viewport_flip[0] = regs.viewport_transform[0].scale_x < 0.0 ? -1.0f : 1.0f;
viewport_flip[1] = regs.viewport_transform[0].scale_y < 0.0 ? -1.0f : 1.0f;
+ u32 func = static_cast<u32>(regs.alpha_test_func);
+ // Normalize the GL variants of the comparison op onto the same 1-8 range as the non-GL variants.
+ u32 op_gl_variant_base = static_cast<u32>(Tegra::Engines::Maxwell3D::Regs::ComparisonOp::Never);
+ if (func >= op_gl_variant_base) {
+ func = func - op_gl_variant_base + 1U;
+ }
+
+ alpha_test.enabled = regs.alpha_test_enabled;
+ alpha_test.func = func;
+ alpha_test.ref = regs.alpha_test_ref;
+
// We only assign the instance to the first component of the vector, the rest is just padding.
instance_id[0] = state.current_instance;
diff --git a/src/video_core/renderer_opengl/gl_shader_manager.h b/src/video_core/renderer_opengl/gl_shader_manager.h
index b3a191cf2..36fe1f04c 100644
--- a/src/video_core/renderer_opengl/gl_shader_manager.h
+++ b/src/video_core/renderer_opengl/gl_shader_manager.h
@@ -22,8 +22,14 @@ struct MaxwellUniformData {
alignas(16) GLvec4 viewport_flip;
alignas(16) GLuvec4 instance_id;
alignas(16) GLuvec4 flip_stage;
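+ // Mapped to "uvec4 alpha_test" in the shader config blocks; ref is read back with uintBitsToFloat.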
+ struct alignas(16) {
+ GLuint enabled;
+ GLuint func;
+ GLfloat ref;
+ GLuint padding;
+ } alpha_test;
};
-static_assert(sizeof(MaxwellUniformData) == 48, "MaxwellUniformData structure size is incorrect");
+static_assert(sizeof(MaxwellUniformData) == 64, "MaxwellUniformData structure size is incorrect");
static_assert(sizeof(MaxwellUniformData) < 16384,
"MaxwellUniformData structure must be less than 16kb as per the OpenGL spec");
diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h
index 3c3bcaae4..0f6dcab2b 100644
--- a/src/video_core/renderer_opengl/maxwell_to_gl.h
+++ b/src/video_core/renderer_opengl/maxwell_to_gl.h
@@ -82,8 +82,20 @@ inline GLenum VertexType(Maxwell::VertexAttribute attrib) {
return {};
}
- case Maxwell::VertexAttribute::Type::Float:
- return GL_FLOAT;
+ case Maxwell::VertexAttribute::Type::Float: {
+ switch (attrib.size) {
+ case Maxwell::VertexAttribute::Size::Size_16:
+ case Maxwell::VertexAttribute::Size::Size_16_16:
+ case Maxwell::VertexAttribute::Size::Size_16_16_16:
+ case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
+ return GL_HALF_FLOAT;
+ case Maxwell::VertexAttribute::Size::Size_32:
+ case Maxwell::VertexAttribute::Size::Size_32_32:
+ case Maxwell::VertexAttribute::Size::Size_32_32_32:
+ case Maxwell::VertexAttribute::Size::Size_32_32_32_32:
+ return GL_FLOAT;
+ }
+ }
}
LOG_CRITICAL(Render_OpenGL, "Unimplemented vertex type={}", attrib.TypeString());