diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 327db68a5..509ca117a 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -59,6 +59,34 @@ add_library(video_core STATIC
 renderer_opengl/renderer_opengl.h
 renderer_opengl/utils.cpp
 renderer_opengl/utils.h
+ shader/decode/arithmetic.cpp
+ shader/decode/arithmetic_immediate.cpp
+ shader/decode/bfe.cpp
+ shader/decode/bfi.cpp
+ shader/decode/shift.cpp
+ shader/decode/arithmetic_integer.cpp
+ shader/decode/arithmetic_integer_immediate.cpp
+ shader/decode/arithmetic_half.cpp
+ shader/decode/arithmetic_half_immediate.cpp
+ shader/decode/ffma.cpp
+ shader/decode/hfma2.cpp
+ shader/decode/conversion.cpp
+ shader/decode/memory.cpp
+ shader/decode/float_set_predicate.cpp
+ shader/decode/integer_set_predicate.cpp
+ shader/decode/half_set_predicate.cpp
+ shader/decode/predicate_set_register.cpp
+ shader/decode/predicate_set_predicate.cpp
+ shader/decode/register_set_predicate.cpp
+ shader/decode/float_set.cpp
+ shader/decode/integer_set.cpp
+ shader/decode/half_set.cpp
+ shader/decode/video.cpp
+ shader/decode/xmad.cpp
+ shader/decode/other.cpp
+ shader/decode.cpp
+ shader/shader_ir.cpp
+ shader/shader_ir.h
 surface.cpp
 surface.h
 textures/astc.cpp
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index e53c77f2b..cdef97bc6 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -397,6 +397,10 @@ struct IpaMode {
 bool operator!=(const IpaMode& a) const {
 return !operator==(a);
 }
+ bool operator<(const IpaMode& a) const {
+ return std::tie(interpolation_mode, sampling_mode) <
+ std::tie(a.interpolation_mode, a.sampling_mode);
+ }
 };

 enum class SystemVariable : u64 {
@@ -644,6 +648,7 @@ union Instruction {
 BitField<37, 2, HalfPrecision> precision;
 BitField<32, 1, u64> saturate;

+ BitField<31, 1, u64> negate_b;
 BitField<30, 1, u64> negate_c;
 BitField<35, 2, HalfType> type_c;
 } rr;
@@ -1431,6 +1436,7 @@ public:
 PredicateSetRegister,
 RegisterSetPredicate,
 Conversion,
+ Video,
 Xmad,
 Unknown,
 };
@@ -1562,8 +1568,8 @@ private:
 INST("11100000--------", Id::IPA, Type::Trivial, "IPA"),
 INST("1111101111100---", Id::OUT_R, Type::Trivial, "OUT_R"),
 INST("1110111111010---", Id::ISBERD, Type::Trivial, "ISBERD"),
- INST("01011111--------", Id::VMAD, Type::Trivial, "VMAD"),
- INST("0101000011110---", Id::VSETP, Type::Trivial, "VSETP"),
+ INST("01011111--------", Id::VMAD, Type::Video, "VMAD"),
+ INST("0101000011110---", Id::VSETP, Type::Video, "VSETP"),
 INST("0011001-1-------", Id::FFMA_IMM, Type::Ffma, "FFMA_IMM"),
 INST("010010011-------", Id::FFMA_CR, Type::Ffma, "FFMA_CR"),
 INST("010100011-------", Id::FFMA_RC, Type::Ffma, "FFMA_RC"),
diff --git a/src/video_core/engines/shader_header.h b/src/video_core/engines/shader_header.h
index 99c34649f..cf2b76ff6 100644
--- a/src/video_core/engines/shader_header.h
+++ b/src/video_core/engines/shader_header.h
@@ -106,7 +106,7 @@ struct Header {
 } ps;
 };

- u64 GetLocalMemorySize() {
+ u64 GetLocalMemorySize() const {
 return (common1.shader_local_memory_low_size |
 (common2.shader_local_memory_high_size << 24));
 }
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 6600ad528..71829fee0 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -930,7 +930,7 @@ u32 RasterizerOpenGL::SetupConstBuffers(Maxwell::ShaderStage stage, Shader& shad
 const auto& gpu = Core::System::GetInstance().GPU();
 const auto& maxwell3d = gpu.Maxwell3D();
 const auto& shader_stage = maxwell3d.state.shader_stages[static_cast(stage)];
- const auto& entries = shader->GetShaderEntries().const_buffer_entries;
+ const auto& entries = shader->GetShaderEntries().const_buffers;

 constexpr u64 max_binds = Tegra::Engines::Maxwell3D::Regs::MaxConstBuffers;
 std::array bind_buffers;
@@ -998,7 +998,7 @@ u32 RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, Shader& shader,
 MICROPROFILE_SCOPE(OpenGL_Texture);
 const auto& gpu = Core::System::GetInstance().GPU();
 const auto& maxwell3d = gpu.Maxwell3D();
- const auto& entries = shader->GetShaderEntries().texture_samplers;
+ const auto& entries = shader->GetShaderEntries().samplers;

 ASSERT_MSG(current_unit + entries.size() <= std::size(state.texture_units),
 "Exceeded the number of active textures.");
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index c785fffa3..b3aca39af 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -10,11 +10,15 @@
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/renderer_opengl/gl_rasterizer.h"
 #include "video_core/renderer_opengl/gl_shader_cache.h"
+#include "video_core/renderer_opengl/gl_shader_decompiler.h"
 #include "video_core/renderer_opengl/gl_shader_manager.h"
 #include "video_core/renderer_opengl/utils.h"
+#include "video_core/shader/shader_ir.h"

 namespace OpenGL {

+using VideoCommon::Shader::ProgramCode;
+
 /// Gets the address for the specified shader stage program
 static VAddr GetShaderAddress(Maxwell::ShaderProgram program) {
 const auto& gpu = Core::System::GetInstance().GPU().Maxwell3D();
@@ -24,8 +28,8 @@ static VAddr GetShaderAddress(Maxwell::ShaderProgram program) {
 }

 /// Gets the shader program code from memory for the specified address
-static GLShader::ProgramCode GetShaderCode(VAddr addr) {
- GLShader::ProgramCode program_code(GLShader::MAX_PROGRAM_CODE_LENGTH);
+static ProgramCode GetShaderCode(VAddr addr) {
+ ProgramCode program_code(VideoCommon::Shader::MAX_PROGRAM_LENGTH);
 Memory::ReadBlock(addr, program_code.data(), program_code.size() * sizeof(u64));
 return program_code;
 }
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h
index 768747968..e0887dd7b 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_cache.h
@@ -12,6 +12,7 @@
 #include "common/common_types.h"
 #include "video_core/rasterizer_cache.h"
 #include "video_core/renderer_opengl/gl_resource_manager.h"
+#include "video_core/renderer_opengl/gl_shader_decompiler.h"
 #include "video_core/renderer_opengl/gl_shader_gen.h"

 namespace OpenGL {
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index 1bb09e61b..3411cf9e6 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -2,247 +2,40 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
-#include -#include -#include +#include #include #include -#include +#include #include +#include "common/alignment.h" #include "common/assert.h" #include "common/common_types.h" -#include "video_core/engines/shader_bytecode.h" -#include "video_core/engines/shader_header.h" +#include "video_core/engines/maxwell_3d.h" #include "video_core/renderer_opengl/gl_rasterizer.h" #include "video_core/renderer_opengl/gl_shader_decompiler.h" +#include "video_core/shader/shader_ir.h" -namespace OpenGL::GLShader::Decompiler { +namespace OpenGL::GLShader { using Tegra::Shader::Attribute; -using Tegra::Shader::Instruction; -using Tegra::Shader::LogicOperation; -using Tegra::Shader::OpCode; +using Tegra::Shader::Header; +using Tegra::Shader::IpaInterpMode; +using Tegra::Shader::IpaMode; +using Tegra::Shader::IpaSampleMode; using Tegra::Shader::Register; -using Tegra::Shader::Sampler; -using Tegra::Shader::SubOp; +using namespace VideoCommon::Shader; -constexpr u32 PROGRAM_END = MAX_PROGRAM_CODE_LENGTH; -constexpr u32 PROGRAM_HEADER_SIZE = sizeof(Tegra::Shader::Header); +using Maxwell = Tegra::Engines::Maxwell3D::Regs; +using ShaderStage = Tegra::Engines::Maxwell3D::Regs::ShaderStage; +using Operation = const OperationNode&; -constexpr u32 MAX_GEOMETRY_BUFFERS = 6; -constexpr u32 MAX_ATTRIBUTES = 0x100; // Size in vec4s, this value is untested +enum : u32 { POSITION_VARYING_LOCATION = 0, GENERIC_VARYING_START_LOCATION = 1 }; +constexpr u32 MAX_CONSTBUFFER_ELEMENTS = + static_cast(RasterizerOpenGL::MaxConstbufferSize) / (4 * sizeof(float)); -static const char* INTERNAL_FLAG_NAMES[] = {"zero_flag", "sign_flag", "carry_flag", - "overflow_flag"}; - -enum class InternalFlag : u64 { - ZeroFlag = 0, - SignFlag = 1, - CarryFlag = 2, - OverflowFlag = 3, - Amount -}; - -class DecompileFail : public std::runtime_error { -public: - using std::runtime_error::runtime_error; -}; - -/// Generates code to use for a swizzle operation. -static std::string GetSwizzle(u64 elem) { - ASSERT(elem <= 3); - std::string swizzle = "."; - swizzle += "xyzw"[elem]; - return swizzle; -} - -/// Translate topology -static std::string GetTopologyName(Tegra::Shader::OutputTopology topology) { - switch (topology) { - case Tegra::Shader::OutputTopology::PointList: - return "points"; - case Tegra::Shader::OutputTopology::LineStrip: - return "line_strip"; - case Tegra::Shader::OutputTopology::TriangleStrip: - return "triangle_strip"; - default: - UNIMPLEMENTED_MSG("Unknown output topology: {}", static_cast(topology)); - return "points"; - } -} - -/// Describes the behaviour of code path of a given entry point and a return point. -enum class ExitMethod { - Undetermined, ///< Internal value. Only occur when analyzing JMP loop. - AlwaysReturn, ///< All code paths reach the return point. - Conditional, ///< Code path reaches the return point or an END instruction conditionally. - AlwaysEnd, ///< All code paths reach a END instruction. -}; - -/// A subroutine is a range of code refereced by a CALL, IF or LOOP instruction. -struct Subroutine { - /// Generates a name suitable for GLSL source code. - std::string GetName() const { - return "sub_" + std::to_string(begin) + '_' + std::to_string(end) + '_' + suffix; - } - - u32 begin; ///< Entry point of the subroutine. - u32 end; ///< Return point of the subroutine. - const std::string& suffix; ///< Suffix of the shader, used to make a unique subroutine name - ExitMethod exit_method; ///< Exit method of the subroutine. - std::set labels; ///< Addresses refereced by JMP instructions. 
- - bool operator<(const Subroutine& rhs) const { - return std::tie(begin, end) < std::tie(rhs.begin, rhs.end); - } -}; - -/// Analyzes shader code and produces a set of subroutines. -class ControlFlowAnalyzer { -public: - ControlFlowAnalyzer(const ProgramCode& program_code, u32 main_offset, const std::string& suffix) - : program_code(program_code), shader_coverage_begin(main_offset), - shader_coverage_end(main_offset + 1) { - - // Recursively finds all subroutines. - const Subroutine& program_main = AddSubroutine(main_offset, PROGRAM_END, suffix); - if (program_main.exit_method != ExitMethod::AlwaysEnd) - throw DecompileFail("Program does not always end"); - } - - std::set GetSubroutines() { - return std::move(subroutines); - } - - std::size_t GetShaderLength() const { - return shader_coverage_end * sizeof(u64); - } - -private: - const ProgramCode& program_code; - std::set subroutines; - std::map, ExitMethod> exit_method_map; - u32 shader_coverage_begin; - u32 shader_coverage_end; - - /// Adds and analyzes a new subroutine if it is not added yet. - const Subroutine& AddSubroutine(u32 begin, u32 end, const std::string& suffix) { - Subroutine subroutine{begin, end, suffix, ExitMethod::Undetermined, {}}; - - const auto iter = subroutines.find(subroutine); - if (iter != subroutines.end()) { - return *iter; - } - - subroutine.exit_method = Scan(begin, end, subroutine.labels); - if (subroutine.exit_method == ExitMethod::Undetermined) { - throw DecompileFail("Recursive function detected"); - } - - return *subroutines.insert(std::move(subroutine)).first; - } - - /// Merges exit method of two parallel branches. - static ExitMethod ParallelExit(ExitMethod a, ExitMethod b) { - if (a == ExitMethod::Undetermined) { - return b; - } - if (b == ExitMethod::Undetermined) { - return a; - } - if (a == b) { - return a; - } - return ExitMethod::Conditional; - } - - /// Scans a range of code for labels and determines the exit method. - ExitMethod Scan(u32 begin, u32 end, std::set& labels) { - const auto [iter, inserted] = - exit_method_map.emplace(std::make_pair(begin, end), ExitMethod::Undetermined); - ExitMethod& exit_method = iter->second; - if (!inserted) - return exit_method; - - for (u32 offset = begin; offset != end && offset != PROGRAM_END; ++offset) { - shader_coverage_begin = std::min(shader_coverage_begin, offset); - shader_coverage_end = std::max(shader_coverage_end, offset + 1); - - const Instruction instr = {program_code[offset]}; - if (const auto opcode = OpCode::Decode(instr)) { - switch (opcode->get().GetId()) { - case OpCode::Id::EXIT: { - // The EXIT instruction can be predicated, which means that the shader can - // conditionally end on this instruction. We have to consider the case where the - // condition is not met and check the exit method of that other basic block. - using Tegra::Shader::Pred; - if (instr.pred.pred_index == static_cast(Pred::UnusedIndex)) { - return exit_method = ExitMethod::AlwaysEnd; - } else { - const ExitMethod not_met = Scan(offset + 1, end, labels); - return exit_method = ParallelExit(ExitMethod::AlwaysEnd, not_met); - } - } - case OpCode::Id::BRA: { - const u32 target = offset + instr.bra.GetBranchTarget(); - labels.insert(target); - const ExitMethod no_jmp = Scan(offset + 1, end, labels); - const ExitMethod jmp = Scan(target, end, labels); - return exit_method = ParallelExit(no_jmp, jmp); - } - case OpCode::Id::SSY: - case OpCode::Id::PBK: { - // The SSY and PBK use a similar encoding as the BRA instruction. 
- UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, - "Constant buffer branching is not supported"); - const u32 target = offset + instr.bra.GetBranchTarget(); - labels.insert(target); - // Continue scanning for an exit method. - break; - } - } - } - } - return exit_method = ExitMethod::AlwaysReturn; - } -}; - -template -class ShaderScopedScope { -public: - explicit ShaderScopedScope(T& writer, std::string_view begin_expr, std::string end_expr) - : writer(writer), end_expr(std::move(end_expr)) { - - if (begin_expr.empty()) { - writer.AddLine('{'); - } else { - writer.AddExpression(begin_expr); - writer.AddLine(" {"); - } - ++writer.scope; - } - - ShaderScopedScope(const ShaderScopedScope&) = delete; - - ~ShaderScopedScope() { - --writer.scope; - if (end_expr.empty()) { - writer.AddLine('}'); - } else { - writer.AddExpression("} "); - writer.AddExpression(end_expr); - writer.AddLine(';'); - } - } - - ShaderScopedScope& operator=(const ShaderScopedScope&) = delete; - -private: - T& writer; - std::string end_expr; -}; +enum class Type { Bool, Bool2, Float, Int, Uint, HalfFloat }; class ShaderWriter { public: @@ -271,16 +64,17 @@ public: shader_source += '\n'; } + std::string GenerateTemporal() { + std::string temporal = "tmp"; + temporal += std::to_string(temporal_index++); + return temporal; + } + std::string GetResult() { return std::move(shader_source); } - ShaderScopedScope Scope(std::string_view begin_expr = {}, - std::string end_expr = {}) { - return ShaderScopedScope(*this, begin_expr, end_expr); - } - - int scope = 0; + s32 scope = 0; private: void AppendIndentation() { @@ -288,1218 +82,1186 @@ private: } std::string shader_source; + u32 temporal_index = 1; }; -/** - * Represents an emulated shader register, used to track the state of that register for emulation - * with GLSL. At this time, a register can be used as a float or an integer. This class is used for - * bookkeeping within the GLSL program. - */ -class GLSLRegister { +/// Generates code to use for a swizzle operation. 
+static std::string GetSwizzle(u32 elem) { + ASSERT(elem <= 3); + std::string swizzle = "."; + swizzle += "xyzw"[elem]; + return swizzle; +} + +/// Translate topology +static std::string GetTopologyName(Tegra::Shader::OutputTopology topology) { + switch (topology) { + case Tegra::Shader::OutputTopology::PointList: + return "points"; + case Tegra::Shader::OutputTopology::LineStrip: + return "line_strip"; + case Tegra::Shader::OutputTopology::TriangleStrip: + return "triangle_strip"; + default: + UNIMPLEMENTED_MSG("Unknown output topology: {}", static_cast(topology)); + return "points"; + } +} + +/// Returns true if an object has to be treated as precise +static bool IsPrecise(Operation operand) { + const auto& meta = operand.GetMeta(); + + if (const auto arithmetic = std::get_if(&meta)) { + return arithmetic->precise; + } + if (const auto half_arithmetic = std::get_if(&meta)) { + return half_arithmetic->precise; + } + return false; +} + +static bool IsPrecise(Node node) { + if (const auto operation = std::get_if(node)) { + return IsPrecise(*operation); + } + return false; +} + +class GLSLDecompiler final { public: - enum class Type { - Float, - Integer, - UnsignedInteger, - }; + explicit GLSLDecompiler(const ShaderIR& ir, ShaderStage stage, std::string suffix) + : ir{ir}, stage{stage}, suffix{suffix}, header{ir.GetHeader()} {} - GLSLRegister(std::size_t index, const std::string& suffix) : index{index}, suffix{suffix} {} + void Decompile() { + DeclareVertex(); + DeclareGeometry(); + DeclareRegisters(); + DeclarePredicates(); + DeclareLocalMemory(); + DeclareInternalFlags(); + DeclareInputAttributes(); + DeclareOutputAttributes(); + DeclareConstantBuffers(); + DeclareSamplers(); - /// Gets the GLSL type string for a register - static std::string GetTypeString() { - return "float"; + code.AddLine("void execute_" + suffix + "() {"); + ++code.scope; + + // VM's program counter + const auto first_address = ir.GetBasicBlocks().begin()->first; + code.AddLine("uint jmp_to = " + std::to_string(first_address) + "u;"); + + // TODO(Subv): Figure out the actual depth of the flow stack, for now it seems + // unlikely that shaders will use 20 nested SSYs and PBKs. 
+ constexpr u32 FLOW_STACK_SIZE = 20; + code.AddLine(fmt::format("uint flow_stack[{}];", FLOW_STACK_SIZE)); + code.AddLine("uint flow_stack_top = 0u;"); + + code.AddLine("while (true) {"); + ++code.scope; + + code.AddLine("switch (jmp_to) {"); + + for (const auto& pair : ir.GetBasicBlocks()) { + const auto [address, bb] = pair; + code.AddLine(fmt::format("case 0x{:x}u: {{", address)); + ++code.scope; + + VisitBasicBlock(bb); + + --code.scope; + code.AddLine('}'); + } + + code.AddLine("default: return;"); + code.AddLine('}'); + + for (std::size_t i = 0; i < 2; ++i) { + --code.scope; + code.AddLine('}'); + } } - /// Gets the GLSL register prefix string, used for declarations and referencing - static std::string GetPrefixString() { - return "reg_"; + std::string GetResult() { + return code.GetResult(); } - /// Returns a GLSL string representing the current state of the register - std::string GetString() const { - return GetPrefixString() + std::to_string(index) + '_' + suffix; - } - - /// Returns the index of the register - std::size_t GetIndex() const { - return index; + ShaderEntries GetShaderEntries() const { + ShaderEntries entries; + for (const auto& cbuf : ir.GetConstantBuffers()) { + ConstBufferEntry desc(cbuf.second, stage, GetConstBufferBlock(cbuf.first), cbuf.first); + entries.const_buffers.push_back(desc); + } + for (const auto& sampler : ir.GetSamplers()) { + SamplerEntry desc(sampler, stage, GetSampler(sampler)); + entries.samplers.push_back(desc); + } + entries.clip_distances = ir.GetClipDistances(); + entries.shader_length = ir.GetLength(); + return entries; } private: - const std::size_t index; - const std::string& suffix; -}; + using OperationDecompilerFn = std::string (GLSLDecompiler::*)(Operation); + using OperationDecompilersArray = + std::array(OperationCode::Amount)>; -/** - * Used to manage shader registers that are emulated with GLSL. This class keeps track of the state - * of all registers (e.g. whether they are currently being used as Floats or Integers), and - * generates the necessary GLSL code to perform conversions as needed. This class is used for - * bookkeeping within the GLSL program. - */ -class GLSLRegisterManager { -public: - GLSLRegisterManager(ShaderWriter& shader, ShaderWriter& declarations, - const Maxwell3D::Regs::ShaderStage& stage, const std::string& suffix, - const Tegra::Shader::Header& header) - : shader{shader}, declarations{declarations}, stage{stage}, suffix{suffix}, header{header}, - fixed_pipeline_output_attributes_used{}, local_memory_size{0} { - BuildRegisterList(); - BuildInputList(); - } - - void SetConditionalCodesFromExpression(const std::string& expresion) { - SetInternalFlag(InternalFlag::ZeroFlag, "(" + expresion + ") == 0"); - LOG_WARNING(HW_GPU, "Condition codes implementation is incomplete."); - } - - void SetConditionalCodesFromRegister(const Register& reg, u64 dest_elem = 0) { - SetConditionalCodesFromExpression(GetRegister(reg, static_cast(dest_elem))); - } - - /** - * Returns code that does an integer size conversion for the specified size. - * @param value Value to perform integer size conversion on. - * @param size Register size to use for conversion instructions. - * @returns GLSL string corresponding to the value converted to the specified size. 
- */ - static std::string ConvertIntegerSize(const std::string& value, Register::Size size) { - switch (size) { - case Register::Size::Byte: - return "((" + value + " << 24) >> 24)"; - case Register::Size::Short: - return "((" + value + " << 16) >> 16)"; - case Register::Size::Word: - // Default - do nothing - return value; - default: - UNREACHABLE_MSG("Unimplemented conversion size: {}", static_cast(size)); - return value; - } - } - - /** - * Gets a register as an float. - * @param reg The register to get. - * @param elem The element to use for the operation. - * @returns GLSL string corresponding to the register as a float. - */ - std::string GetRegisterAsFloat(const Register& reg, unsigned elem = 0) { - return GetRegister(reg, elem); - } - - /** - * Gets a register as an integer. - * @param reg The register to get. - * @param elem The element to use for the operation. - * @param is_signed Whether to get the register as a signed (or unsigned) integer. - * @param size Register size to use for conversion instructions. - * @returns GLSL string corresponding to the register as an integer. - */ - std::string GetRegisterAsInteger(const Register& reg, unsigned elem = 0, bool is_signed = true, - Register::Size size = Register::Size::Word) { - const std::string func{is_signed ? "floatBitsToInt" : "floatBitsToUint"}; - const std::string value{func + '(' + GetRegister(reg, elem) + ')'}; - return ConvertIntegerSize(value, size); - } - - /** - * Writes code that does a register assignment to float value operation. - * @param reg The destination register to use. - * @param elem The element to use for the operation. - * @param value The code representing the value to assign. - * @param dest_num_components Number of components in the destination. - * @param value_num_components Number of components in the value. - * @param is_saturated Optional, when True, saturates the provided value. - * @param sets_cc Optional, when True, sets the corresponding values to the implemented - * condition flags. - * @param dest_elem Optional, the destination element to use for the operation. - */ - void SetRegisterToFloat(const Register& reg, u64 elem, const std::string& value, - u64 dest_num_components, u64 value_num_components, - bool is_saturated = false, bool sets_cc = false, u64 dest_elem = 0, - bool precise = false) { - const std::string clamped_value = is_saturated ? "clamp(" + value + ", 0.0, 1.0)" : value; - SetRegister(reg, elem, clamped_value, dest_num_components, value_num_components, dest_elem, - precise); - if (sets_cc) { - if (reg == Register::ZeroIndex) { - SetConditionalCodesFromExpression(clamped_value); - } else { - SetConditionalCodesFromRegister(reg, dest_elem); - } - } - } - - /** - * Writes code that does a register assignment to integer value operation. - * @param reg The destination register to use. - * @param elem The element to use for the operation. - * @param value The code representing the value to assign. - * @param dest_num_components Number of components in the destination. - * @param value_num_components Number of components in the value. - * @param is_saturated Optional, when True, saturates the provided value. - * @param sets_cc Optional, when True, sets the corresponding values to the implemented - * condition flags. - * @param dest_elem Optional, the destination element to use for the operation. - * @param size Register size to use for conversion instructions. 
- */ - void SetRegisterToInteger(const Register& reg, bool is_signed, u64 elem, - const std::string& value, u64 dest_num_components, - u64 value_num_components, bool is_saturated = false, - bool sets_cc = false, u64 dest_elem = 0, - Register::Size size = Register::Size::Word) { - UNIMPLEMENTED_IF(is_saturated); - const std::string final_value = ConvertIntegerSize(value, size); - const std::string func{is_signed ? "intBitsToFloat" : "uintBitsToFloat"}; - - SetRegister(reg, elem, func + '(' + final_value + ')', dest_num_components, - value_num_components, dest_elem, false); - - if (sets_cc) { - if (reg == Register::ZeroIndex) { - SetConditionalCodesFromExpression(final_value); - } else { - SetConditionalCodesFromRegister(reg, dest_elem); - } - } - } - - /** - * Writes code that does a register assignment to a half float value operation. - * @param reg The destination register to use. - * @param elem The element to use for the operation. - * @param value The code representing the value to assign. Type has to be half float. - * @param merge Half float kind of assignment. - * @param dest_num_components Number of components in the destination. - * @param value_num_components Number of components in the value. - * @param is_saturated Optional, when True, saturates the provided value. - * @param dest_elem Optional, the destination element to use for the operation. - */ - void SetRegisterToHalfFloat(const Register& reg, u64 elem, const std::string& value, - Tegra::Shader::HalfMerge merge, u64 dest_num_components, - u64 value_num_components, bool is_saturated = false, - u64 dest_elem = 0) { - UNIMPLEMENTED_IF(is_saturated); - - const std::string result = [&]() { - switch (merge) { - case Tegra::Shader::HalfMerge::H0_H1: - return "uintBitsToFloat(packHalf2x16(" + value + "))"; - case Tegra::Shader::HalfMerge::F32: - // Half float instructions take the first component when doing a float cast. - return "float(" + value + ".x)"; - case Tegra::Shader::HalfMerge::Mrg_H0: - // TODO(Rodrigo): I guess Mrg_H0 and Mrg_H1 take their respective component from the - // pack. I couldn't test this on hardware but it shouldn't really matter since most - // of the time when a Mrg_* flag is used both components will be mirrored. That - // being said, it deserves a test. - return "uintBitsToFloat((" + GetRegisterAsInteger(reg, 0, false) + - " & 0xffff0000) | (packHalf2x16(" + value + ") & 0x0000ffff))"; - case Tegra::Shader::HalfMerge::Mrg_H1: - return "uintBitsToFloat((" + GetRegisterAsInteger(reg, 0, false) + - " & 0x0000ffff) | (packHalf2x16(" + value + ") & 0xffff0000))"; - default: - UNREACHABLE(); - return std::string("0"); - } - }(); - - SetRegister(reg, elem, result, dest_num_components, value_num_components, dest_elem, false); - } - - /** - * Writes code that does a register assignment to input attribute operation. Input attributes - * are stored as floats, so this may require conversion. - * @param reg The destination register to use. - * @param elem The element to use for the operation. - * @param attribute The input attribute to use as the source value. - * @param input_mode The input mode. - * @param vertex The register that decides which vertex to read from (used in GS). 
- */ - void SetRegisterToInputAttibute(const Register& reg, u64 elem, Attribute::Index attribute, - const Tegra::Shader::IpaMode& input_mode, - std::optional vertex = {}) { - const std::string dest = GetRegisterAsFloat(reg); - const std::string src = GetInputAttribute(attribute, input_mode, vertex) + GetSwizzle(elem); - shader.AddLine(dest + " = " + src + ';'); - } - - std::string GetLocalMemoryAsFloat(const std::string& index) { - return "lmem[" + index + ']'; - } - - std::string GetLocalMemoryAsInteger(const std::string& index, bool is_signed = false) { - const std::string func{is_signed ? "floatToIntBits" : "floatBitsToUint"}; - return func + "(lmem[" + index + "])"; - } - - void SetLocalMemoryAsFloat(const std::string& index, const std::string& value) { - shader.AddLine("lmem[" + index + "] = " + value + ';'); - } - - void SetLocalMemoryAsInteger(const std::string& index, const std::string& value, - bool is_signed = false) { - const std::string func{is_signed ? "intBitsToFloat" : "uintBitsToFloat"}; - shader.AddLine("lmem[" + index + "] = " + func + '(' + value + ");"); - } - - std::string GetConditionCode(const Tegra::Shader::ConditionCode cc) const { - switch (cc) { - case Tegra::Shader::ConditionCode::NEU: - return "!(" + GetInternalFlag(InternalFlag::ZeroFlag) + ')'; - default: - UNIMPLEMENTED_MSG("Unimplemented condition code: {}", static_cast(cc)); - return "false"; - } - } - - std::string GetInternalFlag(const InternalFlag flag) const { - const auto index = static_cast(flag); - ASSERT(index < static_cast(InternalFlag::Amount)); - - return std::string(INTERNAL_FLAG_NAMES[index]) + '_' + suffix; - } - - void SetInternalFlag(const InternalFlag flag, const std::string& value) const { - shader.AddLine(GetInternalFlag(flag) + " = " + value + ';'); - } - - /** - * Writes code that does a output attribute assignment to register operation. Output attributes - * are stored as floats, so this may require conversion. - * @param attribute The destination output attribute. - * @param elem The element to use for the operation. - * @param val_reg The register to use as the source value. - * @param buf_reg The register that tells which buffer to write to (used in geometry shaders). - */ - void SetOutputAttributeToRegister(Attribute::Index attribute, u64 elem, const Register& val_reg, - const Register& buf_reg) { - const std::string dest = GetOutputAttribute(attribute); - const std::string src = GetRegisterAsFloat(val_reg); - if (dest.empty()) + void DeclareVertex() { + if (stage != ShaderStage::Vertex) return; - // Can happen with unknown/unimplemented output attributes, in which case we ignore the - // instruction for now. - if (stage == Maxwell3D::Regs::ShaderStage::Geometry) { - // TODO(Rodrigo): nouveau sets some attributes after setting emitting a geometry - // shader. These instructions use a dirty register as buffer index, to avoid some - // drivers from complaining about out of boundary writes, guard them. - const std::string buf_index{"((" + GetRegisterAsInteger(buf_reg) + ") % " + - std::to_string(MAX_GEOMETRY_BUFFERS) + ')'}; - shader.AddLine("amem[" + buf_index + "][" + - std::to_string(static_cast(attribute)) + ']' + GetSwizzle(elem) + - " = " + src + ';'); - return; - } - - switch (attribute) { - case Attribute::Index::ClipDistances0123: - case Attribute::Index::ClipDistances4567: { - const u64 index = (attribute == Attribute::Index::ClipDistances4567 ? 
4 : 0) + elem; - UNIMPLEMENTED_IF_MSG( - ((header.vtg.clip_distances >> index) & 1) == 0, - "Shader is setting gl_ClipDistance{} without enabling it in the header", index); - - clip_distances[index] = true; - fixed_pipeline_output_attributes_used.insert(attribute); - shader.AddLine(dest + '[' + std::to_string(index) + "] = " + src + ';'); - break; - } - case Attribute::Index::PointSize: - fixed_pipeline_output_attributes_used.insert(attribute); - shader.AddLine(dest + " = " + src + ';'); - break; - default: - shader.AddLine(dest + GetSwizzle(elem) + " = " + src + ';'); - break; - } + DeclareVertexRedeclarations(); } - /// Generates code representing a uniform (C buffer) register, interpreted as the input type. - std::string GetUniform(u64 index, u64 offset, GLSLRegister::Type type, - Register::Size size = Register::Size::Word) { - declr_const_buffers[index].MarkAsUsed(index, offset, stage); - std::string value = 'c' + std::to_string(index) + '[' + std::to_string(offset / 4) + "][" + - std::to_string(offset % 4) + ']'; - - if (type == GLSLRegister::Type::Float) { - // Do nothing, default - } else if (type == GLSLRegister::Type::Integer) { - value = "floatBitsToInt(" + value + ')'; - } else if (type == GLSLRegister::Type::UnsignedInteger) { - value = "floatBitsToUint(" + value + ')'; - } else { - UNREACHABLE(); - } - - return ConvertIntegerSize(value, size); - } - - std::string GetUniformIndirect(u64 cbuf_index, s64 offset, const std::string& index_str, - GLSLRegister::Type type) { - declr_const_buffers[cbuf_index].MarkAsUsedIndirect(cbuf_index, stage); - - const std::string final_offset = fmt::format("({} + {})", index_str, offset / 4); - const std::string value = 'c' + std::to_string(cbuf_index) + '[' + final_offset + " / 4][" + - final_offset + " % 4]"; - - if (type == GLSLRegister::Type::Float) { - return value; - } else if (type == GLSLRegister::Type::Integer) { - return "floatBitsToInt(" + value + ')'; - } else { - UNREACHABLE(); - return value; - } - } - - /// Add declarations. - void GenerateDeclarations(const std::string& suffix) { - GenerateVertex(); - GenerateRegisters(suffix); - GenerateLocalMemory(); - GenerateInternalFlags(); - GenerateInputAttrs(); - GenerateOutputAttrs(); - GenerateConstBuffers(); - GenerateSamplers(); - GenerateGeometry(); - } - - /// Returns a list of constant buffer declarations. - std::vector GetConstBuffersDeclarations() const { - std::vector result; - std::copy_if(declr_const_buffers.begin(), declr_const_buffers.end(), - std::back_inserter(result), [](const auto& entry) { return entry.IsUsed(); }); - return result; - } - - /// Returns a list of samplers used in the shader. - const std::vector& GetSamplers() const { - return used_samplers; - } - - /// Returns an array of the used clip distances. - const std::array& GetClipDistances() const { - return clip_distances; - } - - /// Returns the GLSL sampler used for the input shader sampler, and creates a new one if - /// necessary. - std::string AccessSampler(const Sampler& sampler, Tegra::Shader::TextureType type, - bool is_array, bool is_shadow) { - const auto offset = static_cast(sampler.index.Value()); - - // If this sampler has already been used, return the existing mapping. 
- const auto itr = - std::find_if(used_samplers.begin(), used_samplers.end(), - [&](const SamplerEntry& entry) { return entry.GetOffset() == offset; }); - - if (itr != used_samplers.end()) { - ASSERT(itr->GetType() == type && itr->IsArray() == is_array && - itr->IsShadow() == is_shadow); - return itr->GetName(); - } - - // Otherwise create a new mapping for this sampler - const std::size_t next_index = used_samplers.size(); - const SamplerEntry entry{stage, offset, next_index, type, is_array, is_shadow}; - used_samplers.emplace_back(entry); - return entry.GetName(); - } - - void SetLocalMemory(u64 lmem) { - local_memory_size = lmem; - } - -private: - /// Generates declarations for registers. - void GenerateRegisters(const std::string& suffix) { - for (const auto& reg : regs) { - declarations.AddLine(GLSLRegister::GetTypeString() + ' ' + reg.GetPrefixString() + - std::to_string(reg.GetIndex()) + '_' + suffix + " = 0;"); - } - declarations.AddNewLine(); - } - - /// Generates declarations for local memory. - void GenerateLocalMemory() { - if (local_memory_size > 0) { - declarations.AddLine("float lmem[" + std::to_string((local_memory_size - 1 + 4) / 4) + - "];"); - declarations.AddNewLine(); - } - } - - /// Generates declarations for internal flags. - void GenerateInternalFlags() { - for (u32 flag = 0; flag < static_cast(InternalFlag::Amount); flag++) { - const InternalFlag code = static_cast(flag); - declarations.AddLine("bool " + GetInternalFlag(code) + " = false;"); - } - declarations.AddNewLine(); - } - - /// Generates declarations for input attributes. - void GenerateInputAttrs() { - for (const auto element : declr_input_attribute) { - // TODO(bunnei): Use proper number of elements for these - u32 idx = - static_cast(element.first) - static_cast(Attribute::Index::Attribute_0); - if (stage != Maxwell3D::Regs::ShaderStage::Vertex) { - // If inputs are varyings, add an offset - idx += GENERIC_VARYING_START_LOCATION; - } - - std::string attr{GetInputAttribute(element.first, element.second)}; - if (stage == Maxwell3D::Regs::ShaderStage::Geometry) { - attr = "gs_" + attr + "[]"; - } - declarations.AddLine("layout (location = " + std::to_string(idx) + ") " + - GetInputFlags(element.first) + "in vec4 " + attr + ';'); - } - - declarations.AddNewLine(); - } - - /// Generates declarations for output attributes. - void GenerateOutputAttrs() { - for (const auto& index : declr_output_attribute) { - // TODO(bunnei): Use proper number of elements for these - const u32 idx = static_cast(index) - - static_cast(Attribute::Index::Attribute_0) + - GENERIC_VARYING_START_LOCATION; - declarations.AddLine("layout (location = " + std::to_string(idx) + ") out vec4 " + - GetOutputAttribute(index) + ';'); - } - declarations.AddNewLine(); - } - - /// Generates declarations for constant buffers. - void GenerateConstBuffers() { - for (const auto& entry : GetConstBuffersDeclarations()) { - declarations.AddLine("layout (std140) uniform " + entry.GetName()); - declarations.AddLine('{'); - declarations.AddLine(" vec4 c" + std::to_string(entry.GetIndex()) + - "[MAX_CONSTBUFFER_ELEMENTS];"); - declarations.AddLine("};"); - declarations.AddNewLine(); - } - declarations.AddNewLine(); - } - - /// Generates declarations for samplers. - void GenerateSamplers() { - const auto& samplers = GetSamplers(); - for (const auto& sampler : samplers) { - declarations.AddLine("uniform " + sampler.GetTypeString() + ' ' + sampler.GetName() + - ';'); - } - declarations.AddNewLine(); - } - - /// Generates declarations used for geometry shaders. 
- void GenerateGeometry() { - if (stage != Maxwell3D::Regs::ShaderStage::Geometry) + void DeclareGeometry() { + if (stage != ShaderStage::Geometry) return; - declarations.AddLine( - "layout (" + GetTopologyName(header.common3.output_topology) + - ", max_vertices = " + std::to_string(header.common4.max_output_vertices) + ") out;"); - declarations.AddNewLine(); + const auto topology = GetTopologyName(header.common3.output_topology); + const auto max_vertices = std::to_string(header.common4.max_output_vertices); + code.AddLine("layout (" + topology + ", max_vertices = " + max_vertices + ") out;"); + code.AddNewLine(); - declarations.AddLine("vec4 amem[" + std::to_string(MAX_GEOMETRY_BUFFERS) + "][" + - std::to_string(MAX_ATTRIBUTES) + "];"); - declarations.AddNewLine(); - - constexpr char buffer[] = "amem[output_buffer]"; - declarations.AddLine("void emit_vertex(uint output_buffer) {"); - ++declarations.scope; - for (const auto element : declr_output_attribute) { - declarations.AddLine(GetOutputAttribute(element) + " = " + buffer + '[' + - std::to_string(static_cast(element)) + "];"); - } - - declarations.AddLine("position = " + std::string(buffer) + '[' + - std::to_string(static_cast(Attribute::Index::Position)) + "];"); - - // If a geometry shader is attached, it will always flip (it's the last stage before - // fragment). For more info about flipping, refer to gl_shader_gen.cpp. - declarations.AddLine("position.xy *= viewport_flip.xy;"); - declarations.AddLine("gl_Position = position;"); - declarations.AddLine("position.w = 1.0;"); - declarations.AddLine("EmitVertex();"); - --declarations.scope; - declarations.AddLine('}'); - declarations.AddNewLine(); + DeclareVertexRedeclarations(); } - void GenerateVertex() { - if (stage != Maxwell3D::Regs::ShaderStage::Vertex) - return; + void DeclareVertexRedeclarations() { bool clip_distances_declared = false; - declarations.AddLine("out gl_PerVertex {"); - ++declarations.scope; - declarations.AddLine("vec4 gl_Position;"); - for (auto& o : fixed_pipeline_output_attributes_used) { + code.AddLine("out gl_PerVertex {"); + ++code.scope; + + code.AddLine("vec4 gl_Position;"); + + for (const auto o : ir.GetOutputAttributes()) { if (o == Attribute::Index::PointSize) - declarations.AddLine("float gl_PointSize;"); + code.AddLine("float gl_PointSize;"); if (!clip_distances_declared && (o == Attribute::Index::ClipDistances0123 || o == Attribute::Index::ClipDistances4567)) { - declarations.AddLine("float gl_ClipDistance[];"); + code.AddLine("float gl_ClipDistance[];"); clip_distances_declared = true; } } - --declarations.scope; - declarations.AddLine("};"); + + --code.scope; + code.AddLine("};"); + code.AddNewLine(); } - /// Generates code representing a temporary (GPR) register. - std::string GetRegister(const Register& reg, unsigned elem) { - if (reg == Register::ZeroIndex) { - return "0"; + void DeclareRegisters() { + const auto& registers = ir.GetRegisters(); + for (const u32 gpr : registers) { + code.AddLine("float " + GetRegister(gpr) + " = 0;"); } - - return regs[reg.GetSwizzledIndex(elem)].GetString(); + if (!registers.empty()) + code.AddNewLine(); } - /** - * Writes code that does a register assignment to value operation. - * @param reg The destination register to use. - * @param elem The element to use for the operation. - * @param value The code representing the value to assign. - * @param dest_num_components Number of components in the destination. - * @param value_num_components Number of components in the value. 
- * @param dest_elem Optional, the destination element to use for the operation. - */ - void SetRegister(const Register& reg, u64 elem, const std::string& value, - u64 dest_num_components, u64 value_num_components, u64 dest_elem, - bool precise) { - if (reg == Register::ZeroIndex) { - // Setting RZ is a nop in hardware. - return; + void DeclarePredicates() { + const auto& predicates = ir.GetPredicates(); + for (const auto pred : predicates) { + code.AddLine("bool " + GetPredicate(pred) + " = false;"); } + if (!predicates.empty()) + code.AddNewLine(); + } - std::string dest = GetRegister(reg, static_cast(dest_elem)); - if (dest_num_components > 1) { - dest += GetSwizzle(elem); - } - - std::string src = '(' + value + ')'; - if (value_num_components > 1) { - src += GetSwizzle(elem); - } - - if (precise && stage != Maxwell3D::Regs::ShaderStage::Fragment) { - const auto scope = shader.Scope(); - - // This avoids optimizations of constant propagation and keeps the code as the original - // Sadly using the precise keyword causes "linking" errors on fragment shaders. - shader.AddLine("precise float tmp = " + src + ';'); - shader.AddLine(dest + " = tmp;"); - } else { - shader.AddLine(dest + " = " + src + ';'); + void DeclareLocalMemory() { + if (const u64 local_memory_size = header.GetLocalMemorySize(); local_memory_size > 0) { + const auto element_count = Common::AlignUp(local_memory_size, 4) / 4; + code.AddLine("float " + GetLocalMemory() + '[' + std::to_string(element_count) + "];"); + code.AddNewLine(); } } - /// Build the GLSL register list. - void BuildRegisterList() { - regs.reserve(Register::NumRegisters); - - for (std::size_t index = 0; index < Register::NumRegisters; ++index) { - regs.emplace_back(index, suffix); + void DeclareInternalFlags() { + for (u32 flag = 0; flag < static_cast(InternalFlag::Amount); flag++) { + const InternalFlag flag_code = static_cast(flag); + code.AddLine("bool " + GetInternalFlag(flag_code) + " = false;"); } + code.AddNewLine(); } - void BuildInputList() { - const u32 size = static_cast(Attribute::Index::Attribute_31) - - static_cast(Attribute::Index::Attribute_0) + 1; - declr_input_attribute.reserve(size); - } - - /// Generates code representing an input attribute register. - std::string GetInputAttribute(Attribute::Index attribute, - const Tegra::Shader::IpaMode& input_mode, - std::optional vertex = {}) { - auto GeometryPass = [&](const std::string& name) { - if (stage == Maxwell3D::Regs::ShaderStage::Geometry && vertex) { - // TODO(Rodrigo): Guard geometry inputs against out of bound reads. Some games set - // an 0x80000000 index for those and the shader fails to build. Find out why this - // happens and what's its intent. - return "gs_" + name + '[' + GetRegisterAsInteger(*vertex, 0, false) + - " % MAX_VERTEX_INPUT]"; - } - return name; - }; - - switch (attribute) { - case Attribute::Index::Position: - if (stage != Maxwell3D::Regs::ShaderStage::Fragment) { - return GeometryPass("position"); - } else { - return "vec4(gl_FragCoord.x, gl_FragCoord.y, gl_FragCoord.z, 1.0)"; - } - case Attribute::Index::PointCoord: - return "vec4(gl_PointCoord.x, gl_PointCoord.y, 0, 0)"; - case Attribute::Index::TessCoordInstanceIDVertexID: - // TODO(Subv): Find out what the values are for the first two elements when inside a - // vertex shader, and what's the value of the fourth element when inside a Tess Eval - // shader. - ASSERT(stage == Maxwell3D::Regs::ShaderStage::Vertex); - // Config pack's first value is instance_id. 
- return "vec4(0, 0, uintBitsToFloat(config_pack[0]), uintBitsToFloat(gl_VertexID))"; - case Attribute::Index::FrontFacing: - // TODO(Subv): Find out what the values are for the other elements. - ASSERT(stage == Maxwell3D::Regs::ShaderStage::Fragment); - return "vec4(0, 0, 0, intBitsToFloat(gl_FrontFacing ? -1 : 0))"; - default: - const u32 index{static_cast(attribute) - - static_cast(Attribute::Index::Attribute_0)}; - if (attribute >= Attribute::Index::Attribute_0 && - attribute <= Attribute::Index::Attribute_31) { - if (declr_input_attribute.count(attribute) == 0) { - declr_input_attribute[attribute] = input_mode; - } else { - UNIMPLEMENTED_IF_MSG(declr_input_attribute[attribute] != input_mode, - "Multiple input modes for the same attribute"); - } - return GeometryPass("input_attribute_" + std::to_string(index)); - } - - UNIMPLEMENTED_MSG("Unhandled input attribute: {}", static_cast(attribute)); - } - - return "vec4(0, 0, 0, 0)"; - } - - std::string GetInputFlags(const Attribute::Index attribute) { - const Tegra::Shader::IpaSampleMode sample_mode = - declr_input_attribute[attribute].sampling_mode; - const Tegra::Shader::IpaInterpMode interp_mode = - declr_input_attribute[attribute].interpolation_mode; + std::string GetInputFlags(const IpaMode& input_mode) { + const IpaSampleMode sample_mode = input_mode.sampling_mode; + const IpaInterpMode interp_mode = input_mode.interpolation_mode; std::string out; + switch (interp_mode) { - case Tegra::Shader::IpaInterpMode::Flat: { + case IpaInterpMode::Flat: out += "flat "; break; - } - case Tegra::Shader::IpaInterpMode::Linear: { + case IpaInterpMode::Linear: out += "noperspective "; break; - } - case Tegra::Shader::IpaInterpMode::Perspective: { + case IpaInterpMode::Perspective: // Default, Smooth break; - } - default: { + default: UNIMPLEMENTED_MSG("Unhandled IPA interp mode: {}", static_cast(interp_mode)); } - } switch (sample_mode) { - case Tegra::Shader::IpaSampleMode::Centroid: - // It can be implemented with the "centroid " keyword in glsl + case IpaSampleMode::Centroid: + // It can be implemented with the "centroid " keyword in GLSL UNIMPLEMENTED_MSG("Unimplemented IPA sampler mode centroid"); break; - case Tegra::Shader::IpaSampleMode::Default: + case IpaSampleMode::Default: // Default, n/a break; - default: { + default: UNIMPLEMENTED_MSG("Unimplemented IPA sampler mode: {}", static_cast(sample_mode)); - break; - } } return out; } - /// Generates code representing the declaration name of an output attribute register. 
- std::string GetOutputAttribute(Attribute::Index attribute) { - switch (attribute) { - case Attribute::Index::PointSize: - return "gl_PointSize"; - case Attribute::Index::Position: - return "position"; - case Attribute::Index::ClipDistances0123: - case Attribute::Index::ClipDistances4567: { - return "gl_ClipDistance"; - } - default: - const u32 index{static_cast(attribute) - - static_cast(Attribute::Index::Attribute_0)}; - if (attribute >= Attribute::Index::Attribute_0) { - declr_output_attribute.insert(attribute); - return "output_attribute_" + std::to_string(index); + void DeclareInputAttributes() { + const auto& attributes = ir.GetInputAttributes(); + for (const auto element : attributes) { + const Attribute::Index index = element.first; + const IpaMode& input_mode = *element.second.begin(); + if (index < Attribute::Index::Attribute_0 || index > Attribute::Index::Attribute_31) { + // Skip when it's not a generic attribute + continue; } - UNIMPLEMENTED_MSG("Unhandled output attribute={}", index); + ASSERT(element.second.size() > 0); + UNIMPLEMENTED_IF_MSG(element.second.size() > 1, + "Multiple input flag modes are not supported in GLSL"); + + // TODO(bunnei): Use proper number of elements for these + u32 idx = static_cast(index) - static_cast(Attribute::Index::Attribute_0); + if (stage != ShaderStage::Vertex) { + // If inputs are varyings, add an offset + idx += GENERIC_VARYING_START_LOCATION; + } + + std::string attr = GetInputAttribute(index); + if (stage == ShaderStage::Geometry) { + attr = "gs_" + attr + "[]"; + } + code.AddLine("layout (location = " + std::to_string(idx) + ") " + + GetInputFlags(input_mode) + "in vec4 " + attr + ';'); + } + if (!attributes.empty()) + code.AddNewLine(); + } + + void DeclareOutputAttributes() { + const auto& attributes = ir.GetOutputAttributes(); + for (const auto index : attributes) { + if (index < Attribute::Index::Attribute_0 || index > Attribute::Index::Attribute_31) { + // Skip when it's not a generic attribute + continue; + } + // TODO(bunnei): Use proper number of elements for these + const auto idx = static_cast(index) - + static_cast(Attribute::Index::Attribute_0) + + GENERIC_VARYING_START_LOCATION; + code.AddLine("layout (location = " + std::to_string(idx) + ") out vec4 " + + GetOutputAttribute(index) + ';'); + } + if (!attributes.empty()) + code.AddNewLine(); + } + + void DeclareConstantBuffers() { + for (const auto& entry : ir.GetConstantBuffers()) { + const auto [index, size] = entry; + code.AddLine("layout (std140) uniform " + GetConstBufferBlock(index) + " {"); + code.AddLine(" vec4 " + GetConstBuffer(index) + "[MAX_CONSTBUFFER_ELEMENTS];"); + code.AddLine("};"); + code.AddNewLine(); + } + } + + void DeclareSamplers() { + const auto& samplers = ir.GetSamplers(); + for (const auto& sampler : samplers) { + std::string sampler_type = [&]() { + switch (sampler.GetType()) { + case Tegra::Shader::TextureType::Texture1D: + return "sampler1D"; + case Tegra::Shader::TextureType::Texture2D: + return "sampler2D"; + case Tegra::Shader::TextureType::Texture3D: + return "sampler3D"; + case Tegra::Shader::TextureType::TextureCube: + return "samplerCube"; + default: + UNREACHABLE(); + return "sampler2D"; + } + }(); + if (sampler.IsArray()) + sampler_type += "Array"; + if (sampler.IsShadow()) + sampler_type += "Shadow"; + + code.AddLine("uniform " + sampler_type + ' ' + GetSampler(sampler) + ';'); + } + if (!samplers.empty()) + code.AddNewLine(); + } + + void VisitBasicBlock(const BasicBlock& bb) { + for (const Node node : bb) { + if (const std::string 
expr = Visit(node); !expr.empty()) { + code.AddLine(expr); + } + } + } + + std::string Visit(Node node) { + if (const auto operation = std::get_if(node)) { + const auto operation_index = static_cast(operation->GetCode()); + const auto decompiler = operation_decompilers[operation_index]; + if (decompiler == nullptr) { + UNREACHABLE_MSG("Operation decompiler {} not defined", operation_index); + } + return (this->*decompiler)(*operation); + + } else if (const auto gpr = std::get_if(node)) { + const u32 index = gpr->GetIndex(); + if (index == Register::ZeroIndex) { + return "0"; + } + return GetRegister(index); + + } else if (const auto immediate = std::get_if(node)) { + const u32 value = immediate->GetValue(); + if (value < 10) { + // For eyecandy avoid using hex numbers on single digits + return fmt::format("utof({}u)", immediate->GetValue()); + } + return fmt::format("utof(0x{:x}u)", immediate->GetValue()); + + } else if (const auto predicate = std::get_if(node)) { + const auto value = [&]() -> std::string { + switch (const auto index = predicate->GetIndex(); index) { + case Tegra::Shader::Pred::UnusedIndex: + return "true"; + case Tegra::Shader::Pred::NeverExecute: + return "false"; + default: + return GetPredicate(index); + } + }(); + if (predicate->IsNegated()) { + return "!(" + value + ')'; + } + return value; + + } else if (const auto abuf = std::get_if(node)) { + const auto attribute = abuf->GetIndex(); + const auto element = abuf->GetElement(); + + const auto GeometryPass = [&](const std::string& name) { + if (stage == ShaderStage::Geometry && abuf->GetBuffer()) { + // TODO(Rodrigo): Guard geometry inputs against out of bound reads. Some games + // set an 0x80000000 index for those and the shader fails to build. Find out why + // this happens and what's its intent. + return "gs_" + name + "[ftou(" + Visit(abuf->GetBuffer()) + + ") % MAX_VERTEX_INPUT]"; + } + return name; + }; + + switch (attribute) { + case Attribute::Index::Position: + if (stage != ShaderStage::Fragment) { + return GeometryPass("position") + GetSwizzle(element); + } else { + return element == 3 ? "1.0f" : "gl_FragCoord" + GetSwizzle(element); + } + case Attribute::Index::PointCoord: + switch (element) { + case 0: + return "gl_PointCoord.x"; + case 1: + return "gl_PointCoord.y"; + case 2: + case 3: + return "0"; + } + UNREACHABLE(); + return "0"; + case Attribute::Index::TessCoordInstanceIDVertexID: + // TODO(Subv): Find out what the values are for the first two elements when inside a + // vertex shader, and what's the value of the fourth element when inside a Tess Eval + // shader. + ASSERT(stage == ShaderStage::Vertex); + switch (element) { + case 2: + // Config pack's first value is instance_id. + return "uintBitsToFloat(config_pack[0])"; + case 3: + return "uintBitsToFloat(gl_VertexID)"; + } + UNIMPLEMENTED_MSG("Unmanaged TessCoordInstanceIDVertexID element={}", element); + return "0"; + case Attribute::Index::FrontFacing: + // TODO(Subv): Find out what the values are for the other elements. + ASSERT(stage == ShaderStage::Fragment); + switch (element) { + case 3: + return "itof(gl_FrontFacing ? 
-1 : 0)"; + } + UNIMPLEMENTED_MSG("Unmanaged FrontFacing element={}", element); + return "0"; + default: + if (attribute >= Attribute::Index::Attribute_0 && + attribute <= Attribute::Index::Attribute_31) { + return GeometryPass(GetInputAttribute(attribute)) + GetSwizzle(element); + } + break; + } + UNIMPLEMENTED_MSG("Unhandled input attribute: {}", static_cast(attribute)); + + } else if (const auto cbuf = std::get_if(node)) { + const Node offset = cbuf->GetOffset(); + if (const auto immediate = std::get_if(offset)) { + // Direct access + const u32 offset_imm = immediate->GetValue(); + return fmt::format("{}[{}][{}]", GetConstBuffer(cbuf->GetIndex()), offset_imm / 4, + offset_imm % 4); + + } else if (std::holds_alternative(*offset)) { + // Indirect access + const std::string final_offset = code.GenerateTemporal(); + code.AddLine("uint " + final_offset + " = (ftou(" + Visit(offset) + ") / 4) & " + + std::to_string(MAX_CONSTBUFFER_ELEMENTS - 1) + ';'); + return fmt::format("{}[{} / 4][{} % 4]", GetConstBuffer(cbuf->GetIndex()), + final_offset, final_offset); + + } else { + UNREACHABLE_MSG("Unmanaged offset node type"); + } + + } else if (const auto lmem = std::get_if(node)) { + return fmt::format("{}[ftou({}) / 4]", GetLocalMemory(), Visit(lmem->GetAddress())); + + } else if (const auto internal_flag = std::get_if(node)) { + return GetInternalFlag(internal_flag->GetFlag()); + + } else if (const auto conditional = std::get_if(node)) { + // It's invalid to call conditional on nested nodes, use an operation instead + code.AddLine("if (" + Visit(conditional->GetCondition()) + ") {"); + ++code.scope; + + VisitBasicBlock(conditional->GetCode()); + + --code.scope; + code.AddLine('}'); + return {}; + + } else if (const auto comment = std::get_if(node)) { + return "// " + comment->GetText(); + } + UNREACHABLE(); + return {}; + } + + std::string ApplyPrecise(Operation operation, const std::string& value) { + if (!IsPrecise(operation)) { + return value; + } + // There's a bug in NVidia's proprietary drivers that makes precise fail on fragment shaders + const std::string precise = stage != ShaderStage::Fragment ? 
"precise " : ""; + + const std::string temporal = code.GenerateTemporal(); + code.AddLine(precise + "float " + temporal + " = " + value + ';'); + return temporal; + } + + std::string VisitOperand(Operation operation, std::size_t operand_index) { + const auto& operand = operation[operand_index]; + const bool parent_precise = IsPrecise(operation); + const bool child_precise = IsPrecise(operand); + const bool child_trivial = !std::holds_alternative(*operand); + if (!parent_precise || child_precise || child_trivial) { + return Visit(operand); + } + + const std::string temporal = code.GenerateTemporal(); + code.AddLine("float " + temporal + " = " + Visit(operand) + ';'); + return temporal; + } + + std::string VisitOperand(Operation operation, std::size_t operand_index, Type type) { + std::string value = VisitOperand(operation, operand_index); + + switch (type) { + case Type::Bool: + case Type::Bool2: + case Type::Float: + return value; + case Type::Int: + return "ftoi(" + value + ')'; + case Type::Uint: + return "ftou(" + value + ')'; + case Type::HalfFloat: + const auto half_meta = std::get_if(&operation.GetMeta()); + if (!half_meta) { + value = "toHalf2(" + value + ')'; + } + + switch (half_meta->types.at(operand_index)) { + case Tegra::Shader::HalfType::H0_H1: + return "toHalf2(" + value + ')'; + case Tegra::Shader::HalfType::F32: + return "vec2(" + value + ')'; + case Tegra::Shader::HalfType::H0_H0: + return "vec2(toHalf2(" + value + ")[0])"; + case Tegra::Shader::HalfType::H1_H1: + return "vec2(toHalf2(" + value + ")[1])"; + } + } + UNREACHABLE(); + return value; + } + + std::string BitwiseCastResult(std::string value, Type type, bool needs_parenthesis = false) { + switch (type) { + case Type::Bool: + case Type::Float: + if (needs_parenthesis) { + return '(' + value + ')'; + } + return value; + case Type::Int: + return "itof(" + value + ')'; + case Type::Uint: + return "utof(" + value + ')'; + case Type::HalfFloat: + return "fromHalf2(" + value + ')'; + } + UNREACHABLE(); + return value; + } + + std::string GenerateUnary(Operation operation, const std::string& func, Type result_type, + Type type_a, bool needs_parenthesis = true) { + return ApplyPrecise(operation, + BitwiseCastResult(func + '(' + VisitOperand(operation, 0, type_a) + ')', + result_type, needs_parenthesis)); + } + + std::string GenerateBinaryInfix(Operation operation, const std::string& func, Type result_type, + Type type_a, Type type_b) { + const std::string op_a = VisitOperand(operation, 0, type_a); + const std::string op_b = VisitOperand(operation, 1, type_b); + + return ApplyPrecise( + operation, BitwiseCastResult('(' + op_a + ' ' + func + ' ' + op_b + ')', result_type)); + } + + std::string GenerateBinaryCall(Operation operation, const std::string& func, Type result_type, + Type type_a, Type type_b) { + const std::string op_a = VisitOperand(operation, 0, type_a); + const std::string op_b = VisitOperand(operation, 1, type_b); + + return ApplyPrecise(operation, + BitwiseCastResult(func + '(' + op_a + ", " + op_b + ')', result_type)); + } + + std::string GenerateTernary(Operation operation, const std::string& func, Type result_type, + Type type_a, Type type_b, Type type_c) { + const std::string op_a = VisitOperand(operation, 0, type_a); + const std::string op_b = VisitOperand(operation, 1, type_b); + const std::string op_c = VisitOperand(operation, 2, type_c); + + return ApplyPrecise( + operation, + BitwiseCastResult(func + '(' + op_a + ", " + op_b + ", " + op_c + ')', result_type)); + } + + std::string 
GenerateQuaternary(Operation operation, const std::string& func, Type result_type, + Type type_a, Type type_b, Type type_c, Type type_d) { + const std::string op_a = VisitOperand(operation, 0, type_a); + const std::string op_b = VisitOperand(operation, 1, type_b); + const std::string op_c = VisitOperand(operation, 2, type_c); + const std::string op_d = VisitOperand(operation, 3, type_d); + + return ApplyPrecise(operation, BitwiseCastResult(func + '(' + op_a + ", " + op_b + ", " + + op_c + ", " + op_d + ')', + result_type)); + } + + std::string GenerateTexture(Operation operation, const std::string& func, + bool is_extra_int = false) { + constexpr std::array coord_constructors = {"float", "vec2", "vec3", "vec4"}; + + const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); + const auto count = static_cast<u32>(operation.GetOperandsCount()); + ASSERT(meta); + + std::string expr = func; + expr += '('; + expr += GetSampler(meta->sampler); + expr += ", "; + + expr += coord_constructors[meta->coords_count - 1]; + expr += '('; + for (u32 i = 0; i < count; ++i) { + const bool is_extra = i >= meta->coords_count; + const bool is_array = i == meta->array_index; + + std::string operand = [&]() { + if (is_extra && is_extra_int) { + if (const auto immediate = std::get_if<ImmediateNode>(operation[i])) { + return std::to_string(static_cast<s32>(immediate->GetValue())); + } else { + return "ftoi(" + Visit(operation[i]) + ')'; + } + } else { + return Visit(operation[i]); + } + }(); + if (is_array) { + ASSERT(!is_extra); + operand = "float(ftoi(" + operand + "))"; + } + + expr += operand; + + if (i + 1 == meta->coords_count) { + expr += ')'; + } + if (i + 1 < count) { + expr += ", "; + } + } + expr += ')'; + return expr; + } + + std::string Assign(Operation operation) { + const Node dest = operation[0]; + const Node src = operation[1]; + + std::string target; + if (const auto gpr = std::get_if<GprNode>(dest)) { + if (gpr->GetIndex() == Register::ZeroIndex) { + // Writing to Register::ZeroIndex is a no op + return {}; + } + target = GetRegister(gpr->GetIndex()); + + } else if (const auto abuf = std::get_if<AbufNode>(dest)) { + target = [&]() -> std::string { + switch (const auto attribute = abuf->GetIndex(); abuf->GetIndex()) { + case Attribute::Index::Position: + return "position" + GetSwizzle(abuf->GetElement()); + case Attribute::Index::PointSize: + return "gl_PointSize"; + case Attribute::Index::ClipDistances0123: + return "gl_ClipDistance[" + std::to_string(abuf->GetElement()) + ']'; + case Attribute::Index::ClipDistances4567: + return "gl_ClipDistance[" + std::to_string(abuf->GetElement() + 4) + ']'; + default: + if (attribute >= Attribute::Index::Attribute_0 && + attribute <= Attribute::Index::Attribute_31) { + return GetOutputAttribute(attribute) + GetSwizzle(abuf->GetElement()); + } + UNIMPLEMENTED_MSG("Unhandled output attribute: {}", + static_cast<u32>(attribute)); + return "0"; + } + }(); + + } else if (const auto lmem = std::get_if<LmemNode>(dest)) { + target = GetLocalMemory() + "[ftou(" + Visit(lmem->GetAddress()) + ") / 4]"; + + } else { + UNREACHABLE_MSG("Assign called without a proper target"); + } + + code.AddLine(target + " = " + Visit(src) + ';'); + return {}; + } + + std::string Composite(Operation operation) { + std::string value = "vec4("; + for (std::size_t i = 0; i < 4; ++i) { + value += Visit(operation[i]); + if (i < 3) + value += ", "; + } + value += ')'; + return value; + } + + template <Type type> + std::string Add(Operation operation) { + return GenerateBinaryInfix(operation, "+", type, type, type); + } + + template <Type type> + std::string Mul(Operation 
operation) { + return GenerateBinaryInfix(operation, "*", type, type, type); + } + + template <Type type> + std::string Div(Operation operation) { + return GenerateBinaryInfix(operation, "/", type, type, type); + } + + template <Type type> + std::string Fma(Operation operation) { + return GenerateTernary(operation, "fma", type, type, type, type); + } + + template <Type type> + std::string Negate(Operation operation) { + return GenerateUnary(operation, "-", type, type, true); + } + + template <Type type> + std::string Absolute(Operation operation) { + return GenerateUnary(operation, "abs", type, type, false); + } + + std::string FClamp(Operation operation) { + return GenerateTernary(operation, "clamp", Type::Float, Type::Float, Type::Float, + Type::Float); + } + + template <Type type> + std::string Min(Operation operation) { + return GenerateBinaryCall(operation, "min", type, type, type); + } + + template <Type type> + std::string Max(Operation operation) { + return GenerateBinaryCall(operation, "max", type, type, type); + } + + std::string Select(Operation operation) { + const std::string condition = Visit(operation[0]); + const std::string true_case = Visit(operation[1]); + const std::string false_case = Visit(operation[2]); + return ApplyPrecise(operation, + '(' + condition + " ? " + true_case + " : " + false_case + ')'); + } + + std::string FCos(Operation operation) { + return GenerateUnary(operation, "cos", Type::Float, Type::Float, false); + } + + std::string FSin(Operation operation) { + return GenerateUnary(operation, "sin", Type::Float, Type::Float, false); + } + + std::string FExp2(Operation operation) { + return GenerateUnary(operation, "exp2", Type::Float, Type::Float, false); + } + + std::string FLog2(Operation operation) { + return GenerateUnary(operation, "log2", Type::Float, Type::Float, false); + } + + std::string FInverseSqrt(Operation operation) { + return GenerateUnary(operation, "inversesqrt", Type::Float, Type::Float, false); + } + + std::string FSqrt(Operation operation) { + return GenerateUnary(operation, "sqrt", Type::Float, Type::Float, false); + } + + std::string FRoundEven(Operation operation) { + return GenerateUnary(operation, "roundEven", Type::Float, Type::Float, false); + } + + std::string FFloor(Operation operation) { + return GenerateUnary(operation, "floor", Type::Float, Type::Float, false); + } + + std::string FCeil(Operation operation) { + return GenerateUnary(operation, "ceil", Type::Float, Type::Float, false); + } + + std::string FTrunc(Operation operation) { + return GenerateUnary(operation, "trunc", Type::Float, Type::Float, false); + } + + template <Type type> + std::string FCastInteger(Operation operation) { + return GenerateUnary(operation, "float", Type::Float, type, false); + } + + std::string ICastFloat(Operation operation) { + return GenerateUnary(operation, "int", Type::Int, Type::Float, false); + } + + std::string ICastUnsigned(Operation operation) { + return GenerateUnary(operation, "int", Type::Int, Type::Uint, false); + } + + template <Type type> + std::string LogicalShiftLeft(Operation operation) { + return GenerateBinaryInfix(operation, "<<", type, type, Type::Uint); + } + + std::string ILogicalShiftRight(Operation operation) { + const std::string op_a = VisitOperand(operation, 0, Type::Uint); + const std::string op_b = VisitOperand(operation, 1, Type::Uint); + + return ApplyPrecise(operation, + BitwiseCastResult("int(" + op_a + " >> " + op_b + ')', Type::Int)); + } + + std::string IArithmeticShiftRight(Operation operation) { + return GenerateBinaryInfix(operation, ">>", Type::Int, Type::Int, Type::Uint); + } + + template <Type type> + 
std::string BitwiseAnd(Operation operation) { + return GenerateBinaryInfix(operation, "&", type, type, type); + } + + template <Type type> + std::string BitwiseOr(Operation operation) { + return GenerateBinaryInfix(operation, "|", type, type, type); + } + + template <Type type> + std::string BitwiseXor(Operation operation) { + return GenerateBinaryInfix(operation, "^", type, type, type); + } + + template <Type type> + std::string BitwiseNot(Operation operation) { + return GenerateUnary(operation, "~", type, type, false); + } + + std::string UCastFloat(Operation operation) { + return GenerateUnary(operation, "uint", Type::Uint, Type::Float, false); + } + + std::string UCastSigned(Operation operation) { + return GenerateUnary(operation, "uint", Type::Uint, Type::Int, false); + } + + std::string UShiftRight(Operation operation) { + return GenerateBinaryInfix(operation, ">>", Type::Uint, Type::Uint, Type::Uint); + } + + template <Type type> + std::string BitfieldInsert(Operation operation) { + return GenerateQuaternary(operation, "bitfieldInsert", type, type, type, Type::Int, + Type::Int); + } + + template <Type type> + std::string BitfieldExtract(Operation operation) { + return GenerateTernary(operation, "bitfieldExtract", type, type, Type::Int, Type::Int); + } + + template <Type type> + std::string BitCount(Operation operation) { + return GenerateUnary(operation, "bitCount", type, type, false); + } + + std::string HNegate(Operation operation) { + const auto GetNegate = [&](std::size_t index) -> std::string { + return VisitOperand(operation, index, Type::Bool) + " ? -1 : 1"; + }; + const std::string value = '(' + VisitOperand(operation, 0, Type::HalfFloat) + " * vec2(" + + GetNegate(1) + ", " + GetNegate(2) + "))"; + return BitwiseCastResult(value, Type::HalfFloat); + } + + std::string HMergeF32(Operation operation) { + return "float(toHalf2(" + Visit(operation[0]) + ")[0])"; + } + + std::string HMergeH0(Operation operation) { + return "fromHalf2(vec2(toHalf2(" + Visit(operation[0]) + ")[1], toHalf2(" + + Visit(operation[1]) + ")[0]))"; + } + + std::string HMergeH1(Operation operation) { + return "fromHalf2(vec2(toHalf2(" + Visit(operation[0]) + ")[0], toHalf2(" + + Visit(operation[1]) + ")[1]))"; + } + + std::string HPack2(Operation operation) { + return "utof(packHalf2x16(vec2(" + Visit(operation[0]) + ", " + Visit(operation[1]) + ")))"; + } + + template <Type type> + std::string LogicalLessThan(Operation operation) { + return GenerateBinaryInfix(operation, "<", Type::Bool, type, type); + } + + template <Type type> + std::string LogicalEqual(Operation operation) { + return GenerateBinaryInfix(operation, "==", Type::Bool, type, type); + } + + template <Type type> + std::string LogicalLessEqual(Operation operation) { + return GenerateBinaryInfix(operation, "<=", Type::Bool, type, type); + } + + template <Type type> + std::string LogicalGreaterThan(Operation operation) { + return GenerateBinaryInfix(operation, ">", Type::Bool, type, type); + } + + template <Type type> + std::string LogicalNotEqual(Operation operation) { + return GenerateBinaryInfix(operation, "!=", Type::Bool, type, type); + } + + template <Type type> + std::string LogicalGreaterEqual(Operation operation) { + return GenerateBinaryInfix(operation, ">=", Type::Bool, type, type); + } + + std::string LogicalFIsNan(Operation operation) { + return GenerateUnary(operation, "isnan", Type::Bool, Type::Float, false); + } + + std::string LogicalAssign(Operation operation) { + const Node dest = operation[0]; + const Node src = operation[1]; + + std::string target; + + if (const auto pred = std::get_if<PredicateNode>(dest)) { + ASSERT_MSG(!pred->IsNegated(), "Negating logical assignment"); + + 
const auto index = pred->GetIndex(); + switch (index) { + case Tegra::Shader::Pred::NeverExecute: + case Tegra::Shader::Pred::UnusedIndex: + // Writing to these predicates is a no-op + return {}; + } + target = GetPredicate(index); + } else if (const auto flag = std::get_if(dest)) { + target = GetInternalFlag(flag->GetFlag()); + } + + code.AddLine(target + " = " + Visit(src) + ';'); + return {}; + } + + std::string LogicalAnd(Operation operation) { + return GenerateBinaryInfix(operation, "&&", Type::Bool, Type::Bool, Type::Bool); + } + + std::string LogicalOr(Operation operation) { + return GenerateBinaryInfix(operation, "||", Type::Bool, Type::Bool, Type::Bool); + } + + std::string LogicalXor(Operation operation) { + return GenerateBinaryInfix(operation, "^^", Type::Bool, Type::Bool, Type::Bool); + } + + std::string LogicalNegate(Operation operation) { + return GenerateUnary(operation, "!", Type::Bool, Type::Bool, false); + } + + std::string LogicalPick2(Operation operation) { + const std::string pair = VisitOperand(operation, 0, Type::Bool2); + return pair + '[' + VisitOperand(operation, 1, Type::Uint) + ']'; + } + + std::string LogicalAll2(Operation operation) { + return GenerateUnary(operation, "all", Type::Bool, Type::Bool2); + } + + std::string LogicalAny2(Operation operation) { + return GenerateUnary(operation, "any", Type::Bool, Type::Bool2); + } + + std::string Logical2HLessThan(Operation operation) { + return GenerateBinaryCall(operation, "lessThan", Type::Bool2, Type::HalfFloat, + Type::HalfFloat); + } + + std::string Logical2HEqual(Operation operation) { + return GenerateBinaryCall(operation, "equal", Type::Bool2, Type::HalfFloat, + Type::HalfFloat); + } + + std::string Logical2HLessEqual(Operation operation) { + return GenerateBinaryCall(operation, "lessThanEqual", Type::Bool2, Type::HalfFloat, + Type::HalfFloat); + } + + std::string Logical2HGreaterThan(Operation operation) { + return GenerateBinaryCall(operation, "greaterThan", Type::Bool2, Type::HalfFloat, + Type::HalfFloat); + } + + std::string Logical2HNotEqual(Operation operation) { + return GenerateBinaryCall(operation, "notEqual", Type::Bool2, Type::HalfFloat, + Type::HalfFloat); + } + + std::string Logical2HGreaterEqual(Operation operation) { + return GenerateBinaryCall(operation, "greaterThanEqual", Type::Bool2, Type::HalfFloat, + Type::HalfFloat); + } + + std::string F4Texture(Operation operation) { + const auto meta = std::get_if(&operation.GetMeta()); + ASSERT(meta); + + std::string expr = GenerateTexture(operation, "texture"); + if (meta->sampler.IsShadow()) { + expr = "vec4(" + expr + ')'; + } + return expr + GetSwizzle(meta->element); + } + + std::string F4TextureLod(Operation operation) { + const auto meta = std::get_if(&operation.GetMeta()); + ASSERT(meta); + + std::string expr = GenerateTexture(operation, "textureLod"); + if (meta->sampler.IsShadow()) { + expr = "vec4(" + expr + ')'; + } + return expr + GetSwizzle(meta->element); + } + + std::string F4TextureGather(Operation operation) { + const auto meta = std::get_if(&operation.GetMeta()); + ASSERT(meta); + + return GenerateTexture(operation, "textureGather", !meta->sampler.IsShadow()) + + GetSwizzle(meta->element); + } + + std::string F4TextureQueryDimensions(Operation operation) { + const auto meta = std::get_if(&operation.GetMeta()); + ASSERT(meta); + + const std::string sampler = GetSampler(meta->sampler); + const std::string lod = VisitOperand(operation, 0, Type::Int); + + switch (meta->element) { + case 0: + case 1: + return "textureSize(" + sampler 
+ ", " + lod + ')' + GetSwizzle(meta->element); + case 2: + return "0"; + case 3: + return "textureQueryLevels(" + sampler + ')'; + } + UNREACHABLE(); + return "0"; + } + + std::string F4TextureQueryLod(Operation operation) { + const auto meta = std::get_if(&operation.GetMeta()); + ASSERT(meta); + + if (meta->element < 2) { + return "itof(int((" + GenerateTexture(operation, "textureQueryLod") + " * vec2(256))" + + GetSwizzle(meta->element) + "))"; + } + return "0"; + } + + std::string F4TexelFetch(Operation operation) { + constexpr std::array constructors = {"int", "ivec2", "ivec3", "ivec4"}; + const auto meta = std::get_if(&operation.GetMeta()); + const auto count = static_cast(operation.GetOperandsCount()); + ASSERT(meta); + + std::string expr = "texelFetch("; + expr += GetSampler(meta->sampler); + expr += ", "; + + expr += constructors[meta->coords_count - 1]; + expr += '('; + for (u32 i = 0; i < count; ++i) { + expr += VisitOperand(operation, i, Type::Int); + + if (i + 1 == meta->coords_count) { + expr += ')'; + } + if (i + 1 < count) { + expr += ", "; + } + } + expr += ')'; + return expr + GetSwizzle(meta->element); + } + + std::string Branch(Operation operation) { + const auto target = std::get_if(operation[0]); + UNIMPLEMENTED_IF(!target); + + code.AddLine(fmt::format("jmp_to = 0x{:x}u;", target->GetValue())); + code.AddLine("break;"); + return {}; + } + + std::string PushFlowStack(Operation operation) { + const auto target = std::get_if(operation[0]); + UNIMPLEMENTED_IF(!target); + + code.AddLine(fmt::format("flow_stack[flow_stack_top++] = 0x{:x}u;", target->GetValue())); + return {}; + } + + std::string PopFlowStack(Operation operation) { + code.AddLine("jmp_to = flow_stack[--flow_stack_top];"); + code.AddLine("break;"); + return {}; + } + + std::string Exit(Operation operation) { + if (stage != ShaderStage::Fragment) { + code.AddLine("return;"); return {}; } - } - - ShaderWriter& shader; - ShaderWriter& declarations; - std::vector regs; - std::unordered_map declr_input_attribute; - std::set declr_output_attribute; - std::array declr_const_buffers; - std::vector used_samplers; - const Maxwell3D::Regs::ShaderStage& stage; - const std::string& suffix; - const Tegra::Shader::Header& header; - std::unordered_set fixed_pipeline_output_attributes_used; - std::array clip_distances{}; - u64 local_memory_size; -}; - -class GLSLGenerator { -public: - GLSLGenerator(const std::set& subroutines, const ProgramCode& program_code, - u32 main_offset, Maxwell3D::Regs::ShaderStage stage, const std::string& suffix, - std::size_t shader_length) - : subroutines(subroutines), program_code(program_code), main_offset(main_offset), - stage(stage), suffix(suffix), shader_length(shader_length) { - std::memcpy(&header, program_code.data(), sizeof(Tegra::Shader::Header)); - local_memory_size = header.GetLocalMemorySize(); - regs.SetLocalMemory(local_memory_size); - Generate(suffix); - } - - std::string GetShaderCode() { - return declarations.GetResult() + shader.GetResult(); - } - - /// Returns entries in the shader that are useful for external functions - ShaderEntries GetEntries() const { - return {regs.GetConstBuffersDeclarations(), regs.GetSamplers(), regs.GetClipDistances(), - shader_length}; - } - -private: - /// Gets the Subroutine object corresponding to the specified address. 
- const Subroutine& GetSubroutine(u32 begin, u32 end) const { - const auto iter = subroutines.find(Subroutine{begin, end, suffix}); - ASSERT(iter != subroutines.end()); - return *iter; - } - - /// Generates code representing a 19-bit immediate value - static std::string GetImmediate19(const Instruction& instr) { - return fmt::format("uintBitsToFloat({})", instr.alu.GetImm20_19()); - } - - /// Generates code representing a 32-bit immediate value - static std::string GetImmediate32(const Instruction& instr) { - return fmt::format("uintBitsToFloat({})", instr.alu.GetImm20_32()); - } - - /// Generates code representing a vec2 pair unpacked from a half float immediate - static std::string UnpackHalfImmediate(const Instruction& instr, bool negate) { - const std::string immediate = GetHalfFloat(std::to_string(instr.half_imm.PackImmediates())); - if (!negate) { - return immediate; - } - const std::string negate_first = instr.half_imm.first_negate != 0 ? "-" : ""; - const std::string negate_second = instr.half_imm.second_negate != 0 ? "-" : ""; - const std::string negate_vec = "vec2(" + negate_first + "1, " + negate_second + "1)"; - - return '(' + immediate + " * " + negate_vec + ')'; - } - - /// Generates code representing a texture sampler. - std::string GetSampler(const Sampler& sampler, Tegra::Shader::TextureType type, bool is_array, - bool is_shadow) { - return regs.AccessSampler(sampler, type, is_array, is_shadow); - } - - /** - * Adds code that calls a subroutine. - * @param subroutine the subroutine to call. - */ - void CallSubroutine(const Subroutine& subroutine) { - if (subroutine.exit_method == ExitMethod::AlwaysEnd) { - shader.AddLine(subroutine.GetName() + "();"); - shader.AddLine("return true;"); - } else if (subroutine.exit_method == ExitMethod::Conditional) { - shader.AddLine("if (" + subroutine.GetName() + "()) { return true; }"); - } else { - shader.AddLine(subroutine.GetName() + "();"); - } - } - - /* - * Writes code that assigns a predicate boolean variable. - * @param pred The id of the predicate to write to. - * @param value The expression value to assign to the predicate. - */ - void SetPredicate(u64 pred, const std::string& value) { - using Tegra::Shader::Pred; - // Can't assign to the constant predicate. - ASSERT(pred != static_cast(Pred::UnusedIndex)); - - std::string variable = 'p' + std::to_string(pred) + '_' + suffix; - shader.AddLine(variable + " = " + value + ';'); - declr_predicates.insert(std::move(variable)); - } - - /* - * Returns the condition to use in the 'if' for a predicated instruction. - * @param instr Instruction to generate the if condition for. - * @returns string containing the predicate condition. - */ - std::string GetPredicateCondition(u64 index, bool negate) { - using Tegra::Shader::Pred; - std::string variable; - - // Index 7 is used as an 'Always True' condition. - if (index == static_cast(Pred::UnusedIndex)) { - variable = "true"; - } else { - variable = 'p' + std::to_string(index) + '_' + suffix; - declr_predicates.insert(variable); - } - if (negate) { - return "!(" + variable + ')'; - } - - return variable; - } - - /** - * Returns the comparison string to use to compare two values in the 'set' family of - * instructions. - * @param condition The condition used in the 'set'-family instruction. - * @param op_a First operand to use for the comparison. - * @param op_b Second operand to use for the comparison. - * @returns String corresponding to the GLSL operator that matches the desired comparison. 
- */ - std::string GetPredicateComparison(Tegra::Shader::PredCondition condition, - const std::string& op_a, const std::string& op_b) const { - using Tegra::Shader::PredCondition; - static const std::unordered_map PredicateComparisonStrings = { - {PredCondition::LessThan, "<"}, - {PredCondition::Equal, "=="}, - {PredCondition::LessEqual, "<="}, - {PredCondition::GreaterThan, ">"}, - {PredCondition::NotEqual, "!="}, - {PredCondition::GreaterEqual, ">="}, - {PredCondition::LessThanWithNan, "<"}, - {PredCondition::NotEqualWithNan, "!="}, - {PredCondition::LessEqualWithNan, "<="}, - {PredCondition::GreaterThanWithNan, ">"}, - {PredCondition::GreaterEqualWithNan, ">="}}; - - const auto& comparison{PredicateComparisonStrings.find(condition)}; - UNIMPLEMENTED_IF_MSG(comparison == PredicateComparisonStrings.end(), - "Unknown predicate comparison operation"); - - std::string predicate{'(' + op_a + ") " + comparison->second + " (" + op_b + ')'}; - if (condition == PredCondition::LessThanWithNan || - condition == PredCondition::NotEqualWithNan || - condition == PredCondition::LessEqualWithNan || - condition == PredCondition::GreaterThanWithNan || - condition == PredCondition::GreaterEqualWithNan) { - predicate += " || isnan(" + op_a + ") || isnan(" + op_b + ')'; - } - - return predicate; - } - - /** - * Returns the operator string to use to combine two predicates in the 'setp' family of - * instructions. - * @params operation The operator used in the 'setp'-family instruction. - * @returns String corresponding to the GLSL operator that matches the desired operator. - */ - std::string GetPredicateCombiner(Tegra::Shader::PredOperation operation) const { - using Tegra::Shader::PredOperation; - static const std::unordered_map PredicateOperationStrings = { - {PredOperation::And, "&&"}, - {PredOperation::Or, "||"}, - {PredOperation::Xor, "^^"}, + const auto& used_registers = ir.GetRegisters(); + const auto SafeGetRegister = [&](u32 reg) -> std::string { + // TODO(Rodrigo): Replace with contains once C++20 releases + if (used_registers.find(reg) != used_registers.end()) { + return GetRegister(reg); + } + return "0.0f"; }; - auto op = PredicateOperationStrings.find(operation); - UNIMPLEMENTED_IF_MSG(op == PredicateOperationStrings.end(), "Unknown predicate operation"); - return op->second; - } + UNIMPLEMENTED_IF_MSG(header.ps.omap.sample_mask != 0, "Sample mask write is unimplemented"); - /** - * Transforms the input string GLSL operand into one that applies the abs() function and negates - * the output if necessary. When both abs and neg are true, the negation will be applied after - * taking the absolute value. - * @param operand The input operand to take the abs() of, negate, or both. - * @param abs Whether to apply the abs() function to the input operand. - * @param neg Whether to negate the input operand. - * @returns String corresponding to the operand after being transformed by the abs() and - * negation operations. - */ - static std::string GetOperandAbsNeg(const std::string& operand, bool abs, bool neg) { - std::string result = operand; - - if (abs) { - result = "abs(" + result + ')'; - } - - if (neg) { - result = "-(" + result + ')'; - } - - return result; - } - - /* - * Transforms the input string GLSL operand into an unpacked half float pair. - * @note This function returns a float type pair instead of a half float pair. This is because - * real half floats are not standardized in GLSL but unpackHalf2x16 (which returns a vec2) is. - * @param operand Input operand. 
It has to be an unsigned integer. - * @param type How to unpack the unsigned integer to a half float pair. - * @param abs Get the absolute value of unpacked half floats. - * @param neg Get the negative value of unpacked half floats. - * @returns String corresponding to a half float pair. - */ - static std::string GetHalfFloat(const std::string& operand, - Tegra::Shader::HalfType type = Tegra::Shader::HalfType::H0_H1, - bool abs = false, bool neg = false) { - // "vec2" calls emitted in this function are intended to alias components. - const std::string value = [&]() { - switch (type) { - case Tegra::Shader::HalfType::H0_H1: - return "unpackHalf2x16(" + operand + ')'; - case Tegra::Shader::HalfType::F32: - return "vec2(uintBitsToFloat(" + operand + "))"; - case Tegra::Shader::HalfType::H0_H0: - case Tegra::Shader::HalfType::H1_H1: { - const bool high = type == Tegra::Shader::HalfType::H1_H1; - const char unpack_index = "xy"[high ? 1 : 0]; - return "vec2(unpackHalf2x16(" + operand + ")." + unpack_index + ')'; - } - default: - UNREACHABLE(); - return std::string("vec2(0)"); - } - }(); - - return GetOperandAbsNeg(value, abs, neg); - } - - /* - * Returns whether the instruction at the specified offset is a 'sched' instruction. - * Sched instructions always appear before a sequence of 3 instructions. - */ - bool IsSchedInstruction(u32 offset) const { - // sched instructions appear once every 4 instructions. - static constexpr std::size_t SchedPeriod = 4; - u32 absolute_offset = offset - main_offset; - - return (absolute_offset % SchedPeriod) == 0; - } - - void WriteLogicOperation(Register dest, LogicOperation logic_op, const std::string& op_a, - const std::string& op_b, - Tegra::Shader::PredicateResultMode predicate_mode, - Tegra::Shader::Pred predicate, const bool set_cc) { - std::string result{}; - switch (logic_op) { - case LogicOperation::And: { - result = '(' + op_a + " & " + op_b + ')'; - break; - } - case LogicOperation::Or: { - result = '(' + op_a + " | " + op_b + ')'; - break; - } - case LogicOperation::Xor: { - result = '(' + op_a + " ^ " + op_b + ')'; - break; - } - case LogicOperation::PassB: { - result = op_b; - break; - } - default: - UNIMPLEMENTED_MSG("Unimplemented logic operation={}", static_cast(logic_op)); - } - - if (dest != Tegra::Shader::Register::ZeroIndex) { - regs.SetRegisterToInteger(dest, true, 0, result, 1, 1, false, set_cc); - } - - using Tegra::Shader::PredicateResultMode; - // Write the predicate value depending on the predicate mode. - switch (predicate_mode) { - case PredicateResultMode::None: - // Do nothing. - return; - case PredicateResultMode::NotZero: - // Set the predicate to true if the result is not zero. 
- SetPredicate(static_cast(predicate), '(' + result + ") != 0"); - break; - default: - UNIMPLEMENTED_MSG("Unimplemented predicate result mode: {}", - static_cast(predicate_mode)); - } - } - - void WriteLop3Instruction(Register dest, const std::string& op_a, const std::string& op_b, - const std::string& op_c, const std::string& imm_lut, - const bool set_cc) { - if (dest == Tegra::Shader::Register::ZeroIndex) { - return; - } - - static constexpr std::array shift_amounts = { - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", - "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", - "22", "23", "24", "25", "26", "27", "28", "29", "30", "31"}; - - std::string result; - result += '('; - - for (std::size_t i = 0; i < shift_amounts.size(); ++i) { - if (i) - result += '|'; - result += "(((" + imm_lut + " >> (((" + op_c + " >> " + shift_amounts[i] + - ") & 1) | ((" + op_b + " >> " + shift_amounts[i] + ") & 1) << 1 | ((" + op_a + - " >> " + shift_amounts[i] + ") & 1) << 2)) & 1) << " + shift_amounts[i] + ")"; - } - - result += ')'; - - regs.SetRegisterToInteger(dest, true, 0, result, 1, 1, false, set_cc); - } - - void WriteTexsInstructionFloat(const Instruction& instr, const std::string& texture) { - // TEXS has two destination registers and a swizzle. The first two elements in the swizzle - // go into gpr0+0 and gpr0+1, and the rest goes into gpr28+0 and gpr28+1 - - std::size_t written_components = 0; - for (u32 component = 0; component < 4; ++component) { - if (!instr.texs.IsComponentEnabled(component)) { - continue; - } - - if (written_components < 2) { - // Write the first two swizzle components to gpr0 and gpr0+1 - regs.SetRegisterToFloat(instr.gpr0, component, texture, 1, 4, false, false, - written_components % 2); - } else { - ASSERT(instr.texs.HasTwoDestinations()); - // Write the rest of the swizzle components to gpr28 and gpr28+1 - regs.SetRegisterToFloat(instr.gpr28, component, texture, 1, 4, false, false, - written_components % 2); - } - - ++written_components; - } - } - - void WriteTexsInstructionHalfFloat(const Instruction& instr, const std::string& texture) { - // TEXS.F16 destionation registers are packed in two registers in pairs (just like any half - // float instruction). - - std::array components; - u32 written_components = 0; - - for (u32 component = 0; component < 4; ++component) { - if (!instr.texs.IsComponentEnabled(component)) - continue; - components[written_components++] = texture + GetSwizzle(component); - } - if (written_components == 0) - return; - - const auto BuildComponent = [&](std::string low, std::string high, bool high_enabled) { - return "vec2(" + low + ", " + (high_enabled ? 
high : "0") + ')'; - }; - - regs.SetRegisterToHalfFloat( - instr.gpr0, 0, BuildComponent(components[0], components[1], written_components > 1), - Tegra::Shader::HalfMerge::H0_H1, 1, 1); - - if (written_components > 2) { - ASSERT(instr.texs.HasTwoDestinations()); - regs.SetRegisterToHalfFloat( - instr.gpr28, 0, - BuildComponent(components[2], components[3], written_components > 3), - Tegra::Shader::HalfMerge::H0_H1, 1, 1); - } - } - - static u32 TextureCoordinates(Tegra::Shader::TextureType texture_type) { - switch (texture_type) { - case Tegra::Shader::TextureType::Texture1D: - return 1; - case Tegra::Shader::TextureType::Texture2D: - return 2; - case Tegra::Shader::TextureType::Texture3D: - case Tegra::Shader::TextureType::TextureCube: - return 3; - default: - UNIMPLEMENTED_MSG("Unhandled texture type: {}", static_cast(texture_type)); - return 0; - } - } - - /* - * Emits code to push the input target address to the flow address stack, incrementing the stack - * top. - */ - void EmitPushToFlowStack(u32 target) { - const auto scope = shader.Scope(); - - shader.AddLine("flow_stack[flow_stack_top] = " + std::to_string(target) + "u;"); - shader.AddLine("flow_stack_top++;"); - } - - /* - * Emits code to pop an address from the flow address stack, setting the jump address to the - * popped address and decrementing the stack top. - */ - void EmitPopFromFlowStack() { - const auto scope = shader.Scope(); - - shader.AddLine("flow_stack_top--;"); - shader.AddLine("jmp_to = flow_stack[flow_stack_top];"); - shader.AddLine("break;"); - } - - /// Writes the output values from a fragment shader to the corresponding GLSL output variables. - void EmitFragmentOutputsWrite() { - ASSERT(stage == Maxwell3D::Regs::ShaderStage::Fragment); - - UNIMPLEMENTED_IF_MSG(header.ps.omap.sample_mask != 0, "Samplemask write is unimplemented"); - - shader.AddLine("if (alpha_test[0] != 0) {"); - ++shader.scope; + code.AddLine("if (alpha_test[0] != 0) {"); + ++code.scope; // We start on the register containing the alpha value in the first RT. u32 current_reg = 3; - for (u32 render_target = 0; render_target < Maxwell3D::Regs::NumRenderTargets; - ++render_target) { + for (u32 render_target = 0; render_target < Maxwell::NumRenderTargets; ++render_target) { // TODO(Blinkhawk): verify the behavior of alpha testing on hardware when // multiple render targets are used. if (header.ps.IsColorComponentOutputEnabled(render_target, 0) || header.ps.IsColorComponentOutputEnabled(render_target, 1) || header.ps.IsColorComponentOutputEnabled(render_target, 2) || header.ps.IsColorComponentOutputEnabled(render_target, 3)) { - shader.AddLine(fmt::format("if (!AlphaFunc({})) discard;", - regs.GetRegisterAsFloat(current_reg))); + code.AddLine( + fmt::format("if (!AlphaFunc({})) discard;", SafeGetRegister(current_reg))); current_reg += 4; } } - --shader.scope; - shader.AddLine('}'); + --code.scope; + code.AddLine('}'); // Write the color outputs using the data in the shader registers, disabled // rendertargets/components are skipped in the register assignment. current_reg = 0; - for (u32 render_target = 0; render_target < Maxwell3D::Regs::NumRenderTargets; - ++render_target) { + for (u32 render_target = 0; render_target < Maxwell::NumRenderTargets; ++render_target) { // TODO(Subv): Figure out how dual-source blending is configured in the Switch. 
for (u32 component = 0; component < 4; ++component) { if (header.ps.IsColorComponentOutputEnabled(render_target, component)) { - shader.AddLine(fmt::format("FragColor{}[{}] = {};", render_target, component, - regs.GetRegisterAsFloat(current_reg))); + code.AddLine(fmt::format("FragColor{}[{}] = {};", render_target, component, + SafeGetRegister(current_reg))); ++current_reg; } } @@ -1508,2443 +1270,259 @@ private: if (header.ps.omap.depth) { // The depth output is always 2 registers after the last color output, and current_reg // already contains one past the last color register. - - shader.AddLine( - "gl_FragDepth = " + - regs.GetRegisterAsFloat(static_cast(current_reg) + 1) + - ';'); + code.AddLine("gl_FragDepth = " + SafeGetRegister(current_reg + 1) + ';'); } + + code.AddLine("return;"); + return {}; } - /// Unpacks a video instruction operand (e.g. VMAD). - std::string GetVideoOperand(const std::string& op, bool is_chunk, bool is_signed, - Tegra::Shader::VideoType type, u64 byte_height) { - const std::string value = [&]() { - if (!is_chunk) { - const auto offset = static_cast(byte_height * 8); - return "((" + op + " >> " + std::to_string(offset) + ") & 0xff)"; - } - const std::string zero = "0"; + std::string Discard(Operation operation) { + // Enclose "discard" in a conditional, so that GLSL compilation does not complain + // about unexecuted instructions that may follow this. + code.AddLine("if (true) {"); + ++code.scope; + code.AddLine("discard;"); + --code.scope; + code.AddLine("}"); + return {}; + } - switch (type) { - case Tegra::Shader::VideoType::Size16_Low: - return '(' + op + " & 0xffff)"; - case Tegra::Shader::VideoType::Size16_High: - return '(' + op + " >> 16)"; - case Tegra::Shader::VideoType::Size32: - // TODO(Rodrigo): From my hardware tests it becomes a bit "mad" when - // this type is used (1 * 1 + 0 == 0x5b800000). Until a better - // explanation is found: abort. - UNIMPLEMENTED(); - return zero; - case Tegra::Shader::VideoType::Invalid: - UNREACHABLE_MSG("Invalid instruction encoding"); - return zero; - default: - UNREACHABLE(); - return zero; - } - }(); + std::string EmitVertex(Operation operation) { + ASSERT_MSG(stage == ShaderStage::Geometry, + "EmitVertex is expected to be used in a geometry shader."); - if (is_signed) { - return "int(" + value + ')'; - } - return value; + // If a geometry shader is attached, it will always flip (it's the last stage before + // fragment). For more info about flipping, refer to gl_shader_gen.cpp. + code.AddLine("position.xy *= viewport_flip.xy;"); + code.AddLine("gl_Position = position;"); + code.AddLine("position.w = 1.0;"); + code.AddLine("EmitVertex();"); + return {}; + } + + std::string EndPrimitive(Operation operation) { + ASSERT_MSG(stage == ShaderStage::Geometry, + "EndPrimitive is expected to be used in a geometry shader."); + + code.AddLine("EndPrimitive();"); + return {}; + } + + std::string YNegate(Operation operation) { + // Config pack's third value is Y_NEGATE's state. 
+ return "uintBitsToFloat(config_pack[2])"; + } + + static constexpr OperationDecompilersArray operation_decompilers = { + &GLSLDecompiler::Assign, + + &GLSLDecompiler::Select, + + &GLSLDecompiler::Add, + &GLSLDecompiler::Mul, + &GLSLDecompiler::Div, + &GLSLDecompiler::Fma, + &GLSLDecompiler::Negate, + &GLSLDecompiler::Absolute, + &GLSLDecompiler::FClamp, + &GLSLDecompiler::Min, + &GLSLDecompiler::Max, + &GLSLDecompiler::FCos, + &GLSLDecompiler::FSin, + &GLSLDecompiler::FExp2, + &GLSLDecompiler::FLog2, + &GLSLDecompiler::FInverseSqrt, + &GLSLDecompiler::FSqrt, + &GLSLDecompiler::FRoundEven, + &GLSLDecompiler::FFloor, + &GLSLDecompiler::FCeil, + &GLSLDecompiler::FTrunc, + &GLSLDecompiler::FCastInteger, + &GLSLDecompiler::FCastInteger, + + &GLSLDecompiler::Add, + &GLSLDecompiler::Mul, + &GLSLDecompiler::Div, + &GLSLDecompiler::Negate, + &GLSLDecompiler::Absolute, + &GLSLDecompiler::Min, + &GLSLDecompiler::Max, + + &GLSLDecompiler::ICastFloat, + &GLSLDecompiler::ICastUnsigned, + &GLSLDecompiler::LogicalShiftLeft, + &GLSLDecompiler::ILogicalShiftRight, + &GLSLDecompiler::IArithmeticShiftRight, + &GLSLDecompiler::BitwiseAnd, + &GLSLDecompiler::BitwiseOr, + &GLSLDecompiler::BitwiseXor, + &GLSLDecompiler::BitwiseNot, + &GLSLDecompiler::BitfieldInsert, + &GLSLDecompiler::BitfieldExtract, + &GLSLDecompiler::BitCount, + + &GLSLDecompiler::Add, + &GLSLDecompiler::Mul, + &GLSLDecompiler::Div, + &GLSLDecompiler::Min, + &GLSLDecompiler::Max, + &GLSLDecompiler::UCastFloat, + &GLSLDecompiler::UCastSigned, + &GLSLDecompiler::LogicalShiftLeft, + &GLSLDecompiler::UShiftRight, + &GLSLDecompiler::UShiftRight, + &GLSLDecompiler::BitwiseAnd, + &GLSLDecompiler::BitwiseOr, + &GLSLDecompiler::BitwiseXor, + &GLSLDecompiler::BitwiseNot, + &GLSLDecompiler::BitfieldInsert, + &GLSLDecompiler::BitfieldExtract, + &GLSLDecompiler::BitCount, + + &GLSLDecompiler::Add, + &GLSLDecompiler::Mul, + &GLSLDecompiler::Fma, + &GLSLDecompiler::Absolute, + &GLSLDecompiler::HNegate, + &GLSLDecompiler::HMergeF32, + &GLSLDecompiler::HMergeH0, + &GLSLDecompiler::HMergeH1, + &GLSLDecompiler::HPack2, + + &GLSLDecompiler::LogicalAssign, + &GLSLDecompiler::LogicalAnd, + &GLSLDecompiler::LogicalOr, + &GLSLDecompiler::LogicalXor, + &GLSLDecompiler::LogicalNegate, + &GLSLDecompiler::LogicalPick2, + &GLSLDecompiler::LogicalAll2, + &GLSLDecompiler::LogicalAny2, + + &GLSLDecompiler::LogicalLessThan, + &GLSLDecompiler::LogicalEqual, + &GLSLDecompiler::LogicalLessEqual, + &GLSLDecompiler::LogicalGreaterThan, + &GLSLDecompiler::LogicalNotEqual, + &GLSLDecompiler::LogicalGreaterEqual, + &GLSLDecompiler::LogicalFIsNan, + + &GLSLDecompiler::LogicalLessThan, + &GLSLDecompiler::LogicalEqual, + &GLSLDecompiler::LogicalLessEqual, + &GLSLDecompiler::LogicalGreaterThan, + &GLSLDecompiler::LogicalNotEqual, + &GLSLDecompiler::LogicalGreaterEqual, + + &GLSLDecompiler::LogicalLessThan, + &GLSLDecompiler::LogicalEqual, + &GLSLDecompiler::LogicalLessEqual, + &GLSLDecompiler::LogicalGreaterThan, + &GLSLDecompiler::LogicalNotEqual, + &GLSLDecompiler::LogicalGreaterEqual, + + &GLSLDecompiler::Logical2HLessThan, + &GLSLDecompiler::Logical2HEqual, + &GLSLDecompiler::Logical2HLessEqual, + &GLSLDecompiler::Logical2HGreaterThan, + &GLSLDecompiler::Logical2HNotEqual, + &GLSLDecompiler::Logical2HGreaterEqual, + + &GLSLDecompiler::F4Texture, + &GLSLDecompiler::F4TextureLod, + &GLSLDecompiler::F4TextureGather, + &GLSLDecompiler::F4TextureQueryDimensions, + &GLSLDecompiler::F4TextureQueryLod, + &GLSLDecompiler::F4TexelFetch, + + &GLSLDecompiler::Branch, + 
&GLSLDecompiler::PushFlowStack, + &GLSLDecompiler::PopFlowStack, + &GLSLDecompiler::Exit, + &GLSLDecompiler::Discard, + + &GLSLDecompiler::EmitVertex, + &GLSLDecompiler::EndPrimitive, + + &GLSLDecompiler::YNegate, }; - /// Gets the A operand for a video instruction. - std::string GetVideoOperandA(Instruction instr) { - return GetVideoOperand(regs.GetRegisterAsInteger(instr.gpr8, 0, false), - instr.video.is_byte_chunk_a != 0, instr.video.signed_a, - instr.video.type_a, instr.video.byte_height_a); + std::string GetRegister(u32 index) const { + return GetDeclarationWithSuffix(index, "gpr"); } - /// Gets the B operand for a video instruction. - std::string GetVideoOperandB(Instruction instr) { - if (instr.video.use_register_b) { - return GetVideoOperand(regs.GetRegisterAsInteger(instr.gpr20, 0, false), - instr.video.is_byte_chunk_b != 0, instr.video.signed_b, - instr.video.type_b, instr.video.byte_height_b); - } else { - return '(' + - std::to_string(instr.video.signed_b ? static_cast(instr.alu.GetImm20_16()) - : instr.alu.GetImm20_16()) + - ')'; - } + std::string GetPredicate(Tegra::Shader::Pred pred) const { + return GetDeclarationWithSuffix(static_cast(pred), "pred"); } - std::pair ValidateAndGetCoordinateElement( - const Tegra::Shader::TextureType texture_type, const bool depth_compare, - const bool is_array, const bool lod_bias_enabled, size_t max_coords, size_t max_inputs) { - const size_t coord_count = TextureCoordinates(texture_type); - - size_t total_coord_count = coord_count + (is_array ? 1 : 0) + (depth_compare ? 1 : 0); - const size_t total_reg_count = total_coord_count + (lod_bias_enabled ? 1 : 0); - if (total_coord_count > max_coords || total_reg_count > max_inputs) { - UNIMPLEMENTED_MSG("Unsupported Texture operation"); - total_coord_count = std::min(total_coord_count, max_coords); - } - // 1D.DC opengl is using a vec3 but 2nd component is ignored later. - total_coord_count += - (depth_compare && !is_array && texture_type == Tegra::Shader::TextureType::Texture1D) - ? 1 - : 0; - - constexpr std::array coord_container{ - {"", "float coord = (", "vec2 coord = vec2(", "vec3 coord = vec3(", - "vec4 coord = vec4("}}; - - return std::pair(coord_count, coord_container[total_coord_count]); + std::string GetInputAttribute(Attribute::Index attribute) const { + const auto index{static_cast(attribute) - + static_cast(Attribute::Index::Attribute_0)}; + return GetDeclarationWithSuffix(index, "input_attr"); } - std::string GetTextureCode(const Tegra::Shader::Instruction& instr, - const Tegra::Shader::TextureType texture_type, - const Tegra::Shader::TextureProcessMode process_mode, - const bool depth_compare, const bool is_array, - const size_t bias_offset) { - - if ((texture_type == Tegra::Shader::TextureType::Texture3D && - (is_array || depth_compare)) || - (texture_type == Tegra::Shader::TextureType::TextureCube && is_array && - depth_compare)) { - UNIMPLEMENTED_MSG("This method is not supported."); - } - - const std::string sampler = - GetSampler(instr.sampler, texture_type, is_array, depth_compare); - - const bool lod_needed = process_mode == Tegra::Shader::TextureProcessMode::LZ || - process_mode == Tegra::Shader::TextureProcessMode::LL || - process_mode == Tegra::Shader::TextureProcessMode::LLA; - - // LOD selection (either via bias or explicit textureLod) not supported in GL for - // sampler2DArrayShadow and samplerCubeArrayShadow. 
- const bool gl_lod_supported = !( - (texture_type == Tegra::Shader::TextureType::Texture2D && is_array && depth_compare) || - (texture_type == Tegra::Shader::TextureType::TextureCube && is_array && depth_compare)); - - const std::string read_method = lod_needed && gl_lod_supported ? "textureLod(" : "texture("; - std::string texture = read_method + sampler + ", coord"; - - UNIMPLEMENTED_IF(process_mode != Tegra::Shader::TextureProcessMode::None && - !gl_lod_supported); - - if (process_mode != Tegra::Shader::TextureProcessMode::None && gl_lod_supported) { - if (process_mode == Tegra::Shader::TextureProcessMode::LZ) { - texture += ", 0.0"; - } else { - // If present, lod or bias are always stored in the register indexed by the - // gpr20 - // field with an offset depending on the usage of the other registers - texture += ',' + regs.GetRegisterAsFloat(instr.gpr20.Value() + bias_offset); - } - } - texture += ")"; - return texture; + std::string GetOutputAttribute(Attribute::Index attribute) const { + const auto index{static_cast(attribute) - + static_cast(Attribute::Index::Attribute_0)}; + return GetDeclarationWithSuffix(index, "output_attr"); } - std::pair GetTEXCode( - const Instruction& instr, const Tegra::Shader::TextureType texture_type, - const Tegra::Shader::TextureProcessMode process_mode, const bool depth_compare, - const bool is_array) { - const bool lod_bias_enabled = (process_mode != Tegra::Shader::TextureProcessMode::None && - process_mode != Tegra::Shader::TextureProcessMode::LZ); - - const auto [coord_count, coord_dcl] = ValidateAndGetCoordinateElement( - texture_type, depth_compare, is_array, lod_bias_enabled, 4, 5); - // If enabled arrays index is always stored in the gpr8 field - const u64 array_register = instr.gpr8.Value(); - // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used - const u64 coord_register = array_register + (is_array ? 1 : 0); - - std::string coord = coord_dcl; - for (size_t i = 0; i < coord_count;) { - coord += regs.GetRegisterAsFloat(coord_register + i); - ++i; - if (i != coord_count) { - coord += ','; - } - } - // 1D.DC in opengl the 2nd component is ignored. - if (depth_compare && !is_array && texture_type == Tegra::Shader::TextureType::Texture1D) { - coord += ",0.0"; - } - if (is_array) { - coord += ',' + regs.GetRegisterAsInteger(array_register); - } - if (depth_compare) { - // Depth is always stored in the register signaled by gpr20 - // or in the next register if lod or bias are used - const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 
1 : 0); - coord += ',' + regs.GetRegisterAsFloat(depth_register); - } - coord += ");"; - return std::make_pair( - coord, GetTextureCode(instr, texture_type, process_mode, depth_compare, is_array, 0)); + std::string GetConstBuffer(u32 index) const { + return GetDeclarationWithSuffix(index, "cbuf"); } - std::pair GetTEXSCode( - const Instruction& instr, const Tegra::Shader::TextureType texture_type, - const Tegra::Shader::TextureProcessMode process_mode, const bool depth_compare, - const bool is_array) { - const bool lod_bias_enabled = (process_mode != Tegra::Shader::TextureProcessMode::None && - process_mode != Tegra::Shader::TextureProcessMode::LZ); - - const auto [coord_count, coord_dcl] = ValidateAndGetCoordinateElement( - texture_type, depth_compare, is_array, lod_bias_enabled, 4, 4); - // If enabled arrays index is always stored in the gpr8 field - const u64 array_register = instr.gpr8.Value(); - // First coordinate index is stored in gpr8 field or (gpr8 + 1) when arrays are used - const u64 coord_register = array_register + (is_array ? 1 : 0); - const u64 last_coord_register = - (is_array || !(lod_bias_enabled || depth_compare) || (coord_count > 2)) - ? static_cast(instr.gpr20.Value()) - : coord_register + 1; - - std::string coord = coord_dcl; - for (size_t i = 0; i < coord_count; ++i) { - const bool last = (i == (coord_count - 1)) && (coord_count > 1); - coord += regs.GetRegisterAsFloat(last ? last_coord_register : coord_register + i); - if (i < coord_count - 1) { - coord += ','; - } - } - - if (is_array) { - coord += ',' + regs.GetRegisterAsInteger(array_register); - } - if (depth_compare) { - // Depth is always stored in the register signaled by gpr20 - // or in the next register if lod or bias are used - const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0); - coord += ',' + regs.GetRegisterAsFloat(depth_register); - } - coord += ");"; - - return std::make_pair(coord, - GetTextureCode(instr, texture_type, process_mode, depth_compare, - is_array, (coord_count > 2 ? 1 : 0))); + std::string GetConstBufferBlock(u32 index) const { + return GetDeclarationWithSuffix(index, "cbuf_block"); } - std::pair GetTLD4Code(const Instruction& instr, - const Tegra::Shader::TextureType texture_type, - const bool depth_compare, const bool is_array) { - - const size_t coord_count = TextureCoordinates(texture_type); - const size_t total_coord_count = coord_count + (is_array ? 1 : 0); - const size_t total_reg_count = total_coord_count + (depth_compare ? 1 : 0); - - constexpr std::array coord_container{ - {"", "", "vec2 coord = vec2(", "vec3 coord = vec3(", "vec4 coord = vec4("}}; - - // If enabled arrays index is always stored in the gpr8 field - const u64 array_register = instr.gpr8.Value(); - // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used - const u64 coord_register = array_register + (is_array ? 
1 : 0); - - std::string coord = coord_container[total_coord_count]; - for (size_t i = 0; i < coord_count;) { - coord += regs.GetRegisterAsFloat(coord_register + i); - ++i; - if (i != coord_count) { - coord += ','; - } - } - - if (is_array) { - coord += ',' + regs.GetRegisterAsInteger(array_register); - } - coord += ");"; - - const std::string sampler = - GetSampler(instr.sampler, texture_type, is_array, depth_compare); - - std::string texture = "textureGather(" + sampler + ", coord, "; - if (depth_compare) { - // Depth is always stored in the register signaled by gpr20 - texture += regs.GetRegisterAsFloat(instr.gpr20.Value()) + ')'; - } else { - texture += std::to_string(instr.tld4.component) + ')'; - } - return std::make_pair(coord, texture); + std::string GetLocalMemory() const { + return "lmem_" + suffix; } - std::pair GetTLDSCode(const Instruction& instr, - const Tegra::Shader::TextureType texture_type, - const bool is_array) { + std::string GetInternalFlag(InternalFlag flag) const { + constexpr std::array InternalFlagNames = {"zero_flag", "sign_flag", + "carry_flag", "overflow_flag"}; + const auto index = static_cast(flag); + ASSERT(index < static_cast(InternalFlag::Amount)); - const size_t coord_count = TextureCoordinates(texture_type); - const size_t total_coord_count = coord_count + (is_array ? 1 : 0); - const bool lod_enabled = - instr.tlds.GetTextureProcessMode() == Tegra::Shader::TextureProcessMode::LL; - - constexpr std::array coord_container{ - {"", "int coords = (", "ivec2 coords = ivec2(", "ivec3 coords = ivec3("}}; - - std::string coord = coord_container[total_coord_count]; - - // If enabled arrays index is always stored in the gpr8 field - const u64 array_register = instr.gpr8.Value(); - - // if is array gpr20 is used - const u64 coord_register = is_array ? instr.gpr20.Value() : instr.gpr8.Value(); - - const u64 last_coord_register = - ((coord_count > 2) || (coord_count == 2 && !lod_enabled)) && !is_array - ? static_cast(instr.gpr20.Value()) - : coord_register + 1; - - for (size_t i = 0; i < coord_count; ++i) { - const bool last = (i == (coord_count - 1)) && (coord_count > 1); - coord += regs.GetRegisterAsInteger(last ? last_coord_register : coord_register + i); - if (i < coord_count - 1) { - coord += ','; - } - } - if (is_array) { - coord += ',' + regs.GetRegisterAsInteger(array_register); - } - coord += ");"; - - const std::string sampler = GetSampler(instr.sampler, texture_type, is_array, false); - - std::string texture = "texelFetch(" + sampler + ", coords"; - - if (lod_enabled) { - // When lod is used always is in grp20 - texture += ", " + regs.GetRegisterAsInteger(instr.gpr20) + ')'; - } else { - texture += ", 0)"; - } - return std::make_pair(coord, texture); + return std::string(InternalFlagNames[index]) + '_' + suffix; } - /** - * Compiles a single instruction from Tegra to GLSL. - * @param offset the offset of the Tegra shader instruction. - * @return the offset of the next instruction to execute. Usually it is the current offset - * + 1. If the current instruction always terminates the program, returns PROGRAM_END. - */ - u32 CompileInstr(u32 offset) { - // Ignore sched instructions when generating code. 
- if (IsSchedInstruction(offset)) { - return offset + 1; - } - - const Instruction instr = {program_code[offset]}; - const auto opcode = OpCode::Decode(instr); - - // Decoding failure - if (!opcode) { - UNIMPLEMENTED_MSG("Unhandled instruction: {0:x}", instr.value); - return offset + 1; - } - - shader.AddLine( - fmt::format("// {}: {} (0x{:016x})", offset, opcode->get().GetName(), instr.value)); - - using Tegra::Shader::Pred; - UNIMPLEMENTED_IF_MSG(instr.pred.full_pred == Pred::NeverExecute, - "NeverExecute predicate not implemented"); - - // Some instructions (like SSY) don't have a predicate field, they are always - // unconditionally executed. - bool can_be_predicated = OpCode::IsPredicatedInstruction(opcode->get().GetId()); - - if (can_be_predicated && instr.pred.pred_index != static_cast(Pred::UnusedIndex)) { - shader.AddLine("if (" + - GetPredicateCondition(instr.pred.pred_index, instr.negate_pred != 0) + - ')'); - shader.AddLine('{'); - ++shader.scope; - } - - switch (opcode->get().GetType()) { - case OpCode::Type::Arithmetic: { - std::string op_a = regs.GetRegisterAsFloat(instr.gpr8); - - std::string op_b; - - if (instr.is_b_imm) { - op_b = GetImmediate19(instr); - } else { - if (instr.is_b_gpr) { - op_b = regs.GetRegisterAsFloat(instr.gpr20); - } else { - op_b = regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Float); - } - } - - switch (opcode->get().GetId()) { - case OpCode::Id::MOV_C: - case OpCode::Id::MOV_R: { - // MOV does not have neither 'abs' nor 'neg' bits. - regs.SetRegisterToFloat(instr.gpr0, 0, op_b, 1, 1); - break; - } - - case OpCode::Id::FMUL_C: - case OpCode::Id::FMUL_R: - case OpCode::Id::FMUL_IMM: { - // FMUL does not have 'abs' bits and only the second operand has a 'neg' bit. - UNIMPLEMENTED_IF_MSG(instr.fmul.tab5cb8_2 != 0, - "FMUL tab5cb8_2({}) is not implemented", - instr.fmul.tab5cb8_2.Value()); - UNIMPLEMENTED_IF_MSG( - instr.fmul.tab5c68_0 != 1, "FMUL tab5cb8_0({}) is not implemented", - instr.fmul.tab5c68_0 - .Value()); // SMO typical sends 1 here which seems to be the default - - op_b = GetOperandAbsNeg(op_b, false, instr.fmul.negate_b); - - std::string postfactor_op; - if (instr.fmul.postfactor != 0) { - s8 postfactor = static_cast(instr.fmul.postfactor); - - // postfactor encoded as 3-bit 1's complement in instruction, - // interpreted with below logic. 
- if (postfactor >= 4) { - postfactor = 7 - postfactor; - } else { - postfactor = 0 - postfactor; - } - - if (postfactor > 0) { - postfactor_op = " * " + std::to_string(1 << postfactor); - } else { - postfactor_op = " / " + std::to_string(1 << -postfactor); - } - } - - regs.SetRegisterToFloat(instr.gpr0, 0, op_a + " * " + op_b + postfactor_op, 1, 1, - instr.alu.saturate_d, instr.generates_cc, 0, true); - break; - } - case OpCode::Id::FADD_C: - case OpCode::Id::FADD_R: - case OpCode::Id::FADD_IMM: { - op_a = GetOperandAbsNeg(op_a, instr.alu.abs_a, instr.alu.negate_a); - op_b = GetOperandAbsNeg(op_b, instr.alu.abs_b, instr.alu.negate_b); - - regs.SetRegisterToFloat(instr.gpr0, 0, op_a + " + " + op_b, 1, 1, - instr.alu.saturate_d, instr.generates_cc, 0, true); - break; - } - case OpCode::Id::MUFU: { - op_a = GetOperandAbsNeg(op_a, instr.alu.abs_a, instr.alu.negate_a); - switch (instr.sub_op) { - case SubOp::Cos: - regs.SetRegisterToFloat(instr.gpr0, 0, "cos(" + op_a + ')', 1, 1, - instr.alu.saturate_d, false, 0, true); - break; - case SubOp::Sin: - regs.SetRegisterToFloat(instr.gpr0, 0, "sin(" + op_a + ')', 1, 1, - instr.alu.saturate_d, false, 0, true); - break; - case SubOp::Ex2: - regs.SetRegisterToFloat(instr.gpr0, 0, "exp2(" + op_a + ')', 1, 1, - instr.alu.saturate_d, false, 0, true); - break; - case SubOp::Lg2: - regs.SetRegisterToFloat(instr.gpr0, 0, "log2(" + op_a + ')', 1, 1, - instr.alu.saturate_d, false, 0, true); - break; - case SubOp::Rcp: - regs.SetRegisterToFloat(instr.gpr0, 0, "1.0 / " + op_a, 1, 1, - instr.alu.saturate_d, false, 0, true); - break; - case SubOp::Rsq: - regs.SetRegisterToFloat(instr.gpr0, 0, "inversesqrt(" + op_a + ')', 1, 1, - instr.alu.saturate_d, false, 0, true); - break; - case SubOp::Sqrt: - regs.SetRegisterToFloat(instr.gpr0, 0, "sqrt(" + op_a + ')', 1, 1, - instr.alu.saturate_d, false, 0, true); - break; - default: - UNIMPLEMENTED_MSG("Unhandled MUFU sub op={0:x}", - static_cast(instr.sub_op.Value())); - } - break; - } - case OpCode::Id::FMNMX_C: - case OpCode::Id::FMNMX_R: - case OpCode::Id::FMNMX_IMM: { - UNIMPLEMENTED_IF_MSG( - instr.generates_cc, - "Condition codes generation in FMNMX is partially implemented"); - - op_a = GetOperandAbsNeg(op_a, instr.alu.abs_a, instr.alu.negate_a); - op_b = GetOperandAbsNeg(op_b, instr.alu.abs_b, instr.alu.negate_b); - - std::string condition = - GetPredicateCondition(instr.alu.fmnmx.pred, instr.alu.fmnmx.negate_pred != 0); - std::string parameters = op_a + ',' + op_b; - regs.SetRegisterToFloat(instr.gpr0, 0, - '(' + condition + ") ? min(" + parameters + ") : max(" + - parameters + ')', - 1, 1, false, instr.generates_cc, 0, true); - break; - } - case OpCode::Id::RRO_C: - case OpCode::Id::RRO_R: - case OpCode::Id::RRO_IMM: { - // Currently RRO is only implemented as a register move. 
- op_b = GetOperandAbsNeg(op_b, instr.alu.abs_b, instr.alu.negate_b); - regs.SetRegisterToFloat(instr.gpr0, 0, op_b, 1, 1); - LOG_WARNING(HW_GPU, "RRO instruction is incomplete"); - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled arithmetic instruction: {}", opcode->get().GetName()); - } - } - break; - } - case OpCode::Type::ArithmeticImmediate: { - switch (opcode->get().GetId()) { - case OpCode::Id::MOV32_IMM: { - regs.SetRegisterToFloat(instr.gpr0, 0, GetImmediate32(instr), 1, 1); - break; - } - case OpCode::Id::FMUL32_IMM: { - regs.SetRegisterToFloat( - instr.gpr0, 0, - regs.GetRegisterAsFloat(instr.gpr8) + " * " + GetImmediate32(instr), 1, 1, - instr.fmul32.saturate, instr.op_32.generates_cc, 0, true); - break; - } - case OpCode::Id::FADD32I: { - UNIMPLEMENTED_IF_MSG( - instr.op_32.generates_cc, - "Condition codes generation in FADD32I is partially implemented"); - - std::string op_a = regs.GetRegisterAsFloat(instr.gpr8); - std::string op_b = GetImmediate32(instr); - - if (instr.fadd32i.abs_a) { - op_a = "abs(" + op_a + ')'; - } - - if (instr.fadd32i.negate_a) { - op_a = "-(" + op_a + ')'; - } - - if (instr.fadd32i.abs_b) { - op_b = "abs(" + op_b + ')'; - } - - if (instr.fadd32i.negate_b) { - op_b = "-(" + op_b + ')'; - } - - regs.SetRegisterToFloat(instr.gpr0, 0, op_a + " + " + op_b, 1, 1, false, - instr.op_32.generates_cc, 0, true); - break; - } - } - break; - } - case OpCode::Type::Bfe: { - UNIMPLEMENTED_IF(instr.bfe.negate_b); - - std::string op_a = instr.bfe.negate_a ? "-" : ""; - op_a += regs.GetRegisterAsInteger(instr.gpr8); - - switch (opcode->get().GetId()) { - case OpCode::Id::BFE_IMM: { - std::string inner_shift = - '(' + op_a + " << " + std::to_string(instr.bfe.GetLeftShiftValue()) + ')'; - std::string outer_shift = - '(' + inner_shift + " >> " + - std::to_string(instr.bfe.GetLeftShiftValue() + instr.bfe.shift_position) + ')'; - - regs.SetRegisterToInteger(instr.gpr0, true, 0, outer_shift, 1, 1, false, - instr.generates_cc); - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled BFE instruction: {}", opcode->get().GetName()); - } - } - - break; - } - case OpCode::Type::Bfi: { - const auto [base, packed_shift] = [&]() -> std::tuple { - switch (opcode->get().GetId()) { - case OpCode::Id::BFI_IMM_R: - return {regs.GetRegisterAsInteger(instr.gpr39, 0, false), - std::to_string(instr.alu.GetSignedImm20_20())}; - default: - UNREACHABLE(); - return {regs.GetRegisterAsInteger(instr.gpr39, 0, false), - std::to_string(instr.alu.GetSignedImm20_20())}; - } - }(); - const std::string offset = '(' + packed_shift + " & 0xff)"; - const std::string bits = "((" + packed_shift + " >> 8) & 0xff)"; - const std::string insert = regs.GetRegisterAsInteger(instr.gpr8, 0, false); - regs.SetRegisterToInteger(instr.gpr0, false, 0, - "bitfieldInsert(" + base + ", " + insert + ", " + offset + - ", " + bits + ')', - 1, 1, false, instr.generates_cc); - break; - } - case OpCode::Type::Shift: { - std::string op_a = regs.GetRegisterAsInteger(instr.gpr8, 0, true); - std::string op_b; - - if (instr.is_b_imm) { - op_b += '(' + std::to_string(instr.alu.GetSignedImm20_20()) + ')'; - } else { - if (instr.is_b_gpr) { - op_b += regs.GetRegisterAsInteger(instr.gpr20); - } else { - op_b += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Integer); - } - } - - switch (opcode->get().GetId()) { - case OpCode::Id::SHR_C: - case OpCode::Id::SHR_R: - case OpCode::Id::SHR_IMM: { - if (!instr.shift.is_signed) { - // Logical shift right - op_a = "uint(" + op_a + ')'; - } - - // Cast to int 
is superfluous for arithmetic shift, it's only for a logical shift - regs.SetRegisterToInteger(instr.gpr0, true, 0, "int(" + op_a + " >> " + op_b + ')', - 1, 1, false, instr.generates_cc); - break; - } - case OpCode::Id::SHL_C: - case OpCode::Id::SHL_R: - case OpCode::Id::SHL_IMM: - UNIMPLEMENTED_IF_MSG(instr.generates_cc, - "Condition codes generation in SHL is not implemented"); - regs.SetRegisterToInteger(instr.gpr0, true, 0, op_a + " << " + op_b, 1, 1, false, - instr.generates_cc); - break; - default: { - UNIMPLEMENTED_MSG("Unhandled shift instruction: {}", opcode->get().GetName()); - } - } - break; - } - case OpCode::Type::ArithmeticIntegerImmediate: { - std::string op_a = regs.GetRegisterAsInteger(instr.gpr8); - std::string op_b = std::to_string(instr.alu.imm20_32.Value()); - - switch (opcode->get().GetId()) { - case OpCode::Id::IADD32I: - UNIMPLEMENTED_IF_MSG( - instr.op_32.generates_cc, - "Condition codes generation in IADD32I is partially implemented"); - - if (instr.iadd32i.negate_a) - op_a = "-(" + op_a + ')'; - - regs.SetRegisterToInteger(instr.gpr0, true, 0, op_a + " + " + op_b, 1, 1, - instr.iadd32i.saturate, instr.op_32.generates_cc); - break; - case OpCode::Id::LOP32I: { - - if (instr.alu.lop32i.invert_a) - op_a = "~(" + op_a + ')'; - - if (instr.alu.lop32i.invert_b) - op_b = "~(" + op_b + ')'; - - WriteLogicOperation(instr.gpr0, instr.alu.lop32i.operation, op_a, op_b, - Tegra::Shader::PredicateResultMode::None, - Tegra::Shader::Pred::UnusedIndex, instr.op_32.generates_cc); - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled ArithmeticIntegerImmediate instruction: {}", - opcode->get().GetName()); - } - } - break; - } - case OpCode::Type::ArithmeticInteger: { - std::string op_a = regs.GetRegisterAsInteger(instr.gpr8); - std::string op_b; - if (instr.is_b_imm) { - op_b += '(' + std::to_string(instr.alu.GetSignedImm20_20()) + ')'; - } else { - if (instr.is_b_gpr) { - op_b += regs.GetRegisterAsInteger(instr.gpr20); - } else { - op_b += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Integer); - } - } - - switch (opcode->get().GetId()) { - case OpCode::Id::IADD_C: - case OpCode::Id::IADD_R: - case OpCode::Id::IADD_IMM: { - UNIMPLEMENTED_IF_MSG(instr.generates_cc, - "Condition codes generation in IADD is partially implemented"); - - if (instr.alu_integer.negate_a) - op_a = "-(" + op_a + ')'; - - if (instr.alu_integer.negate_b) - op_b = "-(" + op_b + ')'; - - regs.SetRegisterToInteger(instr.gpr0, true, 0, op_a + " + " + op_b, 1, 1, - instr.alu.saturate_d, instr.generates_cc); - break; - } - case OpCode::Id::IADD3_C: - case OpCode::Id::IADD3_R: - case OpCode::Id::IADD3_IMM: { - UNIMPLEMENTED_IF_MSG( - instr.generates_cc, - "Condition codes generation in IADD3 is partially implemented"); - - std::string op_c = regs.GetRegisterAsInteger(instr.gpr39); - - auto apply_height = [](auto height, auto& oprand) { - switch (height) { - case Tegra::Shader::IAdd3Height::None: - break; - case Tegra::Shader::IAdd3Height::LowerHalfWord: - oprand = "((" + oprand + ") & 0xFFFF)"; - break; - case Tegra::Shader::IAdd3Height::UpperHalfWord: - oprand = "((" + oprand + ") >> 16)"; - break; - default: - UNIMPLEMENTED_MSG("Unhandled IADD3 height: {}", - static_cast(height.Value())); - } - }; - - if (opcode->get().GetId() == OpCode::Id::IADD3_R) { - apply_height(instr.iadd3.height_a, op_a); - apply_height(instr.iadd3.height_b, op_b); - apply_height(instr.iadd3.height_c, op_c); - } - - if (instr.iadd3.neg_a) - op_a = "-(" + op_a + ')'; - - if (instr.iadd3.neg_b) - op_b = 
"-(" + op_b + ')'; - - if (instr.iadd3.neg_c) - op_c = "-(" + op_c + ')'; - - std::string result; - if (opcode->get().GetId() == OpCode::Id::IADD3_R) { - switch (instr.iadd3.mode) { - case Tegra::Shader::IAdd3Mode::RightShift: - // TODO(tech4me): According to - // https://envytools.readthedocs.io/en/latest/hw/graph/maxwell/cuda/int.html?highlight=iadd3 - // The addition between op_a and op_b should be done in uint33, more - // investigation required - result = "(((" + op_a + " + " + op_b + ") >> 16) + " + op_c + ')'; - break; - case Tegra::Shader::IAdd3Mode::LeftShift: - result = "(((" + op_a + " + " + op_b + ") << 16) + " + op_c + ')'; - break; - default: - result = '(' + op_a + " + " + op_b + " + " + op_c + ')'; - break; - } - } else { - result = '(' + op_a + " + " + op_b + " + " + op_c + ')'; - } - - regs.SetRegisterToInteger(instr.gpr0, true, 0, result, 1, 1, false, - instr.generates_cc); - break; - } - case OpCode::Id::ISCADD_C: - case OpCode::Id::ISCADD_R: - case OpCode::Id::ISCADD_IMM: { - UNIMPLEMENTED_IF_MSG( - instr.generates_cc, - "Condition codes generation in ISCADD is partially implemented"); - - if (instr.alu_integer.negate_a) - op_a = "-(" + op_a + ')'; - - if (instr.alu_integer.negate_b) - op_b = "-(" + op_b + ')'; - - const std::string shift = std::to_string(instr.alu_integer.shift_amount.Value()); - - regs.SetRegisterToInteger(instr.gpr0, true, 0, - "((" + op_a + " << " + shift + ") + " + op_b + ')', 1, 1, - false, instr.generates_cc); - break; - } - case OpCode::Id::POPC_C: - case OpCode::Id::POPC_R: - case OpCode::Id::POPC_IMM: { - if (instr.popc.invert) { - op_b = "~(" + op_b + ')'; - } - regs.SetRegisterToInteger(instr.gpr0, true, 0, "bitCount(" + op_b + ')', 1, 1); - break; - } - case OpCode::Id::SEL_C: - case OpCode::Id::SEL_R: - case OpCode::Id::SEL_IMM: { - const std::string condition = - GetPredicateCondition(instr.sel.pred, instr.sel.neg_pred != 0); - regs.SetRegisterToInteger(instr.gpr0, true, 0, - '(' + condition + ") ? " + op_a + " : " + op_b, 1, 1); - break; - } - case OpCode::Id::LOP_C: - case OpCode::Id::LOP_R: - case OpCode::Id::LOP_IMM: { - - if (instr.alu.lop.invert_a) - op_a = "~(" + op_a + ')'; - - if (instr.alu.lop.invert_b) - op_b = "~(" + op_b + ')'; - - WriteLogicOperation(instr.gpr0, instr.alu.lop.operation, op_a, op_b, - instr.alu.lop.pred_result_mode, instr.alu.lop.pred48, - instr.generates_cc); - break; - } - case OpCode::Id::LOP3_C: - case OpCode::Id::LOP3_R: - case OpCode::Id::LOP3_IMM: { - const std::string op_c = regs.GetRegisterAsInteger(instr.gpr39); - std::string lut; - - if (opcode->get().GetId() == OpCode::Id::LOP3_R) { - lut = '(' + std::to_string(instr.alu.lop3.GetImmLut28()) + ')'; - } else { - lut = '(' + std::to_string(instr.alu.lop3.GetImmLut48()) + ')'; - } - - WriteLop3Instruction(instr.gpr0, op_a, op_b, op_c, lut, instr.generates_cc); - break; - } - case OpCode::Id::IMNMX_C: - case OpCode::Id::IMNMX_R: - case OpCode::Id::IMNMX_IMM: { - UNIMPLEMENTED_IF(instr.imnmx.exchange != Tegra::Shader::IMinMaxExchange::None); - UNIMPLEMENTED_IF_MSG( - instr.generates_cc, - "Condition codes generation in IMNMX is partially implemented"); - - const std::string condition = - GetPredicateCondition(instr.imnmx.pred, instr.imnmx.negate_pred != 0); - const std::string parameters = op_a + ',' + op_b; - regs.SetRegisterToInteger(instr.gpr0, instr.imnmx.is_signed, 0, - '(' + condition + ") ? 
min(" + parameters + ") : max(" + - parameters + ')', - 1, 1, false, instr.generates_cc); - break; - } - case OpCode::Id::LEA_R2: - case OpCode::Id::LEA_R1: - case OpCode::Id::LEA_IMM: - case OpCode::Id::LEA_RZ: - case OpCode::Id::LEA_HI: { - std::string op_c; - - switch (opcode->get().GetId()) { - case OpCode::Id::LEA_R2: { - op_a = regs.GetRegisterAsInteger(instr.gpr20); - op_b = regs.GetRegisterAsInteger(instr.gpr39); - op_c = std::to_string(instr.lea.r2.entry_a); - break; - } - - case OpCode::Id::LEA_R1: { - const bool neg = instr.lea.r1.neg != 0; - op_a = regs.GetRegisterAsInteger(instr.gpr8); - if (neg) - op_a = "-(" + op_a + ')'; - op_b = regs.GetRegisterAsInteger(instr.gpr20); - op_c = std::to_string(instr.lea.r1.entry_a); - break; - } - - case OpCode::Id::LEA_IMM: { - const bool neg = instr.lea.imm.neg != 0; - op_b = regs.GetRegisterAsInteger(instr.gpr8); - if (neg) - op_b = "-(" + op_b + ')'; - op_a = std::to_string(instr.lea.imm.entry_a); - op_c = std::to_string(instr.lea.imm.entry_b); - break; - } - - case OpCode::Id::LEA_RZ: { - const bool neg = instr.lea.rz.neg != 0; - op_b = regs.GetRegisterAsInteger(instr.gpr8); - if (neg) - op_b = "-(" + op_b + ')'; - op_a = regs.GetUniform(instr.lea.rz.cb_index, instr.lea.rz.cb_offset, - GLSLRegister::Type::Integer); - op_c = std::to_string(instr.lea.rz.entry_a); - - break; - } - - case OpCode::Id::LEA_HI: - default: { - op_b = regs.GetRegisterAsInteger(instr.gpr8); - op_a = std::to_string(instr.lea.imm.entry_a); - op_c = std::to_string(instr.lea.imm.entry_b); - UNIMPLEMENTED_MSG("Unhandled LEA subinstruction: {}", opcode->get().GetName()); - } - } - UNIMPLEMENTED_IF_MSG(instr.lea.pred48 != static_cast(Pred::UnusedIndex), - "Unhandled LEA Predicate"); - const std::string value = '(' + op_a + " + (" + op_b + "*(1 << " + op_c + ")))"; - regs.SetRegisterToInteger(instr.gpr0, true, 0, value, 1, 1, false, - instr.generates_cc); - - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled ArithmeticInteger instruction: {}", - opcode->get().GetName()); - } - } - - break; - } - case OpCode::Type::ArithmeticHalf: { - if (opcode->get().GetId() == OpCode::Id::HADD2_C || - opcode->get().GetId() == OpCode::Id::HADD2_R) { - UNIMPLEMENTED_IF(instr.alu_half.ftz != 0); - } - const bool negate_a = - opcode->get().GetId() != OpCode::Id::HMUL2_R && instr.alu_half.negate_a != 0; - const bool negate_b = - opcode->get().GetId() != OpCode::Id::HMUL2_C && instr.alu_half.negate_b != 0; - - const std::string op_a = - GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.alu_half.type_a, - instr.alu_half.abs_a != 0, negate_a); - - std::string op_b; - switch (opcode->get().GetId()) { - case OpCode::Id::HADD2_C: - case OpCode::Id::HMUL2_C: - op_b = regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::UnsignedInteger); - break; - case OpCode::Id::HADD2_R: - case OpCode::Id::HMUL2_R: - op_b = regs.GetRegisterAsInteger(instr.gpr20, 0, false); - break; - default: - UNREACHABLE(); - op_b = "0"; - break; - } - op_b = GetHalfFloat(op_b, instr.alu_half.type_b, instr.alu_half.abs_b != 0, negate_b); - - const std::string result = [&]() { - switch (opcode->get().GetId()) { - case OpCode::Id::HADD2_C: - case OpCode::Id::HADD2_R: - return '(' + op_a + " + " + op_b + ')'; - case OpCode::Id::HMUL2_C: - case OpCode::Id::HMUL2_R: - return '(' + op_a + " * " + op_b + ')'; - default: - UNIMPLEMENTED_MSG("Unhandled half float instruction: {}", - opcode->get().GetName()); - return std::string("0"); - } - }(); - - 
regs.SetRegisterToHalfFloat(instr.gpr0, 0, result, instr.alu_half.merge, 1, 1, - instr.alu_half.saturate != 0); - break; - } - case OpCode::Type::ArithmeticHalfImmediate: { - if (opcode->get().GetId() == OpCode::Id::HADD2_IMM) { - UNIMPLEMENTED_IF(instr.alu_half_imm.ftz != 0); - } else { - UNIMPLEMENTED_IF(instr.alu_half_imm.precision != - Tegra::Shader::HalfPrecision::None); - } - - const std::string op_a = GetHalfFloat( - regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.alu_half_imm.type_a, - instr.alu_half_imm.abs_a != 0, instr.alu_half_imm.negate_a != 0); - - const std::string op_b = UnpackHalfImmediate(instr, true); - - const std::string result = [&]() { - switch (opcode->get().GetId()) { - case OpCode::Id::HADD2_IMM: - return op_a + " + " + op_b; - case OpCode::Id::HMUL2_IMM: - return op_a + " * " + op_b; - default: - UNREACHABLE(); - return std::string("0"); - } - }(); - - regs.SetRegisterToHalfFloat(instr.gpr0, 0, result, instr.alu_half_imm.merge, 1, 1, - instr.alu_half_imm.saturate != 0); - break; - } - case OpCode::Type::Ffma: { - const std::string op_a = regs.GetRegisterAsFloat(instr.gpr8); - std::string op_b = instr.ffma.negate_b ? "-" : ""; - std::string op_c = instr.ffma.negate_c ? "-" : ""; - - UNIMPLEMENTED_IF_MSG(instr.ffma.cc != 0, "FFMA cc not implemented"); - UNIMPLEMENTED_IF_MSG( - instr.ffma.tab5980_0 != 1, "FFMA tab5980_0({}) not implemented", - instr.ffma.tab5980_0.Value()); // Seems to be 1 by default based on SMO - UNIMPLEMENTED_IF_MSG(instr.ffma.tab5980_1 != 0, "FFMA tab5980_1({}) not implemented", - instr.ffma.tab5980_1.Value()); - UNIMPLEMENTED_IF_MSG(instr.generates_cc, - "Condition codes generation in FFMA is partially implemented"); - - switch (opcode->get().GetId()) { - case OpCode::Id::FFMA_CR: { - op_b += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Float); - op_c += regs.GetRegisterAsFloat(instr.gpr39); - break; - } - case OpCode::Id::FFMA_RR: { - op_b += regs.GetRegisterAsFloat(instr.gpr20); - op_c += regs.GetRegisterAsFloat(instr.gpr39); - break; - } - case OpCode::Id::FFMA_RC: { - op_b += regs.GetRegisterAsFloat(instr.gpr39); - op_c += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Float); - break; - } - case OpCode::Id::FFMA_IMM: { - op_b += GetImmediate19(instr); - op_c += regs.GetRegisterAsFloat(instr.gpr39); - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled FFMA instruction: {}", opcode->get().GetName()); - } - } - - regs.SetRegisterToFloat(instr.gpr0, 0, "fma(" + op_a + ", " + op_b + ", " + op_c + ')', - 1, 1, instr.alu.saturate_d, instr.generates_cc, 0, true); - break; - } - case OpCode::Type::Hfma2: { - if (opcode->get().GetId() == OpCode::Id::HFMA2_RR) { - UNIMPLEMENTED_IF(instr.hfma2.rr.precision != Tegra::Shader::HalfPrecision::None); - } else { - UNIMPLEMENTED_IF(instr.hfma2.precision != Tegra::Shader::HalfPrecision::None); - } - const bool saturate = opcode->get().GetId() == OpCode::Id::HFMA2_RR - ? 
instr.hfma2.rr.saturate != 0 - : instr.hfma2.saturate != 0; - - const std::string op_a = - GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.hfma2.type_a); - std::string op_b, op_c; - - switch (opcode->get().GetId()) { - case OpCode::Id::HFMA2_CR: - op_b = GetHalfFloat(regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::UnsignedInteger), - instr.hfma2.type_b, false, instr.hfma2.negate_b); - op_c = GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr39, 0, false), - instr.hfma2.type_reg39, false, instr.hfma2.negate_c); - break; - case OpCode::Id::HFMA2_RC: - op_b = GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr39, 0, false), - instr.hfma2.type_reg39, false, instr.hfma2.negate_b); - op_c = GetHalfFloat(regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::UnsignedInteger), - instr.hfma2.type_b, false, instr.hfma2.negate_c); - break; - case OpCode::Id::HFMA2_RR: - op_b = GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr20, 0, false), - instr.hfma2.type_b, false, instr.hfma2.negate_b); - op_c = GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr39, 0, false), - instr.hfma2.rr.type_c, false, instr.hfma2.rr.negate_c); - break; - case OpCode::Id::HFMA2_IMM_R: - op_b = UnpackHalfImmediate(instr, true); - op_c = GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr39, 0, false), - instr.hfma2.type_reg39, false, instr.hfma2.negate_c); - break; - default: - UNREACHABLE(); - op_c = op_b = "vec2(0)"; - break; - } - - const std::string result = '(' + op_a + " * " + op_b + " + " + op_c + ')'; - - regs.SetRegisterToHalfFloat(instr.gpr0, 0, result, instr.hfma2.merge, 1, 1, saturate); - break; - } - case OpCode::Type::Conversion: { - switch (opcode->get().GetId()) { - case OpCode::Id::I2I_R: { - UNIMPLEMENTED_IF(instr.conversion.selector); - - std::string op_a = regs.GetRegisterAsInteger( - instr.gpr20, 0, instr.conversion.is_input_signed, instr.conversion.src_size); - - if (instr.conversion.abs_a) { - op_a = "abs(" + op_a + ')'; - } - - if (instr.conversion.negate_a) { - op_a = "-(" + op_a + ')'; - } - - regs.SetRegisterToInteger(instr.gpr0, instr.conversion.is_output_signed, 0, op_a, 1, - 1, instr.alu.saturate_d, instr.generates_cc, 0, - instr.conversion.dest_size); - break; - } - case OpCode::Id::I2F_R: - case OpCode::Id::I2F_C: { - UNIMPLEMENTED_IF(instr.conversion.dest_size != Register::Size::Word); - UNIMPLEMENTED_IF(instr.conversion.selector); - std::string op_a; - - if (instr.is_b_gpr) { - op_a = - regs.GetRegisterAsInteger(instr.gpr20, 0, instr.conversion.is_input_signed, - instr.conversion.src_size); - } else { - op_a = regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - instr.conversion.is_input_signed - ? 
GLSLRegister::Type::Integer - : GLSLRegister::Type::UnsignedInteger, - instr.conversion.src_size); - } - - if (instr.conversion.abs_a) { - op_a = "abs(" + op_a + ')'; - } - - if (instr.conversion.negate_a) { - op_a = "-(" + op_a + ')'; - } - - regs.SetRegisterToFloat(instr.gpr0, 0, op_a, 1, 1, false, instr.generates_cc); - break; - } - case OpCode::Id::F2F_R: { - UNIMPLEMENTED_IF(instr.conversion.dest_size != Register::Size::Word); - UNIMPLEMENTED_IF(instr.conversion.src_size != Register::Size::Word); - std::string op_a = regs.GetRegisterAsFloat(instr.gpr20); - - if (instr.conversion.abs_a) { - op_a = "abs(" + op_a + ')'; - } - - if (instr.conversion.negate_a) { - op_a = "-(" + op_a + ')'; - } - - switch (instr.conversion.f2f.rounding) { - case Tegra::Shader::F2fRoundingOp::None: - break; - case Tegra::Shader::F2fRoundingOp::Round: - op_a = "roundEven(" + op_a + ')'; - break; - case Tegra::Shader::F2fRoundingOp::Floor: - op_a = "floor(" + op_a + ')'; - break; - case Tegra::Shader::F2fRoundingOp::Ceil: - op_a = "ceil(" + op_a + ')'; - break; - case Tegra::Shader::F2fRoundingOp::Trunc: - op_a = "trunc(" + op_a + ')'; - break; - default: - UNIMPLEMENTED_MSG("Unimplemented F2F rounding mode {}", - static_cast(instr.conversion.f2f.rounding.Value())); - break; - } - - regs.SetRegisterToFloat(instr.gpr0, 0, op_a, 1, 1, instr.alu.saturate_d, - instr.generates_cc); - break; - } - case OpCode::Id::F2I_R: - case OpCode::Id::F2I_C: { - UNIMPLEMENTED_IF(instr.conversion.src_size != Register::Size::Word); - std::string op_a{}; - - if (instr.is_b_gpr) { - op_a = regs.GetRegisterAsFloat(instr.gpr20); - } else { - op_a = regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Float); - } - - if (instr.conversion.abs_a) { - op_a = "abs(" + op_a + ')'; - } - - if (instr.conversion.negate_a) { - op_a = "-(" + op_a + ')'; - } - - switch (instr.conversion.f2i.rounding) { - case Tegra::Shader::F2iRoundingOp::None: - break; - case Tegra::Shader::F2iRoundingOp::Floor: - op_a = "floor(" + op_a + ')'; - break; - case Tegra::Shader::F2iRoundingOp::Ceil: - op_a = "ceil(" + op_a + ')'; - break; - case Tegra::Shader::F2iRoundingOp::Trunc: - op_a = "trunc(" + op_a + ')'; - break; - default: - UNIMPLEMENTED_MSG("Unimplemented F2I rounding mode {}", - static_cast(instr.conversion.f2i.rounding.Value())); - break; - } - - if (instr.conversion.is_output_signed) { - op_a = "int(" + op_a + ')'; - } else { - op_a = "uint(" + op_a + ')'; - } - - regs.SetRegisterToInteger(instr.gpr0, instr.conversion.is_output_signed, 0, op_a, 1, - 1, false, instr.generates_cc, 0, - instr.conversion.dest_size); - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled conversion instruction: {}", opcode->get().GetName()); - } - } - break; - } - case OpCode::Type::Memory: { - switch (opcode->get().GetId()) { - case OpCode::Id::LD_A: { - // Note: Shouldn't this be interp mode flat? As in no interpolation made. 
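// The F2F and F2I cases above pick a rounding function (roundEven/floor/ceil/trunc in the
// emitted GLSL) before converting. A small host-side analogue of that dispatch; the enum and
// helper names are illustrative and do not mirror the real Tegra::Shader enums exactly.
#include <cmath>

enum class Rounding { None, Round, Floor, Ceil, Trunc };

float ApplyRounding(float value, Rounding mode) {
    switch (mode) {
    case Rounding::Round:
        // Rounds to nearest-even under the default FP environment, the closest match to roundEven.
        return std::nearbyint(value);
    case Rounding::Floor:
        return std::floor(value);
    case Rounding::Ceil:
        return std::ceil(value);
    case Rounding::Trunc:
        return std::trunc(value);
    case Rounding::None:
    default:
        return value;
    }
}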
- UNIMPLEMENTED_IF_MSG(instr.gpr8.Value() != Register::ZeroIndex, - "Indirect attribute loads are not supported"); - UNIMPLEMENTED_IF_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) != 0, - "Unaligned attribute loads are not supported"); - - Tegra::Shader::IpaMode input_mode{Tegra::Shader::IpaInterpMode::Perspective, - Tegra::Shader::IpaSampleMode::Default}; - - u64 next_element = instr.attribute.fmt20.element; - u64 next_index = static_cast(instr.attribute.fmt20.index.Value()); - - const auto LoadNextElement = [&](u32 reg_offset) { - regs.SetRegisterToInputAttibute(instr.gpr0.Value() + reg_offset, next_element, - static_cast(next_index), - input_mode, instr.gpr39.Value()); - - // Load the next attribute element into the following register. If the element - // to load goes beyond the vec4 size, load the first element of the next - // attribute. - next_element = (next_element + 1) % 4; - next_index = next_index + (next_element == 0 ? 1 : 0); - }; - - const u32 num_words = static_cast(instr.attribute.fmt20.size.Value()) + 1; - for (u32 reg_offset = 0; reg_offset < num_words; ++reg_offset) { - LoadNextElement(reg_offset); - } - break; - } - case OpCode::Id::LD_C: { - UNIMPLEMENTED_IF(instr.ld_c.unknown != 0); - - const auto scope = shader.Scope(); - - shader.AddLine("uint index = (" + regs.GetRegisterAsInteger(instr.gpr8, 0, false) + - " / 4) & (MAX_CONSTBUFFER_ELEMENTS - 1);"); - - const std::string op_a = - regs.GetUniformIndirect(instr.cbuf36.index, instr.cbuf36.offset + 0, "index", - GLSLRegister::Type::Float); - - switch (instr.ld_c.type.Value()) { - case Tegra::Shader::UniformType::Single: - regs.SetRegisterToFloat(instr.gpr0, 0, op_a, 1, 1); - break; - - case Tegra::Shader::UniformType::Double: { - const std::string op_b = - regs.GetUniformIndirect(instr.cbuf36.index, instr.cbuf36.offset + 4, - "index", GLSLRegister::Type::Float); - regs.SetRegisterToFloat(instr.gpr0, 0, op_a, 1, 1); - regs.SetRegisterToFloat(instr.gpr0.Value() + 1, 0, op_b, 1, 1); - break; - } - default: - UNIMPLEMENTED_MSG("Unhandled type: {}", - static_cast(instr.ld_c.type.Value())); - } - break; - } - case OpCode::Id::LD_L: { - UNIMPLEMENTED_IF_MSG(instr.ld_l.unknown == 1, "LD_L Unhandled mode: {}", - static_cast(instr.ld_l.unknown.Value())); - - const auto scope = shader.Scope(); - - std::string op = '(' + regs.GetRegisterAsInteger(instr.gpr8, 0, false) + " + " + - std::to_string(instr.smem_imm.Value()) + ')'; - - shader.AddLine("uint index = (" + op + " / 4);"); - - const std::string op_a = regs.GetLocalMemoryAsFloat("index"); - - switch (instr.ldst_sl.type.Value()) { - case Tegra::Shader::StoreType::Bytes32: - regs.SetRegisterToFloat(instr.gpr0, 0, op_a, 1, 1); - break; - default: - UNIMPLEMENTED_MSG("LD_L Unhandled type: {}", - static_cast(instr.ldst_sl.type.Value())); - } - break; - } - case OpCode::Id::ST_A: { - UNIMPLEMENTED_IF_MSG(instr.gpr8.Value() != Register::ZeroIndex, - "Indirect attribute loads are not supported"); - UNIMPLEMENTED_IF_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) != 0, - "Unaligned attribute loads are not supported"); - - u64 next_element = instr.attribute.fmt20.element; - u64 next_index = static_cast(instr.attribute.fmt20.index.Value()); - - const auto StoreNextElement = [&](u32 reg_offset) { - regs.SetOutputAttributeToRegister(static_cast(next_index), - next_element, instr.gpr0.Value() + reg_offset, - instr.gpr39.Value()); - - // Load the next attribute element into the following register. 
If the element - // to load goes beyond the vec4 size, load the first element of the next - // attribute. - next_element = (next_element + 1) % 4; - next_index = next_index + (next_element == 0 ? 1 : 0); - }; - - const u32 num_words = static_cast(instr.attribute.fmt20.size.Value()) + 1; - for (u32 reg_offset = 0; reg_offset < num_words; ++reg_offset) { - StoreNextElement(reg_offset); - } - - break; - } - case OpCode::Id::ST_L: { - UNIMPLEMENTED_IF_MSG(instr.st_l.unknown == 0, "ST_L Unhandled mode: {}", - static_cast(instr.st_l.unknown.Value())); - - const auto scope = shader.Scope(); - - std::string op = '(' + regs.GetRegisterAsInteger(instr.gpr8, 0, false) + " + " + - std::to_string(instr.smem_imm.Value()) + ')'; - - shader.AddLine("uint index = (" + op + " / 4);"); - - switch (instr.ldst_sl.type.Value()) { - case Tegra::Shader::StoreType::Bytes32: - regs.SetLocalMemoryAsFloat("index", regs.GetRegisterAsFloat(instr.gpr0)); - break; - default: - UNIMPLEMENTED_MSG("ST_L Unhandled type: {}", - static_cast(instr.ldst_sl.type.Value())); - } - break; - } - case OpCode::Id::TEX: { - Tegra::Shader::TextureType texture_type{instr.tex.texture_type}; - const bool is_array = instr.tex.array != 0; - const bool depth_compare = - instr.tex.UsesMiscMode(Tegra::Shader::TextureMiscMode::DC); - const auto process_mode = instr.tex.GetTextureProcessMode(); - UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); - UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI), - "AOFFI is not implemented"); - - const auto [coord, texture] = - GetTEXCode(instr, texture_type, process_mode, depth_compare, is_array); - - const auto scope = shader.Scope(); - shader.AddLine(coord); - - if (depth_compare) { - regs.SetRegisterToFloat(instr.gpr0, 0, texture, 1, 1); - } else { - shader.AddLine("vec4 texture_tmp = " + texture + ';'); - std::size_t dest_elem{}; - for (std::size_t elem = 0; elem < 4; ++elem) { - if (!instr.tex.IsComponentEnabled(elem)) { - // Skip disabled components - continue; - } - regs.SetRegisterToFloat(instr.gpr0, elem, "texture_tmp", 1, 4, false, false, - dest_elem); - ++dest_elem; - } - } - break; - } - case OpCode::Id::TEXS: { - Tegra::Shader::TextureType texture_type{instr.texs.GetTextureType()}; - const bool is_array{instr.texs.IsArrayTexture()}; - const bool depth_compare = - instr.texs.UsesMiscMode(Tegra::Shader::TextureMiscMode::DC); - const auto process_mode = instr.texs.GetTextureProcessMode(); - - UNIMPLEMENTED_IF_MSG(instr.texs.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); - - const auto scope = shader.Scope(); - - auto [coord, texture] = - GetTEXSCode(instr, texture_type, process_mode, depth_compare, is_array); - - shader.AddLine(coord); - - if (depth_compare) { - texture = "vec4(" + texture + ')'; - } - shader.AddLine("vec4 texture_tmp = " + texture + ';'); - - if (instr.texs.fp32_flag) { - WriteTexsInstructionFloat(instr, "texture_tmp"); - } else { - WriteTexsInstructionHalfFloat(instr, "texture_tmp"); - } - break; - } - case OpCode::Id::TLDS: { - const Tegra::Shader::TextureType texture_type{instr.tlds.GetTextureType()}; - const bool is_array{instr.tlds.IsArrayTexture()}; - - UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); - UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI), - "AOFFI is not implemented"); - 
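// LD_A and ST_A above iterate over consecutive (attribute, element) pairs, wrapping to the
// first element of the next attribute once the fourth vec4 component has been processed.
// A tiny standalone sketch of that advance rule with the types reduced to plain integers:
#include <cstdint>
#include <utility>

std::pair<std::uint64_t, std::uint64_t> AdvanceAttributeElement(std::uint64_t index,
                                                                std::uint64_t element) {
    element = (element + 1) % 4; // x -> y -> z -> w -> x ...
    if (element == 0) {
        ++index; // wrapped past .w, so move on to the next attribute
    }
    return {index, element};
}
// Starting from (index = 1, element = 2), four loads touch attr1.z, attr1.w, attr2.x, attr2.y.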
UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(Tegra::Shader::TextureMiscMode::MZ), - "MZ is not implemented"); - - const auto [coord, texture] = GetTLDSCode(instr, texture_type, is_array); - - const auto scope = shader.Scope(); - - shader.AddLine(coord); - shader.AddLine("vec4 texture_tmp = " + texture + ';'); - WriteTexsInstructionFloat(instr, "texture_tmp"); - break; - } - case OpCode::Id::TLD4: { - - UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); - UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI), - "AOFFI is not implemented"); - UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV), - "NDV is not implemented"); - UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::PTP), - "PTP is not implemented"); - - auto texture_type = instr.tld4.texture_type.Value(); - const bool depth_compare = - instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::DC); - const bool is_array = instr.tld4.array != 0; - - const auto [coord, texture] = - GetTLD4Code(instr, texture_type, depth_compare, is_array); - - const auto scope = shader.Scope(); - - shader.AddLine(coord); - std::size_t dest_elem{}; - - shader.AddLine("vec4 texture_tmp = " + texture + ';'); - for (std::size_t elem = 0; elem < 4; ++elem) { - if (!instr.tex.IsComponentEnabled(elem)) { - // Skip disabled components - continue; - } - regs.SetRegisterToFloat(instr.gpr0, elem, "texture_tmp", 1, 4, false, false, - dest_elem); - ++dest_elem; - } - break; - } - case OpCode::Id::TLD4S: { - UNIMPLEMENTED_IF_MSG( - instr.tld4s.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); - UNIMPLEMENTED_IF_MSG( - instr.tld4s.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI), - "AOFFI is not implemented"); - - const auto scope = shader.Scope(); - - std::string coords; - - const bool depth_compare = - instr.tld4s.UsesMiscMode(Tegra::Shader::TextureMiscMode::DC); - - const std::string sampler = GetSampler( - instr.sampler, Tegra::Shader::TextureType::Texture2D, false, depth_compare); - - const std::string op_a = regs.GetRegisterAsFloat(instr.gpr8); - coords = "vec2 coords = vec2(" + op_a + ", "; - std::string texture = "textureGather(" + sampler + ", coords, "; - - if (!depth_compare) { - const std::string op_b = regs.GetRegisterAsFloat(instr.gpr20); - coords += op_b + ");"; - texture += std::to_string(instr.tld4s.component) + ')'; - } else { - const std::string op_b = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - const std::string op_c = regs.GetRegisterAsFloat(instr.gpr20); - coords += op_b + ");"; - texture += op_c + ')'; - } - shader.AddLine(coords); - shader.AddLine("vec4 texture_tmp = " + texture + ';'); - WriteTexsInstructionFloat(instr, "texture_tmp"); - break; - } - case OpCode::Id::TXQ: { - UNIMPLEMENTED_IF_MSG(instr.txq.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); - - const auto scope = shader.Scope(); - - // TODO: The new commits on the texture refactor, change the way samplers work. - // Sadly, not all texture instructions specify the type of texture their sampler - // uses. This must be fixed at a later instance. 
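// TEX above and TLD4 in this hunk copy only the components enabled in the instruction into
// consecutive destination registers (the dest_elem counter). A standalone sketch of that
// compaction over a 4-component texel; treating bit i of the mask as "component i enabled"
// is an assumption made for illustration, the real check goes through IsComponentEnabled().
#include <array>
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<float> CompactEnabledComponents(const std::array<float, 4>& texel,
                                            std::uint32_t component_mask) {
    std::vector<float> packed;
    for (std::size_t elem = 0; elem < 4; ++elem) {
        if (((component_mask >> elem) & 1u) == 0) {
            continue; // skip disabled components
        }
        packed.push_back(texel[elem]); // written to gpr0 + (packed.size() - 1)
    }
    return packed;
}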
- const std::string sampler = - GetSampler(instr.sampler, Tegra::Shader::TextureType::Texture2D, false, false); - switch (instr.txq.query_type) { - case Tegra::Shader::TextureQueryType::Dimension: { - const std::string texture = "textureSize(" + sampler + ", " + - regs.GetRegisterAsInteger(instr.gpr8) + ')'; - const std::string mip_level = "textureQueryLevels(" + sampler + ')'; - shader.AddLine("ivec2 sizes = " + texture + ';'); - - regs.SetRegisterToInteger(instr.gpr0.Value() + 0, true, 0, "sizes.x", 1, 1); - regs.SetRegisterToInteger(instr.gpr0.Value() + 1, true, 0, "sizes.y", 1, 1); - regs.SetRegisterToInteger(instr.gpr0.Value() + 2, true, 0, "0", 1, 1); - regs.SetRegisterToInteger(instr.gpr0.Value() + 3, true, 0, mip_level, 1, 1); - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled texture query type: {}", - static_cast(instr.txq.query_type.Value())); - } - } - break; - } - case OpCode::Id::TMML: { - UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); - UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV), - "NDV is not implemented"); - - const std::string x = regs.GetRegisterAsFloat(instr.gpr8); - const bool is_array = instr.tmml.array != 0; - auto texture_type = instr.tmml.texture_type.Value(); - const std::string sampler = - GetSampler(instr.sampler, texture_type, is_array, false); - - const auto scope = shader.Scope(); - - // TODO: Add coordinates for different samplers once other texture types are - // implemented. - switch (texture_type) { - case Tegra::Shader::TextureType::Texture1D: { - shader.AddLine("float coords = " + x + ';'); - break; - } - case Tegra::Shader::TextureType::Texture2D: { - const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - shader.AddLine("vec2 coords = vec2(" + x + ", " + y + ");"); - break; - } - default: - UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast(texture_type)); - - // Fallback to interpreting as a 2D texture for now - const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - shader.AddLine("vec2 coords = vec2(" + x + ", " + y + ");"); - texture_type = Tegra::Shader::TextureType::Texture2D; - } - - const std::string texture = "textureQueryLod(" + sampler + ", coords)"; - shader.AddLine("vec2 tmp = " + texture + " * vec2(256.0, 256.0);"); - - regs.SetRegisterToInteger(instr.gpr0, true, 0, "int(tmp.y)", 1, 1); - regs.SetRegisterToInteger(instr.gpr0.Value() + 1, false, 0, "uint(tmp.x)", 1, 1); - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled memory instruction: {}", opcode->get().GetName()); - } - } - break; - } - case OpCode::Type::FloatSetPredicate: { - const std::string op_a = - GetOperandAbsNeg(regs.GetRegisterAsFloat(instr.gpr8), instr.fsetp.abs_a != 0, - instr.fsetp.neg_a != 0); - - std::string op_b; - - if (instr.is_b_imm) { - op_b += '(' + GetImmediate19(instr) + ')'; - } else { - if (instr.is_b_gpr) { - op_b += regs.GetRegisterAsFloat(instr.gpr20); - } else { - op_b += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Float); - } - } - - if (instr.fsetp.abs_b) { - op_b = "abs(" + op_b + ')'; - } - - // We can't use the constant predicate as destination. 
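// The FSETP handler that continues below (and the ISETP/HSETP2/VSETP handlers after it)
// produces two results: the primary predicate receives (comparison OP second_pred) and the
// optional secondary predicate receives (!comparison OP second_pred). A minimal sketch of
// that pairing with the combiner reduced to a callable; names are illustrative only.
#include <functional>
#include <utility>

std::pair<bool, bool> SetPredicatePair(bool comparison, bool second_pred,
                                       const std::function<bool(bool, bool)>& combiner) {
    const bool primary = combiner(comparison, second_pred);
    const bool secondary = combiner(!comparison, second_pred); // only stored if pred0 is in use
    return {primary, secondary};
}
// With a logical-AND combiner this yields {comparison && second_pred, !comparison && second_pred}.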
- ASSERT(instr.fsetp.pred3 != static_cast(Pred::UnusedIndex)); - - const std::string second_pred = - GetPredicateCondition(instr.fsetp.pred39, instr.fsetp.neg_pred != 0); - - const std::string combiner = GetPredicateCombiner(instr.fsetp.op); - - const std::string predicate = GetPredicateComparison(instr.fsetp.cond, op_a, op_b); - // Set the primary predicate to the result of Predicate OP SecondPredicate - SetPredicate(instr.fsetp.pred3, - '(' + predicate + ") " + combiner + " (" + second_pred + ')'); - - if (instr.fsetp.pred0 != static_cast(Pred::UnusedIndex)) { - // Set the secondary predicate to the result of !Predicate OP SecondPredicate, - // if enabled - SetPredicate(instr.fsetp.pred0, - "!(" + predicate + ") " + combiner + " (" + second_pred + ')'); - } - break; - } - case OpCode::Type::IntegerSetPredicate: { - const std::string op_a = - regs.GetRegisterAsInteger(instr.gpr8, 0, instr.isetp.is_signed); - std::string op_b; - - if (instr.is_b_imm) { - op_b += '(' + std::to_string(instr.alu.GetSignedImm20_20()) + ')'; - } else { - if (instr.is_b_gpr) { - op_b += regs.GetRegisterAsInteger(instr.gpr20, 0, instr.isetp.is_signed); - } else { - op_b += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Integer); - } - } - - // We can't use the constant predicate as destination. - ASSERT(instr.isetp.pred3 != static_cast(Pred::UnusedIndex)); - - const std::string second_pred = - GetPredicateCondition(instr.isetp.pred39, instr.isetp.neg_pred != 0); - - const std::string combiner = GetPredicateCombiner(instr.isetp.op); - - const std::string predicate = GetPredicateComparison(instr.isetp.cond, op_a, op_b); - // Set the primary predicate to the result of Predicate OP SecondPredicate - SetPredicate(instr.isetp.pred3, - '(' + predicate + ") " + combiner + " (" + second_pred + ')'); - - if (instr.isetp.pred0 != static_cast(Pred::UnusedIndex)) { - // Set the secondary predicate to the result of !Predicate OP SecondPredicate, - // if enabled - SetPredicate(instr.isetp.pred0, - "!(" + predicate + ") " + combiner + " (" + second_pred + ')'); - } - break; - } - case OpCode::Type::HalfSetPredicate: { - UNIMPLEMENTED_IF(instr.hsetp2.ftz != 0); - - const std::string op_a = - GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.hsetp2.type_a, - instr.hsetp2.abs_a, instr.hsetp2.negate_a); - - const std::string op_b = [&]() { - switch (opcode->get().GetId()) { - case OpCode::Id::HSETP2_R: - return GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr20, 0, false), - instr.hsetp2.type_b, instr.hsetp2.abs_a, - instr.hsetp2.negate_b); - default: - UNREACHABLE(); - return std::string("vec2(0)"); - } - }(); - - // We can't use the constant predicate as destination. - ASSERT(instr.hsetp2.pred3 != static_cast(Pred::UnusedIndex)); - - const std::string second_pred = - GetPredicateCondition(instr.hsetp2.pred39, instr.hsetp2.neg_pred != 0); - - const std::string combiner = GetPredicateCombiner(instr.hsetp2.op); - - const std::string component_combiner = instr.hsetp2.h_and ? 
"&&" : "||"; - const std::string predicate = - '(' + GetPredicateComparison(instr.hsetp2.cond, op_a + ".x", op_b + ".x") + ' ' + - component_combiner + ' ' + - GetPredicateComparison(instr.hsetp2.cond, op_a + ".y", op_b + ".y") + ')'; - - // Set the primary predicate to the result of Predicate OP SecondPredicate - SetPredicate(instr.hsetp2.pred3, - '(' + predicate + ") " + combiner + " (" + second_pred + ')'); - - if (instr.hsetp2.pred0 != static_cast(Pred::UnusedIndex)) { - // Set the secondary predicate to the result of !Predicate OP SecondPredicate, - // if enabled - SetPredicate(instr.hsetp2.pred0, - "!(" + predicate + ") " + combiner + " (" + second_pred + ')'); - } - break; - } - case OpCode::Type::PredicateSetRegister: { - UNIMPLEMENTED_IF_MSG(instr.generates_cc, - "Condition codes generation in PSET is partially implemented"); - - const std::string op_a = - GetPredicateCondition(instr.pset.pred12, instr.pset.neg_pred12 != 0); - const std::string op_b = - GetPredicateCondition(instr.pset.pred29, instr.pset.neg_pred29 != 0); - - const std::string second_pred = - GetPredicateCondition(instr.pset.pred39, instr.pset.neg_pred39 != 0); - - const std::string combiner = GetPredicateCombiner(instr.pset.op); - - const std::string predicate = - '(' + op_a + ") " + GetPredicateCombiner(instr.pset.cond) + " (" + op_b + ')'; - const std::string result = '(' + predicate + ") " + combiner + " (" + second_pred + ')'; - if (instr.pset.bf == 0) { - const std::string value = '(' + result + ") ? 0xFFFFFFFF : 0"; - regs.SetRegisterToInteger(instr.gpr0, false, 0, value, 1, 1, false, - instr.generates_cc); - } else { - const std::string value = '(' + result + ") ? 1.0 : 0.0"; - regs.SetRegisterToFloat(instr.gpr0, 0, value, 1, 1, false, instr.generates_cc); - } - break; - } - case OpCode::Type::PredicateSetPredicate: { - switch (opcode->get().GetId()) { - case OpCode::Id::PSETP: { - const std::string op_a = - GetPredicateCondition(instr.psetp.pred12, instr.psetp.neg_pred12 != 0); - const std::string op_b = - GetPredicateCondition(instr.psetp.pred29, instr.psetp.neg_pred29 != 0); - - // We can't use the constant predicate as destination. 
- ASSERT(instr.psetp.pred3 != static_cast(Pred::UnusedIndex)); - - const std::string second_pred = - GetPredicateCondition(instr.psetp.pred39, instr.psetp.neg_pred39 != 0); - - const std::string combiner = GetPredicateCombiner(instr.psetp.op); - - const std::string predicate = - '(' + op_a + ") " + GetPredicateCombiner(instr.psetp.cond) + " (" + op_b + ')'; - - // Set the primary predicate to the result of Predicate OP SecondPredicate - SetPredicate(instr.psetp.pred3, - '(' + predicate + ") " + combiner + " (" + second_pred + ')'); - - if (instr.psetp.pred0 != static_cast(Pred::UnusedIndex)) { - // Set the secondary predicate to the result of !Predicate OP SecondPredicate, - // if enabled - SetPredicate(instr.psetp.pred0, - "!(" + predicate + ") " + combiner + " (" + second_pred + ')'); - } - break; - } - case OpCode::Id::CSETP: { - const std::string pred = - GetPredicateCondition(instr.csetp.pred39, instr.csetp.neg_pred39 != 0); - const std::string combiner = GetPredicateCombiner(instr.csetp.op); - const std::string condition_code = regs.GetConditionCode(instr.csetp.cc); - if (instr.csetp.pred3 != static_cast(Pred::UnusedIndex)) { - SetPredicate(instr.csetp.pred3, - '(' + condition_code + ") " + combiner + " (" + pred + ')'); - } - if (instr.csetp.pred0 != static_cast(Pred::UnusedIndex)) { - SetPredicate(instr.csetp.pred0, - "!(" + condition_code + ") " + combiner + " (" + pred + ')'); - } - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled predicate instruction: {}", opcode->get().GetName()); - } - } - break; - } - case OpCode::Type::RegisterSetPredicate: { - UNIMPLEMENTED_IF(instr.r2p.mode != Tegra::Shader::R2pMode::Pr); - - const std::string apply_mask = [&]() { - switch (opcode->get().GetId()) { - case OpCode::Id::R2P_IMM: - return std::to_string(instr.r2p.immediate_mask); - default: - UNREACHABLE(); - return std::to_string(instr.r2p.immediate_mask); - } - }(); - const std::string mask = '(' + regs.GetRegisterAsInteger(instr.gpr8, 0, false) + - " >> " + std::to_string(instr.r2p.byte) + ')'; - - constexpr u64 programmable_preds = 7; - for (u64 pred = 0; pred < programmable_preds; ++pred) { - const auto shift = std::to_string(1 << pred); - - shader.AddLine("if ((" + apply_mask + " & " + shift + ") != 0) {"); - ++shader.scope; - - SetPredicate(pred, '(' + mask + " & " + shift + ") != 0"); - - --shader.scope; - shader.AddLine('}'); - } - break; - } - case OpCode::Type::FloatSet: { - const std::string op_a = GetOperandAbsNeg(regs.GetRegisterAsFloat(instr.gpr8), - instr.fset.abs_a != 0, instr.fset.neg_a != 0); - - std::string op_b; - - if (instr.is_b_imm) { - const std::string imm = GetImmediate19(instr); - op_b = imm; - } else { - if (instr.is_b_gpr) { - op_b = regs.GetRegisterAsFloat(instr.gpr20); - } else { - op_b = regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Float); - } - } - - op_b = GetOperandAbsNeg(op_b, instr.fset.abs_b != 0, instr.fset.neg_b != 0); - - // The fset instruction sets a register to 1.0 or -1 (depending on the bf bit) if the - // condition is true, and to 0 otherwise. - const std::string second_pred = - GetPredicateCondition(instr.fset.pred39, instr.fset.neg_pred != 0); - - const std::string combiner = GetPredicateCombiner(instr.fset.op); - - const std::string predicate = "((" + - GetPredicateComparison(instr.fset.cond, op_a, op_b) + - ") " + combiner + " (" + second_pred + "))"; - - if (instr.fset.bf) { - regs.SetRegisterToFloat(instr.gpr0, 0, predicate + " ? 
1.0 : 0.0", 1, 1, false, - instr.generates_cc); - } else { - regs.SetRegisterToInteger(instr.gpr0, false, 0, predicate + " ? 0xFFFFFFFF : 0", 1, - 1, false, instr.generates_cc); - } - break; - } - case OpCode::Type::IntegerSet: { - const std::string op_a = regs.GetRegisterAsInteger(instr.gpr8, 0, instr.iset.is_signed); - - std::string op_b; - - if (instr.is_b_imm) { - op_b = std::to_string(instr.alu.GetSignedImm20_20()); - } else { - if (instr.is_b_gpr) { - op_b = regs.GetRegisterAsInteger(instr.gpr20, 0, instr.iset.is_signed); - } else { - op_b = regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Integer); - } - } - - // The iset instruction sets a register to 1.0 or -1 (depending on the bf bit) if the - // condition is true, and to 0 otherwise. - const std::string second_pred = - GetPredicateCondition(instr.iset.pred39, instr.iset.neg_pred != 0); - - const std::string combiner = GetPredicateCombiner(instr.iset.op); - - const std::string predicate = "((" + - GetPredicateComparison(instr.iset.cond, op_a, op_b) + - ") " + combiner + " (" + second_pred + "))"; - - if (instr.iset.bf) { - regs.SetRegisterToFloat(instr.gpr0, 0, predicate + " ? 1.0 : 0.0", 1, 1); - } else { - regs.SetRegisterToInteger(instr.gpr0, false, 0, predicate + " ? 0xFFFFFFFF : 0", 1, - 1); - } - break; - } - case OpCode::Type::HalfSet: { - UNIMPLEMENTED_IF(instr.hset2.ftz != 0); - - const std::string op_a = - GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.hset2.type_a, - instr.hset2.abs_a != 0, instr.hset2.negate_a != 0); - - const std::string op_b = [&]() { - switch (opcode->get().GetId()) { - case OpCode::Id::HSET2_R: - return GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr20, 0, false), - instr.hset2.type_b, instr.hset2.abs_b != 0, - instr.hset2.negate_b != 0); - default: - UNREACHABLE(); - return std::string("vec2(0)"); - } - }(); - - const std::string second_pred = - GetPredicateCondition(instr.hset2.pred39, instr.hset2.neg_pred != 0); - - const std::string combiner = GetPredicateCombiner(instr.hset2.op); - - // HSET2 operates on each half float in the pack. - std::string result; - for (int i = 0; i < 2; ++i) { - const std::string float_value = i == 0 ? "0x00003c00" : "0x3c000000"; - const std::string integer_value = i == 0 ? "0x0000ffff" : "0xffff0000"; - const std::string value = instr.hset2.bf == 1 ? float_value : integer_value; - - const std::string comp = std::string(".") + "xy"[i]; - const std::string predicate = - "((" + GetPredicateComparison(instr.hset2.cond, op_a + comp, op_b + comp) + - ") " + combiner + " (" + second_pred + "))"; - - result += '(' + predicate + " ? " + value + " : 0)"; - if (i == 0) { - result += " | "; - } - } - regs.SetRegisterToInteger(instr.gpr0, false, 0, '(' + result + ')', 1, 1); - break; - } - case OpCode::Type::Xmad: { - UNIMPLEMENTED_IF(instr.xmad.sign_a); - UNIMPLEMENTED_IF(instr.xmad.sign_b); - UNIMPLEMENTED_IF_MSG(instr.generates_cc, - "Condition codes generation in XMAD is partially implemented"); - - std::string op_a{regs.GetRegisterAsInteger(instr.gpr8, 0, instr.xmad.sign_a)}; - std::string op_b; - std::string op_c; - - // TODO(bunnei): Needs to be fixed once op_a or op_b is signed - UNIMPLEMENTED_IF(instr.xmad.sign_a != instr.xmad.sign_b); - const bool is_signed{instr.xmad.sign_a == 1}; - - bool is_merge{}; - switch (opcode->get().GetId()) { - case OpCode::Id::XMAD_CR: { - is_merge = instr.xmad.merge_56; - op_b += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - instr.xmad.sign_b ? 
GLSLRegister::Type::Integer - : GLSLRegister::Type::UnsignedInteger); - op_c += regs.GetRegisterAsInteger(instr.gpr39, 0, is_signed); - break; - } - case OpCode::Id::XMAD_RR: { - is_merge = instr.xmad.merge_37; - op_b += regs.GetRegisterAsInteger(instr.gpr20, 0, instr.xmad.sign_b); - op_c += regs.GetRegisterAsInteger(instr.gpr39, 0, is_signed); - break; - } - case OpCode::Id::XMAD_RC: { - op_b += regs.GetRegisterAsInteger(instr.gpr39, 0, instr.xmad.sign_b); - op_c += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - is_signed ? GLSLRegister::Type::Integer - : GLSLRegister::Type::UnsignedInteger); - break; - } - case OpCode::Id::XMAD_IMM: { - is_merge = instr.xmad.merge_37; - op_b += std::to_string(instr.xmad.imm20_16); - op_c += regs.GetRegisterAsInteger(instr.gpr39, 0, is_signed); - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled XMAD instruction: {}", opcode->get().GetName()); - } - } - - // TODO(bunnei): Ensure this is right with signed operands - if (instr.xmad.high_a) { - op_a = "((" + op_a + ") >> 16)"; - } else { - op_a = "((" + op_a + ") & 0xFFFF)"; - } - - std::string src2 = '(' + op_b + ')'; // Preserve original source 2 - if (instr.xmad.high_b) { - op_b = '(' + src2 + " >> 16)"; - } else { - op_b = '(' + src2 + " & 0xFFFF)"; - } - - std::string product = '(' + op_a + " * " + op_b + ')'; - if (instr.xmad.product_shift_left) { - product = '(' + product + " << 16)"; - } - - switch (instr.xmad.mode) { - case Tegra::Shader::XmadMode::None: - break; - case Tegra::Shader::XmadMode::CLo: - op_c = "((" + op_c + ") & 0xFFFF)"; - break; - case Tegra::Shader::XmadMode::CHi: - op_c = "((" + op_c + ") >> 16)"; - break; - case Tegra::Shader::XmadMode::CBcc: - op_c = "((" + op_c + ") + (" + src2 + "<< 16))"; - break; - default: { - UNIMPLEMENTED_MSG("Unhandled XMAD mode: {}", - static_cast(instr.xmad.mode.Value())); - } - } - - std::string sum{'(' + product + " + " + op_c + ')'}; - if (is_merge) { - sum = "((" + sum + " & 0xFFFF) | (" + src2 + "<< 16))"; - } - - regs.SetRegisterToInteger(instr.gpr0, is_signed, 0, sum, 1, 1, false, - instr.generates_cc); - break; - } - default: { - switch (opcode->get().GetId()) { - case OpCode::Id::EXIT: { - const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; - UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, - "EXIT condition code used: {}", static_cast(cc)); - - if (stage == Maxwell3D::Regs::ShaderStage::Fragment) { - EmitFragmentOutputsWrite(); - } - - switch (instr.flow.cond) { - case Tegra::Shader::FlowCondition::Always: - shader.AddLine("return true;"); - if (instr.pred.pred_index == static_cast(Pred::UnusedIndex)) { - // If this is an unconditional exit then just end processing here, - // otherwise we have to account for the possibility of the condition - // not being met, so continue processing the next instruction. - offset = PROGRAM_END - 1; - } - break; - - case Tegra::Shader::FlowCondition::Fcsm_Tr: - // TODO(bunnei): What is this used for? 
If we assume this conditon is not - // satisifed, dual vertex shaders in Farming Simulator make more sense - UNIMPLEMENTED_MSG("Skipping unknown FlowCondition::Fcsm_Tr"); - break; - - default: - UNIMPLEMENTED_MSG("Unhandled flow condition: {}", - static_cast(instr.flow.cond.Value())); - } - break; - } - case OpCode::Id::KIL: { - UNIMPLEMENTED_IF(instr.flow.cond != Tegra::Shader::FlowCondition::Always); - - const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; - UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, - "KIL condition code used: {}", static_cast(cc)); - - // Enclose "discard" in a conditional, so that GLSL compilation does not complain - // about unexecuted instructions that may follow this. - shader.AddLine("if (true) {"); - ++shader.scope; - shader.AddLine("discard;"); - --shader.scope; - shader.AddLine("}"); - - break; - } - case OpCode::Id::OUT_R: { - UNIMPLEMENTED_IF_MSG(instr.gpr20.Value() != Register::ZeroIndex, - "Stream buffer is not supported"); - ASSERT_MSG(stage == Maxwell3D::Regs::ShaderStage::Geometry, - "OUT is expected to be used in a geometry shader."); - - if (instr.out.emit) { - // gpr0 is used to store the next address. Hardware returns a pointer but - // we just return the next index with a cyclic cap. - const std::string current{regs.GetRegisterAsInteger(instr.gpr8, 0, false)}; - const std::string next = "((" + current + " + 1" + ") % " + - std::to_string(MAX_GEOMETRY_BUFFERS) + ')'; - shader.AddLine("emit_vertex(" + current + ");"); - regs.SetRegisterToInteger(instr.gpr0, false, 0, next, 1, 1); - } - if (instr.out.cut) { - shader.AddLine("EndPrimitive();"); - } - - break; - } - case OpCode::Id::MOV_SYS: { - switch (instr.sys20) { - case Tegra::Shader::SystemVariable::InvocationInfo: { - LOG_WARNING(HW_GPU, "MOV_SYS instruction with InvocationInfo is incomplete"); - regs.SetRegisterToInteger(instr.gpr0, false, 0, "0u", 1, 1); - break; - } - case Tegra::Shader::SystemVariable::Ydirection: { - // Config pack's third value is Y_NEGATE's state. 
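// OUT_R above hands back the "next vertex" handle as a simple cyclic index instead of the
// pointer real hardware returns. A sketch of that bookkeeping; the MAX_GEOMETRY_BUFFERS
// value below is an assumed placeholder, the real constant is defined elsewhere in this file.
#include <cstdint>

constexpr std::uint32_t MAX_GEOMETRY_BUFFERS = 8; // illustrative value only

std::uint32_t NextGeometryVertexIndex(std::uint32_t current) {
    return (current + 1) % MAX_GEOMETRY_BUFFERS; // wraps instead of producing a real pointer
}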
- regs.SetRegisterToFloat(instr.gpr0, 0, "uintBitsToFloat(config_pack[2])", 1, 1); - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled system move: {}", - static_cast(instr.sys20.Value())); - } - } - break; - } - case OpCode::Id::ISBERD: { - UNIMPLEMENTED_IF(instr.isberd.o != 0); - UNIMPLEMENTED_IF(instr.isberd.skew != 0); - UNIMPLEMENTED_IF(instr.isberd.shift != Tegra::Shader::IsberdShift::None); - UNIMPLEMENTED_IF(instr.isberd.mode != Tegra::Shader::IsberdMode::None); - ASSERT_MSG(stage == Maxwell3D::Regs::ShaderStage::Geometry, - "ISBERD is expected to be used in a geometry shader."); - LOG_WARNING(HW_GPU, "ISBERD instruction is incomplete"); - regs.SetRegisterToFloat(instr.gpr0, 0, regs.GetRegisterAsFloat(instr.gpr8), 1, 1); - break; - } - case OpCode::Id::BRA: { - UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, - "BRA with constant buffers are not implemented"); - - const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; - const u32 target = offset + instr.bra.GetBranchTarget(); - if (cc != Tegra::Shader::ConditionCode::T) { - const std::string condition_code = regs.GetConditionCode(cc); - shader.AddLine("if (" + condition_code + "){"); - shader.scope++; - shader.AddLine("{ jmp_to = " + std::to_string(target) + "u; break; }"); - shader.scope--; - shader.AddLine('}'); - } else { - shader.AddLine("{ jmp_to = " + std::to_string(target) + "u; break; }"); - } - break; - } - case OpCode::Id::IPA: { - const auto& attribute = instr.attribute.fmt28; - const auto& reg = instr.gpr0; - - Tegra::Shader::IpaMode input_mode{instr.ipa.interp_mode.Value(), - instr.ipa.sample_mode.Value()}; - regs.SetRegisterToInputAttibute(reg, attribute.element, attribute.index, - input_mode); - - if (instr.ipa.saturate) { - regs.SetRegisterToFloat(reg, 0, regs.GetRegisterAsFloat(reg), 1, 1, true); - } - break; - } - case OpCode::Id::SSY: { - // The SSY opcode tells the GPU where to re-converge divergent execution paths, it - // sets the target of the jump that the SYNC instruction will make. The SSY opcode - // has a similar structure to the BRA opcode. - UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, - "Constant buffer flow is not supported"); - - const u32 target = offset + instr.bra.GetBranchTarget(); - EmitPushToFlowStack(target); - break; - } - case OpCode::Id::PBK: { - // PBK pushes to a stack the address where BRK will jump to. This shares stack with - // SSY but using SYNC on a PBK address will kill the shader execution. We don't - // emulate this because it's very unlikely a driver will emit such invalid shader. 
- UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, - "Constant buffer PBK is not supported"); - - const u32 target = offset + instr.bra.GetBranchTarget(); - EmitPushToFlowStack(target); - break; - } - case OpCode::Id::SYNC: { - const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; - UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, - "SYNC condition code used: {}", static_cast(cc)); - - // The SYNC opcode jumps to the address previously set by the SSY opcode - EmitPopFromFlowStack(); - break; - } - case OpCode::Id::BRK: { - // The BRK opcode jumps to the address previously set by the PBK opcode - const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; - UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, - "BRK condition code used: {}", static_cast(cc)); - - EmitPopFromFlowStack(); - break; - } - case OpCode::Id::DEPBAR: { - // TODO(Subv): Find out if we actually have to care about this instruction or if - // the GLSL compiler takes care of that for us. - LOG_WARNING(HW_GPU, "DEPBAR instruction is stubbed"); - break; - } - case OpCode::Id::VMAD: { - UNIMPLEMENTED_IF_MSG(instr.generates_cc, - "Condition codes generation in VMAD is not implemented"); - - const bool result_signed = instr.video.signed_a == 1 || instr.video.signed_b == 1; - const std::string op_a = GetVideoOperandA(instr); - const std::string op_b = GetVideoOperandB(instr); - const std::string op_c = regs.GetRegisterAsInteger(instr.gpr39, 0, result_signed); - - std::string result = '(' + op_a + " * " + op_b + " + " + op_c + ')'; - - switch (instr.vmad.shr) { - case Tegra::Shader::VmadShr::Shr7: - result = '(' + result + " >> 7)"; - break; - case Tegra::Shader::VmadShr::Shr15: - result = '(' + result + " >> 15)"; - break; - } - - regs.SetRegisterToInteger(instr.gpr0, result_signed, 1, result, 1, 1, - instr.vmad.saturate, instr.vmad.cc); - break; - } - case OpCode::Id::VSETP: { - const std::string op_a = GetVideoOperandA(instr); - const std::string op_b = GetVideoOperandB(instr); - - // We can't use the constant predicate as destination. - ASSERT(instr.vsetp.pred3 != static_cast(Pred::UnusedIndex)); - - const std::string second_pred = GetPredicateCondition(instr.vsetp.pred39, false); - - const std::string combiner = GetPredicateCombiner(instr.vsetp.op); - - const std::string predicate = GetPredicateComparison(instr.vsetp.cond, op_a, op_b); - // Set the primary predicate to the result of Predicate OP SecondPredicate - SetPredicate(instr.vsetp.pred3, - '(' + predicate + ") " + combiner + " (" + second_pred + ')'); - - if (instr.vsetp.pred0 != static_cast(Pred::UnusedIndex)) { - // Set the secondary predicate to the result of !Predicate OP SecondPredicate, - // if enabled - SetPredicate(instr.vsetp.pred0, - "!(" + predicate + ") " + combiner + " (" + second_pred + ')'); - } - break; - } - default: { - UNIMPLEMENTED_MSG("Unhandled instruction: {}", opcode->get().GetName()); - break; - } - } - - break; - } - } - - // Close the predicate condition scope. - if (can_be_predicated && instr.pred.pred_index != static_cast(Pred::UnusedIndex)) { - --shader.scope; - shader.AddLine('}'); - } - - return offset + 1; + std::string GetSampler(const Sampler& sampler) const { + return GetDeclarationWithSuffix(static_cast(sampler.GetIndex()), "sampler"); } - /** - * Compiles a range of instructions from Tegra to GLSL. - * @param begin the offset of the starting instruction. - * @param end the offset where the compilation should stop (exclusive). - * @return the offset of the next instruction to compile. 
PROGRAM_END if the program - * terminates. - */ - u32 CompileRange(u32 begin, u32 end) { - u32 program_counter; - for (program_counter = begin; program_counter < (begin > end ? PROGRAM_END : end);) { - program_counter = CompileInstr(program_counter); - } - return program_counter; + std::string GetDeclarationWithSuffix(u32 index, const std::string& name) const { + return name + '_' + std::to_string(index) + '_' + suffix; } - void Generate(const std::string& suffix) { - // Add declarations for all subroutines - for (const auto& subroutine : subroutines) { - shader.AddLine("bool " + subroutine.GetName() + "();"); - } - shader.AddNewLine(); + const ShaderIR& ir; + const ShaderStage stage; + const std::string suffix; + const Header header; - // Add the main entry point - shader.AddLine("bool exec_" + suffix + "() {"); - ++shader.scope; - CallSubroutine(GetSubroutine(main_offset, PROGRAM_END)); - --shader.scope; - shader.AddLine("}\n"); - - // Add definitions for all subroutines - for (const auto& subroutine : subroutines) { - std::set labels = subroutine.labels; - - shader.AddLine("bool " + subroutine.GetName() + "() {"); - ++shader.scope; - - if (labels.empty()) { - if (CompileRange(subroutine.begin, subroutine.end) != PROGRAM_END) { - shader.AddLine("return false;"); - } - } else { - labels.insert(subroutine.begin); - shader.AddLine("uint jmp_to = " + std::to_string(subroutine.begin) + "u;"); - - // TODO(Subv): Figure out the actual depth of the flow stack, for now it seems - // unlikely that shaders will use 20 nested SSYs and PBKs. - constexpr u32 FLOW_STACK_SIZE = 20; - shader.AddLine("uint flow_stack[" + std::to_string(FLOW_STACK_SIZE) + "];"); - shader.AddLine("uint flow_stack_top = 0u;"); - - shader.AddLine("while (true) {"); - ++shader.scope; - - shader.AddLine("switch (jmp_to) {"); - - for (auto label : labels) { - shader.AddLine("case " + std::to_string(label) + "u: {"); - ++shader.scope; - - const auto next_it = labels.lower_bound(label + 1); - const u32 next_label = next_it == labels.end() ? 
subroutine.end : *next_it; - - const u32 compile_end = CompileRange(label, next_label); - if (compile_end > next_label && compile_end != PROGRAM_END) { - // This happens only when there is a label inside a IF/LOOP block - shader.AddLine(" jmp_to = " + std::to_string(compile_end) + "u; break; }"); - labels.emplace(compile_end); - } - - --shader.scope; - shader.AddLine('}'); - } - - shader.AddLine("default: return false;"); - shader.AddLine('}'); - - --shader.scope; - shader.AddLine('}'); - - shader.AddLine("return false;"); - } - - --shader.scope; - shader.AddLine("}\n"); - - DEBUG_ASSERT(shader.scope == 0); - } - - GenerateDeclarations(); - } - - /// Add declarations for registers - void GenerateDeclarations() { - regs.GenerateDeclarations(suffix); - - for (const auto& pred : declr_predicates) { - declarations.AddLine("bool " + pred + " = false;"); - } - declarations.AddNewLine(); - } - -private: - const std::set& subroutines; - const ProgramCode& program_code; - Tegra::Shader::Header header; - const u32 main_offset; - Maxwell3D::Regs::ShaderStage stage; - const std::string& suffix; - u64 local_memory_size; - std::size_t shader_length; - - ShaderWriter shader; - ShaderWriter declarations; - GLSLRegisterManager regs{shader, declarations, stage, suffix, header}; - - // Declarations - std::set declr_predicates; -}; // namespace OpenGL::GLShader::Decompiler + ShaderWriter code; +}; std::string GetCommonDeclarations() { - return fmt::format("#define MAX_CONSTBUFFER_ELEMENTS {}\n", - RasterizerOpenGL::MaxConstbufferSize / sizeof(GLvec4)); + return "#define MAX_CONSTBUFFER_ELEMENTS " + std::to_string(MAX_CONSTBUFFER_ELEMENTS) + + "\n" + "#define ftoi floatBitsToInt\n" + "#define ftou floatBitsToUint\n" + "#define itof intBitsToFloat\n" + "#define utof uintBitsToFloat\n\n" + "float fromHalf2(vec2 pair) {\n" + " return utof(packHalf2x16(pair));\n" + "}\n\n" + "vec2 toHalf2(float value) {\n" + " return unpackHalf2x16(ftou(value));\n" + "}\n"; } -std::optional DecompileProgram(const ProgramCode& program_code, u32 main_offset, - Maxwell3D::Regs::ShaderStage stage, - const std::string& suffix) { - try { - ControlFlowAnalyzer analyzer(program_code, main_offset, suffix); - const auto subroutines = analyzer.GetSubroutines(); - GLSLGenerator generator(subroutines, program_code, main_offset, stage, suffix, - analyzer.GetShaderLength()); - return ProgramResult{generator.GetShaderCode(), generator.GetEntries()}; - } catch (const DecompileFail& exception) { - LOG_ERROR(HW_GPU, "Shader decompilation failed: {}", exception.what()); - } - return {}; +ProgramResult Decompile(const ShaderIR& ir, Maxwell::ShaderStage stage, const std::string& suffix) { + GLSLDecompiler decompiler(ir, stage, suffix); + decompiler.Decompile(); + return {decompiler.GetResult(), decompiler.GetShaderEntries()}; } -} // namespace OpenGL::GLShader::Decompiler +} // namespace OpenGL::GLShader \ No newline at end of file diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.h b/src/video_core/renderer_opengl/gl_shader_decompiler.h index d01a4a7ee..396a560d8 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.h +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.h @@ -5,21 +5,84 @@ #pragma once #include -#include -#include #include +#include +#include #include "common/common_types.h" #include "video_core/engines/maxwell_3d.h" -#include "video_core/renderer_opengl/gl_shader_gen.h" +#include "video_core/shader/shader_ir.h" -namespace OpenGL::GLShader::Decompiler { +namespace VideoCommon::Shader { +class ShaderIR; 
+} -using Tegra::Engines::Maxwell3D; +namespace OpenGL::GLShader { + +using Maxwell = Tegra::Engines::Maxwell3D::Regs; + +class ConstBufferEntry : public VideoCommon::Shader::ConstBuffer { +public: + explicit ConstBufferEntry(const VideoCommon::Shader::ConstBuffer& entry, + Maxwell::ShaderStage stage, const std::string& name, u32 index) + : VideoCommon::Shader::ConstBuffer{entry}, stage{stage}, name{name}, index{index} {} + + const std::string& GetName() const { + return name; + } + + Maxwell::ShaderStage GetStage() const { + return stage; + } + + u32 GetIndex() const { + return index; + } + + u32 GetHash() const { + return (static_cast(stage) << 16) | index; + } + +private: + std::string name; + Maxwell::ShaderStage stage{}; + u32 index{}; +}; + +class SamplerEntry : public VideoCommon::Shader::Sampler { +public: + explicit SamplerEntry(const VideoCommon::Shader::Sampler& entry, Maxwell::ShaderStage stage, + const std::string& name) + : VideoCommon::Shader::Sampler{entry}, stage{stage}, name{name} {} + + const std::string& GetName() const { + return name; + } + + Maxwell::ShaderStage GetStage() const { + return stage; + } + + u32 GetHash() const { + return (static_cast(stage) << 16) | static_cast(GetIndex()); + } + +private: + std::string name; + Maxwell::ShaderStage stage{}; +}; + +struct ShaderEntries { + std::vector const_buffers; + std::vector samplers; + std::array clip_distances{}; + std::size_t shader_length{}; +}; + +using ProgramResult = std::pair; std::string GetCommonDeclarations(); -std::optional DecompileProgram(const ProgramCode& program_code, u32 main_offset, - Maxwell3D::Regs::ShaderStage stage, - const std::string& suffix); +ProgramResult Decompile(const VideoCommon::Shader::ShaderIR& ir, Maxwell::ShaderStage stage, + const std::string& suffix); -} // namespace OpenGL::GLShader::Decompiler +} // namespace OpenGL::GLShader \ No newline at end of file diff --git a/src/video_core/renderer_opengl/gl_shader_gen.cpp b/src/video_core/renderer_opengl/gl_shader_gen.cpp index 5d0819dc5..446d1a93f 100644 --- a/src/video_core/renderer_opengl/gl_shader_gen.cpp +++ b/src/video_core/renderer_opengl/gl_shader_gen.cpp @@ -7,22 +7,25 @@ #include "video_core/engines/maxwell_3d.h" #include "video_core/renderer_opengl/gl_shader_decompiler.h" #include "video_core/renderer_opengl/gl_shader_gen.h" +#include "video_core/shader/shader_ir.h" namespace OpenGL::GLShader { using Tegra::Engines::Maxwell3D; +using VideoCommon::Shader::ProgramCode; +using VideoCommon::Shader::ShaderIR; static constexpr u32 PROGRAM_OFFSET{10}; ProgramResult GenerateVertexShader(const ShaderSetup& setup) { + const std::string id = fmt::format("{:016x}", setup.program.unique_identifier); + std::string out = "#version 430 core\n"; out += "#extension GL_ARB_separate_shader_objects : enable\n\n"; - const std::string id = fmt::format("{:016x}", setup.program.unique_identifier); out += "// Shader Unique Id: VS" + id + "\n\n"; - out += Decompiler::GetCommonDeclarations(); + out += GetCommonDeclarations(); out += R"( - layout (location = 0) out vec4 position; layout(std140) uniform vs_config { @@ -30,40 +33,32 @@ layout(std140) uniform vs_config { uvec4 config_pack; // instance_id, flip_stage, y_direction, padding uvec4 alpha_test; }; + )"; - - if (setup.IsDualProgram()) { - out += "bool exec_vertex_b();\n"; - } - - ProgramResult program = - Decompiler::DecompileProgram(setup.program.code, PROGRAM_OFFSET, - Maxwell3D::Regs::ShaderStage::Vertex, "vertex") - .value_or(ProgramResult()); + ShaderIR program_ir(setup.program.code, 
PROGRAM_OFFSET); + ProgramResult program = Decompile(program_ir, Maxwell3D::Regs::ShaderStage::Vertex, "vertex"); out += program.first; if (setup.IsDualProgram()) { + ShaderIR program_ir_b(setup.program.code_b, PROGRAM_OFFSET); ProgramResult program_b = - Decompiler::DecompileProgram(setup.program.code_b, PROGRAM_OFFSET, - Maxwell3D::Regs::ShaderStage::Vertex, "vertex_b") - .value_or(ProgramResult()); + Decompile(program_ir_b, Maxwell3D::Regs::ShaderStage::Vertex, "vertex_b"); + out += program_b.first; } out += R"( - void main() { position = vec4(0.0, 0.0, 0.0, 0.0); - exec_vertex(); + execute_vertex(); )"; if (setup.IsDualProgram()) { - out += " exec_vertex_b();"; + out += " execute_vertex_b();"; } out += R"( - // Check if the flip stage is VertexB // Config pack's second value is flip_stage if (config_pack[1] == 1) { @@ -77,30 +72,20 @@ void main() { if (config_pack[1] == 1) { position.w = 1.0; } -} - -)"; +})"; return {out, program.second}; } ProgramResult GenerateGeometryShader(const ShaderSetup& setup) { // Version is intentionally skipped in shader generation, it's added by the lazy compilation. - std::string out = "#extension GL_ARB_separate_shader_objects : enable\n\n"; const std::string id = fmt::format("{:016x}", setup.program.unique_identifier); + + std::string out = "#extension GL_ARB_separate_shader_objects : enable\n\n"; out += "// Shader Unique Id: GS" + id + "\n\n"; - out += Decompiler::GetCommonDeclarations(); - out += "bool exec_geometry();\n"; + out += GetCommonDeclarations(); - ProgramResult program = - Decompiler::DecompileProgram(setup.program.code, PROGRAM_OFFSET, - Maxwell3D::Regs::ShaderStage::Geometry, "geometry") - .value_or(ProgramResult()); out += R"( -out gl_PerVertex { - vec4 gl_Position; -}; - layout (location = 0) in vec4 gs_position[]; layout (location = 0) out vec4 position; @@ -110,36 +95,37 @@ layout (std140) uniform gs_config { uvec4 alpha_test; }; -void main() { - exec_geometry(); -} - )"; + ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET); + ProgramResult program = + Decompile(program_ir, Maxwell3D::Regs::ShaderStage::Geometry, "geometry"); out += program.first; + + out += R"( +void main() { + execute_geometry(); +};)"; + return {out, program.second}; } ProgramResult GenerateFragmentShader(const ShaderSetup& setup) { + const std::string id = fmt::format("{:016x}", setup.program.unique_identifier); + std::string out = "#version 430 core\n"; out += "#extension GL_ARB_separate_shader_objects : enable\n\n"; - const std::string id = fmt::format("{:016x}", setup.program.unique_identifier); out += "// Shader Unique Id: FS" + id + "\n\n"; - out += Decompiler::GetCommonDeclarations(); - out += "bool exec_fragment();\n"; + out += GetCommonDeclarations(); - ProgramResult program = - Decompiler::DecompileProgram(setup.program.code, PROGRAM_OFFSET, - Maxwell3D::Regs::ShaderStage::Fragment, "fragment") - .value_or(ProgramResult()); out += R"( -layout(location = 0) out vec4 FragColor0; -layout(location = 1) out vec4 FragColor1; -layout(location = 2) out vec4 FragColor2; -layout(location = 3) out vec4 FragColor3; -layout(location = 4) out vec4 FragColor4; -layout(location = 5) out vec4 FragColor5; -layout(location = 6) out vec4 FragColor6; -layout(location = 7) out vec4 FragColor7; +layout (location = 0) out vec4 FragColor0; +layout (location = 1) out vec4 FragColor1; +layout (location = 2) out vec4 FragColor2; +layout (location = 3) out vec4 FragColor3; +layout (location = 4) out vec4 FragColor4; +layout (location = 5) out vec4 FragColor5; +layout (location 
= 6) out vec4 FragColor6; +layout (location = 7) out vec4 FragColor7; layout (location = 0) in vec4 position; @@ -173,12 +159,20 @@ bool AlphaFunc(in float value) { } } +)"; + ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET); + ProgramResult program = + Decompile(program_ir, Maxwell3D::Regs::ShaderStage::Fragment, "fragment"); + + out += program.first; + + out += R"( void main() { - exec_fragment(); + execute_fragment(); } )"; - out += program.first; return {out, program.second}; } -} // namespace OpenGL::GLShader + +} // namespace OpenGL::GLShader \ No newline at end of file diff --git a/src/video_core/renderer_opengl/gl_shader_gen.h b/src/video_core/renderer_opengl/gl_shader_gen.h index fcc20d3b4..ac5e6917b 100644 --- a/src/video_core/renderer_opengl/gl_shader_gen.h +++ b/src/video_core/renderer_opengl/gl_shader_gen.h @@ -10,164 +10,12 @@ #include "common/common_types.h" #include "video_core/engines/shader_bytecode.h" +#include "video_core/renderer_opengl/gl_shader_decompiler.h" +#include "video_core/shader/shader_ir.h" namespace OpenGL::GLShader { -constexpr std::size_t MAX_PROGRAM_CODE_LENGTH{0x1000}; -using ProgramCode = std::vector; - -enum : u32 { POSITION_VARYING_LOCATION = 0, GENERIC_VARYING_START_LOCATION = 1 }; - -class ConstBufferEntry { - using Maxwell = Tegra::Engines::Maxwell3D::Regs; - -public: - void MarkAsUsed(u64 index, u64 offset, Maxwell::ShaderStage stage) { - is_used = true; - this->index = static_cast(index); - this->stage = stage; - max_offset = std::max(max_offset, static_cast(offset)); - } - - void MarkAsUsedIndirect(u64 index, Maxwell::ShaderStage stage) { - is_used = true; - is_indirect = true; - this->index = static_cast(index); - this->stage = stage; - } - - bool IsUsed() const { - return is_used; - } - - bool IsIndirect() const { - return is_indirect; - } - - unsigned GetIndex() const { - return index; - } - - unsigned GetSize() const { - return max_offset + 1; - } - - std::string GetName() const { - return BufferBaseNames[static_cast(stage)] + std::to_string(index); - } - - u32 GetHash() const { - return (static_cast(stage) << 16) | index; - } - -private: - static constexpr std::array BufferBaseNames = { - "buffer_vs_c", "buffer_tessc_c", "buffer_tesse_c", "buffer_gs_c", "buffer_fs_c", - }; - - bool is_used{}; - bool is_indirect{}; - unsigned index{}; - unsigned max_offset{}; - Maxwell::ShaderStage stage; -}; - -class SamplerEntry { - using Maxwell = Tegra::Engines::Maxwell3D::Regs; - -public: - SamplerEntry(Maxwell::ShaderStage stage, std::size_t offset, std::size_t index, - Tegra::Shader::TextureType type, bool is_array, bool is_shadow) - : offset(offset), stage(stage), sampler_index(index), type(type), is_array(is_array), - is_shadow(is_shadow) {} - - std::size_t GetOffset() const { - return offset; - } - - std::size_t GetIndex() const { - return sampler_index; - } - - Maxwell::ShaderStage GetStage() const { - return stage; - } - - std::string GetName() const { - return std::string(TextureSamplerNames[static_cast(stage)]) + '_' + - std::to_string(sampler_index); - } - - std::string GetTypeString() const { - using Tegra::Shader::TextureType; - std::string glsl_type; - - switch (type) { - case TextureType::Texture1D: - glsl_type = "sampler1D"; - break; - case TextureType::Texture2D: - glsl_type = "sampler2D"; - break; - case TextureType::Texture3D: - glsl_type = "sampler3D"; - break; - case TextureType::TextureCube: - glsl_type = "samplerCube"; - break; - default: - UNIMPLEMENTED(); - } - if (is_array) - glsl_type += "Array"; - if (is_shadow) - 
glsl_type += "Shadow"; - return glsl_type; - } - - Tegra::Shader::TextureType GetType() const { - return type; - } - - bool IsArray() const { - return is_array; - } - - bool IsShadow() const { - return is_shadow; - } - - u32 GetHash() const { - return (static_cast(stage) << 16) | static_cast(sampler_index); - } - - static std::string GetArrayName(Maxwell::ShaderStage stage) { - return TextureSamplerNames[static_cast(stage)]; - } - -private: - static constexpr std::array TextureSamplerNames = { - "tex_vs", "tex_tessc", "tex_tesse", "tex_gs", "tex_fs", - }; - - /// Offset in TSC memory from which to read the sampler object, as specified by the sampling - /// instruction. - std::size_t offset; - Maxwell::ShaderStage stage; ///< Shader stage where this sampler was used. - std::size_t sampler_index; ///< Value used to index into the generated GLSL sampler array. - Tegra::Shader::TextureType type; ///< The type used to sample this texture (Texture2D, etc) - bool is_array; ///< Whether the texture is being sampled as an array texture or not. - bool is_shadow; ///< Whether the texture is being sampled as a depth texture or not. -}; - -struct ShaderEntries { - std::vector const_buffer_entries; - std::vector texture_samplers; - std::array clip_distances; - std::size_t shader_length; -}; - -using ProgramResult = std::pair; +using VideoCommon::Shader::ProgramCode; struct ShaderSetup { explicit ShaderSetup(ProgramCode program_code) { diff --git a/src/video_core/shader/decode.cpp b/src/video_core/shader/decode.cpp new file mode 100644 index 000000000..6fdcac784 --- /dev/null +++ b/src/video_core/shader/decode.cpp @@ -0,0 +1,206 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include +#include + +#include + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/engines/shader_header.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +namespace { + +/// Merges exit method of two parallel branches. +constexpr ExitMethod ParallelExit(ExitMethod a, ExitMethod b) { + if (a == ExitMethod::Undetermined) { + return b; + } + if (b == ExitMethod::Undetermined) { + return a; + } + if (a == b) { + return a; + } + return ExitMethod::Conditional; +} + +/** + * Returns whether the instruction at the specified offset is a 'sched' instruction. + * Sched instructions always appear before a sequence of 3 instructions. + */ +constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) { + constexpr u32 SchedPeriod = 4; + u32 absolute_offset = offset - main_offset; + + return (absolute_offset % SchedPeriod) == 0; +} + +} // namespace + +void ShaderIR::Decode() { + std::memcpy(&header, program_code.data(), sizeof(Tegra::Shader::Header)); + + std::set labels; + const ExitMethod exit_method = Scan(main_offset, MAX_PROGRAM_LENGTH, labels); + if (exit_method != ExitMethod::AlwaysEnd) { + UNREACHABLE_MSG("Program does not always end"); + } + + if (labels.empty()) { + basic_blocks.insert({main_offset, DecodeRange(main_offset, MAX_PROGRAM_LENGTH)}); + return; + } + + labels.insert(main_offset); + + for (const u32 label : labels) { + const auto next_it = labels.lower_bound(label + 1); + const u32 next_label = next_it == labels.end() ? 
MAX_PROGRAM_LENGTH : *next_it; + + basic_blocks.insert({label, DecodeRange(label, next_label)}); + } +} + +ExitMethod ShaderIR::Scan(u32 begin, u32 end, std::set& labels) { + const auto [iter, inserted] = + exit_method_map.emplace(std::make_pair(begin, end), ExitMethod::Undetermined); + ExitMethod& exit_method = iter->second; + if (!inserted) + return exit_method; + + for (u32 offset = begin; offset != end && offset != MAX_PROGRAM_LENGTH; ++offset) { + coverage_begin = std::min(coverage_begin, offset); + coverage_end = std::max(coverage_end, offset + 1); + + const Instruction instr = {program_code[offset]}; + const auto opcode = OpCode::Decode(instr); + if (!opcode) + continue; + switch (opcode->get().GetId()) { + case OpCode::Id::EXIT: { + // The EXIT instruction can be predicated, which means that the shader can conditionally + // end on this instruction. We have to consider the case where the condition is not met + // and check the exit method of that other basic block. + using Tegra::Shader::Pred; + if (instr.pred.pred_index == static_cast(Pred::UnusedIndex)) { + return exit_method = ExitMethod::AlwaysEnd; + } else { + const ExitMethod not_met = Scan(offset + 1, end, labels); + return exit_method = ParallelExit(ExitMethod::AlwaysEnd, not_met); + } + } + case OpCode::Id::BRA: { + const u32 target = offset + instr.bra.GetBranchTarget(); + labels.insert(target); + const ExitMethod no_jmp = Scan(offset + 1, end, labels); + const ExitMethod jmp = Scan(target, end, labels); + return exit_method = ParallelExit(no_jmp, jmp); + } + case OpCode::Id::SSY: + case OpCode::Id::PBK: { + // The SSY and PBK use a similar encoding as the BRA instruction. + UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, + "Constant buffer branching is not supported"); + const u32 target = offset + instr.bra.GetBranchTarget(); + labels.insert(target); + // Continue scanning for an exit method. + break; + } + } + } + return exit_method = ExitMethod::AlwaysReturn; +} + +BasicBlock ShaderIR::DecodeRange(u32 begin, u32 end) { + BasicBlock basic_block; + for (u32 pc = begin; pc < (begin > end ? MAX_PROGRAM_LENGTH : end);) { + pc = DecodeInstr(basic_block, pc); + } + return std::move(basic_block); +} + +u32 ShaderIR::DecodeInstr(BasicBlock& bb, u32 pc) { + // Ignore sched instructions when generating code. 
+ if (IsSchedInstruction(pc, main_offset)) { + return pc + 1; + } + + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + // Decoding failure + if (!opcode) { + UNIMPLEMENTED_MSG("Unhandled instruction: {0:x}", instr.value); + return pc + 1; + } + + bb.push_back( + Comment(fmt::format("{}: {} (0x{:016x})", pc, opcode->get().GetName(), instr.value))); + + using Tegra::Shader::Pred; + UNIMPLEMENTED_IF_MSG(instr.pred.full_pred == Pred::NeverExecute, + "NeverExecute predicate not implemented"); + + static const std::map + decoders = { + {OpCode::Type::Arithmetic, &ShaderIR::DecodeArithmetic}, + {OpCode::Type::ArithmeticImmediate, &ShaderIR::DecodeArithmeticImmediate}, + {OpCode::Type::Bfe, &ShaderIR::DecodeBfe}, + {OpCode::Type::Bfi, &ShaderIR::DecodeBfi}, + {OpCode::Type::Shift, &ShaderIR::DecodeShift}, + {OpCode::Type::ArithmeticInteger, &ShaderIR::DecodeArithmeticInteger}, + {OpCode::Type::ArithmeticIntegerImmediate, &ShaderIR::DecodeArithmeticIntegerImmediate}, + {OpCode::Type::ArithmeticHalf, &ShaderIR::DecodeArithmeticHalf}, + {OpCode::Type::ArithmeticHalfImmediate, &ShaderIR::DecodeArithmeticHalfImmediate}, + {OpCode::Type::Ffma, &ShaderIR::DecodeFfma}, + {OpCode::Type::Hfma2, &ShaderIR::DecodeHfma2}, + {OpCode::Type::Conversion, &ShaderIR::DecodeConversion}, + {OpCode::Type::Memory, &ShaderIR::DecodeMemory}, + {OpCode::Type::FloatSetPredicate, &ShaderIR::DecodeFloatSetPredicate}, + {OpCode::Type::IntegerSetPredicate, &ShaderIR::DecodeIntegerSetPredicate}, + {OpCode::Type::HalfSetPredicate, &ShaderIR::DecodeHalfSetPredicate}, + {OpCode::Type::PredicateSetRegister, &ShaderIR::DecodePredicateSetRegister}, + {OpCode::Type::PredicateSetPredicate, &ShaderIR::DecodePredicateSetPredicate}, + {OpCode::Type::RegisterSetPredicate, &ShaderIR::DecodeRegisterSetPredicate}, + {OpCode::Type::FloatSet, &ShaderIR::DecodeFloatSet}, + {OpCode::Type::IntegerSet, &ShaderIR::DecodeIntegerSet}, + {OpCode::Type::HalfSet, &ShaderIR::DecodeHalfSet}, + {OpCode::Type::Video, &ShaderIR::DecodeVideo}, + {OpCode::Type::Xmad, &ShaderIR::DecodeXmad}, + }; + + std::vector tmp_block; + if (const auto decoder = decoders.find(opcode->get().GetType()); decoder != decoders.end()) { + pc = (this->*decoder->second)(tmp_block, bb, pc); + } else { + pc = DecodeOther(tmp_block, bb, pc); + } + + // Some instructions (like SSY) don't have a predicate field, they are always unconditionally + // executed. + const bool can_be_predicated = OpCode::IsPredicatedInstruction(opcode->get().GetId()); + const auto pred_index = static_cast(instr.pred.pred_index); + + if (can_be_predicated && pred_index != static_cast(Pred::UnusedIndex)) { + bb.push_back( + Conditional(GetPredicate(pred_index, instr.negate_pred != 0), std::move(tmp_block))); + } else { + for (auto& node : tmp_block) { + bb.push_back(std::move(node)); + } + } + + return pc + 1; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/arithmetic.cpp b/src/video_core/shader/decode/arithmetic.cpp new file mode 100644 index 000000000..e7847f614 --- /dev/null +++ b/src/video_core/shader/decode/arithmetic.cpp @@ -0,0 +1,155 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
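The decoder table above keys OpCode::Type to pointers-to-member-functions and dispatches through (this->*decoder->second)(...). A stripped-down sketch of that pattern with placeholder types and opcode numbers (nothing here mirrors real opcode values or the real ShaderIR signature):

#include <cstdint>
#include <map>

struct Block {}; // stand-in for the real BasicBlock type

class Decoder {
public:
    std::uint32_t DecodeOne(Block& bb, std::uint32_t pc, int opcode_type) {
        using DecoderFn = std::uint32_t (Decoder::*)(Block&, std::uint32_t);
        static const std::map<int, DecoderFn> table = {
            {0, &Decoder::DecodeArithmetic},
            {1, &Decoder::DecodeMemory},
        };
        if (const auto it = table.find(opcode_type); it != table.end()) {
            return (this->*it->second)(bb, pc); // same invocation syntax as above
        }
        return DecodeOther(bb, pc);
    }

private:
    std::uint32_t DecodeArithmetic(Block&, std::uint32_t pc) { return pc + 1; }
    std::uint32_t DecodeMemory(Block&, std::uint32_t pc) { return pc + 1; }
    std::uint32_t DecodeOther(Block&, std::uint32_t pc) { return pc + 1; }
};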
+ +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::SubOp; + +u32 ShaderIR::DecodeArithmetic(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + Node op_a = GetRegister(instr.gpr8); + + Node op_b = [&]() -> Node { + if (instr.is_b_imm) { + return GetImmediate19(instr); + } else if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset); + } + }(); + + switch (opcode->get().GetId()) { + case OpCode::Id::MOV_C: + case OpCode::Id::MOV_R: { + // MOV does not have neither 'abs' nor 'neg' bits. + SetRegister(bb, instr.gpr0, op_b); + break; + } + case OpCode::Id::FMUL_C: + case OpCode::Id::FMUL_R: + case OpCode::Id::FMUL_IMM: { + // FMUL does not have 'abs' bits and only the second operand has a 'neg' bit. + UNIMPLEMENTED_IF_MSG(instr.fmul.tab5cb8_2 != 0, "FMUL tab5cb8_2({}) is not implemented", + instr.fmul.tab5cb8_2.Value()); + UNIMPLEMENTED_IF_MSG( + instr.fmul.tab5c68_0 != 1, "FMUL tab5cb8_0({}) is not implemented", + instr.fmul.tab5c68_0.Value()); // SMO typical sends 1 here which seems to be the default + + op_b = GetOperandAbsNegFloat(op_b, false, instr.fmul.negate_b); + + // TODO(Rodrigo): Should precise be used when there's a postfactor? + Node value = Operation(OperationCode::FMul, PRECISE, op_a, op_b); + + if (instr.fmul.postfactor != 0) { + auto postfactor = static_cast(instr.fmul.postfactor); + + // Postfactor encoded as 3-bit 1's complement in instruction, interpreted with below + // logic. 
+ if (postfactor >= 4) { + postfactor = 7 - postfactor; + } else { + postfactor = 0 - postfactor; + } + + if (postfactor > 0) { + value = Operation(OperationCode::FMul, NO_PRECISE, value, + Immediate(static_cast(1 << postfactor))); + } else { + value = Operation(OperationCode::FDiv, NO_PRECISE, value, + Immediate(static_cast(1 << -postfactor))); + } + } + + value = GetSaturatedFloat(value, instr.alu.saturate_d); + + SetInternalFlagsFromFloat(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::FADD_C: + case OpCode::Id::FADD_R: + case OpCode::Id::FADD_IMM: { + op_a = GetOperandAbsNegFloat(op_a, instr.alu.abs_a, instr.alu.negate_a); + op_b = GetOperandAbsNegFloat(op_b, instr.alu.abs_b, instr.alu.negate_b); + + Node value = Operation(OperationCode::FAdd, PRECISE, op_a, op_b); + value = GetSaturatedFloat(value, instr.alu.saturate_d); + + SetInternalFlagsFromFloat(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::MUFU: { + op_a = GetOperandAbsNegFloat(op_a, instr.alu.abs_a, instr.alu.negate_a); + + Node value = [&]() { + switch (instr.sub_op) { + case SubOp::Cos: + return Operation(OperationCode::FCos, PRECISE, op_a); + case SubOp::Sin: + return Operation(OperationCode::FSin, PRECISE, op_a); + case SubOp::Ex2: + return Operation(OperationCode::FExp2, PRECISE, op_a); + case SubOp::Lg2: + return Operation(OperationCode::FLog2, PRECISE, op_a); + case SubOp::Rcp: + return Operation(OperationCode::FDiv, PRECISE, Immediate(1.0f), op_a); + case SubOp::Rsq: + return Operation(OperationCode::FInverseSqrt, PRECISE, op_a); + case SubOp::Sqrt: + return Operation(OperationCode::FSqrt, PRECISE, op_a); + default: + UNIMPLEMENTED_MSG("Unhandled MUFU sub op={0:x}", + static_cast(instr.sub_op.Value())); + return Immediate(0); + } + }(); + value = GetSaturatedFloat(value, instr.alu.saturate_d); + + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::FMNMX_C: + case OpCode::Id::FMNMX_R: + case OpCode::Id::FMNMX_IMM: { + op_a = GetOperandAbsNegFloat(op_a, instr.alu.abs_a, instr.alu.negate_a); + op_b = GetOperandAbsNegFloat(op_b, instr.alu.abs_b, instr.alu.negate_b); + + const Node condition = GetPredicate(instr.alu.fmnmx.pred, instr.alu.fmnmx.negate_pred != 0); + + const Node min = Operation(OperationCode::FMin, NO_PRECISE, op_a, op_b); + const Node max = Operation(OperationCode::FMax, NO_PRECISE, op_a, op_b); + const Node value = Operation(OperationCode::Select, NO_PRECISE, condition, min, max); + + SetInternalFlagsFromFloat(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::RRO_C: + case OpCode::Id::RRO_R: + case OpCode::Id::RRO_IMM: { + // Currently RRO is only implemented as a register move. + op_b = GetOperandAbsNegFloat(op_b, instr.alu.abs_b, instr.alu.negate_b); + SetRegister(bb, instr.gpr0, op_b); + LOG_WARNING(HW_GPU, "RRO instruction is incomplete"); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled arithmetic instruction: {}", opcode->get().GetName()); + } + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/arithmetic_half.cpp b/src/video_core/shader/decode/arithmetic_half.cpp new file mode 100644 index 000000000..a237dcb92 --- /dev/null +++ b/src/video_core/shader/decode/arithmetic_half.cpp @@ -0,0 +1,70 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
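The FMUL postfactor handling above interprets a 3-bit one's-complement field as a power-of-two scale applied after the multiply; a host-side sketch of just that decoding step:

#include <cstdint>

// Decode the 3-bit FMUL postfactor: field values 4..7 encode exponents
// +3..0 in one's complement, values 0..3 encode exponents 0..-3.
constexpr float PostfactorScale(std::uint32_t field) {
    int exponent = static_cast<int>(field);
    exponent = exponent >= 4 ? 7 - exponent : -exponent;
    return exponent >= 0 ? static_cast<float>(1 << exponent)
                         : 1.0f / static_cast<float>(1 << -exponent);
}

static_assert(PostfactorScale(5) == 4.0f);  // exponent +2 -> multiply by 4
static_assert(PostfactorScale(2) == 0.25f); // exponent -2 -> divide by 4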
+ +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeArithmeticHalf(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + if (opcode->get().GetId() == OpCode::Id::HADD2_C || + opcode->get().GetId() == OpCode::Id::HADD2_R) { + UNIMPLEMENTED_IF(instr.alu_half.ftz != 0); + } + UNIMPLEMENTED_IF_MSG(instr.alu_half.saturate != 0, "Half float saturation not implemented"); + + const bool negate_a = + opcode->get().GetId() != OpCode::Id::HMUL2_R && instr.alu_half.negate_a != 0; + const bool negate_b = + opcode->get().GetId() != OpCode::Id::HMUL2_C && instr.alu_half.negate_b != 0; + + const Node op_a = GetOperandAbsNegHalf(GetRegister(instr.gpr8), instr.alu_half.abs_a, negate_a); + + // instr.alu_half.type_a + + Node op_b = [&]() { + switch (opcode->get().GetId()) { + case OpCode::Id::HADD2_C: + case OpCode::Id::HMUL2_C: + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset); + case OpCode::Id::HADD2_R: + case OpCode::Id::HMUL2_R: + return GetRegister(instr.gpr20); + default: + UNREACHABLE(); + return Immediate(0); + } + }(); + op_b = GetOperandAbsNegHalf(op_b, instr.alu_half.abs_b, negate_b); + + Node value = [&]() { + MetaHalfArithmetic meta{true, {instr.alu_half_imm.type_a, instr.alu_half.type_b}}; + switch (opcode->get().GetId()) { + case OpCode::Id::HADD2_C: + case OpCode::Id::HADD2_R: + return Operation(OperationCode::HAdd, meta, op_a, op_b); + case OpCode::Id::HMUL2_C: + case OpCode::Id::HMUL2_R: + return Operation(OperationCode::HMul, meta, op_a, op_b); + default: + UNIMPLEMENTED_MSG("Unhandled half float instruction: {}", opcode->get().GetName()); + return Immediate(0); + } + }(); + value = HalfMerge(GetRegister(instr.gpr0), value, instr.alu_half.merge); + + SetRegister(bb, instr.gpr0, value); + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/arithmetic_half_immediate.cpp b/src/video_core/shader/decode/arithmetic_half_immediate.cpp new file mode 100644 index 000000000..7b4f7d284 --- /dev/null +++ b/src/video_core/shader/decode/arithmetic_half_immediate.cpp @@ -0,0 +1,51 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
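HADD2 and HMUL2 above operate on both f16 lanes of a register at once. A simplified sketch of that pairwise arithmetic, using plain floats in place of real half-precision values and ignoring the merge and ftz behaviour:

#include <array>

using Half2 = std::array<float, 2>; // stand-in for the two packed f16 lanes

// Pairwise lane arithmetic, the way HADD2/HMUL2 are modelled above.
Half2 HAdd2(const Half2& a, const Half2& b) {
    return {a[0] + b[0], a[1] + b[1]};
}

Half2 HMul2(const Half2& a, const Half2& b) {
    return {a[0] * b[0], a[1] * b[1]};
}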
+ +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeArithmeticHalfImmediate(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + if (opcode->get().GetId() == OpCode::Id::HADD2_IMM) { + UNIMPLEMENTED_IF(instr.alu_half_imm.ftz != 0); + } else { + UNIMPLEMENTED_IF(instr.alu_half_imm.precision != Tegra::Shader::HalfPrecision::None); + } + UNIMPLEMENTED_IF_MSG(instr.alu_half_imm.saturate != 0, + "Half float immediate saturation not implemented"); + + Node op_a = GetRegister(instr.gpr8); + op_a = GetOperandAbsNegHalf(op_a, instr.alu_half_imm.abs_a, instr.alu_half_imm.negate_a); + + const Node op_b = UnpackHalfImmediate(instr, true); + + Node value = [&]() { + MetaHalfArithmetic meta{true, {instr.alu_half_imm.type_a}}; + switch (opcode->get().GetId()) { + case OpCode::Id::HADD2_IMM: + return Operation(OperationCode::HAdd, meta, op_a, op_b); + case OpCode::Id::HMUL2_IMM: + return Operation(OperationCode::HMul, meta, op_a, op_b); + default: + UNREACHABLE(); + return Immediate(0); + } + }(); + value = HalfMerge(GetRegister(instr.gpr0), value, instr.alu_half_imm.merge); + + SetRegister(bb, instr.gpr0, value); + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/arithmetic_immediate.cpp b/src/video_core/shader/decode/arithmetic_immediate.cpp new file mode 100644 index 000000000..4fd3db54e --- /dev/null +++ b/src/video_core/shader/decode/arithmetic_immediate.cpp @@ -0,0 +1,52 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
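GetOperandAbsNegHalf above layers the absolute-value and negation modifiers onto an operand. The equivalent composition for a scalar float, as a minimal sketch (abs applied before neg, the conventional order for these modifiers):

#include <cmath>

// Apply the operand modifiers: absolute value first, then negation.
float ApplyAbsNeg(float value, bool abs, bool neg) {
    if (abs) {
        value = std::fabs(value);
    }
    if (neg) {
        value = -value;
    }
    return value;
}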
+ +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeArithmeticImmediate(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + switch (opcode->get().GetId()) { + case OpCode::Id::MOV32_IMM: { + SetRegister(bb, instr.gpr0, GetImmediate32(instr)); + break; + } + case OpCode::Id::FMUL32_IMM: { + Node value = + Operation(OperationCode::FMul, PRECISE, GetRegister(instr.gpr8), GetImmediate32(instr)); + value = GetSaturatedFloat(value, instr.fmul32.saturate); + + SetInternalFlagsFromFloat(bb, value, instr.op_32.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::FADD32I: { + const Node op_a = GetOperandAbsNegFloat(GetRegister(instr.gpr8), instr.fadd32i.abs_a, + instr.fadd32i.negate_a); + const Node op_b = GetOperandAbsNegFloat(GetImmediate32(instr), instr.fadd32i.abs_b, + instr.fadd32i.negate_b); + + const Node value = Operation(OperationCode::FAdd, PRECISE, op_a, op_b); + SetInternalFlagsFromFloat(bb, value, instr.op_32.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled arithmetic immediate instruction: {}", + opcode->get().GetName()); + } + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/arithmetic_integer.cpp b/src/video_core/shader/decode/arithmetic_integer.cpp new file mode 100644 index 000000000..4a8cc1a1c --- /dev/null +++ b/src/video_core/shader/decode/arithmetic_integer.cpp @@ -0,0 +1,287 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
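FMUL32_IMM and FADD32I above consume a full 32-bit immediate whose bit pattern is then used as an IEEE-754 float. A sketch of that reinterpretation; how GetImmediate32 stores the value internally is an IR detail, this only shows the conceptual step:

#include <cstdint>
#include <cstring>

// Reinterpret the raw 32-bit immediate as a float without breaking
// strict-aliasing rules (std::bit_cast is the C++20 equivalent).
float ImmediateAsFloat(std::uint32_t raw) {
    float result;
    std::memcpy(&result, &raw, sizeof(result));
    return result;
}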
+ +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::IAdd3Height; +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Pred; +using Tegra::Shader::Register; + +u32 ShaderIR::DecodeArithmeticInteger(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + Node op_a = GetRegister(instr.gpr8); + Node op_b = [&]() { + if (instr.is_b_imm) { + return Immediate(instr.alu.GetSignedImm20_20()); + } else if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset); + } + }(); + + switch (opcode->get().GetId()) { + case OpCode::Id::IADD_C: + case OpCode::Id::IADD_R: + case OpCode::Id::IADD_IMM: { + UNIMPLEMENTED_IF_MSG(instr.alu.saturate_d, "IADD saturation not implemented"); + + op_a = GetOperandAbsNegInteger(op_a, false, instr.alu_integer.negate_a, true); + op_b = GetOperandAbsNegInteger(op_b, false, instr.alu_integer.negate_b, true); + + const Node value = Operation(OperationCode::IAdd, PRECISE, op_a, op_b); + + SetInternalFlagsFromInteger(bb, value, instr.op_32.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::IADD3_C: + case OpCode::Id::IADD3_R: + case OpCode::Id::IADD3_IMM: { + Node op_c = GetRegister(instr.gpr39); + + const auto ApplyHeight = [&](IAdd3Height height, Node value) { + switch (height) { + case IAdd3Height::None: + return value; + case IAdd3Height::LowerHalfWord: + return BitfieldExtract(value, 0, 16); + case IAdd3Height::UpperHalfWord: + return BitfieldExtract(value, 16, 16); + default: + UNIMPLEMENTED_MSG("Unhandled IADD3 height: {}", static_cast(height)); + return Immediate(0); + } + }; + + if (opcode->get().GetId() == OpCode::Id::IADD3_R) { + op_a = ApplyHeight(instr.iadd3.height_a, op_a); + op_b = ApplyHeight(instr.iadd3.height_b, op_b); + op_c = ApplyHeight(instr.iadd3.height_c, op_c); + } + + op_a = GetOperandAbsNegInteger(op_a, false, instr.iadd3.neg_a, true); + op_b = GetOperandAbsNegInteger(op_b, false, instr.iadd3.neg_b, true); + op_c = GetOperandAbsNegInteger(op_c, false, instr.iadd3.neg_c, true); + + const Node value = [&]() { + const Node add_ab = Operation(OperationCode::IAdd, NO_PRECISE, op_a, op_b); + if (opcode->get().GetId() != OpCode::Id::IADD3_R) { + return Operation(OperationCode::IAdd, NO_PRECISE, add_ab, op_c); + } + const Node shifted = [&]() { + switch (instr.iadd3.mode) { + case Tegra::Shader::IAdd3Mode::RightShift: + // TODO(tech4me): According to + // https://envytools.readthedocs.io/en/latest/hw/graph/maxwell/cuda/int.html?highlight=iadd3 + // The addition between op_a and op_b should be done in uint33, more + // investigation required + return Operation(OperationCode::ILogicalShiftRight, NO_PRECISE, add_ab, + Immediate(16)); + case Tegra::Shader::IAdd3Mode::LeftShift: + return Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, add_ab, + Immediate(16)); + default: + return add_ab; + } + }(); + return Operation(OperationCode::IAdd, NO_PRECISE, shifted, op_c); + }(); + + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::ISCADD_C: + case OpCode::Id::ISCADD_R: + case OpCode::Id::ISCADD_IMM: { + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in ISCADD is not 
implemented"); + + op_a = GetOperandAbsNegInteger(op_a, false, instr.alu_integer.negate_a, true); + op_b = GetOperandAbsNegInteger(op_b, false, instr.alu_integer.negate_b, true); + + const Node shift = Immediate(static_cast(instr.alu_integer.shift_amount)); + const Node shifted_a = Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, op_a, shift); + const Node value = Operation(OperationCode::IAdd, NO_PRECISE, shifted_a, op_b); + + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::POPC_C: + case OpCode::Id::POPC_R: + case OpCode::Id::POPC_IMM: { + if (instr.popc.invert) { + op_b = Operation(OperationCode::IBitwiseNot, NO_PRECISE, op_b); + } + const Node value = Operation(OperationCode::IBitCount, PRECISE, op_b); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::SEL_C: + case OpCode::Id::SEL_R: + case OpCode::Id::SEL_IMM: { + const Node condition = GetPredicate(instr.sel.pred, instr.sel.neg_pred != 0); + const Node value = Operation(OperationCode::Select, PRECISE, condition, op_a, op_b); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::LOP_C: + case OpCode::Id::LOP_R: + case OpCode::Id::LOP_IMM: { + if (instr.alu.lop.invert_a) + op_a = Operation(OperationCode::IBitwiseNot, NO_PRECISE, op_a); + if (instr.alu.lop.invert_b) + op_b = Operation(OperationCode::IBitwiseNot, NO_PRECISE, op_b); + + WriteLogicOperation(bb, instr.gpr0, instr.alu.lop.operation, op_a, op_b, + instr.alu.lop.pred_result_mode, instr.alu.lop.pred48, + instr.generates_cc); + break; + } + case OpCode::Id::LOP3_C: + case OpCode::Id::LOP3_R: + case OpCode::Id::LOP3_IMM: { + const Node op_c = GetRegister(instr.gpr39); + const Node lut = [&]() { + if (opcode->get().GetId() == OpCode::Id::LOP3_R) { + return Immediate(instr.alu.lop3.GetImmLut28()); + } else { + return Immediate(instr.alu.lop3.GetImmLut48()); + } + }(); + + WriteLop3Instruction(bb, instr.gpr0, op_a, op_b, op_c, lut, instr.generates_cc); + break; + } + case OpCode::Id::IMNMX_C: + case OpCode::Id::IMNMX_R: + case OpCode::Id::IMNMX_IMM: { + UNIMPLEMENTED_IF(instr.imnmx.exchange != Tegra::Shader::IMinMaxExchange::None); + + const bool is_signed = instr.imnmx.is_signed; + + const Node condition = GetPredicate(instr.imnmx.pred, instr.imnmx.negate_pred != 0); + const Node min = SignedOperation(OperationCode::IMin, is_signed, NO_PRECISE, op_a, op_b); + const Node max = SignedOperation(OperationCode::IMax, is_signed, NO_PRECISE, op_a, op_b); + const Node value = Operation(OperationCode::Select, NO_PRECISE, condition, min, max); + + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::LEA_R2: + case OpCode::Id::LEA_R1: + case OpCode::Id::LEA_IMM: + case OpCode::Id::LEA_RZ: + case OpCode::Id::LEA_HI: { + const auto [op_a, op_b, op_c] = [&]() -> std::tuple { + switch (opcode->get().GetId()) { + case OpCode::Id::LEA_R2: { + return {GetRegister(instr.gpr20), GetRegister(instr.gpr39), + Immediate(static_cast(instr.lea.r2.entry_a))}; + } + + case OpCode::Id::LEA_R1: { + const bool neg = instr.lea.r1.neg != 0; + return {GetOperandAbsNegInteger(GetRegister(instr.gpr8), false, neg, true), + GetRegister(instr.gpr20), + Immediate(static_cast(instr.lea.r1.entry_a))}; + } + + case OpCode::Id::LEA_IMM: { + const bool neg = instr.lea.imm.neg != 0; + return {Immediate(static_cast(instr.lea.imm.entry_a)), + GetOperandAbsNegInteger(GetRegister(instr.gpr8), false, neg, true), + 
Immediate(static_cast(instr.lea.imm.entry_b))}; + } + + case OpCode::Id::LEA_RZ: { + const bool neg = instr.lea.rz.neg != 0; + return {GetConstBuffer(instr.lea.rz.cb_index, instr.lea.rz.cb_offset), + GetOperandAbsNegInteger(GetRegister(instr.gpr8), false, neg, true), + Immediate(static_cast(instr.lea.rz.entry_a))}; + } + + case OpCode::Id::LEA_HI: + default: + UNIMPLEMENTED_MSG("Unhandled LEA subinstruction: {}", opcode->get().GetName()); + + return {Immediate(static_cast(instr.lea.imm.entry_a)), GetRegister(instr.gpr8), + Immediate(static_cast(instr.lea.imm.entry_b))}; + } + }(); + + UNIMPLEMENTED_IF_MSG(instr.lea.pred48 != static_cast(Pred::UnusedIndex), + "Unhandled LEA Predicate"); + + const Node shifted_c = + Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, Immediate(1), op_c); + const Node mul_bc = Operation(OperationCode::IMul, NO_PRECISE, op_b, shifted_c); + const Node value = Operation(OperationCode::IAdd, NO_PRECISE, op_a, mul_bc); + + SetRegister(bb, instr.gpr0, value); + + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled ArithmeticInteger instruction: {}", opcode->get().GetName()); + } + + return pc; +} + +void ShaderIR::WriteLop3Instruction(BasicBlock& bb, Register dest, Node op_a, Node op_b, Node op_c, + Node imm_lut, bool sets_cc) { + constexpr u32 lop_iterations = 32; + const Node one = Immediate(1); + const Node two = Immediate(2); + + Node value{}; + for (u32 i = 0; i < lop_iterations; ++i) { + const Node shift_amount = Immediate(i); + + const Node a = Operation(OperationCode::ILogicalShiftRight, NO_PRECISE, op_c, shift_amount); + const Node pack_0 = Operation(OperationCode::IBitwiseAnd, NO_PRECISE, a, one); + + const Node b = Operation(OperationCode::ILogicalShiftRight, NO_PRECISE, op_b, shift_amount); + const Node c = Operation(OperationCode::IBitwiseAnd, NO_PRECISE, b, one); + const Node pack_1 = Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, c, one); + + const Node d = Operation(OperationCode::ILogicalShiftRight, NO_PRECISE, op_a, shift_amount); + const Node e = Operation(OperationCode::IBitwiseAnd, NO_PRECISE, d, one); + const Node pack_2 = Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, e, two); + + const Node pack_01 = Operation(OperationCode::IBitwiseAnd, NO_PRECISE, pack_0, pack_1); + const Node pack_012 = Operation(OperationCode::IBitwiseAnd, NO_PRECISE, pack_01, pack_2); + + const Node shifted_bit = + Operation(OperationCode::ILogicalShiftRight, NO_PRECISE, imm_lut, pack_012); + const Node bit = Operation(OperationCode::IBitwiseAnd, NO_PRECISE, shifted_bit, one); + + const Node right = + Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, bit, shift_amount); + + if (i > 0) { + value = Operation(OperationCode::IBitwiseOr, NO_PRECISE, value, right); + } else { + value = right; + } + } + + SetInternalFlagsFromInteger(bb, value, sets_cc); + SetRegister(bb, dest, value); +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/arithmetic_integer_immediate.cpp b/src/video_core/shader/decode/arithmetic_integer_immediate.cpp new file mode 100644 index 000000000..b26a6e473 --- /dev/null +++ b/src/video_core/shader/decode/arithmetic_integer_immediate.cpp @@ -0,0 +1,96 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
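WriteLop3Instruction above evaluates an arbitrary three-input logic function one bit at a time: the bits of op_a, op_b and op_c form a 3-bit index into the 8-bit lookup table. A compact host-side sketch of that evaluation:

#include <cstdint>

// Evaluate a LOP3-style lookup: for each bit position build the 3-bit
// index (a<<2 | b<<1 | c) and take that bit of the 8-entry truth table.
std::uint32_t Lop3(std::uint32_t a, std::uint32_t b, std::uint32_t c, std::uint8_t lut) {
    std::uint32_t result = 0;
    for (std::uint32_t i = 0; i < 32; ++i) {
        const std::uint32_t index =
            (((a >> i) & 1u) << 2) | (((b >> i) & 1u) << 1) | ((c >> i) & 1u);
        result |= ((lut >> index) & 1u) << i;
    }
    return result;
}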
+ +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::LogicOperation; +using Tegra::Shader::OpCode; +using Tegra::Shader::Pred; +using Tegra::Shader::PredicateResultMode; +using Tegra::Shader::Register; + +u32 ShaderIR::DecodeArithmeticIntegerImmediate(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + Node op_a = GetRegister(instr.gpr8); + Node op_b = Immediate(static_cast(instr.alu.imm20_32)); + + switch (opcode->get().GetId()) { + case OpCode::Id::IADD32I: { + UNIMPLEMENTED_IF_MSG(instr.iadd32i.saturate, "IADD32I saturation is not implemented"); + + op_a = GetOperandAbsNegInteger(op_a, false, instr.iadd32i.negate_a, true); + + const Node value = Operation(OperationCode::IAdd, PRECISE, op_a, op_b); + + SetInternalFlagsFromInteger(bb, value, instr.op_32.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::LOP32I: { + if (instr.alu.lop32i.invert_a) + op_a = Operation(OperationCode::IBitwiseNot, NO_PRECISE, op_a); + + if (instr.alu.lop32i.invert_b) + op_b = Operation(OperationCode::IBitwiseNot, NO_PRECISE, op_b); + + WriteLogicOperation(bb, instr.gpr0, instr.alu.lop32i.operation, op_a, op_b, + PredicateResultMode::None, Pred::UnusedIndex, instr.op_32.generates_cc); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled ArithmeticIntegerImmediate instruction: {}", + opcode->get().GetName()); + } + + return pc; +} + +void ShaderIR::WriteLogicOperation(BasicBlock& bb, Register dest, LogicOperation logic_op, + Node op_a, Node op_b, PredicateResultMode predicate_mode, + Pred predicate, bool sets_cc) { + const Node result = [&]() { + switch (logic_op) { + case LogicOperation::And: + return Operation(OperationCode::IBitwiseAnd, PRECISE, op_a, op_b); + case LogicOperation::Or: + return Operation(OperationCode::IBitwiseOr, PRECISE, op_a, op_b); + case LogicOperation::Xor: + return Operation(OperationCode::IBitwiseXor, PRECISE, op_a, op_b); + case LogicOperation::PassB: + return op_b; + default: + UNIMPLEMENTED_MSG("Unimplemented logic operation={}", static_cast(logic_op)); + return Immediate(0); + } + }(); + + SetInternalFlagsFromInteger(bb, result, sets_cc); + SetRegister(bb, dest, result); + + // Write the predicate value depending on the predicate mode. + switch (predicate_mode) { + case PredicateResultMode::None: + // Do nothing. + return; + case PredicateResultMode::NotZero: { + // Set the predicate to true if the result is not zero. + const Node compare = Operation(OperationCode::LogicalINotEqual, result, Immediate(0)); + SetPredicate(bb, static_cast(predicate), compare); + break; + } + default: + UNIMPLEMENTED_MSG("Unimplemented predicate result mode: {}", + static_cast(predicate_mode)); + } +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/bfe.cpp b/src/video_core/shader/decode/bfe.cpp new file mode 100644 index 000000000..0734141b0 --- /dev/null +++ b/src/video_core/shader/decode/bfe.cpp @@ -0,0 +1,49 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
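WriteLogicOperation above produces both the logical result and, when PredicateResultMode::NotZero is selected, a predicate telling whether that result is non-zero. A minimal scalar sketch of those two outputs:

#include <cstdint>

enum class LogicOp { And, Or, Xor, PassB };

struct LogicResult {
    std::uint32_t value;
    bool not_zero; // what PredicateResultMode::NotZero writes to the predicate
};

// Compute the logical result and the predicate the NotZero mode would set.
LogicResult DoLogicOp(std::uint32_t a, std::uint32_t b, LogicOp op) {
    std::uint32_t value = 0;
    switch (op) {
    case LogicOp::And:
        value = a & b;
        break;
    case LogicOp::Or:
        value = a | b;
        break;
    case LogicOp::Xor:
        value = a ^ b;
        break;
    case LogicOp::PassB:
        value = b;
        break;
    }
    return {value, value != 0};
}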
+ +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeBfe(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + UNIMPLEMENTED_IF(instr.bfe.negate_b); + + Node op_a = GetRegister(instr.gpr8); + op_a = GetOperandAbsNegInteger(op_a, false, instr.bfe.negate_a, false); + + switch (opcode->get().GetId()) { + case OpCode::Id::BFE_IMM: { + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in BFE is not implemented"); + + const Node inner_shift_imm = Immediate(static_cast(instr.bfe.GetLeftShiftValue())); + const Node outer_shift_imm = + Immediate(static_cast(instr.bfe.GetLeftShiftValue() + instr.bfe.shift_position)); + + const Node inner_shift = + Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, op_a, inner_shift_imm); + const Node outer_shift = + Operation(OperationCode::ILogicalShiftRight, NO_PRECISE, inner_shift, outer_shift_imm); + + SetInternalFlagsFromInteger(bb, outer_shift, instr.generates_cc); + SetRegister(bb, instr.gpr0, outer_shift); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled BFE instruction: {}", opcode->get().GetName()); + } + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/bfi.cpp b/src/video_core/shader/decode/bfi.cpp new file mode 100644 index 000000000..942d6729d --- /dev/null +++ b/src/video_core/shader/decode/bfi.cpp @@ -0,0 +1,41 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeBfi(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + const auto [base, packed_shift] = [&]() -> std::tuple { + switch (opcode->get().GetId()) { + case OpCode::Id::BFI_IMM_R: + return {GetRegister(instr.gpr39), Immediate(instr.alu.GetSignedImm20_20())}; + default: + UNREACHABLE(); + return {Immediate(0), Immediate(0)}; + } + }(); + const Node insert = GetRegister(instr.gpr8); + const Node offset = BitfieldExtract(packed_shift, 0, 8); + const Node bits = BitfieldExtract(packed_shift, 8, 8); + + const Node value = + Operation(OperationCode::UBitfieldInsert, PRECISE, base, insert, offset, bits); + + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/conversion.cpp b/src/video_core/shader/decode/conversion.cpp new file mode 100644 index 000000000..ee18d3a99 --- /dev/null +++ b/src/video_core/shader/decode/conversion.cpp @@ -0,0 +1,149 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
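The BFE_IMM handler above implements an unsigned bitfield extract with a left shift followed by a logical right shift. A scalar sketch of the same trick, valid for count > 0 and position + count <= 32:

#include <cstdint>

// Unsigned bitfield extract via the same two-shift pattern as BFE_IMM:
// shift left to discard the high bits, then shift right to drop the rest.
constexpr std::uint32_t ExtractBits(std::uint32_t value, std::uint32_t position,
                                    std::uint32_t count) {
    const std::uint32_t inner_shift = 32 - position - count;
    const std::uint32_t outer_shift = 32 - count;
    return (value << inner_shift) >> outer_shift;
}

static_assert(ExtractBits(0xABCD1234u, 8, 8) == 0x12);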
+ +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Register; + +u32 ShaderIR::DecodeConversion(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + switch (opcode->get().GetId()) { + case OpCode::Id::I2I_R: { + UNIMPLEMENTED_IF(instr.conversion.selector); + + const bool input_signed = instr.conversion.is_input_signed; + const bool output_signed = instr.conversion.is_output_signed; + + Node value = GetRegister(instr.gpr20); + value = ConvertIntegerSize(value, instr.conversion.src_size, input_signed); + + value = GetOperandAbsNegInteger(value, instr.conversion.abs_a, instr.conversion.negate_a, + input_signed); + if (input_signed != output_signed) { + value = SignedOperation(OperationCode::ICastUnsigned, output_signed, NO_PRECISE, value); + } + + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::I2F_R: + case OpCode::Id::I2F_C: { + UNIMPLEMENTED_IF(instr.conversion.dest_size != Register::Size::Word); + UNIMPLEMENTED_IF(instr.conversion.selector); + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in I2F is not implemented"); + + Node value = [&]() { + if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset); + } + }(); + const bool input_signed = instr.conversion.is_input_signed; + value = ConvertIntegerSize(value, instr.conversion.src_size, input_signed); + value = GetOperandAbsNegInteger(value, instr.conversion.abs_a, false, input_signed); + value = SignedOperation(OperationCode::FCastInteger, input_signed, PRECISE, value); + value = GetOperandAbsNegFloat(value, false, instr.conversion.negate_a); + + SetInternalFlagsFromFloat(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::F2F_R: + case OpCode::Id::F2F_C: { + UNIMPLEMENTED_IF(instr.conversion.dest_size != Register::Size::Word); + UNIMPLEMENTED_IF(instr.conversion.src_size != Register::Size::Word); + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in F2F is not implemented"); + + Node value = [&]() { + if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset); + } + }(); + + value = GetOperandAbsNegFloat(value, instr.conversion.abs_a, instr.conversion.negate_a); + + value = [&]() { + switch (instr.conversion.f2f.rounding) { + case Tegra::Shader::F2fRoundingOp::None: + return value; + case Tegra::Shader::F2fRoundingOp::Round: + return Operation(OperationCode::FRoundEven, PRECISE, value); + case Tegra::Shader::F2fRoundingOp::Floor: + return Operation(OperationCode::FFloor, PRECISE, value); + case Tegra::Shader::F2fRoundingOp::Ceil: + return Operation(OperationCode::FCeil, PRECISE, value); + case Tegra::Shader::F2fRoundingOp::Trunc: + return Operation(OperationCode::FTrunc, PRECISE, value); + } + UNIMPLEMENTED_MSG("Unimplemented F2F rounding mode {}", + static_cast(instr.conversion.f2f.rounding.Value())); + return Immediate(0); + }(); + value = GetSaturatedFloat(value, instr.alu.saturate_d); + + SetInternalFlagsFromFloat(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + 
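The F2F rounding switch above maps each hardware rounding mode onto a whole-number rounding of the float value; the same mapping with standard library functions, as a sketch:

#include <cmath>

enum class RoundingOp { None, Round, Floor, Ceil, Trunc };

// Apply the rounding modes the F2F handler above selects between.
float ApplyRounding(float value, RoundingOp op) {
    switch (op) {
    case RoundingOp::Round:
        return std::nearbyint(value); // nearest, ties to even in the default FP environment
    case RoundingOp::Floor:
        return std::floor(value);
    case RoundingOp::Ceil:
        return std::ceil(value);
    case RoundingOp::Trunc:
        return std::trunc(value);
    case RoundingOp::None:
    default:
        return value;
    }
}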
case OpCode::Id::F2I_R: + case OpCode::Id::F2I_C: { + UNIMPLEMENTED_IF(instr.conversion.src_size != Register::Size::Word); + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in F2I is not implemented"); + Node value = [&]() { + if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset); + } + }(); + + value = GetOperandAbsNegFloat(value, instr.conversion.abs_a, instr.conversion.negate_a); + + value = [&]() { + switch (instr.conversion.f2i.rounding) { + case Tegra::Shader::F2iRoundingOp::None: + return value; + case Tegra::Shader::F2iRoundingOp::Floor: + return Operation(OperationCode::FFloor, PRECISE, value); + case Tegra::Shader::F2iRoundingOp::Ceil: + return Operation(OperationCode::FCeil, PRECISE, value); + case Tegra::Shader::F2iRoundingOp::Trunc: + return Operation(OperationCode::FTrunc, PRECISE, value); + default: + UNIMPLEMENTED_MSG("Unimplemented F2I rounding mode {}", + static_cast(instr.conversion.f2i.rounding.Value())); + return Immediate(0); + } + }(); + const bool is_signed = instr.conversion.is_output_signed; + value = SignedOperation(OperationCode::ICastFloat, is_signed, PRECISE, value); + value = ConvertIntegerSize(value, instr.conversion.dest_size, is_signed); + + SetRegister(bb, instr.gpr0, value); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled conversion instruction: {}", opcode->get().GetName()); + } + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/decode_integer_set.cpp b/src/video_core/shader/decode/decode_integer_set.cpp new file mode 100644 index 000000000..e69de29bb diff --git a/src/video_core/shader/decode/ffma.cpp b/src/video_core/shader/decode/ffma.cpp new file mode 100644 index 000000000..be8dc2230 --- /dev/null +++ b/src/video_core/shader/decode/ffma.cpp @@ -0,0 +1,59 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
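Before the FFMA decode that follows: in scalar terms the instruction computes fma(a, b, c) with optional negation of b and c and an optional [0, 1] saturate on the result. A hedged reference sketch in plain C++, assuming negate_b/negate_c/saturate_d behave as their names suggest (not part of the diff):

#include <algorithm>
#include <cmath>

// Scalar reference for the FFMA result computed below.
static float FfmaRef(float a, float b, float c, bool negate_b, bool negate_c, bool saturate) {
    const float result = std::fma(a, negate_b ? -b : b, negate_c ? -c : c);
    return saturate ? std::clamp(result, 0.0f, 1.0f) : result;
}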
+ +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeFfma(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + UNIMPLEMENTED_IF_MSG(instr.ffma.cc != 0, "FFMA cc not implemented"); + UNIMPLEMENTED_IF_MSG(instr.ffma.tab5980_0 != 1, "FFMA tab5980_0({}) not implemented", + instr.ffma.tab5980_0.Value()); // Seems to be 1 by default based on SMO + UNIMPLEMENTED_IF_MSG(instr.ffma.tab5980_1 != 0, "FFMA tab5980_1({}) not implemented", + instr.ffma.tab5980_1.Value()); + + const Node op_a = GetRegister(instr.gpr8); + + auto [op_b, op_c] = [&]() -> std::tuple { + switch (opcode->get().GetId()) { + case OpCode::Id::FFMA_CR: { + return {GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset), + GetRegister(instr.gpr39)}; + } + case OpCode::Id::FFMA_RR: + return {GetRegister(instr.gpr20), GetRegister(instr.gpr39)}; + case OpCode::Id::FFMA_RC: { + return {GetRegister(instr.gpr39), + GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset)}; + } + case OpCode::Id::FFMA_IMM: + return {GetImmediate19(instr), GetRegister(instr.gpr39)}; + default: + UNIMPLEMENTED_MSG("Unhandled FFMA instruction: {}", opcode->get().GetName()); + return {Immediate(0), Immediate(0)}; + } + }(); + + op_b = GetOperandAbsNegFloat(op_b, false, instr.ffma.negate_b); + op_c = GetOperandAbsNegFloat(op_c, false, instr.ffma.negate_c); + + Node value = Operation(OperationCode::FFma, PRECISE, op_a, op_b, op_c); + value = GetSaturatedFloat(value, instr.alu.saturate_d); + + SetInternalFlagsFromFloat(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/float_set.cpp b/src/video_core/shader/decode/float_set.cpp new file mode 100644 index 000000000..ba846f1bd --- /dev/null +++ b/src/video_core/shader/decode/float_set.cpp @@ -0,0 +1,58 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeFloatSet(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + const Node op_a = GetOperandAbsNegFloat(GetRegister(instr.gpr8), instr.fset.abs_a != 0, + instr.fset.neg_a != 0); + + Node op_b = [&]() { + if (instr.is_b_imm) { + return GetImmediate19(instr); + } else if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset); + } + }(); + + op_b = GetOperandAbsNegFloat(op_b, instr.fset.abs_b != 0, instr.fset.neg_b != 0); + + // The fset instruction sets a register to 1.0 or -1 (depending on the bf bit) if the + // condition is true, and to 0 otherwise. 
+ const Node second_pred = GetPredicate(instr.fset.pred39, instr.fset.neg_pred != 0); + + const OperationCode combiner = GetPredicateCombiner(instr.fset.op); + const Node first_pred = GetPredicateComparisonFloat(instr.fset.cond, op_a, op_b); + + const Node predicate = Operation(combiner, first_pred, second_pred); + + const Node true_value = instr.fset.bf ? Immediate(1.0f) : Immediate(-1); + const Node false_value = instr.fset.bf ? Immediate(0.0f) : Immediate(0); + const Node value = + Operation(OperationCode::Select, PRECISE, predicate, true_value, false_value); + + if (instr.fset.bf) { + SetInternalFlagsFromFloat(bb, value, instr.generates_cc); + } else { + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + } + SetRegister(bb, instr.gpr0, value); + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/float_set_predicate.cpp b/src/video_core/shader/decode/float_set_predicate.cpp new file mode 100644 index 000000000..e88b04d18 --- /dev/null +++ b/src/video_core/shader/decode/float_set_predicate.cpp @@ -0,0 +1,56 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Pred; + +u32 ShaderIR::DecodeFloatSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + const Node op_a = GetOperandAbsNegFloat(GetRegister(instr.gpr8), instr.fsetp.abs_a != 0, + instr.fsetp.neg_a != 0); + Node op_b = [&]() { + if (instr.is_b_imm) { + return GetImmediate19(instr); + } else if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset); + } + }(); + op_b = GetOperandAbsNegFloat(op_b, instr.fsetp.abs_b, false); + + // We can't use the constant predicate as destination. + ASSERT(instr.fsetp.pred3 != static_cast(Pred::UnusedIndex)); + + const Node predicate = GetPredicateComparisonFloat(instr.fsetp.cond, op_a, op_b); + const Node second_pred = GetPredicate(instr.fsetp.pred39, instr.fsetp.neg_pred != 0); + + const OperationCode combiner = GetPredicateCombiner(instr.fsetp.op); + const Node value = Operation(combiner, predicate, second_pred); + + // Set the primary predicate to the result of Predicate OP SecondPredicate + SetPredicate(bb, instr.fsetp.pred3, value); + + if (instr.fsetp.pred0 != static_cast(Pred::UnusedIndex)) { + // Set the secondary predicate to the result of !Predicate OP SecondPredicate, + // if enabled + const Node negated_pred = Operation(OperationCode::LogicalNegate, predicate); + const Node second_value = Operation(combiner, negated_pred, second_pred); + SetPredicate(bb, instr.fsetp.pred0, second_value); + } + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/half_set.cpp b/src/video_core/shader/decode/half_set.cpp new file mode 100644 index 000000000..dfd7cb98f --- /dev/null +++ b/src/video_core/shader/decode/half_set.cpp @@ -0,0 +1,67 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
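The HSET2 decode just below writes a packed result: for each 16-bit lane the "true" value is 1.0 in fp16 (0x3c00) when .BF is set and an all-ones mask otherwise, and the two lanes are OR'd into one register. A standalone sketch of that packing (illustrative only):

#include <cstdint>

static uint32_t Hset2PackRef(bool lane0_true, bool lane1_true, bool bf) {
    const uint32_t true_bits = bf ? 0x3c00u : 0xffffu;
    const uint32_t lo = lane0_true ? true_bits : 0u;
    const uint32_t hi = lane1_true ? true_bits << 16 : 0u;
    return lo | hi;
}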
+ +#include <array> + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeHalfSet(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + UNIMPLEMENTED_IF(instr.hset2.ftz != 0); + + // instr.hset2.type_a + // instr.hset2.type_b + Node op_a = GetRegister(instr.gpr8); + Node op_b = [&]() { + switch (opcode->get().GetId()) { + case OpCode::Id::HSET2_R: + return GetRegister(instr.gpr20); + default: + UNREACHABLE(); + return Immediate(0); + } + }(); + + op_a = GetOperandAbsNegHalf(op_a, instr.hset2.abs_a, instr.hset2.negate_a); + op_b = GetOperandAbsNegHalf(op_b, instr.hset2.abs_b, instr.hset2.negate_b); + + const Node second_pred = GetPredicate(instr.hset2.pred39, instr.hset2.neg_pred); + + MetaHalfArithmetic meta{false, {instr.hset2.type_a, instr.hset2.type_b}}; + const Node comparison_pair = GetPredicateComparisonHalf(instr.hset2.cond, meta, op_a, op_b); + + const OperationCode combiner = GetPredicateCombiner(instr.hset2.op); + + // HSET2 operates on each half float in the pack. + std::array<Node, 2> values; + for (u32 i = 0; i < 2; ++i) { + const u32 raw_value = instr.hset2.bf ? 0x3c00 : 0xffff; + const Node true_value = Immediate(raw_value << (i * 16)); + const Node false_value = Immediate(0); + + const Node comparison = + Operation(OperationCode::LogicalPick2, comparison_pair, Immediate(i)); + const Node predicate = Operation(combiner, comparison, second_pred); + + values[i] = + Operation(OperationCode::Select, NO_PRECISE, predicate, true_value, false_value); + } + + const Node value = Operation(OperationCode::UBitwiseOr, NO_PRECISE, values[0], values[1]); + SetRegister(bb, instr.gpr0, value); + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/half_set_predicate.cpp b/src/video_core/shader/decode/half_set_predicate.cpp new file mode 100644 index 000000000..53c44ae5a --- /dev/null +++ b/src/video_core/shader/decode/half_set_predicate.cpp @@ -0,0 +1,62 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Pred; + +u32 ShaderIR::DecodeHalfSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + UNIMPLEMENTED_IF(instr.hsetp2.ftz != 0); + + Node op_a = GetRegister(instr.gpr8); + op_a = GetOperandAbsNegHalf(op_a, instr.hsetp2.abs_a, instr.hsetp2.negate_a); + + const Node op_b = [&]() { + switch (opcode->get().GetId()) { + case OpCode::Id::HSETP2_R: + return GetOperandAbsNegHalf(GetRegister(instr.gpr20), instr.hsetp2.abs_a, + instr.hsetp2.negate_b); + default: + UNREACHABLE(); + return Immediate(0); + } + }(); + + // We can't use the constant predicate as destination.
+ ASSERT(instr.hsetp2.pred3 != static_cast<u64>(Pred::UnusedIndex)); + + const Node second_pred = GetPredicate(instr.hsetp2.pred39, instr.hsetp2.neg_pred != 0); + + const OperationCode combiner = GetPredicateCombiner(instr.hsetp2.op); + const OperationCode pair_combiner = + instr.hsetp2.h_and ? OperationCode::LogicalAll2 : OperationCode::LogicalAny2; + + MetaHalfArithmetic meta = {false, {instr.hsetp2.type_a, instr.hsetp2.type_b}}; + const Node comparison = GetPredicateComparisonHalf(instr.hsetp2.cond, meta, op_a, op_b); + const Node first_pred = Operation(pair_combiner, comparison); + + // Set the primary predicate to the result of Predicate OP SecondPredicate + const Node value = Operation(combiner, first_pred, second_pred); + SetPredicate(bb, instr.hsetp2.pred3, value); + + if (instr.hsetp2.pred0 != static_cast<u64>(Pred::UnusedIndex)) { + // Set the secondary predicate to the result of !Predicate OP SecondPredicate, if enabled + const Node negated_pred = Operation(OperationCode::LogicalNegate, first_pred); + SetPredicate(bb, instr.hsetp2.pred0, Operation(combiner, negated_pred, second_pred)); + } + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/hfma2.cpp b/src/video_core/shader/decode/hfma2.cpp new file mode 100644 index 000000000..4a6b945f9 --- /dev/null +++ b/src/video_core/shader/decode/hfma2.cpp @@ -0,0 +1,76 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <tuple> + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::HalfPrecision; +using Tegra::Shader::HalfType; +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeHfma2(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + if (opcode->get().GetId() == OpCode::Id::HFMA2_RR) { + UNIMPLEMENTED_IF(instr.hfma2.rr.precision != HalfPrecision::None); + } else { + UNIMPLEMENTED_IF(instr.hfma2.precision != HalfPrecision::None); + } + + constexpr auto identity = HalfType::H0_H1; + + const HalfType type_a = instr.hfma2.type_a; + const Node op_a = GetRegister(instr.gpr8); + + bool neg_b{}, neg_c{}; + auto [saturate, type_b, op_b, type_c, + op_c] = [&]() -> std::tuple<bool, HalfType, Node, HalfType, Node> { + switch (opcode->get().GetId()) { + case OpCode::Id::HFMA2_CR: + neg_b = instr.hfma2.negate_b; + neg_c = instr.hfma2.negate_c; + return {instr.hfma2.saturate, instr.hfma2.type_b, + GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset), instr.hfma2.type_reg39, + GetRegister(instr.gpr39)}; + case OpCode::Id::HFMA2_RC: + neg_b = instr.hfma2.negate_b; + neg_c = instr.hfma2.negate_c; + return {instr.hfma2.saturate, instr.hfma2.type_reg39, GetRegister(instr.gpr39), + instr.hfma2.type_b, GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset)}; + case OpCode::Id::HFMA2_RR: + neg_b = instr.hfma2.rr.negate_b; + neg_c = instr.hfma2.rr.negate_c; + return {instr.hfma2.rr.saturate, instr.hfma2.type_b, GetRegister(instr.gpr20), + instr.hfma2.rr.type_c, GetRegister(instr.gpr39)}; + case OpCode::Id::HFMA2_IMM_R: + neg_c = instr.hfma2.negate_c; + return {instr.hfma2.saturate, identity, UnpackHalfImmediate(instr, true), + instr.hfma2.type_reg39, GetRegister(instr.gpr39)}; + default: + return {false, identity, Immediate(0), identity, Immediate(0)}; + } +
}(); + UNIMPLEMENTED_IF_MSG(saturate, "HFMA2 saturation is not implemented"); + + op_b = GetOperandAbsNegHalf(op_b, false, neg_b); + op_c = GetOperandAbsNegHalf(op_c, false, neg_c); + + MetaHalfArithmetic meta{true, {type_a, type_b, type_c}}; + Node value = Operation(OperationCode::HFma, meta, op_a, op_b, op_c); + value = HalfMerge(GetRegister(instr.gpr0), value, instr.hfma2.merge); + + SetRegister(bb, instr.gpr0, value); + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/integer_set.cpp b/src/video_core/shader/decode/integer_set.cpp new file mode 100644 index 000000000..85e67b03b --- /dev/null +++ b/src/video_core/shader/decode/integer_set.cpp @@ -0,0 +1,50 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeIntegerSet(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + const Node op_a = GetRegister(instr.gpr8); + const Node op_b = [&]() { + if (instr.is_b_imm) { + return Immediate(instr.alu.GetSignedImm20_20()); + } else if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset); + } + }(); + + // The iset instruction sets a register to 1.0 or -1 (depending on the bf bit) if the condition + // is true, and to 0 otherwise. + const Node second_pred = GetPredicate(instr.iset.pred39, instr.iset.neg_pred != 0); + const Node first_pred = + GetPredicateComparisonInteger(instr.iset.cond, instr.iset.is_signed, op_a, op_b); + + const OperationCode combiner = GetPredicateCombiner(instr.iset.op); + + const Node predicate = Operation(combiner, first_pred, second_pred); + + const Node true_value = instr.iset.bf ? Immediate(1.0f) : Immediate(-1); + const Node false_value = instr.iset.bf ? Immediate(0.0f) : Immediate(0); + const Node value = + Operation(OperationCode::Select, PRECISE, predicate, true_value, false_value); + + SetRegister(bb, instr.gpr0, value); + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/integer_set_predicate.cpp b/src/video_core/shader/decode/integer_set_predicate.cpp new file mode 100644 index 000000000..c8b105a08 --- /dev/null +++ b/src/video_core/shader/decode/integer_set_predicate.cpp @@ -0,0 +1,53 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
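The ISETP decode just below (like the FSETP and HSETP2 paths above) writes two predicates: the primary gets the comparison combined with the second source predicate, the secondary gets the negated comparison combined with it. A scalar sketch, with the combiner passed in as a plain function (illustrative only):

#include <utility>

// Returns {primary, secondary}; "op" is the AND/OR/XOR combiner selected by the instruction.
static std::pair<bool, bool> IsetpRef(bool comparison, bool second_pred, bool (*op)(bool, bool)) {
    return {op(comparison, second_pred), op(!comparison, second_pred)};
}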
+ +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Pred; + +u32 ShaderIR::DecodeIntegerSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + const Node op_a = GetRegister(instr.gpr8); + + const Node op_b = [&]() { + if (instr.is_b_imm) { + return Immediate(instr.alu.GetSignedImm20_20()); + } else if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset); + } + }(); + + // We can't use the constant predicate as destination. + ASSERT(instr.isetp.pred3 != static_cast(Pred::UnusedIndex)); + + const Node second_pred = GetPredicate(instr.isetp.pred39, instr.isetp.neg_pred != 0); + const Node predicate = + GetPredicateComparisonInteger(instr.isetp.cond, instr.isetp.is_signed, op_a, op_b); + + // Set the primary predicate to the result of Predicate OP SecondPredicate + const OperationCode combiner = GetPredicateCombiner(instr.isetp.op); + const Node value = Operation(combiner, predicate, second_pred); + SetPredicate(bb, instr.isetp.pred3, value); + + if (instr.isetp.pred0 != static_cast(Pred::UnusedIndex)) { + // Set the secondary predicate to the result of !Predicate OP SecondPredicate, if enabled + const Node negated_pred = Operation(OperationCode::LogicalNegate, predicate); + SetPredicate(bb, instr.isetp.pred0, Operation(combiner, negated_pred, second_pred)); + } + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp new file mode 100644 index 000000000..ae71672d6 --- /dev/null +++ b/src/video_core/shader/decode/memory.cpp @@ -0,0 +1,688 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include +#include + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Attribute; +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Register; +using Tegra::Shader::TextureMiscMode; +using Tegra::Shader::TextureProcessMode; +using Tegra::Shader::TextureType; + +static std::size_t GetCoordCount(TextureType texture_type) { + switch (texture_type) { + case TextureType::Texture1D: + return 1; + case TextureType::Texture2D: + return 2; + case TextureType::Texture3D: + case TextureType::TextureCube: + return 3; + default: + UNIMPLEMENTED_MSG("Unhandled texture type: {}", static_cast(texture_type)); + return 0; + } +} + +u32 ShaderIR::DecodeMemory(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + switch (opcode->get().GetId()) { + case OpCode::Id::LD_A: { + // Note: Shouldn't this be interp mode flat? As in no interpolation made. 
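The LD_A/ST_A paths below step through consecutive destination registers by walking the vec4 components of an attribute and rolling over into the next attribute index. A standalone sketch of that stepping rule (illustrative only):

#include <cstdint>
#include <utility>

// Advance (index, element): x -> y -> z -> w, then wrap to element 0 of the next attribute.
static std::pair<uint32_t, uint32_t> NextAttributeElement(uint32_t index, uint32_t element) {
    element = (element + 1) % 4;
    if (element == 0) {
        ++index;
    }
    return {index, element};
}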
+ UNIMPLEMENTED_IF_MSG(instr.gpr8.Value() != Register::ZeroIndex, + "Indirect attribute loads are not supported"); + UNIMPLEMENTED_IF_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) != 0, + "Unaligned attribute loads are not supported"); + + Tegra::Shader::IpaMode input_mode{Tegra::Shader::IpaInterpMode::Perspective, + Tegra::Shader::IpaSampleMode::Default}; + + u64 next_element = instr.attribute.fmt20.element; + auto next_index = static_cast(instr.attribute.fmt20.index.Value()); + + const auto LoadNextElement = [&](u32 reg_offset) { + const Node buffer = GetRegister(instr.gpr39); + const Node attribute = GetInputAttribute(static_cast(next_index), + next_element, input_mode, buffer); + + SetRegister(bb, instr.gpr0.Value() + reg_offset, attribute); + + // Load the next attribute element into the following register. If the element + // to load goes beyond the vec4 size, load the first element of the next + // attribute. + next_element = (next_element + 1) % 4; + next_index = next_index + (next_element == 0 ? 1 : 0); + }; + + const u32 num_words = static_cast(instr.attribute.fmt20.size.Value()) + 1; + for (u32 reg_offset = 0; reg_offset < num_words; ++reg_offset) { + LoadNextElement(reg_offset); + } + break; + } + case OpCode::Id::LD_C: { + UNIMPLEMENTED_IF(instr.ld_c.unknown != 0); + + Node index = GetRegister(instr.gpr8); + + const Node op_a = + GetConstBufferIndirect(instr.cbuf36.index, instr.cbuf36.offset + 0, index); + + switch (instr.ld_c.type.Value()) { + case Tegra::Shader::UniformType::Single: + SetRegister(bb, instr.gpr0, op_a); + break; + + case Tegra::Shader::UniformType::Double: { + const Node op_b = + GetConstBufferIndirect(instr.cbuf36.index, instr.cbuf36.offset + 4, index); + + SetTemporal(bb, 0, op_a); + SetTemporal(bb, 1, op_b); + SetRegister(bb, instr.gpr0, GetTemporal(0)); + SetRegister(bb, instr.gpr0.Value() + 1, GetTemporal(1)); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled type: {}", static_cast(instr.ld_c.type.Value())); + } + break; + } + case OpCode::Id::LD_L: { + UNIMPLEMENTED_IF_MSG(instr.ld_l.unknown == 1, "LD_L Unhandled mode: {}", + static_cast(instr.ld_l.unknown.Value())); + + const Node index = Operation(OperationCode::IAdd, GetRegister(instr.gpr8), + Immediate(static_cast(instr.smem_imm))); + const Node lmem = GetLocalMemory(index); + + switch (instr.ldst_sl.type.Value()) { + case Tegra::Shader::StoreType::Bytes32: + SetRegister(bb, instr.gpr0, lmem); + break; + default: + UNIMPLEMENTED_MSG("LD_L Unhandled type: {}", + static_cast(instr.ldst_sl.type.Value())); + } + break; + } + case OpCode::Id::ST_A: { + UNIMPLEMENTED_IF_MSG(instr.gpr8.Value() != Register::ZeroIndex, + "Indirect attribute loads are not supported"); + UNIMPLEMENTED_IF_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) != 0, + "Unaligned attribute loads are not supported"); + + u64 next_element = instr.attribute.fmt20.element; + auto next_index = static_cast(instr.attribute.fmt20.index.Value()); + + const auto StoreNextElement = [&](u32 reg_offset) { + const auto dest = GetOutputAttribute(static_cast(next_index), + next_element, GetRegister(instr.gpr39)); + const auto src = GetRegister(instr.gpr0.Value() + reg_offset); + + bb.push_back(Operation(OperationCode::Assign, dest, src)); + + // Load the next attribute element into the following register. If the element + // to load goes beyond the vec4 size, load the first element of the next + // attribute. + next_element = (next_element + 1) % 4; + next_index = next_index + (next_element == 0 ? 
1 : 0); + }; + + const u32 num_words = static_cast(instr.attribute.fmt20.size.Value()) + 1; + for (u32 reg_offset = 0; reg_offset < num_words; ++reg_offset) { + StoreNextElement(reg_offset); + } + + break; + } + case OpCode::Id::ST_L: { + UNIMPLEMENTED_IF_MSG(instr.st_l.unknown == 0, "ST_L Unhandled mode: {}", + static_cast(instr.st_l.unknown.Value())); + + const Node index = Operation(OperationCode::IAdd, NO_PRECISE, GetRegister(instr.gpr8), + Immediate(static_cast(instr.smem_imm))); + + switch (instr.ldst_sl.type.Value()) { + case Tegra::Shader::StoreType::Bytes32: + SetLocalMemory(bb, index, GetRegister(instr.gpr0)); + break; + default: + UNIMPLEMENTED_MSG("ST_L Unhandled type: {}", + static_cast(instr.ldst_sl.type.Value())); + } + break; + } + case OpCode::Id::TEX: { + UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(TextureMiscMode::AOFFI), + "AOFFI is not implemented"); + + if (instr.tex.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TEX.NODEP implementation is incomplete"); + } + + const TextureType texture_type{instr.tex.texture_type}; + const bool is_array = instr.tex.array != 0; + const bool depth_compare = instr.tex.UsesMiscMode(TextureMiscMode::DC); + const auto process_mode = instr.tex.GetTextureProcessMode(); + WriteTexInstructionFloat( + bb, instr, GetTexCode(instr, texture_type, process_mode, depth_compare, is_array)); + break; + } + case OpCode::Id::TEXS: { + const TextureType texture_type{instr.texs.GetTextureType()}; + const bool is_array{instr.texs.IsArrayTexture()}; + const bool depth_compare = instr.texs.UsesMiscMode(TextureMiscMode::DC); + const auto process_mode = instr.texs.GetTextureProcessMode(); + + if (instr.texs.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TEXS.NODEP implementation is incomplete"); + } + + const Node4 components = + GetTexsCode(instr, texture_type, process_mode, depth_compare, is_array); + + if (instr.texs.fp32_flag) { + WriteTexsInstructionFloat(bb, instr, components); + } else { + WriteTexsInstructionHalfFloat(bb, instr, components); + } + break; + } + case OpCode::Id::TLD4: { + ASSERT(instr.tld4.array == 0); + UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::AOFFI), + "AOFFI is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::NDV), + "NDV is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::PTP), + "PTP is not implemented"); + + if (instr.tld4.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TLD4.NODEP implementation is incomplete"); + } + + const auto texture_type = instr.tld4.texture_type.Value(); + const bool depth_compare = instr.tld4.UsesMiscMode(TextureMiscMode::DC); + const bool is_array = instr.tld4.array != 0; + WriteTexInstructionFloat(bb, instr, + GetTld4Code(instr, texture_type, depth_compare, is_array)); + break; + } + case OpCode::Id::TLD4S: { + UNIMPLEMENTED_IF_MSG(instr.tld4s.UsesMiscMode(TextureMiscMode::AOFFI), + "AOFFI is not implemented"); + + if (instr.tld4s.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TLD4S.NODEP implementation is incomplete"); + } + + const bool depth_compare = instr.tld4s.UsesMiscMode(TextureMiscMode::DC); + const Node op_a = GetRegister(instr.gpr8); + const Node op_b = GetRegister(instr.gpr20); + + std::vector coords; + + // TODO(Subv): Figure out how the sampler type is encoded in the TLD4S instruction. 
+ if (depth_compare) { + // Note: TLD4S coordinate encoding works just like TEXS's + const Node op_y = GetRegister(instr.gpr8.Value() + 1); + coords.push_back(op_a); + coords.push_back(op_y); + coords.push_back(op_b); + } else { + coords.push_back(op_a); + coords.push_back(op_b); + } + const auto num_coords = static_cast(coords.size()); + coords.push_back(Immediate(static_cast(instr.tld4s.component))); + + const auto& sampler = + GetSampler(instr.sampler, TextureType::Texture2D, false, depth_compare); + + Node4 values; + for (u32 element = 0; element < values.size(); ++element) { + auto params = coords; + MetaTexture meta{sampler, element, num_coords}; + values[element] = + Operation(OperationCode::F4TextureGather, std::move(meta), std::move(params)); + } + + WriteTexsInstructionFloat(bb, instr, values); + break; + } + case OpCode::Id::TXQ: { + if (instr.txq.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TXQ.NODEP implementation is incomplete"); + } + + // TODO: The new commits on the texture refactor, change the way samplers work. + // Sadly, not all texture instructions specify the type of texture their sampler + // uses. This must be fixed at a later instance. + const auto& sampler = + GetSampler(instr.sampler, Tegra::Shader::TextureType::Texture2D, false, false); + + switch (instr.txq.query_type) { + case Tegra::Shader::TextureQueryType::Dimension: { + for (u32 element = 0; element < 4; ++element) { + MetaTexture meta{sampler, element}; + const Node value = Operation(OperationCode::F4TextureQueryDimensions, + std::move(meta), GetRegister(instr.gpr8)); + SetTemporal(bb, element, value); + } + for (u32 i = 0; i < 4; ++i) { + SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); + } + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled texture query type: {}", + static_cast(instr.txq.query_type.Value())); + } + break; + } + case OpCode::Id::TMML: { + UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV), + "NDV is not implemented"); + + if (instr.tmml.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TMML.NODEP implementation is incomplete"); + } + + auto texture_type = instr.tmml.texture_type.Value(); + const bool is_array = instr.tmml.array != 0; + const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false); + + std::vector coords; + + // TODO: Add coordinates for different samplers once other texture types are implemented. 
+ switch (texture_type) { + case TextureType::Texture1D: + coords.push_back(GetRegister(instr.gpr8)); + break; + case TextureType::Texture2D: + coords.push_back(GetRegister(instr.gpr8.Value() + 0)); + coords.push_back(GetRegister(instr.gpr8.Value() + 1)); + break; + default: + UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast(texture_type)); + + // Fallback to interpreting as a 2D texture for now + coords.push_back(GetRegister(instr.gpr8.Value() + 0)); + coords.push_back(GetRegister(instr.gpr8.Value() + 1)); + texture_type = TextureType::Texture2D; + } + + for (u32 element = 0; element < 2; ++element) { + auto params = coords; + MetaTexture meta_texture{sampler, element, static_cast(coords.size())}; + const Node value = + Operation(OperationCode::F4TextureQueryLod, meta_texture, std::move(params)); + SetTemporal(bb, element, value); + } + for (u32 element = 0; element < 2; ++element) { + SetRegister(bb, instr.gpr0.Value() + element, GetTemporal(element)); + } + + break; + } + case OpCode::Id::TLDS: { + const Tegra::Shader::TextureType texture_type{instr.tlds.GetTextureType()}; + const bool is_array{instr.tlds.IsArrayTexture()}; + + UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::AOFFI), + "AOFFI is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::MZ), "MZ is not implemented"); + + if (instr.tlds.UsesMiscMode(TextureMiscMode::NODEP)) { + LOG_WARNING(HW_GPU, "TMML.NODEP implementation is incomplete"); + } + + WriteTexsInstructionFloat(bb, instr, GetTldsCode(instr, texture_type, is_array)); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled memory instruction: {}", opcode->get().GetName()); + } + + return pc; +} + +const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, TextureType type, + bool is_array, bool is_shadow) { + const auto offset = static_cast(sampler.index.Value()); + + // If this sampler has already been used, return the existing mapping. + const auto itr = + std::find_if(used_samplers.begin(), used_samplers.end(), + [&](const Sampler& entry) { return entry.GetOffset() == offset; }); + if (itr != used_samplers.end()) { + ASSERT(itr->GetType() == type && itr->IsArray() == is_array && + itr->IsShadow() == is_shadow); + return *itr; + } + + // Otherwise create a new mapping for this sampler + const std::size_t next_index = used_samplers.size(); + const Sampler entry{offset, next_index, type, is_array, is_shadow}; + return *used_samplers.emplace(entry).first; +} + +void ShaderIR::WriteTexInstructionFloat(BasicBlock& bb, Instruction instr, + const Node4& components) { + u32 dest_elem = 0; + for (u32 elem = 0; elem < 4; ++elem) { + if (!instr.tex.IsComponentEnabled(elem)) { + // Skip disabled components + continue; + } + SetTemporal(bb, dest_elem++, components[elem]); + } + // After writing values in temporals, move them to the real registers + for (u32 i = 0; i < dest_elem; ++i) { + SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); + } +} + +void ShaderIR::WriteTexsInstructionFloat(BasicBlock& bb, Instruction instr, + const Node4& components) { + // TEXS has two destination registers and a swizzle. 
The first two elements in the swizzle + // go into gpr0+0 and gpr0+1, and the rest goes into gpr28+0 and gpr28+1 + + u32 dest_elem = 0; + for (u32 component = 0; component < 4; ++component) { + if (!instr.texs.IsComponentEnabled(component)) + continue; + SetTemporal(bb, dest_elem++, components[component]); + } + + for (u32 i = 0; i < dest_elem; ++i) { + if (i < 2) { + // Write the first two swizzle components to gpr0 and gpr0+1 + SetRegister(bb, instr.gpr0.Value() + i % 2, GetTemporal(i)); + } else { + ASSERT(instr.texs.HasTwoDestinations()); + // Write the rest of the swizzle components to gpr28 and gpr28+1 + SetRegister(bb, instr.gpr28.Value() + i % 2, GetTemporal(i)); + } + } +} + +void ShaderIR::WriteTexsInstructionHalfFloat(BasicBlock& bb, Instruction instr, + const Node4& components) { + // TEXS.F16 destination registers are packed in two registers in pairs (just like any half + // float instruction). + + Node4 values; + u32 dest_elem = 0; + for (u32 component = 0; component < 4; ++component) { + if (!instr.texs.IsComponentEnabled(component)) + continue; + values[dest_elem++] = components[component]; + } + if (dest_elem == 0) + return; + + std::generate(values.begin() + dest_elem, values.end(), [&]() { return Immediate(0); }); + + const Node first_value = Operation(OperationCode::HPack2, values[0], values[1]); + if (dest_elem <= 2) { + SetRegister(bb, instr.gpr0, first_value); + return; + } + + SetTemporal(bb, 0, first_value); + SetTemporal(bb, 1, Operation(OperationCode::HPack2, values[2], values[3])); + + SetRegister(bb, instr.gpr0, GetTemporal(0)); + SetRegister(bb, instr.gpr28, GetTemporal(1)); +} + +Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type, + TextureProcessMode process_mode, bool depth_compare, bool is_array, + std::size_t array_offset, std::size_t bias_offset, + std::vector<Node>&& coords) { + UNIMPLEMENTED_IF_MSG( + (texture_type == TextureType::Texture3D && (is_array || depth_compare)) || + (texture_type == TextureType::TextureCube && is_array && depth_compare), + "This method is not supported."); + + const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, depth_compare); + + const bool lod_needed = process_mode == TextureProcessMode::LZ || + process_mode == TextureProcessMode::LL || + process_mode == TextureProcessMode::LLA; + + // LOD selection (either via bias or explicit textureLod) not supported in GL for + // sampler2DArrayShadow and samplerCubeArrayShadow. + const bool gl_lod_supported = + !((texture_type == Tegra::Shader::TextureType::Texture2D && is_array && depth_compare) || + (texture_type == Tegra::Shader::TextureType::TextureCube && is_array && depth_compare)); + + const OperationCode read_method = + lod_needed && gl_lod_supported ?
OperationCode::F4TextureLod : OperationCode::F4Texture; + + UNIMPLEMENTED_IF(process_mode != TextureProcessMode::None && !gl_lod_supported); + + std::optional array_offset_value; + if (is_array) + array_offset_value = static_cast(array_offset); + + const auto coords_count = static_cast(coords.size()); + + if (process_mode != TextureProcessMode::None && gl_lod_supported) { + if (process_mode == TextureProcessMode::LZ) { + coords.push_back(Immediate(0.0f)); + } else { + // If present, lod or bias are always stored in the register indexed by the gpr20 + // field with an offset depending on the usage of the other registers + coords.push_back(GetRegister(instr.gpr20.Value() + bias_offset)); + } + } + + Node4 values; + for (u32 element = 0; element < values.size(); ++element) { + auto params = coords; + MetaTexture meta{sampler, element, coords_count, array_offset_value}; + values[element] = Operation(read_method, std::move(meta), std::move(params)); + } + + return values; +} + +Node4 ShaderIR::GetTexCode(Instruction instr, TextureType texture_type, + TextureProcessMode process_mode, bool depth_compare, bool is_array) { + const bool lod_bias_enabled = + (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ); + + const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement( + texture_type, depth_compare, is_array, lod_bias_enabled, 4, 5); + // If enabled arrays index is always stored in the gpr8 field + const u64 array_register = instr.gpr8.Value(); + // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used + const u64 coord_register = array_register + (is_array ? 1 : 0); + + std::vector coords; + for (std::size_t i = 0; i < coord_count; ++i) { + coords.push_back(GetRegister(coord_register + i)); + } + // 1D.DC in opengl the 2nd component is ignored. + if (depth_compare && !is_array && texture_type == TextureType::Texture1D) { + coords.push_back(Immediate(0.0f)); + } + std::size_t array_offset{}; + if (is_array) { + array_offset = coords.size(); + coords.push_back(GetRegister(array_register)); + } + if (depth_compare) { + // Depth is always stored in the register signaled by gpr20 + // or in the next register if lod or bias are used + const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0); + coords.push_back(GetRegister(depth_register)); + } + // Fill ignored coordinates + while (coords.size() < total_coord_count) { + coords.push_back(Immediate(0)); + } + + return GetTextureCode(instr, texture_type, process_mode, depth_compare, is_array, array_offset, + 0, std::move(coords)); +} + +Node4 ShaderIR::GetTexsCode(Instruction instr, TextureType texture_type, + TextureProcessMode process_mode, bool depth_compare, bool is_array) { + const bool lod_bias_enabled = + (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ); + + const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement( + texture_type, depth_compare, is_array, lod_bias_enabled, 4, 4); + // If enabled arrays index is always stored in the gpr8 field + const u64 array_register = instr.gpr8.Value(); + // First coordinate index is stored in gpr8 field or (gpr8 + 1) when arrays are used + const u64 coord_register = array_register + (is_array ? 1 : 0); + const u64 last_coord_register = + (is_array || !(lod_bias_enabled || depth_compare) || (coord_count > 2)) + ? 
static_cast(instr.gpr20.Value()) + : coord_register + 1; + + std::vector coords; + for (std::size_t i = 0; i < coord_count; ++i) { + const bool last = (i == (coord_count - 1)) && (coord_count > 1); + coords.push_back(GetRegister(last ? last_coord_register : coord_register + i)); + } + + std::size_t array_offset{}; + if (is_array) { + array_offset = coords.size(); + coords.push_back(GetRegister(array_register)); + } + if (depth_compare) { + // Depth is always stored in the register signaled by gpr20 + // or in the next register if lod or bias are used + const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0); + coords.push_back(GetRegister(depth_register)); + } + // Fill ignored coordinates + while (coords.size() < total_coord_count) { + coords.push_back(Immediate(0)); + } + + return GetTextureCode(instr, texture_type, process_mode, depth_compare, is_array, array_offset, + (coord_count > 2 ? 1 : 0), std::move(coords)); +} + +Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool depth_compare, + bool is_array) { + const std::size_t coord_count = GetCoordCount(texture_type); + const std::size_t total_coord_count = coord_count + (is_array ? 1 : 0); + const std::size_t total_reg_count = total_coord_count + (depth_compare ? 1 : 0); + + // If enabled arrays index is always stored in the gpr8 field + const u64 array_register = instr.gpr8.Value(); + // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used + const u64 coord_register = array_register + (is_array ? 1 : 0); + + std::vector coords; + + for (size_t i = 0; i < coord_count; ++i) { + coords.push_back(GetRegister(coord_register + i)); + } + std::optional array_offset; + if (is_array) { + array_offset = static_cast(coords.size()); + coords.push_back(GetRegister(array_register)); + } + + const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, depth_compare); + + Node4 values; + for (u32 element = 0; element < values.size(); ++element) { + auto params = coords; + MetaTexture meta{sampler, element, static_cast(coords.size()), array_offset}; + values[element] = + Operation(OperationCode::F4TextureGather, std::move(meta), std::move(params)); + } + + return values; +} + +Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) { + const std::size_t type_coord_count = GetCoordCount(texture_type); + const std::size_t total_coord_count = type_coord_count + (is_array ? 1 : 0); + const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL; + + // If enabled arrays index is always stored in the gpr8 field + const u64 array_register = instr.gpr8.Value(); + // if is array gpr20 is used + const u64 coord_register = is_array ? instr.gpr20.Value() : instr.gpr8.Value(); + + const u64 last_coord_register = + ((type_coord_count > 2) || (type_coord_count == 2 && !lod_enabled)) && !is_array + ? static_cast(instr.gpr20.Value()) + : coord_register + 1; + + std::vector coords; + + for (std::size_t i = 0; i < type_coord_count; ++i) { + const bool last = (i == (type_coord_count - 1)) && (type_coord_count > 1); + coords.push_back(GetRegister(last ? 
last_coord_register : coord_register + i)); + } + std::optional array_offset; + if (is_array) { + array_offset = static_cast(coords.size()); + coords.push_back(GetRegister(array_register)); + } + const auto coords_count = static_cast(coords.size()); + + if (lod_enabled) { + // When lod is used always is in grp20 + coords.push_back(GetRegister(instr.gpr20)); + } else { + coords.push_back(Immediate(0)); + } + + const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false); + + Node4 values; + for (u32 element = 0; element < values.size(); ++element) { + auto params = coords; + MetaTexture meta{sampler, element, coords_count, array_offset}; + values[element] = + Operation(OperationCode::F4TexelFetch, std::move(meta), std::move(params)); + } + return values; +} + +std::tuple ShaderIR::ValidateAndGetCoordinateElement( + TextureType texture_type, bool depth_compare, bool is_array, bool lod_bias_enabled, + std::size_t max_coords, std::size_t max_inputs) { + const std::size_t coord_count = GetCoordCount(texture_type); + + std::size_t total_coord_count = coord_count + (is_array ? 1 : 0) + (depth_compare ? 1 : 0); + const std::size_t total_reg_count = total_coord_count + (lod_bias_enabled ? 1 : 0); + if (total_coord_count > max_coords || total_reg_count > max_inputs) { + UNIMPLEMENTED_MSG("Unsupported Texture operation"); + total_coord_count = std::min(total_coord_count, max_coords); + } + // 1D.DC OpenGL is using a vec3 but 2nd component is ignored later. + total_coord_count += + (depth_compare && !is_array && texture_type == TextureType::Texture1D) ? 1 : 0; + + return {coord_count, total_coord_count}; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/other.cpp b/src/video_core/shader/decode/other.cpp new file mode 100644 index 000000000..c1e5f4efb --- /dev/null +++ b/src/video_core/shader/decode/other.cpp @@ -0,0 +1,178 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::ConditionCode; +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Register; + +u32 ShaderIR::DecodeOther(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + switch (opcode->get().GetId()) { + case OpCode::Id::EXIT: { + const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; + UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "EXIT condition code used: {}", + static_cast(cc)); + + switch (instr.flow.cond) { + case Tegra::Shader::FlowCondition::Always: + bb.push_back(Operation(OperationCode::Exit)); + if (instr.pred.pred_index == static_cast(Tegra::Shader::Pred::UnusedIndex)) { + // If this is an unconditional exit then just end processing here, + // otherwise we have to account for the possibility of the condition + // not being met, so continue processing the next instruction. + pc = MAX_PROGRAM_LENGTH - 1; + } + break; + + case Tegra::Shader::FlowCondition::Fcsm_Tr: + // TODO(bunnei): What is this used for? 
If we assume this condition is not + // satisfied, dual vertex shaders in Farming Simulator make more sense + UNIMPLEMENTED_MSG("Skipping unknown FlowCondition::Fcsm_Tr"); + break; + + default: + UNIMPLEMENTED_MSG("Unhandled flow condition: {}", + static_cast<u32>(instr.flow.cond.Value())); + } + break; + } + case OpCode::Id::KIL: { + UNIMPLEMENTED_IF(instr.flow.cond != Tegra::Shader::FlowCondition::Always); + + const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; + UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "KIL condition code used: {}", + static_cast<u32>(cc)); + + bb.push_back(Operation(OperationCode::Discard)); + break; + } + case OpCode::Id::MOV_SYS: { + switch (instr.sys20) { + case Tegra::Shader::SystemVariable::InvocationInfo: { + LOG_WARNING(HW_GPU, "MOV_SYS instruction with InvocationInfo is incomplete"); + SetRegister(bb, instr.gpr0, Immediate(0u)); + break; + } + case Tegra::Shader::SystemVariable::Ydirection: { + // Config pack's third value is Y_NEGATE's state. + SetRegister(bb, instr.gpr0, Operation(OperationCode::YNegate)); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled system move: {}", static_cast<u32>(instr.sys20.Value())); + } + break; + } + case OpCode::Id::BRA: { + UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, + "BRA with constant buffers are not implemented"); + + const u32 target = pc + instr.bra.GetBranchTarget(); + const Node branch = Operation(OperationCode::Branch, Immediate(target)); + + const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; + if (cc != Tegra::Shader::ConditionCode::T) { + bb.push_back(Conditional(GetConditionCode(cc), {branch})); + } else { + bb.push_back(branch); + } + break; + } + case OpCode::Id::SSY: { + UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, + "Constant buffer flow is not supported"); + + // The SSY opcode tells the GPU where to re-converge divergent execution paths; it sets the + // target of the jump that the SYNC instruction will make. The SSY opcode has a similar + // structure to the BRA opcode. + const u32 target = pc + instr.bra.GetBranchTarget(); + bb.push_back(Operation(OperationCode::PushFlowStack, Immediate(target))); + break; + } + case OpCode::Id::PBK: { + UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, + "Constant buffer PBK is not supported"); + + // PBK pushes to a stack the address where BRK will jump to. This shares a stack with SSY, + // but using SYNC on a PBK address will kill the shader execution. We don't emulate this + // because it's very unlikely a driver will emit such an invalid shader.
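SSY/SYNC and PBK/BRK are modelled here with a single flow stack: SSY and PBK push a target address, SYNC and BRK pop it and jump there. A minimal sketch of the runtime behaviour the PushFlowStack/PopFlowStack operations stand for (illustrative only):

#include <cstdint>
#include <stack>

struct FlowStackRef {
    std::stack<uint32_t> targets;

    void Push(uint32_t target) { // SSY / PBK
        targets.push(target);
    }
    uint32_t Pop() {             // SYNC / BRK: returns the next pc
        const uint32_t target = targets.top();
        targets.pop();
        return target;
    }
};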
+ const u32 target = pc + instr.bra.GetBranchTarget(); + bb.push_back(Operation(OperationCode::PushFlowStack, Immediate(target))); + break; + } + case OpCode::Id::SYNC: { + const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; + UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "SYNC condition code used: {}", + static_cast(cc)); + + // The SYNC opcode jumps to the address previously set by the SSY opcode + bb.push_back(Operation(OperationCode::PopFlowStack)); + break; + } + case OpCode::Id::BRK: { + const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; + UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "BRK condition code used: {}", + static_cast(cc)); + + // The BRK opcode jumps to the address previously set by the PBK opcode + bb.push_back(Operation(OperationCode::PopFlowStack)); + break; + } + case OpCode::Id::IPA: { + const auto& attribute = instr.attribute.fmt28; + const Tegra::Shader::IpaMode input_mode{instr.ipa.interp_mode.Value(), + instr.ipa.sample_mode.Value()}; + + const Node attr = GetInputAttribute(attribute.index, attribute.element, input_mode); + const Node value = GetSaturatedFloat(attr, instr.ipa.saturate); + + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::OUT_R: { + UNIMPLEMENTED_IF_MSG(instr.gpr20.Value() != Register::ZeroIndex, + "Stream buffer is not supported"); + + if (instr.out.emit) { + // gpr0 is used to store the next address and gpr8 contains the address to emit. + // Hardware uses pointers here but we just ignore it + bb.push_back(Operation(OperationCode::EmitVertex)); + SetRegister(bb, instr.gpr0, Immediate(0)); + } + if (instr.out.cut) { + bb.push_back(Operation(OperationCode::EndPrimitive)); + } + break; + } + case OpCode::Id::ISBERD: { + UNIMPLEMENTED_IF(instr.isberd.o != 0); + UNIMPLEMENTED_IF(instr.isberd.skew != 0); + UNIMPLEMENTED_IF(instr.isberd.shift != Tegra::Shader::IsberdShift::None); + UNIMPLEMENTED_IF(instr.isberd.mode != Tegra::Shader::IsberdMode::None); + LOG_WARNING(HW_GPU, "ISBERD instruction is incomplete"); + SetRegister(bb, instr.gpr0, GetRegister(instr.gpr8)); + break; + } + case OpCode::Id::DEPBAR: { + LOG_WARNING(HW_GPU, "DEPBAR instruction is stubbed"); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled instruction: {}", opcode->get().GetName()); + } + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/predicate_set_predicate.cpp b/src/video_core/shader/decode/predicate_set_predicate.cpp new file mode 100644 index 000000000..1717f0653 --- /dev/null +++ b/src/video_core/shader/decode/predicate_set_predicate.cpp @@ -0,0 +1,67 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
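The PSETP decode below combines two source predicates with the instruction's combiner and then folds in a third predicate with the same combiner, writing the plain and negated results to pred3 and pred0. A scalar sketch (illustrative only):

#include <utility>

// Returns {pred3, pred0}; mirrors the decode below, which reuses a single combiner.
static std::pair<bool, bool> PsetpRef(bool p12, bool p29, bool p39, bool (*op)(bool, bool)) {
    const bool inner = op(p12, p29);
    return {op(inner, p39), op(!inner, p39)};
}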
+ +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Pred; + +u32 ShaderIR::DecodePredicateSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + switch (opcode->get().GetId()) { + case OpCode::Id::PSETP: { + const Node op_a = GetPredicate(instr.psetp.pred12, instr.psetp.neg_pred12 != 0); + const Node op_b = GetPredicate(instr.psetp.pred29, instr.psetp.neg_pred29 != 0); + + // We can't use the constant predicate as destination. + ASSERT(instr.psetp.pred3 != static_cast(Pred::UnusedIndex)); + + const Node second_pred = GetPredicate(instr.psetp.pred39, instr.psetp.neg_pred39 != 0); + + const OperationCode combiner = GetPredicateCombiner(instr.psetp.op); + const Node predicate = Operation(combiner, op_a, op_b); + + // Set the primary predicate to the result of Predicate OP SecondPredicate + SetPredicate(bb, instr.psetp.pred3, Operation(combiner, predicate, second_pred)); + + if (instr.psetp.pred0 != static_cast(Pred::UnusedIndex)) { + // Set the secondary predicate to the result of !Predicate OP SecondPredicate, if + // enabled + SetPredicate(bb, instr.psetp.pred0, + Operation(combiner, Operation(OperationCode::LogicalNegate, predicate), + second_pred)); + } + break; + } + case OpCode::Id::CSETP: { + const Node pred = GetPredicate(instr.csetp.pred39, instr.csetp.neg_pred39 != 0); + const Node condition_code = GetConditionCode(instr.csetp.cc); + + const OperationCode combiner = GetPredicateCombiner(instr.csetp.op); + + if (instr.csetp.pred3 != static_cast(Pred::UnusedIndex)) { + SetPredicate(bb, instr.csetp.pred3, Operation(combiner, condition_code, pred)); + } + if (instr.csetp.pred0 != static_cast(Pred::UnusedIndex)) { + const Node neg_cc = Operation(OperationCode::LogicalNegate, condition_code); + SetPredicate(bb, instr.csetp.pred0, Operation(combiner, neg_cc, pred)); + } + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled predicate instruction: {}", opcode->get().GetName()); + } + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/predicate_set_register.cpp b/src/video_core/shader/decode/predicate_set_register.cpp new file mode 100644 index 000000000..8bd15fb00 --- /dev/null +++ b/src/video_core/shader/decode/predicate_set_register.cpp @@ -0,0 +1,46 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodePredicateSetRegister(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in PSET is not implemented"); + + const Node op_a = GetPredicate(instr.pset.pred12, instr.pset.neg_pred12 != 0); + const Node op_b = GetPredicate(instr.pset.pred29, instr.pset.neg_pred29 != 0); + const Node first_pred = Operation(GetPredicateCombiner(instr.pset.cond), op_a, op_b); + + const Node second_pred = GetPredicate(instr.pset.pred39, instr.pset.neg_pred39 != 0); + + const OperationCode combiner = GetPredicateCombiner(instr.pset.op); + const Node predicate = Operation(combiner, first_pred, second_pred); + + const Node true_value = instr.pset.bf ? Immediate(1.0f) : Immediate(0xffffffff); + const Node false_value = instr.pset.bf ? Immediate(0.0f) : Immediate(0); + const Node value = + Operation(OperationCode::Select, PRECISE, predicate, true_value, false_value); + + if (instr.pset.bf) { + SetInternalFlagsFromFloat(bb, value, instr.generates_cc); + } else { + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + } + SetRegister(bb, instr.gpr0, value); + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/register_set_predicate.cpp b/src/video_core/shader/decode/register_set_predicate.cpp new file mode 100644 index 000000000..bdb4424a6 --- /dev/null +++ b/src/video_core/shader/decode/register_set_predicate.cpp @@ -0,0 +1,51 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
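The R2P_IMM decode below copies bits of a register into the programmable predicates: every predicate whose bit is set in the immediate mask is loaded from the corresponding register bit, starting at byte * 8. A standalone sketch (illustrative only):

#include <array>
#include <cstdint>

static void R2pRef(std::array<bool, 7>& preds, uint32_t gpr, uint32_t imm_mask, uint32_t byte) {
    for (uint32_t i = 0; i < preds.size(); ++i) {
        if ((imm_mask >> i) & 1u) {
            preds[i] = ((gpr >> (byte * 8 + i)) & 1u) != 0;
        }
    }
}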
+ +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeRegisterSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + UNIMPLEMENTED_IF(instr.r2p.mode != Tegra::Shader::R2pMode::Pr); + + const Node apply_mask = [&]() { + switch (opcode->get().GetId()) { + case OpCode::Id::R2P_IMM: + return Immediate(static_cast(instr.r2p.immediate_mask)); + default: + UNREACHABLE(); + return Immediate(static_cast(instr.r2p.immediate_mask)); + } + }(); + const Node mask = GetRegister(instr.gpr8); + const auto offset = static_cast(instr.r2p.byte) * 8; + + constexpr u32 programmable_preds = 7; + for (u64 pred = 0; pred < programmable_preds; ++pred) { + const auto shift = static_cast(pred); + + const Node apply_compare = BitfieldExtract(apply_mask, shift, 1); + const Node condition = + Operation(OperationCode::LogicalUNotEqual, apply_compare, Immediate(0)); + + const Node value_compare = BitfieldExtract(mask, offset + shift, 1); + const Node value = Operation(OperationCode::LogicalUNotEqual, value_compare, Immediate(0)); + + const Node code = Operation(OperationCode::LogicalAssign, GetPredicate(pred), value); + bb.push_back(Conditional(condition, {code})); + } + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/shift.cpp b/src/video_core/shader/decode/shift.cpp new file mode 100644 index 000000000..85026bb37 --- /dev/null +++ b/src/video_core/shader/decode/shift.cpp @@ -0,0 +1,55 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeShift(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + const Node op_a = GetRegister(instr.gpr8); + const Node op_b = [&]() { + if (instr.is_b_imm) { + return Immediate(instr.alu.GetSignedImm20_20()); + } else if (instr.is_b_gpr) { + return GetRegister(instr.gpr20); + } else { + return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset); + } + }(); + + switch (opcode->get().GetId()) { + case OpCode::Id::SHR_C: + case OpCode::Id::SHR_R: + case OpCode::Id::SHR_IMM: { + const Node value = SignedOperation(OperationCode::IArithmeticShiftRight, + instr.shift.is_signed, PRECISE, op_a, op_b); + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + case OpCode::Id::SHL_C: + case OpCode::Id::SHL_R: + case OpCode::Id::SHL_IMM: { + const Node value = Operation(OperationCode::ILogicalShiftLeft, PRECISE, op_a, op_b); + SetInternalFlagsFromInteger(bb, value, instr.generates_cc); + SetRegister(bb, instr.gpr0, value); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled shift instruction: {}", opcode->get().GetName()); + } + + return pc; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/decode/video.cpp b/src/video_core/shader/decode/video.cpp new file mode 100644 index 000000000..c3432356d --- /dev/null +++ b/src/video_core/shader/decode/video.cpp @@ -0,0 +1,111 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Pred; +using Tegra::Shader::VideoType; +using Tegra::Shader::VmadShr; + +u32 ShaderIR::DecodeVideo(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + const Node op_a = + GetVideoOperand(GetRegister(instr.gpr8), instr.video.is_byte_chunk_a, instr.video.signed_a, + instr.video.type_a, instr.video.byte_height_a); + const Node op_b = [&]() { + if (instr.video.use_register_b) { + return GetVideoOperand(GetRegister(instr.gpr20), instr.video.is_byte_chunk_b, + instr.video.signed_b, instr.video.type_b, + instr.video.byte_height_b); + } + if (instr.video.signed_b) { + const auto imm = static_cast(instr.alu.GetImm20_16()); + return Immediate(static_cast(imm)); + } else { + return Immediate(instr.alu.GetImm20_16()); + } + }(); + + switch (opcode->get().GetId()) { + case OpCode::Id::VMAD: { + const bool result_signed = instr.video.signed_a == 1 || instr.video.signed_b == 1; + const Node op_c = GetRegister(instr.gpr39); + + Node value = SignedOperation(OperationCode::IMul, result_signed, NO_PRECISE, op_a, op_b); + value = SignedOperation(OperationCode::IAdd, result_signed, NO_PRECISE, value, op_c); + + if (instr.vmad.shr == VmadShr::Shr7 || instr.vmad.shr == VmadShr::Shr15) { + const Node shift = Immediate(instr.vmad.shr == VmadShr::Shr7 ? 
7 : 15);
+            value =
+                SignedOperation(OperationCode::IArithmeticShiftRight, result_signed, value, shift);
+        }
+
+        SetInternalFlagsFromInteger(bb, value, instr.generates_cc);
+        SetRegister(bb, instr.gpr0, value);
+        break;
+    }
+    case OpCode::Id::VSETP: {
+        // We can't use the constant predicate as destination.
+        ASSERT(instr.vsetp.pred3 != static_cast<u64>(Pred::UnusedIndex));
+
+        const bool sign = instr.video.signed_a == 1 || instr.video.signed_b == 1;
+        const Node first_pred = GetPredicateComparisonInteger(instr.vsetp.cond, sign, op_a, op_b);
+        const Node second_pred = GetPredicate(instr.vsetp.pred39, false);
+
+        const OperationCode combiner = GetPredicateCombiner(instr.vsetp.op);
+
+        // Set the primary predicate to the result of Predicate OP SecondPredicate
+        SetPredicate(bb, instr.vsetp.pred3, Operation(combiner, first_pred, second_pred));
+
+        if (instr.vsetp.pred0 != static_cast<u64>(Pred::UnusedIndex)) {
+            // Set the secondary predicate to the result of !Predicate OP SecondPredicate,
+            // if enabled
+            const Node negate_pred = Operation(OperationCode::LogicalNegate, first_pred);
+            SetPredicate(bb, instr.vsetp.pred0, Operation(combiner, negate_pred, second_pred));
+        }
+        break;
+    }
+    default:
+        UNIMPLEMENTED_MSG("Unhandled video instruction: {}", opcode->get().GetName());
+    }
+
+    return pc;
+}
+
+Node ShaderIR::GetVideoOperand(Node op, bool is_chunk, bool is_signed,
+                               Tegra::Shader::VideoType type, u64 byte_height) {
+    if (!is_chunk) {
+        return BitfieldExtract(op, static_cast<u32>(byte_height * 8), 8);
+    }
+    const Node zero = Immediate(0);
+
+    switch (type) {
+    case Tegra::Shader::VideoType::Size16_Low:
+        return BitfieldExtract(op, 0, 16);
+    case Tegra::Shader::VideoType::Size16_High:
+        return BitfieldExtract(op, 16, 16);
+    case Tegra::Shader::VideoType::Size32:
+        // TODO(Rodrigo): From my hardware tests it becomes a bit "mad" when this type is used
+        // (1 * 1 + 0 == 0x5b800000). Until a better explanation is found: abort.
+        UNIMPLEMENTED();
+        return zero;
+    case Tegra::Shader::VideoType::Invalid:
+        UNREACHABLE_MSG("Invalid instruction encoding");
+        return zero;
+    default:
+        UNREACHABLE();
+        return zero;
+    }
+}
+
+} // namespace VideoCommon::Shader
\ No newline at end of file
diff --git a/src/video_core/shader/decode/xmad.cpp b/src/video_core/shader/decode/xmad.cpp
new file mode 100644
index 000000000..0cd9cd1cc
--- /dev/null
+++ b/src/video_core/shader/decode/xmad.cpp
@@ -0,0 +1,97 @@
+// Copyright 2018 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
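GetVideoOperand above selects a byte or a 16-bit half of the source register before VMAD/VSETP operate on it. The equivalent host-side extractions, written out only to show what the emitted UBitfieldExtract operations compute (unsigned extraction assumed):

#include <cstdint>

// Extract one byte selected by byte_height (0-3), as BitfieldExtract(op, byte_height * 8, 8) does.
static std::uint32_t ExtractVideoByte(std::uint32_t reg, std::uint32_t byte_height) {
    return (reg >> (byte_height * 8)) & 0xffu;
}

// Extract the low or high 16 bits, corresponding to Size16_Low / Size16_High.
static std::uint32_t ExtractVideoShort(std::uint32_t reg, bool high) {
    return high ? (reg >> 16) : (reg & 0xffffu);
}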
+ +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +u32 ShaderIR::DecodeXmad(BasicBlock& bb, const BasicBlock& code, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + UNIMPLEMENTED_IF(instr.xmad.sign_a); + UNIMPLEMENTED_IF(instr.xmad.sign_b); + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in XMAD is not implemented"); + + Node op_a = GetRegister(instr.gpr8); + + // TODO(bunnei): Needs to be fixed once op_a or op_b is signed + UNIMPLEMENTED_IF(instr.xmad.sign_a != instr.xmad.sign_b); + const bool is_signed_a = instr.xmad.sign_a == 1; + const bool is_signed_b = instr.xmad.sign_b == 1; + const bool is_signed_c = is_signed_a; + + auto [is_merge, op_b, op_c] = [&]() -> std::tuple { + switch (opcode->get().GetId()) { + case OpCode::Id::XMAD_CR: + return {instr.xmad.merge_56, GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset), + GetRegister(instr.gpr39)}; + case OpCode::Id::XMAD_RR: + return {instr.xmad.merge_37, GetRegister(instr.gpr20), GetRegister(instr.gpr39)}; + case OpCode::Id::XMAD_RC: + return {false, GetRegister(instr.gpr39), + GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset)}; + case OpCode::Id::XMAD_IMM: + return {instr.xmad.merge_37, Immediate(static_cast(instr.xmad.imm20_16)), + GetRegister(instr.gpr39)}; + } + UNIMPLEMENTED_MSG("Unhandled XMAD instruction: {}", opcode->get().GetName()); + return {false, Immediate(0), Immediate(0)}; + }(); + + op_a = BitfieldExtract(op_a, instr.xmad.high_a ? 16 : 0, 16); + + const Node original_b = op_b; + op_b = BitfieldExtract(op_b, instr.xmad.high_b ? 
16 : 0, 16);
+
+    // TODO(Rodrigo): Use an appropriate sign for this operation
+    Node product = Operation(OperationCode::IMul, NO_PRECISE, op_a, op_b);
+    if (instr.xmad.product_shift_left) {
+        product = Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, product, Immediate(16));
+    }
+
+    const Node original_c = op_c;
+    op_c = [&]() {
+        switch (instr.xmad.mode) {
+        case Tegra::Shader::XmadMode::None:
+            return original_c;
+        case Tegra::Shader::XmadMode::CLo:
+            return BitfieldExtract(original_c, 0, 16);
+        case Tegra::Shader::XmadMode::CHi:
+            return BitfieldExtract(original_c, 16, 16);
+        case Tegra::Shader::XmadMode::CBcc: {
+            const Node shifted_b = SignedOperation(OperationCode::ILogicalShiftLeft, is_signed_b,
+                                                   NO_PRECISE, original_b, Immediate(16));
+            return SignedOperation(OperationCode::IAdd, is_signed_c, NO_PRECISE, original_c,
+                                   shifted_b);
+        }
+        default:
+            UNIMPLEMENTED_MSG("Unhandled XMAD mode: {}",
+                              static_cast<u32>(instr.xmad.mode.Value()));
+            return Immediate(0);
+        }
+    }();
+
+    // TODO(Rodrigo): Use an appropriate sign for this operation
+    Node sum = Operation(OperationCode::IAdd, product, op_c);
+    if (is_merge) {
+        const Node a = BitfieldExtract(sum, 0, 16);
+        const Node b =
+            Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, original_b, Immediate(16));
+        sum = Operation(OperationCode::IBitwiseOr, NO_PRECISE, a, b);
+    }
+
+    SetInternalFlagsFromInteger(bb, sum, instr.generates_cc);
+    SetRegister(bb, instr.gpr0, sum);
+
+    return pc;
+}
+
+} // namespace VideoCommon::Shader
\ No newline at end of file
diff --git a/src/video_core/shader/shader_ir.cpp b/src/video_core/shader/shader_ir.cpp
new file mode 100644
index 000000000..d7747103e
--- /dev/null
+++ b/src/video_core/shader/shader_ir.cpp
@@ -0,0 +1,444 @@
+// Copyright 2018 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
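XMAD (decoded above) multiplies one 16-bit half of each source, optionally shifts the product, adds a third operand, and may merge the result with the original B operand. As a plain-integer reference for the unsigned path only (matching the UNIMPLEMENTED_IF guards on sign_a/sign_b), not part of the diff:

#include <cstdint>

// Unsigned reference for the XMAD data flow above: a and b each contribute one
// 16-bit half, the product may be shifted left by 16, c is added, and with merge
// the low half of the sum is combined with original b's low half shifted up.
static std::uint32_t XmadModel(std::uint32_t a, bool high_a, std::uint32_t b, bool high_b,
                               std::uint32_t c, bool product_shift_left, bool is_merge) {
    const std::uint32_t a16 = (high_a ? (a >> 16) : a) & 0xffffu;
    const std::uint32_t b16 = (high_b ? (b >> 16) : b) & 0xffffu;
    std::uint32_t product = a16 * b16;
    if (product_shift_left) {
        product <<= 16;
    }
    std::uint32_t sum = product + c; // XmadMode::None; CLo/CHi/CBcc rework c first
    if (is_merge) {
        sum = (sum & 0xffffu) | (b << 16); // keep low half, splice b's low half into the top
    }
    return sum;
}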
+ +#include +#include + +#include "common/assert.h" +#include "common/common_types.h" +#include "common/logging/log.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Attribute; +using Tegra::Shader::Instruction; +using Tegra::Shader::IpaMode; +using Tegra::Shader::Pred; +using Tegra::Shader::PredCondition; +using Tegra::Shader::PredOperation; +using Tegra::Shader::Register; + +Node ShaderIR::StoreNode(NodeData&& node_data) { + auto store = std::make_unique(node_data); + const Node node = store.get(); + stored_nodes.push_back(std::move(store)); + return node; +} + +Node ShaderIR::Conditional(Node condition, std::vector&& code) { + return StoreNode(ConditionalNode(condition, std::move(code))); +} + +Node ShaderIR::Comment(const std::string& text) { + return StoreNode(CommentNode(text)); +} + +Node ShaderIR::Immediate(u32 value) { + return StoreNode(ImmediateNode(value)); +} + +Node ShaderIR::GetRegister(Register reg) { + if (reg != Register::ZeroIndex) { + used_registers.insert(static_cast(reg)); + } + return StoreNode(GprNode(reg)); +} + +Node ShaderIR::GetImmediate19(Instruction instr) { + return Immediate(instr.alu.GetImm20_19()); +} + +Node ShaderIR::GetImmediate32(Instruction instr) { + return Immediate(instr.alu.GetImm20_32()); +} + +Node ShaderIR::GetConstBuffer(u64 index_, u64 offset_) { + const auto index = static_cast(index_); + const auto offset = static_cast(offset_); + + const auto [entry, is_new] = used_cbufs.try_emplace(index); + entry->second.MarkAsUsed(offset); + + return StoreNode(CbufNode(index, Immediate(offset))); +} + +Node ShaderIR::GetConstBufferIndirect(u64 index_, u64 offset_, Node node) { + const auto index = static_cast(index_); + const auto offset = static_cast(offset_); + + const auto [entry, is_new] = used_cbufs.try_emplace(index); + entry->second.MarkAsUsedIndirect(); + + const Node final_offset = Operation(OperationCode::UAdd, NO_PRECISE, node, Immediate(offset)); + return StoreNode(CbufNode(index, final_offset)); +} + +Node ShaderIR::GetPredicate(u64 pred_, bool negated) { + const auto pred = static_cast(pred_); + if (pred != Pred::UnusedIndex && pred != Pred::NeverExecute) { + used_predicates.insert(pred); + } + + return StoreNode(PredicateNode(pred, negated)); +} + +Node ShaderIR::GetPredicate(bool immediate) { + return GetPredicate(static_cast(immediate ? Pred::UnusedIndex : Pred::NeverExecute)); +} + +Node ShaderIR::GetInputAttribute(Attribute::Index index, u64 element, + const Tegra::Shader::IpaMode& input_mode, Node buffer) { + const auto [entry, is_new] = + used_input_attributes.emplace(std::make_pair(index, std::set{})); + entry->second.insert(input_mode); + + return StoreNode(AbufNode(index, static_cast(element), input_mode, buffer)); +} + +Node ShaderIR::GetOutputAttribute(Attribute::Index index, u64 element, Node buffer) { + if (index == Attribute::Index::ClipDistances0123 || + index == Attribute::Index::ClipDistances4567) { + const auto clip_index = + static_cast((index == Attribute::Index::ClipDistances4567 ? 
1 : 0) + element); + used_clip_distances.at(clip_index) = true; + } + used_output_attributes.insert(index); + + return StoreNode(AbufNode(index, static_cast(element), buffer)); +} + +Node ShaderIR::GetInternalFlag(InternalFlag flag, bool negated) { + const Node node = StoreNode(InternalFlagNode(flag)); + if (negated) { + return Operation(OperationCode::LogicalNegate, node); + } + return node; +} + +Node ShaderIR::GetLocalMemory(Node address) { + return StoreNode(LmemNode(address)); +} + +Node ShaderIR::GetTemporal(u32 id) { + return GetRegister(Register::ZeroIndex + 1 + id); +} + +Node ShaderIR::GetOperandAbsNegFloat(Node value, bool absolute, bool negate) { + if (absolute) { + value = Operation(OperationCode::FAbsolute, NO_PRECISE, value); + } + if (negate) { + value = Operation(OperationCode::FNegate, NO_PRECISE, value); + } + return value; +} + +Node ShaderIR::GetSaturatedFloat(Node value, bool saturate) { + if (!saturate) { + return value; + } + const Node positive_zero = Immediate(std::copysignf(0, 1)); + const Node positive_one = Immediate(1.0f); + return Operation(OperationCode::FClamp, NO_PRECISE, value, positive_zero, positive_one); +} + +Node ShaderIR::ConvertIntegerSize(Node value, Tegra::Shader::Register::Size size, bool is_signed) { + switch (size) { + case Register::Size::Byte: + value = SignedOperation(OperationCode::ILogicalShiftLeft, is_signed, NO_PRECISE, value, + Immediate(24)); + value = SignedOperation(OperationCode::IArithmeticShiftRight, is_signed, NO_PRECISE, value, + Immediate(24)); + return value; + case Register::Size::Short: + value = SignedOperation(OperationCode::ILogicalShiftLeft, is_signed, NO_PRECISE, value, + Immediate(16)); + value = SignedOperation(OperationCode::IArithmeticShiftRight, is_signed, NO_PRECISE, value, + Immediate(16)); + case Register::Size::Word: + // Default - do nothing + return value; + default: + UNREACHABLE_MSG("Unimplemented conversion size: {}", static_cast(size)); + return value; + } +} + +Node ShaderIR::GetOperandAbsNegInteger(Node value, bool absolute, bool negate, bool is_signed) { + if (!is_signed) { + // Absolute or negate on an unsigned is pointless + return value; + } + if (absolute) { + value = Operation(OperationCode::IAbsolute, NO_PRECISE, value); + } + if (negate) { + value = Operation(OperationCode::INegate, NO_PRECISE, value); + } + return value; +} + +Node ShaderIR::UnpackHalfImmediate(Instruction instr, bool has_negation) { + const Node value = Immediate(instr.half_imm.PackImmediates()); + if (!has_negation) { + return value; + } + const Node first_negate = GetPredicate(instr.half_imm.first_negate != 0); + const Node second_negate = GetPredicate(instr.half_imm.second_negate != 0); + + return Operation(OperationCode::HNegate, HALF_NO_PRECISE, value, first_negate, second_negate); +} + +Node ShaderIR::HalfMerge(Node dest, Node src, Tegra::Shader::HalfMerge merge) { + switch (merge) { + case Tegra::Shader::HalfMerge::H0_H1: + return src; + case Tegra::Shader::HalfMerge::F32: + return Operation(OperationCode::HMergeF32, src); + case Tegra::Shader::HalfMerge::Mrg_H0: + return Operation(OperationCode::HMergeH0, dest, src); + case Tegra::Shader::HalfMerge::Mrg_H1: + return Operation(OperationCode::HMergeH1, dest, src); + } + UNREACHABLE(); + return src; +} + +Node ShaderIR::GetOperandAbsNegHalf(Node value, bool absolute, bool negate) { + if (absolute) { + value = Operation(OperationCode::HAbsolute, HALF_NO_PRECISE, value); + } + if (negate) { + value = Operation(OperationCode::HNegate, HALF_NO_PRECISE, value, 
GetPredicate(true), + GetPredicate(true)); + } + return value; +} + +Node ShaderIR::GetPredicateComparisonFloat(PredCondition condition, Node op_a, Node op_b) { + static const std::unordered_map PredicateComparisonTable = { + {PredCondition::LessThan, OperationCode::LogicalFLessThan}, + {PredCondition::Equal, OperationCode::LogicalFEqual}, + {PredCondition::LessEqual, OperationCode::LogicalFLessEqual}, + {PredCondition::GreaterThan, OperationCode::LogicalFGreaterThan}, + {PredCondition::NotEqual, OperationCode::LogicalFNotEqual}, + {PredCondition::GreaterEqual, OperationCode::LogicalFGreaterEqual}, + {PredCondition::LessThanWithNan, OperationCode::LogicalFLessThan}, + {PredCondition::NotEqualWithNan, OperationCode::LogicalFNotEqual}, + {PredCondition::LessEqualWithNan, OperationCode::LogicalFLessEqual}, + {PredCondition::GreaterThanWithNan, OperationCode::LogicalFGreaterThan}, + {PredCondition::GreaterEqualWithNan, OperationCode::LogicalFGreaterEqual}}; + + const auto comparison{PredicateComparisonTable.find(condition)}; + UNIMPLEMENTED_IF_MSG(comparison == PredicateComparisonTable.end(), + "Unknown predicate comparison operation"); + + Node predicate = Operation(comparison->second, NO_PRECISE, op_a, op_b); + + if (condition == PredCondition::LessThanWithNan || + condition == PredCondition::NotEqualWithNan || + condition == PredCondition::LessEqualWithNan || + condition == PredCondition::GreaterThanWithNan || + condition == PredCondition::GreaterEqualWithNan) { + + predicate = Operation(OperationCode::LogicalOr, predicate, + Operation(OperationCode::LogicalFIsNan, op_a)); + predicate = Operation(OperationCode::LogicalOr, predicate, + Operation(OperationCode::LogicalFIsNan, op_b)); + } + + return predicate; +} + +Node ShaderIR::GetPredicateComparisonInteger(PredCondition condition, bool is_signed, Node op_a, + Node op_b) { + static const std::unordered_map PredicateComparisonTable = { + {PredCondition::LessThan, OperationCode::LogicalILessThan}, + {PredCondition::Equal, OperationCode::LogicalIEqual}, + {PredCondition::LessEqual, OperationCode::LogicalILessEqual}, + {PredCondition::GreaterThan, OperationCode::LogicalIGreaterThan}, + {PredCondition::NotEqual, OperationCode::LogicalINotEqual}, + {PredCondition::GreaterEqual, OperationCode::LogicalIGreaterEqual}, + {PredCondition::LessThanWithNan, OperationCode::LogicalILessThan}, + {PredCondition::NotEqualWithNan, OperationCode::LogicalINotEqual}, + {PredCondition::LessEqualWithNan, OperationCode::LogicalILessEqual}, + {PredCondition::GreaterThanWithNan, OperationCode::LogicalIGreaterThan}, + {PredCondition::GreaterEqualWithNan, OperationCode::LogicalIGreaterEqual}}; + + const auto comparison{PredicateComparisonTable.find(condition)}; + UNIMPLEMENTED_IF_MSG(comparison == PredicateComparisonTable.end(), + "Unknown predicate comparison operation"); + + Node predicate = SignedOperation(comparison->second, is_signed, NO_PRECISE, op_a, op_b); + + UNIMPLEMENTED_IF_MSG(condition == PredCondition::LessThanWithNan || + condition == PredCondition::NotEqualWithNan || + condition == PredCondition::LessEqualWithNan || + condition == PredCondition::GreaterThanWithNan || + condition == PredCondition::GreaterEqualWithNan, + "NaN comparisons for integers are not implemented"); + return predicate; +} + +Node ShaderIR::GetPredicateComparisonHalf(Tegra::Shader::PredCondition condition, + const MetaHalfArithmetic& meta, Node op_a, Node op_b) { + + UNIMPLEMENTED_IF_MSG(condition == PredCondition::LessThanWithNan || + condition == PredCondition::NotEqualWithNan || + 
condition == PredCondition::LessEqualWithNan || + condition == PredCondition::GreaterThanWithNan || + condition == PredCondition::GreaterEqualWithNan, + "Unimplemented NaN comparison for half floats"); + + static const std::unordered_map PredicateComparisonTable = { + {PredCondition::LessThan, OperationCode::Logical2HLessThan}, + {PredCondition::Equal, OperationCode::Logical2HEqual}, + {PredCondition::LessEqual, OperationCode::Logical2HLessEqual}, + {PredCondition::GreaterThan, OperationCode::Logical2HGreaterThan}, + {PredCondition::NotEqual, OperationCode::Logical2HNotEqual}, + {PredCondition::GreaterEqual, OperationCode::Logical2HGreaterEqual}, + {PredCondition::LessThanWithNan, OperationCode::Logical2HLessThan}, + {PredCondition::NotEqualWithNan, OperationCode::Logical2HNotEqual}, + {PredCondition::LessEqualWithNan, OperationCode::Logical2HLessEqual}, + {PredCondition::GreaterThanWithNan, OperationCode::Logical2HGreaterThan}, + {PredCondition::GreaterEqualWithNan, OperationCode::Logical2HGreaterEqual}}; + + const auto comparison{PredicateComparisonTable.find(condition)}; + UNIMPLEMENTED_IF_MSG(comparison == PredicateComparisonTable.end(), + "Unknown predicate comparison operation"); + + const Node predicate = Operation(comparison->second, meta, op_a, op_b); + + return predicate; +} + +OperationCode ShaderIR::GetPredicateCombiner(PredOperation operation) { + static const std::unordered_map PredicateOperationTable = { + {PredOperation::And, OperationCode::LogicalAnd}, + {PredOperation::Or, OperationCode::LogicalOr}, + {PredOperation::Xor, OperationCode::LogicalXor}, + }; + + const auto op = PredicateOperationTable.find(operation); + UNIMPLEMENTED_IF_MSG(op == PredicateOperationTable.end(), "Unknown predicate operation"); + return op->second; +} + +Node ShaderIR::GetConditionCode(Tegra::Shader::ConditionCode cc) { + switch (cc) { + case Tegra::Shader::ConditionCode::NEU: + return GetInternalFlag(InternalFlag::Zero, true); + default: + UNIMPLEMENTED_MSG("Unimplemented condition code: {}", static_cast(cc)); + return GetPredicate(static_cast(Pred::NeverExecute)); + } +} + +void ShaderIR::SetRegister(BasicBlock& bb, Register dest, Node src) { + bb.push_back(Operation(OperationCode::Assign, GetRegister(dest), src)); +} + +void ShaderIR::SetPredicate(BasicBlock& bb, u64 dest, Node src) { + bb.push_back(Operation(OperationCode::LogicalAssign, GetPredicate(dest), src)); +} + +void ShaderIR::SetInternalFlag(BasicBlock& bb, InternalFlag flag, Node value) { + bb.push_back(Operation(OperationCode::LogicalAssign, GetInternalFlag(flag), value)); +} + +void ShaderIR::SetLocalMemory(BasicBlock& bb, Node address, Node value) { + bb.push_back(Operation(OperationCode::Assign, GetLocalMemory(address), value)); +} + +void ShaderIR::SetTemporal(BasicBlock& bb, u32 id, Node value) { + SetRegister(bb, Register::ZeroIndex + 1 + id, value); +} + +void ShaderIR::SetInternalFlagsFromFloat(BasicBlock& bb, Node value, bool sets_cc) { + if (!sets_cc) { + return; + } + const Node zerop = Operation(OperationCode::LogicalFEqual, value, Immediate(0.0f)); + SetInternalFlag(bb, InternalFlag::Zero, zerop); + LOG_WARNING(HW_GPU, "Condition codes implementation is incomplete"); +} + +void ShaderIR::SetInternalFlagsFromInteger(BasicBlock& bb, Node value, bool sets_cc) { + if (!sets_cc) { + return; + } + const Node zerop = Operation(OperationCode::LogicalIEqual, value, Immediate(0)); + SetInternalFlag(bb, InternalFlag::Zero, zerop); + LOG_WARNING(HW_GPU, "Condition codes implementation is incomplete"); +} + +Node 
ShaderIR::BitfieldExtract(Node value, u32 offset, u32 bits) { + return Operation(OperationCode::UBitfieldExtract, NO_PRECISE, value, Immediate(offset), + Immediate(bits)); +} + +/*static*/ OperationCode ShaderIR::SignedToUnsignedCode(OperationCode operation_code, + bool is_signed) { + if (is_signed) { + return operation_code; + } + switch (operation_code) { + case OperationCode::FCastInteger: + return OperationCode::FCastUInteger; + case OperationCode::IAdd: + return OperationCode::UAdd; + case OperationCode::IMul: + return OperationCode::UMul; + case OperationCode::IDiv: + return OperationCode::UDiv; + case OperationCode::IMin: + return OperationCode::UMin; + case OperationCode::IMax: + return OperationCode::UMax; + case OperationCode::ICastFloat: + return OperationCode::UCastFloat; + case OperationCode::ICastUnsigned: + return OperationCode::UCastSigned; + case OperationCode::ILogicalShiftLeft: + return OperationCode::ULogicalShiftLeft; + case OperationCode::ILogicalShiftRight: + return OperationCode::ULogicalShiftRight; + case OperationCode::IArithmeticShiftRight: + return OperationCode::UArithmeticShiftRight; + case OperationCode::IBitwiseAnd: + return OperationCode::UBitwiseAnd; + case OperationCode::IBitwiseOr: + return OperationCode::UBitwiseOr; + case OperationCode::IBitwiseXor: + return OperationCode::UBitwiseXor; + case OperationCode::IBitwiseNot: + return OperationCode::UBitwiseNot; + case OperationCode::IBitfieldInsert: + return OperationCode::UBitfieldInsert; + case OperationCode::IBitCount: + return OperationCode::UBitCount; + case OperationCode::LogicalILessThan: + return OperationCode::LogicalULessThan; + case OperationCode::LogicalIEqual: + return OperationCode::LogicalUEqual; + case OperationCode::LogicalILessEqual: + return OperationCode::LogicalULessEqual; + case OperationCode::LogicalIGreaterThan: + return OperationCode::LogicalUGreaterThan; + case OperationCode::LogicalINotEqual: + return OperationCode::LogicalUNotEqual; + case OperationCode::LogicalIGreaterEqual: + return OperationCode::LogicalUGreaterEqual; + case OperationCode::INegate: + UNREACHABLE_MSG("Can't negate an unsigned integer"); + case OperationCode::IAbsolute: + UNREACHABLE_MSG("Can't apply absolute to an unsigned integer"); + } + UNREACHABLE_MSG("Unknown signed operation with code={}", static_cast(operation_code)); + return {}; +} + +} // namespace VideoCommon::Shader \ No newline at end of file diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h new file mode 100644 index 000000000..96e7df6b6 --- /dev/null +++ b/src/video_core/shader/shader_ir.h @@ -0,0 +1,793 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
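ConvertIntegerSize earlier in shader_ir.cpp narrows a value to byte or short width with a shift-left followed by an arithmetic shift-right. The same idiom in plain C++, shown only to illustrate why the shift pair sign-extends (assumes the usual 32-bit int):

#include <cstdint>

// Sign-extend the low 8 bits of a 32-bit value: shifting left by 24 places the
// byte's sign bit at bit 31, and the arithmetic shift right copies it back down.
static std::int32_t SignExtendByte(std::uint32_t value) {
    return static_cast<std::int32_t>(value << 24) >> 24;
}

// Unsigned variant, which is what the U* opcodes from SignedToUnsignedCode produce:
// the logical shift right simply masks the result to the low 8 bits.
static std::uint32_t ZeroExtendByte(std::uint32_t value) {
    return (value << 24) >> 24;
}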
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common/common_types.h" +#include "video_core/engines/maxwell_3d.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/engines/shader_header.h" + +namespace VideoCommon::Shader { + +class OperationNode; +class ConditionalNode; +class GprNode; +class ImmediateNode; +class InternalFlagNode; +class PredicateNode; +class AbufNode; ///< Attribute buffer +class CbufNode; ///< Constant buffer +class LmemNode; ///< Local memory +class GmemNode; ///< Global memory +class CommentNode; + +using ProgramCode = std::vector; + +using NodeData = + std::variant; +using Node = const NodeData*; +using Node4 = std::array; +using BasicBlock = std::vector; + +constexpr u32 MAX_PROGRAM_LENGTH = 0x1000; + +enum class OperationCode { + Assign, /// (float& dest, float src) -> void + + Select, /// (MetaArithmetic, bool pred, float a, float b) -> float + + FAdd, /// (MetaArithmetic, float a, float b) -> float + FMul, /// (MetaArithmetic, float a, float b) -> float + FDiv, /// (MetaArithmetic, float a, float b) -> float + FFma, /// (MetaArithmetic, float a, float b, float c) -> float + FNegate, /// (MetaArithmetic, float a) -> float + FAbsolute, /// (MetaArithmetic, float a) -> float + FClamp, /// (MetaArithmetic, float value, float min, float max) -> float + FMin, /// (MetaArithmetic, float a, float b) -> float + FMax, /// (MetaArithmetic, float a, float b) -> float + FCos, /// (MetaArithmetic, float a) -> float + FSin, /// (MetaArithmetic, float a) -> float + FExp2, /// (MetaArithmetic, float a) -> float + FLog2, /// (MetaArithmetic, float a) -> float + FInverseSqrt, /// (MetaArithmetic, float a) -> float + FSqrt, /// (MetaArithmetic, float a) -> float + FRoundEven, /// (MetaArithmetic, float a) -> float + FFloor, /// (MetaArithmetic, float a) -> float + FCeil, /// (MetaArithmetic, float a) -> float + FTrunc, /// (MetaArithmetic, float a) -> float + FCastInteger, /// (MetaArithmetic, int a) -> float + FCastUInteger, /// (MetaArithmetic, uint a) -> float + + IAdd, /// (MetaArithmetic, int a, int b) -> int + IMul, /// (MetaArithmetic, int a, int b) -> int + IDiv, /// (MetaArithmetic, int a, int b) -> int + INegate, /// (MetaArithmetic, int a) -> int + IAbsolute, /// (MetaArithmetic, int a) -> int + IMin, /// (MetaArithmetic, int a, int b) -> int + IMax, /// (MetaArithmetic, int a, int b) -> int + ICastFloat, /// (MetaArithmetic, float a) -> int + ICastUnsigned, /// (MetaArithmetic, uint a) -> int + ILogicalShiftLeft, /// (MetaArithmetic, int a, uint b) -> int + ILogicalShiftRight, /// (MetaArithmetic, int a, uint b) -> int + IArithmeticShiftRight, /// (MetaArithmetic, int a, uint b) -> int + IBitwiseAnd, /// (MetaArithmetic, int a, int b) -> int + IBitwiseOr, /// (MetaArithmetic, int a, int b) -> int + IBitwiseXor, /// (MetaArithmetic, int a, int b) -> int + IBitwiseNot, /// (MetaArithmetic, int a) -> int + IBitfieldInsert, /// (MetaArithmetic, int base, int insert, int offset, int bits) -> int + IBitfieldExtract, /// (MetaArithmetic, int value, int offset, int offset) -> int + IBitCount, /// (MetaArithmetic, int) -> int + + UAdd, /// (MetaArithmetic, uint a, uint b) -> uint + UMul, /// (MetaArithmetic, uint a, uint b) -> uint + UDiv, /// (MetaArithmetic, uint a, uint b) -> uint + UMin, /// (MetaArithmetic, uint a, uint b) -> uint + UMax, /// (MetaArithmetic, uint a, uint b) -> uint + UCastFloat, /// (MetaArithmetic, float a) -> uint + UCastSigned, /// (MetaArithmetic, int a) -> uint + 
ULogicalShiftLeft, /// (MetaArithmetic, uint a, uint b) -> uint + ULogicalShiftRight, /// (MetaArithmetic, uint a, uint b) -> uint + UArithmeticShiftRight, /// (MetaArithmetic, uint a, uint b) -> uint + UBitwiseAnd, /// (MetaArithmetic, uint a, uint b) -> uint + UBitwiseOr, /// (MetaArithmetic, uint a, uint b) -> uint + UBitwiseXor, /// (MetaArithmetic, uint a, uint b) -> uint + UBitwiseNot, /// (MetaArithmetic, uint a) -> uint + UBitfieldInsert, /// (MetaArithmetic, uint base, uint insert, int offset, int bits) -> uint + UBitfieldExtract, /// (MetaArithmetic, uint value, int offset, int offset) -> uint + UBitCount, /// (MetaArithmetic, uint) -> uint + + HAdd, /// (MetaHalfArithmetic, f16vec2 a, f16vec2 b) -> f16vec2 + HMul, /// (MetaHalfArithmetic, f16vec2 a, f16vec2 b) -> f16vec2 + HFma, /// (MetaHalfArithmetic, f16vec2 a, f16vec2 b, f16vec2 c) -> f16vec2 + HAbsolute, /// (f16vec2 a) -> f16vec2 + HNegate, /// (f16vec2 a, bool first, bool second) -> f16vec2 + HMergeF32, /// (f16vec2 src) -> float + HMergeH0, /// (f16vec2 dest, f16vec2 src) -> f16vec2 + HMergeH1, /// (f16vec2 dest, f16vec2 src) -> f16vec2 + HPack2, /// (float a, float b) -> f16vec2 + + LogicalAssign, /// (bool& dst, bool src) -> void + LogicalAnd, /// (bool a, bool b) -> bool + LogicalOr, /// (bool a, bool b) -> bool + LogicalXor, /// (bool a, bool b) -> bool + LogicalNegate, /// (bool a) -> bool + LogicalPick2, /// (bool2 pair, uint index) -> bool + LogicalAll2, /// (bool2 a) -> bool + LogicalAny2, /// (bool2 a) -> bool + + LogicalFLessThan, /// (float a, float b) -> bool + LogicalFEqual, /// (float a, float b) -> bool + LogicalFLessEqual, /// (float a, float b) -> bool + LogicalFGreaterThan, /// (float a, float b) -> bool + LogicalFNotEqual, /// (float a, float b) -> bool + LogicalFGreaterEqual, /// (float a, float b) -> bool + LogicalFIsNan, /// (float a) -> bool + + LogicalILessThan, /// (int a, int b) -> bool + LogicalIEqual, /// (int a, int b) -> bool + LogicalILessEqual, /// (int a, int b) -> bool + LogicalIGreaterThan, /// (int a, int b) -> bool + LogicalINotEqual, /// (int a, int b) -> bool + LogicalIGreaterEqual, /// (int a, int b) -> bool + + LogicalULessThan, /// (uint a, uint b) -> bool + LogicalUEqual, /// (uint a, uint b) -> bool + LogicalULessEqual, /// (uint a, uint b) -> bool + LogicalUGreaterThan, /// (uint a, uint b) -> bool + LogicalUNotEqual, /// (uint a, uint b) -> bool + LogicalUGreaterEqual, /// (uint a, uint b) -> bool + + Logical2HLessThan, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2 + Logical2HEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2 + Logical2HLessEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2 + Logical2HGreaterThan, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2 + Logical2HNotEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2 + Logical2HGreaterEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2 + + F4Texture, /// (MetaTexture, float[N] coords, float[M] params) -> float4 + F4TextureLod, /// (MetaTexture, float[N] coords, float[M] params) -> float4 + F4TextureGather, /// (MetaTexture, float[N] coords, float[M] params) -> float4 + F4TextureQueryDimensions, /// (MetaTexture, float a) -> float4 + F4TextureQueryLod, /// (MetaTexture, float[N] coords) -> float4 + F4TexelFetch, /// (MetaTexture, int[N], int) -> float4 + + Branch, /// (uint branch_target) -> void + PushFlowStack, /// (uint branch_target) -> void + PopFlowStack, /// () -> void + Exit, /// () -> void + Discard, /// () -> void + + EmitVertex, /// () -> void + 
EndPrimitive, /// () -> void + + YNegate, /// () -> float + + Amount, +}; + +enum class InternalFlag { + Zero = 0, + Sign = 1, + Carry = 2, + Overflow = 3, + Amount = 4, +}; + +/// Describes the behaviour of code path of a given entry point and a return point. +enum class ExitMethod { + Undetermined, ///< Internal value. Only occur when analyzing JMP loop. + AlwaysReturn, ///< All code paths reach the return point. + Conditional, ///< Code path reaches the return point or an END instruction conditionally. + AlwaysEnd, ///< All code paths reach a END instruction. +}; + +class Sampler { +public: + explicit Sampler(std::size_t offset, std::size_t index, Tegra::Shader::TextureType type, + bool is_array, bool is_shadow) + : offset{offset}, index{index}, type{type}, is_array{is_array}, is_shadow{is_shadow} {} + + std::size_t GetOffset() const { + return offset; + } + + std::size_t GetIndex() const { + return index; + } + + Tegra::Shader::TextureType GetType() const { + return type; + } + + bool IsArray() const { + return is_array; + } + + bool IsShadow() const { + return is_shadow; + } + + bool operator<(const Sampler& rhs) const { + return std::tie(offset, index, type, is_array, is_shadow) < + std::tie(rhs.offset, rhs.index, rhs.type, rhs.is_array, rhs.is_shadow); + } + +private: + /// Offset in TSC memory from which to read the sampler object, as specified by the sampling + /// instruction. + std::size_t offset{}; + std::size_t index{}; ///< Value used to index into the generated GLSL sampler array. + Tegra::Shader::TextureType type{}; ///< The type used to sample this texture (Texture2D, etc) + bool is_array{}; ///< Whether the texture is being sampled as an array texture or not. + bool is_shadow{}; ///< Whether the texture is being sampled as a depth texture or not. +}; + +class ConstBuffer { +public: + void MarkAsUsed(u64 offset) { + max_offset = std::max(max_offset, static_cast(offset)); + } + + void MarkAsUsedIndirect() { + is_indirect = true; + } + + bool IsIndirect() const { + return is_indirect; + } + + u32 GetSize() const { + return max_offset + 1; + } + +private: + u32 max_offset{}; + bool is_indirect{}; +}; + +struct MetaArithmetic { + bool precise{}; +}; + +struct MetaHalfArithmetic { + bool precise{}; + std::array types = {Tegra::Shader::HalfType::H0_H1, + Tegra::Shader::HalfType::H0_H1, + Tegra::Shader::HalfType::H0_H1}; +}; + +struct MetaTexture { + const Sampler& sampler; + u32 element{}; + u32 coords_count{}; + std::optional array_index; +}; + +constexpr MetaArithmetic PRECISE = {true}; +constexpr MetaArithmetic NO_PRECISE = {false}; +constexpr MetaHalfArithmetic HALF_NO_PRECISE = {false}; + +using Meta = std::variant; + +/// Holds any kind of operation that can be done in the IR +class OperationNode final { +public: + template + explicit constexpr OperationNode(OperationCode code) : code{code}, meta{} {} + + template + explicit constexpr OperationNode(OperationCode code, Meta&& meta) + : code{code}, meta{std::move(meta)} {} + + template + explicit constexpr OperationNode(OperationCode code, const T*... operands) + : OperationNode(code, {}, operands...) {} + + template + explicit constexpr OperationNode(OperationCode code, Meta&& meta, const T*... 
operands_) + : code{code}, meta{std::move(meta)} { + + auto operands_list = {operands_...}; + for (auto& operand : operands_list) { + operands.push_back(operand); + } + } + + explicit OperationNode(OperationCode code, Meta&& meta, std::vector&& operands) + : code{code}, meta{meta}, operands{std::move(operands)} {} + + explicit OperationNode(OperationCode code, std::vector&& operands) + : code{code}, meta{}, operands{std::move(operands)} {} + + OperationCode GetCode() const { + return code; + } + + const Meta& GetMeta() const { + return meta; + } + + std::size_t GetOperandsCount() const { + return operands.size(); + } + + Node operator[](std::size_t operand_index) const { + return operands.at(operand_index); + } + +private: + const OperationCode code; + const Meta meta; + std::vector operands; +}; + +/// Encloses inside any kind of node that returns a boolean conditionally-executed code +class ConditionalNode final { +public: + explicit ConditionalNode(Node condition, std::vector&& code) + : condition{condition}, code{std::move(code)} {} + + Node GetCondition() const { + return condition; + } + + const std::vector& GetCode() const { + return code; + } + +private: + const Node condition; ///< Condition to be satisfied + std::vector code; ///< Code to execute +}; + +/// A general purpose register +class GprNode final { +public: + explicit constexpr GprNode(Tegra::Shader::Register index) : index{index} {} + + u32 GetIndex() const { + return static_cast(index); + } + +private: + const Tegra::Shader::Register index; +}; + +/// A 32-bits value that represents an immediate value +class ImmediateNode final { +public: + explicit constexpr ImmediateNode(u32 value) : value{value} {} + + u32 GetValue() const { + return value; + } + +private: + const u32 value; +}; + +/// One of Maxwell's internal flags +class InternalFlagNode final { +public: + explicit constexpr InternalFlagNode(InternalFlag flag) : flag{flag} {} + + InternalFlag GetFlag() const { + return flag; + } + +private: + const InternalFlag flag; +}; + +/// A predicate register, it can be negated without aditional nodes +class PredicateNode final { +public: + explicit constexpr PredicateNode(Tegra::Shader::Pred index, bool negated) + : index{index}, negated{negated} {} + + Tegra::Shader::Pred GetIndex() const { + return index; + } + + bool IsNegated() const { + return negated; + } + +private: + const Tegra::Shader::Pred index; + const bool negated; +}; + +/// Attribute buffer memory (known as attributes or varyings in GLSL terms) +class AbufNode final { +public: + explicit constexpr AbufNode(Tegra::Shader::Attribute::Index index, u32 element, + const Tegra::Shader::IpaMode& input_mode, Node buffer = {}) + : input_mode{input_mode}, index{index}, element{element}, buffer{buffer} {} + + explicit constexpr AbufNode(Tegra::Shader::Attribute::Index index, u32 element, + Node buffer = {}) + : input_mode{}, index{index}, element{element}, buffer{buffer} {} + + Tegra::Shader::IpaMode GetInputMode() const { + return input_mode; + } + + Tegra::Shader::Attribute::Index GetIndex() const { + return index; + } + + u32 GetElement() const { + return element; + } + + Node GetBuffer() const { + return buffer; + } + +private: + const Tegra::Shader::IpaMode input_mode; + const Node buffer; + const Tegra::Shader::Attribute::Index index; + const u32 element; +}; + +/// Constant buffer node, usually mapped to uniform buffers in GLSL +class CbufNode final { +public: + explicit constexpr CbufNode(u32 index, Node offset) : index{index}, offset{offset} {} + + u32 
GetIndex() const { + return index; + } + + Node GetOffset() const { + return offset; + } + +private: + const u32 index; + const Node offset; +}; + +/// Local memory node +class LmemNode final { +public: + explicit constexpr LmemNode(Node address) : address{address} {} + + Node GetAddress() const { + return address; + } + +private: + const Node address; +}; + +/// Global memory node +class GmemNode final { +public: + explicit constexpr GmemNode(Node address) : address{address} {} + + Node GetAddress() const { + return address; + } + +private: + const Node address; +}; + +/// Commentary, can be dropped +class CommentNode final { +public: + explicit CommentNode(std::string text) : text{std::move(text)} {} + + const std::string& GetText() const { + return text; + } + +private: + std::string text; +}; + +class ShaderIR final { +public: + explicit ShaderIR(const ProgramCode& program_code, u32 main_offset) + : program_code{program_code}, main_offset{main_offset} { + + Decode(); + } + + const std::map& GetBasicBlocks() const { + return basic_blocks; + } + + const std::set& GetRegisters() const { + return used_registers; + } + + const std::set& GetPredicates() const { + return used_predicates; + } + + const std::map>& + GetInputAttributes() const { + return used_input_attributes; + } + + const std::set& GetOutputAttributes() const { + return used_output_attributes; + } + + const std::map& GetConstantBuffers() const { + return used_cbufs; + } + + const std::set& GetSamplers() const { + return used_samplers; + } + + const std::array& GetClipDistances() + const { + return used_clip_distances; + } + + std::size_t GetLength() const { + return static_cast(coverage_end * sizeof(u64)); + } + + const Tegra::Shader::Header& GetHeader() const { + return header; + } + +private: + void Decode(); + + ExitMethod Scan(u32 begin, u32 end, std::set& labels); + + BasicBlock DecodeRange(u32 begin, u32 end); + + /** + * Decodes a single instruction from Tegra to IR. + * @param bb Basic block where the nodes will be written to. + * @param pc Program counter. Offset to decode. + * @return Next address to decode. 
+ */ + u32 DecodeInstr(BasicBlock& bb, u32 pc); + + u32 DecodeArithmetic(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeArithmeticImmediate(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeBfe(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeBfi(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeShift(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeArithmeticInteger(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeArithmeticIntegerImmediate(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeArithmeticHalf(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeArithmeticHalfImmediate(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeFfma(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeHfma2(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeConversion(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeMemory(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeFloatSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeIntegerSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeHalfSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodePredicateSetRegister(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodePredicateSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeRegisterSetPredicate(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeFloatSet(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeIntegerSet(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeHalfSet(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeVideo(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeXmad(BasicBlock& bb, const BasicBlock& code, u32 pc); + u32 DecodeOther(BasicBlock& bb, const BasicBlock& code, u32 pc); + + /// Internalizes node's data and returns a managed pointer to a clone of that node + Node StoreNode(NodeData&& node_data); + + /// Creates a conditional node + Node Conditional(Node condition, std::vector&& code); + /// Creates a commentary + Node Comment(const std::string& text); + /// Creates an u32 immediate + Node Immediate(u32 value); + /// Creates a s32 immediate + Node Immediate(s32 value) { + return Immediate(static_cast(value)); + } + /// Creates a f32 immediate + Node Immediate(f32 value) { + u32 integral; + std::memcpy(&integral, &value, sizeof(u32)); + return Immediate(integral); + } + + /// Generates a node for a passed register. + Node GetRegister(Tegra::Shader::Register reg); + /// Generates a node representing a 19-bit immediate value + Node GetImmediate19(Tegra::Shader::Instruction instr); + /// Generates a node representing a 32-bit immediate value + Node GetImmediate32(Tegra::Shader::Instruction instr); + /// Generates a node representing a constant buffer + Node GetConstBuffer(u64 index, u64 offset); + /// Generates a node representing a constant buffer with a variadic offset + Node GetConstBufferIndirect(u64 index, u64 offset, Node node); + /// Generates a node for a passed predicate. It can be optionally negated + Node GetPredicate(u64 pred, bool negated = false); + /// Generates a predicate node for an immediate true or false value + Node GetPredicate(bool immediate); + /// Generates a node representing an input atttribute. Keeps track of used attributes. 
+ Node GetInputAttribute(Tegra::Shader::Attribute::Index index, u64 element, + const Tegra::Shader::IpaMode& input_mode, Node buffer = {}); + /// Generates a node representing an output atttribute. Keeps track of used attributes. + Node GetOutputAttribute(Tegra::Shader::Attribute::Index index, u64 element, Node buffer); + /// Generates a node representing an internal flag + Node GetInternalFlag(InternalFlag flag, bool negated = false); + /// Generates a node representing a local memory address + Node GetLocalMemory(Node address); + /// Generates a temporal, internally it uses a post-RZ register + Node GetTemporal(u32 id); + + /// Sets a register. src value must be a number-evaluated node. + void SetRegister(BasicBlock& bb, Tegra::Shader::Register dest, Node src); + /// Sets a predicate. src value must be a bool-evaluated node + void SetPredicate(BasicBlock& bb, u64 dest, Node src); + /// Sets an internal flag. src value must be a bool-evaluated node + void SetInternalFlag(BasicBlock& bb, InternalFlag flag, Node value); + /// Sets a local memory address. address and value must be a number-evaluated node + void SetLocalMemory(BasicBlock& bb, Node address, Node value); + /// Sets a temporal. Internally it uses a post-RZ register + void SetTemporal(BasicBlock& bb, u32 id, Node value); + + /// Sets internal flags from a float + void SetInternalFlagsFromFloat(BasicBlock& bb, Node value, bool sets_cc = true); + /// Sets internal flags from an integer + void SetInternalFlagsFromInteger(BasicBlock& bb, Node value, bool sets_cc = true); + + /// Conditionally absolute/negated float. Absolute is applied first + Node GetOperandAbsNegFloat(Node value, bool absolute, bool negate); + /// Conditionally saturates a float + Node GetSaturatedFloat(Node value, bool saturate = true); + + /// Converts an integer to different sizes. + Node ConvertIntegerSize(Node value, Tegra::Shader::Register::Size size, bool is_signed); + /// Conditionally absolute/negated integer. Absolute is applied first + Node GetOperandAbsNegInteger(Node value, bool absolute, bool negate, bool is_signed); + + /// Unpacks a half immediate from an instruction + Node UnpackHalfImmediate(Tegra::Shader::Instruction instr, bool has_negation); + /// Merges a half pair into another value + Node HalfMerge(Node dest, Node src, Tegra::Shader::HalfMerge merge); + /// Conditionally absolute/negated half float pair. Absolute is applied first + Node GetOperandAbsNegHalf(Node value, bool absolute, bool negate); + + /// Returns a predicate comparing two floats + Node GetPredicateComparisonFloat(Tegra::Shader::PredCondition condition, Node op_a, Node op_b); + /// Returns a predicate comparing two integers + Node GetPredicateComparisonInteger(Tegra::Shader::PredCondition condition, bool is_signed, + Node op_a, Node op_b); + /// Returns a predicate comparing two half floats. 
meta consumes how both pairs will be compared + Node GetPredicateComparisonHalf(Tegra::Shader::PredCondition condition, + const MetaHalfArithmetic& meta, Node op_a, Node op_b); + + /// Returns a predicate combiner operation + OperationCode GetPredicateCombiner(Tegra::Shader::PredOperation operation); + + /// Returns a condition code evaluated from internal flags + Node GetConditionCode(Tegra::Shader::ConditionCode cc); + + /// Accesses a texture sampler + const Sampler& GetSampler(const Tegra::Shader::Sampler& sampler, + Tegra::Shader::TextureType type, bool is_array, bool is_shadow); + + /// Extracts a sequence of bits from a node + Node BitfieldExtract(Node value, u32 offset, u32 bits); + + void WriteTexInstructionFloat(BasicBlock& bb, Tegra::Shader::Instruction instr, + const Node4& components); + + void WriteTexsInstructionFloat(BasicBlock& bb, Tegra::Shader::Instruction instr, + const Node4& components); + void WriteTexsInstructionHalfFloat(BasicBlock& bb, Tegra::Shader::Instruction instr, + const Node4& components); + + Node4 GetTexCode(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type, + Tegra::Shader::TextureProcessMode process_mode, bool depth_compare, + bool is_array); + + Node4 GetTexsCode(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type, + Tegra::Shader::TextureProcessMode process_mode, bool depth_compare, + bool is_array); + + Node4 GetTld4Code(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type, + bool depth_compare, bool is_array); + + Node4 GetTldsCode(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type, + bool is_array); + + std::tuple ValidateAndGetCoordinateElement( + Tegra::Shader::TextureType texture_type, bool depth_compare, bool is_array, + bool lod_bias_enabled, std::size_t max_coords, std::size_t max_inputs); + + Node4 GetTextureCode(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type, + Tegra::Shader::TextureProcessMode process_mode, bool depth_compare, + bool is_array, std::size_t array_offset, std::size_t bias_offset, + std::vector&& coords); + + Node GetVideoOperand(Node op, bool is_chunk, bool is_signed, Tegra::Shader::VideoType type, + u64 byte_height); + + void WriteLogicOperation(BasicBlock& bb, Tegra::Shader::Register dest, + Tegra::Shader::LogicOperation logic_op, Node op_a, Node op_b, + Tegra::Shader::PredicateResultMode predicate_mode, + Tegra::Shader::Pred predicate, bool sets_cc); + void WriteLop3Instruction(BasicBlock& bb, Tegra::Shader::Register dest, Node op_a, Node op_b, + Node op_c, Node imm_lut, bool sets_cc); + + template + Node Operation(OperationCode code, const T*... operands) { + return StoreNode(OperationNode(code, operands...)); + } + + template + Node Operation(OperationCode code, Meta&& meta, const T*... operands) { + return StoreNode(OperationNode(code, std::move(meta), operands...)); + } + + template + Node Operation(OperationCode code, std::vector&& operands) { + return StoreNode(OperationNode(code, std::move(operands))); + } + + template + Node Operation(OperationCode code, Meta&& meta, std::vector&& operands) { + return StoreNode(OperationNode(code, std::move(meta), std::move(operands))); + } + + template + Node SignedOperation(OperationCode code, bool is_signed, const T*... operands) { + return StoreNode(OperationNode(SignedToUnsignedCode(code, is_signed), operands...)); + } + + template + Node SignedOperation(OperationCode code, bool is_signed, Meta&& meta, const T*... 
operands) { + return StoreNode( + OperationNode(SignedToUnsignedCode(code, is_signed), std::move(meta), operands...)); + } + + static OperationCode SignedToUnsignedCode(OperationCode operation_code, bool is_signed); + + const ProgramCode& program_code; + const u32 main_offset; + + u32 coverage_begin{}; + u32 coverage_end{}; + std::map, ExitMethod> exit_method_map; + + std::map basic_blocks; + + std::vector> stored_nodes; + + std::set used_registers; + std::set used_predicates; + std::map> + used_input_attributes; + std::set used_output_attributes; + std::map used_cbufs; + std::set used_samplers; + std::array used_clip_distances{}; + + Tegra::Shader::Header header; +}; + +} // namespace VideoCommon::Shader
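As an illustration of how a backend is expected to consume the interface declared above, a hypothetical caller (not part of this change) would construct a ShaderIR from the raw program words and walk the returned basic blocks and usage sets:

// Hypothetical consumer of the ShaderIR interface above; the actual GLSL
// decompiler wiring lands in a later change.
#include "video_core/shader/shader_ir.h"

using VideoCommon::Shader::ProgramCode;
using VideoCommon::Shader::ShaderIR;

void DumpShaderUsage(const ProgramCode& code, u32 main_offset) {
    const ShaderIR ir(code, main_offset);

    // Each entry maps a program counter to the basic block decoded at that address.
    for (const auto& [address, block] : ir.GetBasicBlocks()) {
        (void)address;
        (void)block; // block is a BasicBlock, i.e. a vector of IR nodes
    }

    // Resource usage collected during decoding, e.g. for binding layout decisions.
    const auto& cbufs = ir.GetConstantBuffers(); // const buffer index -> ConstBuffer
    const auto& samplers = ir.GetSamplers();     // set of Sampler entries
    (void)cbufs;
    (void)samplers;
}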