yuzu-emu / yuzu-android

shader: Properly manage attributes not written from previous stages

ReinUsesLisp 2021-06-16 04:59:30 -03:00 committed by ameerj
parent 892b8aa2ad
commit 374eeda1a3
12 changed files with 62 additions and 41 deletions
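
Note: the heart of this commit is a new RuntimeInfo::previous_stage_stores_generic bitset, filled from the previous stage's Info::stores_generics (or set to all ones when no previous stage is known), which the GLSL and SPIR-V backends consult before declaring or reading generic attributes. A minimal, self-contained sketch of that hand-off, using stand-in Info/RuntimeInfo structs rather than the real shader_recompiler headers:

    #include <bitset>
    #include <cstdio>

    // Stand-ins for Shader::Info and Shader::RuntimeInfo (illustration only).
    struct Info {
        std::bitset<32> stores_generics{}; // generic outputs this stage writes
    };
    struct RuntimeInfo {
        std::bitset<32> previous_stage_stores_generic{};
    };

    RuntimeInfo MakeRuntimeInfo(const Info* previous_info) {
        RuntimeInfo info;
        if (previous_info) {
            info.previous_stage_stores_generic = previous_info->stores_generics;
        } else {
            // No previous stage known: mark every generic as written.
            info.previous_stage_stores_generic.flip();
        }
        return info;
    }

    int main() {
        Info vertex;
        vertex.stores_generics[0] = true; // the vertex stage only writes generic 0

        const RuntimeInfo fragment_runtime{MakeRuntimeInfo(&vertex)};
        std::printf("generic 0 readable: %d\n",
                    static_cast<int>(fragment_runtime.previous_stage_stores_generic[0]));
        std::printf("generic 1 readable: %d\n",
                    static_cast<int>(fragment_runtime.previous_stage_stores_generic[1]));
    }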

View File

@ -327,11 +327,12 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
for (size_t index = 0; index < info.input_generics.size(); ++index) {
const auto& generic{info.input_generics[index]};
- if (generic.used) {
- header += fmt::format("layout(location={}){}in vec4 in_attr{}{};", index,
- InterpDecorator(generic.interpolation), index,
- InputArrayDecorator(stage));
+ if (!generic.used || !runtime_info.previous_stage_stores_generic[index]) {
+ continue;
}
+ header +=
+ fmt::format("layout(location={}){}in vec4 in_attr{}{};", index,
+ InterpDecorator(generic.interpolation), index, InputArrayDecorator(stage));
}
for (size_t index = 0; index < info.uses_patches.size(); ++index) {
if (!info.uses_patches[index]) {
@ -349,10 +350,10 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
}
}
for (size_t index = 0; index < info.stores_generics.size(); ++index) {
- // TODO: Properly resolve attribute issues
- if (info.stores_generics[index] || StageInitializesVaryings()) {
- DefineGenericOutput(index, program.invocations);
+ if (!info.stores_generics[index]) {
+ continue;
}
+ DefineGenericOutput(index, program.invocations);
}
DefineConstantBuffers(bindings);
DefineStorageBuffers(bindings);
@ -362,17 +363,6 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
DefineConstants();
}
- bool EmitContext::StageInitializesVaryings() const noexcept {
- switch (stage) {
- case Stage::VertexA:
- case Stage::VertexB:
- case Stage::Geometry:
- return true;
- default:
- return false;
- }
- }
void EmitContext::SetupExtensions() {
if (info.uses_shadow_lod && profile.support_gl_texture_shadow_lod) {
header += "#extension GL_EXT_texture_shadow_lod : enable\n";

View File

@ -136,8 +136,6 @@ public:
code += '\n';
}
- [[nodiscard]] bool StageInitializesVaryings() const noexcept;
std::string header;
std::string code;
VarAlloc var_alloc;

View File

@ -8,6 +8,7 @@
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
#include "shader_recompiler/profile.h"
#include "shader_recompiler/runtime_info.h"
namespace Shader::Backend::GLSL {
namespace {
@ -179,6 +180,10 @@ void EmitGetAttribute(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr,
const char swizzle{"xyzw"[element]};
if (IR::IsGeneric(attr)) {
const u32 index{IR::GenericAttributeIndex(attr)};
+ if (!ctx.runtime_info.previous_stage_stores_generic[index]) {
+ ctx.AddF32("{}=0.f;", inst, attr);
+ return;
+ }
ctx.AddF32("{}=in_attr{}{}.{};", inst, index, InputVertexIndex(ctx, vertex), swizzle);
return;
}
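
Note: reads of a generic that the previous stage never writes now fold to a constant zero instead of referencing an in_attr that is no longer declared. A small sketch of that decision; ReadGenericExpr is a hypothetical helper, not the real EmitGetAttribute plumbing:

    #include <bitset>
    #include <cstddef>
    #include <iostream>
    #include <string>

    // Returns the GLSL expression for element `swizzle` of generic attribute `index`.
    std::string ReadGenericExpr(std::size_t index, char swizzle,
                                const std::bitset<32>& previous_stage_stores_generic) {
        if (!previous_stage_stores_generic[index]) {
            return "0.f"; // attribute was never written upstream, fold to zero
        }
        return "in_attr" + std::to_string(index) + "." + swizzle;
    }

    int main() {
        std::bitset<32> written;
        written[0] = true;
        std::cout << ReadGenericExpr(0, 'x', written) << '\n'; // in_attr0.x
        std::cout << ReadGenericExpr(1, 'x', written) << '\n'; // 0.f
    }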

View File

@ -12,11 +12,12 @@
namespace Shader::Backend::GLSL {
namespace {
- void InitializeVaryings(EmitContext& ctx) {
+ void InitializeOutputVaryings(EmitContext& ctx) {
+ if (ctx.stage == Stage::VertexB || ctx.stage == Stage::Geometry) {
ctx.Add("gl_Position=vec4(0,0,0,1);");
- // TODO: Properly resolve attribute issues
- for (size_t index = 0; index < ctx.info.stores_generics.size() / 2; ++index) {
- if (!ctx.info.stores_generics[index]) {
+ }
+ for (size_t index = 0; index < 16; ++index) {
+ if (ctx.info.stores_generics[index]) {
ctx.Add("out_attr{}=vec4(0,0,0,1);", index);
}
}
@ -56,9 +57,8 @@ void EmitPhiMove(EmitContext& ctx, const IR::Value& phi_value, const IR::Value&
}
void EmitPrologue(EmitContext& ctx) {
- if (ctx.StageInitializesVaryings()) {
- InitializeVaryings(ctx);
- }
+ InitializeOutputVaryings(ctx);
if (ctx.stage == Stage::Fragment && ctx.profile.need_declared_frag_colors) {
for (size_t index = 0; index < ctx.info.stores_frag_color.size(); ++index) {
if (ctx.info.stores_frag_color[index]) {
@ -73,7 +73,7 @@ void EmitEpilogue(EmitContext&) {}
void EmitEmitVertex(EmitContext& ctx, const IR::Value& stream) {
ctx.Add("EmitStreamVertex(int({}));", ctx.var_alloc.Consume(stream));
- InitializeVaryings(ctx);
+ InitializeOutputVaryings(ctx);
}
void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream) {
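
Note: EmitPrologue now calls the renamed InitializeOutputVaryings unconditionally (the stage check moved inside the function), and geometry shaders re-run it after every EmitStreamVertex. A rough sketch of the GLSL it produces, with a free function standing in for the real EmitContext method:

    #include <bitset>
    #include <cstddef>
    #include <iostream>
    #include <string>

    enum class Stage { VertexB, Geometry, Fragment };

    // Emits default values for gl_Position and every generic output the stage declares.
    std::string InitializeOutputVaryings(Stage stage, const std::bitset<32>& stores_generics) {
        std::string code;
        if (stage == Stage::VertexB || stage == Stage::Geometry) {
            code += "gl_Position=vec4(0,0,0,1);";
        }
        for (std::size_t index = 0; index < 16; ++index) {
            if (stores_generics[index]) {
                code += "out_attr" + std::to_string(index) + "=vec4(0,0,0,1);";
            }
        }
        return code;
    }

    int main() {
        std::bitset<32> stores;
        stores[0] = true;
        stores[2] = true;
        std::cout << InitializeOutputVaryings(Stage::VertexB, stores) << '\n';
        // gl_Position=vec4(0,0,0,1);out_attr0=vec4(0,0,0,1);out_attr2=vec4(0,0,0,1);
    }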

View File

@ -1209,6 +1209,9 @@ void EmitContext::DefineInputs(const Info& info) {
tess_coord = DefineInput(*this, F32[3], false, spv::BuiltIn::TessCoord);
}
for (size_t index = 0; index < info.input_generics.size(); ++index) {
+ if (!runtime_info.previous_stage_stores_generic[index]) {
+ continue;
+ }
const InputVarying generic{info.input_generics[index]};
if (!generic.used) {
continue;

View File

@ -286,7 +286,7 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex) {
if (IR::IsGeneric(attr)) {
const u32 index{IR::GenericAttributeIndex(attr)};
const std::optional<AttrInfo> type{AttrTypes(ctx, index)};
- if (!type) {
+ if (!type || !ctx.runtime_info.previous_stage_stores_generic[index]) {
// Attribute is disabled
return ctx.Const(0.0f);
}
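
Note: the SPIR-V read path folds two "no data" cases into the same constant-zero return: a disabled vertex attribute type and an attribute the previous stage never writes. A tiny sketch of that combined check; AttrTypeOf and the returned strings are stand-ins for AttrTypes() and the real SPIR-V ids:

    #include <bitset>
    #include <cstddef>
    #include <iostream>
    #include <optional>
    #include <string>

    enum class AttrType { Float, SignedInt, UnsignedInt };

    // Stand-in for AttrTypes(): empty when the vertex attribute is disabled.
    std::optional<AttrType> AttrTypeOf(std::size_t index) {
        if (index == 0) {
            return AttrType::Float;
        }
        return std::nullopt;
    }

    std::string ReadGeneric(std::size_t index, const std::bitset<32>& previous_stage_stores_generic) {
        const std::optional<AttrType> type{AttrTypeOf(index)};
        if (!type || !previous_stage_stores_generic[index]) {
            return "constant 0.0"; // disabled or never written: same zero path
        }
        return "load in_attr" + std::to_string(index);
    }

    int main() {
        std::bitset<32> written;
        written[0] = true;
        std::cout << ReadGeneric(0, written) << '\n'; // load in_attr0
        std::cout << ReadGeneric(1, written) << '\n'; // constant 0.0
    }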

View File

@ -192,7 +192,9 @@ IR::Program MergeDualVertexPrograms(IR::Program& vertex_a, IR::Program& vertex_b
result.local_memory_size = std::max(vertex_a.local_memory_size, vertex_b.local_memory_size);
for (size_t index = 0; index < 32; ++index) {
result.info.input_generics[index].used |= vertex_b.info.input_generics[index].used;
- result.info.stores_generics[index] |= vertex_b.info.stores_generics[index];
+ if (vertex_b.info.stores_generics[index]) {
+ result.info.stores_generics[index] = true;
+ }
}
Optimization::JoinTextureInfo(result.info, vertex_b.info);
Optimization::JoinStorageInfo(result.info, vertex_b.info);
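
Note: stores_generics is now a std::bitset<32> (see the shader_info.h hunk below), and std::bitset's proxy reference does not provide |=, which is presumably why the one-line OR became an explicit if. A small sketch of the per-index merge next to the whole-set alternative that bitset does support:

    #include <bitset>
    #include <cstddef>
    #include <iostream>

    int main() {
        std::bitset<32> result;
        const std::bitset<32> vertex_b{0b1010};

        // Per-index merge, as in MergeDualVertexPrograms: the proxy reference has no |=.
        for (std::size_t index = 0; index < 32; ++index) {
            if (vertex_b[index]) {
                result[index] = true;
            }
        }

        // Whole-set merge, which std::bitset supports directly.
        std::bitset<32> result_whole;
        result_whole |= vertex_b;

        std::cout << result << '\n' << result_whole << '\n'; // identical output
    }

The per-index form fits the surrounding loop, which already walks all 32 slots to merge input_generics.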

View File

@ -79,7 +79,7 @@ void GetAttribute(Info& info, IR::Attribute attr) {
void SetAttribute(Info& info, IR::Attribute attr) {
if (IR::IsGeneric(attr)) {
- info.stores_generics.at(IR::GenericAttributeIndex(attr)) = true;
+ info.stores_generics[IR::GenericAttributeIndex(attr)] = true;
return;
}
if (attr >= IR::Attribute::FixedFncTexture0S && attr <= IR::Attribute::FixedFncTexture9Q) {
@ -956,7 +956,9 @@ void GatherInfoFromHeader(Environment& env, Info& info) {
}
if (info.stores_indexed_attributes) {
for (size_t i = 0; i < info.stores_generics.size(); i++) {
- info.stores_generics[i] |= header.vtg.IsOutputGenericVectorActive(i);
+ if (header.vtg.IsOutputGenericVectorActive(i)) {
+ info.stores_generics[i] = true;
+ }
}
info.stores_clip_distance |= header.vtg.omap_systemc.clip_distances != 0;
info.stores_position |= header.vtg.omap_systemb.position != 0;
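
Note: std::bitset has no at() member, so SetAttribute switches to unchecked operator[]; the index stays in range because IR::IsGeneric filters the attribute first and GenericAttributeIndex maps it into [0, 32). A tiny sketch of that guard-then-index pattern; the Attribute values and the division by four are a hypothetical encoding, not the real IR enum:

    #include <bitset>
    #include <cstdint>
    #include <iostream>

    enum class Attribute : std::uint32_t {
        Generic0X = 8,                // hypothetical: generics start at 8, four elements each
        Generic31W = 8 + 32 * 4 - 1,
    };

    bool IsGeneric(Attribute attr) {
        return attr >= Attribute::Generic0X && attr <= Attribute::Generic31W;
    }

    std::uint32_t GenericAttributeIndex(Attribute attr) {
        return (static_cast<std::uint32_t>(attr) - static_cast<std::uint32_t>(Attribute::Generic0X)) / 4;
    }

    int main() {
        std::bitset<32> stores_generics;
        const Attribute attr{Attribute::Generic0X};
        if (IsGeneric(attr)) {
            // The guard keeps the index inside [0, 32), so unchecked indexing is safe.
            stores_generics[GenericAttributeIndex(attr)] = true;
        }
        std::cout << stores_generics.count() << '\n'; // 1
    }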

View File

@ -5,6 +5,7 @@
#pragma once
#include <array>
+ #include <bitset>
#include <optional>
#include <vector>
@ -59,6 +60,8 @@ struct TransformFeedbackVarying {
struct RuntimeInfo {
std::array<AttributeType, 32> generic_input_types{};
+ std::bitset<32> previous_stage_stores_generic{};
bool convert_depth_mode{};
bool force_early_z{};
@ -72,11 +75,12 @@ struct RuntimeInfo {
std::optional<CompareFunction> alpha_test_func;
float alpha_test_reference{};
- // Static y negate value
+ /// Static Y negate value
bool y_negate{};
- // Use storage buffers instead of global pointers on GLASM
+ /// Use storage buffers instead of global pointers on GLASM
bool glasm_use_storage_buffers{};
+ /// Transform feedback state for each varying
std::vector<TransformFeedbackVarying> xfb_varyings;
};

View File

@ -140,7 +140,7 @@ struct Info {
bool stores_sample_mask{};
bool stores_frag_depth{};
- std::array<bool, 32> stores_generics{};
+ std::bitset<32> stores_generics{};
bool stores_layer{};
bool stores_viewport_index{};
bool stores_point_size{};
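
Note: switching Info::stores_generics from std::array<bool, 32> to std::bitset<32> is what lets the pipeline caches copy it straight into RuntimeInfo::previous_stage_stores_generic with one assignment, and flip a default-constructed set to mean "assume everything is written". A small comparison sketch:

    #include <array>
    #include <bitset>
    #include <iostream>

    int main() {
        std::bitset<32> stores_generics;
        stores_generics[2] = true;

        // bitset -> bitset: a single assignment copies all 32 flags.
        std::bitset<32> previous_stage_stores_generic = stores_generics;
        std::cout << previous_stage_stores_generic[2] << '\n'; // 1

        // A bool array can be copied too, but it has no whole-set operations and is
        // typically larger in memory than the packed bitset.
        std::array<bool, 32> as_bool_array{};
        std::cout << sizeof(as_bool_array) << " vs " << sizeof(stores_generics) << '\n';

        // The "no previous program" fallback used by the pipeline caches.
        std::bitset<32> assume_all_written;
        assume_all_written.flip();
        std::cout << assume_all_written.count() << '\n'; // 32
    }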

View File

@ -58,8 +58,15 @@ auto MakeSpan(Container& container) {
Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key,
const Shader::IR::Program& program,
+ const Shader::IR::Program* previous_program,
bool glasm_use_storage_buffers, bool use_assembly_shaders) {
Shader::RuntimeInfo info;
+ if (previous_program) {
+ info.previous_stage_stores_generic = previous_program->info.stores_generics;
+ } else {
+ // Mark all stores as available
+ info.previous_stage_stores_generic.flip();
+ }
switch (program.stage) {
case Shader::Stage::VertexB:
case Shader::Stage::Geometry:
@ -400,6 +407,7 @@ std::unique_ptr<GraphicsPipeline> ShaderCache::CreateGraphicsPipeline(
OGLProgram source_program;
std::array<std::string, 5> sources;
Shader::Backend::Bindings binding;
+ Shader::IR::Program* previous_program{};
const bool use_glasm{device.UseAssemblyShaders()};
const size_t first_index = uses_vertex_a && uses_vertex_b ? 1 : 0;
for (size_t index = first_index; index < Maxwell::MaxShaderProgram; ++index) {
@ -413,12 +421,13 @@ std::unique_ptr<GraphicsPipeline> ShaderCache::CreateGraphicsPipeline(
infos[stage_index] = &program.info;
const auto runtime_info{
- MakeRuntimeInfo(key, program, glasm_use_storage_buffers, use_glasm)};
+ MakeRuntimeInfo(key, program, previous_program, glasm_use_storage_buffers, use_glasm)};
if (use_glasm) {
sources[stage_index] = EmitGLASM(profile, runtime_info, program, binding);
} else {
sources[stage_index] = EmitGLSL(profile, runtime_info, program, binding);
}
+ previous_program = &program;
}
auto* const thread_worker{build_in_parallel ? workers.get() : nullptr};
VideoCore::ShaderNotify* const notify{build_in_parallel ? &shader_notify : nullptr};
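
Note: both shader caches now thread a pointer to the previously emitted stage through the per-stage loop and only advance it for stages that are actually present, so a fragment shader still sees the vertex shader as its previous stage when no geometry shader exists. A condensed sketch of that pattern; the five-slot array and the Program struct are stand-ins for the real cache code:

    #include <array>
    #include <bitset>
    #include <cstddef>
    #include <cstdio>
    #include <optional>

    struct Info {
        std::bitset<32> stores_generics{};
    };
    struct Program {
        Info info;
    };

    int main() {
        // Slot 1 = vertex, slots 2 and 3 = absent tessellation/geometry, slot 4 = fragment.
        std::array<std::optional<Program>, 5> programs{};
        programs[1].emplace();
        programs[1]->info.stores_generics[0] = true; // vertex writes generic 0 only
        programs[4].emplace();

        const Program* previous_program = nullptr;
        for (std::size_t index = 1; index < programs.size(); ++index) {
            if (!programs[index]) {
                continue; // absent stage: keep pointing at the last real stage
            }
            std::bitset<32> previous_stage_stores_generic;
            if (previous_program) {
                previous_stage_stores_generic = previous_program->info.stores_generics;
            } else {
                previous_stage_stores_generic.flip(); // first stage: assume everything written
            }
            std::printf("stage %zu: generic 0 %s, generic 1 %s\n", index,
                        previous_stage_stores_generic[0] ? "written" : "unwritten",
                        previous_stage_stores_generic[1] ? "written" : "unwritten");
            previous_program = &*programs[index];
        }
    }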

View File

@ -90,7 +90,7 @@ Shader::CompareFunction MaxwellToCompareFunction(Maxwell::ComparisonOp compariso
return {};
}
- static Shader::AttributeType CastAttributeType(const FixedPipelineState::VertexAttribute& attr) {
+ Shader::AttributeType CastAttributeType(const FixedPipelineState::VertexAttribute& attr) {
if (attr.enabled == 0) {
return Shader::AttributeType::Disabled;
}
@ -124,9 +124,15 @@ Shader::AttributeType AttributeType(const FixedPipelineState& state, size_t inde
}
Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineCacheKey& key,
- const Shader::IR::Program& program) {
+ const Shader::IR::Program& program,
+ const Shader::IR::Program* previous_program) {
Shader::RuntimeInfo info;
+ if (previous_program) {
+ info.previous_stage_stores_generic = previous_program->info.stores_generics;
+ } else {
+ // Mark all stores as available
+ info.previous_stage_stores_generic.flip();
+ }
const Shader::Stage stage{program.stage};
const bool has_geometry{key.unique_hashes[4] != 0};
const bool gl_ndc{key.state.ndc_minus_one_to_one != 0};
@ -499,6 +505,7 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
std::array<const Shader::Info*, Maxwell::MaxShaderStage> infos{};
std::array<vk::ShaderModule, Maxwell::MaxShaderStage> modules;
+ const Shader::IR::Program* previous_stage{};
Shader::Backend::Bindings binding;
for (size_t index = uses_vertex_a && uses_vertex_b ? 1 : 0; index < Maxwell::MaxShaderProgram;
++index) {
@ -511,7 +518,7 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
const size_t stage_index{index - 1};
infos[stage_index] = &program.info;
- const Shader::RuntimeInfo runtime_info{MakeRuntimeInfo(key, program)};
+ const Shader::RuntimeInfo runtime_info{MakeRuntimeInfo(key, program, previous_stage)};
const std::vector<u32> code{EmitSPIRV(profile, runtime_info, program, binding)};
device.SaveShader(code);
modules[stage_index] = BuildShader(device, code);
@ -519,6 +526,7 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
const std::string name{fmt::format("Shader {:016x}", key.unique_hashes[index])};
modules[stage_index].SetObjectNameEXT(name.c_str());
}
+ previous_stage = &program;
}
Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr};
VideoCore::ShaderNotify* const notify{build_in_parallel ? &shader_notify : nullptr};