yuzu-emu/yuzu (Archived)

vk_shader_decompiler: Implement force early fragment tests

Force early fragment tests when the 3D method is enabled.
The established pipeline cache takes care of recompiling if needed.

This is implemented only on Vulkan to avoid invalidating the shader
cache on OpenGL.
ReinUsesLisp 2020-11-26 16:49:20 -03:00
parent 322349e8cc
commit 2ccf85a910
6 changed files with 19 additions and 11 deletions
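The commit message's point about the pipeline cache can be illustrated with a small sketch (illustrative names only, not yuzu's actual types): the new bit is part of the pipeline key, so a draw that flips force_early_fragment_tests simply misses the cache and builds a new shader variant, instead of requiring any explicit invalidation.

// Sketch only: specialization bits live in the cache key, so changing
// force_early_fragment_tests selects (or builds) a different pipeline variant.
#include <cstddef>
#include <cstdint>
#include <unordered_map>

struct PipelineKey {
    std::uint32_t raw2 = 0; // packs alpha_test_func, early_z, ...
    bool operator==(const PipelineKey& rhs) const noexcept { return raw2 == rhs.raw2; }
};

struct PipelineKeyHash {
    std::size_t operator()(const PipelineKey& key) const noexcept { return key.raw2; }
};

struct Pipeline {}; // stands in for the compiled SPIR-V modules / VkPipeline

Pipeline Compile(const PipelineKey&) { return {}; } // placeholder for decompile + build

Pipeline& GetPipeline(std::unordered_map<PipelineKey, Pipeline, PipelineKeyHash>& cache,
                      const PipelineKey& key) {
    const auto [it, inserted] = cache.try_emplace(key);
    if (inserted) {
        it->second = Compile(key); // only compiled on a miss, e.g. when early_z changes
    }
    return it->second;
}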

@@ -755,7 +755,11 @@ public:
             u32 data_upload;

-            INSERT_UNION_PADDING_WORDS(0x44);
+            INSERT_UNION_PADDING_WORDS(0x16);
+
+            u32 force_early_fragment_tests;
+
+            INSERT_UNION_PADDING_WORDS(0x2D);

             struct {
                 union {
@@ -1572,6 +1576,7 @@ ASSERT_REG_POSITION(shadow_ram_control, 0x49);
 ASSERT_REG_POSITION(upload, 0x60);
 ASSERT_REG_POSITION(exec_upload, 0x6C);
 ASSERT_REG_POSITION(data_upload, 0x6D);
+ASSERT_REG_POSITION(force_early_fragment_tests, 0x84);
 ASSERT_REG_POSITION(sync_info, 0xB2);
 ASSERT_REG_POSITION(tess_mode, 0xC8);
 ASSERT_REG_POSITION(tess_level_outer, 0xC9);
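As a quick consistency check on the register layout above (hexadecimal word offsets taken from the hunks): the old 0x44-word padding block is split around the new register, and the asserted positions agree with the neighbouring data_upload and sync_info offsets.

// Word-offset arithmetic for the split padding block (values from the diff above):
static_assert(0x16 + 1 + 0x2D == 0x44, "the split padding still covers the old 0x44 words");
static_assert(0x6D + 1 + 0x16 == 0x84, "force_early_fragment_tests sits at 0x84, past data_upload");
static_assert(0x84 + 1 + 0x2D == 0xB2, "sync_info stays at 0xB2 after the new register");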

@@ -46,7 +46,7 @@ void FixedPipelineState::Fill(const Maxwell& regs, bool has_extended_dynamic_sta
                                     regs.polygon_offset_fill_enable};
     const u32 topology_index = static_cast<u32>(regs.draw.topology.Value());

-    raw = 0;
+    raw1 = 0;
     primitive_restart_enable.Assign(regs.primitive_restart.enabled != 0 ? 1 : 0);
     depth_bias_enable.Assign(enabled_lut[POLYGON_OFFSET_ENABLE_LUT[topology_index]] != 0 ? 1 : 0);
     depth_clamp_disabled.Assign(regs.view_volume_clip_control.depth_clamp_disabled.Value());
@@ -61,12 +61,13 @@ void FixedPipelineState::Fill(const Maxwell& regs, bool has_extended_dynamic_sta
     rasterize_enable.Assign(regs.rasterize_enable != 0 ? 1 : 0);
     topology.Assign(regs.draw.topology);

-    alpha_raw = 0;
+    raw2 = 0;
     const auto test_func =
         regs.alpha_test_enabled == 1 ? regs.alpha_test_func : Maxwell::ComparisonOp::Always;
     alpha_test_func.Assign(PackComparisonOp(test_func));
+    early_z.Assign(regs.force_early_fragment_tests != 0 ? 1 : 0);

     alpha_test_ref = Common::BitCast<u32>(regs.alpha_test_ref);
     point_size = Common::BitCast<u32>(regs.point_size);

     for (std::size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {

@@ -171,7 +171,7 @@ struct FixedPipelineState {
     };
     union {
-        u32 raw;
+        u32 raw1;
         BitField<0, 1, u32> no_extended_dynamic_state;
         BitField<2, 1, u32> primitive_restart_enable;
         BitField<3, 1, u32> depth_bias_enable;
@@ -187,13 +187,13 @@ struct FixedPipelineState {
         BitField<23, 1, u32> rasterize_enable;
         BitField<24, 4, Maxwell::PrimitiveTopology> topology;
     };
-    u32 alpha_test_ref; ///< Alpha test reference value
     union {
-        u32 alpha_raw;
+        u32 raw2;
         BitField<0, 3, u32> alpha_test_func;
+        BitField<3, 1, u32> early_z;
     };
+    u32 alpha_test_ref;
     u32 point_size;
     std::array<u32, Maxwell::NumVertexArrays> binding_divisors;
     std::array<VertexAttribute, Maxwell::NumVertexAttributes> attributes;
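The raw2 union above packs several small fields into a single u32 via yuzu's Common::BitField; a rough self-contained equivalent using plain bit masks (not the actual BitField template) shows what the new early_z bit stores and why Fill() clears raw2 before assigning:

// Rough stand-in for the raw2 union, using plain masks instead of BitField.
// Zeroing raw2 first matters because each Assign() only touches its own bits.
#include <cstdint>

struct PackedAlphaState {
    std::uint32_t raw2 = 0;

    void AssignAlphaTestFunc(std::uint32_t func) { // mirrors BitField<0, 3, u32>
        raw2 = (raw2 & ~0x7u) | (func & 0x7u);
    }
    void AssignEarlyZ(bool enabled) { // mirrors BitField<3, 1, u32>
        raw2 = (raw2 & ~(1u << 3)) | ((enabled ? 1u : 0u) << 3);
    }
    bool EarlyZ() const { return ((raw2 >> 3) & 1u) != 0; }
};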

@@ -344,6 +344,7 @@ VKPipelineCache::DecompileShaders(const FixedPipelineState& fixed_state) {
         specialization.attribute_types[i] = attribute.Type();
     }
     specialization.ndc_minus_one_to_one = fixed_state.ndc_minus_one_to_one;
+    specialization.early_fragment_tests = fixed_state.early_z;

     // Alpha test
     specialization.alpha_test_func =

@@ -315,7 +315,6 @@ public:
                       "supported on this device");
             }
         }
-
         if (ir.UsesLayer() || ir.UsesViewportIndex()) {
             if (ir.UsesViewportIndex()) {
                 AddCapability(spv::Capability::MultiViewport);
@@ -325,11 +324,9 @@ public:
                 AddCapability(spv::Capability::ShaderViewportIndexLayerEXT);
             }
         }
-
         if (device.IsFormatlessImageLoadSupported()) {
             AddCapability(spv::Capability::StorageImageReadWithoutFormat);
         }
-
         if (device.IsFloat16Supported()) {
             AddCapability(spv::Capability::Float16);
         }
@@ -377,6 +374,9 @@ public:
         if (header.ps.omap.depth) {
             AddExecutionMode(main, spv::ExecutionMode::DepthReplacing);
         }
+        if (specialization.early_fragment_tests) {
+            AddExecutionMode(main, spv::ExecutionMode::EarlyFragmentTests);
+        }
         break;
     case ShaderType::Compute:
         const auto workgroup_size = specialization.workgroup_size;
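For context, the EarlyFragmentTests execution mode emitted above asks the GPU to run the depth/stencil test before the fragment shader instead of after it. A toy CPU-side model of the difference (illustration only, not emulator code):

// Toy model: with early tests, a failing fragment never runs the shader;
// with late tests, the shader runs and only its outputs are thrown away.
#include <cstdio>

bool DepthTest(float fragment_depth, float stored_depth) {
    return fragment_depth < stored_depth; // "less" comparison as an example
}

void ShadeFragment() { std::puts("fragment shader executed"); }

void ProcessFragment(float fragment_depth, float stored_depth, bool early_fragment_tests) {
    if (early_fragment_tests && !DepthTest(fragment_depth, stored_depth)) {
        return; // rejected before the shader ever runs
    }
    ShadeFragment();
    if (!early_fragment_tests && !DepthTest(fragment_depth, stored_depth)) {
        return; // shader ran, but its output is discarded
    }
    // ... colour/depth writes would happen here ...
}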

@@ -95,6 +95,7 @@ struct Specialization final {
     std::bitset<Maxwell::NumVertexAttributes> enabled_attributes;
     std::array<Maxwell::VertexAttribute::Type, Maxwell::NumVertexAttributes> attribute_types{};
     bool ndc_minus_one_to_one{};
+    bool early_fragment_tests{};
     float alpha_test_ref{};
     Maxwell::ComparisonOp alpha_test_func{};
 };