yuzu-emu
/
yuzu
Archived
1
0
Fork 0

shader: Use TryInstRecursive on XMAD multiply folding

Simplify the logic a bit.
This commit is contained in:
ReinUsesLisp 2021-07-26 04:15:23 -03:00
parent f6f0383b49
commit 09fb41dc63
1 changed file with 13 additions and 15 deletions

View File

@ -116,33 +116,31 @@ bool FoldXmadMultiply(IR::Block& block, IR::Inst& inst) {
* *
* This optimization has been proven safe by LLVM and MSVC. * This optimization has been proven safe by LLVM and MSVC.
*/ */
const IR::Value lhs_arg{inst.Arg(0)}; IR::Inst* const lhs_shl{inst.Arg(0).TryInstRecursive()};
const IR::Value rhs_arg{inst.Arg(1)}; IR::Inst* const rhs_mul{inst.Arg(1).TryInstRecursive()};
if (lhs_arg.IsImmediate() || rhs_arg.IsImmediate()) { if (!lhs_shl || !rhs_mul) {
return false; return false;
} }
IR::Inst* const lhs_shl{lhs_arg.InstRecursive()};
if (lhs_shl->GetOpcode() != IR::Opcode::ShiftLeftLogical32 || if (lhs_shl->GetOpcode() != IR::Opcode::ShiftLeftLogical32 ||
lhs_shl->Arg(1) != IR::Value{16U}) { lhs_shl->Arg(1) != IR::Value{16U}) {
return false; return false;
} }
if (lhs_shl->Arg(0).IsImmediate()) { IR::Inst* const lhs_mul{lhs_shl->Arg(0).TryInstRecursive()};
if (!lhs_mul) {
return false; return false;
} }
IR::Inst* const lhs_mul{lhs_shl->Arg(0).InstRecursive()};
IR::Inst* const rhs_mul{rhs_arg.InstRecursive()};
if (lhs_mul->GetOpcode() != IR::Opcode::IMul32 || rhs_mul->GetOpcode() != IR::Opcode::IMul32) { if (lhs_mul->GetOpcode() != IR::Opcode::IMul32 || rhs_mul->GetOpcode() != IR::Opcode::IMul32) {
return false; return false;
} }
if (lhs_mul->Arg(1).Resolve() != rhs_mul->Arg(1).Resolve()) {
return false;
}
const IR::U32 factor_b{lhs_mul->Arg(1)}; const IR::U32 factor_b{lhs_mul->Arg(1)};
if (lhs_mul->Arg(0).IsImmediate() || rhs_mul->Arg(0).IsImmediate()) { if (factor_b.Resolve() != rhs_mul->Arg(1).Resolve()) {
return false;
}
IR::Inst* const lhs_bfe{lhs_mul->Arg(0).TryInstRecursive()};
IR::Inst* const rhs_bfe{rhs_mul->Arg(0).TryInstRecursive()};
if (!lhs_bfe || !rhs_bfe) {
return false; return false;
} }
IR::Inst* const lhs_bfe{lhs_mul->Arg(0).InstRecursive()};
IR::Inst* const rhs_bfe{rhs_mul->Arg(0).InstRecursive()};
if (lhs_bfe->GetOpcode() != IR::Opcode::BitFieldUExtract) { if (lhs_bfe->GetOpcode() != IR::Opcode::BitFieldUExtract) {
return false; return false;
} }
@ -155,10 +153,10 @@ bool FoldXmadMultiply(IR::Block& block, IR::Inst& inst) {
if (rhs_bfe->Arg(1) != IR::Value{0U} || rhs_bfe->Arg(2) != IR::Value{16U}) { if (rhs_bfe->Arg(1) != IR::Value{0U} || rhs_bfe->Arg(2) != IR::Value{16U}) {
return false; return false;
} }
if (lhs_bfe->Arg(0).Resolve() != rhs_bfe->Arg(0).Resolve()) { const IR::U32 factor_a{lhs_bfe->Arg(0)};
if (factor_a.Resolve() != rhs_bfe->Arg(0).Resolve()) {
return false; return false;
} }
const IR::U32 factor_a{lhs_bfe->Arg(0)};
IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)}; IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
inst.ReplaceUsesWith(ir.IMul(factor_a, factor_b)); inst.ReplaceUsesWith(ir.IMul(factor_a, factor_b));
return true; return true;