yuzu-emu/yuzu-android

Fix exception propagation for VFP single precision

Jannik Vogel 2016-05-16 11:11:16 +02:00
parent 7dde13f875
commit 693cca8f1f
2 changed files with 38 additions and 33 deletions
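
This fix changes how the single-precision VFP helpers report floating-point exceptions: instead of writing flags such as FPSCR_IDC into the caller's FPSCR through a u32* out-parameter, vfp_single_unpack now takes fpscr by value and returns a bitmask of exception flags, which each caller folds into a local exceptions accumulator with |=. A minimal sketch of the new style, using simplified stand-ins for the emulator's types (the bit positions follow the ARM FPSCR layout, FZ = bit 24 and IDC = bit 7, but the helper itself is illustrative, not the real code):

    #include <cassert>
    #include <cstdint>

    using u32 = std::uint32_t;

    constexpr u32 FPSCR_FLUSH_TO_ZERO = 1u << 24; // FZ control bit
    constexpr u32 FPSCR_IDC = 1u << 7;            // input-denormal cumulative flag

    // After the change: fpscr is read-only; exception bits travel via the
    // return value instead of being written through a u32* out-parameter.
    u32 unpack_operand(u32 fpscr, bool is_denormal) {
        u32 exceptions = 0;
        if ((fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && is_denormal)
            exceptions |= FPSCR_IDC; // a denormal input was flushed to zero
        return exceptions;
    }

    int main() {
        const u32 fpscr = FPSCR_FLUSH_TO_ZERO;
        u32 exceptions = 0;
        exceptions |= unpack_operand(fpscr, true);  // operand n: denormal
        exceptions |= unpack_operand(fpscr, false); // operand m: normal
        assert(exceptions == FPSCR_IDC);            // flag reported, FPSCR untouched
    }

The first changed file introduces the new signature; the second updates every call site.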


@@ -271,8 +271,9 @@ inline int vfp_single_type(const vfp_single* s)
 
 // Unpack a single-precision float. Note that this returns the magnitude
 // of the single-precision float mantissa with the 1. if necessary,
 // aligned to bit 30.
-inline void vfp_single_unpack(vfp_single* s, s32 val, u32* fpscr)
+inline u32 vfp_single_unpack(vfp_single* s, s32 val, u32 fpscr)
 {
+    u32 exceptions = 0;
     s->sign = vfp_single_packed_sign(val) >> 16,
     s->exponent = vfp_single_packed_exponent(val);
@@ -283,12 +284,13 @@ inline void vfp_single_unpack(vfp_single* s, s32 val, u32* fpscr)
 
     // If flush-to-zero mode is enabled, turn the denormal into zero.
     // On a VFPv2 architecture, the sign of the zero is always positive.
-    if ((*fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && (vfp_single_type(s) & VFP_DENORMAL) != 0) {
+    if ((fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && (vfp_single_type(s) & VFP_DENORMAL) != 0) {
         s->sign = 0;
         s->exponent = 0;
         s->significand = 0;
-        *fpscr |= FPSCR_IDC;
+        exceptions |= FPSCR_IDC;
     }
+    return exceptions;
 }
 
 // Re-pack a single-precision float. This assumes that the float is
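
The remaining hunks are in the second changed file, which updates every caller. The recurring one-character fix, replacing exceptions = ... with exceptions |= ..., matters because plain assignment silently discards flags collected earlier in the same operation. A small self-contained demonstration of the difference (bit values as above, purely illustrative):

    #include <cassert>
    #include <cstdint>

    using u32 = std::uint32_t;

    constexpr u32 FPSCR_IOC = 1u << 0; // invalid-operation cumulative flag
    constexpr u32 FPSCR_IDC = 1u << 7; // input-denormal cumulative flag

    int main() {
        // Old style: assignment clobbers flags collected earlier.
        u32 old_style = FPSCR_IDC;      // set while unpacking a flushed denormal
        old_style = FPSCR_IOC;          // a signalling NaN is found later...
        assert(old_style == FPSCR_IOC); // ...and the IDC flag is lost

        // New style: OR-accumulation keeps every flag that was raised.
        u32 new_style = FPSCR_IDC;
        new_style |= FPSCR_IOC;
        assert(new_style == (FPSCR_IDC | FPSCR_IOC));
    }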


@@ -334,8 +334,9 @@ static u32 vfp_single_fsqrt(ARMul_State* state, int sd, int unused, s32 m, u32 f
 {
     struct vfp_single vsm, vsd, *vsp;
     int ret, tm;
+    u32 exceptions = 0;
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     tm = vfp_single_type(&vsm);
     if (tm & (VFP_NAN|VFP_INFINITY)) {
         vsp = &vsd;
@@ -408,7 +409,9 @@ sqrt_invalid:
     }
 
     vsd.significand = vfp_shiftright32jamming(vsd.significand, 1);
-    return vfp_single_normaliseround(state, sd, &vsd, fpscr, 0, "fsqrt");
+    exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, 0, "fsqrt");
+
+    return exceptions;
 }
 
 /*
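
In fsqrt the same leak occurred on the return path: returning the result of vfp_single_normaliseround directly threw away anything vfp_single_unpack had already reported (for example FPSCR_IDC for a flushed denormal operand). A sketch of the fixed shape, with trivial stand-ins for the two helpers:

    #include <cassert>
    #include <cstdint>

    using u32 = std::uint32_t;

    constexpr u32 FPSCR_IDC = 1u << 7; // input-denormal cumulative flag
    constexpr u32 FPSCR_IXC = 1u << 4; // inexact cumulative flag

    // Trivial stand-ins for vfp_single_unpack and vfp_single_normaliseround.
    u32 unpack() { return FPSCR_IDC; }          // pretend the operand was flushed
    u32 normalise_round() { return FPSCR_IXC; } // pretend rounding was inexact

    u32 fsqrt_like() {
        u32 exceptions = 0;
        exceptions |= unpack();          // flags from unpacking the operand
        // ... square-root computation would go here ...
        exceptions |= normalise_round(); // flags from rounding the result
        return exceptions;               // old code returned only this last set
    }

    int main() {
        assert(fsqrt_like() == (FPSCR_IDC | FPSCR_IXC));
    }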
@@ -503,7 +506,7 @@ static u32 vfp_single_fcvtd(ARMul_State* state, int dd, int unused, s32 m, u32 f
     int tm;
     u32 exceptions = 0;
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
 
     tm = vfp_single_type(&vsm);
@@ -511,7 +514,7 @@ static u32 vfp_single_fcvtd(ARMul_State* state, int dd, int unused, s32 m, u32 f
      * If we have a signalling NaN, signal invalid operation.
      */
     if (tm == VFP_SNAN)
-        exceptions = FPSCR_IOC;
+        exceptions |= FPSCR_IOC;
 
     if (tm & VFP_DENORMAL)
         vfp_single_normalise_denormal(&vsm);
@@ -568,7 +571,7 @@ static u32 vfp_single_ftoui(ARMul_State* state, int sd, int unused, s32 m, u32 f
     int rmode = fpscr & FPSCR_RMODE_MASK;
     int tm;
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     vfp_single_dump("VSM", &vsm);
 
     /*
@@ -583,7 +586,7 @@ static u32 vfp_single_ftoui(ARMul_State* state, int sd, int unused, s32 m, u32 f
     if (vsm.exponent >= 127 + 32) {
         d = vsm.sign ? 0 : 0xffffffff;
-        exceptions = FPSCR_IOC;
+        exceptions |= FPSCR_IOC;
     } else if (vsm.exponent >= 127) {
         int shift = 127 + 31 - vsm.exponent;
         u32 rem, incr = 0;
@@ -648,7 +651,7 @@ static u32 vfp_single_ftosi(ARMul_State* state, int sd, int unused, s32 m, u32 f
     int rmode = fpscr & FPSCR_RMODE_MASK;
     int tm;
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     vfp_single_dump("VSM", &vsm);
 
     /*
@@ -774,7 +777,7 @@ vfp_single_fadd_nonnumber(struct vfp_single *vsd, struct vfp_single *vsn,
         /*
          * different signs -> invalid
          */
-        exceptions = FPSCR_IOC;
+        exceptions |= FPSCR_IOC;
         vsp = &vfp_single_default_qnan;
     } else {
         /*
@@ -921,27 +924,27 @@ static u32
 vfp_single_multiply_accumulate(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr, u32 negate, const char *func)
 {
     vfp_single vsd, vsp, vsn, vsm;
-    u32 exceptions;
+    u32 exceptions = 0;
     s32 v;
 
     v = vfp_get_float(state, sn);
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, v);
 
-    vfp_single_unpack(&vsn, v, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, v, fpscr);
     if (vsn.exponent == 0 && vsn.significand)
         vfp_single_normalise_denormal(&vsn);
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     if (vsm.exponent == 0 && vsm.significand)
         vfp_single_normalise_denormal(&vsm);
 
-    exceptions = vfp_single_multiply(&vsp, &vsn, &vsm, fpscr);
+    exceptions |= vfp_single_multiply(&vsp, &vsn, &vsm, fpscr);
     if (negate & NEG_MULTIPLY)
         vsp.sign = vfp_sign_negate(vsp.sign);
 
     v = vfp_get_float(state, sd);
     LOG_TRACE(Core_ARM11, "s%u = %08x", sd, v);
 
-    vfp_single_unpack(&vsn, v, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, v, fpscr);
     if (vsn.exponent == 0 && vsn.significand != 0)
         vfp_single_normalise_denormal(&vsn);
@@ -1000,20 +1003,20 @@ static u32 vfp_single_fnmsc(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr
 static u32 vfp_single_fmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 {
     struct vfp_single vsd, vsn, vsm;
-    u32 exceptions;
+    u32 exceptions = 0;
     s32 n = vfp_get_float(state, sn);
 
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
 
-    vfp_single_unpack(&vsn, n, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, n, fpscr);
     if (vsn.exponent == 0 && vsn.significand)
         vfp_single_normalise_denormal(&vsn);
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     if (vsm.exponent == 0 && vsm.significand)
         vfp_single_normalise_denormal(&vsm);
 
-    exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
+    exceptions |= vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
     return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fmul");
 }
@@ -1023,20 +1026,20 @@ static u32 vfp_single_fmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 static u32 vfp_single_fnmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 {
     struct vfp_single vsd, vsn, vsm;
-    u32 exceptions;
+    u32 exceptions = 0;
     s32 n = vfp_get_float(state, sn);
 
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
 
-    vfp_single_unpack(&vsn, n, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, n, fpscr);
     if (vsn.exponent == 0 && vsn.significand)
         vfp_single_normalise_denormal(&vsn);
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     if (vsm.exponent == 0 && vsm.significand)
         vfp_single_normalise_denormal(&vsm);
 
-    exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
+    exceptions |= vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
     vsd.sign = vfp_sign_negate(vsd.sign);
     return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fnmul");
 }
@@ -1047,7 +1050,7 @@ static u32 vfp_single_fnmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr
 static u32 vfp_single_fadd(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 {
     struct vfp_single vsd, vsn, vsm;
-    u32 exceptions;
+    u32 exceptions = 0;
     s32 n = vfp_get_float(state, sn);
 
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
@@ -1055,15 +1058,15 @@ static u32 vfp_single_fadd(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
     /*
      * Unpack and normalise denormals.
      */
-    vfp_single_unpack(&vsn, n, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, n, fpscr);
     if (vsn.exponent == 0 && vsn.significand)
         vfp_single_normalise_denormal(&vsn);
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     if (vsm.exponent == 0 && vsm.significand)
         vfp_single_normalise_denormal(&vsm);
 
-    exceptions = vfp_single_add(&vsd, &vsn, &vsm, fpscr);
+    exceptions |= vfp_single_add(&vsd, &vsn, &vsm, fpscr);
 
     return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fadd");
 }
@@ -1095,8 +1098,8 @@ static u32 vfp_single_fdiv(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
 
-    vfp_single_unpack(&vsn, n, &fpscr);
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, n, fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
 
     vsd.sign = vsn.sign ^ vsm.sign;
@@ -1165,13 +1168,13 @@ static u32 vfp_single_fdiv(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
     return vfp_single_normaliseround(state, sd, &vsd, fpscr, 0, "fdiv");
 
 vsn_nan:
-    exceptions = vfp_propagate_nan(&vsd, &vsn, &vsm, fpscr);
+    exceptions |= vfp_propagate_nan(&vsd, &vsn, &vsm, fpscr);
 pack:
     vfp_put_float(state, vfp_single_pack(&vsd), sd);
     return exceptions;
 
 vsm_nan:
-    exceptions = vfp_propagate_nan(&vsd, &vsm, &vsn, fpscr);
+    exceptions |= vfp_propagate_nan(&vsd, &vsm, &vsn, fpscr);
     goto pack;
 
 zero:
@@ -1180,7 +1183,7 @@ zero:
     goto pack;
 
 divzero:
-    exceptions = FPSCR_DZC;
+    exceptions |= FPSCR_DZC;
 infinity:
     vsd.exponent = 255;
     vsd.significand = 0;
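
With every operation now returning the union of its flags, applying them to the emulated FPSCR can happen in one place in the dispatch code, which is not part of this diff. A minimal sketch of that final step, using a hypothetical apply_exceptions helper (the name and signature are illustrative, not the emulator's real API):

    #include <cstdint>
    #include <cstdio>

    using u32 = std::uint32_t;

    constexpr u32 FPSCR_IOC = 1u << 0; // invalid-operation cumulative flag
    constexpr u32 FPSCR_DZC = 1u << 1; // division-by-zero cumulative flag

    // Hypothetical final step: fold a returned mask into the emulated FPSCR.
    // Cumulative flags are sticky, so they are only ever set here, never cleared.
    void apply_exceptions(u32& fpscr, u32 exceptions) {
        fpscr |= exceptions;
    }

    int main() {
        u32 fpscr = 0;
        apply_exceptions(fpscr, FPSCR_IOC | FPSCR_DZC); // e.g. returned by fdiv
        std::printf("fpscr = %#x\n", fpscr);            // both sticky bits now set
    }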