Richard Henderson <richard.hender...@linaro.org> writes:

> Previously this was only supported for roundAndPackFloat64.
> Include support in round_canonical, round_to_int, roundAndPackFloat32,
> roundAndPackInt32, roundAndPackInt64, roundAndPackUint64.

If we extend the fp-test case we can exercise these routines:

  ./fp-test f16_roundToInt f32_roundToInt f64_roundToInt -r odd

>> Testing f16_roundToInt, rounding odd, exact
Not implemented.
>> Testing f32_roundToInt, rounding odd, exact
Not implemented.
>> Testing f64_roundToInt, rounding odd, exact
Not implemented.

See 4cb780920 for an example...
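
As an aside for anyone who hasn't met the mode before: round-to-odd
(a.k.a. von Neumann rounding or "jamming") truncates and then forces
the low bit of the result to 1 whenever anything non-zero was
discarded, which is why the new cases in the patch all test the lsb of
the result. It also never rounds up to infinity on overflow, since the
largest finite value already ends in an odd lsb, which is what the
overflow_norm / overflow_to_inf handling in the float32 path is about.
A rough standalone sketch of the idea (made-up names, not the softfloat
code itself):

  #include <stdint.h>
  #include <stdbool.h>

  /* Discard the low 'guard_bits' (0 < guard_bits < 64) of 'sig' using
   * round-to-odd: truncate, and if any discarded bit was set, jam the
   * result's low bit to 1 and flag the result as inexact. */
  static uint64_t round_to_odd_sketch(uint64_t sig, unsigned guard_bits,
                                      bool *inexact)
  {
      uint64_t discarded = sig & ((1ULL << guard_bits) - 1);
      uint64_t res = sig >> guard_bits;

      *inexact = discarded != 0;
      if (discarded) {
          res |= 1;
      }
      return res;
  }

The roundAndPackInt32/Float32 cases get the same effect without a
branch: when the lsb (0x80) is clear they add the whole round mask
(0x7f), so any non-zero round bits carry into the lsb before the final
shift.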

>
> This does not include any of the floatx80 routines, as we
> do not have users for that rounding mode there.
>
> Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
> ---
>
> David, if you could test this vs your s390 patches, vs real s390
> hardware, that would be lovely.
>
>
> r~
>
> ---
>  fpu/softfloat.c | 40 ++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 38 insertions(+), 2 deletions(-)
>
> diff --git a/fpu/softfloat.c b/fpu/softfloat.c
> index 9132d7a0b0..325c6e4e79 100644
> --- a/fpu/softfloat.c
> +++ b/fpu/softfloat.c
> @@ -696,6 +696,7 @@ static FloatParts sf_canonicalize(FloatParts part, const FloatFmt *parm,
>  static FloatParts round_canonical(FloatParts p, float_status *s,
>                                    const FloatFmt *parm)
>  {
> +    const uint64_t frac_lsb = parm->frac_lsb;
>      const uint64_t frac_lsbm1 = parm->frac_lsbm1;
>      const uint64_t round_mask = parm->round_mask;
>      const uint64_t roundeven_mask = parm->roundeven_mask;
> @@ -731,6 +732,10 @@ static FloatParts round_canonical(FloatParts p, float_status *s,
>              inc = p.sign ? round_mask : 0;
>              overflow_norm = !p.sign;
>              break;
> +        case float_round_to_odd:
> +            overflow_norm = true;
> +            inc = frac & frac_lsb ? 0 : frac_lsbm1;
> +            break;
>          default:
>              g_assert_not_reached();
>          }
> @@ -778,9 +783,14 @@ static FloatParts round_canonical(FloatParts p, float_status *s,
>              shift64RightJamming(frac, 1 - exp, &frac);
>              if (frac & round_mask) {
>                  /* Need to recompute round-to-even. */
> -                if (s->float_rounding_mode == float_round_nearest_even) {
> +                switch (s->float_rounding_mode) {
> +                case float_round_nearest_even:
>                      inc = ((frac & roundeven_mask) != frac_lsbm1
>                             ? frac_lsbm1 : 0);
> +                    break;
> +                case float_round_to_odd:
> +                    inc = frac & frac_lsb ? 0 : frac_lsbm1;
> +                    break;
>                  }
>                  flags |= float_flag_inexact;
>                  frac += inc;
> @@ -1988,6 +1998,9 @@ static FloatParts round_to_int(FloatParts a, int rmode,
>              case float_round_down:
>                  one = a.sign;
>                  break;
> +            case float_round_to_odd:
> +                one = true;
> +                break;
>              default:
>                  g_assert_not_reached();
>              }
> @@ -2021,6 +2034,9 @@ static FloatParts round_to_int(FloatParts a, int rmode,
>              case float_round_down:
>                  inc = a.sign ? rnd_mask : 0;
>                  break;
> +            case float_round_to_odd:
> +                inc = a.frac & frac_lsb ? 0 : frac_lsbm1;
> +                break;
>              default:
>                  g_assert_not_reached();
>              }
> @@ -3314,6 +3330,9 @@ static int32_t roundAndPackInt32(flag zSign, uint64_t absZ, float_status *status
>      case float_round_down:
>          roundIncrement = zSign ? 0x7f : 0;
>          break;
> +    case float_round_to_odd:
> +        roundIncrement = absZ & 0x80 ? 0 : 0x7f;
> +        break;
>      default:
>          abort();
>      }
> @@ -3368,6 +3387,9 @@ static int64_t roundAndPackInt64(flag zSign, uint64_t absZ0, uint64_t absZ1,
>      case float_round_down:
>          increment = zSign && absZ1;
>          break;
> +    case float_round_to_odd:
> +        increment = !(absZ0 & 1) && absZ1;
> +        break;
>      default:
>          abort();
>      }
> @@ -3424,6 +3446,9 @@ static int64_t roundAndPackUint64(flag zSign, uint64_t absZ0,
>      case float_round_down:
>          increment = zSign && absZ1;
>          break;
> +    case float_round_to_odd:
> +        increment = !(absZ0 & 1) && absZ1;
> +        break;
>      default:
>          abort();
>      }
> @@ -3526,6 +3551,8 @@ static float32 roundAndPackFloat32(flag zSign, int zExp, uint32_t zSig,
>      case float_round_down:
>          roundIncrement = zSign ? 0x7f : 0;
>          break;
> +    case float_round_to_odd:
> +        roundIncrement = zSig & 0x80 ? 0 : 0x7f;
>      default:
>          abort();
>          break;
> @@ -3536,8 +3563,10 @@ static float32 roundAndPackFloat32(flag zSign, int zExp, uint32_t zSig,
>               || (    ( zExp == 0xFD )
>                    && ( (int32_t) ( zSig + roundIncrement ) < 0 ) )
>             ) {
> +            bool overflow_to_inf = roundingMode != float_round_to_odd &&
> +                                   roundIncrement != 0;
>              float_raise(float_flag_overflow | float_flag_inexact, status);
> -            return packFloat32( zSign, 0xFF, - ( roundIncrement == 0 ));
> +            return packFloat32(zSign, 0xFF, -!overflow_to_inf);
>          }
>          if ( zExp < 0 ) {
>              if (status->flush_to_zero) {
> @@ -3555,6 +3584,13 @@ static float32 roundAndPackFloat32(flag zSign, int zExp, uint32_t zSig,
>              if (isTiny && roundBits) {
>                  float_raise(float_flag_underflow, status);
>              }
> +            if (roundingMode == float_round_to_odd) {
> +                /*
> +                 * For round-to-odd case, the roundIncrement depends on
> +                 * zSig which just changed.
> +                 */
> +                roundIncrement = zSig & 0x80 ? 0 : 0x7f;
> +            }
>          }
>      }
>      if (roundBits) {

--
Alex Bennée