Richard Henderson <richard.hender...@linaro.org> writes:
> Have x86_64 assembly for them, with a fallback. > This avoids shuffling values through %cl in the x86 case. > > Signed-off-by: Richard Henderson <richard.hender...@linaro.org> > --- > include/fpu/softfloat-macros.h | 36 ++++++++++++ > fpu/softfloat.c | 102 +++++++++++++++++++++++++-------- > 2 files changed, 115 insertions(+), 23 deletions(-) > > diff --git a/include/fpu/softfloat-macros.h b/include/fpu/softfloat-macros.h > index 672c1db555..ec4e27a595 100644 > --- a/include/fpu/softfloat-macros.h > +++ b/include/fpu/softfloat-macros.h > @@ -85,6 +85,42 @@ this code that are retained. > #include "fpu/softfloat-types.h" > #include "qemu/host-utils.h" > > +/** > + * shl_double: double-word merging left shift > + * @l: left or most-significant word > + * @r: right or least-significant word > + * @c: shift count > + * > + * Shift @l left by @c bits, shifting in bits from @r. > + */ > +static inline uint64_t shl_double(uint64_t l, uint64_t r, int c) > +{ > +#if defined(__x86_64__) > + asm("shld %b2, %1, %0" : "+r"(l) : "r"(r), "ci"(c)); > + return l; > +#else > + return c ? (l << c) | (r >> (64 - c)) : l; > +#endif > +} > + > +/** > + * shr_double: double-word merging right shift > + * @l: left or most-significant word > + * @r: right or least-significant word > + * @c: shift count > + * > + * Shift @r right by @c bits, shifting in bits from @l. > + */ > +static inline uint64_t shr_double(uint64_t l, uint64_t r, int c) > +{ > +#if defined(__x86_64__) > + asm("shrd %b2, %1, %0" : "+r"(r) : "r"(l), "ci"(c)); > + return r; > +#else > + return c ? (r >> c) | (l << (64 - c)) : r; > +#endif > +} > + I was pondering whether these deserve to live in bitops, but given that they are softfloat-only for the time being and we don't do arch-specific hacks in there: Reviewed-by: Alex Bennée <alex.ben...@linaro.org> -- Alex Bennée