On Mon, 21 Aug 2023 at 18:19, Richard Henderson
<richard.hender...@linaro.org> wrote:
>
> Signed-off-by: Richard Henderson <richard.hender...@linaro.org>

Reviewed-by: Ard Biesheuvel <a...@kernel.org>

> ---
>  host/include/generic/host/crypto/clmul.h | 15 +++++++++++++++
>  include/crypto/clmul.h                   | 19 +++++++++++++++++++
>  crypto/clmul.c                           | 18 ++++++++++++++++++
>  3 files changed, 52 insertions(+)
>  create mode 100644 host/include/generic/host/crypto/clmul.h
>
> diff --git a/host/include/generic/host/crypto/clmul.h b/host/include/generic/host/crypto/clmul.h
> new file mode 100644
> index 0000000000..915bfb88d3
> --- /dev/null
> +++ b/host/include/generic/host/crypto/clmul.h
> @@ -0,0 +1,15 @@
> +/*
> + * No host specific carry-less multiply acceleration.
> + * SPDX-License-Identifier: GPL-2.0-or-later
> + */
> +
> +#ifndef GENERIC_HOST_CRYPTO_CLMUL_H
> +#define GENERIC_HOST_CRYPTO_CLMUL_H
> +
> +#define HAVE_CLMUL_ACCEL false
> +#define ATTR_CLMUL_ACCEL
> +
> +Int128 clmul_64_accel(uint64_t, uint64_t)
> +    QEMU_ERROR("unsupported accel");
> +
> +#endif /* GENERIC_HOST_CRYPTO_CLMUL_H */
> diff --git a/include/crypto/clmul.h b/include/crypto/clmul.h
> index 0ea25a252c..c82d2d7559 100644
> --- a/include/crypto/clmul.h
> +++ b/include/crypto/clmul.h
> @@ -8,6 +8,9 @@
>  #ifndef CRYPTO_CLMUL_H
>  #define CRYPTO_CLMUL_H
>
> +#include "qemu/int128.h"
> +#include "host/crypto/clmul.h"
> +
>  /**
>   * clmul_8x8_low:
>   *
> @@ -61,4 +64,20 @@ uint64_t clmul_16x2_odd(uint64_t, uint64_t);
>   */
>  uint64_t clmul_32(uint32_t, uint32_t);
>
> +/**
> + * clmul_64:
> + *
> + * Perform a 64x64->128 carry-less multiply.
> + */
> +Int128 clmul_64_gen(uint64_t, uint64_t);
> +
> +static inline Int128 clmul_64(uint64_t a, uint64_t b)
> +{
> +    if (HAVE_CLMUL_ACCEL) {
> +        return clmul_64_accel(a, b);
> +    } else {
> +        return clmul_64_gen(a, b);
> +    }
> +}
> +
>  #endif /* CRYPTO_CLMUL_H */
> diff --git a/crypto/clmul.c b/crypto/clmul.c
> index 36ada1be9d..abf79cc49a 100644
> --- a/crypto/clmul.c
> +++ b/crypto/clmul.c
> @@ -92,3 +92,21 @@ uint64_t clmul_32(uint32_t n, uint32_t m32)
>      }
>      return r;
>  }
> +
> +Int128 clmul_64_gen(uint64_t n, uint64_t m)
> +{
> +    uint64_t rl = 0, rh = 0;
> +
> +    /* Bit 0 can only influence the low 64-bit result. */
> +    if (n & 1) {
> +        rl = m;
> +    }
> +
> +    for (int i = 1; i < 64; ++i) {
> +        n >>= 1;
> +        uint64_t mask = -(n & 1);
> +        rl ^= (m << i) & mask;
> +        rh ^= (m >> (64 - i)) & mask;
> +    }
> +    return int128_make128(rl, rh);
> +}
> --
> 2.34.1
>
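
For anyone who wants to convince themselves of the bit-serial loop in
clmul_64_gen, below is a rough standalone sketch (not part of the patch;
the helper name clmul64_ref and the test values are made up for
illustration) that reimplements the same loop without QEMU's Int128 type
and checks two small products in GF(2)[x]:

/*
 * Illustrative sketch only, not part of the patch: a standalone
 * reimplementation of the bit-serial 64x64->128 carry-less multiply,
 * plus two known products in GF(2)[x] as a sanity check.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void clmul64_ref(uint64_t n, uint64_t m, uint64_t *lo, uint64_t *hi)
{
    uint64_t rl = 0, rh = 0;

    /* Bit 0 of n can only contribute to the low 64 bits. */
    if (n & 1) {
        rl = m;
    }
    for (int i = 1; i < 64; ++i) {
        /* If bit i of n is set, xor in m shifted left by i bits. */
        n >>= 1;
        uint64_t mask = -(n & 1);
        rl ^= (m << i) & mask;
        rh ^= (m >> (64 - i)) & mask;
    }
    *lo = rl;
    *hi = rh;
}

int main(void)
{
    uint64_t lo, hi;

    /* x * x = x^2: 2 clmul 2 == 4. */
    clmul64_ref(2, 2, &lo, &hi);
    assert(lo == 4 && hi == 0);

    /* (x^63 + 1)(x + 1) = x^64 + x^63 + x + 1. */
    clmul64_ref(0x8000000000000001ull, 3, &lo, &hi);
    assert(hi == 1 && lo == 0x8000000000000003ull);

    printf("ok: 0x%016" PRIx64 "%016" PRIx64 "\n", hi, lo);
    return 0;
}

Using an all-ones/zero mask instead of a branch keeps the inner loop
branch-free; hosts that define HAVE_CLMUL_ACCEL skip clmul_64_gen
entirely and dispatch to clmul_64_accel instead.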