On Sat, 19 Aug 2023 at 03:02, Richard Henderson
<richard.hender...@linaro.org> wrote:
>
> Signed-off-by: Richard Henderson <richard.hender...@linaro.org>

Reviewed-by: Ard Biesheuvel <a...@kernel.org>

> ---
>  include/crypto/clmul.h |  7 +++++++
>  crypto/clmul.c         | 13 +++++++++++++
>  2 files changed, 20 insertions(+)
>
> diff --git a/include/crypto/clmul.h b/include/crypto/clmul.h
> index c7ad28aa85..0ea25a252c 100644
> --- a/include/crypto/clmul.h
> +++ b/include/crypto/clmul.h
> @@ -54,4 +54,11 @@ uint64_t clmul_16x2_even(uint64_t, uint64_t);
>   */
>  uint64_t clmul_16x2_odd(uint64_t, uint64_t);
>
> +/**
> + * clmul_32:
> + *
> + * Perform a 32x32->64 carry-less multiply.
> + */
> +uint64_t clmul_32(uint32_t, uint32_t);
> +
>  #endif /* CRYPTO_CLMUL_H */
> diff --git a/crypto/clmul.c b/crypto/clmul.c
> index 2c87cfbf8a..36ada1be9d 100644
> --- a/crypto/clmul.c
> +++ b/crypto/clmul.c
> @@ -79,3 +79,16 @@ uint64_t clmul_16x2_odd(uint64_t n, uint64_t m)
>  {
>      return clmul_16x2_even(n >> 16, m >> 16);
>  }
> +
> +uint64_t clmul_32(uint32_t n, uint32_t m32)
> +{
> +    uint64_t r = 0;
> +    uint64_t m = m32;
> +
> +    for (int i = 0; i < 32; ++i) {
> +        r ^= n & 1 ? m : 0;
> +        n >>= 1;
> +        m <<= 1;
> +    }
> +    return r;
> +}
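
For anyone else double-checking the generic fallback, here is a minimal
sanity-check sketch (my own illustration, not part of the patch) of the
behaviour I expect from the loop above, assuming clmul_32() as declared
in the new header:

    #include <assert.h>
    #include <stdint.h>
    #include "crypto/clmul.h"

    int main(void)
    {
        /* 0b101 clmul 0b011 = (0b11 << 2) ^ 0b11 = 0b1111 */
        assert(clmul_32(5, 3) == 0xf);
        /* bit 31 times bit 31 lands in bit 62, with no carries */
        assert(clmul_32(0x80000000u, 0x80000000u) == 1ull << 62);
        return 0;
    }

Each set bit of n XORs in a shifted copy of m, so the result is the
polynomial product over GF(2), which matches what the 8x8 and 16x2
helpers earlier in this file already do.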
> --
> 2.34.1
>