Use generic routines for 8-bit carry-less multiply.

Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 target/ppc/int_helper.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)
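helper_vpmsumb forms, for each halfword of the result, the XOR of the
carry-less products of its even and odd byte pairs; clmul_8x8_even/odd
from crypto/clmul.h compute those per-byte products across the whole
vector.  As a rough scalar sketch of what a single 8-bit carry-less
multiply does (the helper name clmul_8x1_ref below is hypothetical and
not part of this patch or of crypto/clmul.h):

#include <stdint.h>

/* Hypothetical reference: carry-less multiply of two 8-bit values,
 * i.e. multiplication of polynomials over GF(2): shifted copies of
 * 'a' are XORed (added without carries) for each set bit of 'b'. */
static inline uint16_t clmul_8x1_ref(uint8_t a, uint8_t b)
{
    uint16_t r = 0;

    for (int i = 0; i < 8; i++) {
        if (b & (1u << i)) {
            r ^= (uint16_t)a << i;
        }
    }
    return r;
}

The generic routines apply this per byte lane, with the even/odd split
matching vpmsumb's pairing of adjacent bytes into halfword products.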
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index 834da80fe3..3bf0f5dbe5 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -26,6 +26,7 @@
 #include "exec/helper-proto.h"
 #include "crypto/aes.h"
 #include "crypto/aes-round.h"
+#include "crypto/clmul.h"
 #include "fpu/softfloat.h"
 #include "qapi/error.h"
 #include "qemu/guest-random.h"
@@ -1425,6 +1426,15 @@ void helper_vbpermq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
 #undef VBPERMQ_INDEX
 #undef VBPERMQ_DW
 
+void helper_vpmsumb(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+    Int128 ia = a->s128;
+    Int128 ib = b->s128;
+    Int128 e = clmul_8x8_even(ia, ib);
+    Int128 o = clmul_8x8_odd(ia, ib);
+    r->s128 = int128_xor(e, o);
+}
+
 #define PMSUM(name, srcfld, trgfld, trgtyp)                   \
 void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
 {                                                             \
@@ -1445,7 +1455,6 @@ void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
     }                                                         \
 }
 
-PMSUM(vpmsumb, u8, u16, uint16_t)
 PMSUM(vpmsumh, u16, u32, uint32_t)
 PMSUM(vpmsumw, u32, u64, uint64_t)
 
-- 
2.34.1