Add a primitive for SubBytes + ShiftRows + MixColumns + AddRoundKey.

Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 host/include/generic/host/aes-round.h |  4 ++
 include/crypto/aes-round.h            | 21 ++++++++++
 crypto/aes.c                          | 56 +++++++++++++++++++++++++++
 3 files changed, 81 insertions(+)
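[Note, not part of the patch: the new helper relies on the usual
table-driven AES formulation, where AES_Te0[x] is the big-endian word
{02.S[x], 01.S[x], 01.S[x], 03.S[x]} and AES_Te1..AES_Te3 are byte
rotations of it, so one lookup per state byte folds SubBytes and
MixColumns together.  A minimal sketch of that encoding follows;
xtime() and te0() are illustrative names, not identifiers from this
patch or from crypto/aes.c.

#include <stdint.h>

/* Multiply by {02} in GF(2^8) modulo the AES polynomial 0x11b. */
static uint8_t xtime(uint8_t x)
{
    return (x << 1) ^ ((x & 0x80) ? 0x1b : 0);
}

/* Big-endian packing {02.s, 01.s, 01.s, 03.s}, with s = SBOX[byte]. */
static uint32_t te0(uint8_t s)
{
    uint8_t s2 = xtime(s);        /* 02.s */
    uint8_t s3 = s2 ^ s;          /* 03.s = 02.s ^ 01.s */

    return ((uint32_t)s2 << 24) | ((uint32_t)s << 16) |
           ((uint32_t)s << 8) | s3;
}
]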
diff --git a/host/include/generic/host/aes-round.h b/host/include/generic/host/aes-round.h
index 1e9b97d274..dc2c751ac3 100644
--- a/host/include/generic/host/aes-round.h
+++ b/host/include/generic/host/aes-round.h
@@ -15,6 +15,10 @@ void aesenc_MC_accel(AESState *, const AESState *, bool)
 void aesenc_SB_SR_accel(AESState *, const AESState *, bool)
     QEMU_ERROR("unsupported accel");
 
+void aesenc_SB_SR_MC_AK_accel(AESState *, const AESState *,
+                              const AESState *, bool)
+    QEMU_ERROR("unsupported accel");
+
 void aesdec_IMC_accel(AESState *, const AESState *, bool)
     QEMU_ERROR("unsupported accel");
 
diff --git a/include/crypto/aes-round.h b/include/crypto/aes-round.h
index 2d962ede0b..aefa17fcc3 100644
--- a/include/crypto/aes-round.h
+++ b/include/crypto/aes-round.h
@@ -56,6 +56,27 @@ static inline void aesenc_MC(AESState *r, const AESState *st, bool be)
     }
 }
 
+/*
+ * Perform SubBytes + ShiftRows + MixColumns + AddRoundKey.
+ */
+
+void aesenc_SB_SR_MC_AK_gen(AESState *ret, const AESState *st,
+                            const AESState *rk);
+void aesenc_SB_SR_MC_AK_genrev(AESState *ret, const AESState *st,
+                               const AESState *rk);
+
+static inline void aesenc_SB_SR_MC_AK(AESState *r, const AESState *st,
+                                      const AESState *rk, bool be)
+{
+    if (HAVE_AES_ACCEL) {
+        aesenc_SB_SR_MC_AK_accel(r, st, rk, be);
+    } else if (HOST_BIG_ENDIAN == be) {
+        aesenc_SB_SR_MC_AK_gen(r, st, rk);
+    } else {
+        aesenc_SB_SR_MC_AK_genrev(r, st, rk);
+    }
+}
+
 /*
  * Perform InvSubBytes + InvShiftRows.
  */
diff --git a/crypto/aes.c b/crypto/aes.c
index 4e654e5404..6172495b46 100644
--- a/crypto/aes.c
+++ b/crypto/aes.c
@@ -1356,6 +1356,62 @@ void aesenc_MC_genrev(AESState *r, const AESState *st)
     aesenc_MC_swap(r, st, true);
 }
 
+/* Perform SubBytes + ShiftRows + MixColumns + AddRoundKey. */
+static inline void
+aesenc_SB_SR_MC_AK_swap(AESState *r, const AESState *st,
+                        const AESState *rk, bool swap)
+{
+    int swap_b = swap * 0xf;
+    int swap_w = swap * 0x3;
+    bool be = HOST_BIG_ENDIAN ^ swap;
+    uint32_t w0, w1, w2, w3;
+
+    w0 = (AES_Te0[st->b[swap_b ^ AES_SH_0]] ^
+          AES_Te1[st->b[swap_b ^ AES_SH_1]] ^
+          AES_Te2[st->b[swap_b ^ AES_SH_2]] ^
+          AES_Te3[st->b[swap_b ^ AES_SH_3]]);
+
+    w1 = (AES_Te0[st->b[swap_b ^ AES_SH_4]] ^
+          AES_Te1[st->b[swap_b ^ AES_SH_5]] ^
+          AES_Te2[st->b[swap_b ^ AES_SH_6]] ^
+          AES_Te3[st->b[swap_b ^ AES_SH_7]]);
+
+    w2 = (AES_Te0[st->b[swap_b ^ AES_SH_8]] ^
+          AES_Te1[st->b[swap_b ^ AES_SH_9]] ^
+          AES_Te2[st->b[swap_b ^ AES_SH_A]] ^
+          AES_Te3[st->b[swap_b ^ AES_SH_B]]);
+
+    w3 = (AES_Te0[st->b[swap_b ^ AES_SH_C]] ^
+          AES_Te1[st->b[swap_b ^ AES_SH_D]] ^
+          AES_Te2[st->b[swap_b ^ AES_SH_E]] ^
+          AES_Te3[st->b[swap_b ^ AES_SH_F]]);
+
+    /* Note that AES_TeX is encoded for big-endian. */
+    if (!be) {
+        w0 = bswap32(w0);
+        w1 = bswap32(w1);
+        w2 = bswap32(w2);
+        w3 = bswap32(w3);
+    }
+
+    r->w[swap_w ^ 0] = rk->w[swap_w ^ 0] ^ w0;
+    r->w[swap_w ^ 1] = rk->w[swap_w ^ 1] ^ w1;
+    r->w[swap_w ^ 2] = rk->w[swap_w ^ 2] ^ w2;
+    r->w[swap_w ^ 3] = rk->w[swap_w ^ 3] ^ w3;
+}
+
+void aesenc_SB_SR_MC_AK_gen(AESState *r, const AESState *st,
+                            const AESState *rk)
+{
+    aesenc_SB_SR_MC_AK_swap(r, st, rk, false);
+}
+
+void aesenc_SB_SR_MC_AK_genrev(AESState *r, const AESState *st,
+                               const AESState *rk)
+{
+    aesenc_SB_SR_MC_AK_swap(r, st, rk, true);
+}
+
 /* Perform InvSubBytes + InvShiftRows. */
 static inline void
 aesdec_ISB_ISR_swap(AESState *r, const AESState *st, bool swap)
-- 
2.34.1
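[Usage sketch, not part of the patch: how a caller might chain the new
primitive for the middle rounds of AES-128.  The 11-entry key schedule
layout, the in-place call (safe for the generic path, which reads all
of st before writing r), and the helper name aes128_middle_rounds are
assumptions for illustration only; the initial AddRoundKey and the
final round, which skips MixColumns, still need separate steps.

static void aes128_middle_rounds(AESState *st, const AESState rk[11],
                                 bool be)
{
    /* Rounds 1..9: SubBytes + ShiftRows + MixColumns + AddRoundKey. */
    for (int i = 1; i < 10; i++) {
        aesenc_SB_SR_MC_AK(st, st, &rk[i], be);
    }
}
]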