On 2/27/22 04:25, Weiwei Li wrote:
- add aes64dsm, aes64ds, aes64im, aes64es, aes64esm, aes64ks2, aes64ks1i
instructions
Co-authored-by: Ruibo Lu <luruibo2...@163.com>
Co-authored-by: Zewen Ye <lust...@foxmail.com>
Signed-off-by: Weiwei Li <liwei...@iscas.ac.cn>
Signed-off-by: Junqiang Wang <wangjunqi...@iscas.ac.cn>
---
target/riscv/crypto_helper.c | 136 ++++++++++++++++++++++++
target/riscv/helper.h | 8 ++
target/riscv/insn32.decode | 11 ++
target/riscv/insn_trans/trans_rvk.c.inc | 102 ++++++++++++++++++
4 files changed, 257 insertions(+)
diff --git a/target/riscv/crypto_helper.c b/target/riscv/crypto_helper.c
index f5a5909538..9e56668627 100644
--- a/target/riscv/crypto_helper.c
+++ b/target/riscv/crypto_helper.c
@@ -136,4 +136,140 @@ target_ulong HELPER(aes32dsi)(target_ulong rs1,
target_ulong rs2,
{
return aes32_operation(bs, rs1, rs2, false, false);
}
+
+static inline target_ulong aes64_operation(target_ulong rs1, target_ulong rs2,
+ bool enc, bool mix)
+{
+ uint64_t RS1 = rs1;
+ uint64_t RS2 = rs2;
+ uint64_t result;
+ uint64_t temp;
+ uint32_t col_0;
+ uint32_t col_1;
+
+ if (enc) {
+ temp = AES_SHIFROWS_LO(RS1, RS2);
Ah, those unused macros get used, and with the right type.
+target_ulong HELPER(aes64ks1i)(target_ulong rs1, target_ulong rnum)
+{
+ uint64_t RS1 = rs1;
+ uint8_t round_consts[10] = {
+ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36
+ };
Make round_consts `static const` so the table lives in rodata instead of being rebuilt on the stack on every call.
+ temp = (temp >> 8) | (temp << 24); /* Rotate right by 8 */
This is rol32(temp, 24) — use the rotate helper from "qemu/bitops.h" instead of open-coding it.
+/*
+ * All aes64* helpers are pure functions of their register operands:
+ * they neither read nor write TCG globals and have no side effects,
+ * so declare them with TCG_CALL_NO_RWG_SE to let TCG optimize around
+ * the calls.
+ */
+DEF_HELPER_FLAGS_2(aes64esm, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(aes64es, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(aes64ds, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(aes64dsm, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(aes64ks2, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(aes64ks1i, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_1(aes64im, TCG_CALL_NO_RWG_SE, tl, tl)
Use DEF_HELPER_FLAGS_* with TCG_CALL_NO_RWG_SE — these helpers neither read nor write TCG globals and have no side effects.
+%rnum 20:4
...
+aes64ks1i 00 11000 1.... ..... 001 ..... 0010011 %rnum %rs1 %rd
It is much better to put the field where it belongs,
especially for a one-off like this.
aes64ks1i 00 11000 1 rnum:4 rs1:5 001 rd:5 0010011
The whole of riscv needs a cleanup on this point.
+/*
+ * aes64esm: AES encrypt middle round (ShiftRows + SubBytes + MixColumns).
+ * Requires the Zkne extension. Reuse gen_arith() rather than open-coding
+ * the dest_gpr/get_gpr/gen_set_gpr plumbing for a two-source helper.
+ */
+static bool trans_aes64esm(DisasContext *ctx, arg_aes64esm *a)
+{
+    REQUIRE_ZKNE(ctx);
+    return gen_arith(ctx, a, EXT_NONE, gen_helper_aes64esm, NULL);
+}
...
+/*
+ * aes64es: AES encrypt final round (ShiftRows + SubBytes, no MixColumns).
+ * Requires the Zkne extension. Reuse gen_arith() rather than open-coding
+ * the dest_gpr/get_gpr/gen_set_gpr plumbing for a two-source helper.
+ */
+static bool trans_aes64es(DisasContext *ctx, arg_aes64es *a)
+{
+    REQUIRE_ZKNE(ctx);
+    return gen_arith(ctx, a, EXT_NONE, gen_helper_aes64es, NULL);
+}
gen_arith.
+static bool trans_aes64dsm(DisasContext *ctx, arg_aes64dsm *a)
+static bool trans_aes64ks2(DisasContext *ctx, arg_aes64ks2 *a)
+static bool trans_aes64ds(DisasContext *ctx, arg_aes64ds *a)
Likewise.
+/*
+ * aes64ks1i: AES key-schedule part 1, with immediate round number.
+ * Valid for either Zknd or Zkne (key schedule is shared by both).
+ */
+static bool trans_aes64ks1i(DisasContext *ctx, arg_aes64ks1i *a)
+{
+    REQUIRE_EITHER_EXT(ctx, zknd, zkne);
+
+    /* Round numbers 0xB..0xF are reserved: decode as illegal instruction. */
+    if (a->rnum > 0xA) {
+        return false;
+    }
+
+    TCGv dest = dest_gpr(ctx, a->rd);
+    TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
+
+    /*
+     * tcg_constant_tl() yields a lifetime-managed constant temp, so no
+     * explicit tcg_temp_free() is needed (unlike tcg_const_tl()).
+     */
+    gen_helper_aes64ks1i(dest, src1, tcg_constant_tl(a->rnum));
+    gen_set_gpr(ctx, a->rd, dest);
+    return true;
+}
tcg_constant_tl.
+
+/*
+ * aes64im: AES inverse MixColumns on a 64-bit half-state.
+ * Requires the Zknd extension. Reuse gen_unary() rather than open-coding
+ * the dest_gpr/get_gpr/gen_set_gpr plumbing for a one-source helper.
+ */
+static bool trans_aes64im(DisasContext *ctx, arg_aes64im *a)
+{
+    REQUIRE_ZKND(ctx);
+    return gen_unary(ctx, a, EXT_NONE, gen_helper_aes64im);
+}
gen_unary.
r~