On Thu, Jul 18, 2024 at 12:10 PM Alistair Francis <alistai...@gmail.com> wrote: > > From: LIU Zhiwei <zhiwei_...@linux.alibaba.com> > > Zama16b is the property that misaligned load/stores/atomics within > a naturally aligned 16-byte region are atomic. > > According to the specification, Zama16b applies only to AMOs, loads > and stores defined in the base ISAs, and loads and stores of no more > than XLEN bits defined in the F, D, and Q extensions. Thus it should > not apply to zacas or RVC instructions. > > For an instruction in that set, if all accessed bytes lie within 16B granule, > the instruction will not raise an exception for reasons of address alignment, > and the instruction will give rise to only one memory operation for the > purposes of RVWMO—i.e., it will execute atomically. > > Signed-off-by: LIU Zhiwei <zhiwei_...@linux.alibaba.com> > Reviewed-by: Alistair Francis <alistair.fran...@wdc.com> > Message-ID: <20240709113652.1239-6-zhiwei_...@linux.alibaba.com> > Signed-off-by: Alistair Francis <alistair.fran...@wdc.com> > --- > target/riscv/cpu_cfg.h | 1 + > target/riscv/cpu.c | 2 ++ > target/riscv/insn_trans/trans_rva.c.inc | 42 ++++++++++++++----------- > target/riscv/insn_trans/trans_rvd.c.inc | 14 +++++++-- > target/riscv/insn_trans/trans_rvf.c.inc | 14 +++++++-- > target/riscv/insn_trans/trans_rvi.c.inc | 6 ++++ > 6 files changed, 57 insertions(+), 22 deletions(-) > > diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h > index d85e54b475..ddbfae37e5 100644 > --- a/target/riscv/cpu_cfg.h > +++ b/target/riscv/cpu_cfg.h > @@ -83,6 +83,7 @@ struct RISCVCPUConfig { > bool ext_zdinx; > bool ext_zaamo; > bool ext_zacas; > + bool ext_zama16b; > bool ext_zalrsc; > bool ext_zawrs; > bool ext_zfa; > diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c > index f4f8287a6d..de9c06904f 100644 > --- a/target/riscv/cpu.c > +++ b/target/riscv/cpu.c > @@ -118,6 +118,7 @@ const RISCVIsaExtData isa_edata_arr[] = { > ISA_EXT_DATA_ENTRY(za64rs, 
PRIV_VERSION_1_12_0, has_priv_1_11), > ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo), > ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas), > + ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b), > ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc), > ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), > ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), > @@ -1476,6 +1477,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { > MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false), > MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false), > MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false), > + MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false), > MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false), > MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false), > MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true), > diff --git a/target/riscv/insn_trans/trans_rva.c.inc > b/target/riscv/insn_trans/trans_rva.c.inc > index 4a9e4591d1..eb080baddd 100644 > --- a/target/riscv/insn_trans/trans_rva.c.inc > +++ b/target/riscv/insn_trans/trans_rva.c.inc > @@ -103,6 +103,12 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a, > TCGv dest = dest_gpr(ctx, a->rd); > TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE); > > + if (ctx->cfg_ptr->ext_zama16b) { > + mop |= MO_ATOM_WITHIN16; > + } else { > + mop |= MO_ALIGN; > + } > + > decode_save_opc(ctx); > src1 = get_address(ctx, a->rs1, 0); > func(dest, src1, src2, ctx->mem_idx, mop); > @@ -126,55 +132,55 @@ static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a) > static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TESL); > } > > static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | > MO_TESL)); > + return gen_amo(ctx, a, 
&tcg_gen_atomic_fetch_add_tl, MO_TESL); > } > > static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | > MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TESL); > } > > static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | > MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TESL); > } > > static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | > MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TESL); > } > > static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | > MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TESL); > } > > static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | > MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TESL); > } > > static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | > MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TESL); > } > > static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | > MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TESL); > } > > static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a) > @@ -195,61 +201,61 @@ static bool trans_amoswap_d(DisasContext *ctx, > arg_amoswap_d *a) > { > REQUIRE_64BIT(ctx); > 
REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TEUQ); > } > > static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | > MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TEUQ); > } > > static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | > MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TEUQ); > } > > static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | > MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TEUQ); > } > > static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | > MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TEUQ); > } > > static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | > MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TEUQ); > } > > static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | > MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TEUQ); > } > > static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | > 
MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TEUQ); > } > > static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | > MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TEUQ); > } > diff --git a/target/riscv/insn_trans/trans_rvd.c.inc > b/target/riscv/insn_trans/trans_rvd.c.inc > index d9ce9e407f..1f5fac65a2 100644 > --- a/target/riscv/insn_trans/trans_rvd.c.inc > +++ b/target/riscv/insn_trans/trans_rvd.c.inc > @@ -42,13 +42,18 @@ > static bool trans_fld(DisasContext *ctx, arg_fld *a) > { > TCGv addr; > + MemOp memop = MO_TEUQ; > > REQUIRE_FPU; > REQUIRE_EXT(ctx, RVD); > > + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
Richard pointed out that checking the cur_insn_len looks a bit strange. He's right: the length of the instruction has no impact, so we should instead just check for ext_zama16b. Do you mind sending a follow-up patch to remove all of the instruction length checks? Alistair