Currently the tag-setting instructions always set tags in the context of the current EL, and so we only need one ATA bit in the TB flags. The FEAT_MOPS SETG instructions include ones which set tags for a non-privileged access, so we now also need the equivalent "are tags enabled?" information for EL0.
Add the new TB flag, and convert the existing 'bool ata' field in
DisasContext to a 'bool ata[2]' that can be indexed by the is_unpriv
bit in an instruction, similarly to mte[2].

Signed-off-by: Peter Maydell <peter.mayd...@linaro.org>
---
 target/arm/cpu.h               |  1 +
 target/arm/tcg/translate.h     |  4 ++--
 target/arm/tcg/hflags.c        | 12 ++++++++++++
 target/arm/tcg/translate-a64.c | 23 ++++++++++++-----------
 4 files changed, 27 insertions(+), 13 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 7d2e1a1cfe8..8312291f083 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -3147,6 +3147,7 @@ FIELD(TBFLAG_A64, SVL, 24, 4)
 FIELD(TBFLAG_A64, SME_TRAP_NONSTREAMING, 28, 1)
 FIELD(TBFLAG_A64, FGT_ERET, 29, 1)
 FIELD(TBFLAG_A64, NAA, 30, 1)
+FIELD(TBFLAG_A64, ATA0, 31, 1)
 
 /*
  * Helpers for using the above.
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
index f748ba6f394..63922f8bad1 100644
--- a/target/arm/tcg/translate.h
+++ b/target/arm/tcg/translate.h
@@ -114,8 +114,8 @@ typedef struct DisasContext {
     bool unpriv;
     /* True if v8.3-PAuth is active. */
     bool pauth_active;
-    /* True if v8.5-MTE access to tags is enabled. */
-    bool ata;
+    /* True if v8.5-MTE access to tags is enabled; index with is_unpriv. */
+    bool ata[2];
     /* True if v8.5-MTE tag checks affect the PE; index with is_unpriv. */
     bool mte_active[2];
     /* True with v8.5-BTI and SCTLR_ELx.BT* set. */
diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c
index ea642384f5a..cea1adb7b62 100644
--- a/target/arm/tcg/hflags.c
+++ b/target/arm/tcg/hflags.c
@@ -325,6 +325,18 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
             && allocation_tag_access_enabled(env, 0, sctlr)) {
             DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
         }
+        /*
+         * For unpriv tag-setting accesses we also need ATA0. Again, in
+         * contexts where unpriv and normal insns are the same we
+         * duplicate the ATA bit to save effort for translate-a64.c.
+         */
+        if (EX_TBFLAG_A64(flags, UNPRIV)) {
+            if (allocation_tag_access_enabled(env, 0, sctlr)) {
+                DP_TBFLAG_A64(flags, ATA0, 1);
+            }
+        } else {
+            DP_TBFLAG_A64(flags, ATA0, EX_TBFLAG_A64(flags, ATA));
+        }
         /* Cache TCMA as well as TBI. */
         DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
     }
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index acb96251704..366ddde57f7 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -2251,7 +2251,7 @@ static void handle_sys(DisasContext *s, bool isread,
         clean_addr = clean_data_tbi(s, tcg_rt);
         gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8);
 
-        if (s->ata) {
+        if (s->ata[0]) {
             /* Extract the tag from the register to match STZGM. */
             tag = tcg_temp_new_i64();
             tcg_gen_shri_i64(tag, tcg_rt, 56);
@@ -2268,7 +2268,7 @@ static void handle_sys(DisasContext *s, bool isread,
         clean_addr = clean_data_tbi(s, tcg_rt);
         gen_helper_dc_zva(cpu_env, clean_addr);
 
-        if (s->ata) {
+        if (s->ata[0]) {
             /* Extract the tag from the register to match STZGM. */
             tag = tcg_temp_new_i64();
             tcg_gen_shri_i64(tag, tcg_rt, 56);
@@ -3028,7 +3028,7 @@ static bool trans_STGP(DisasContext *s, arg_ldstpair *a)
         tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
     }
 
-    if (!s->ata) {
+    if (!s->ata[0]) {
         /*
          * TODO: We could rely on the stores below, at least for
          * system mode, if we arrange to add MO_ALIGN_16.
@@ -3758,7 +3758,7 @@ static bool trans_STZGM(DisasContext *s, arg_ldst_tag *a)
     tcg_gen_addi_i64(addr, addr, a->imm);
     tcg_rt = cpu_reg(s, a->rt);
 
-    if (s->ata) {
+    if (s->ata[0]) {
         gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
     }
     /*
@@ -3790,7 +3790,7 @@ static bool trans_STGM(DisasContext *s, arg_ldst_tag *a)
     tcg_gen_addi_i64(addr, addr, a->imm);
     tcg_rt = cpu_reg(s, a->rt);
 
-    if (s->ata) {
+    if (s->ata[0]) {
         gen_helper_stgm(cpu_env, addr, tcg_rt);
     } else {
         MMUAccessType acc = MMU_DATA_STORE;
@@ -3822,7 +3822,7 @@ static bool trans_LDGM(DisasContext *s, arg_ldst_tag *a)
     tcg_gen_addi_i64(addr, addr, a->imm);
     tcg_rt = cpu_reg(s, a->rt);
 
-    if (s->ata) {
+    if (s->ata[0]) {
         gen_helper_ldgm(tcg_rt, cpu_env, addr);
     } else {
         MMUAccessType acc = MMU_DATA_LOAD;
@@ -3857,7 +3857,7 @@ static bool trans_LDG(DisasContext *s, arg_ldst_tag *a)
     tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
     tcg_rt = cpu_reg(s, a->rt);
 
-    if (s->ata) {
+    if (s->ata[0]) {
         gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
     } else {
         /*
@@ -3894,7 +3894,7 @@ static bool do_STG(DisasContext *s, arg_ldst_tag *a, bool is_zero, bool is_pair)
         tcg_gen_addi_i64(addr, addr, a->imm);
     }
     tcg_rt = cpu_reg_sp(s, a->rt);
-    if (!s->ata) {
+    if (!s->ata[0]) {
         /*
          * For STG and ST2G, we need to check alignment and probe memory.
          * TODO: For STZG and STZ2G, we could rely on the stores below,
@@ -4063,7 +4063,7 @@ static bool gen_add_sub_imm_with_tags(DisasContext *s, arg_rri_tag *a,
     tcg_rn = cpu_reg_sp(s, a->rn);
     tcg_rd = cpu_reg_sp(s, a->rd);
 
-    if (s->ata) {
+    if (s->ata[0]) {
         gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn,
                            tcg_constant_i32(imm),
                            tcg_constant_i32(a->uimm4));
@@ -5450,7 +5450,7 @@ static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
         if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
             goto do_unallocated;
         }
-        if (s->ata) {
+        if (s->ata[0]) {
            gen_helper_irg(cpu_reg_sp(s, rd), cpu_env,
                           cpu_reg_sp(s, rn), cpu_reg(s, rm));
         } else {
@@ -13941,7 +13941,8 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
     dc->bt = EX_TBFLAG_A64(tb_flags, BT);
     dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
     dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
-    dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
+    dc->ata[0] = EX_TBFLAG_A64(tb_flags, ATA);
+    dc->ata[1] = EX_TBFLAG_A64(tb_flags, ATA0);
    dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
    dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
    dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
-- 
2.34.1
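
[Editorial aside, not part of the patch: once 'ata' is a two-entry array, a later FEAT_MOPS tag-setting translation would be expected to select the enable bit with the instruction's unpriv bit, the same way the existing mte_active[] pair is indexed. The function below is only an illustrative sketch under that assumption; its name and structure are hypothetical and the real SETG translation is not part of this change.]

static void gen_tag_setting_sketch(DisasContext *s, bool is_unpriv)
{
    /*
     * Hypothetical example: ata[0] is "tags enabled at the current EL",
     * ata[1] is the EL0/unprivileged equivalent carried by the new ATA0 flag.
     */
    if (s->ata[is_unpriv]) {
        /* Tags enabled: emit the real tag-writing helper call here. */
    } else {
        /* Tags not enabled: skip the tag update; the data access still happens. */
    }
}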