Yes. Done for next version. On 30 September 2014 17:56, Edgar E. Iglesias <edgar.igles...@gmail.com> wrote:
> On Tue, Sep 30, 2014 at 04:49:16PM -0500, Greg Bellows wrote: > > Renamed the arm_current_pl CPU function to more accurately represent > that it > > returns the ARMv8 EL rather than ARMv7 PL. > > > > Signed-off-by: Greg Bellows <greg.bell...@linaro.org> > > --- > > target-arm/cpu.h | 18 +++++++++--------- > > target-arm/helper-a64.c | 6 +++--- > > target-arm/helper.c | 22 +++++++++++----------- > > target-arm/internals.h | 2 +- > > target-arm/op_helper.c | 16 ++++++++-------- > > target-arm/translate-a64.c | 2 +- > > target-arm/translate.c | 2 +- > > 7 files changed, 34 insertions(+), 34 deletions(-) > > > > diff --git a/target-arm/cpu.h b/target-arm/cpu.h > > index 10afef0..101d139 100644 > > --- a/target-arm/cpu.h > > +++ b/target-arm/cpu.h > > @@ -982,7 +982,7 @@ static inline bool cptype_valid(int cptype) > > #define PL1_RW (PL1_R | PL1_W) > > #define PL0_RW (PL0_R | PL0_W) > > > > -static inline int arm_current_pl(CPUARMState *env) > > +static inline int arm_current_el(CPUARMState *env) > > { > > if (env->aarch64) { > > return extract32(env->pstate, 2, 2); > > @@ -1222,7 +1222,7 @@ bool write_cpustate_to_list(ARMCPU *cpu); > > static inline bool arm_excp_unmasked(CPUState *cs, unsigned int > excp_idx) > > { > > CPUARMState *env = cs->env_ptr; > > - unsigned int cur_el = arm_current_pl(env); > > + unsigned int cur_el = arm_current_el(env); > > unsigned int target_el = arm_excp_target_el(cs, excp_idx); > > /* FIXME: Use actual secure state. 
*/ > > bool secure = false; > > @@ -1294,7 +1294,7 @@ static inline CPUARMState *cpu_init(const char > *cpu_model) > > #define MMU_USER_IDX 0 > > static inline int cpu_mmu_index (CPUARMState *env) > > { > > - return arm_current_pl(env); > > + return arm_current_el(env); > > } > > > > /* Return the Exception Level targeted by debug exceptions; > > @@ -1307,7 +1307,7 @@ static inline int arm_debug_target_el(CPUARMState > *env) > > > > static inline bool aa64_generate_debug_exceptions(CPUARMState *env) > > { > > - if (arm_current_pl(env) == arm_debug_target_el(env)) { > > + if (arm_current_el(env) == arm_debug_target_el(env)) { > > if ((extract32(env->cp15.mdscr_el1, 13, 1) == 0) > > || (env->daif & PSTATE_D)) { > > return false; > > @@ -1318,10 +1318,10 @@ static inline bool > aa64_generate_debug_exceptions(CPUARMState *env) > > > > static inline bool aa32_generate_debug_exceptions(CPUARMState *env) > > { > > - if (arm_current_pl(env) == 0 && arm_el_is_aa64(env, 1)) { > > + if (arm_current_el(env) == 0 && arm_el_is_aa64(env, 1)) { > > return aa64_generate_debug_exceptions(env); > > } > > - return arm_current_pl(env) != 2; > > + return arm_current_el(env) != 2; > > } > > > > /* Return true if debugging exceptions are currently enabled. 
> > @@ -1451,8 +1451,8 @@ static inline void > cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, > > if (is_a64(env)) { > > *pc = env->pc; > > *flags = ARM_TBFLAG_AARCH64_STATE_MASK > > - | (arm_current_pl(env) << ARM_TBFLAG_AA64_EL_SHIFT); > > - if (fpen == 3 || (fpen == 1 && arm_current_pl(env) != 0)) { > > + | (arm_current_el(env) << ARM_TBFLAG_AA64_EL_SHIFT); > > + if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) { > > *flags |= ARM_TBFLAG_AA64_FPEN_MASK; > > } > > /* The SS_ACTIVE and PSTATE_SS bits correspond to the state > machine > > @@ -1488,7 +1488,7 @@ static inline void > cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, > > || arm_el_is_aa64(env, 1)) { > > *flags |= ARM_TBFLAG_VFPEN_MASK; > > } > > - if (fpen == 3 || (fpen == 1 && arm_current_pl(env) != 0)) { > > + if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) { > > *flags |= ARM_TBFLAG_CPACR_FPEN_MASK; > > } > > /* The SS_ACTIVE and PSTATE_SS bits correspond to the state > machine > > diff --git a/target-arm/helper-a64.c b/target-arm/helper-a64.c > > index 8228e29..173f116 100644 > > --- a/target-arm/helper-a64.c > > +++ b/target-arm/helper-a64.c > > @@ -448,7 +448,7 @@ void aarch64_cpu_do_interrupt(CPUState *cs) > > unsigned int new_mode = aarch64_pstate_mode(new_el, true); > > int i; > > > > - if (arm_current_pl(env) < new_el) { > > + if (arm_current_el(env) < new_el) { > > if (env->aarch64) { > > addr += 0x400; > > } else { > > @@ -459,7 +459,7 @@ void aarch64_cpu_do_interrupt(CPUState *cs) > > } > > > > arm_log_exception(cs->exception_index); > > - qemu_log_mask(CPU_LOG_INT, "...from EL%d\n", arm_current_pl(env)); > > + qemu_log_mask(CPU_LOG_INT, "...from EL%d\n", arm_current_el(env)); > > if (qemu_loglevel_mask(CPU_LOG_INT) > > && !excp_is_internal(cs->exception_index)) { > > qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%" PRIx32 "\n", > > @@ -495,7 +495,7 @@ void aarch64_cpu_do_interrupt(CPUState *cs) > > > > if (is_a64(env)) { > > 
env->banked_spsr[aarch64_banked_spsr_index(new_el)] = > pstate_read(env); > > - aarch64_save_sp(env, arm_current_pl(env)); > > + aarch64_save_sp(env, arm_current_el(env)); > > env->elr_el[new_el] = env->pc; > > } else { > > env->banked_spsr[0] = cpsr_read(env); > > diff --git a/target-arm/helper.c b/target-arm/helper.c > > index d6e3b52..2381e6f 100644 > > --- a/target-arm/helper.c > > +++ b/target-arm/helper.c > > @@ -571,7 +571,7 @@ static CPAccessResult pmreg_access(CPUARMState *env, > const ARMCPRegInfo *ri) > > /* Performance monitor registers user accessibility is controlled > > * by PMUSERENR. > > */ > > - if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) { > > + if (arm_current_el(env) == 0 && !env->cp15.c9_pmuserenr) { > > return CP_ACCESS_TRAP; > > } > > return CP_ACCESS_OK; > > @@ -996,7 +996,7 @@ static void teecr_write(CPUARMState *env, const > ARMCPRegInfo *ri, > > > > static CPAccessResult teehbr_access(CPUARMState *env, const > ARMCPRegInfo *ri) > > { > > - if (arm_current_pl(env) == 0 && (env->teecr & 1)) { > > + if (arm_current_el(env) == 0 && (env->teecr & 1)) { > > return CP_ACCESS_TRAP; > > } > > return CP_ACCESS_OK; > > @@ -1042,7 +1042,7 @@ static const ARMCPRegInfo v6k_cp_reginfo[] = { > > static CPAccessResult gt_cntfrq_access(CPUARMState *env, const > ARMCPRegInfo *ri) > > { > > /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are > zero */ > > - if (arm_current_pl(env) == 0 && !extract32(env->cp15.c14_cntkctl, > 0, 2)) { > > + if (arm_current_el(env) == 0 && !extract32(env->cp15.c14_cntkctl, > 0, 2)) { > > return CP_ACCESS_TRAP; > > } > > return CP_ACCESS_OK; > > @@ -1051,7 +1051,7 @@ static CPAccessResult gt_cntfrq_access(CPUARMState > *env, const ARMCPRegInfo *ri) > > static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx) > > { > > /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */ > > - if (arm_current_pl(env) == 0 && > > + if (arm_current_el(env) == 0 && > > 
!extract32(env->cp15.c14_cntkctl, timeridx, 1)) { > > return CP_ACCESS_TRAP; > > } > > @@ -1063,7 +1063,7 @@ static CPAccessResult gt_timer_access(CPUARMState > *env, int timeridx) > > /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if > > * EL0[PV]TEN is zero. > > */ > > - if (arm_current_pl(env) == 0 && > > + if (arm_current_el(env) == 0 && > > !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { > > return CP_ACCESS_TRAP; > > } > > @@ -1911,7 +1911,7 @@ static void aa64_fpsr_write(CPUARMState *env, > const ARMCPRegInfo *ri, > > > > static CPAccessResult aa64_daif_access(CPUARMState *env, const > ARMCPRegInfo *ri) > > { > > - if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) { > > + if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) { > > return CP_ACCESS_TRAP; > > } > > return CP_ACCESS_OK; > > @@ -1929,7 +1929,7 @@ static CPAccessResult > aa64_cacheop_access(CPUARMState *env, > > /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless > > * SCTLR_EL1.UCI is set. > > */ > > - if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) { > > + if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) { > > return CP_ACCESS_TRAP; > > } > > return CP_ACCESS_OK; > > @@ -2006,7 +2006,7 @@ static CPAccessResult aa64_zva_access(CPUARMState > *env, const ARMCPRegInfo *ri) > > /* We don't implement EL2, so the only control on DC ZVA is the > > * bit in the SCTLR which can prohibit access for EL0. 
> > */ > > - if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_DZE)) { > > + if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_DZE)) { > > return CP_ACCESS_TRAP; > > } > > return CP_ACCESS_OK; > > @@ -2366,7 +2366,7 @@ static CPAccessResult ctr_el0_access(CPUARMState > *env, const ARMCPRegInfo *ri) > > /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64, > > * but the AArch32 CTR has its own reginfo struct) > > */ > > - if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCT)) { > > + if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCT)) { > > return CP_ACCESS_TRAP; > > } > > return CP_ACCESS_OK; > > @@ -3773,7 +3773,7 @@ unsigned int arm_excp_target_el(CPUState *cs, > unsigned int excp_idx) > > { > > ARMCPU *cpu = ARM_CPU(cs); > > CPUARMState *env = &cpu->env; > > - unsigned int cur_el = arm_current_pl(env); > > + unsigned int cur_el = arm_current_el(env); > > unsigned int target_el; > > /* FIXME: Use actual secure state. */ > > bool secure = false; > > @@ -4769,7 +4769,7 @@ int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr > address, > > int prot; > > int ret, is_user; > > uint32_t syn; > > - bool same_el = (arm_current_pl(env) != 0); > > + bool same_el = (arm_current_el(env) != 0); > > > > is_user = mmu_idx == MMU_USER_IDX; > > ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, > &prot, > > diff --git a/target-arm/internals.h b/target-arm/internals.h > > index b7547bb..fd69a83 100644 > > --- a/target-arm/internals.h > > +++ b/target-arm/internals.h > > @@ -130,7 +130,7 @@ static inline void aarch64_restore_sp(CPUARMState > *env, int el) > > > > static inline void update_spsel(CPUARMState *env, uint32_t imm) > > { > > - unsigned int cur_el = arm_current_pl(env); > > + unsigned int cur_el = arm_current_el(env); > > /* Update PSTATE SPSel bit; this requires us to update the > > * working stack pointer in xregs[31]. 
> > */ > > diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c > > index 03ac92a..0809d63 100644 > > --- a/target-arm/op_helper.c > > +++ b/target-arm/op_helper.c > > @@ -361,7 +361,7 @@ void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t > op, uint32_t imm) > > * Note that SPSel is never OK from EL0; we rely on handle_msr_i() > > * to catch that case at translate time. > > */ > > - if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) { > > + if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) { > > raise_exception(env, EXCP_UDEF); > > } > > > > @@ -387,7 +387,7 @@ void HELPER(clear_pstate_ss)(CPUARMState *env) > > > > void HELPER(pre_hvc)(CPUARMState *env) > > { > > - int cur_el = arm_current_pl(env); > > + int cur_el = arm_current_el(env); > > /* FIXME: Use actual secure state. */ > > bool secure = false; > > bool undef; > > @@ -418,7 +418,7 @@ void HELPER(pre_hvc)(CPUARMState *env) > > > > void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome) > > { > > - int cur_el = arm_current_pl(env); > > + int cur_el = arm_current_el(env); > > /* FIXME: Use real secure state. */ > > bool secure = false; > > bool smd = env->cp15.scr_el3 & SCR_SMD; > > @@ -444,7 +444,7 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t > syndrome) > > > > void HELPER(exception_return)(CPUARMState *env) > > { > > - int cur_el = arm_current_pl(env); > > + int cur_el = arm_current_el(env); > > unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el); > > uint32_t spsr = env->banked_spsr[spsr_idx]; > > int new_el, i; > > @@ -561,7 +561,7 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn) > > > > switch (bt) { > > case 3: /* linked context ID match */ > > - if (arm_current_pl(env) > 1) { > > + if (arm_current_el(env) > 1) { > > /* Context matches never fire in EL2 or (AArch64) EL3 */ > > return false; > > } > > @@ -641,7 +641,7 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool > is_wp) > > * rely on this behaviour currently. 
> > * For breakpoints we do want to use the current CPU state. > > */ > > - switch (arm_current_pl(env)) { > > + switch (arm_current_el(env)) { > > case 3: > > case 2: > > if (!hmc) { > > @@ -728,7 +728,7 @@ void arm_debug_excp_handler(CPUState *cs) > > cs->watchpoint_hit = NULL; > > if (check_watchpoints(cpu)) { > > bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != > 0; > > - bool same_el = arm_debug_target_el(env) == > arm_current_pl(env); > > + bool same_el = arm_debug_target_el(env) == > arm_current_el(env); > > > > env->exception.syndrome = syn_watchpoint(same_el, 0, > wnr); > > if (extended_addresses_enabled(env)) { > > @@ -744,7 +744,7 @@ void arm_debug_excp_handler(CPUState *cs) > > } > > } else { > > if (check_breakpoints(cpu)) { > > - bool same_el = (arm_debug_target_el(env) == > arm_current_pl(env)); > > + bool same_el = (arm_debug_target_el(env) == > arm_current_el(env)); > > env->exception.syndrome = syn_breakpoint(same_el); > > if (extended_addresses_enabled(env)) { > > env->exception.fsr = (1 << 9) | 0x22; > > diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c > > index 35ae3ea..f53dc0f 100644 > > --- a/target-arm/translate-a64.c > > +++ b/target-arm/translate-a64.c > > @@ -10930,7 +10930,7 @@ void gen_intermediate_code_internal_a64(ARMCPU > *cpu, > > dc->vec_len = 0; > > dc->vec_stride = 0; > > dc->cp_regs = cpu->cp_regs; > > - dc->current_pl = arm_current_pl(env); > > + dc->current_pl = arm_current_el(env); > > dc->features = env->features; > > > > /* Single step state. 
The code-generation logic here is: > > diff --git a/target-arm/translate.c b/target-arm/translate.c > > index 8a2994f..f6404be 100644 > > --- a/target-arm/translate.c > > +++ b/target-arm/translate.c > > @@ -10945,7 +10945,7 @@ static inline void > gen_intermediate_code_internal(ARMCPU *cpu, > > dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags); > > dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags); > > dc->cp_regs = cpu->cp_regs; > > - dc->current_pl = arm_current_pl(env); > > + dc->current_pl = arm_current_el(env); > > Should we rename dc->current_pl as well? > This might turn into a patch that is hard to maintain while the series > gets reviewed. Maybe this change could be done earlier and hopefully get > merged early to avoid some pain on your side.... > > Cheers, > Edgar > >