On Wed, Mar 20, 2024 at 5:21 PM Atish Patra <ati...@rivosinc.com> wrote:
>
> On 3/19/24 21:54, Alistair Francis wrote:
>
> On Thu, Mar 7, 2024 at 7:26 PM Atish Patra <ati...@rivosinc.com> wrote:
>
> On 3/4/24 22:47, LIU Zhiwei wrote:
>
> On 2024/2/29 2:51, Atish Patra wrote:
>
> Privilege mode filtering can also be emulated for cycle/instret by
> tracking host_ticks/icount during each privilege mode switch. This
> patch implements that for both cycle/instret and mhpmcounters. The
> first one requires Smcntrpmf while the other one requires Sscofpmf
> to be enabled.
>
> The cycle/instret are still computed using host ticks when icount
> is not enabled. Otherwise, they are computed using raw icount which
> is more accurate in icount mode.
>
> Reviewed-by: Daniel Henrique Barboza <dbarb...@ventanamicro.com>
> Signed-off-by: Atish Patra <ati...@rivosinc.com>
> ---
>  target/riscv/cpu.h        | 11 +++++
>  target/riscv/cpu_bits.h   |  5 ++
>  target/riscv/cpu_helper.c | 17 ++++-
>  target/riscv/csr.c        | 96 ++++++++++++++++++++++++++++++---------
>  target/riscv/pmu.c        | 64 ++++++++++++++++++++++++++
>  target/riscv/pmu.h        |  2 +
>  6 files changed, 171 insertions(+), 24 deletions(-)
>
> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> index 174e8ba8e847..9e21d7f7d635 100644
> --- a/target/riscv/cpu.h
> +++ b/target/riscv/cpu.h
> @@ -157,6 +157,15 @@ typedef struct PMUCTRState {
>      target_ulong irq_overflow_left;
>  } PMUCTRState;
> +typedef struct PMUFixedCtrState {
> +    /* Track cycle and icount for each privilege mode */
> +    uint64_t counter[4];
> +    uint64_t counter_prev[4];
> +    /* Track cycle and icount for each privilege mode when V = 1 */
> +    uint64_t counter_virt[2];
> +    uint64_t counter_virt_prev[2];
> +} PMUFixedCtrState;
> +
>  struct CPUArchState {
>      target_ulong gpr[32];
>      target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */
> @@ -353,6 +362,8 @@ struct CPUArchState {
>      /* PMU event selector configured values for RV32 */
>      target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];
> +    PMUFixedCtrState pmu_fixed_ctrs[2];
> +
>      target_ulong sscratch;
>      target_ulong mscratch;
> diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
> index e866c60a400c..5fe349e313dc 100644
> --- a/target/riscv/cpu_bits.h
> +++ b/target/riscv/cpu_bits.h
> @@ -920,6 +920,11 @@ typedef enum RISCVException {
>  #define MHPMEVENT_BIT_VUINH            BIT_ULL(58)
>  #define MHPMEVENTH_BIT_VUINH           BIT(26)
> +#define MHPMEVENT_FILTER_MASK          (MHPMEVENT_BIT_MINH  | \
> +                                        MHPMEVENT_BIT_SINH  | \
> +                                        MHPMEVENT_BIT_UINH  | \
> +                                        MHPMEVENT_BIT_VSINH | \
> +                                        MHPMEVENT_BIT_VUINH)
>  #define MHPMEVENT_SSCOF_MASK           _ULL(0xFFFF000000000000)
>  #define MHPMEVENT_IDX_MASK             0xFFFFF
>  #define MHPMEVENT_SSCOF_RESVD          16
> diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
> index d462d95ee165..33965d843d46 100644
> --- a/target/riscv/cpu_helper.c
> +++ b/target/riscv/cpu_helper.c
> @@ -718,8 +718,21 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
>  {
>      g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
> -    if (icount_enabled() && newpriv != env->priv) {
> -        riscv_itrigger_update_priv(env);
> +    /*
> +     * Invoke cycle/instret update between priv mode changes, or on a
> +     * VS->HS mode transition where the SPV bit must be set, or on an
> +     * HS->VS mode transition where virt_enabled must be set.
> +     * In both cases, priv will be S mode only.
> +     */
> +    if (newpriv != env->priv ||
> +        (env->priv == PRV_S && newpriv == PRV_S &&
> +         (env->virt_enabled || get_field(env->hstatus, HSTATUS_SPV)))) {
> +        if (icount_enabled()) {
> +            riscv_itrigger_update_priv(env);
> +            riscv_pmu_icount_update_priv(env, newpriv);
> +        } else {
> +            riscv_pmu_cycle_update_priv(env, newpriv);
> +        }
>      }
>      /* tlb_flush is unnecessary as mode is contained in mmu_idx */
>      env->priv = newpriv;
> diff --git a/target/riscv/csr.c b/target/riscv/csr.c
> index ff9bac537593..482e212c5f74 100644
> --- a/target/riscv/csr.c
> +++ b/target/riscv/csr.c
> @@ -788,32 +788,16 @@ static RISCVException write_vcsr(CPURISCVState *env, int csrno,
>      return RISCV_EXCP_NONE;
>  }
> +#if defined(CONFIG_USER_ONLY)
>  /* User Timers and Counters */
>  static target_ulong get_ticks(bool shift)
>  {
> -    int64_t val;
> -    target_ulong result;
> -
> -#if !defined(CONFIG_USER_ONLY)
> -    if (icount_enabled()) {
> -        val = icount_get();
> -    } else {
> -        val = cpu_get_host_ticks();
> -    }
> -#else
> -    val = cpu_get_host_ticks();
> -#endif
> -
> -    if (shift) {
> -        result = val >> 32;
> -    } else {
> -        result = val;
> -    }
> +    int64_t val = cpu_get_host_ticks();
> +    target_ulong result = shift ? val >> 32 : val;
>      return result;
>  }
> -#if defined(CONFIG_USER_ONLY)
>  static RISCVException read_time(CPURISCVState *env, int csrno,
>                                  target_ulong *val)
>  {
> @@ -952,6 +936,71 @@ static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
>      return RISCV_EXCP_NONE;
>  }
> +static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
> +                                                          int counter_idx,
> +                                                          bool upper_half)
> +{
> +    uint64_t curr_val = 0;
> +    target_ulong result = 0;
> +    uint64_t *counter_arr = icount_enabled() ? env->pmu_fixed_ctrs[1].counter :
> +                            env->pmu_fixed_ctrs[0].counter;
> +    uint64_t *counter_arr_virt = icount_enabled() ?
> +                                 env->pmu_fixed_ctrs[1].counter_virt :
> +                                 env->pmu_fixed_ctrs[0].counter_virt;
> +    uint64_t cfg_val = 0;
> +
> +    if (counter_idx == 0) {
> +        cfg_val = upper_half ? ((uint64_t)env->mcyclecfgh << 32) :
> +                  env->mcyclecfg;
> +    } else if (counter_idx == 2) {
> +        cfg_val = upper_half ? ((uint64_t)env->minstretcfgh << 32) :
> +                  env->minstretcfg;
> +    } else {
> +        cfg_val = upper_half ?
> +                  ((uint64_t)env->mhpmeventh_val[counter_idx] << 32) :
> +                  env->mhpmevent_val[counter_idx];
> +        cfg_val &= MHPMEVENT_FILTER_MASK;
> +    }
> +
> +    if (!cfg_val) {
> +        if (icount_enabled()) {
> +            curr_val = icount_get_raw();
> +        } else {
> +            curr_val = cpu_get_host_ticks();
> +        }
> +        goto done;
> +    }
> +
> +    if (!(cfg_val & MCYCLECFG_BIT_MINH)) {
> +        curr_val += counter_arr[PRV_M];
> +    }
> +
> +    if (!(cfg_val & MCYCLECFG_BIT_SINH)) {
> +        curr_val += counter_arr[PRV_S];
> +    }
> +
> +    if (!(cfg_val & MCYCLECFG_BIT_UINH)) {
> +        curr_val += counter_arr[PRV_U];
> +    }
> +
> +    if (!(cfg_val & MCYCLECFG_BIT_VSINH)) {
> +        curr_val += counter_arr_virt[PRV_S];
> +    }
> +
> +    if (!(cfg_val & MCYCLECFG_BIT_VUINH)) {
> +        curr_val += counter_arr_virt[PRV_U];
> +    }
> +
> +done:
> +    if (riscv_cpu_mxl(env) == MXL_RV32) {
> +        result = upper_half ? curr_val >> 32 : curr_val;
> +    } else {
> +        result = curr_val;
> +    }
> +
> +    return result;
> +}
> +
>  static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
>                                          target_ulong val)
>  {
> @@ -962,7 +1011,8 @@ static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
>      counter->mhpmcounter_val = val;
>      if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
>          riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
> -        counter->mhpmcounter_prev = get_ticks(false);
> +        counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
> +                                                                ctr_idx, false);
>          if (ctr_idx > 2) {
>              if (riscv_cpu_mxl(env) == MXL_RV32) {
>                  mhpmctr_val = mhpmctr_val |
> @@ -990,7 +1040,8 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
>      mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
>      if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
>          riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
> -        counter->mhpmcounterh_prev = get_ticks(true);
> +        counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
> +                                                                 ctr_idx, true);
>          if (ctr_idx > 2) {
>              riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
>          }
> @@ -1031,7 +1082,8 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
>       */
>      if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
>          riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
> -        *val = get_ticks(upper_half) - ctr_prev + ctr_val;
> +        *val = riscv_pmu_ctr_get_fixed_counters_val(env, ctr_idx, upper_half) -
> +               ctr_prev + ctr_val;
>      } else {
>          *val = ctr_val;
>      }
> diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
> index 0e7d58b8a5c2..37309ff64cb6 100644
> --- a/target/riscv/pmu.c
> +++ b/target/riscv/pmu.c
> @@ -19,6 +19,7 @@
>  #include "qemu/osdep.h"
>  #include "qemu/log.h"
>  #include "qemu/error-report.h"
> +#include "qemu/timer.h"
>  #include "cpu.h"
>  #include "pmu.h"
>  #include "sysemu/cpu-timers.h"
> @@ -176,6 +177,69 @@ static int riscv_pmu_incr_ctr_rv64(RISCVCPU *cpu, uint32_t ctr_idx)
>      return 0;
>  }
> +void riscv_pmu_icount_update_priv(CPURISCVState *env, target_ulong newpriv)
> +{
> +    uint64_t delta;
> +    uint64_t *counter_arr;
> +    uint64_t *counter_arr_prev;
> +    uint64_t current_icount = icount_get_raw();
> +
> +    if (env->virt_enabled) {
> +        counter_arr = env->pmu_fixed_ctrs[1].counter_virt;
> +        counter_arr_prev = env->pmu_fixed_ctrs[1].counter_virt_prev;
> +    } else {
> +        counter_arr = env->pmu_fixed_ctrs[1].counter;
> +        counter_arr_prev = env->pmu_fixed_ctrs[1].counter_prev;
> +    }
> +
> +    if (newpriv != env->priv) {
> +        delta = current_icount - counter_arr_prev[env->priv];
> +        counter_arr_prev[newpriv] = current_icount;
> +    } else {
> +        delta = current_icount - counter_arr_prev[env->priv];
> +        if (env->virt_enabled) {
> +            /* HS->VS transition. The previous value should correspond to HS. */
>
> Hi Atish,
>
> Does env->virt_enabled ensure an HS->VS transition?
>
> As per my understanding, riscv_cpu_set_virt_enabled is always invoked
> before riscv_cpu_set_mode.
>
> In helper_mret() we call riscv_cpu_set_mode() before
> riscv_cpu_set_virt_enabled().
>
> Ahh yes. I missed the helper_mret condition.
>
> I don't think there is any requirement on which order we call the functions.
>
> Indeed. helper_mret and helper_sret invoke them in a different order.
>
> That means env->virt_enabled must be enabled before we enter into this
> function. Let me know if I missed something.
>
> env->virt_enabled will be true if we are in HS or VS mode, but you
> don't know the transition from just env->virt_enabled being set.
>
> Hmm. In riscv_cpu_do_interrupt(), virt_enabled is set to 0 if a trap is taken
> to HS mode from VS mode. Am I missing something?
>
Whoops, I meant VU and VS mode.

Alistair
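
For anyone following the accounting in the patch above, the sketch below shows
the general shape of what riscv_pmu_icount_update_priv() and
riscv_pmu_ctr_get_fixed_counters_val() do: charge the elapsed ticks to the
privilege mode that was running at each switch, then sum only the modes whose
inhibit bit is clear when the counter is read. It is a simplified
illustration, not the QEMU code: the names acc, prev, switch_priv and
read_filtered are invented for the example, the V=1 (counter_virt) state is
omitted, and the live delta for the currently running mode is ignored.

#include <stdbool.h>
#include <stdint.h>

#define PRV_U 0
#define PRV_S 1
#define PRV_M 3

static uint64_t acc[4];   /* ticks accumulated while each mode was running */
static uint64_t prev[4];  /* tick value recorded when that mode was entered */

/* On every privilege switch: charge the elapsed ticks to the outgoing mode
 * and remember the entry timestamp for the incoming one. */
static void switch_priv(uint64_t now, int old_priv, int new_priv)
{
    acc[old_priv] += now - prev[old_priv];
    prev[new_priv] = now;
}

/* Value presented to software: include only the modes whose inhibit bit
 * (MINH/SINH/UINH in mcyclecfg/minstretcfg/mhpmevent) is clear. */
static uint64_t read_filtered(bool minh, bool sinh, bool uinh)
{
    uint64_t val = 0;

    if (!minh) {
        val += acc[PRV_M];
    }
    if (!sinh) {
        val += acc[PRV_S];
    }
    if (!uinh) {
        val += acc[PRV_U];
    }
    return val;
}

int main(void)
{
    /* U-mode runs from tick 0 to 100, S-mode from tick 100 to 160. */
    switch_priv(100, PRV_U, PRV_S);
    switch_priv(160, PRV_S, PRV_M);

    /* With M-mode inhibited, only the U and S portions (160 ticks) count. */
    return read_filtered(true, false, false) == 160 ? 0 : 1;
}

In this model, the question debated in the thread is whether switch_priv()
can always tell which side of an HS<->VS transition it is on at the moment it
runs, given that helper_mret() and helper_sret() update virt_enabled and priv
in different orders.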