So far the PMU logic has been using PMC5 to count instructions (Linux kernel
event PM_INST_CMPL) and PMC6 to count cycles (PM_CYC). PMCs 1-4 aren't being
used.

Let's enable all PMCs to count the two events we already provide. The logic
that used to update PMC5 is now provided by update_PMC_PM_INST_CMPL(), and
the PMC6 logic is now implemented in update_PMC_PM_CYC(). Enabling these two
events for all PMCs is done by using the Linux kernel definitions of the
events, 0x02 for PM_INST_CMPL and 0x1E for PM_CYC, which are selected via
specific bit fields of MMCR1 for each PMC.

PMCs 1-4 rely on the correct event being set in MMCR1. PMC5 and PMC6 count
PM_INST_CMPL and PM_CYC, respectively, regardless of the MMCR1 setup.

Signed-off-by: Daniel Henrique Barboza <danielhb...@gmail.com>
---
 target/ppc/cpu.h               |  8 +++++
 target/ppc/pmu_book3s_helper.c | 60 ++++++++++++++++++++++++++++++++--
 2 files changed, 65 insertions(+), 3 deletions(-)

diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 8cea8f2aca..afd9cd402b 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -350,6 +350,14 @@ typedef struct ppc_v3_pate_t {
 #define MMCR0_FCECE PPC_BIT(38) /* FC on Enabled Cond or Event */
 #define MMCR0_PMCC PPC_BITMASK(44, 45) /* PMC Control */
 
+#define MMCR1_PMC1SEL_SHIFT (63 - 39)
+#define MMCR1_PMC1SEL PPC_BITMASK(32, 39)
+#define MMCR1_PMC2SEL_SHIFT (63 - 47)
+#define MMCR1_PMC2SEL PPC_BITMASK(40, 47)
+#define MMCR1_PMC3SEL_SHIFT (63 - 55)
+#define MMCR1_PMC3SEL PPC_BITMASK(48, 55)
+#define MMCR1_PMC4SEL PPC_BITMASK(56, 63)
+
 /* LPCR bits */
 #define LPCR_VPM0 PPC_BIT(0)
 #define LPCR_VPM1 PPC_BIT(1)
diff --git a/target/ppc/pmu_book3s_helper.c b/target/ppc/pmu_book3s_helper.c
index 0994531f65..99e62bd37b 100644
--- a/target/ppc/pmu_book3s_helper.c
+++ b/target/ppc/pmu_book3s_helper.c
@@ -28,6 +28,56 @@ static uint64_t get_cycles(uint64_t insns)
     return insns * 4;
 }
 
+static void update_PMC_PM_INST_CMPL(CPUPPCState *env, int sprn,
+                                    uint64_t curr_icount)
+{
+    env->spr[sprn] += curr_icount - env->pmu_base_icount;
+}
+
+static void update_PMC_PM_CYC(CPUPPCState *env, int sprn,
+                              uint64_t curr_icount)
+{
+    uint64_t insns = curr_icount - env->pmu_base_icount;
+    env->spr[sprn] += get_cycles(insns);
+}
+
+static void update_programmable_PMC_reg(CPUPPCState *env, int sprn,
+                                        uint64_t curr_icount)
+{
+    int event;
+
+    switch (sprn) {
+    case SPR_POWER_PMC1:
+        event = MMCR1_PMC1SEL & env->spr[SPR_POWER_MMCR1];
+        event = event >> MMCR1_PMC1SEL_SHIFT;
+        break;
+    case SPR_POWER_PMC2:
+        event = MMCR1_PMC2SEL & env->spr[SPR_POWER_MMCR1];
+        event = event >> MMCR1_PMC2SEL_SHIFT;
+        break;
+    case SPR_POWER_PMC3:
+        event = MMCR1_PMC3SEL & env->spr[SPR_POWER_MMCR1];
+        event = event >> MMCR1_PMC3SEL_SHIFT;
+        break;
+    case SPR_POWER_PMC4:
+        event = MMCR1_PMC4SEL & env->spr[SPR_POWER_MMCR1];
+        break;
+    default:
+        return;
+    }
+
+    switch (event) {
+    case 0x2:
+        update_PMC_PM_INST_CMPL(env, sprn, curr_icount);
+        break;
+    case 0x1E:
+        update_PMC_PM_CYC(env, sprn, curr_icount);
+        break;
+    default:
+        return;
+    }
+}
+
 /*
  * Set all PMCs values after a PMU freeze via MMCR0_FC.
  *
@@ -37,10 +87,14 @@ static uint64_t get_cycles(uint64_t insns)
 static void update_PMCs_on_freeze(CPUPPCState *env)
 {
     uint64_t curr_icount = get_insns();
+    int sprn;
+
+    for (sprn = SPR_POWER_PMC1; sprn < SPR_POWER_PMC5; sprn++) {
+        update_programmable_PMC_reg(env, sprn, curr_icount);
+    }
 
-    env->spr[SPR_POWER_PMC5] += curr_icount - env->pmu_base_icount;
-    env->spr[SPR_POWER_PMC6] += get_cycles(curr_icount -
-                                           env->pmu_base_icount);
+    update_PMC_PM_INST_CMPL(env, SPR_POWER_PMC5, curr_icount);
+    update_PMC_PM_CYC(env, SPR_POWER_PMC6, curr_icount);
 }
 
 void helper_store_mmcr0(CPUPPCState *env, target_ulong value)
-- 
2.31.1
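
As a side note for readers unfamiliar with the MMCR1 layout, below is a
minimal standalone sketch (not part of the patch) of how the event number
is extracted from the PMC1SEL field, in the same spirit as
update_programmable_PMC_reg() above. The PPC_BIT()/PPC_BITMASK() macros are
reproduced here, under the assumption that they match QEMU's big-endian bit
numbering helpers, only so the example compiles on its own; event 0x02 is
the Linux kernel's PM_INST_CMPL.

#include <inttypes.h>
#include <stdio.h>

/* Assumed to mirror QEMU's big-endian (IBM-numbered) bit macros. */
#define PPC_BIT(bit)        (0x8000000000000000ULL >> (bit))
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

/* PMC1SEL occupies MMCR1 bits 32-39, hence the (63 - 39) shift. */
#define MMCR1_PMC1SEL_SHIFT (63 - 39)
#define MMCR1_PMC1SEL       PPC_BITMASK(32, 39)

int main(void)
{
    /* Program PMC1 to count PM_INST_CMPL (Linux kernel event 0x02). */
    uint64_t mmcr1 = (uint64_t)0x02 << MMCR1_PMC1SEL_SHIFT;
    uint64_t event = (MMCR1_PMC1SEL & mmcr1) >> MMCR1_PMC1SEL_SHIFT;

    printf("PMC1SEL event: 0x%02" PRIx64 "\n", event);  /* prints 0x02 */
    return 0;
}

With MMCR1 left zeroed the decoded event is 0, so none of the programmable
PMCs count anything, while PMC5 and PMC6 keep counting unconditionally.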