On 6/22/21 4:27 PM, Nicholas Piggin wrote:
KVM PMU management code looks for particular frozen/disabled bits in
the PMU registers so it knows whether it must clear them when coming
out of a guest or not. Setting this up helps KVM make these optimisations
without getting confused. Longer term the better approach might be to
move guest/host PMU switching to the perf subsystem.

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
  arch/powerpc/kernel/cpu_setup_power.c | 4 ++--
  arch/powerpc/kernel/dt_cpu_ftrs.c     | 6 +++---
  arch/powerpc/kvm/book3s_hv.c          | 5 +++++
  arch/powerpc/perf/core-book3s.c       | 7 +++++++
  4 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/kernel/cpu_setup_power.c b/arch/powerpc/kernel/cpu_setup_power.c
index a29dc8326622..3dc61e203f37 100644
--- a/arch/powerpc/kernel/cpu_setup_power.c
+++ b/arch/powerpc/kernel/cpu_setup_power.c
@@ -109,7 +109,7 @@ static void init_PMU_HV_ISA207(void)
  static void init_PMU(void)
  {
        mtspr(SPRN_MMCRA, 0);
-       mtspr(SPRN_MMCR0, 0);
+       mtspr(SPRN_MMCR0, MMCR0_FC);

Sticky point here is that, currently, if not frozen, PMC5/6 will
keep counting. And not freezing them at boot is quite useful
sometimes, for example when running in a simulation, where we could
calculate approximate CPI for micro-benchmarks without the perf
subsystem.
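Just to illustrate what I mean, a minimal sketch of that kind of
approximate-CPI read (the helper name is made up; it assumes mfspr()
and the SPRN_PMC5/SPRN_PMC6 definitions from asm/reg.h, and that the
counters are left unfrozen at boot) would be something like:

/*
 * Rough CPI estimate from the fixed-function counters: PMC5 counts
 * instructions completed and PMC6 counts cycles, so with MMCR0_FC
 * clear they tick without any perf_event setup.
 */
static unsigned long approx_cpi_x100(void)
{
	unsigned long insns  = mfspr(SPRN_PMC5);	/* instructions completed */
	unsigned long cycles = mfspr(SPRN_PMC6);	/* cycles */

	if (!insns)
		return 0;

	return (cycles * 100) / insns;	/* CPI scaled by 100 */
}

With MMCR0_FC set at boot, reads like this return nothing useful until
something (perf or the user) unfreezes the counters.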

        mtspr(SPRN_MMCR1, 0);
        mtspr(SPRN_MMCR2, 0);
  }
@@ -123,7 +123,7 @@ static void init_PMU_ISA31(void)
  {
        mtspr(SPRN_MMCR3, 0);
        mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
-       mtspr(SPRN_MMCR0, MMCR0_PMCCEXT);
+       mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMCCEXT);
  }

  /*
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 0a6b36b4bda8..06a089fbeaa7 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -353,7 +353,7 @@ static void init_pmu_power8(void)
        }

        mtspr(SPRN_MMCRA, 0);
-       mtspr(SPRN_MMCR0, 0);
+       mtspr(SPRN_MMCR0, MMCR0_FC);
        mtspr(SPRN_MMCR1, 0);
        mtspr(SPRN_MMCR2, 0);
        mtspr(SPRN_MMCRS, 0);
@@ -392,7 +392,7 @@ static void init_pmu_power9(void)
                mtspr(SPRN_MMCRC, 0);

        mtspr(SPRN_MMCRA, 0);
-       mtspr(SPRN_MMCR0, 0);
+       mtspr(SPRN_MMCR0, MMCR0_FC);
        mtspr(SPRN_MMCR1, 0);
        mtspr(SPRN_MMCR2, 0);
  }
@@ -428,7 +428,7 @@ static void init_pmu_power10(void)

        mtspr(SPRN_MMCR3, 0);
        mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
-       mtspr(SPRN_MMCR0, MMCR0_PMCCEXT);
+       mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMCCEXT);
  }

  static int __init feat_enable_pmu_power10(struct dt_cpu_feature *f)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 1f30f98b09d1..f7349d150828 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2593,6 +2593,11 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
  #endif
  #endif
        vcpu->arch.mmcr[0] = MMCR0_FC;
+       if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+               vcpu->arch.mmcr[0] |= MMCR0_PMCCEXT;
+               vcpu->arch.mmcra = MMCRA_BHRB_DISABLE;
+       }
+
        vcpu->arch.ctrl = CTRL_RUNLATCH;
        /* default to host PVR, since we can't spoof it */
        kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 51622411a7cc..e33b29ec1a65 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1361,6 +1361,13 @@ static void power_pmu_enable(struct pmu *pmu)
                goto out;

        if (cpuhw->n_events == 0) {
+               if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+                       mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
+                       mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMCCEXT);
+               } else {
+                       mtspr(SPRN_MMCRA, 0);
+                       mtspr(SPRN_MMCR0, MMCR0_FC);
+               }
                ppc_set_pmu_inuse(0);
                goto out;
        }
