Stolen time logging in dtl was removed from the P9 path, so guests had
no stolen time accounting. Add it back in a simpler way that still
avoids locks and per-core accounting code.

Fixes: ecb6a7207f92 ("KVM: PPC: Book3S HV P9: Remove most of the vcore logic")
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kvm/book3s_hv.c | 49 +++++++++++++++++++++++++++++++++---
 1 file changed, 45 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 6fa518f6501d..0a0835edb64a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -248,6 +248,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 
 /*
  * We use the vcpu_load/put functions to measure stolen time.
+ *
  * Stolen time is counted as time when either the vcpu is able to
  * run as part of a virtual core, but the task running the vcore
  * is preempted or sleeping, or when the vcpu needs something done
@@ -277,6 +278,12 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
  * lock.  The stolen times are measured in units of timebase ticks.
  * (Note that the != TB_NIL checks below are purely defensive;
  * they should never fail.)
+ *
+ * The POWER9 path is simpler, one vcpu per virtual core so the
+ * former case does not exist. If a vcpu is preempted when it is
+ * BUSY_IN_HOST and not ceded or otherwise blocked, then accumulate
+ * the stolen cycles in busy_stolen. RUNNING is not a preemptible
+ * state in the P9 path.
  */
 
 static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc, u64 tb)
@@ -310,8 +317,14 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
        unsigned long flags;
        u64 now;
 
-       if (cpu_has_feature(CPU_FTR_ARCH_300))
+       if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+               if (vcpu->arch.busy_preempt != TB_NIL) {
+               WARN_ON_ONCE(vcpu->arch.state != KVMPPC_VCPU_BUSY_IN_HOST);
+                       vc->stolen_tb += mftb() - vcpu->arch.busy_preempt;
+                       vcpu->arch.busy_preempt = TB_NIL;
+               }
                return;
+       }
 
        now = mftb();
 
@@ -339,8 +352,21 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
        unsigned long flags;
        u64 now;
 
-       if (cpu_has_feature(CPU_FTR_ARCH_300))
+       if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+               /*
+                * In the P9 path, RUNNABLE is not preemptible
+                * (nor takes host interrupts)
+                */
+               WARN_ON_ONCE(vcpu->arch.state == KVMPPC_VCPU_RUNNABLE);
+               /*
+                * Account stolen time when preempted while the vcpu task is
+                * running in the kernel (but not in qemu, which is INACTIVE).
+                */
+               if (task_is_running(current) &&
+                               vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
+                       vcpu->arch.busy_preempt = mftb();
                return;
+       }
 
        now = mftb();
 
@@ -739,6 +765,18 @@ static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
        vcpu->arch.dtl.dirty = true;
 }
 
+static void kvmppc_create_dtl_entry_p9(struct kvm_vcpu *vcpu,
+                                      struct kvmppc_vcore *vc,
+                                      u64 now)
+{
+       unsigned long stolen;
+
+       stolen = vc->stolen_tb - vcpu->arch.stolen_logged;
+       vcpu->arch.stolen_logged = vc->stolen_tb;
+
+       __kvmppc_create_dtl_entry(vcpu, vc->pcpu, now, stolen);
+}
+
 static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
                                    struct kvmppc_vcore *vc)
 {
@@ -4470,7 +4508,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
        vc = vcpu->arch.vcore;
        vcpu->arch.ceded = 0;
        vcpu->arch.run_task = current;
-       vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
        vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
 
        /* See if the MMU is ready to go */
@@ -4497,6 +4534,8 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
        /* flags save not required, but irq_pmu has no disable/enable API */
        powerpc_local_irq_pmu_save(flags);
 
+       vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
+
        if (signal_pending(current))
                goto sigpend;
        if (need_resched() || !kvm->arch.mmu_ready)
@@ -4536,7 +4575,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 
        tb = mftb();
 
-       __kvmppc_create_dtl_entry(vcpu, pcpu, tb + vc->tb_offset, 0);
+       kvmppc_create_dtl_entry_p9(vcpu, vc, tb + vc->tb_offset);
 
        trace_kvm_guest_enter(vcpu);
 
@@ -4577,6 +4616,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 
        vcpu->cpu = -1;
        vcpu->arch.thread_cpu = -1;
+       vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
        powerpc_local_irq_pmu_restore(flags);
 
@@ -4639,6 +4679,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
  out:
        vcpu->cpu = -1;
        vcpu->arch.thread_cpu = -1;
+       vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
        powerpc_local_irq_pmu_restore(flags);
        preempt_enable();
        goto done;
-- 
2.35.1

Reply via email to