PAPR specifies accumulated virtual processor wait intervals that relate
to partition scheduling interval times. Implement these counters in the
same way as they are reported by dtl.

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/kvm/book3s_hv.c | 62 ++++++++++++++++++++++++------------
 1 file changed, 41 insertions(+), 21 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 0a0835edb64a..9f8795d2b0c3 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -732,16 +732,15 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
 }
 
 static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
+                                       struct lppaca *vpa,
                                        unsigned int pcpu, u64 now,
                                        unsigned long stolen)
 {
        struct dtl_entry *dt;
-       struct lppaca *vpa;
 
        dt = vcpu->arch.dtl_ptr;
-       vpa = vcpu->arch.vpa.pinned_addr;
 
-       if (!dt || !vpa)
+       if (!dt)
                return;
 
        dt->dispatch_reason = 7;
@@ -762,29 +761,23 @@ static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
        /* order writing *dt vs. writing vpa->dtl_idx */
        smp_wmb();
        vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
-       vcpu->arch.dtl.dirty = true;
-}
-
-static void kvmppc_create_dtl_entry_p9(struct kvm_vcpu *vcpu,
-                                      struct kvmppc_vcore *vc,
-                                      u64 now)
-{
-       unsigned long stolen;
 
-       stolen = vc->stolen_tb - vcpu->arch.stolen_logged;
-       vcpu->arch.stolen_logged = vc->stolen_tb;
-
-       __kvmppc_create_dtl_entry(vcpu, vc->pcpu, now, stolen);
+       /* vcpu->arch.dtl.dirty is set by the caller */
 }
 
-static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
-                                   struct kvmppc_vcore *vc)
+static void kvmppc_update_vpa_dispatch(struct kvm_vcpu *vcpu,
+                                      struct kvmppc_vcore *vc)
 {
+       struct lppaca *vpa;
        unsigned long stolen;
        unsigned long core_stolen;
        u64 now;
        unsigned long flags;
 
+       vpa = vcpu->arch.vpa.pinned_addr;
+       if (!vpa)
+               return;
+
        now = mftb();
 
        core_stolen = vcore_stolen_time(vc, now);
@@ -795,7 +788,34 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
        vcpu->arch.busy_stolen = 0;
        spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 
-       __kvmppc_create_dtl_entry(vcpu, vc->pcpu, now + vc->tb_offset, stolen);
+       vpa->enqueue_dispatch_tb = cpu_to_be64(be64_to_cpu(vpa->enqueue_dispatch_tb) + stolen);
+
+       __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + vc->tb_offset, stolen);
+
+       vcpu->arch.vpa.dirty = true;
+}
+
+static void kvmppc_update_vpa_dispatch_p9(struct kvm_vcpu *vcpu,
+                                      struct kvmppc_vcore *vc,
+                                      u64 now)
+{
+       struct lppaca *vpa;
+       unsigned long stolen;
+       unsigned long stolen_delta;
+
+       vpa = vcpu->arch.vpa.pinned_addr;
+       if (!vpa)
+               return;
+
+       stolen = vc->stolen_tb;
+       stolen_delta = stolen - vcpu->arch.stolen_logged;
+       vcpu->arch.stolen_logged = stolen;
+
+       vpa->enqueue_dispatch_tb = cpu_to_be64(stolen);
+
+       __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now, stolen_delta);
+
+       vcpu->arch.vpa.dirty = true;
 }
 
 /* See if there is a doorbell interrupt pending for a vcpu */
@@ -3820,7 +3840,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
                         * kvmppc_core_prepare_to_enter.
                         */
                        kvmppc_start_thread(vcpu, pvc);
-                       kvmppc_create_dtl_entry(vcpu, pvc);
+                       kvmppc_update_vpa_dispatch(vcpu, pvc);
                        trace_kvm_guest_enter(vcpu);
                        if (!vcpu->arch.ptid)
                                thr0_done = true;
@@ -4392,7 +4412,7 @@ static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu)
                if ((vc->vcore_state == VCORE_PIGGYBACK ||
                     vc->vcore_state == VCORE_RUNNING) &&
                           !VCORE_IS_EXITING(vc)) {
-                       kvmppc_create_dtl_entry(vcpu, vc);
+                       kvmppc_update_vpa_dispatch(vcpu, vc);
                        kvmppc_start_thread(vcpu, vc);
                        trace_kvm_guest_enter(vcpu);
                } else if (vc->vcore_state == VCORE_SLEEPING) {
@@ -4575,7 +4595,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 
        tb = mftb();
 
-       kvmppc_create_dtl_entry_p9(vcpu, vc, tb + vc->tb_offset);
+       kvmppc_update_vpa_dispatch_p9(vcpu, vc, tb + vc->tb_offset);
 
        trace_kvm_guest_enter(vcpu);
 
-- 
2.35.1

Reply via email to