Use HFSCR facility disabling to implement demand faulting for EBB, with
a hysteresis counter similar to the load_fp etc. counters that context
switching uses to implement the equivalent demand faulting for
userspace facilities.

This speeds up guest entry/exit by avoiding the EBB register
save/restore when a guest is not using the facility frequently. When a
guest does use it often there is some additional demand-fault overhead,
but EBB is not a commonly used facility.
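
As a rough illustration (not part of the patch), below is a minimal
standalone model of the hysteresis scheme: the struct and function
names are hypothetical stand-ins for kvm_vcpu_arch, the facility
unavailable handler and store_spr_state, and HFSCR_EBB is defined
locally just for this sketch.

#include <stdint.h>
#include <stdio.h>

#define HFSCR_EBB	(1ull << 7)	/* placeholder bit for this sketch */

struct vcpu_model {			/* hypothetical, not kvm_vcpu_arch */
	uint64_t hfscr;
	uint8_t load_ebb;
};

/* Facility-unavailable interrupt: grant EBB on first guest use. */
static void ebb_unavailable(struct vcpu_model *v)
{
	v->hfscr |= HFSCR_EBB;
}

/* Guest exit: only touch EBB state while granted; re-disable on wrap. */
static void store_spr_state(struct vcpu_model *v)
{
	if (v->hfscr & HFSCR_EBB) {
		/* ... mfspr() the EBB registers here ... */
		v->load_ebb++;
		if (!v->load_ebb)
			v->hfscr &= ~HFSCR_EBB;
	}
}

int main(void)
{
	struct vcpu_model v = { 0 };

	ebb_unavailable(&v);		/* guest touched an EBB register */
	for (int i = 0; i < 300; i++)	/* 256 exits later, EBB is off again */
		store_spr_state(&v);
	printf("HFSCR_EBB %s\n", (v.hfscr & HFSCR_EBB) ? "set" : "clear");
	return 0;
}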

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/include/asm/kvm_host.h   |  1 +
 arch/powerpc/kvm/book3s_hv.c          | 11 +++++++++++
 arch/powerpc/kvm/book3s_hv_nested.c   |  3 ++-
 arch/powerpc/kvm/book3s_hv_p9_entry.c | 26 ++++++++++++++++++++------
 4 files changed, 34 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 118b388ea887..bee95106c1f2 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -585,6 +585,7 @@ struct kvm_vcpu_arch {
        ulong cfar;
        ulong ppr;
        u32 pspb;
+       u8 load_ebb;
        ulong fscr;
        ulong shadow_fscr;
        ulong ebbhr;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index ae528eb37792..99e9da078e7d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1366,6 +1366,13 @@ static int kvmppc_pmu_unavailable(struct kvm_vcpu *vcpu)
        return RESUME_GUEST;
 }
 
+static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.hfscr |= HFSCR_EBB;
+
+       return RESUME_GUEST;
+}
+
 static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                                 struct task_struct *tsk)
 {
@@ -1645,6 +1652,8 @@ XXX benchmark guest exits
                                r = kvmppc_emulate_doorbell_instr(vcpu);
                        if (cause == FSCR_PM_LG)
                                r = kvmppc_pmu_unavailable(vcpu);
+                       if (cause == FSCR_EBB_LG)
+                               r = kvmppc_ebb_unavailable(vcpu);
                }
                if (r == EMULATE_FAIL) {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
@@ -1764,6 +1773,8 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
                r = EMULATE_FAIL;
                if (cause == FSCR_PM_LG && (vcpu->arch.nested_hfscr & HFSCR_PM))
                        r = kvmppc_pmu_unavailable(vcpu);
+               if (cause == FSCR_EBB_LG && (vcpu->arch.nested_hfscr & HFSCR_EBB))
+                       r = kvmppc_ebb_unavailable(vcpu);
 
                if (r == EMULATE_FAIL)
                        r = RESUME_HOST;
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 024b0ce5b702..ee8668f056f9 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -168,7 +168,8 @@ static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
         * but preserve the interrupt cause field and facilities that might
         * be disabled for demand faulting in the L1.
         */
-       hr->hfscr &= (HFSCR_INTR_CAUSE | HFSCR_PM | vcpu->arch.hfscr);
+       hr->hfscr &= (HFSCR_INTR_CAUSE | HFSCR_PM | HFSCR_EBB |
+                       vcpu->arch.hfscr);
 
        /* Don't let data address watchpoint match in hypervisor state */
        hr->dawrx0 &= ~DAWRX_HYP;
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 4d1a2d1ff4c1..cf41261daa97 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -218,9 +218,12 @@ static void load_spr_state(struct kvm_vcpu *vcpu,
                                struct p9_host_os_sprs *host_os_sprs)
 {
        mtspr(SPRN_TAR, vcpu->arch.tar);
-       mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
-       mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
-       mtspr(SPRN_BESCR, vcpu->arch.bescr);
+
+       if (vcpu->arch.hfscr & HFSCR_EBB) {
+               mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
+               mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
+               mtspr(SPRN_BESCR, vcpu->arch.bescr);
+       }
 
        if (!cpu_has_feature(CPU_FTR_ARCH_31))
                mtspr(SPRN_TIDR, vcpu->arch.tid);
@@ -251,9 +254,20 @@ static void load_spr_state(struct kvm_vcpu *vcpu,
 static void store_spr_state(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.tar = mfspr(SPRN_TAR);
-       vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
-       vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
-       vcpu->arch.bescr = mfspr(SPRN_BESCR);
+
+       if (vcpu->arch.hfscr & HFSCR_EBB) {
+               vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
+               vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
+               vcpu->arch.bescr = mfspr(SPRN_BESCR);
+               /*
+                * Like load_fp in context switching: turn the facility off
+                * once the u8 counter wraps, to avoid saving and restoring
+                * these registers on every partition switch.
+                */
+               vcpu->arch.load_ebb++;
+               if (!vcpu->arch.load_ebb)
+                       vcpu->arch.hfscr &= ~HFSCR_EBB;
+       }
 
        if (!cpu_has_feature(CPU_FTR_ARCH_31))
                vcpu->arch.tid = mfspr(SPRN_TIDR);
-- 
2.23.0
