This juggles SPR switching on the entry and exit sides so the two are
more symmetric, which makes the next refactoring patch possible. No
functional change.
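
Roughly, the ordering after this patch looks like the sketch below
(illustrative only, not part of the patch; the function names are the
ones already used in kvmhv_p9_guest_entry(), with error handling and
most surrounding detail elided):

    /* entry side */
    load_spr_state(vcpu, &host_os_sprs);
    load_fp_state(&vcpu->arch.fp);
    /* load_vr_state() under CONFIG_ALTIVEC */
    mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
    switch_pmu_to_guest(vcpu, &host_os_sprs);
    /* ... run the guest ... */

    /* exit side */
    switch_pmu_to_host(vcpu, &host_os_sprs);
    store_spr_state(vcpu);
    store_fp_state(&vcpu->arch.fp);
    /* ... */
    restore_p9_host_os_sprs(vcpu, &host_os_sprs);

Among the state-switching calls, switch_pmu_to_guest() and
switch_pmu_to_host() now bracket the guest run most tightly on both
sides, mirroring each other.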

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/kvm/book3s_hv.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 56429b53f4dc..c2c72875fca9 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4175,7 +4175,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
                msr = mfmsr(); /* TM restore can update msr */
        }
 
-       switch_pmu_to_guest(vcpu, &host_os_sprs);
+       load_spr_state(vcpu, &host_os_sprs);
 
        load_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC
@@ -4183,7 +4183,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 #endif
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
 
-       load_spr_state(vcpu, &host_os_sprs);
+       switch_pmu_to_guest(vcpu, &host_os_sprs);
 
        if (kvmhv_on_pseries()) {
                /*
@@ -4283,6 +4283,8 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
                        vcpu->arch.slb_max = 0;
        }
 
+       switch_pmu_to_host(vcpu, &host_os_sprs);
+
        store_spr_state(vcpu);
 
        store_fp_state(&vcpu->arch.fp);
@@ -4297,8 +4299,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
        vcpu_vpa_increment_dispatch(vcpu);
 
-       switch_pmu_to_host(vcpu, &host_os_sprs);
-
        timer_rearm_host_dec(*tb);
 
        restore_p9_host_os_sprs(vcpu, &host_os_sprs);
-- 
2.23.0
