On 06/03/2021 02:06, Nicholas Piggin wrote:
Switching the MMU from radix<->radix mode is tricky particularly as the
MMU can remain enabled and requires a certain sequence of SPR updates.
Move these together into their own functions.

This also includes the radix TLB check / flush because it's tied in to
MMU switching due to tlbiel getting LPID from LPIDR.

(XXX: isync / hwsync synchronisation TBD)


Looks alright but what is this comment about? Is something missing, or is the current synchronisation merely suboptimal?



Signed-off-by: Nicholas Piggin <npig...@gmail.com>


---
  arch/powerpc/kvm/book3s_hv.c | 55 +++++++++++++++++++++---------------
  1 file changed, 32 insertions(+), 23 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index f1230f9d98ba..b9cae42b9cd5 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3449,12 +3449,38 @@ static noinline void kvmppc_run_core(struct 
kvmppc_vcore *vc)
        trace_kvmppc_run_core(vc, 1);
  }
+static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
+{
+       struct kvmppc_vcore *vc = vcpu->arch.vcore;
+       struct kvm_nested_guest *nested = vcpu->arch.nested;
+       u32 lpid;
+
+       lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
+
+       mtspr(SPRN_LPID, lpid);
+       mtspr(SPRN_LPCR, lpcr);
+       mtspr(SPRN_PID, vcpu->arch.pid);
+       isync();
+
+       /* TLBIEL must have LPIDR set, so set guest LPID before flushing. */
+       kvmppc_check_need_tlb_flush(kvm, vc->pcpu, nested);
+}
+
+static void switch_mmu_to_host_radix(struct kvm *kvm, u32 pid)
+{
+       mtspr(SPRN_PID, pid);
+       mtspr(SPRN_LPID, kvm->arch.host_lpid);
+       mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
+       isync();
+}
+
  /*
   * Load up hypervisor-mode registers on P9.
   */
  static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
                                     unsigned long lpcr)
  {
+       struct kvm *kvm = vcpu->kvm;
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
        s64 hdec;
        u64 tb, purr, spurr;
@@ -3477,12 +3503,12 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu 
*vcpu, u64 time_limit,
         * P8 and P9 suppress the HDEC exception when LPCR[HDICE] = 0,
         * so set HDICE before writing HDEC.
         */
-       mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr | LPCR_HDICE);
+       mtspr(SPRN_LPCR, kvm->arch.host_lpcr | LPCR_HDICE);
        isync();
hdec = time_limit - mftb();
        if (hdec < 0) {
-               mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
+               mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
                isync();
                return BOOK3S_INTERRUPT_HV_DECREMENTER;
        }
@@ -3517,7 +3543,6 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu 
*vcpu, u64 time_limit,
        }
        mtspr(SPRN_CIABR, vcpu->arch.ciabr);
        mtspr(SPRN_IC, vcpu->arch.ic);
-       mtspr(SPRN_PID, vcpu->arch.pid);
mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
              (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
@@ -3531,8 +3556,7 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu 
*vcpu, u64 time_limit,
        mtspr(SPRN_AMOR, ~0UL);

-       mtspr(SPRN_LPCR, lpcr);
-       isync();
+       switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
        kvmppc_xive_push_vcpu(vcpu);

@@ -3571,7 +3595,6 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
                mtspr(SPRN_DAWR1, host_dawr1);
                mtspr(SPRN_DAWRX1, host_dawrx1);
        }
-       mtspr(SPRN_PID, host_pidr);
/*
         * Since this is radix, do a eieio; tlbsync; ptesync sequence in
@@ -3586,9 +3609,6 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu 
*vcpu, u64 time_limit,
        if (cpu_has_feature(CPU_FTR_ARCH_31))
                asm volatile(PPC_CP_ABORT);
- mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid); /* restore host LPID */
-       isync();
-
        vc->dpdes = mfspr(SPRN_DPDES);
        vc->vtb = mfspr(SPRN_VTB);
        mtspr(SPRN_DPDES, 0);
@@ -3605,7 +3625,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu 
*vcpu, u64 time_limit,
        }
mtspr(SPRN_HDEC, 0x7fffffff);
-       mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
+
+       switch_mmu_to_host_radix(kvm, host_pidr);
return trap;
  }
@@ -4138,7 +4159,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 
time_limit,
  {
        struct kvm_run *run = vcpu->run;
        int trap, r, pcpu;
-       int srcu_idx, lpid;
+       int srcu_idx;
        struct kvmppc_vcore *vc;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_nested_guest *nested = vcpu->arch.nested;
@@ -4212,13 +4233,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 
time_limit,
        vc->vcore_state = VCORE_RUNNING;
        trace_kvmppc_run_core(vc, 0);
- if (cpu_has_feature(CPU_FTR_HVMODE)) {


The new location of mtspr(SPRN_LPID, lpid) does not check for CPU_FTR_HVMODE anymore; is this going to work with HV KVM on pseries? Thanks,




-               lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
-               mtspr(SPRN_LPID, lpid);
-               isync();
-               kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
-       }
-
        guest_enter_irqoff();
srcu_idx = srcu_read_lock(&kvm->srcu);
@@ -4237,11 +4251,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 
time_limit,
        srcu_read_unlock(&kvm->srcu, srcu_idx);

-       if (cpu_has_feature(CPU_FTR_HVMODE)) {
-               mtspr(SPRN_LPID, kvm->arch.host_lpid);
-               isync();
-       }
-
        set_irq_happened(trap);
kvmppc_set_host_core(pcpu);


--
Alexey

Reply via email to