On 2019-06-28 15:08, Claudio Carvalho wrote:
From: Sukadev Bhattiprolu <suka...@linux.vnet.ibm.com>

To enter a secure guest, we have to go through the ultravisor, therefore
we do a ucall when we are entering a secure guest.

This change is needed for any sort of entry to the secure guest from the
hypervisor, whether it is a return from an hcall, a return from a
hypervisor interrupt, or the first time that a secure guest vCPU is run.

If we are returning from an hcall, the results are already in the
appropriate registers R3:12, except for R3, R6 and R7. R3 has the status of
the reflected hcall, therefore we move it to R0 for the ultravisor and
set R3 to the UV_RETURN ucall number. R6,7 were used as temporary
registers, hence we restore them.

Have fast_guest_return check the kvm_arch.secure_guest field so that a
new CPU enters UV when started (in response to a RTAS start-cpu call).

Thanks to input from Paul Mackerras, Ram Pai and Mike Anderson.

Signed-off-by: Sukadev Bhattiprolu <suka...@linux.vnet.ibm.com>
[ Pass SRR1 in r11 for UV_RETURN, fix kvmppc_msr_interrupt to preserve
  the MSR_S bit ]
Signed-off-by: Paul Mackerras <pau...@ozlabs.org>
[ Fix UV_RETURN ucall number and arch.secure_guest check ]
Signed-off-by: Ram Pai <linux...@us.ibm.com>
[ Save the actual R3 in R0 for the ultravisor and use R3 for the
UV_RETURN ucall number. Update commit message and ret_to_ultra comment ]
Signed-off-by: Claudio Carvalho <cclau...@linux.ibm.com>
Reviewed-by: Janani Janakiraman <jan...@linux.ibm.com>
---
 arch/powerpc/include/asm/kvm_host.h       |  1 +
 arch/powerpc/include/asm/ultravisor-api.h |  1 +
 arch/powerpc/kernel/asm-offsets.c         |  1 +
 arch/powerpc/kvm/book3s_hv_rmhandlers.S   | 40 +++++++++++++++++++----
 4 files changed, 37 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 013c76a0a03e..184becb62ea4 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -294,6 +294,7 @@ struct kvm_arch {
        cpumask_t cpu_in_guest;
        u8 radix;
        u8 fwnmi_enabled;
+       u8 secure_guest;
        bool threads_indep;
        bool nested_enable;
        pgd_t *pgtable;
diff --git a/arch/powerpc/include/asm/ultravisor-api.h b/arch/powerpc/include/asm/ultravisor-api.h
index 141940771add..7c4d0b4ced12 100644
--- a/arch/powerpc/include/asm/ultravisor-api.h
+++ b/arch/powerpc/include/asm/ultravisor-api.h
@@ -19,5 +19,6 @@

 /* opcodes */
 #define UV_WRITE_PATE                  0xF104
+#define UV_RETURN                      0xF11C

 #endif /* _ASM_POWERPC_ULTRAVISOR_API_H */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 8e02444e9d3d..44742724513e 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -508,6 +508,7 @@ int main(void)
        OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v);
        OFFSET(KVM_RADIX, kvm, arch.radix);
        OFFSET(KVM_FWNMI, kvm, arch.fwnmi_enabled);
+       OFFSET(KVM_SECURE_GUEST, kvm, arch.secure_guest);
        OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr);
        OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar);
        OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index cffb365d9d02..89813ca987c2 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -36,6 +36,7 @@
 #include <asm/asm-compat.h>
 #include <asm/feature-fixups.h>
 #include <asm/cpuidle.h>
+#include <asm/ultravisor-api.h>

 /* Sign-extend HDEC if not on POWER9 */
 #define EXTEND_HDEC(reg)                       \
@@ -1092,16 +1093,12 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

        ld      r5, VCPU_LR(r4)
-       ld      r6, VCPU_CR(r4)
        mtlr    r5
-       mtcr    r6

        ld      r1, VCPU_GPR(R1)(r4)
        ld      r2, VCPU_GPR(R2)(r4)
        ld      r3, VCPU_GPR(R3)(r4)
        ld      r5, VCPU_GPR(R5)(r4)
-       ld      r6, VCPU_GPR(R6)(r4)
-       ld      r7, VCPU_GPR(R7)(r4)
        ld      r8, VCPU_GPR(R8)(r4)
        ld      r9, VCPU_GPR(R9)(r4)
        ld      r10, VCPU_GPR(R10)(r4)
@@ -1119,10 +1116,38 @@ BEGIN_FTR_SECTION
        mtspr   SPRN_HDSISR, r0
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

+       ld      r6, VCPU_KVM(r4)
+       lbz     r7, KVM_SECURE_GUEST(r6)
+       cmpdi   r7, 0
+       bne     ret_to_ultra
+
+       lwz     r6, VCPU_CR(r4)
+       mtcr    r6
+
+       ld      r7, VCPU_GPR(R7)(r4)
+       ld      r6, VCPU_GPR(R6)(r4)
        ld      r0, VCPU_GPR(R0)(r4)
        ld      r4, VCPU_GPR(R4)(r4)
        HRFI_TO_GUEST
        b       .
+/*
+ * We are entering a secure guest, so we have to invoke the ultravisor to do + * that. If we are returning from a hcall, the results are already in the + * appropriate registers R3:12, except for R3, R6 and R7. R3 has the status of + * the reflected hcall, therefore we move it to R0 for the ultravisor and set + * R3 to the UV_RETURN ucall number. R6,7 were used as temporary registers
+ * above, hence we restore them.
+ */
+ret_to_ultra:
+       lwz     r6, VCPU_CR(r4)
+       mtcr    r6
+       mfspr   r11, SPRN_SRR1
+       mr      r0, r3
+       LOAD_REG_IMMEDIATE(r3, UV_RETURN)
+       ld      r7, VCPU_GPR(R7)(r4)
+       ld      r6, VCPU_GPR(R6)(r4)
+       ld      r4, VCPU_GPR(R4)(r4)
+       sc      2

 /*
  * Enter the guest on a P9 or later system where we have exactly
@@ -3318,13 +3343,16 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
  *   r0 is used as a scratch register
  */
 kvmppc_msr_interrupt:
+       andis.  r0, r11, MSR_S@h
        rldicl  r0, r11, 64 - MSR_TS_S_LG, 62
-       cmpwi   r0, 2 /* Check if we are in transactional state..  */
+       cmpwi   cr1, r0, 2 /* Check if we are in transactional state..  */
        ld      r11, VCPU_INTR_MSR(r9)
-       bne     1f
+       bne     cr1, 1f
        /* ... if transactional, change to suspended */
        li      r0, 1
 1:     rldimi  r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
+       beqlr
+       oris    r11, r11, MSR_S@h               /* preserve MSR_S bit setting */
        blr

 /*

Reply via email to