The softpatch interrupt sets HSRR0 to the faulting instruction +4, so
the TM emulation code must subtract 4 to recover the address of the
faulting instruction. Also have it emulate and deliver HFAC interrupts
correctly, which is important for nested HV and for facility
demand-faulting in future.

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
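For context, a condensed sketch of the HFAC reflection pattern that each
facility-disabled case in book3s_hv_tm.c now follows (the helper and its
name here are illustrative only; the patch open-codes this at each site):

	/*
	 * Hypothetical helper: reflect a Hypervisor Facility Unavailable
	 * interrupt for the facility identified by fac_lg (e.g. FSCR_EBB_LG
	 * or FSCR_TM_LG).
	 */
	static int reflect_hfac_unavail(struct kvm_vcpu *vcpu, u64 fac_lg)
	{
		/* HSRR0/NIP was advanced past the instruction; point back at it */
		vcpu->arch.regs.nip -= 4;

		/* Record the cause in the HFSCR IC field (top byte) */
		vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
		vcpu->arch.hfscr |= fac_lg << 56;

		/* Have the exit handler deliver BOOK3S_INTERRUPT_H_FAC_UNAVAIL */
		vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
		return -1;	/* caller falls through to the host interrupt handler */
	}
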
 arch/powerpc/include/asm/reg.h  |  3 +-
 arch/powerpc/kvm/book3s_hv.c    | 35 ++++++++++++--------
 arch/powerpc/kvm/book3s_hv_tm.c | 57 +++++++++++++++++++++------------
 3 files changed, 61 insertions(+), 34 deletions(-)

diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index be85cf156a1f..e9d27265253b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -415,6 +415,7 @@
 #define   FSCR_TAR     __MASK(FSCR_TAR_LG)
 #define   FSCR_EBB     __MASK(FSCR_EBB_LG)
 #define   FSCR_DSCR    __MASK(FSCR_DSCR_LG)
+#define   FSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56)      /* interrupt cause */
 #define SPRN_HFSCR     0xbe    /* HV=1 Facility Status & Control Register */
 #define   HFSCR_PREFIX __MASK(FSCR_PREFIX_LG)
 #define   HFSCR_MSGP   __MASK(FSCR_MSGP_LG)
@@ -426,7 +427,7 @@
 #define   HFSCR_DSCR   __MASK(FSCR_DSCR_LG)
 #define   HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
 #define   HFSCR_FP     __MASK(FSCR_FP_LG)
-#define   HFSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56)     /* interrupt cause */
+#define   HFSCR_INTR_CAUSE FSCR_INTR_CAUSE
 #define SPRN_TAR       0x32f   /* Target Address Register */
 #define SPRN_LPCR      0x13E   /* LPAR Control Register */
 #define   LPCR_VPM0            ASM_CONST(0x8000000000000000)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index ce7ff12cfc03..adac1a6431a0 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1682,6 +1682,21 @@ XXX benchmark guest exits
                        r = RESUME_GUEST;
                }
                break;
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       case BOOK3S_INTERRUPT_HV_SOFTPATCH:
+               /*
+                * This occurs for various TM-related instructions that
+                * we need to emulate on POWER9 DD2.2.  We have already
+                * handled the cases where the guest was in real-suspend
+                * mode and was transitioning to transactional state.
+                */
+               r = kvmhv_p9_tm_emulation(vcpu);
+               if (r != -1)
+                       break;
+               fallthrough; /* go to facility unavailable handler */
+#endif
+
        /*
         * This occurs if the guest (kernel or userspace), does something that
         * is prohibited by HFSCR.
@@ -1700,18 +1715,6 @@ XXX benchmark guest exits
                }
                break;
 
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-       case BOOK3S_INTERRUPT_HV_SOFTPATCH:
-               /*
-                * This occurs for various TM-related instructions that
-                * we need to emulate on POWER9 DD2.2.  We have already
-                * handled the cases where the guest was in real-suspend
-                * mode and was transitioning to transactional state.
-                */
-               r = kvmhv_p9_tm_emulation(vcpu);
-               break;
-#endif
-
        case BOOK3S_INTERRUPT_HV_RM_HARD:
                r = RESUME_PASSTHROUGH;
                break;
@@ -1814,9 +1817,15 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
                 * mode and was transitioning to transactional state.
                 */
                r = kvmhv_p9_tm_emulation(vcpu);
-               break;
+               if (r != -1)
+                       break;
+               fallthrough; /* go to facility unavailable handler */
 #endif
 
+       case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
+               r = RESUME_HOST;
+               break;
+
        case BOOK3S_INTERRUPT_HV_RM_HARD:
                vcpu->arch.trap = 0;
                r = RESUME_GUEST;
diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
index cc90b8b82329..e4fd4a9dee08 100644
--- a/arch/powerpc/kvm/book3s_hv_tm.c
+++ b/arch/powerpc/kvm/book3s_hv_tm.c
@@ -74,19 +74,23 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
        case PPC_INST_RFEBB:
                if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
                        /* generate an illegal instruction interrupt */
+                       vcpu->arch.regs.nip -= 4;
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }
                /* check EBB facility is available */
                if (!(vcpu->arch.hfscr & HFSCR_EBB)) {
-                       /* generate an illegal instruction interrupt */
-                       kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
-                       return RESUME_GUEST;
+                       vcpu->arch.regs.nip -= 4;
+                       vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
+                       vcpu->arch.hfscr |= (u64)FSCR_EBB_LG << 56;
+                       vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
+                       return -1; /* rerun host interrupt handler */
                }
                if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
                        /* generate a facility unavailable interrupt */
-                       vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
-                               ((u64)FSCR_EBB_LG << 56);
+                       vcpu->arch.regs.nip -= 4;
+                       vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
+                       vcpu->arch.fscr |= (u64)FSCR_EBB_LG << 56;
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_FAC_UNAVAIL);
                        return RESUME_GUEST;
                }
@@ -123,19 +127,23 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                /* check for PR=1 and arch 2.06 bit set in PCR */
                if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
                        /* generate an illegal instruction interrupt */
+                       vcpu->arch.regs.nip -= 4;
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }
                /* check for TM disabled in the HFSCR or MSR */
                if (!(vcpu->arch.hfscr & HFSCR_TM)) {
-                       /* generate an illegal instruction interrupt */
-                       kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
-                       return RESUME_GUEST;
+                       vcpu->arch.regs.nip -= 4;
+                       vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
+                       vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
+                       vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
+                       return -1; /* rerun host interrupt handler */
                }
                if (!(msr & MSR_TM)) {
                        /* generate a facility unavailable interrupt */
-                       vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
-                               ((u64)FSCR_TM_LG << 56);
+                       vcpu->arch.regs.nip -= 4;
+                       vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
+                       vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
                        kvmppc_book3s_queue_irqprio(vcpu,
                                                BOOK3S_INTERRUPT_FAC_UNAVAIL);
                        return RESUME_GUEST;
@@ -158,20 +166,24 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
        case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
                /* check for TM disabled in the HFSCR or MSR */
                if (!(vcpu->arch.hfscr & HFSCR_TM)) {
-                       /* generate an illegal instruction interrupt */
-                       kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
-                       return RESUME_GUEST;
+                       vcpu->arch.regs.nip -= 4;
+                       vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
+                       vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
+                       vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
+                       return -1; /* rerun host interrupt handler */
                }
                if (!(msr & MSR_TM)) {
                        /* generate a facility unavailable interrupt */
-                       vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
-                               ((u64)FSCR_TM_LG << 56);
+                       vcpu->arch.regs.nip -= 4;
+                       vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
+                       vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
                        kvmppc_book3s_queue_irqprio(vcpu,
                                                BOOK3S_INTERRUPT_FAC_UNAVAIL);
                        return RESUME_GUEST;
                }
                /* If no transaction active, generate TM bad thing */
                if (!MSR_TM_ACTIVE(msr)) {
+                       vcpu->arch.regs.nip -= 4;
                        kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
                        return RESUME_GUEST;
                }
@@ -196,20 +208,24 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                /* XXX do we need to check for PR=0 here? */
                /* check for TM disabled in the HFSCR or MSR */
                if (!(vcpu->arch.hfscr & HFSCR_TM)) {
-                       /* generate an illegal instruction interrupt */
-                       kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
-                       return RESUME_GUEST;
+                       vcpu->arch.regs.nip -= 4;
+                       vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
+                       vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
+                       vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
+                       return -1; /* rerun host interrupt handler */
                }
                if (!(msr & MSR_TM)) {
                        /* generate a facility unavailable interrupt */
-                       vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
-                               ((u64)FSCR_TM_LG << 56);
+                       vcpu->arch.regs.nip -= 4;
+                       vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
+                       vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
                        kvmppc_book3s_queue_irqprio(vcpu,
                                                BOOK3S_INTERRUPT_FAC_UNAVAIL);
                        return RESUME_GUEST;
                }
                /* If transaction active or TEXASR[FS] = 0, bad thing */
                if (MSR_TM_ACTIVE(msr) || !(vcpu->arch.texasr & TEXASR_FS)) {
+                       vcpu->arch.regs.nip -= 4;
                        kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
                        return RESUME_GUEST;
                }
@@ -224,6 +240,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
        }
 
        /* What should we do here? We didn't recognize the instruction */
+       vcpu->arch.regs.nip -= 4;
        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
       pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation", instr);
 
-- 
2.23.0
