While trying to get Hyper-V running, I realized that the interrupt injection
mechanisms currently in place are not entirely correct.

This patch makes nested SVM's interrupt injection behave more like it does on
a real machine.
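
On real hardware, GIF gates all interrupt delivery, and GIF going back to 1 is
only visible through a separate STGI (or VMRUN) intercept. As a rough
illustration, here is a sketch condensed from the existing stgi_interception()
handler in svm.c (the EFER/paging sanity checks are omitted):

    static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
    {
            /* Skip over the 3-byte STGI instruction ... */
            svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
            skip_emulated_instruction(&svm->vcpu);

            /* ... and raise GIF again.  Once GIF=1, the next call to
             * enable_irq_window() can finally arm the vintr intercept. */
            svm->vcpu.arch.hflags |= HF_GIF_MASK;
            return 1;
    }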

v2: call BUG_ON() when svm_set_irq() is called with GIF=0
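
The BUG_ON() is safe because the generic x86 code only calls the set_irq hook
after interrupt_allowed returned true, which with this patch implies GIF=1 and
no nested guest running. Roughly, as a sketch (inject_pending_irq() is a
made-up name condensing the logic in x86.c):

    static void inject_pending_irq(struct kvm_vcpu *vcpu)
    {
            if (!kvm_cpu_has_interrupt(vcpu))
                    return;

            if (svm_interrupt_allowed(vcpu))
                    /* Implies GIF=1 and !is_nested(), so the BUG_ON
                     * in svm_set_irq() can never trigger. */
                    svm_set_irq(vcpu, kvm_cpu_get_interrupt(vcpu));
            else
                    /* Either #VMEXIT(INTR) to the L1 hypervisor or
                     * arm the vintr intercept, via enable_irq_window(). */
                    enable_irq_window(vcpu);
    }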

Signed-off-by: Alexander Graf <ag...@suse.de>
---
 arch/x86/kvm/svm.c |   39 ++++++++++++++++++++++++---------------
 1 files changed, 24 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index fa2a710..5b14c9d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1517,7 +1517,8 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
        /* Kill any pending exceptions */
        if (svm->vcpu.arch.exception.pending == true)
                nsvm_printk("WARNING: Pending Exception\n");
-       svm->vcpu.arch.exception.pending = false;
+       kvm_clear_exception_queue(&svm->vcpu);
+       kvm_clear_interrupt_queue(&svm->vcpu);
 
        /* Restore selected save entries */
        svm->vmcb->save.es = hsave->save.es;
@@ -1585,7 +1586,8 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
        svm->nested_vmcb = svm->vmcb->save.rax;
 
        /* Clear internal status */
-       svm->vcpu.arch.exception.pending = false;
+       kvm_clear_exception_queue(&svm->vcpu);
+       kvm_clear_interrupt_queue(&svm->vcpu);
 
        /* Save the old vmcb, so we don't need to pick what we save, but
           can restore everything when a VMEXIT occurs */
@@ -2277,21 +2279,14 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
                ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
 }
 
-static void svm_queue_irq(struct kvm_vcpu *vcpu, unsigned nr)
-{
-       struct vcpu_svm *svm = to_svm(vcpu);
-
-       svm->vmcb->control.event_inj = nr |
-               SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
-}
-
 static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       nested_svm_intr(svm);
+       BUG_ON(!(svm->vcpu.arch.hflags & HF_GIF_MASK));
 
-       svm_queue_irq(vcpu, irq);
+       svm->vmcb->control.event_inj = irq |
+               SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
 }
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
@@ -2319,13 +2314,25 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
        struct vmcb *vmcb = svm->vmcb;
        return (vmcb->save.rflags & X86_EFLAGS_IF) &&
                !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
-               (svm->vcpu.arch.hflags & HF_GIF_MASK);
+               (svm->vcpu.arch.hflags & HF_GIF_MASK) &&
+               !is_nested(svm);
 }
 
 static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
-       svm_set_vintr(to_svm(vcpu));
-       svm_inject_irq(to_svm(vcpu), 0x0);
+       struct vcpu_svm *svm = to_svm(vcpu);
+       nsvm_printk("Trying to open IRQ window\n");
+
+       nested_svm_intr(svm);
+
+       /* When GIF=0 we can't rely on the CPU to tell us when GIF
+        * becomes 1, because that is signaled by a separate STGI/VMRUN
+        * intercept.  The next time we get that intercept, this function
+        * is called again and we arm the vintr intercept then. */
+       if (svm->vcpu.arch.hflags & HF_GIF_MASK) {
+               svm_set_vintr(svm);
+               svm_inject_irq(svm, 0x0);
+       }
 }
 
 static void enable_nmi_window(struct kvm_vcpu *vcpu)
@@ -2393,6 +2400,8 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
        case SVM_EXITINTINFO_TYPE_EXEPT:
        /* In case of a software exception do not reinject an exception
           vector, but re-execute the instruction instead */
+               if (is_nested(svm))
+                       break;
                if (vector == BP_VECTOR || vector == OF_VECTOR)
                        break;
                if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
-- 
1.6.0.2
