NSN's proprietary OS DMX sometimes performs hardware task switches.
To get it running under KVM the following changes were necessary:
Inject interrupts only when the interrupt window is open.
Removed the code that linked tss->prev_task_link back to itself.
Task linking is required for task switches via CALL and task GATE.
Do not call skip_emulated_instruction() for a task GATE switch.

Signed-off-by: Bernhard Kohl <bernhard.k...@nsn.com>
---
 arch/x86/kvm/vmx.c |    3 ++-
 arch/x86/kvm/x86.c |   19 +++++++++++++++++--
 2 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5cf28df..eca57a3 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3357,7 +3357,8 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
                        enable_irq_window(vcpu);
        }
        if (vcpu->arch.interrupt.pending) {
-               vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
+               if (vcpu->arch.interrupt_window_open)
+                       vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
                if (kvm_cpu_has_interrupt(vcpu))
                        enable_irq_window(vcpu);
        }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b556b6a..9052058 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3683,7 +3683,7 @@ static void save_state_to_tss32(struct kvm_vcpu *vcpu,
        tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
        tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
        tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
-       tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
+       tss->prev_task_link = 0;
 }
 
 static int load_state_from_tss32(struct kvm_vcpu *vcpu,
@@ -3810,6 +3810,7 @@ out:
 
 static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
                       u32 old_tss_base,
+                      u16 old_tss_selector, int reason,
                       struct desc_struct *nseg_desc)
 {
        struct tss_segment_32 tss_segment_32;
@@ -3829,6 +3830,18 @@ static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
                           &tss_segment_32, sizeof tss_segment_32))
                goto out;
 
+       /*
+        * SDM 3: table 6-2
+        * Task linking required for CALL and GATE.
+        */
+       if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
+       {
+               tss_segment_32.prev_task_link = old_tss_selector;
+               kvm_write_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+                               &tss_segment_32, sizeof(struct tss_segment_32));
+
+       }
+
        if (load_state_from_tss32(vcpu, &tss_segment_32))
                goto out;
 
@@ -3882,10 +3895,12 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
                kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
        }
 
-       kvm_x86_ops->skip_emulated_instruction(vcpu);
+       if (reason != TASK_SWITCH_GATE)
+               kvm_x86_ops->skip_emulated_instruction(vcpu);
 
        if (nseg_desc.type & 8)
                ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
+                                        old_tss_sel, reason,
                                         &nseg_desc);
        else
                ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
-- 
1.6.0.6


--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to