x86emul: add "unblock NMI" retire flag

No matter that we emulate IRET for (guest) real mode only right now, we
should get its effect on (virtual) NMI delivery right. Note that we can
simply check the other retire flags also in the !OKAY case, as the
insn emulator now guarantees they are only set on OKAY.

Signed-off-by: Jan Beulich <jbeul...@suse.com>
---
v3: Drastically simplify the change to _hvm_emulate_one().
v2: Adjust x86_emulate_wrapper() accordingly.
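
(For illustration only, not part of the patch: a minimal sketch of how a
caller of x86_emulate() that maintains a virtual interrupt shadow, as
_hvm_emulate_one() does below, is expected to consume the new flag. It
assumes the relevant Xen/HVM headers are in scope and that the shadow is
kept in a plain unsigned int; otherwise only names visible in this patch
are used.)

static void consume_unblock_nmi(const struct x86_emulate_ctxt *ctxt,
                                unsigned int *intr_shadow)
{
    /*
     * Deliberately checked without regard to the emulator's return
     * value: the flag is set before IRET's stack reads, so it may be
     * seen even when emulation doesn't complete with X86EMUL_OKAY,
     * matching hardware behaviour where IRET drops NMI blocking even
     * if it subsequently faults.
     */
    if ( ctxt->retire.unblock_nmi )
        *intr_shadow &= ~HVM_INTR_SHADOW_NMI;
}

This mirrors the rationale behind the x86_emulate_wrapper() change:
unblock_nmi is the one retire flag that may legitimately be set on a
failed emulation.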

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1955,9 +1955,6 @@ static int _hvm_emulate_one(struct hvm_e
         memcpy(vio->mmio_insn, hvmemul_ctxt->insn_buf, vio->mmio_insn_bytes);
     }
 
-    if ( rc != X86EMUL_OKAY )
-        return rc;
-
     if ( hvmemul_ctxt->ctxt.retire.singlestep )
         hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
 
@@ -1975,6 +1972,10 @@ static int _hvm_emulate_one(struct hvm_e
     else
         new_intr_shadow &= ~HVM_INTR_SHADOW_STI;
 
+    /* IRET, if valid in the given context, clears NMI blocking. */
+    if ( hvmemul_ctxt->ctxt.retire.unblock_nmi )
+        new_intr_shadow &= ~HVM_INTR_SHADOW_NMI;
+
     if ( hvmemul_ctxt->intr_shadow != new_intr_shadow )
     {
         hvmemul_ctxt->intr_shadow = new_intr_shadow;
@@ -1987,7 +1988,7 @@ static int _hvm_emulate_one(struct hvm_e
         hvm_hlt(regs->eflags);
     }
 
-    return X86EMUL_OKAY;
+    return rc;
 }
 
 int hvm_emulate_one(
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -4002,6 +4002,7 @@ x86_emulate(
         uint32_t mask = X86_EFLAGS_VIP | X86_EFLAGS_VIF | X86_EFLAGS_VM;
 
         fail_if(!in_realmode(ctxt, ops));
+        ctxt->retire.unblock_nmi = true;
         if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
                               &eip, op_bytes, ctxt, ops)) ||
              (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
@@ -7918,9 +7919,17 @@ int x86_emulate_wrapper(
 
     rc = x86_emulate(ctxt, ops);
 
-    /* Retire flags should only be set for successful instruction emulation. */
+    /*
+     * Most retire flags should only be set for successful instruction
+     * emulation.
+     */
     if ( rc != X86EMUL_OKAY )
-        ASSERT(ctxt->retire.raw == 0);
+    {
+        typeof(ctxt->retire) retire = ctxt->retire;
+
+        retire.unblock_nmi = false;
+        ASSERT(!retire.raw);
+    }
 
     /* All cases returning X86EMUL_EXCEPTION should have fault semantics. */
     if ( rc == X86EMUL_EXCEPTION )
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -490,6 +490,7 @@ struct x86_emulate_ctxt
             bool hlt:1;          /* Instruction HLTed. */
             bool mov_ss:1;       /* Instruction sets MOV-SS irq shadow. */
             bool sti:1;          /* Instruction sets STI irq shadow. */
+            bool unblock_nmi:1;  /* Instruction clears NMI blocking. */
             bool singlestep:1;   /* Singlestepping was active. */
         };
     } retire;


