This removes raw number manipulation, and makes the logic easier to follow.

No functional change.

Signed-off-by: Andrew Cooper <andrew.coop...@citrix.com>
---
CC: Jan Beulich <jbeul...@suse.com>
CC: Roger Pau Monné <roger....@citrix.com>
CC: Wei Liu <w...@xen.org>
---
 xen/arch/x86/hvm/svm/svm.c              | 4 ++--
 xen/arch/x86/include/asm/hvm/svm/vmcb.h | 5 +++++
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 8d8b250101ce..90c2f89c1b0d 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1740,7 +1740,7 @@ static void svm_vmexit_do_cr_access(
     cr = vmcb->exitcode - VMEXIT_CR0_READ;
     dir = (cr > 15);
     cr &= 0xf;
-    gp = vmcb->exitinfo1 & 0xf;
+    gp = vmcb->ei.mov.gpr;
 
     rc = dir ? hvm_mov_to_cr(cr, gp) : hvm_mov_from_cr(cr, gp);
 
@@ -2961,7 +2961,7 @@ void svm_vmexit_handler(void)
 
     case VMEXIT_CR0_READ ... VMEXIT_CR15_READ:
     case VMEXIT_CR0_WRITE ... VMEXIT_CR15_WRITE:
-        if ( cpu_has_svm_decode && (vmcb->exitinfo1 & (1ULL << 63)) )
+        if ( cpu_has_svm_decode && vmcb->ei.mov.mov_insn )
             svm_vmexit_do_cr_access(vmcb, regs);
         else if ( !hvm_emulate_one_insn(x86_insn_is_cr_access, "CR access") )
             hvm_inject_hw_exception(X86_EXC_GP, 0);
diff --git a/xen/arch/x86/include/asm/hvm/svm/vmcb.h b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
index b809e85507aa..77e3bd9aa048 100644
--- a/xen/arch/x86/include/asm/hvm/svm/vmcb.h
+++ b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
@@ -450,6 +450,11 @@ struct vmcb_struct {
 
                 uint64_t nrip;
             } io;
+            struct {
+                uint64_t gpr:4;
+                uint64_t :59;
+                bool     mov_insn:1; /* MOV, as opposed to LMSW, CLTS, etc */
+            } mov;
             struct {
                 uint16_t sel;
                 uint64_t :48;
-- 
2.30.2


Reply via email to