SVM: use VMCB accessors

This is particularly relevant for the SET form, to ensure proper clean
bits tracking (although in this case it is benign, as CPL and the other
segment register attributes share a clean bit).

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>
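
For reference, the point of the accessors is that the SET form clears the
VMCB clean bit covering the field it writes, so hardware re-reads the updated
state on the next VMRUN, whereas a direct write to the underscore-prefixed
field leaves the clean bit untouched. Below is a minimal, self-contained
sketch of that pattern; the struct layout, clean-bit numbering and names are
simplified illustrations, not the definitions from Xen's vmcb.h.

/*
 * Minimal sketch (not the actual Xen code) of the VMCB accessor idea:
 * a "set" accessor clears the clean bit covering the field it touches,
 * so the CPU reloads that guest state from the VMCB on the next VMRUN.
 */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical clean-bit index covering CPL-style control state. */
#define VMCB_CLEAN_CR  5u

struct vmcb_struct {
    uint8_t  _cpl;         /* underscore prefix: touch via accessors only */
    uint32_t cleanbits;    /* bit set => hardware may use its cached copy */
};

static inline uint8_t vmcb_get_cpl(const struct vmcb_struct *vmcb)
{
    return vmcb->_cpl;
}

static inline void vmcb_set_cpl(struct vmcb_struct *vmcb, uint8_t cpl)
{
    vmcb->_cpl = cpl;
    /* Invalidate the cached copy so the CPU re-reads it on VMRUN. */
    vmcb->cleanbits &= ~(1u << VMCB_CLEAN_CR);
}

int main(void)
{
    struct vmcb_struct vmcb = { ._cpl = 0, .cleanbits = ~0u };

    vmcb_set_cpl(&vmcb, 3);   /* e.g. %ss loaded with a DPL-3 selector */
    printf("cpl=%u cleanbits=%#x\n",
           (unsigned)vmcb_get_cpl(&vmcb), vmcb.cleanbits);
    return 0;
}

The GET side is a plain read; using it consistently just keeps direct
accesses to the underscore-prefixed fields out of svm.c.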

--- unstable.orig/xen/arch/x86/hvm/svm/svm.c    2017-05-24 11:52:44.976284414 +0200
+++ unstable/xen/arch/x86/hvm/svm/svm.c 2017-04-18 11:25:44.000000000 +0200
@@ -653,7 +653,7 @@ static void svm_get_segment_register(str
         break;
     case x86_seg_ss:
         *reg = vmcb->ss;
-        reg->attr.fields.dpl = vmcb->_cpl;
+        reg->attr.fields.dpl = vmcb_get_cpl(vmcb);
         break;
     case x86_seg_tr:
         svm_sync_vmcb(v);
@@ -726,7 +726,7 @@ static void svm_set_segment_register(str
         break;
     case x86_seg_ss:
         vmcb->ss = *reg;
-        vmcb->_cpl = vmcb->ss.attr.fields.dpl;
+        vmcb_set_cpl(vmcb, reg->attr.fields.dpl);
         break;
     case x86_seg_tr:
         vmcb->tr = *reg;
@@ -1442,7 +1442,7 @@ static void svm_inject_event(const struc
      * If injecting an event outside of 64bit mode, zero the upper bits of the
      * %eip and nextrip after the adjustments above.
      */
-    if ( !((vmcb->_efer & EFER_LMA) && vmcb->cs.attr.fields.l) )
+    if ( !((vmcb_get_efer(vmcb) & EFER_LMA) && vmcb->cs.attr.fields.l) )
     {
         regs->rip = regs->eip;
         vmcb->nextrip = (uint32_t)vmcb->nextrip;