From: Denis Mukhin <dmuk...@ford.com>

Use `asm goto()` in vmread_safe() to simplify the error handling logic.
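
With `asm goto()`, the VMfailInvalid (CF=1) and VMfailValid (ZF=1)
outcomes branch straight to C labels instead of being extracted into
flag variables and tested afterwards; the new shape (quoted from the
vmx.h hunk below) is:

    asm goto ( "vmread %[field], %[value]\n\t"
               "jc %l[vmfail_invalid]\n\t"
               "jz %l[vmfail_error]"
               :
               : [field] "r" (field), [value] "m" (*value)
               :
               : vmfail_invalid, vmfail_error );
    return VMX_INSN_SUCCEED;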

Update __vmread() to return `unsigned long`, as suggested in [1], and
rename it to vmread_unsafe() to match its behavior. Update all call
sites accordingly, and drop `UNLIKELY_*()`.

Group all the vmread*() helpers next to each other in vmx.h.

Rename internal vmr*() to vmread*().
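
For example, a typical call site (taken from the intr.c hunk below)
shrinks from:

    unsigned long intr_info;

    __vmread(VM_ENTRY_INTR_INFO, &intr_info);
    if ( intr_info & INTR_INFO_VALID_MASK )
        r = hvm_intblk_rflags_ie;

to:

    if ( vmread_unsafe(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK )
        r = hvm_intblk_rflags_ie;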

[1] https://lore.kernel.org/xen-devel/c452a1d7-4a57-4c5f-8a83-36a74ff22...@citrix.com/

Signed-off-by: Denis Mukhin <dmuk...@ford.com>
---
Link to CI: https://gitlab.com/xen-project/people/dmukhin/xen/-/pipelines/1756781092
---
 xen/arch/x86/cpu/vpmu_intel.c          |   3 +-
 xen/arch/x86/hvm/vmx/intr.c            |  26 +--
 xen/arch/x86/hvm/vmx/realmode.c        |   2 +-
 xen/arch/x86/hvm/vmx/vmcs.c            | 160 ++++++++++---------
 xen/arch/x86/hvm/vmx/vmx.c             | 209 +++++++++++--------------
 xen/arch/x86/hvm/vmx/vvmx.c            |  43 +++--
 xen/arch/x86/include/asm/domain.h      |   2 +-
 xen/arch/x86/include/asm/hvm/vmx/vmx.h |  69 ++++----
 8 files changed, 235 insertions(+), 279 deletions(-)

diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index 7ce98ee42e..9c93d1f28c 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -796,8 +796,7 @@ static int cf_check core2_vpmu_do_interrupt(void)
     else
     {
         /* No PMC overflow but perhaps a Trace Message interrupt. */
-        __vmread(GUEST_IA32_DEBUGCTL, &msr_content);
-        if ( !(msr_content & IA32_DEBUGCTLMSR_TR) )
+        if ( !(vmread_unsafe(GUEST_IA32_DEBUGCTL) & IA32_DEBUGCTLMSR_TR) )
             return 0;
     }
 
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index 91b407e6bc..63330615cd 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -63,9 +63,8 @@ static void vmx_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
 
     if ( unlikely(tb_init_done) )
     {
-        unsigned long intr;
+        unsigned long intr = vmread_unsafe(VM_ENTRY_INTR_INFO);
 
-        __vmread(VM_ENTRY_INTR_INFO, &intr);
         TRACE(TRC_HVM_INTR_WINDOW, intack.vector, intack.source,
               (intr & INTR_INFO_VALID_MASK) ? intr & 0xff : -1);
     }
@@ -81,9 +80,8 @@ static void vmx_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
          * we may immediately vmexit and hance make no progress!
          * (see SDM 3B 21.3, "Other Causes of VM Exits").
          */
-        unsigned long intr_shadow;
+        unsigned long intr_shadow = vmread_unsafe(GUEST_INTERRUPTIBILITY_INFO);
 
-        __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
         if ( intr_shadow & VMX_INTR_SHADOW_STI )
         {
             /* Having both STI-blocking and MOV-SS-blocking fails vmentry. */
@@ -144,14 +142,8 @@ enum hvm_intblk cf_check nvmx_intr_blocked(struct vcpu *v)
         if ( nvcpu->nv_vmexit_pending ||
              nvcpu->nv_vmswitch_in_progress )
             r = hvm_intblk_rflags_ie;
-        else
-        {
-            unsigned long intr_info;
-
-            __vmread(VM_ENTRY_INTR_INFO, &intr_info);
-            if ( intr_info & INTR_INFO_VALID_MASK )
-                r = hvm_intblk_rflags_ie;
-        }
+        else if ( vmread_unsafe(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK )
+            r = hvm_intblk_rflags_ie;
     }
     else if ( nvcpu->nv_vmentry_pending )
         r = hvm_intblk_rflags_ie;
@@ -253,8 +245,6 @@ void asmlinkage vmx_intr_assist(void)
     pt_vector = pt_update_irq(v);
 
     do {
-        unsigned long intr_info;
-
         intack = hvm_vcpu_has_pending_irq(v);
         if ( likely(intack.source == hvm_intsrc_none) )
             goto out;
@@ -275,8 +265,7 @@ void asmlinkage vmx_intr_assist(void)
                 goto out;
             }
 
-            __vmread(VM_ENTRY_INTR_INFO, &intr_info);
-            if ( intr_info & INTR_INFO_VALID_MASK )
+            if ( vmread_unsafe(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK )
             {
                 if ( (intack.source == hvm_intsrc_pic) ||
                      (intack.source == hvm_intsrc_nmi) ||
@@ -299,8 +288,7 @@ void asmlinkage vmx_intr_assist(void)
         }
         else
         {
-            __vmread(VM_ENTRY_INTR_INFO, &intr_info);
-            if ( intr_info & INTR_INFO_VALID_MASK )
+            if ( vmread_unsafe(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK )
             {
                 vmx_enable_intr_window(v, intack);
                 goto out;
@@ -377,7 +365,7 @@ void asmlinkage vmx_intr_assist(void)
         }
 
         /* we need update the RVI field */
-        __vmread(GUEST_INTR_STATUS, &status);
+        status = vmread_unsafe(GUEST_INTR_STATUS);
         status &= ~VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
         status |= VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK &
                     intack.vector;
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index ff44ddcfa6..e8eb9773ee 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -159,7 +159,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
     unsigned int emulations = 0;
 
     /* Get-and-clear VM_ENTRY_INTR_INFO. */
-    __vmread(VM_ENTRY_INTR_INFO, &intr_info);
+    intr_info = vmread_unsafe(VM_ENTRY_INTR_INFO);
     if ( intr_info & INTR_INFO_VALID_MASK )
         __vmwrite(VM_ENTRY_INTR_INFO, 0);
 
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index a44475ae15..3bfe48ff96 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1031,7 +1031,7 @@ u64 virtual_vmcs_vmread(const struct vcpu *v, u32 vmcs_encoding)
     u64 res;
 
     virtual_vmcs_enter(v);
-    __vmread(vmcs_encoding, &res);
+    res = vmread_unsafe(vmcs_encoding);
     virtual_vmcs_exit(v);
 
     return res;
@@ -1691,7 +1691,7 @@ void vmx_vcpu_flush_pml_buffer(struct vcpu *v)
 
     vmx_vmcs_enter(v);
 
-    __vmread(GUEST_PML_INDEX, &pml_idx);
+    pml_idx = vmread_unsafe(GUEST_PML_INDEX);
 
     /* Do nothing if PML buffer is empty. */
     if ( pml_idx == (NR_PML_ENTRIES - 1) )
@@ -1874,9 +1874,8 @@ void vmx_destroy_vmcs(struct vcpu *v)
 void vmx_vmentry_failure(void)
 {
     struct vcpu *curr = current;
-    unsigned long error;
+    unsigned long error = vmread_unsafe(VM_INSTRUCTION_ERROR);
 
-    __vmread(VM_INSTRUCTION_ERROR, &error);
     gprintk(XENLOG_ERR, "VM%s error: %#lx\n",
             curr->arch.hvm.vmx.launched ? "RESUME" : "LAUNCH", error);
 
@@ -1905,7 +1904,6 @@ void cf_check vmx_do_resume(void)
 {
     struct vcpu *v = current;
     bool debug_state;
-    unsigned long host_cr4;
 
     if ( v->arch.hvm.vmx.active_cpu == smp_processor_id() )
         vmx_vmcs_reload(v);
@@ -1957,38 +1955,44 @@ void cf_check vmx_do_resume(void)
     hvm_do_resume(v);
 
     /* Sync host CR4 in case its value has changed. */
-    __vmread(HOST_CR4, &host_cr4);
-    if ( host_cr4 != read_cr4() )
+    if ( vmread_unsafe(HOST_CR4) != read_cr4() )
         __vmwrite(HOST_CR4, read_cr4());
 
     reset_stack_and_jump(vmx_asm_do_vmentry);
 }
 
-static inline unsigned long vmr(unsigned long field)
+static inline unsigned long vmread(unsigned long field)
 {
-    unsigned long val;
+    unsigned long value = 0;
 
-    return vmread_safe(field, &val) ? 0 : val;
+    asm goto ( "vmread %[field], %[value]\n\t"
+               "jmp %l[out]"
+               :
+               : [field] "r" (field), [value] "m" (value)
+               :
+               : out );
+out:
+    return value;
 }
 
-#define vmr16(fld) ({             \
+#define vmread16(fld) ({          \
     BUILD_BUG_ON((fld) & 0x6001); \
-    (uint16_t)vmr(fld);           \
+    (uint16_t)vmread(fld);        \
 })
 
-#define vmr32(fld) ({                         \
+#define vmread32(fld) ({                      \
     BUILD_BUG_ON(((fld) & 0x6001) != 0x4000); \
-    (uint32_t)vmr(fld);                       \
+    (uint32_t)vmread(fld);                    \
 })
 
 static void vmx_dump_sel(const char *name, uint32_t selector)
 {
     uint32_t sel, attr, limit;
     uint64_t base;
-    sel = vmr(selector);
-    attr = vmr(selector + (GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR));
-    limit = vmr(selector + (GUEST_ES_LIMIT - GUEST_ES_SELECTOR));
-    base = vmr(selector + (GUEST_ES_BASE - GUEST_ES_SELECTOR));
+    sel = vmread(selector);
+    attr = vmread(selector + (GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR));
+    limit = vmread(selector + (GUEST_ES_LIMIT - GUEST_ES_SELECTOR));
+    base = vmread(selector + (GUEST_ES_BASE - GUEST_ES_SELECTOR));
     printk("%s: %04x %05x %08x %016"PRIx64"\n", name, sel, attr, limit, base);
 }
 
@@ -1996,8 +2000,8 @@ static void vmx_dump_sel2(const char *name, uint32_t lim)
 {
     uint32_t limit;
     uint64_t base;
-    limit = vmr(lim);
-    base = vmr(lim + (GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
+    limit = vmread(lim);
+    base = vmread(lim + (GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
     printk("%s:            %08x %016"PRIx64"\n", name, limit, base);
 }
 
@@ -2014,9 +2018,9 @@ void vmcs_dump_vcpu(struct vcpu *v)
 
     vmx_vmcs_enter(v);
 
-    vmentry_ctl = vmr32(VM_ENTRY_CONTROLS),
-    vmexit_ctl = vmr32(VM_EXIT_CONTROLS);
-    cr4 = vmr(GUEST_CR4);
+    vmentry_ctl = vmread32(VM_ENTRY_CONTROLS),
+    vmexit_ctl = vmread32(VM_EXIT_CONTROLS);
+    cr4 = vmread(GUEST_CR4);
 
     /*
      * The guests EFER setting comes from the GUEST_EFER VMCS field whenever
@@ -2025,34 +2029,34 @@ void vmcs_dump_vcpu(struct vcpu *v)
      * setting.
      */
     if ( cpu_has_vmx_efer )
-        efer = vmr(GUEST_EFER);
+        efer = vmread(GUEST_EFER);
     else if ( vmx_read_guest_loadonly_msr(v, MSR_EFER, &efer) )
         efer = read_efer();
 
     printk("*** Guest State ***\n");
     printk("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
-           vmr(GUEST_CR0), vmr(CR0_READ_SHADOW), vmr(CR0_GUEST_HOST_MASK));
+           vmread(GUEST_CR0), vmread(CR0_READ_SHADOW), vmread(CR0_GUEST_HOST_MASK));
     printk("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
-           cr4, vmr(CR4_READ_SHADOW), vmr(CR4_GUEST_HOST_MASK));
-    printk("CR3 = 0x%016lx\n", vmr(GUEST_CR3));
+           cr4, vmread(CR4_READ_SHADOW), vmread(CR4_GUEST_HOST_MASK));
+    printk("CR3 = 0x%016lx\n", vmread(GUEST_CR3));
     if ( (v->arch.hvm.vmx.secondary_exec_control &
           SECONDARY_EXEC_ENABLE_EPT) &&
          (cr4 & X86_CR4_PAE) && !(vmentry_ctl & VM_ENTRY_IA32E_MODE) )
     {
         printk("PDPTE0 = 0x%016lx  PDPTE1 = 0x%016lx\n",
-               vmr(GUEST_PDPTE(0)), vmr(GUEST_PDPTE(1)));
+               vmread(GUEST_PDPTE(0)), vmread(GUEST_PDPTE(1)));
         printk("PDPTE2 = 0x%016lx  PDPTE3 = 0x%016lx\n",
-               vmr(GUEST_PDPTE(2)), vmr(GUEST_PDPTE(3)));
+               vmread(GUEST_PDPTE(2)), vmread(GUEST_PDPTE(3)));
     }
     printk("RSP = 0x%016lx (0x%016lx)  RIP = 0x%016lx (0x%016lx)\n",
-           vmr(GUEST_RSP), regs->rsp,
-           vmr(GUEST_RIP), regs->rip);
+           vmread(GUEST_RSP), regs->rsp,
+           vmread(GUEST_RIP), regs->rip);
     printk("RFLAGS=0x%08lx (0x%08lx)  DR7 = 0x%016lx\n",
-           vmr(GUEST_RFLAGS), regs->rflags,
-           vmr(GUEST_DR7));
+           vmread(GUEST_RFLAGS), regs->rflags,
+           vmread(GUEST_DR7));
     printk("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
-           vmr(GUEST_SYSENTER_ESP),
-           vmr32(GUEST_SYSENTER_CS), vmr(GUEST_SYSENTER_EIP));
+           vmread(GUEST_SYSENTER_ESP),
+           vmread32(GUEST_SYSENTER_CS), vmread(GUEST_SYSENTER_EIP));
     printk("       sel  attr  limit   base\n");
     vmx_dump_sel("  CS", GUEST_CS_SELECTOR);
     vmx_dump_sel("  DS", GUEST_DS_SELECTOR);
@@ -2065,95 +2069,95 @@ void vmcs_dump_vcpu(struct vcpu *v)
     vmx_dump_sel2("IDTR", GUEST_IDTR_LIMIT);
     vmx_dump_sel("  TR", GUEST_TR_SELECTOR);
     printk("EFER(%s) = 0x%016lx  PAT = 0x%016lx\n",
-           cpu_has_vmx_efer ? "VMCS" : "MSR LL", efer, vmr(GUEST_PAT));
+           cpu_has_vmx_efer ? "VMCS" : "MSR LL", efer, vmread(GUEST_PAT));
     printk("PreemptionTimer = 0x%08x  SM Base = 0x%08x\n",
-           vmr32(GUEST_PREEMPTION_TIMER), vmr32(GUEST_SMBASE));
+           vmread32(GUEST_PREEMPTION_TIMER), vmread32(GUEST_SMBASE));
     printk("DebugCtl = 0x%016lx  DebugExceptions = 0x%016lx\n",
-           vmr(GUEST_IA32_DEBUGCTL), vmr(GUEST_PENDING_DBG_EXCEPTIONS));
+           vmread(GUEST_IA32_DEBUGCTL), vmread(GUEST_PENDING_DBG_EXCEPTIONS));
     if ( vmentry_ctl & (VM_ENTRY_LOAD_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_BNDCFGS) )
         printk("PerfGlobCtl = 0x%016lx  BndCfgS = 0x%016lx\n",
-               vmr(GUEST_PERF_GLOBAL_CTRL), vmr(GUEST_BNDCFGS));
+               vmread(GUEST_PERF_GLOBAL_CTRL), vmread(GUEST_BNDCFGS));
     printk("Interruptibility = %08x  ActivityState = %08x\n",
-           vmr32(GUEST_INTERRUPTIBILITY_INFO), vmr32(GUEST_ACTIVITY_STATE));
+           vmread32(GUEST_INTERRUPTIBILITY_INFO), vmread32(GUEST_ACTIVITY_STATE));
     if ( v->arch.hvm.vmx.secondary_exec_control &
          SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY )
-        printk("InterruptStatus = %04x\n", vmr16(GUEST_INTR_STATUS));
+        printk("InterruptStatus = %04x\n", vmread16(GUEST_INTR_STATUS));
     if ( cpu_has_vmx_virt_spec_ctrl )
         printk("SPEC_CTRL mask = 0x%016lx  shadow = 0x%016lx\n",
-               vmr(SPEC_CTRL_MASK), vmr(SPEC_CTRL_SHADOW));
+               vmread(SPEC_CTRL_MASK), vmread(SPEC_CTRL_SHADOW));
 
     printk("*** Host State ***\n");
     printk("RIP = 0x%016lx (%ps)  RSP = 0x%016lx\n",
-           vmr(HOST_RIP), (void *)vmr(HOST_RIP), vmr(HOST_RSP));
+           vmread(HOST_RIP), (void *)vmread(HOST_RIP), vmread(HOST_RSP));
     printk("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
-           vmr16(HOST_CS_SELECTOR), vmr16(HOST_SS_SELECTOR),
-           vmr16(HOST_DS_SELECTOR), vmr16(HOST_ES_SELECTOR),
-           vmr16(HOST_FS_SELECTOR), vmr16(HOST_GS_SELECTOR),
-           vmr16(HOST_TR_SELECTOR));
+           vmread16(HOST_CS_SELECTOR), vmread16(HOST_SS_SELECTOR),
+           vmread16(HOST_DS_SELECTOR), vmread16(HOST_ES_SELECTOR),
+           vmread16(HOST_FS_SELECTOR), vmread16(HOST_GS_SELECTOR),
+           vmread16(HOST_TR_SELECTOR));
     printk("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
-           vmr(HOST_FS_BASE), vmr(HOST_GS_BASE), vmr(HOST_TR_BASE));
+           vmread(HOST_FS_BASE), vmread(HOST_GS_BASE), vmread(HOST_TR_BASE));
     printk("GDTBase=%016lx IDTBase=%016lx\n",
-           vmr(HOST_GDTR_BASE), vmr(HOST_IDTR_BASE));
+           vmread(HOST_GDTR_BASE), vmread(HOST_IDTR_BASE));
     printk("CR0=%016lx CR3=%016lx CR4=%016lx\n",
-           vmr(HOST_CR0), vmr(HOST_CR3), vmr(HOST_CR4));
+           vmread(HOST_CR0), vmread(HOST_CR3), vmread(HOST_CR4));
     printk("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
-           vmr(HOST_SYSENTER_ESP),
-           vmr32(HOST_SYSENTER_CS), vmr(HOST_SYSENTER_EIP));
+           vmread(HOST_SYSENTER_ESP),
+           vmread32(HOST_SYSENTER_CS), vmread(HOST_SYSENTER_EIP));
     if ( vmexit_ctl & (VM_EXIT_LOAD_HOST_PAT | VM_EXIT_LOAD_HOST_EFER) )
-        printk("EFER = 0x%016lx  PAT = 0x%016lx\n", vmr(HOST_EFER), 
vmr(HOST_PAT));
+        printk("EFER = 0x%016lx  PAT = 0x%016lx\n", vmread(HOST_EFER), 
vmread(HOST_PAT));
     if ( vmexit_ctl & VM_EXIT_LOAD_PERF_GLOBAL_CTRL )
         printk("PerfGlobCtl = 0x%016lx\n",
-               vmr(HOST_PERF_GLOBAL_CTRL));
+               vmread(HOST_PERF_GLOBAL_CTRL));
 
     printk("*** Control State ***\n");
     printk("PinBased=%08x CPUBased=%08x\n",
-           vmr32(PIN_BASED_VM_EXEC_CONTROL),
-           vmr32(CPU_BASED_VM_EXEC_CONTROL));
+           vmread32(PIN_BASED_VM_EXEC_CONTROL),
+           vmread32(CPU_BASED_VM_EXEC_CONTROL));
     printk("SecondaryExec=%08x TertiaryExec=%016lx\n",
-           vmr32(SECONDARY_VM_EXEC_CONTROL),
-           vmr(TERTIARY_VM_EXEC_CONTROL));
+           vmread32(SECONDARY_VM_EXEC_CONTROL),
+           vmread(TERTIARY_VM_EXEC_CONTROL));
     printk("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl);
     printk("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
-           vmr32(EXCEPTION_BITMAP),
-           vmr32(PAGE_FAULT_ERROR_CODE_MASK),
-           vmr32(PAGE_FAULT_ERROR_CODE_MATCH));
+           vmread32(EXCEPTION_BITMAP),
+           vmread32(PAGE_FAULT_ERROR_CODE_MASK),
+           vmread32(PAGE_FAULT_ERROR_CODE_MATCH));
     printk("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
-           vmr32(VM_ENTRY_INTR_INFO),
-           vmr32(VM_ENTRY_EXCEPTION_ERROR_CODE),
-           vmr32(VM_ENTRY_INSTRUCTION_LEN));
+           vmread32(VM_ENTRY_INTR_INFO),
+           vmread32(VM_ENTRY_EXCEPTION_ERROR_CODE),
+           vmread32(VM_ENTRY_INSTRUCTION_LEN));
     printk("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
-           vmr32(VM_EXIT_INTR_INFO),
-           vmr32(VM_EXIT_INTR_ERROR_CODE),
-           vmr32(VM_EXIT_INSTRUCTION_LEN));
+           vmread32(VM_EXIT_INTR_INFO),
+           vmread32(VM_EXIT_INTR_ERROR_CODE),
+           vmread32(VM_EXIT_INSTRUCTION_LEN));
     printk("        reason=%08x qualification=%016lx\n",
-           vmr32(VM_EXIT_REASON), vmr(EXIT_QUALIFICATION));
+           vmread32(VM_EXIT_REASON), vmread(EXIT_QUALIFICATION));
     printk("IDTVectoring: info=%08x errcode=%08x\n",
-           vmr32(IDT_VECTORING_INFO), vmr32(IDT_VECTORING_ERROR_CODE));
+           vmread32(IDT_VECTORING_INFO), vmread32(IDT_VECTORING_ERROR_CODE));
     printk("TSC Offset = 0x%016lx  TSC Multiplier = 0x%016lx\n",
-           vmr(TSC_OFFSET), vmr(TSC_MULTIPLIER));
+           vmread(TSC_OFFSET), vmread(TSC_MULTIPLIER));
     if ( (v->arch.hvm.vmx.exec_control & CPU_BASED_TPR_SHADOW) ||
          (vmx_caps.pin_based_exec_control & PIN_BASED_POSTED_INTERRUPT) )
         printk("TPR Threshold = 0x%02x  PostedIntrVec = 0x%02x\n",
-               vmr32(TPR_THRESHOLD), vmr16(POSTED_INTR_NOTIFICATION_VECTOR));
+               vmread32(TPR_THRESHOLD), vmread16(POSTED_INTR_NOTIFICATION_VECTOR));
     if ( (v->arch.hvm.vmx.secondary_exec_control &
           SECONDARY_EXEC_ENABLE_EPT) )
         printk("EPT pointer = 0x%016lx  EPTP index = 0x%04x\n",
-               vmr(EPT_POINTER), vmr16(EPTP_INDEX));
-    n = vmr32(CR3_TARGET_COUNT);
+               vmread(EPT_POINTER), vmread16(EPTP_INDEX));
+    n = vmread32(CR3_TARGET_COUNT);
     for ( i = 0; i + 1 < n; i += 2 )
         printk("CR3 target%u=%016lx target%u=%016lx\n",
-               i, vmr(CR3_TARGET_VALUE(i)),
-               i + 1, vmr(CR3_TARGET_VALUE(i + 1)));
+               i, vmread(CR3_TARGET_VALUE(i)),
+               i + 1, vmread(CR3_TARGET_VALUE(i + 1)));
     if ( i < n )
-        printk("CR3 target%u=%016lx\n", i, vmr(CR3_TARGET_VALUE(i)));
+        printk("CR3 target%u=%016lx\n", i, vmread(CR3_TARGET_VALUE(i)));
     if ( v->arch.hvm.vmx.secondary_exec_control &
          SECONDARY_EXEC_PAUSE_LOOP_EXITING )
         printk("PLE Gap=%08x Window=%08x\n",
-               vmr32(PLE_GAP), vmr32(PLE_WINDOW));
+               vmread32(PLE_GAP), vmread32(PLE_WINDOW));
     if ( v->arch.hvm.vmx.secondary_exec_control &
          (SECONDARY_EXEC_ENABLE_VPID | SECONDARY_EXEC_ENABLE_VM_FUNCTIONS) )
         printk("Virtual processor ID = 0x%04x VMfunc controls = %016lx\n",
-               vmr16(VIRTUAL_PROCESSOR_ID), vmr(VM_FUNCTION_CONTROL));
+               vmread16(VIRTUAL_PROCESSOR_ID), vmread(VM_FUNCTION_CONTROL));
 
     vmx_vmcs_exit(v);
 }
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 4883bd823d..8a3f5a28ad 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -902,7 +902,7 @@ int cf_check vmx_guest_x86_mode(struct vcpu *v)
         return X86_MODE_REAL;
     if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
         return X86_MODE_VM86;
-    __vmread(GUEST_CS_AR_BYTES, &cs_ar_bytes);
+    cs_ar_bytes = vmread_unsafe(GUEST_CS_AR_BYTES);
     if ( hvm_long_mode_active(v) &&
          likely(cs_ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
         return X86_MODE_64BIT;
@@ -926,7 +926,7 @@ static void vmx_save_dr(struct vcpu *v)
     v->arch.dr[3] = read_debugreg(3);
     v->arch.dr6   = read_debugreg(6);
     /* DR7 must be saved as it is used by vmx_restore_dr(). */
-    __vmread(GUEST_DR7, &v->arch.dr7);
+    v->arch.dr7 = vmread_unsafe(GUEST_DR7);
 }
 
 static void __restore_debug_registers(struct vcpu *v)
@@ -952,7 +952,7 @@ static void __restore_debug_registers(struct vcpu *v)
  */
 static void vmx_restore_dr(struct vcpu *v)
 {
-    /* NB. __vmread() is not usable here, so we cannot read from the VMCS. */
+    /* NB. vmread_unsafe() is not usable here, so we cannot read from the VMCS. */
     if ( unlikely(v->arch.dr7 & DR7_ACTIVE_MASK) )
         __restore_debug_registers(v);
 }
@@ -963,17 +963,17 @@ static void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
 
     vmx_vmcs_enter(v);
 
-    __vmread(GUEST_SYSENTER_CS, &c->sysenter_cs);
-    __vmread(GUEST_SYSENTER_ESP, &c->sysenter_esp);
-    __vmread(GUEST_SYSENTER_EIP, &c->sysenter_eip);
+    c->sysenter_cs = vmread_unsafe(GUEST_SYSENTER_CS);
+    c->sysenter_esp = vmread_unsafe(GUEST_SYSENTER_ESP);
+    c->sysenter_eip = vmread_unsafe(GUEST_SYSENTER_EIP);
 
-    __vmread(VM_ENTRY_INTR_INFO, &ev);
+    ev = vmread_unsafe(VM_ENTRY_INTR_INFO);
     if ( (ev & INTR_INFO_VALID_MASK) &&
          hvm_event_needs_reinjection(MASK_EXTR(ev, INTR_INFO_INTR_TYPE_MASK),
                                      ev & INTR_INFO_VECTOR_MASK) )
     {
         c->pending_event = ev;
-        __vmread(VM_ENTRY_EXCEPTION_ERROR_CODE, &ev);
+        ev = vmread_unsafe(VM_ENTRY_EXCEPTION_ERROR_CODE);
         c->error_code = ev;
     }
 
@@ -1197,11 +1197,7 @@ static void cf_check vmx_ctxt_switch_to(struct vcpu *v)
 
 unsigned int vmx_get_cpl(void)
 {
-    unsigned long attr;
-
-    __vmread(GUEST_SS_AR_BYTES, &attr);
-
-    return MASK_EXTR(attr, X86_SEG_AR_DPL);
+    return MASK_EXTR(vmread_unsafe(GUEST_SS_AR_BYTES), X86_SEG_AR_DPL);
 }
 
 static unsigned int cf_check _vmx_get_cpl(struct vcpu *v)
@@ -1271,14 +1267,14 @@ static void cf_check vmx_get_segment_register(
         fallthrough;
 
     case x86_seg_es ... x86_seg_gs:
-        __vmread(GUEST_SEG_SELECTOR(tmp_seg), &sel);
-        __vmread(GUEST_SEG_AR_BYTES(tmp_seg), &attr);
+        sel = vmread_unsafe(GUEST_SEG_SELECTOR(tmp_seg));
+        attr = vmread_unsafe(GUEST_SEG_AR_BYTES(tmp_seg));
         fallthrough;
 
     case x86_seg_gdtr:
     case x86_seg_idtr:
-        __vmread(GUEST_SEG_LIMIT(tmp_seg),    &limit);
-        __vmread(GUEST_SEG_BASE(tmp_seg),     &reg->base);
+        limit = vmread_unsafe(GUEST_SEG_LIMIT(tmp_seg));
+        reg->base = vmread_unsafe(GUEST_SEG_BASE(tmp_seg));
         break;
 
     default:
@@ -1436,7 +1432,7 @@ static int cf_check vmx_get_guest_pat(struct vcpu *v, u64 *gpat)
         return 0;
 
     vmx_vmcs_enter(v);
-    __vmread(GUEST_PAT, gpat);
+    *gpat = vmread_unsafe(GUEST_PAT);
     vmx_vmcs_exit(v);
     return 1;
 }
@@ -1555,11 +1551,7 @@ static void cf_check vmx_init_hypercall_page(void *p)
 
 static unsigned int cf_check vmx_get_interrupt_shadow(struct vcpu *v)
 {
-    unsigned long intr_shadow;
-
-    __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
-
-    return intr_shadow;
+    return vmread_unsafe(GUEST_INTERRUPTIBILITY_INFO);
 }
 
 static void cf_check vmx_set_interrupt_shadow(
@@ -1573,12 +1565,12 @@ static void cf_check vmx_get_nonreg_state(struct vcpu *v,
 {
     vmx_vmcs_enter(v);
 
-    __vmread(GUEST_ACTIVITY_STATE, &nrs->vmx.activity_state);
-    __vmread(GUEST_INTERRUPTIBILITY_INFO, &nrs->vmx.interruptibility_info);
-    __vmread(GUEST_PENDING_DBG_EXCEPTIONS, &nrs->vmx.pending_dbg);
+    nrs->vmx.activity_state = vmread_unsafe(GUEST_ACTIVITY_STATE);
+    nrs->vmx.interruptibility_info = vmread_unsafe(GUEST_INTERRUPTIBILITY_INFO);
+    nrs->vmx.pending_dbg = vmread_unsafe(GUEST_PENDING_DBG_EXCEPTIONS);
 
     if ( cpu_has_vmx_virtual_intr_delivery )
-        __vmread(GUEST_INTR_STATUS, &nrs->vmx.interrupt_status);
+        nrs->vmx.interrupt_status = vmread_unsafe(GUEST_INTR_STATUS);
 
     vmx_vmcs_exit(v);
 }
@@ -1896,7 +1888,7 @@ static void cf_check vmx_update_guest_efer(struct vcpu *v)
      * The intended guest running mode is derived from VM_ENTRY_IA32E_MODE,
      * which (architecturally) is the guest's LMA setting.
      */
-    __vmread(VM_ENTRY_CONTROLS, &entry_ctls);
+    entry_ctls = vmread_unsafe(VM_ENTRY_CONTROLS);
 
     entry_ctls &= ~VM_ENTRY_IA32E_MODE;
     if ( guest_efer & EFER_LMA )
@@ -2063,9 +2055,10 @@ static void cf_check vmx_inject_event(const struct x86_event *event)
         {
             unsigned long val;
 
-            __vmread(GUEST_DR7, &val);
+            val = vmread_unsafe(GUEST_DR7);
             __vmwrite(GUEST_DR7, val & ~DR_GENERAL_DETECT);
-            __vmread(GUEST_IA32_DEBUGCTL, &val);
+
+            val = vmread_unsafe(GUEST_IA32_DEBUGCTL);
             __vmwrite(GUEST_IA32_DEBUGCTL, val & ~IA32_DEBUGCTLMSR_LBR);
         }
         if ( cpu_has_monitor_trap_flag )
@@ -2089,7 +2082,7 @@ static void cf_check vmx_inject_event(const struct x86_event *event)
     if ( nestedhvm_vcpu_in_guestmode(curr) )
         intr_info = vcpu_2_nvmx(curr).intr.intr_info;
     else
-        __vmread(VM_ENTRY_INTR_INFO, &intr_info);
+        intr_info = vmread_unsafe(VM_ENTRY_INTR_INFO);
 
     if ( unlikely(intr_info & INTR_INFO_VALID_MASK) &&
          (MASK_EXTR(intr_info, INTR_INFO_INTR_TYPE_MASK) == X86_ET_HW_EXC) )
@@ -2125,12 +2118,9 @@ static void cf_check vmx_inject_event(const struct x86_event *event)
 
 static bool cf_check vmx_event_pending(const struct vcpu *v)
 {
-    unsigned long intr_info;
-
     ASSERT(v == current);
-    __vmread(VM_ENTRY_INTR_INFO, &intr_info);
 
-    return intr_info & INTR_INFO_VALID_MASK;
+    return vmread_unsafe(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK;
 }
 
 static void cf_check vmx_set_info_guest(struct vcpu *v)
@@ -2149,7 +2139,7 @@ static void cf_check vmx_set_info_guest(struct vcpu *v)
      * to set the GUEST_PENDING_DBG_EXCEPTIONS.BS here incurs
      * immediately vmexit and hence make no progress.
      */
-    __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
+    intr_shadow = vmread_unsafe(GUEST_INTERRUPTIBILITY_INFO);
     if ( v->domain->debugger_attached &&
          (v->arch.user_regs.eflags & X86_EFLAGS_TF) &&
          (intr_shadow & VMX_INTR_SHADOW_STI) )
@@ -2178,7 +2168,7 @@ static u8 set_svi(int isr)
     if ( isr < 0 )
         isr = 0;
 
-    __vmread(GUEST_INTR_STATUS, &status);
+    status = vmread_unsafe(GUEST_INTR_STATUS);
     old = status >> VMX_GUEST_INTR_STATUS_SVI_OFFSET;
     if ( isr != old )
     {
@@ -2518,9 +2508,9 @@ static bool cf_check vmx_vcpu_emulate_ve(struct vcpu *v)
     veinfo->eptp_index = vcpu_altp2m(v).p2midx;
 
     vmx_vmcs_enter(v);
-    __vmread(EXIT_QUALIFICATION, &veinfo->exit_qualification);
-    __vmread(GUEST_LINEAR_ADDRESS, &veinfo->gla);
-    __vmread(GUEST_PHYSICAL_ADDRESS, &veinfo->gpa);
+    veinfo->exit_qualification = vmread_unsafe(EXIT_QUALIFICATION);
+    veinfo->gla = vmread_unsafe(GUEST_LINEAR_ADDRESS);
+    veinfo->gpa = vmread_unsafe(GUEST_PHYSICAL_ADDRESS);
     vmx_vmcs_exit(v);
 
     hvm_inject_hw_exception(X86_EXC_VE,
@@ -2541,8 +2531,8 @@ static bool cf_check vmx_get_pending_event(
     unsigned long intr_info, error_code;
 
     vmx_vmcs_enter(v);
-    __vmread(VM_ENTRY_INTR_INFO, &intr_info);
-    __vmread(VM_ENTRY_EXCEPTION_ERROR_CODE, &error_code);
+    intr_info = vmread_unsafe(VM_ENTRY_INTR_INFO);
+    error_code = vmread_unsafe(VM_ENTRY_EXCEPTION_ERROR_CODE);
     vmx_vmcs_exit(v);
 
     if ( !(intr_info & INTR_INFO_VALID_MASK) )
@@ -2739,11 +2729,11 @@ static uint64_t cf_check vmx_get_reg(struct vcpu *v, unsigned int reg)
     {
     case MSR_SPEC_CTRL:
         ASSERT(cpu_has_vmx_virt_spec_ctrl);
-        __vmread(SPEC_CTRL_SHADOW, &val);
+        val = vmread_unsafe(SPEC_CTRL_SHADOW);
         break;
 
     case MSR_IA32_BNDCFGS:
-        __vmread(GUEST_BNDCFGS, &val);
+        val = vmread_unsafe(GUEST_BNDCFGS);
         break;
 
     default:
@@ -3161,9 +3151,8 @@ void __init vmx_fill_funcs(void)
  */
 static int get_instruction_length(void)
 {
-    unsigned long len;
+    unsigned long len = vmread_unsafe(VM_EXIT_INSTRUCTION_LEN); /* Safe: callers audited */
 
-    __vmread(VM_EXIT_INSTRUCTION_LEN, &len); /* Safe: callers audited */
     BUG_ON((len < 1) || (len > MAX_INST_LEN));
     return len;
 }
@@ -3176,7 +3165,7 @@ void update_guest_eip(void)
     regs->rip += get_instruction_length(); /* Safe: callers audited */
     regs->eflags &= ~X86_EFLAGS_RF;
 
-    __vmread(GUEST_INTERRUPTIBILITY_INFO, &x);
+    x = vmread_unsafe(GUEST_INTERRUPTIBILITY_INFO);
     if ( x & (VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS) )
     {
         x &= ~(VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS);
@@ -3424,21 +3413,21 @@ static int cf_check vmx_msr_read_intercept(
     switch ( msr )
     {
     case MSR_IA32_SYSENTER_CS:
-        __vmread(GUEST_SYSENTER_CS, msr_content);
+        *msr_content = vmread_unsafe(GUEST_SYSENTER_CS);
         break;
     case MSR_IA32_SYSENTER_ESP:
-        __vmread(GUEST_SYSENTER_ESP, msr_content);
+        *msr_content = vmread_unsafe(GUEST_SYSENTER_ESP);
         break;
     case MSR_IA32_SYSENTER_EIP:
-        __vmread(GUEST_SYSENTER_EIP, msr_content);
+        *msr_content = vmread_unsafe(GUEST_SYSENTER_EIP);
         break;
 
     case MSR_FS_BASE:
-        __vmread(GUEST_FS_BASE, msr_content);
+        *msr_content = vmread_unsafe(GUEST_FS_BASE);
         break;
 
     case MSR_GS_BASE:
-        __vmread(GUEST_GS_BASE, msr_content);
+        *msr_content = vmread_unsafe(GUEST_GS_BASE);
         break;
 
     case MSR_SHADOW_GS_BASE:
@@ -3462,7 +3451,7 @@ static int cf_check vmx_msr_read_intercept(
         break;
 
     case MSR_IA32_DEBUGCTLMSR:
-        __vmread(GUEST_IA32_DEBUGCTL, msr_content);
+        *msr_content = vmread_unsafe(GUEST_IA32_DEBUGCTL);
         break;
 
     case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_VMFUNC:
@@ -3828,7 +3817,7 @@ static void vmx_do_extint(struct cpu_user_regs *regs)
 {
     unsigned long vector;
 
-    __vmread(VM_EXIT_INTR_INFO, &vector);
+    vector = vmread_unsafe(VM_EXIT_INTR_INFO);
     BUG_ON(!(vector & INTR_INFO_VALID_MASK));
 
     vector &= INTR_INFO_VECTOR_MASK;
@@ -3893,7 +3882,7 @@ static void ept_handle_violation(ept_qual_t q, paddr_t gpa)
 
     if ( q.gla_valid )
     {
-        __vmread(GUEST_LINEAR_ADDRESS, &gla);
+        gla = vmread_unsafe(GUEST_LINEAR_ADDRESS);
         npfec.gla_valid = 1;
         if( q.gla_fault )
             npfec.kind = npfec_kind_with_gla;
@@ -3944,7 +3933,7 @@ static void vmx_failed_vmentry(unsigned int exit_reason,
     struct vcpu *curr = current;
 
     printk("%pv vmentry failure (reason %#x): ", curr, exit_reason);
-    __vmread(EXIT_QUALIFICATION, &exit_qualification);
+    exit_qualification = vmread_unsafe(EXIT_QUALIFICATION);
     switch ( failed_vmentry_reason )
     {
     case EXIT_REASON_INVALID_GUEST_STATE:
@@ -4001,13 +3990,12 @@ void vmx_enter_realmode(struct cpu_user_regs *regs)
 
 static int vmx_handle_eoi_write(void)
 {
-    unsigned long exit_qualification;
+    unsigned long exit_qualification = vmread_unsafe(EXIT_QUALIFICATION);
 
     /*
      * 1. Must be a linear access data write.
      * 2. Data write must be to the EOI register.
      */
-    __vmread(EXIT_QUALIFICATION, &exit_qualification);
     if ( (((exit_qualification >> 12) & 0xf) == 1) &&
          ((exit_qualification & 0xfff) == APIC_EOI) )
     {
@@ -4033,21 +4021,14 @@ static void vmx_propagate_intr(unsigned long intr)
         .vector = MASK_EXTR(intr, INTR_INFO_VECTOR_MASK),
         .type = MASK_EXTR(intr, INTR_INFO_INTR_TYPE_MASK),
     };
-    unsigned long tmp;
 
     if ( intr & INTR_INFO_DELIVER_CODE_MASK )
-    {
-        __vmread(VM_EXIT_INTR_ERROR_CODE, &tmp);
-        event.error_code = tmp;
-    }
+        event.error_code = vmread_unsafe(VM_EXIT_INTR_ERROR_CODE);
     else
         event.error_code = X86_EVENT_NO_EC;
 
     if ( event.type >= X86_ET_SW_INT )
-    {
-        __vmread(VM_EXIT_INSTRUCTION_LEN, &tmp);
-        event.insn_len = tmp;
-    }
+        event.insn_len = vmread_unsafe(VM_EXIT_INSTRUCTION_LEN);
     else
         event.insn_len = 0;
 
@@ -4071,7 +4052,7 @@ static void vmx_idtv_reinject(unsigned long idtv_info)
             {
                 unsigned long ec;
 
-                __vmread(IDT_VECTORING_ERROR_CODE, &ec);
+                ec = vmread_unsafe(IDT_VECTORING_ERROR_CODE);
                 __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, ec);
             }
         }
@@ -4086,7 +4067,7 @@ static void vmx_idtv_reinject(unsigned long idtv_info)
         {
             unsigned long intr_info;
 
-            __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_info);
+            intr_info = vmread_unsafe(GUEST_INTERRUPTIBILITY_INFO);
             __vmwrite(GUEST_INTERRUPTIBILITY_INFO,
                       intr_info & ~VMX_INTR_SHADOW_NMI);
         }
@@ -4111,8 +4092,8 @@ static void vmx_handle_descriptor_access(uint32_t exit_reason)
     uint64_t exit_qualification;
     unsigned int desc;
 
-    __vmread(EXIT_QUALIFICATION, &exit_qualification);
-    __vmread(VMX_INSTRUCTION_INFO, &instr_info);
+    exit_qualification = vmread_unsafe(EXIT_QUALIFICATION);
+    instr_info = vmread_unsafe(VMX_INSTRUCTION_INFO);
 
     if ( exit_reason == EXIT_REASON_ACCESS_GDTR_OR_IDTR )
     {
@@ -4137,7 +4118,7 @@ static int vmx_handle_apic_write(void)
     unsigned long exit_qualification;
 
     ASSERT(cpu_has_vmx_apic_reg_virt);
-    __vmread(EXIT_QUALIFICATION, &exit_qualification);
+    exit_qualification = vmread_unsafe(EXIT_QUALIFICATION);
 
     return vlapic_apicv_write(current, exit_qualification & 0xfff);
 }
@@ -4146,7 +4127,7 @@ static void undo_nmis_unblocked_by_iret(void)
 {
     unsigned long guest_info;
 
-    __vmread(GUEST_INTERRUPTIBILITY_INFO, &guest_info);
+    guest_info = vmread_unsafe(GUEST_INTERRUPTIBILITY_INFO);
     __vmwrite(GUEST_INTERRUPTIBILITY_INFO,
               guest_info | VMX_INTR_SHADOW_NMI);
 }
@@ -4159,12 +4140,12 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
     struct vcpu *v = current;
     struct domain *currd = v->domain;
 
-    __vmread(GUEST_RIP,    &regs->rip);
-    __vmread(GUEST_RSP,    &regs->rsp);
-    __vmread(GUEST_RFLAGS, &regs->rflags);
+    regs->rip = vmread_unsafe(GUEST_RIP);
+    regs->rsp = vmread_unsafe(GUEST_RSP);
+    regs->rflags = vmread_unsafe(GUEST_RFLAGS);
 
     if ( hvm_long_mode_active(v) )
-        __vmread(GUEST_CS_AR_BYTES, &cs_ar_bytes);
+        cs_ar_bytes = vmread_unsafe(GUEST_CS_AR_BYTES);
 
     hvm_sanitize_regs_fields(regs, !(cs_ar_bytes & X86_SEG_AR_CS_LM_ACTIVE));
 
@@ -4174,17 +4155,17 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
          * Xen allows the guest to modify some CR4 bits directly, update cached
          * values to match.
          */
-        __vmread(GUEST_CR4, &v->arch.hvm.hw_cr[4]);
+        v->arch.hvm.hw_cr[4] = vmread_unsafe(GUEST_CR4);
         v->arch.hvm.guest_cr[4] &= v->arch.hvm.vmx.cr4_host_mask;
         v->arch.hvm.guest_cr[4] |= (v->arch.hvm.hw_cr[4] &
                                     ~v->arch.hvm.vmx.cr4_host_mask);
 
-        __vmread(GUEST_CR3, &v->arch.hvm.hw_cr[3]);
+        v->arch.hvm.hw_cr[3] = vmread_unsafe(GUEST_CR3);
         if ( vmx_unrestricted_guest(v) || hvm_paging_enabled(v) )
             v->arch.hvm.guest_cr[3] = v->arch.hvm.hw_cr[3];
     }
 
-    __vmread(VM_EXIT_REASON, &exit_reason);
+    exit_reason = vmread_unsafe(VM_EXIT_REASON);
 
     if ( hvm_long_mode_active(v) )
         TRACE_TIME(TRC_HVM_VMX_EXIT64, exit_reason, regs->rip, regs->rip >> 32);
@@ -4200,7 +4181,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
         vmx_do_extint(regs);
         break;
     case EXIT_REASON_EXCEPTION_NMI:
-        __vmread(VM_EXIT_INTR_INFO, &intr_info);
+        intr_info = vmread_unsafe(VM_EXIT_INTR_INFO);
         BUG_ON(!(intr_info & INTR_INFO_VALID_MASK));
         vector = intr_info & INTR_INFO_VECTOR_MASK;
         if ( vector == X86_EXC_MC )
@@ -4237,12 +4218,12 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
 
         if ( v->arch.hvm.vmx.secondary_exec_control &
             SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS )
-            __vmread(EPTP_INDEX, &idx);
+        {
+            idx = vmread_unsafe(EPTP_INDEX);
+        }
         else
         {
-            unsigned long eptp;
-
-            __vmread(EPT_POINTER, &eptp);
+            unsigned long eptp = vmread_unsafe(EPT_POINTER);
 
             if ( (idx = p2m_find_altp2m_by_eptp(v->domain, eptp)) ==
                  INVALID_ALTP2M )
@@ -4259,10 +4240,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
 
     if ( unlikely(currd->arch.monitor.vmexit_enabled) )
     {
-        int rc;
-
-        __vmread(EXIT_QUALIFICATION, &exit_qualification);
-        rc = hvm_monitor_vmexit(exit_reason, exit_qualification);
+        int rc = hvm_monitor_vmexit(exit_reason, vmread_unsafe(EXIT_QUALIFICATION));
         if ( rc < 0 )
             goto exit_and_crash;
         if ( rc )
@@ -4327,7 +4305,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
 
     hvm_maybe_deassert_evtchn_irq();
 
-    __vmread(IDT_VECTORING_INFO, &idtv_info);
+    idtv_info = vmread_unsafe(IDT_VECTORING_INFO);
     if ( exit_reason != EXIT_REASON_TASK_SWITCH )
         vmx_idtv_reinject(idtv_info);
 
@@ -4362,7 +4340,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
              * Updates DR6 where debugger can peek (See 3B 23.2.1,
              * Table 23-1, "Exit Qualification for Debug Exceptions").
              */
-            __vmread(EXIT_QUALIFICATION, &exit_qualification);
+            exit_qualification = vmread_unsafe(EXIT_QUALIFICATION);
             TRACE(TRC_HVM_TRAP_DEBUG, exit_qualification);
             __restore_debug_registers(v);
             write_debugreg(6, exit_qualification | DR_STATUS_RESERVED_ONE);
@@ -4388,29 +4366,25 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
              */
             if ( unlikely(regs->eflags & X86_EFLAGS_TF) )
             {
-                unsigned long int_info;
-
-                __vmread(GUEST_INTERRUPTIBILITY_INFO, &int_info);
+                unsigned long int_info = vmread_unsafe(GUEST_INTERRUPTIBILITY_INFO);
 
                 if ( int_info & (VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS) )
                 {
-                    unsigned long pending_dbg;
+                    unsigned long exc = vmread_unsafe(GUEST_PENDING_DBG_EXCEPTIONS);
 
-                    __vmread(GUEST_PENDING_DBG_EXCEPTIONS, &pending_dbg);
-                    __vmwrite(GUEST_PENDING_DBG_EXCEPTIONS,
-                              pending_dbg | DR_STEP);
+                    __vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, exc | DR_STEP);
                 }
             }
 
             if ( !v->domain->debugger_attached )
             {
                 unsigned long insn_len = 0;
-                int rc;
                 unsigned long trap_type = MASK_EXTR(intr_info,
                                                     INTR_INFO_INTR_TYPE_MASK);
+                int rc;
 
                 if ( trap_type >= X86_ET_SW_INT )
-                    __vmread(VM_EXIT_INSTRUCTION_LEN, &insn_len);
+                    insn_len = vmread_unsafe(VM_EXIT_INSTRUCTION_LEN);
 
                 rc = hvm_monitor_debug(regs->rip,
                                        HVM_MONITOR_DEBUG_EXCEPTION,
@@ -4428,10 +4402,9 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
             TRACE(TRC_HVM_TRAP, vector);
             if ( !v->domain->debugger_attached )
             {
-                unsigned long insn_len;
+                unsigned long insn_len = vmread_unsafe(VM_EXIT_INSTRUCTION_LEN);
                 int rc;
 
-                __vmread(VM_EXIT_INSTRUCTION_LEN, &insn_len);
                 rc = hvm_monitor_debug(regs->rip,
                                        HVM_MONITOR_SOFTWARE_BREAKPOINT,
                                        X86_ET_SW_EXC,
@@ -4454,8 +4427,8 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
             vmx_fpu_dirty_intercept();
             break;
         case X86_EXC_PF:
-            __vmread(EXIT_QUALIFICATION, &exit_qualification);
-            __vmread(VM_EXIT_INTR_ERROR_CODE, &ecode);
+            exit_qualification = vmread_unsafe(EXIT_QUALIFICATION);
+            ecode = vmread_unsafe(VM_EXIT_INTR_ERROR_CODE);
             regs->error_code = ecode;
 
             HVM_DBG_LOG(DBG_LEVEL_VMMU,
@@ -4522,7 +4495,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
         };
         unsigned int inst_len, source;
 
-        __vmread(EXIT_QUALIFICATION, &exit_qualification);
+        exit_qualification = vmread_unsafe(EXIT_QUALIFICATION);
         source = (exit_qualification >> 30) & 3;
         /* Vectored event should fill in interrupt information. */
         WARN_ON((source == 3) && !(idtv_info & INTR_INFO_VALID_MASK));
@@ -4536,9 +4509,9 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
                      > 3)) /* IntrType > 3? */
             ? get_instruction_length() /* Safe: SDM 3B 23.2.4 */ : 0;
         if ( (source == 3) && (idtv_info & INTR_INFO_DELIVER_CODE_MASK) )
-            __vmread(IDT_VECTORING_ERROR_CODE, &ecode);
+            ecode = vmread_unsafe(IDT_VECTORING_ERROR_CODE);
         else
-             ecode = -1;
+            ecode = -1;
 
         hvm_task_switch(exit_qualification, reasons[source], ecode, inst_len,
                         0 /* EFLAGS.RF already updated. */);
@@ -4565,7 +4538,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
         break;
     case EXIT_REASON_INVLPG:
         update_guest_eip(); /* Safe: INVLPG */
-        __vmread(EXIT_QUALIFICATION, &exit_qualification);
+        exit_qualification = vmread_unsafe(EXIT_QUALIFICATION);
         vmx_invlpg_intercept(exit_qualification);
         break;
     case EXIT_REASON_RDTSCP:
@@ -4591,13 +4564,13 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
 
     case EXIT_REASON_CR_ACCESS:
     {
-        __vmread(EXIT_QUALIFICATION, &exit_qualification);
+        exit_qualification = vmread_unsafe(EXIT_QUALIFICATION);
         if ( vmx_cr_access(exit_qualification) == X86EMUL_OKAY )
             update_guest_eip(); /* Safe: MOV Cn, LMSW, CLTS */
         break;
     }
     case EXIT_REASON_DR_ACCESS:
-        __vmread(EXIT_QUALIFICATION, &exit_qualification);
+        exit_qualification = vmread_unsafe(EXIT_QUALIFICATION);
         vmx_dr_access(exit_qualification, regs);
         break;
     case EXIT_REASON_MSR_READ:
@@ -4671,7 +4644,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
         break;
 
     case EXIT_REASON_EOI_INDUCED:
-        __vmread(EXIT_QUALIFICATION, &exit_qualification);
+        exit_qualification = vmread_unsafe(EXIT_QUALIFICATION);
 
         ASSERT(cpu_has_vmx_virtual_intr_delivery);
 
@@ -4695,7 +4668,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
         unsigned int bytes;
         int rc;
 
-        __vmread(EXIT_QUALIFICATION, &io_qual.raw);
+        io_qual.raw = vmread_unsafe(EXIT_QUALIFICATION);
         bytes = io_qual.size + 1;
 
         rc = hvm_monitor_io(io_qual.port, bytes, io_qual.in, io_qual.str);
@@ -4728,10 +4701,9 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
 
     case EXIT_REASON_EPT_VIOLATION:
     {
-        paddr_t gpa;
+        paddr_t gpa = vmread_unsafe(GUEST_PHYSICAL_ADDRESS);
 
-        __vmread(GUEST_PHYSICAL_ADDRESS, &gpa);
-        __vmread(EXIT_QUALIFICATION, &exit_qualification);
+        exit_qualification = vmread_unsafe(EXIT_QUALIFICATION);
 
         if ( unlikely(exit_qualification & INTR_INFO_NMI_UNBLOCKED_BY_IRET) &&
              !(idtv_info & INTR_INFO_VALID_MASK) )
@@ -4743,9 +4715,8 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
 
     case EXIT_REASON_EPT_MISCONFIG:
     {
-        paddr_t gpa;
+        paddr_t gpa = vmread_unsafe(GUEST_PHYSICAL_ADDRESS);
 
-        __vmread(GUEST_PHYSICAL_ADDRESS, &gpa);
         if ( !ept_handle_misconfig(gpa) )
             goto exit_and_crash;
         break;
@@ -4781,7 +4752,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
         break;
 
     case EXIT_REASON_PML_FULL:
-        __vmread(EXIT_QUALIFICATION, &exit_qualification);
+        exit_qualification = vmread_unsafe(EXIT_QUALIFICATION);
 
         if ( unlikely(exit_qualification & INTR_INFO_NMI_UNBLOCKED_BY_IRET) &&
              !(idtv_info & INTR_INFO_VALID_MASK) )
@@ -4804,7 +4775,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
         break;
 
     case EXIT_REASON_NOTIFY:
-        __vmread(EXIT_QUALIFICATION, &exit_qualification);
+        exit_qualification = vmread_unsafe(EXIT_QUALIFICATION);
 
         if ( unlikely(exit_qualification & NOTIFY_VM_CONTEXT_INVALID) )
         {
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index ceb5e5a322..3ae65683ee 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -400,8 +400,7 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
     unsigned long base, index, seg_base, disp, offset;
     int scale, size;
 
-    __vmread(VMX_INSTRUCTION_INFO, &offset);
-    info.word = offset;
+    info.word = vmread_unsafe(VMX_INSTRUCTION_INFO);
 
     if ( info.fields.memreg ) {
         decode->type = VMX_INST_MEMREG_TYPE_REG;
@@ -428,7 +427,7 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
 
         scale = 1 << info.fields.scaling;
 
-        __vmread(EXIT_QUALIFICATION, &disp);
+        disp = vmread_unsafe(EXIT_QUALIFICATION);
 
         size = 1 << (info.fields.addr_size + 1);
 
@@ -997,7 +996,7 @@ static void vvmcs_to_shadow_bulk(struct vcpu *v, unsigned int n,
 
     virtual_vmcs_enter(v);
     for ( i = 0; i < n; i++ )
-        __vmread(field[i], &value[i]);
+        value[i] = vmread_unsafe(field[i]);
     virtual_vmcs_exit(v);
 
     for ( i = 0; i < n; i++ )
@@ -1012,7 +1011,7 @@ fallback:
 
 static inline void shadow_to_vvmcs(const struct vcpu *v, unsigned int field)
 {
-    unsigned long value;
+    unsigned long value = 0;
 
     if ( vmread_safe(field, &value) == 0 )
         set_vvmcs(v, field, value);
@@ -1036,7 +1035,7 @@ static void shadow_to_vvmcs_bulk(struct vcpu *v, unsigned int n,
     }
 
     for ( i = 0; i < n; i++ )
-        __vmread(field[i], &value[i]);
+        value[i] = vmread_unsafe(field[i]);
 
     virtual_vmcs_enter(v);
     for ( i = 0; i < n; i++ )
@@ -1405,7 +1404,7 @@ static void nvmx_update_apicv(struct vcpu *v)
     }
     else
        /* Keep previous SVI if there's any. */
-       __vmread(GUEST_INTR_STATUS, &status);
+       status = vmread_unsafe(GUEST_INTR_STATUS);
 
     rvi = vlapic_has_pending_irq(v);
     if ( rvi != -1 )
@@ -1687,7 +1686,6 @@ static int nvmx_handle_vmresume(struct cpu_user_regs *regs)
     bool launched;
     struct vcpu *v = current;
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
-    unsigned long intr_shadow;
     int rc;
 
     if ( !vvmcx_valid(v) )
@@ -1696,8 +1694,7 @@ static int nvmx_handle_vmresume(struct cpu_user_regs *regs)
         return X86EMUL_OKAY;        
     }
 
-    __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
-    if ( intr_shadow & VMX_INTR_SHADOW_MOV_SS )
+    if ( vmread_unsafe(GUEST_INTERRUPTIBILITY_INFO) & VMX_INTR_SHADOW_MOV_SS )
     {
         vmfail_valid(regs, VMX_INSN_VMENTRY_BLOCKED_BY_MOV_SS);
         return X86EMUL_OKAY;
@@ -1723,7 +1720,6 @@ static int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
     bool launched;
     struct vcpu *v = current;
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
-    unsigned long intr_shadow;
     int rc;
 
     if ( !vvmcx_valid(v) )
@@ -1732,8 +1728,7 @@ static int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
         return X86EMUL_OKAY;
     }
 
-    __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
-    if ( intr_shadow & VMX_INTR_SHADOW_MOV_SS )
+    if ( vmread_unsafe(GUEST_INTERRUPTIBILITY_INFO) & VMX_INTR_SHADOW_MOV_SS )
     {
         vmfail_valid(regs, VMX_INSN_VMENTRY_BLOCKED_BY_MOV_SS);
         return X86EMUL_OKAY;
@@ -2355,7 +2350,7 @@ int cf_check nvmx_hap_walk_L1_p2m(
 
     vmx_vmcs_enter(v);
 
-    __vmread(EXIT_QUALIFICATION, &exit_qual);
+    exit_qual = vmread_unsafe(EXIT_QUALIFICATION);
     rc = nept_translate_l2ga(v, L2_gpa, page_order, rwx_rights, &gfn, p2m_acc,
                              &exit_qual, &exit_reason);
     switch ( rc )
@@ -2391,7 +2386,7 @@ void nvmx_idtv_handling(void)
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     unsigned long idtv_info, reason;
 
-    __vmread(IDT_VECTORING_INFO, &idtv_info);
+    idtv_info = vmread_unsafe(IDT_VECTORING_INFO);
     if ( likely(!(idtv_info & INTR_INFO_VALID_MASK)) )
         return;
 
@@ -2399,7 +2394,7 @@ void nvmx_idtv_handling(void)
      * If L0 can solve the fault that causes idt vectoring, it should
      * be reinjected, otherwise, pass to L1.
      */
-    __vmread(VM_EXIT_REASON, &reason);
+    reason = vmread_unsafe(VM_EXIT_REASON);
     if ( (uint16_t)reason != EXIT_REASON_EPT_VIOLATION ?
          !(nvmx->intr.intr_info & INTR_INFO_VALID_MASK) :
          !nvcpu->nv_vmexit_pending )
@@ -2407,7 +2402,7 @@ void nvmx_idtv_handling(void)
         __vmwrite(VM_ENTRY_INTR_INFO, idtv_info & ~INTR_INFO_RESVD_BITS_MASK);
         if ( idtv_info & INTR_INFO_DELIVER_CODE_MASK )
         {
-            __vmread(IDT_VECTORING_ERROR_CODE, &reason);
+            reason = vmread_unsafe(IDT_VECTORING_ERROR_CODE);
             __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, reason);
         }
         /*
@@ -2418,7 +2413,7 @@ void nvmx_idtv_handling(void)
          * This means EXIT_INSTRUCTION_LEN is always valid here, for
          * software interrupts both injected by L1, and generated in L2.
          */
-        __vmread(VM_EXIT_INSTRUCTION_LEN, &reason);
+        reason = vmread_unsafe(VM_EXIT_INSTRUCTION_LEN);
         __vmwrite(VM_ENTRY_INSTRUCTION_LEN, reason);
    }
 }
@@ -2452,7 +2447,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
         u64 exec_bitmap;
         int vector;
 
-        __vmread(VM_EXIT_INTR_INFO, &intr_info);
+        intr_info = vmread_unsafe(VM_EXIT_INTR_INFO);
         vector = intr_info & INTR_INFO_VECTOR_MASK;
         /*
          * decided by L0 and L1 exception bitmap, if the vetor is set by
@@ -2531,7 +2526,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
             unsigned long qual;
             u16 port, size;
 
-            __vmread(EXIT_QUALIFICATION, &qual);
+            qual = vmread_unsafe(EXIT_QUALIFICATION);
             port = qual >> 16;
             size = (qual & 7) + 1;
             do {
@@ -2638,7 +2633,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
         cr_access_qual_t qual;
         u32 mask = 0;
 
-        __vmread(EXIT_QUALIFICATION, &qual.raw);
+        qual.raw = vmread_unsafe(EXIT_QUALIFICATION);
         /* also according to guest exec_control */
         ctrl = __n2_exec_control(v);
 
@@ -2680,7 +2675,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
                 {
                     u64 cr0_gh_mask = get_vvmcs(v, CR0_GUEST_HOST_MASK);
 
-                    __vmread(CR0_READ_SHADOW, &old_val);
+                    old_val = vmread_unsafe(CR0_READ_SHADOW);
                     changed_bits = old_val ^ val;
                     if ( changed_bits & cr0_gh_mask )
                         nvcpu->nv_vmexit_pending = 1;
@@ -2696,7 +2691,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
                 {
                     u64 cr4_gh_mask = get_vvmcs(v, CR4_GUEST_HOST_MASK);
 
-                    __vmread(CR4_READ_SHADOW, &old_val);
+                    old_val = vmread_unsafe(CR4_READ_SHADOW);
                     changed_bits = old_val ^ val;
                     if ( changed_bits & cr4_gh_mask )
                         nvcpu->nv_vmexit_pending = 1;
@@ -2732,7 +2727,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
             {
                 u64 cr0_gh_mask = get_vvmcs(v, CR0_GUEST_HOST_MASK);
 
-                __vmread(CR0_READ_SHADOW, &old_val);
+                old_val = vmread_unsafe(CR0_READ_SHADOW);
                 old_val &= X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS;
                 val = qual.lmsw_data &
                       (X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS);
diff --git a/xen/arch/x86/include/asm/domain.h b/xen/arch/x86/include/asm/domain.h
index 6b877e33a1..ffe9acd75d 100644
--- a/xen/arch/x86/include/asm/domain.h
+++ b/xen/arch/x86/include/asm/domain.h
@@ -595,7 +595,7 @@ struct arch_vcpu
 
     /* Debug registers. */
     unsigned long dr[4];
-    unsigned long dr7; /* Ideally int, but __vmread() needs long. */
+    unsigned long dr7; /* Ideally int, but vmread_unsafe() needs unsigned long. */
     unsigned int dr6;
 
     /* other state */
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmx.h b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
index 33d3d43a38..c96e9f4711 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
@@ -320,16 +320,40 @@ static always_inline void __vmpclear(u64 addr)
     BUG();
 }
 
-static always_inline void __vmread(unsigned long field, unsigned long *value)
+static always_inline unsigned long vmread_unsafe(unsigned long field)
 {
-    asm volatile ( "vmread %1, %0\n\t"
-                   /* CF==1 or ZF==1 --> BUG() */
-                   UNLIKELY_START(be, vmread)
-                   _ASM_BUGFRAME_TEXT(0)
-                   UNLIKELY_END_SECTION
-                   : "=rm" (*value)
-                   : "r" (field),
-                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0) );
+    unsigned long value;
+
+    asm volatile ( "vmread %[field], %[value]\n\t"
+                   "jc 1f\n\t"
+                   "jz 1f\n\t"
+                   "jmp 2f\n\t"
+                   "1:\n\t"
+                   "    ud2\n\t"
+                   "2:"
+                   : [value] "=rm" (value)
+                   : [field] "r" (field) );
+
+    return value;
+}
+
+static inline enum vmx_insn_errno vmread_safe(unsigned long field,
+                                              unsigned long *value)
+{
+    asm goto ( "vmread %[field], %[value]\n\t"
+               "jc %l[vmfail_invalid]\n\t"
+               "jz %l[vmfail_error]"
+               :
+               : [field] "r" (field), [value] "m" (*value)
+               :
+               : vmfail_invalid, vmfail_error );
+    return VMX_INSN_SUCCEED;
+
+ vmfail_invalid:
+    return VMX_INSN_FAIL_INVALID;
+
+ vmfail_error:
+    return vmread_unsafe(VM_INSTRUCTION_ERROR);
 }
 
 static always_inline void __vmwrite(unsigned long field, unsigned long value)
@@ -346,33 +370,9 @@ static always_inline void __vmwrite(unsigned long field, unsigned long value)
     BUG();
 }
 
-static inline enum vmx_insn_errno vmread_safe(unsigned long field,
-                                              unsigned long *value)
-{
-    unsigned long ret = VMX_INSN_SUCCEED;
-    bool fail_invalid, fail_valid;
-
-    asm volatile ( "vmread %[field], %[value]\n\t"
-                   ASM_FLAG_OUT(, "setc %[invalid]\n\t")
-                   ASM_FLAG_OUT(, "setz %[valid]\n\t")
-                   : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid),
-                     ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid),
-                     [value] "=rm" (*value)
-                   : [field] "r" (field) );
-
-    if ( unlikely(fail_invalid) )
-        ret = VMX_INSN_FAIL_INVALID;
-    else if ( unlikely(fail_valid) )
-        __vmread(VM_INSTRUCTION_ERROR, &ret);
-
-    return ret;
-}
-
 static inline enum vmx_insn_errno vmwrite_safe(unsigned long field,
                                                unsigned long value)
 {
-    unsigned long ret;
-
     asm goto ( "vmwrite %[value], %[field]\n\t"
                "jc %l[vmfail_invalid]\n\t"
                "jz %l[vmfail_error]"
@@ -386,8 +386,7 @@ static inline enum vmx_insn_errno vmwrite_safe(unsigned long field,
     return VMX_INSN_FAIL_INVALID;
 
  vmfail_error:
-    __vmread(VM_INSTRUCTION_ERROR, &ret);
-    return ret;
+    return vmread_unsafe(VM_INSTRUCTION_ERROR);
 }
 
 static always_inline void __invept(unsigned long type, uint64_t eptp)
-- 
2.34.1