With the new toolchain baseline, we can make use of asm goto() in certain places, and the VMXON invocation is one example.
This removes the logic to set up rc (including a fixup section where
backtraces have no connection to the invoking function), the logic to decode
it, and the default case which was dead, but in a way the compiler couldn't
previously prove.

No functional change.

Signed-off-by: Andrew Cooper <andrew.coop...@citrix.com>
---
CC: Jan Beulich <jbeul...@suse.com>
CC: Roger Pau Monné <roger....@citrix.com>
CC: dm...@proton.me

RFC.  To be rebased over Denis' general cleanup.

In principle, we can split fail into fail_valid and fail_invalid, allowing us
to spot the VMfail("VMXON executed in VMX root operation") case from the
pseudocode.  However, getting that involves a VMREAD of VM_INSTRUCTION_ERROR,
and error handling in case there isn't a loaded VMCS, so I think the
complexity is unwarranted in this case.

Bloat-o-meter:
  add/remove: 0/0 grow/shrink: 1/1 up/down: 13/-32 (-19)
  Function                                     old     new   delta
  _vmx_cpu_up.cold                            2460    2473     +13
  _vmx_cpu_up                                 1803    1771     -32

The if ( 0 ) isn't terribly nice, but it's the least bad option I could come
up with.  It does allow the structure of the switch() to remain largely
intact.
---
 xen/arch/x86/hvm/vmx/vmcs.c            | 21 ++++++++++++---------
 xen/arch/x86/include/asm/hvm/vmx/vmx.h | 19 -------------------
 2 files changed, 12 insertions(+), 28 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 0ba65becec1e..98f56b636fb3 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -749,9 +749,16 @@ static int _vmx_cpu_up(bool bsp)
     if ( bsp && (rc = vmx_cpu_up_prepare(cpu)) != 0 )
         return rc;
 
-    switch ( __vmxon(this_cpu(vmxon_region)) )
+    asm goto ( "1: vmxon %[addr]\n\t"
+               "jbe %l[fail]\n\t"
+               _ASM_EXTABLE(1b, %l[fault])
+               :
+               : [addr] "m" (this_cpu(vmxon_region))
+               :
+               : fail, fault );
+    if ( 0 ) /* asm goto error paths */
     {
-    case -2: /* #UD or #GP */
+    fault:
         if ( bios_locked &&
              test_bit(X86_FEATURE_SMX, &boot_cpu_data.x86_capability) &&
              (!(eax & IA32_FEATURE_CONTROL_ENABLE_VMXON_OUTSIDE_SMX) ||
@@ -763,17 +770,13 @@
                    "bootloader.\n");
             return -EINVAL;
         }
-        /* fall through */
-    case -1: /* CF==1 or ZF==1 */
+    fail:
         printk("CPU%d: unexpected VMXON failure\n", cpu);
         return -EINVAL;
-    case 0: /* success */
-        this_cpu(vmxon) = 1;
-        break;
-    default:
-        BUG();
     }
 
+    this_cpu(vmxon) = 1;
+
     hvm_asid_init(cpu_has_vmx_vpid ? (1u << VMCS_VPID_WIDTH) : 0);
 
     if ( cpu_has_vmx_ept )
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmx.h b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
index 843f8591b9cf..7c6ba7340744 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
@@ -559,25 +559,6 @@ static inline void __vmxoff(void)
         : : : "memory" );
 }
 
-static inline int __vmxon(u64 addr)
-{
-    int rc;
-
-    asm volatile (
-        "1: " VMXON_OPCODE MODRM_EAX_06 "\n"
-        "   setna %b0 ; neg %0\n" /* CF==1 or ZF==1 --> rc = -1 */
-        "2:\n"
-        ".section .fixup,\"ax\"\n"
-        "3: sub $2,%0 ; jmp 2b\n" /* #UD or #GP --> rc = -2 */
-        ".previous\n"
-        _ASM_EXTABLE(1b, 3b)
-        : "=q" (rc)
-        : "0" (0), "a" (&addr)
-        : "memory");
-
-    return rc;
-}
-
 int cf_check vmx_guest_x86_mode(struct vcpu *v);
 unsigned int vmx_get_cpl(void);
 
base-commit: cff389bca78885447c8cfa381e058c6fb983df9c
-- 
2.39.5
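
As an illustrative aside, not part of the patch: the following minimal,
standalone sketch shows the asm goto pattern the change relies on, i.e. how
the flag outcome of an instruction can branch directly to a C label, which is
what removes the setna/neg decode step and the .fixup trampoline used by the
old __vmxon() helper.  It deliberately uses a plain CMP (x86, AT&T syntax)
instead of VMXON so it builds and runs in userspace; check_above() and the
file name demo.c are made up for the example.

/*
 * Illustrative only -- not from the patch.  A userspace stand-in for the
 * "jbe %l[fail]" pattern: CMP sets CF/ZF, and asm goto turns that flag
 * state directly into control flow, with no error code to decode.
 * Build with an asm goto capable GCC/Clang:  gcc -O2 demo.c
 */
#include <stdio.h>

/* Returns 0 if val > limit (unsigned), -1 if CF==1 or ZF==1 after CMP. */
static int check_above(unsigned int val, unsigned int limit)
{
    asm goto ( "cmp %[lim], %[v]\n\t"
               "jbe %l[fail]"            /* CF==1 or ZF==1 -> fail */
               :
               : [v] "r" (val), [lim] "r" (limit)
               : "cc"
               : fail );

    return 0;

 fail:
    return -1;
}

int main(void)
{
    printf("3 > 5: %d\n", check_above(3, 5)); /* prints -1 */
    printf("7 > 5: %d\n", check_above(7, 5)); /* prints  0 */

    return 0;
}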