On 29/06/2023 1:17 pm, Alejandro Vallejo wrote:
> Intel CPUs have a bit in MSR_IA32_MISC_ENABLE that may prevent the NX bit
> from being advertised. Clear it unconditionally if we can't find the NX
> feature right away on boot.
>
> The conditions for the MSR being read on early boot are (in this order):
>
> * Long Mode is supported
> * NX isn't advertised
> * The vendor is Intel
>
> The order of checks has been chosen carefully so a virtualized Xen on a
> hypervisor that doesn't emulate that MSR (but supports NX) doesn't triple
> fault trying to access the non-existent MSR.
>
> With that done, we can remove the XD_DISABLE checks in the Intel-specific
> init path (as they are already done in early assembly). Keep a printk to
> highlight the fact that NX was forcibly enabled.
>
> Signed-off-by: Alejandro Vallejo <alejandro.vall...@cloud.com>

Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>, with two minor
fixes.
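
For anyone skimming the assembly, the new early-boot path is roughly the
following in C terms.  This is only an illustrative sketch: the real logic is
open-coded in head.S, and the rdmsr()/wrmsr()/cpuid_raw() helpers and the
early_enable_nx() name are stand-ins rather than real interfaces.

    #include <stdint.h>

    /* Stand-in helpers; head.S does all of this with raw instructions. */
    extern uint64_t rdmsr(uint32_t msr);
    extern void wrmsr(uint32_t msr, uint64_t val);
    extern void cpuid_raw(uint32_t leaf, uint32_t *eax, uint32_t *ebx,
                          uint32_t *ecx, uint32_t *edx);

    #define MSR_IA32_MISC_ENABLE    0x000001a0
    #define MISC_ENABLE_XD_DISABLE  (1ULL << 34)

    /* Caller has already confirmed Long Mode support (else we .Lbad_cpu). */
    static void early_enable_nx(uint32_t *feat_edx)
    {
        uint32_t eax, ebx, ecx, edx;

        /* NX already advertised?  Nothing to do. */
        if ( *feat_edx & (1u << 20 /* cpufeat_bit(X86_FEATURE_NX) */) )
            return;

        /*
         * Only poke the MSR on Intel.  A hypervisor which gives us NX but
         * doesn't emulate MSR_IA32_MISC_ENABLE must never reach the rdmsr,
         * or the resulting #GP would escalate to a triple fault this early.
         */
        cpuid_raw(0, &eax, &ebx, &ecx, &edx);
        if ( ebx != 0x756e6547 /* "Genu" */ ||
             edx != 0x49656e69 /* "ineI" */ ||
             ecx != 0x6c65746e /* "ntel" */ )
            return;

        /* Clear XD_DISABLE (bit 34) and re-sample the feature leaf. */
        wrmsr(MSR_IA32_MISC_ENABLE,
              rdmsr(MSR_IA32_MISC_ENABLE) & ~MISC_ENABLE_XD_DISABLE);

        cpuid_raw(0x80000001, &eax, &ebx, &ecx, feat_edx);
    }
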

> diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S
> index 9fbd602ea5..0e02c28f37 100644
> --- a/xen/arch/x86/boot/head.S
> +++ b/xen/arch/x86/boot/head.S
> @@ -652,16 +652,53 @@ trampoline_setup:
>          cpuid
>  1:      mov     %edx, CPUINFO_FEATURE_OFFSET(X86_FEATURE_LM) + sym_esi(boot_cpu_data)
>  
> -        /* Check for NX. Adjust EFER setting if available. */
> -        bt      $cpufeat_bit(X86_FEATURE_NX), %edx
> -        jnc     1f
> -        orb     $EFER_NXE >> 8, 1 + sym_esi(trampoline_efer)
> -1:
> -
>          /* Check for availability of long mode. */
>          bt      $cpufeat_bit(X86_FEATURE_LM),%edx
>          jnc     .Lbad_cpu
>  
> +        /* Check for NX */
> +        bt      $cpufeat_bit(X86_FEATURE_NX), %edx
> +        jc     .Lgot_nx
> +
> +        /*
> +         * NX appears to be unsupported, but it might be hidden.
> +         *
> +         * The feature is part of the AMD64 spec, but the very first Intel
> +         * 64bit CPUs lacked the feature, and thereafter there was a
> +         * firmware knob to disable the feature. Undo the disable if
> +         * possible.
> +         *
> +         * All 64bit Intel CPUs support this MSR. If virtualised, expect
> +         * the hypervisor to either emulate the MSR or give us NX.
> +         */
> +        xor     %eax, %eax
> +        cpuid
> +        cmp     $X86_VENDOR_INTEL_EBX, %ebx
> +        jnz     .Lno_nx
> +        cmp     $X86_VENDOR_INTEL_EDX, %edx
> +        jnz     .Lno_nx
> +        cmp     $X86_VENDOR_INTEL_ECX, %ecx
> +        jnz     .Lno_nx
> +
> +        /* Clear the XD_DISABLE bit */
> +        mov    $MSR_IA32_MISC_ENABLE, %ecx

Parameter indentation here.

> diff --git a/xen/arch/x86/include/asm/x86-vendors.h b/xen/arch/x86/include/asm/x86-vendors.h
> index 0a37024cbd..9191da26d7 100644
> --- a/xen/arch/x86/include/asm/x86-vendors.h
> +++ b/xen/arch/x86/include/asm/x86-vendors.h
> @@ -12,9 +12,9 @@
>  #define X86_VENDOR_UNKNOWN 0
>  
>  #define X86_VENDOR_INTEL (1 << 0)
> -#define X86_VENDOR_INTEL_EBX 0x756e6547U /* "GenuineIntel" */
> -#define X86_VENDOR_INTEL_ECX 0x6c65746eU
> -#define X86_VENDOR_INTEL_EDX 0x49656e69U
> +#define X86_VENDOR_INTEL_EBX _AC(0x756e6547, U) /* "GenuineIntel" */
> +#define X86_VENDOR_INTEL_ECX _AC(0x6c65746e, U)
> +#define X86_VENDOR_INTEL_EDX _AC(0x49656e69, U)
>  
>  #define X86_VENDOR_AMD (1 << 1)
>  #define X86_VENDOR_AMD_EBX 0x68747541U /* "AuthenticAMD" */

And all vendors should get equivalent _AC() conversions.
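
i.e. something along these lines for the AMD block (the ECX/EDX values below
are quoted from memory; the remaining vendors in the header want the same
_AC() treatment):

    #define X86_VENDOR_AMD_EBX _AC(0x68747541, U) /* "AuthenticAMD" */
    #define X86_VENDOR_AMD_ECX _AC(0x444d4163, U)
    #define X86_VENDOR_AMD_EDX _AC(0x69746e65, U)
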

Can fix both on commit.

~Andrew
