This property is required for AMD's recommended mitigation for Branch Target Injection, but Xen needs to cope with being unable to detect or modify the MSR.
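For background, the mitigation in question uses lfence/jmp sequences in place of retpolines for indirect branches, and that is only a safe option when lfence is dispatch serialising.  A rough illustrative sketch of the shape of such a thunk (not part of this patch; the symbol name is made up):

    /*
     * Illustrative sketch only.  With lfence being Dispatch Serialising,
     * nothing younger than the lfence (including the indirect jmp) is
     * dispatched until the lfence completes, so the jmp executes with its
     * target architecturally resolved, rather than running for a long
     * speculation window on a potentially-poisoned prediction.
     */
    asm (".global example_indirect_thunk_rax\n"
         "example_indirect_thunk_rax:\n"
         "\tlfence\n"
         "\tjmp *%rax\n");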
Signed-off-by: Andrew Cooper <andrew.coop...@citrix.com>
---
v4:
 * New
v5:
 * Use mnemonics.
---
 xen/arch/x86/cpu/amd.c            | 35 ++++++++++++++++++++++++++++++++++-
 xen/include/asm-x86/cpufeature.h  |  1 +
 xen/include/asm-x86/cpufeatures.h |  1 +
 xen/include/asm-x86/msr-index.h   |  1 +
 4 files changed, 37 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 5f36ac7..db78b50 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -558,8 +558,41 @@ static void init_amd(struct cpuinfo_x86 *c)
 			wrmsr_amd_safe(0xc001100d, l, h & ~1);
 	}
 
+	/*
+	 * Attempt to set lfence to be Dispatch Serialising.  This MSR almost
+	 * certainly isn't virtualised (and Xen at least will leak the real
+	 * value in but silently discard writes), as well as being per-core
+	 * rather than per-thread, so do a full safe read/write/readback cycle
+	 * in the worst case.
+	 */
+	if (c->x86 == 0x0f || c->x86 == 0x11)
+		/* Always dispatch serialising on this hardware. */
+		__set_bit(X86_FEATURE_LFENCE_DISPATCH, c->x86_capability);
+	else if (c->x86 == 0x10 || c->x86 >= 0x12) {
+		if (rdmsr_safe(MSR_AMD64_DE_CFG, value))
+			/* Unable to read.  Assume the safer default. */
+			__clear_bit(X86_FEATURE_LFENCE_DISPATCH,
+				    c->x86_capability);
+		else if (value & AMD64_DE_CFG_LFENCE_SERIALISE)
+			/* Already dispatch serialising. */
+			__set_bit(X86_FEATURE_LFENCE_DISPATCH,
+				  c->x86_capability);
+		else if (wrmsr_safe(MSR_AMD64_DE_CFG,
+				    value | AMD64_DE_CFG_LFENCE_SERIALISE) ||
+			 rdmsr_safe(MSR_AMD64_DE_CFG, value) ||
+			 !(value & AMD64_DE_CFG_LFENCE_SERIALISE))
+			/* Attempt to set failed.  Assume the safer default. */
+			__clear_bit(X86_FEATURE_LFENCE_DISPATCH,
+				    c->x86_capability);
+		else
+			/* Successfully enabled! */
+			__set_bit(X86_FEATURE_LFENCE_DISPATCH,
+				  c->x86_capability);
+	}
+
 	/* MFENCE stops RDTSC speculation */
-	__set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
+	if (!cpu_has_lfence_dispatch)
+		__set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
 
 	switch(c->x86)
 	{
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index 84cc51d..adc333f 100644
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -104,6 +104,7 @@
 #define cpu_has_arch_perfmon    boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
 #define cpu_has_cpuid_faulting  boot_cpu_has(X86_FEATURE_CPUID_FAULTING)
 #define cpu_has_aperfmperf      boot_cpu_has(X86_FEATURE_APERFMPERF)
+#define cpu_has_lfence_dispatch boot_cpu_has(X86_FEATURE_LFENCE_DISPATCH)
 
 enum _cache_type {
     CACHE_TYPE_NULL = 0,
diff --git a/xen/include/asm-x86/cpufeatures.h b/xen/include/asm-x86/cpufeatures.h
index bc98227..58b37d6 100644
--- a/xen/include/asm-x86/cpufeatures.h
+++ b/xen/include/asm-x86/cpufeatures.h
@@ -22,3 +22,4 @@ XEN_CPUFEATURE(APERFMPERF,      (FSCAPINTS+0)*32+ 8) /* APERFMPERF */
 XEN_CPUFEATURE(MFENCE_RDTSC,    (FSCAPINTS+0)*32+ 9) /* MFENCE synchronizes RDTSC */
 XEN_CPUFEATURE(XEN_SMEP,        (FSCAPINTS+0)*32+10) /* SMEP gets used by Xen itself */
 XEN_CPUFEATURE(XEN_SMAP,        (FSCAPINTS+0)*32+11) /* SMAP gets used by Xen itself */
+XEN_CPUFEATURE(LFENCE_DISPATCH, (FSCAPINTS+0)*32+12) /* lfence set as Dispatch Serialising */
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index a834f3b..56f5359 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -207,6 +207,7 @@
 #define MSR_AMD64_IC_CFG		0xc0011021
 #define MSR_AMD64_DC_CFG		0xc0011022
 #define MSR_AMD64_DE_CFG		0xc0011029
+#define AMD64_DE_CFG_LFENCE_SERIALISE	(_AC(1, ULL) << 1)
 
 #define MSR_AMD64_DR0_ADDRESS_MASK	0xc0011027
 #define MSR_AMD64_DR1_ADDRESS_MASK	0xc0011019
-- 
2.1.4