Create the cpu_smt_enabled static key to indicate whether SMT is
in use.  SMT-specific code paths are executed only when this key
is enabled, avoiding the overhead of a memory load and compare on
the fast paths when SMT is disabled or unsupported.

Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
---
 arch/x86/kernel/cpu/bugs.c |  2 +-
 arch/x86/kvm/vmx.c         |  2 +-
 include/linux/cpu.h        |  1 +
 kernel/cpu.c               | 12 ++++++++++--
 4 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 6ed82ea..0338fa1 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -358,7 +358,7 @@ void arch_smt_update(void)
 
        mutex_lock(&spec_ctrl_mutex);
        mask = x86_spec_ctrl_base;
-       if (cpu_smt_control == CPU_SMT_ENABLED)
+       if (static_branch_likely(&cpu_smt_enabled))
                mask |= SPEC_CTRL_STIBP;
        else
                mask &= ~SPEC_CTRL_STIBP;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 533a327..8ec0ea3 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11072,7 +11072,7 @@ static int vmx_vm_init(struct kvm *kvm)
                         * Warn upon starting the first VM in a potentially
                         * insecure environment.
                         */
-                       if (cpu_smt_control == CPU_SMT_ENABLED)
+                       if (static_branch_likely(&cpu_smt_enabled))
                                pr_warn_once(L1TF_MSG_SMT);
                        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
                                pr_warn_once(L1TF_MSG_L1D);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 218df7f..b54f085 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -188,5 +188,6 @@ static inline void cpu_smt_disable(bool force) { }
 static inline void cpu_smt_check_topology_early(void) { }
 static inline void cpu_smt_check_topology(void) { }
 #endif
+DECLARE_STATIC_KEY_TRUE(cpu_smt_enabled);
 
 #endif /* _LINUX_CPU_H_ */
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3adecda..ad28afc 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -349,6 +349,8 @@ EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 #ifdef CONFIG_HOTPLUG_SMT
 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
 EXPORT_SYMBOL_GPL(cpu_smt_control);
+DEFINE_STATIC_KEY_TRUE(cpu_smt_enabled);
+EXPORT_SYMBOL_GPL(cpu_smt_enabled);
 
 static bool cpu_smt_available __read_mostly;
 
@@ -364,6 +366,7 @@ void __init cpu_smt_disable(bool force)
        } else {
                cpu_smt_control = CPU_SMT_DISABLED;
        }
+       static_branch_disable(&cpu_smt_enabled);
 }
 
 /*
@@ -373,8 +376,10 @@ void __init cpu_smt_disable(bool force)
  */
 void __init cpu_smt_check_topology_early(void)
 {
-       if (!topology_smt_supported())
+       if (!topology_smt_supported()) {
                cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+               static_branch_disable(&cpu_smt_enabled);
+       }
 }
 
 /*
@@ -386,8 +391,10 @@ void __init cpu_smt_check_topology_early(void)
  */
 void __init cpu_smt_check_topology(void)
 {
-       if (!cpu_smt_available)
+       if (!cpu_smt_available) {
                cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+               static_branch_disable(&cpu_smt_enabled);
+       }
 }
 
 static int __init smt_cmdline_disable(char *str)
@@ -2072,6 +2079,7 @@ static int cpuhp_smt_enable(void)
 
        cpu_maps_update_begin();
        cpu_smt_control = CPU_SMT_ENABLED;
+       static_branch_enable(&cpu_smt_enabled);
        arch_smt_update();
        for_each_present_cpu(cpu) {
                /* Skip online CPUs and CPUs on offline nodes */
-- 
2.9.4

Reply via email to