This patch provides application-property-based spectre_v2 protection
with STIBP against attacks from another application running on a
sibling hyper-thread.  For a security-sensitive non-dumpable
application, STIBP is turned on before switching to it on Intel
processors vulnerable to spectre_v2.

Signed-off-by: Tim Chen <tim.c.c...@linux.intel.com>
---
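
Note, not part of the commit message: under this scheme an application
opts into the lite STIBP protection simply by making itself
non-dumpable.  A minimal userspace sketch, assuming nothing beyond the
standard prctl(2) interface:

	#include <stdio.h>
	#include <sys/prctl.h>

	int main(void)
	{
		/*
		 * Clearing the dumpable attribute marks this process as
		 * security sensitive; with the lite mitigation the kernel
		 * then sets TIF_STIBP and turns STIBP on in
		 * MSR_IA32_SPEC_CTRL before switching to this task.
		 */
		if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0) != 0) {
			perror("prctl(PR_SET_DUMPABLE)");
			return 1;
		}

		/* ... run the security sensitive work ... */
		return 0;
	}

Setuid/setgid binaries are already made non-dumpable by the kernel
(with the default fs.suid_dumpable setting), so they get this
protection without any source change.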
 arch/x86/include/asm/msr-index.h   |  3 ++-
 arch/x86/include/asm/spec-ctrl.h   | 12 ++++++++++++
 arch/x86/include/asm/thread_info.h |  4 +++-
 arch/x86/kernel/cpu/bugs.c         | 12 ++++++++++--
 arch/x86/kernel/process.c          |  9 +++++----
 arch/x86/mm/tlb.c                  | 22 ++++++++++++++++++++++
 6 files changed, 54 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4731f0c..0e43388 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -41,7 +41,8 @@
 
 #define MSR_IA32_SPEC_CTRL             0x00000048 /* Speculation Control */
 #define SPEC_CTRL_IBRS                 (1 << 0)   /* Indirect Branch Restricted Speculation */
-#define SPEC_CTRL_STIBP                (1 << 1)   /* Single Thread Indirect Branch Predictors */
+#define SPEC_CTRL_STIBP_SHIFT          1          /* Single Thread Indirect Branch Predictor bit */
+#define SPEC_CTRL_STIBP                (1 << SPEC_CTRL_STIBP_SHIFT)   /* Single Thread Indirect Branch Predictors */
 #define SPEC_CTRL_SSBD_SHIFT           2          /* Speculative Store Bypass Disable bit */
 #define SPEC_CTRL_SSBD                 (1 << SPEC_CTRL_SSBD_SHIFT)   /* Speculative Store Bypass Disable */
 
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
index ae7c2c5..6a962b8 100644
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -53,12 +53,24 @@ static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
        return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
 }
 
+static inline u64 stibp_tif_to_spec_ctrl(u64 tifn)
+{
+       BUILD_BUG_ON(TIF_STIBP < SPEC_CTRL_STIBP_SHIFT);
+       return (tifn & _TIF_STIBP) >> (TIF_STIBP - SPEC_CTRL_STIBP_SHIFT);
+}
+
 static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
 {
        BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
        return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
 }
 
+static inline unsigned long stibp_spec_ctrl_to_tif(u64 spec_ctrl)
+{
+       BUILD_BUG_ON(TIF_STIBP < SPEC_CTRL_STIBP_SHIFT);
+       return (spec_ctrl & SPEC_CTRL_STIBP) << (TIF_STIBP - SPEC_CTRL_STIBP_SHIFT);
+}
+
 static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
 {
        return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
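
Aside, not part of the patch: the helpers above rely on TIF_STIBP
(bit 9) sitting at a higher bit position than SPEC_CTRL_STIBP (bit 1),
mirroring the existing SSBD pair; the BUILD_BUG_ON guards against the
shift count going negative.  A standalone sketch of the identity being
relied on, with the constants copied from this patch:

	#include <assert.h>
	#include <stdint.h>

	#define SPEC_CTRL_STIBP_SHIFT	1
	#define SPEC_CTRL_STIBP		(1 << SPEC_CTRL_STIBP_SHIFT)
	#define TIF_STIBP		9
	#define _TIF_STIBP		(1 << TIF_STIBP)

	int main(void)
	{
		uint64_t tifn = _TIF_STIBP;

		/* TIF bit 9 -> SPEC_CTRL bit 1: shift right by 9 - 1 = 8 */
		assert(((tifn & _TIF_STIBP) >> (TIF_STIBP - SPEC_CTRL_STIBP_SHIFT))
		       == SPEC_CTRL_STIBP);
		/* SPEC_CTRL bit 1 -> TIF bit 9: shift left by the same amount */
		assert(((uint64_t)SPEC_CTRL_STIBP << (TIF_STIBP - SPEC_CTRL_STIBP_SHIFT))
		       == _TIF_STIBP);
		return 0;
	}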
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 2ff2a30..40c58c286 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -83,6 +83,7 @@ struct thread_info {
 #define TIF_SYSCALL_EMU                6       /* syscall emulation active */
 #define TIF_SYSCALL_AUDIT      7       /* syscall auditing active */
 #define TIF_SECCOMP            8       /* secure computing */
+#define TIF_STIBP              9       /* Single Thread Indirect Branch Prediction */
 #define TIF_USER_RETURN_NOTIFY 11      /* notify kernel of userspace return */
 #define TIF_UPROBE             12      /* breakpointed or singlestepping */
 #define TIF_PATCH_PENDING      13      /* pending live patching update */
@@ -110,6 +111,7 @@ struct thread_info {
 #define _TIF_SYSCALL_EMU       (1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
+#define _TIF_STIBP             (1 << TIF_STIBP)
 #define _TIF_USER_RETURN_NOTIFY        (1 << TIF_USER_RETURN_NOTIFY)
 #define _TIF_UPROBE            (1 << TIF_UPROBE)
 #define _TIF_PATCH_PENDING     (1 << TIF_PATCH_PENDING)
@@ -146,7 +148,7 @@ struct thread_info {
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW                                                        \
-       (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
+       (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD|_TIF_STIBP)
 
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c967012..358f2b1 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -187,6 +187,9 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
                    static_cpu_has(X86_FEATURE_AMD_SSBD))
                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 
+               if (static_branch_unlikely(&spectre_v2_app_lite))
+                       hostval |= stibp_tif_to_spec_ctrl(ti->flags);
+
                if (hostval != guestval) {
                        msrval = setguest ? guestval : hostval;
                        wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
@@ -383,6 +386,11 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 
 static bool stibp_needed(void)
 {
+       /*
+        * Determine if we want to leave STIBP always on.
+        * With the lite option, STIBP is instead enabled per process,
+        * based on its TIF_STIBP flag, at context switch time.
+        */
        if (static_branch_unlikely(&spectre_v2_app_lite))
                return false;
 
@@ -958,14 +966,14 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 			ret = sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
 			       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB-lite" : "",
 			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
-			       (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
+			       ", STIBP-lite",
 			       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
 			       spectre_v2_module_string());
 		else
 			ret = sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
 			       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB-strict" : "",
 			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
-			       (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
+			       (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP-strict" : "",
 			       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
 			       spectre_v2_module_string());
                mutex_unlock(&spec_ctrl_mutex);
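
With the lite option active, the spectre_v2 sysfs file will now report
the new mitigation strings; illustratively (the exact line depends on
which mitigations are active at boot):

	$ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
	Mitigation: Full generic retpoline, IBPB-lite, STIBP-lite, RSB filling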
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c93fcfd..878301d 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -395,9 +395,10 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
        wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
 }
 
-static __always_inline void intel_set_ssb_state(unsigned long tifn)
+static __always_inline void intel_set_spec_ctrl_state(unsigned long tifn)
 {
-       u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+       u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn)
+                                    | stibp_tif_to_spec_ctrl(tifn);
 
        wrmsrl(MSR_IA32_SPEC_CTRL, msr);
 }
@@ -409,7 +410,7 @@ static __always_inline void __speculative_store_bypass_update(unsigned long tifn
        else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                amd_set_core_ssb_state(tifn);
        else
-               intel_set_ssb_state(tifn);
+               intel_set_spec_ctrl_state(tifn);
 }
 
 void speculative_store_bypass_update(unsigned long tif)
@@ -451,7 +452,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
        if ((tifp ^ tifn) & _TIF_NOCPUID)
                set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
 
-       if ((tifp ^ tifn) & _TIF_SSBD)
+       if ((tifp ^ tifn) & (_TIF_SSBD | _TIF_STIBP))
                __speculative_store_bypass_update(tifn);
 }
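
Aside, as a worked example of the MSR composition above: with both
TIF_SSBD and TIF_STIBP set in the incoming task's flags,
intel_set_spec_ctrl_state() writes x86_spec_ctrl_base | SPEC_CTRL_SSBD
| SPEC_CTRL_STIBP, i.e. bits 2 and 1 on top of the base value.  For a
base value of 0 that is a wrmsrl of 0x6 to MSR_IA32_SPEC_CTRL (0x48).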
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 54780a8..dd70bb4 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -205,6 +205,25 @@ static bool ibpb_needed(struct task_struct *tsk, u64 last_ctx_id)
                return (__ptrace_may_access(tsk, PTRACE_MODE_IBPB));
 }
 
+static void set_stibp(struct task_struct *tsk)
+{
+       /*
+        * In lite protection mode, set STIBP only
+        * for non-dumpable processes.
+        */
+
+       if (!static_branch_unlikely(&spectre_v2_app_lite))
+               return;
+
+       if (!tsk || !tsk->mm)
+               return;
+
+       if (get_dumpable(tsk->mm) != SUID_DUMP_USER)
+               test_and_set_tsk_thread_flag(tsk, TIF_STIBP);
+       else
+               test_and_clear_tsk_thread_flag(tsk, TIF_STIBP);
+}
+
 void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                        struct task_struct *tsk)
 {
@@ -296,6 +315,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                                ibpb_needed(tsk, last_ctx_id))
                        indirect_branch_prediction_barrier();
 
+               if (static_cpu_has(X86_FEATURE_STIBP))
+                       set_stibp(tsk);
+
                if (IS_ENABLED(CONFIG_VMAP_STACK)) {
                        /*
                         * If our current stack is in vmalloc space and isn't
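
For reference, not part of the patch: get_dumpable() returns one of
SUID_DUMP_DISABLE (0), SUID_DUMP_USER (1) or SUID_DUMP_ROOT (2), so
set_stibp() above enables STIBP whenever the dumpable state is anything
other than the normal SUID_DUMP_USER, i.e. both for fully non-dumpable
tasks and for tasks limited to root-only dumps.  Restated as a
standalone sketch (wants_stibp is a hypothetical name used only for
illustration):

	/* Values mirror include/linux/sched/coredump.h */
	#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
	#define SUID_DUMP_USER		1	/* Dump as user of process */
	#define SUID_DUMP_ROOT		2	/* Dump as root */

	/* The decision made by set_stibp(), in isolation */
	static int wants_stibp(int dumpable)
	{
		return dumpable != SUID_DUMP_USER;	/* DISABLE or ROOT */
	}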
-- 
2.9.4
