On 11/27/2018 09:25 AM, Lendacky, Thomas wrote:
>> --- a/arch/x86/kernel/cpu/bugs.c
>> +++ b/arch/x86/kernel/cpu/bugs.c
>> @@ -148,6 +148,10 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl,
>>                  static_cpu_has(X86_FEATURE_AMD_SSBD))
>>                      hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
>>  
>> +            /* Conditional STIBP enabled? */
>> +            if (static_branch_unlikely(&switch_to_cond_stibp))
>> +                    hostval |= stibp_tif_to_spec_ctrl(ti->flags);
>> +
>>              if (hostval != guestval) {
>>                      msrval = setguest ? guestval : hostval;
>>                      wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
>> --- a/arch/x86/kernel/process.c
>> +++ b/arch/x86/kernel/process.c
>> @@ -406,6 +406,11 @@ static __always_inline void spec_ctrl_update_msr(unsigned long tifn)
>>      if (static_cpu_has(X86_FEATURE_SSBD))
>>              msr |= ssbd_tif_to_spec_ctrl(tifn);
> 
> I did some quick testing and found my original logic was flawed. Since
> spec_ctrl_update_msr() can now be called for STIBP, an additional check
> is needed before setting the SSBD MSR bit.
> 
> Both X86_FEATURE_VIRT_SSBD and X86_FEATURE_LS_CFG_SSBD cause
> X86_FEATURE_SSBD to be set. Before this patch, spec_ctrl_update_msr() was
> only called if X86_FEATURE_SSBD was set and neither of the other SSBD
> features was in use. But now STIBP can cause spec_ctrl_update_msr() to be
> called, setting the SSBD MSR bit when it shouldn't be set (which could
> result in a GP fault).
> 
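
Right, with spec_ctrl_update_msr() left in place, the extra check you
describe would look something like this (only a sketch, reusing the
feature bits and helper you mention):

        /*
         * VIRT_SSBD and LS_CFG_SSBD also set X86_FEATURE_SSBD, but on
         * those systems the SSBD bit must not be written to
         * MSR_IA32_SPEC_CTRL.
         */
        if (static_cpu_has(X86_FEATURE_SSBD) &&
            !static_cpu_has(X86_FEATURE_VIRT_SSBD) &&
            !static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                msr |= ssbd_tif_to_spec_ctrl(tifn);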

But I think it will be cleaner to just fold the MSR update into
__speculation_ctrl_update() to fix this issue, and to carry the SSBD bit
on a STIBP-only update so it doesn't get cleared when just TIF_SPEC_IB
changes.

Something like this perhaps.

Thanks.

Tim

---

diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 3f5e351..614ec51 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -398,25 +398,6 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
        wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
 }
 
-static __always_inline void spec_ctrl_update_msr(unsigned long tifn)
-{
-       u64 msr = x86_spec_ctrl_base;
-
-       /*
-        * If X86_FEATURE_SSBD is not set, the SSBD bit is not to be
-        * touched.
-        */
-       if (static_cpu_has(X86_FEATURE_SSBD))
-               msr |= ssbd_tif_to_spec_ctrl(tifn);
-
-       /* Only evaluate if conditional STIBP is enabled */
-       if (IS_ENABLED(CONFIG_SMP) &&
-           static_branch_unlikely(&switch_to_cond_stibp))
-               msr |= stibp_tif_to_spec_ctrl(tifn);
-
-       wrmsrl(MSR_IA32_SPEC_CTRL, msr);
-}
-
 /*
  * Update the MSRs managing speculation control, during context switch.
  *
@@ -428,6 +409,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
 {
        unsigned long tif_diff = tifp ^ tifn;
        bool updmsr = false;
+       u64 msr = x86_spec_ctrl_base;
 
        /*
         * If TIF_SSBD is different, select the proper mitigation
@@ -440,8 +422,10 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
                        amd_set_ssb_virt_state(tifn);
                else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                        amd_set_core_ssb_state(tifn);
-               else if (static_cpu_has(X86_FEATURE_SSBD))
+               else if (static_cpu_has(X86_FEATURE_SSBD)) {
                        updmsr  = true;
+                       msr |= ssbd_tif_to_spec_ctrl(tifn);
+               }
        }
 
        /*
@@ -449,11 +433,18 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
         * otherwise avoid the MSR write.
         */
        if (IS_ENABLED(CONFIG_SMP) &&
-           static_branch_unlikely(&switch_to_cond_stibp))
+           static_branch_unlikely(&switch_to_cond_stibp)) {
                updmsr |= !!(tif_diff & _TIF_SPEC_IB);
+               msr |= stibp_tif_to_spec_ctrl(tifn);
+               /* Keep SSBD intact on a STIBP-only update */
+               if (static_cpu_has(X86_FEATURE_SSBD) &&
+                   !static_cpu_has(X86_FEATURE_VIRT_SSBD) &&
+                   !static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+                       msr |= ssbd_tif_to_spec_ctrl(tifn);
+       }
 
        if (updmsr)
-               spec_ctrl_update_msr(tifn);
+               wrmsrl(MSR_IA32_SPEC_CTRL, msr);
 }
 
 void speculation_ctrl_update(unsigned long tif)
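
A forced update still does the right thing with the fold, if I read the
series correctly: speculation_ctrl_update() passes the complement of the
flags as the previous value, so every speculation bit shows up in
tif_diff and the whole MSR image gets rebuilt from x86_spec_ctrl_base
(caller quoted from memory):

        void speculation_ctrl_update(unsigned long tif)
        {
                /* Forced update. Make sure all relevant TIF flags are different */
                preempt_disable();
                __speculation_ctrl_update(~tif, tif);
                preempt_enable();
        }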
