From: Andi Kleen <a...@linux.intel.com>

When we go into deeper idle states the return buffer could be cleared
in MWAIT, but another thread that wakes up earlier might meanwhile
be poisoning the indirect branch predictor. Then when the return
buffer underflows there might be an uncontrolled indirect branch.

To guard against this, always fill the return buffer when exiting idle.
This is only enabled on Skylake.

v2:
Switch to using inline fill_return_buffer macro
Also handle mwait_idle
Port to new fill_return_buffer infrastructure
Signed-off-by: Andi Kleen <a...@linux.intel.com>
---
 arch/x86/include/asm/mwait.h | 10 +++++++++-
 arch/x86/kernel/process.c    |  1 +
 2 files changed, 10 insertions(+), 1 deletion(-)

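[Note for reviewers, not part of the patch: the sketch below shows the
general return-buffer-stuffing idea that a fill_return_buffer() helper
is built on, for anyone unfamiliar with the technique. It is only an
illustration under my own assumptions; the function name
rsb_stuff_sketch and the 32-entry depth are made up here and are not
the actual kernel infrastructure this patch uses.]

/*
 * Illustrative sketch only, not the real fill_return_buffer: refill
 * the CPU's return stack buffer by executing a chain of CALLs whose
 * return addresses are speculation traps, then drop the pushed
 * return addresses from the software stack again.
 */
static inline void rsb_stuff_sketch(void)
{
	asm volatile(
		"	mov	$16, %%eax	\n\t" /* 16 loops x 2 calls = 32 RSB entries */
		"1:	call	2f		\n\t" /* push a benign return address */
		"11:	pause			\n\t" /* speculation trap in case the */
		"	lfence			\n\t" /* CPU speculates through the ret */
		"	jmp	11b		\n\t"
		"2:	call	3f		\n\t" /* second call per loop iteration */
		"21:	pause			\n\t"
		"	lfence			\n\t"
		"	jmp	21b		\n\t"
		"3:	dec	%%eax		\n\t"
		"	jnz	1b		\n\t"
		"	add	$(32*8), %%rsp	\n\t" /* discard the 32 return addresses */
		::: "rax", "memory", "cc");
}

After this runs the return buffer holds 32 benign entries again, so a
later underflow cannot redirect speculation through an attacker-trained
indirect predictor entry.
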
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 39a2fb29378a..632b6b39fe01 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -6,6 +6,7 @@
 #include <linux/sched/idle.h>
 
 #include <asm/cpufeature.h>
+#include <asm/nospec-branch.h>
 
 #define MWAIT_SUBSTATE_MASK            0xf
 #define MWAIT_CSTATE_MASK              0xf
@@ -107,8 +108,15 @@ static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
                }
 
                __monitor((void *)&current_thread_info()->flags, 0, 0);
-               if (!need_resched())
+               if (!need_resched()) {
                        __mwait(eax, ecx);
+                       /*
+                        * idle could have cleared the return buffer,
+                        * so fill it to prevent uncontrolled
+                        * speculation.
+                        */
+                       fill_return_buffer();
+               }
        }
        current_clr_polling();
 }
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 3cb2486c47e4..9a7c1bb0e001 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -466,6 +466,7 @@ static __cpuidle void mwait_idle(void)
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
+               fill_return_buffer();
                trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable();
-- 
2.14.3
