Convert indirect jumps in the core 32/64-bit entry assembler code to use
non-speculative sequences when CONFIG_RETPOLINE is enabled.
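
As a reminder of what such a non-speculative sequence looks like (a sketch
only; the real macros come from asm/nospec-branch.h and the labels here are
purely illustrative), an indirect jump through %rdi becomes roughly:

	call	2f		/* pushes the address of 1: as return addr */
1:
	lfence			/* speculation gets trapped in this loop */
	jmp	1b
2:
	mov	%rdi, (%rsp)	/* replace return address with real target */
	ret			/* "return" to the intended branch target */

The CPU's return-address prediction points at the capture loop at 1:, so any
speculation spins on the lfence instead of following an attacker-controlled
indirect branch target.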

KPTI complicates this a little; the one in entry_SYSCALL_64_trampoline
can't just jump to the thunk because the thunk isn't mapped. So it
gets its own copy of the thunk, inline.

Signed-off-by: David Woodhouse <d...@amazon.co.uk>
---
 arch/x86/entry/entry_32.S |  5 +++--
 arch/x86/entry/entry_64.S | 20 ++++++++++++++++----
 2 files changed, 19 insertions(+), 6 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index ace8f321a5a1..abd1e5dd487d 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -44,6 +44,7 @@
 #include <asm/asm.h>
 #include <asm/smap.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 	.section .entry.text, "ax"
 
@@ -290,7 +291,7 @@ ENTRY(ret_from_fork)
 
 	/* kernel thread */
 1:	movl	%edi, %eax
-	call	*%ebx
+	NOSPEC_CALL ebx
 	/*
 	 * A kernel thread is allowed to return here after successfully
 	 * calling do_execve().  Exit to userspace to complete the execve()
@@ -919,7 +920,7 @@ common_exception:
 	movl	%ecx, %es
 	TRACE_IRQS_OFF
 	movl	%esp, %eax			# pt_regs pointer
-	call	*%edi
+	NOSPEC_CALL edi
 	jmp	ret_from_exception
 END(common_exception)
 
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index f048e384ff54..9e449701115a 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -37,6 +37,7 @@
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 #include <linux/err.h>
 
 #include "calling.h"
@@ -191,7 +192,17 @@ ENTRY(entry_SYSCALL_64_trampoline)
 	 */
 	pushq	%rdi
 	movq	$entry_SYSCALL_64_stage2, %rdi
-	jmp	*%rdi
+	/*
+	 * Open-code the retpoline from retpoline.S, because we can't
+	 * just jump to it directly.
+	 */
+	ALTERNATIVE "call 2f", "jmp *%rdi", X86_BUG_NO_RETPOLINE
+1:
+	lfence
+	jmp	1b
+2:
+	mov	%rdi, (%rsp)
+	ret
 END(entry_SYSCALL_64_trampoline)
 
 	.popsection
@@ -270,7 +281,8 @@ entry_SYSCALL_64_fastpath:
 	 * It might end up jumping to the slow path.  If it jumps, RAX
 	 * and all argument registers are clobbered.
 	 */
-	call	*sys_call_table(, %rax, 8)
+	movq	sys_call_table(, %rax, 8), %rax
+	NOSPEC_CALL rax
 .Lentry_SYSCALL_64_after_fastpath_call:
 
 	movq	%rax, RAX(%rsp)
@@ -442,7 +454,7 @@ ENTRY(stub_ptregs_64)
 	jmp	entry_SYSCALL64_slow_path
 
 1:
-	jmp	*%rax				/* Called from C */
+	NOSPEC_JMP rax				/* Called from C */
 END(stub_ptregs_64)
 
 .macro ptregs_stub func
@@ -521,7 +533,7 @@ ENTRY(ret_from_fork)
 1:
 	/* kernel thread */
 	movq	%r12, %rdi
-	call	*%rbx
+	NOSPEC_CALL rbx
 	/*
 	 * A kernel thread is allowed to return here after successfully
 	 * calling do_execve().  Exit to userspace to complete the execve()
-- 
2.14.3