Commit-ID:  51ab1433794d9f89257fba187c4f4a8fdfccd96d
Gitweb:     https://git.kernel.org/tip/51ab1433794d9f89257fba187c4f4a8fdfccd96d
Author:     David Woodhouse <d...@amazon.co.uk>
AuthorDate: Tue, 9 Jan 2018 14:43:15 +0000
Committer:  Thomas Gleixner <t...@linutronix.de>
CommitDate: Tue, 9 Jan 2018 16:17:54 +0100
x86/retpoline/checksum32: Convert assembler indirect jumps

Convert all indirect jumps in 32bit checksum assembler code to use
non-speculative sequences when CONFIG_RETPOLINE is enabled.

Signed-off-by: David Woodhouse <d...@amazon.co.uk>
Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Acked-by: Arjan van de Ven <ar...@linux.intel.com>
Acked-by: Ingo Molnar <mi...@kernel.org>
Cc: gno...@lxorguk.ukuu.org.uk
Cc: Rik van Riel <r...@redhat.com>
Cc: Andi Kleen <a...@linux.intel.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Jiri Kosina <ji...@kernel.org>
Cc: Andy Lutomirski <l...@amacapital.net>
Cc: Dave Hansen <dave.han...@intel.com>
Cc: Kees Cook <keesc...@google.com>
Cc: Tim Chen <tim.c.c...@linux.intel.com>
Cc: Greg Kroah-Hartman <gre...@linux-foundation.org>
Cc: Paul Turner <p...@google.com>
Link: https://lkml.kernel.org/r/1515508997-6154-10-git-send-email-d...@amazon.co.uk

---
 arch/x86/lib/checksum_32.S | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 4d34bb5..46e71a7 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -29,7 +29,8 @@
 #include <asm/errno.h>
 #include <asm/asm.h>
 #include <asm/export.h>
-
+#include <asm/nospec-branch.h>
+
 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
  */
@@ -156,7 +157,7 @@ ENTRY(csum_partial)
 	negl %ebx
 	lea 45f(%ebx,%ebx,2), %ebx
 	testl %esi, %esi
-	jmp *%ebx
+	JMP_NOSPEC %ebx
 
 	# Handle 2-byte-aligned regions
 20:	addw (%esi), %ax
@@ -439,7 +440,7 @@ ENTRY(csum_partial_copy_generic)
 	andl $-32,%edx
 	lea 3f(%ebx,%ebx), %ebx
 	testl %esi, %esi
-	jmp *%ebx
+	JMP_NOSPEC %ebx
 1:	addl $64,%esi
 	addl $64,%edi
 	SRC(movb -32(%edx),%bl)	; SRC(movb (%edx),%bl)