All of the code paths that ended up doing IRET to usermode did
SWAPGS immediately beforehand.  Move the SWAPGS into the common
code.
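
As a rough sketch of the pattern (mirroring the hunks below), each of
these exit sites used to end with:

	SWAPGS
	jmp	restore_regs_and_return_to_usermode

and now ends with a single jump to the renamed common label, which does
the SWAPGS itself just before restoring registers:

	jmp	swapgs_restore_regs_and_return_to_usermode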

Signed-off-by: Andy Lutomirski <l...@kernel.org>
---
 arch/x86/entry/entry_64.S        | 26 ++++++++++----------------
 arch/x86/entry/entry_64_compat.S |  3 +--
 2 files changed, 11 insertions(+), 18 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 493e5e234d36..1909a4e42b81 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -254,7 +254,7 @@ return_from_SYSCALL_64:
        movq    RCX(%rsp), %rcx
        movq    RIP(%rsp), %r11
        cmpq    %rcx, %r11                      /* RCX == RIP */
-       jne     opportunistic_sysret_failed
+       jne     swapgs_restore_regs_and_return_to_usermode
 
        /*
         * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
@@ -272,14 +272,14 @@ return_from_SYSCALL_64:
 
        /* If this changed %rcx, it was not canonical */
        cmpq    %rcx, %r11
-       jne     opportunistic_sysret_failed
+       jne     swapgs_restore_regs_and_return_to_usermode
 
        cmpq    $__USER_CS, CS(%rsp)            /* CS must match SYSRET */
-       jne     opportunistic_sysret_failed
+       jne     swapgs_restore_regs_and_return_to_usermode
 
        movq    R11(%rsp), %r11
        cmpq    %r11, EFLAGS(%rsp)              /* R11 == RFLAGS */
-       jne     opportunistic_sysret_failed
+       jne     swapgs_restore_regs_and_return_to_usermode
 
        /*
         * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
@@ -300,12 +300,12 @@ return_from_SYSCALL_64:
         * would never get past 'stuck_here'.
         */
        testq   $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
-       jnz     opportunistic_sysret_failed
+       jnz     swapgs_restore_regs_and_return_to_usermode
 
        /* nothing to check for RSP */
 
        cmpq    $__USER_DS, SS(%rsp)            /* SS must match SYSRET */
-       jne     opportunistic_sysret_failed
+       jne     swapgs_restore_regs_and_return_to_usermode
 
        /*
         * We win! This label is here just for ease of understanding
@@ -318,10 +318,6 @@ syscall_return_via_sysret:
        movq    RSP(%rsp), %rsp
        UNWIND_HINT_EMPTY
        USERGS_SYSRET64
-
-opportunistic_sysret_failed:
-       SWAPGS
-       jmp     restore_regs_and_return_to_usermode
 END(entry_SYSCALL_64)
 
 ENTRY(stub_ptregs_64)
@@ -422,8 +418,7 @@ ENTRY(ret_from_fork)
        movq    %rsp, %rdi
        call    syscall_return_slowpath /* returns with IRQs disabled */
        TRACE_IRQS_ON                   /* user mode is traced as IRQS on */
-       SWAPGS
-       jmp     restore_regs_and_return_to_usermode
+       jmp     swapgs_restore_regs_and_return_to_usermode
 
 1:
        /* kernel thread */
@@ -611,15 +606,15 @@ GLOBAL(retint_user)
        mov     %rsp,%rdi
        call    prepare_exit_to_usermode
        TRACE_IRQS_IRETQ
-       SWAPGS
 
-GLOBAL(restore_regs_and_return_to_usermode)
+GLOBAL(swapgs_restore_regs_and_return_to_usermode)
 #ifdef CONFIG_DEBUG_ENTRY
        testl   $3, CS(%rsp)
        jnz     1f
        ud2
 1:
 #endif
+       SWAPGS
        RESTORE_EXTRA_REGS
        RESTORE_C_REGS
        REMOVE_PT_GPREGS_FROM_STACK 8
@@ -1340,8 +1335,7 @@ ENTRY(nmi)
         * Return back to user mode.  We must *not* do the normal exit
         * work, because we don't want to enable interrupts.
         */
-       SWAPGS
-       jmp     restore_regs_and_return_to_usermode
+       jmp     swapgs_restore_regs_and_return_to_usermode
 
 .Lnmi_from_kernel:
        /*
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 9ca014a99968..932b96ce1b06 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -336,8 +336,7 @@ ENTRY(entry_INT80_compat)
 
        /* Go back to user mode. */
        TRACE_IRQS_ON
-       SWAPGS
-       jmp     restore_regs_and_return_to_usermode
+       jmp     swapgs_restore_regs_and_return_to_usermode
 END(entry_INT80_compat)
 
 ENTRY(stub32_clone)
-- 
2.13.6
