Now that NT is filtered by the SYSENTER entry code, it is safe to skip
saving and restoring flags on task switch.  Also remove a leftover
reset of flags on 64-bit fork.
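
For reference, the filtering this relies on is already present in the
SYSENTER entry path and is not touched by this patch.  A rough sketch
of the idea (label names are illustrative; the real entry code may test
additional flags besides NT):

        testl   $X86_EFLAGS_NT, PT_EFLAGS(%esp) /* NT leaked in from user? */
        jnz     .Lsysenter_fix_flags
.Lsysenter_flags_fixed:
        ...
.Lsysenter_fix_flags:
        pushl   $X86_EFLAGS_FIXED               /* known-good flags: reserved bit 1 only */
        popfl
        jmp     .Lsysenter_flags_fixed

Since NT can no longer be set on kernel entry, this is what makes it
safe to drop the pushfl/popfl pair from switch_to() below.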

Signed-off-by: Brian Gerst <brge...@gmail.com>
---
 arch/x86/entry/entry_32.S        | 4 ----
 arch/x86/entry/entry_64.S        | 3 ---
 arch/x86/include/asm/switch_to.h | 4 +---
 3 files changed, 1 insertion(+), 10 deletions(-)
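
For reference, the constants in the resets removed below decode as a
known-good EFLAGS image for the freshly forked task, with NT clear:

        pushl   $0x0202         /* X86_EFLAGS_IF | X86_EFLAGS_FIXED (32-bit) */
        popfl
        pushq   $0x0002         /* X86_EFLAGS_FIXED only (64-bit) */
        popfq

With NT already filtered at kernel entry, these resets appear redundant,
which is why they can go along with the switch_to() flag save/restore.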

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 10868aa..c84d99b 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -209,8 +209,6 @@ ENTRY(ret_from_fork)
        call    schedule_tail
        GET_THREAD_INFO(%ebp)
        popl    %eax
-       pushl   $0x0202                         # Reset kernel eflags
-       popfl
 
        /* When we fork, we trace the syscall return in the child, too. */
        movl    %esp, %eax
@@ -223,8 +221,6 @@ ENTRY(ret_from_kernel_thread)
        call    schedule_tail
        GET_THREAD_INFO(%ebp)
        popl    %eax
-       pushl   $0x0202                         # Reset kernel eflags
-       popfl
        movl    PT_EBP(%esp), %eax
        call    *PT_EBX(%esp)
        movl    $0, PT_EAX(%esp)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 6344629..9ee0da1 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -372,9 +372,6 @@ END(ptregs_\func)
 ENTRY(ret_from_fork)
        LOCK ; btr $TIF_FORK, TI_flags(%r8)
 
-       pushq   $0x0002
-       popfq                                   /* reset kernel eflags */
-
        call    schedule_tail                   /* rdi: 'prev' task parameter */
 
        testb   $3, CS(%rsp)                    /* from kernel_thread? */
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 751bf4b..8f321a1 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -39,8 +39,7 @@ do {                                                           \
         */                                                             \
        unsigned long ebx, ecx, edx, esi, edi;                          \
                                                                        \
-       asm volatile("pushfl\n\t"               /* save    flags */     \
-                    "pushl %%ebp\n\t"          /* save    EBP   */     \
+       asm volatile("pushl %%ebp\n\t"          /* save    EBP   */     \
                     "movl %%esp,%[prev_sp]\n\t"        /* save    ESP   */ \
                     "movl %[next_sp],%%esp\n\t"        /* restore ESP   */ \
                     "movl $1f,%[prev_ip]\n\t"  /* save    EIP   */     \
@@ -49,7 +48,6 @@ do {                                                           \
                     "jmp __switch_to\n"        /* regparm call  */     \
                     "1:\t"                                             \
                     "popl %%ebp\n\t"           /* restore EBP   */     \
-                    "popfl\n"                  /* restore flags */     \
                                                                        \
                     /* output parameters */                            \
                     : [prev_sp] "=m" (prev->thread.sp),                \
-- 
2.5.5
