Since preempt_schedule_irq() already re-enables and re-disables IRQs inside
its own need_resched() loop, the outer loop in the arch entry code is
redundant and can be dropped.
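
For reference, the core of preempt_schedule_irq() in kernel/sched/core.c
looks roughly like the sketch below (simplified, not the verbatim source);
it shows the need_resched() loop and the IRQ enable/disable referred to
above:

	/*
	 * Simplified sketch of preempt_schedule_irq(), illustrating the
	 * need_resched() loop; sanity checks and context tracking elided.
	 */
	asmlinkage __visible void __sched preempt_schedule_irq(void)
	{
		/* ... BUG_ON() checks and exception_enter() elided ... */

		do {
			preempt_disable();
			local_irq_enable();
			__schedule(true);	/* preempt = true */
			local_irq_disable();
			sched_preempt_enable_no_resched();
		} while (need_resched());

		/* ... exception_exit() elided ... */
	}

Since the loop only exits once need_resched() is clear, re-testing
_TIF_NEED_RESCHED and branching back in the asm caller buys nothing.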

Signed-off-by: Valentin Schneider <valentin.schnei...@arm.com>
Cc: Benjamin Herrenschmidt <b...@kernel.crashing.org>
Cc: Paul Mackerras <pau...@samba.org>
Cc: Michael Ellerman <m...@ellerman.id.au>
Cc: linuxppc-dev@lists.ozlabs.org
---
 arch/powerpc/kernel/entry_32.S | 6 +-----
 arch/powerpc/kernel/entry_64.S | 8 +-------
 2 files changed, 2 insertions(+), 12 deletions(-)

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 0768dfd8a64e..ff3fe3824a4a 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -896,11 +896,7 @@ resume_kernel:
         */
        bl      trace_hardirqs_off
 #endif
-1:     bl      preempt_schedule_irq
-       CURRENT_THREAD_INFO(r9, r1)
-       lwz     r3,TI_FLAGS(r9)
-       andi.   r0,r3,_TIF_NEED_RESCHED
-       bne-    1b
+       bl      preempt_schedule_irq
 #ifdef CONFIG_TRACE_IRQFLAGS
        /* And now, to properly rebalance the above, we tell lockdep they
         * are being turned back on, which will happen when we return
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 435927f549c4..9c86c6826856 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -857,13 +857,7 @@ resume_kernel:
         * sure we are soft-disabled first and reconcile irq state.
         */
        RECONCILE_IRQ_STATE(r3,r4)
-1:     bl      preempt_schedule_irq
-
-       /* Re-test flags and eventually loop */
-       CURRENT_THREAD_INFO(r9, r1)
-       ld      r4,TI_FLAGS(r9)
-       andi.   r0,r4,_TIF_NEED_RESCHED
-       bne     1b
+       bl      preempt_schedule_irq
 
        /*
         * arch_local_irq_restore() from preempt_schedule_irq above may
-- 
2.20.1
