Signed-off-by: Thomas Gleixner <t...@linutronix.de>

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 7292525e2557..f213d573038e 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -370,102 +370,6 @@ SYM_CODE_END(ret_from_fork)
 #endif
 .endm
 
-/*
- * Enters the IRQ stack if we're not already using it.  NMI-safe.  Clobbers
- * flags and puts old RSP into old_rsp, and leaves all other GPRs alone.
- * Requires kernel GSBASE.
- *
- * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
- */
-.macro ENTER_IRQ_STACK regs=1 old_rsp save_ret=0
-       DEBUG_ENTRY_ASSERT_IRQS_OFF
-
-       .if \save_ret
-       /*
-        * If save_ret is set, the original stack contains one additional
-        * entry -- the return address. Therefore, move the address one
-        * entry below %rsp to \old_rsp.
-        */
-       leaq    8(%rsp), \old_rsp
-       .else
-       movq    %rsp, \old_rsp
-       .endif
-
-       .if \regs
-       UNWIND_HINT_REGS base=\old_rsp
-       .endif
-
-       incl    PER_CPU_VAR(irq_count)
-       jnz     .Lirq_stack_push_old_rsp_\@
-
-       /*
-        * Right now, if we just incremented irq_count to zero, we've
-        * claimed the IRQ stack but we haven't switched to it yet.
-        *
-        * If anything is added that can interrupt us here without using IST,
-        * it must be *extremely* careful to limit its stack usage.  This
-        * could include kprobes and a hypothetical future IST-less #DB
-        * handler.
-        *
-        * The OOPS unwinder relies on the word at the top of the IRQ
-        * stack linking back to the previous RSP for the entire time we're
-        * on the IRQ stack.  For this to work reliably, we need to write
-        * it before we actually move ourselves to the IRQ stack.
-        */
-
-       movq    \old_rsp, PER_CPU_VAR(irq_stack_backing_store + IRQ_STACK_SIZE - 8)
-       movq    PER_CPU_VAR(hardirq_stack_ptr), %rsp
-
-#ifdef CONFIG_DEBUG_ENTRY
-       /*
-        * If the first movq above becomes wrong due to IRQ stack layout
-        * changes, the only way we'll notice is if we try to unwind right
-        * here.  Assert that we set up the stack right to catch this type
-        * of bug quickly.
-        */
-       cmpq    -8(%rsp), \old_rsp
-       je      .Lirq_stack_okay\@
-       ud2
-       .Lirq_stack_okay\@:
-#endif
-
-.Lirq_stack_push_old_rsp_\@:
-       pushq   \old_rsp
-
-       .if \regs
-       UNWIND_HINT_REGS indirect=1
-       .endif
-
-       .if \save_ret
-       /*
-        * Push the return address to the stack. This return address can
-        * be found at the "real" original RSP, which was offset by 8 at
-        * the beginning of this macro.
-        */
-       pushq   -8(\old_rsp)
-       .endif
-.endm
-
-/*
- * Undoes ENTER_IRQ_STACK.
- */
-.macro LEAVE_IRQ_STACK regs=1
-       DEBUG_ENTRY_ASSERT_IRQS_OFF
-       /* We need to be off the IRQ stack before decrementing irq_count. */
-       popq    %rsp
-
-       .if \regs
-       UNWIND_HINT_REGS
-       .endif
-
-       /*
-        * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming
-        * the irq stack but we're not on it.
-        */
-
-       decl    PER_CPU_VAR(irq_count)
-.endm
-
 /**
  * idtentry_body - Macro to emit code calling the C function
  * @cfunc:             C function to be called

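For readers tracing what the removed macros did, below is a rough user-space
C model of the ENTER_IRQ_STACK/LEAVE_IRQ_STACK protocol: the irq_count
claim (invariant: != -1 means the IRQ stack is in use), the back-link word
written at the top of the IRQ stack before the switch so the OOPS unwinder
keeps working in the window between the two steps, and the nested case that
stays on the IRQ stack. This is an illustrative sketch only, not kernel
code: the model_* names and MODEL_IRQ_STACK_SIZE are made up, plain globals
stand in for per-CPU variables, and a returned pointer stands in for %rsp.
It assumes hardirq_stack_ptr points at the top of irq_stack_backing_store,
which is what the CONFIG_DEBUG_ENTRY cmpq/ud2 check in the removed code
verifies.

  /*
   * Illustrative user-space model of the removed macros.  All names here
   * (model_*, MODEL_IRQ_STACK_SIZE) are made up for this sketch.
   */
  #include <assert.h>
  #include <stdio.h>

  #define MODEL_IRQ_STACK_SIZE 4096

  /* Per-CPU state in the real code; plain globals in this model. */
  static unsigned char irq_stack_backing_store[MODEL_IRQ_STACK_SIZE]
          __attribute__((aligned(16)));
  static int irq_count = -1;     /* invariant: != -1 => IRQ stack in use */

  /* Model of ENTER_IRQ_STACK (the regs/save_ret variants are omitted). */
  static void **model_enter_irq_stack(void **old_rsp)
  {
          void **sp;

          if (++irq_count == 0) {
                  /*
                   * First entry: write the back-link at the top of the
                   * IRQ stack *before* switching, so an unwinder running
                   * in the window between the two steps still finds the
                   * previous RSP.
                   */
                  sp = (void **)(irq_stack_backing_store +
                                 MODEL_IRQ_STACK_SIZE);
                  sp[-1] = old_rsp;       /* the "movq ... - 8" store */

                  /* the CONFIG_DEBUG_ENTRY cmpq/ud2 check, as an assert */
                  assert(sp[-1] == (void *)old_rsp);
          } else {
                  sp = old_rsp;           /* nested: already on IRQ stack */
          }

          *--sp = old_rsp;                /* pushq \old_rsp */
          return sp;
  }

  /* Model of LEAVE_IRQ_STACK. */
  static void **model_leave_irq_stack(void **sp)
  {
          void **old_rsp = *sp;   /* popq %rsp: back to the old stack */

          /* Must be off the IRQ stack before dropping the claim. */
          irq_count--;
          return old_rsp;
  }

  int main(void)
  {
          void *task_stack[32];
          void **rsp = &task_stack[32];   /* stand-in for the task stack */

          void **irq1 = model_enter_irq_stack(rsp);   /* switches stacks */
          void **irq2 = model_enter_irq_stack(irq1);  /* nested: no switch */

          rsp = model_leave_irq_stack(model_leave_irq_stack(irq2));
          assert(irq_count == -1 && rsp == &task_stack[32]);
          printf("irq_count back to %d, task stack restored\n", irq_count);
          return 0;
  }

Note that on first entry the later push rewrites the same top word the
early store already filled, which is why the link is valid for the entire
time the IRQ stack is in use.  The save_ret=1 variant additionally
re-pushes the return address found one word above the original RSP; that
detail is left out of the model above.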