x86/irq/64: Rename the per-cpu irq_stack_ptr variable to hardirq_stack_ptr. Preparatory patch to share code with 32bit. Signed-off-by: Thomas Gleixner <t...@linutronix.de> --- arch/x86/entry/entry_64.S | 2 +- arch/x86/include/asm/processor.h | 2 +- arch/x86/kernel/cpu/common.c | 2 +- arch/x86/kernel/dumpstack_64.c | 2 +- arch/x86/kernel/irq_64.c | 2 +- arch/x86/kernel/setup_percpu.c | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-)
--- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -431,7 +431,7 @@ END(irq_entries_start) */ movq \old_rsp, PER_CPU_VAR(irq_stack_union + IRQ_STACK_SIZE - 8) - movq PER_CPU_VAR(irq_stack_ptr), %rsp + movq PER_CPU_VAR(hardirq_stack_ptr), %rsp #ifdef CONFIG_DEBUG_ENTRY /* --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -396,7 +396,7 @@ static inline unsigned long cpu_kernelmo return (unsigned long)per_cpu(irq_stack_union.gs_base, cpu); } -DECLARE_PER_CPU(char *, irq_stack_ptr); +DECLARE_PER_CPU(char *, hardirq_stack_ptr); DECLARE_PER_CPU(unsigned int, irq_count); extern asmlinkage void ignore_sysret(void); --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1510,7 +1510,7 @@ DEFINE_PER_CPU(struct task_struct *, cur &init_task; EXPORT_PER_CPU_SYMBOL(current_task); -DEFINE_PER_CPU(char *, irq_stack_ptr) = +DEFINE_PER_CPU(char *, hardirq_stack_ptr) = init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE; DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1; --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -120,7 +120,7 @@ static bool in_exception_stack(unsigned static bool in_irq_stack(unsigned long *stack, struct stack_info *info) { - unsigned long *end = (unsigned long *)this_cpu_read(irq_stack_ptr); + unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr); unsigned long *begin = end - (IRQ_STACK_SIZE / sizeof(long)); /* --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -56,7 +56,7 @@ static inline void stack_overflow_check( regs->sp <= curbase + THREAD_SIZE) return; - irq_stack_top = (u64)__this_cpu_read(irq_stack_ptr); + irq_stack_top = (u64)__this_cpu_read(hardirq_stack_ptr); irq_stack_bottom = irq_stack_top - IRQ_STACK_SIZE + STACK_MARGIN; if (regs->sp >= irq_stack_bottom && regs->sp <= irq_stack_top) return; --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -245,7 +245,7 @@ void __init 
setup_per_cpu_areas(void) early_per_cpu_map(x86_cpu_to_logical_apicid, cpu); #endif #ifdef CONFIG_X86_64 - per_cpu(irq_stack_ptr, cpu) = + per_cpu(hardirq_stack_ptr, cpu) = per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE; #endif