Since only the virtual address of the allocated blocks is used, let's use functions that return the virtual address directly.
Those functions have the added advantage of zeroing the block.

Suggested-by: Mike Rapoport <r...@linux.ibm.com>
Signed-off-by: Christophe Leroy <christophe.le...@c-s.fr>
---
@Mike: Part of this is taken from your series. I was not sure how to
credit you in the commit log; I used Suggested-by: but could use any
other tag, so if you prefer something different just tell me. Also,
it would be nice if you could review this patch.
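For reviewers, here is a minimal before/after sketch of the pattern this
patch applies. It is illustration only, not part of the patch: the
example_ctx[] array and example_early_init() helper are made-up names,
and only the memblock calls mirror the real change.

/*
 * Illustrative sketch only, not part of the patch. example_ctx[] and
 * example_early_init() are hypothetical names; the memblock calls are
 * the ones this patch switches between.
 */
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/thread_info.h>
#include <asm/page.h>

static struct thread_info *example_ctx[NR_CPUS];

static void __init example_early_init(int cpu)
{
        /*
         * Old pattern: allocate by physical address, convert with
         * __va() by hand, then clear the block explicitly.
         */
        example_ctx[cpu] = (struct thread_info *)
                __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
        memset(example_ctx[cpu], 0, THREAD_SIZE);

        /*
         * New pattern: memblock_alloc() returns the virtual address of
         * an already zeroed block, so the cast, __va() and memset()
         * all go away.
         */
        example_ctx[cpu] = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
}

The same idea applies on the 64-bit side, where memblock_alloc_try_nid()
replaces the memblock_alloc_base_nid()/__va() pair while keeping the node
hint and the upper limit.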

 arch/powerpc/kernel/irq.c      |  5 -----
 arch/powerpc/kernel/setup_32.c | 15 +++++----------
 arch/powerpc/kernel/setup_64.c | 16 ++--------------
 3 files changed, 7 insertions(+), 29 deletions(-)

diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 916ddc4aac44..4a44bc395fbc 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -725,18 +725,15 @@ void exc_lvl_ctx_init(void)
 #endif
 #endif
 
-                memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
                 tp = critirq_ctx[cpu_nr];
                 tp->cpu = cpu_nr;
                 tp->preempt_count = 0;
 
 #ifdef CONFIG_BOOKE
-                memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
                 tp = dbgirq_ctx[cpu_nr];
                 tp->cpu = cpu_nr;
                 tp->preempt_count = 0;
 
-                memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
                 tp = mcheckirq_ctx[cpu_nr];
                 tp->cpu = cpu_nr;
                 tp->preempt_count = HARDIRQ_OFFSET;
@@ -754,12 +751,10 @@ void irq_ctx_init(void)
         int i;
 
         for_each_possible_cpu(i) {
-                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                 tp = softirq_ctx[i];
                 tp->cpu = i;
                 klp_init_thread_info(tp);
 
-                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                 tp = hardirq_ctx[i];
                 tp->cpu = i;
                 klp_init_thread_info(tp);
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 947f904688b0..bfe1b46a26dc 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -203,10 +203,8 @@ void __init irqstack_early_init(void)
         /* interrupt stacks must be in lowmem, we get that for free on ppc32
          * as the memblock is limited to lowmem by default */
         for_each_possible_cpu(i) {
-                softirq_ctx[i] = (struct thread_info *)
-                        __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
-                hardirq_ctx[i] = (struct thread_info *)
-                        __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
+                softirq_ctx[i] = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+                hardirq_ctx[i] = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
         }
 }
 
@@ -224,13 +222,10 @@ void __init exc_lvl_early_init(void)
                 hw_cpu = 0;
 #endif
 
-                critirq_ctx[hw_cpu] = (struct thread_info *)
-                        __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
+                critirq_ctx[hw_cpu] = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
 #ifdef CONFIG_BOOKE
-                dbgirq_ctx[hw_cpu] = (struct thread_info *)
-                        __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
-                mcheckirq_ctx[hw_cpu] = (struct thread_info *)
-                        __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
+                dbgirq_ctx[hw_cpu] = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+                mcheckirq_ctx[hw_cpu] = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
 #endif
         }
 }
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 236c1151a3a7..943503f1e2c0 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -634,19 +634,10 @@ __init u64 ppc64_bolted_size(void)
 
 static void *__init alloc_stack(unsigned long limit, int cpu)
 {
-        unsigned long pa;
-
         BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);
 
-        pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
-                                     early_cpu_to_node(cpu), MEMBLOCK_NONE);
-        if (!pa) {
-                pa = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
-                if (!pa)
-                        panic("cannot allocate stacks");
-        }
-
-        return __va(pa);
+        return memblock_alloc_try_nid(THREAD_SIZE, THREAD_SIZE, MEMBLOCK_LOW_LIMIT,
+                                      limit, early_cpu_to_node(cpu));
 }
 
 void __init irqstack_early_init(void)
@@ -739,20 +730,17 @@ void __init emergency_stack_init(void)
                 struct thread_info *ti;
 
                 ti = alloc_stack(limit, i);
-                memset(ti, 0, THREAD_SIZE);
                 emerg_stack_init_thread_info(ti, i);
                 paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE;
 
 #ifdef CONFIG_PPC_BOOK3S_64
                 /* emergency stack for NMI exception handling. */
                 ti = alloc_stack(limit, i);
-                memset(ti, 0, THREAD_SIZE);
                 emerg_stack_init_thread_info(ti, i);
                 paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE;
 
                 /* emergency stack for machine check exception handling. */
                 ti = alloc_stack(limit, i);
-                memset(ti, 0, THREAD_SIZE);
                 emerg_stack_init_thread_info(ti, i);
                 paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
-- 
2.13.3