If the boot CPU tries to access per-cpu data of other CPUs before the per-cpu areas are set up, it will unexpectedly use offset 0.
Try to catch such accesses by poisoning the __per_cpu_offset array.

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/include/asm/percpu.h | 1 +
 arch/powerpc/kernel/paca.c        | 2 +-
 arch/powerpc/kernel/setup_64.c    | 2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
index 8e5b7d0b851c..6ca1a9fc5725 100644
--- a/arch/powerpc/include/asm/percpu.h
+++ b/arch/powerpc/include/asm/percpu.h
@@ -7,6 +7,7 @@
  * Same as asm-generic/percpu.h, except that we store the per cpu offset
  * in the paca. Based on the x86-64 implementation.
  */
+#define PER_CPU_OFFSET_POISON 0xfeeeeeeeeeeeeeeeULL
 
 #ifdef CONFIG_SMP
 
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index ba593fd60124..914d27c8b84a 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -223,7 +223,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
 	new_paca->hw_cpu_id = 0xffff;
 	new_paca->kexec_state = KEXEC_STATE_NONE;
 	new_paca->__current = &init_task;
-	new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
+	new_paca->data_offset = PER_CPU_OFFSET_POISON;
 #ifdef CONFIG_PPC_64S_HASH_MMU
 	new_paca->slb_shadow_ptr = NULL;
 #endif
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 5761f08dae95..60f0d1258526 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -812,7 +812,7 @@ static __init int pcpu_cpu_to_node(int cpu)
 	return early_cpu_to_node(cpu);
 }
 
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1 ] = PER_CPU_OFFSET_POISON };
 EXPORT_SYMBOL(__per_cpu_offset);
 
 void __init setup_per_cpu_areas(void)
-- 
2.35.1
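
For illustration only (not part of the patch): a per-CPU address is formed as a base address plus __per_cpu_offset[cpu], so an entry that is still 0 silently aliases the boot copy at offset 0, while a poisoned entry yields an obviously bogus address that should fault on use. The following minimal user-space sketch uses simplified, hypothetical names (percpu_offset, percpu_addr, some_counter), not the kernel's actual per-cpu machinery.

/*
 * Illustrative sketch: mimic how a per-CPU address is computed from a
 * base address plus the per-CPU offset, and contrast a poisoned entry
 * with the old zero-initialised behaviour.  Hypothetical names only.
 */
#include <stdio.h>

#define NR_CPUS			4
#define PER_CPU_OFFSET_POISON	0xfeeeeeeeeeeeeeeeULL

/* Stand-in for __per_cpu_offset[], poisoned before "setup" runs.
 * The [0 ... N] range initialiser is the same GCC extension the patch uses. */
static unsigned long long percpu_offset[NR_CPUS] = {
	[0 ... NR_CPUS - 1] = PER_CPU_OFFSET_POISON
};

static unsigned long long some_counter;	/* stand-in for a per-CPU variable */

/* Same arithmetic shape as a per-CPU pointer lookup: base + offset. */
static unsigned long long percpu_addr(void *base, int cpu)
{
	return (unsigned long long)base + percpu_offset[cpu];
}

int main(void)
{
	/*
	 * With the poison in place, an access before setup produces an
	 * address that clearly cannot be valid (and faults if used).
	 */
	printf("cpu1 addr with poison: %#llx\n",
	       percpu_addr(&some_counter, 1));

	/* What the previous zero-initialised array would have given:
	 * the "other CPU" address silently collapses onto offset 0. */
	percpu_offset[1] = 0;
	printf("cpu1 addr with zero offset: %#llx (== &some_counter %p)\n",
	       percpu_addr(&some_counter, 1), (void *)&some_counter);
	return 0;
}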