It's easier to reason about the code if mmu_slb_size is only set in one place, so convert the open-coded assignments to use slb_set_size().
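For reference, slb_set_size() amounts to a trivial setter for the global (a minimal sketch only; the real definition lives in arch/powerpc/mm/slb.c and may carry extra logic):

	/* Sketch only; see arch/powerpc/mm/slb.c for the actual body */
	void slb_set_size(u16 size)
	{
		mmu_slb_size = size;
	}

With every assignment funnelled through the one helper, any future validation or tracing of SLB size changes only needs to be added in a single place.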
Signed-off-by: Michael Ellerman <m...@ellerman.id.au>
---
 arch/powerpc/kernel/prom.c      | 2 +-
 arch/powerpc/mm/pgtable-radix.c | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 4181ec715f88..14693f8ccb80 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -238,7 +238,7 @@ static void __init init_mmu_slb_size(unsigned long node)
 		of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
 
 	if (slb_size_ptr)
-		mmu_slb_size = be32_to_cpup(slb_size_ptr);
+		slb_set_size(be32_to_cpup(slb_size_ptr));
 }
 #else
 #define init_mmu_slb_size(node) do { } while(0)
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 931156069a81..949fbc96b237 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -328,7 +328,8 @@ void __init radix_init_pgtable(void)
 	struct memblock_region *reg;
 
 	/* We don't support slb for radix */
-	mmu_slb_size = 0;
+	slb_set_size(0);
+
 	/*
 	 * Create the linear mapping, using standard page size for now
 	 */
-- 
2.20.1