Christophe Leroy <christophe.le...@c-s.fr> writes:
> Move slice_mask_for_size() into subarch mmu.h
>
> At the same time, replace BUG() by VM_BUG_ON() as those BUG() are not
> there to catch runtime errors but to catch errors during development
> cycle only.
> Reviewed-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
> Signed-off-by: Christophe Leroy <christophe.le...@c-s.fr>
> ---
>  arch/powerpc/include/asm/book3s/64/mmu.h     | 17 +++++++++++
>  arch/powerpc/include/asm/nohash/32/mmu-8xx.h | 42 +++++++++++++++++++---------
>  arch/powerpc/mm/slice.c                      | 34 ----------------------
>  3 files changed, 46 insertions(+), 47 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
> index 230a9dec7677..ad00355f874f 100644
> --- a/arch/powerpc/include/asm/book3s/64/mmu.h
> +++ b/arch/powerpc/include/asm/book3s/64/mmu.h
> @@ -203,6 +203,23 @@ static inline struct slice_mask *mm_ctx_slice_mask_16g(mm_context_t *ctx)
>  }
>  #endif
>
> +static inline struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
> +{
> +#ifdef CONFIG_PPC_64K_PAGES
> +	if (psize == MMU_PAGE_64K)
> +		return mm_ctx_slice_mask_64k(&ctx);
> +#endif
> +#ifdef CONFIG_HUGETLB_PAGE
> +	if (psize == MMU_PAGE_16M)
> +		return mm_ctx_slice_mask_16m(&ctx);
> +	if (psize == MMU_PAGE_16G)
> +		return mm_ctx_slice_mask_16g(&ctx);
> +#endif
> +	VM_BUG_ON(psize != MMU_PAGE_4K);
> +
> +	return mm_ctx_slice_mask_4k(&ctx);
> +}
> +
>  #ifdef CONFIG_PPC_SUBPAGE_PROT
>  static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
>  {
> diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
> index c503e2f05e61..a0f6844a1498 100644
> --- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
> +++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
> @@ -184,7 +184,23 @@
>  #define LOW_SLICE_ARRAY_SZ	SLICE_ARRAY_SIZE
>  #endif
>
> +#if defined(CONFIG_PPC_4K_PAGES)
> +#define mmu_virtual_psize	MMU_PAGE_4K
> +#elif defined(CONFIG_PPC_16K_PAGES)
> +#define mmu_virtual_psize	MMU_PAGE_16K
> +#define PTE_FRAG_NR		4
> +#define PTE_FRAG_SIZE_SHIFT	12
> +#define PTE_FRAG_SIZE		(1UL << 12)
> +#else
> +#error "Unsupported PAGE_SIZE"
> +#endif
> +
> +#define mmu_linear_psize	MMU_PAGE_8M
> +
>  #ifndef __ASSEMBLY__
> +
> +#include <linux/mmdebug.h>
> +
>  struct slice_mask {
>  	u64 low_slices;
>  	DECLARE_BITMAP(high_slices, 0);
> @@ -255,6 +271,19 @@ static inline struct slice_mask *mm_ctx_slice_mask_8m(mm_context_t *ctx)
>  	return &ctx->mask_8m;
>  }
>  #endif
> +
> +static inline struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
> +{
> +#ifdef CONFIG_HUGETLB_PAGE
> +	if (psize == MMU_PAGE_512K)
> +		return &ctx->mask_512k;
> +	if (psize == MMU_PAGE_8M)
> +		return &ctx->mask_8m;
> +#endif
> +	VM_BUG_ON(psize != mmu_virtual_psize);
> +
> +	return &ctx->mask_base_psize;
> +}
>  #endif /* CONFIG_PPC_MM_SLICE */
>
>  #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
> @@ -306,17 +335,4 @@ extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
>
>  #endif /* !__ASSEMBLY__ */
>
> -#if defined(CONFIG_PPC_4K_PAGES)
> -#define mmu_virtual_psize	MMU_PAGE_4K
> -#elif defined(CONFIG_PPC_16K_PAGES)
> -#define mmu_virtual_psize	MMU_PAGE_16K
> -#define PTE_FRAG_NR		4
> -#define PTE_FRAG_SIZE_SHIFT	12
> -#define PTE_FRAG_SIZE		(1UL << 12)
> -#else
> -#error "Unsupported PAGE_SIZE"
> -#endif
> -
> -#define mmu_linear_psize	MMU_PAGE_8M
> -
>  #endif /* _ASM_POWERPC_MMU_8XX_H_ */
> diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
> index 8eb7e8b09c75..31de91b65a64 100644
> --- a/arch/powerpc/mm/slice.c
> +++ b/arch/powerpc/mm/slice.c
> @@ -150,40 +150,6 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
>  		__set_bit(i, ret->high_slices);
>  }
>
> -#ifdef CONFIG_PPC_BOOK3S_64
> -static struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
> -{
> -#ifdef CONFIG_PPC_64K_PAGES
> -	if (psize == MMU_PAGE_64K)
> -		return mm_ctx_slice_mask_64k(&ctx);
> -#endif
> -	if (psize == MMU_PAGE_4K)
> -		return mm_ctx_slice_mask_4k(&ctx);
> -#ifdef CONFIG_HUGETLB_PAGE
> -	if (psize == MMU_PAGE_16M)
> -		return mm_ctx_slice_mask_16m(&ctx);
> -	if (psize == MMU_PAGE_16G)
> -		return mm_ctx_slice_mask_16g(&ctx);
> -#endif
> -	BUG();
> -}
> -#elif defined(CONFIG_PPC_8xx)
> -static struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
> -{
> -	if (psize == mmu_virtual_psize)
> -		return &ctx->mask_base_psize;
> -#ifdef CONFIG_HUGETLB_PAGE
> -	if (psize == MMU_PAGE_512K)
> -		return &ctx->mask_512k;
> -	if (psize == MMU_PAGE_8M)
> -		return &ctx->mask_8m;
> -#endif
> -	BUG();
> -}
> -#else
> -#error "Must define the slice masks for page sizes supported by the platform"
> -#endif
> -
>  static bool slice_check_range_fits(struct mm_struct *mm,
>  			   const struct slice_mask *available,
>  			   unsigned long start, unsigned long len)
> --
> 2.13.3
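
A small aside on the BUG() -> VM_BUG_ON() point in the commit message: VM_BUG_ON() only emits a runtime check when CONFIG_DEBUG_VM is enabled and compiles away otherwise, so the bad-psize case is caught during development but costs nothing in a production kernel. The userspace sketch below only illustrates that behaviour; it is not kernel code, and DEBUG_VM, the PSIZE_* values and mask_for_size() are made-up stand-ins for CONFIG_DEBUG_VM, MMU_PAGE_* and slice_mask_for_size().

/*
 * Userspace sketch of the BUG() vs VM_BUG_ON() trade-off.
 * Build with -DDEBUG_VM to get the development-time check;
 * a plain build compiles the check away entirely.
 */
#include <stdio.h>
#include <stdlib.h>

#ifdef DEBUG_VM					/* stand-in for CONFIG_DEBUG_VM */
#define VM_BUG_ON(cond)	do { if (cond) abort(); } while (0)
#else
#define VM_BUG_ON(cond)	do { (void)(cond); } while (0)	/* no code emitted */
#endif

enum psize { PSIZE_4K, PSIZE_64K, PSIZE_16M };	/* hypothetical page sizes */

static const char *mask_for_size(int psize)
{
	if (psize == PSIZE_64K)
		return "mask_64k";
	if (psize == PSIZE_16M)
		return "mask_16m";
	/* Development-time check only; production builds fall through. */
	VM_BUG_ON(psize != PSIZE_4K);
	return "mask_4k";
}

int main(void)
{
	printf("%s\n", mask_for_size(PSIZE_64K));	/* prints mask_64k */
	printf("%s\n", mask_for_size(PSIZE_4K));	/* prints mask_4k */
	return 0;
}

Built with -DDEBUG_VM an unexpected psize still aborts immediately during development, while a normal build just falls through to the base-page-size mask, which mirrors what the new inline helpers in the patch do.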