Excerpts from Christophe Leroy's message of December 8, 2021 7:38 pm:
> 
> 
> On 01/12/2021 at 15:41, Nicholas Piggin wrote:
>> To avoid any functional changes to radix paths when building with hash
>> MMU support disabled (and CONFIG_PPC_MM_SLICES=n), always define the
>> arch get_unmapped_area calls on 64s platforms.
>> 
>> Signed-off-by: Nicholas Piggin <npig...@gmail.com>
>> ---
>>   arch/powerpc/include/asm/book3s/64/hash.h |  4 ---
>>   arch/powerpc/include/asm/book3s/64/mmu.h  |  6 ++++
>>   arch/powerpc/mm/hugetlbpage.c             | 16 ++++++---
>>   arch/powerpc/mm/mmap.c                    | 40 +++++++++++++++++++----
>>   arch/powerpc/mm/slice.c                   | 20 ------------
>>   5 files changed, 51 insertions(+), 35 deletions(-)
>> 
>> diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
>> index 674fe0e890dc..a7a0572f3846 100644
>> --- a/arch/powerpc/include/asm/book3s/64/hash.h
>> +++ b/arch/powerpc/include/asm/book3s/64/hash.h
>> @@ -99,10 +99,6 @@
>>    * Defines the address of the vmemap area, in its own region on
>>    * hash table CPUs.
>>    */
>> -#ifdef CONFIG_PPC_MM_SLICES
>> -#define HAVE_ARCH_UNMAPPED_AREA
>> -#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
>> -#endif /* CONFIG_PPC_MM_SLICES */
>>   
>>   /* PTEIDX nibble */
>>   #define _PTEIDX_SECONDARY  0x8
>> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
>> index c02f42d1031e..015d7d972d16 100644
>> --- a/arch/powerpc/include/asm/book3s/64/mmu.h
>> +++ b/arch/powerpc/include/asm/book3s/64/mmu.h
>> @@ -4,6 +4,12 @@
>>   
>>   #include <asm/page.h>
>>   
>> +#ifdef CONFIG_HUGETLB_PAGE
>> +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
>> +#endif
>> +#define HAVE_ARCH_UNMAPPED_AREA
>> +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
>> +
>>   #ifndef __ASSEMBLY__
>>   /*
>>    * Page size definition
>> diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
>> index 82d8b368ca6d..ddead41e2194 100644
>> --- a/arch/powerpc/mm/hugetlbpage.c
>> +++ b/arch/powerpc/mm/hugetlbpage.c
>> @@ -542,20 +542,26 @@ struct page *follow_huge_pd(struct vm_area_struct *vma,
>>      return page;
>>   }
>>   
>> -#ifdef CONFIG_PPC_MM_SLICES
>> +#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
>> +static inline int file_to_psize(struct file *file)
>> +{
>> +    struct hstate *hstate = hstate_file(file);
>> +    return shift_to_mmu_psize(huge_page_shift(hstate));
>> +}
>> +
>>   unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
>>                                      unsigned long len, unsigned long pgoff,
>>                                      unsigned long flags)
>>   {
>> -    struct hstate *hstate = hstate_file(file);
>> -    int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
>> -
>>   #ifdef CONFIG_PPC_RADIX_MMU
>>      if (radix_enabled())
>>              return radix__hugetlb_get_unmapped_area(file, addr, len,
>>                                                     pgoff, flags);
>>   #endif
>> -    return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
>> +#ifdef CONFIG_PPC_MM_SLICES
>> +    return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
>> +#endif
>> +    BUG();
> 
> We shouldn't add new instances of BUG().
> 
> BUILD_BUG() should do the trick here.
> 
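Yeah, something like this should do it, I think. Untested sketch of the
same function with just those two changes (this assumes the returns above
let the compiler eliminate the BUILD_BUG() as dead code in any valid
config, e.g. radix_enabled() folding to a compile-time constant when the
hash MMU is configured out):

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
#ifdef CONFIG_PPC_RADIX_MMU
	if (radix_enabled())
		return radix__hugetlb_get_unmapped_area(file, addr, len,
						       pgoff, flags);
#endif
#ifdef CONFIG_PPC_MM_SLICES
	return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
#endif
	/* No handler configured: fail the build rather than crash at runtime */
	BUILD_BUG();
}
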
>>   }
>>   #endif
>>   
>> diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
>> index ae683fdc716c..c475cf810aa8 100644
>> --- a/arch/powerpc/mm/mmap.c
>> +++ b/arch/powerpc/mm/mmap.c
>> @@ -80,6 +80,7 @@ static inline unsigned long mmap_base(unsigned long rnd,
>>      return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd);
>>   }
>>   
>> +#ifdef HAVE_ARCH_UNMAPPED_AREA
>>   #ifdef CONFIG_PPC_RADIX_MMU
>>   /*
>>    * Same function as generic code used only for radix, because we don't need to overload
>> @@ -181,11 +182,42 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
>>       */
>>      return radix__arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
>>   }
>> +#endif
>> +
>> +unsigned long arch_get_unmapped_area(struct file *filp,
>> +                                 unsigned long addr,
>> +                                 unsigned long len,
>> +                                 unsigned long pgoff,
>> +                                 unsigned long flags)
>> +{
>> +#ifdef CONFIG_PPC_MM_SLICES
>> +    return slice_get_unmapped_area(addr, len, flags,
>> +                                   mm_ctx_user_psize(&current->mm->context), 0);
>> +#else
>> +    BUG();
> 
> Same.
> 
> And the #else isn't needed
> 
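So presumably just this (untested, #else dropped):

unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
#ifdef CONFIG_PPC_MM_SLICES
	return slice_get_unmapped_area(addr, len, flags,
				       mm_ctx_user_psize(&current->mm->context), 0);
#endif
	/* Only compiled in when slices are configured out: fail the build */
	BUILD_BUG();
}
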
>> +#endif
>> +}
>> +
>> +unsigned long arch_get_unmapped_area_topdown(struct file *filp,
>> +                                         const unsigned long addr0,
>> +                                         const unsigned long len,
>> +                                         const unsigned long pgoff,
>> +                                         const unsigned long flags)
>> +{
>> +#ifdef CONFIG_PPC_MM_SLICES
>> +    return slice_get_unmapped_area(addr0, len, flags,
>> +                                   mm_ctx_user_psize(&current->mm->context), 1);
>> +#else
>> +    BUG();
> 
> Same
> 
> And the #else isn't needed
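
And the topdown variant would look the same, I suppose (untested):

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
#ifdef CONFIG_PPC_MM_SLICES
	return slice_get_unmapped_area(addr0, len, flags,
				       mm_ctx_user_psize(&current->mm->context), 1);
#endif
	/* As above: unreachable in a valid config, so catch it at build time */
	BUILD_BUG();
}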

Fair enough. I'll see if mpe can squash in an incremental patch.

Thanks,
Nick
