Hi Aneesh,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on powerpc/next]
[also build test WARNING on v4.18 next-20180822]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Aneesh-Kumar-K-V/powerpc-mm-book3s-Update-pmd_present-to-look-at-_PAGE_PRESENT-bit/20180824-141837
base:   https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git next
config: powerpc-obs600_defconfig (attached as .config)
compiler: powerpc-linux-gnu-gcc (Debian 7.2.0-11) 7.2.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        GCC_VERSION=7.2.0 make.cross ARCH=powerpc 

All warnings (new ones prefixed by >>):

   In file included from include/linux/kernel.h:15:0,
                    from arch/powerpc/mm/pgtable.c:24:
   arch/powerpc/mm/pgtable.c: In function 'set_pte_at':
   arch/powerpc/mm/pgtable.c:195:14: error: implicit declaration of function 'pte_raw'; did you mean 'pte_read'? [-Werror=implicit-function-declaration]
     VM_WARN_ON((pte_raw(*ptep) & cpu_to_be64(_PAGE_PRESENT)) &&
                 ^
   include/linux/build_bug.h:36:63: note: in definition of macro 'BUILD_BUG_ON_INVALID'
    #define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e))))
                                                                  ^
>> arch/powerpc/mm/pgtable.c:195:2: note: in expansion of macro 'VM_WARN_ON'
     VM_WARN_ON((pte_raw(*ptep) & cpu_to_be64(_PAGE_PRESENT)) &&
     ^~~~~~~~~~
   cc1: all warnings being treated as errors
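
For context: pte_raw() is declared only by the Book3S 64 page-table headers (arch/powerpc/include/asm/book3s/64/pgtable.h), while arch/powerpc/mm/pgtable.c is common code built for every powerpc platform. powerpc-obs600_defconfig targets an embedded, non-Book3S CPU, so the new pte_raw() call in set_pte_at() is an undeclared identifier there, and -Werror turns that warning into the hard failure above.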

vim +/VM_WARN_ON +195 arch/powerpc/mm/pgtable.c

  > 24  #include <linux/kernel.h>
    25  #include <linux/gfp.h>
    26  #include <linux/mm.h>
    27  #include <linux/percpu.h>
    28  #include <linux/hardirq.h>
    29  #include <linux/hugetlb.h>
    30  #include <asm/pgalloc.h>
    31  #include <asm/tlbflush.h>
    32  #include <asm/tlb.h>
    33  
    34  static inline int is_exec_fault(void)
    35  {
    36          return current->thread.regs && TRAP(current->thread.regs) == 0x400;
    37  }
    38  
    39  /* We only try to do i/d cache coherency on stuff that looks like
    40   * reasonably "normal" PTEs. We currently require a PTE to be present
    41   * and we avoid _PAGE_SPECIAL and cache inhibited pte. We also only do that
    42   * on userspace PTEs
    43   */
    44  static inline int pte_looks_normal(pte_t pte)
    45  {
    46  
    47  #if defined(CONFIG_PPC_BOOK3S_64)
    48          if ((pte_val(pte) & (_PAGE_PRESENT | _PAGE_SPECIAL)) == _PAGE_PRESENT) {
    49                  if (pte_ci(pte))
    50                          return 0;
    51                  if (pte_user(pte))
    52                          return 1;
    53          }
    54          return 0;
    55  #else
    56          return (pte_val(pte) &
    57                  (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER |
    58                   _PAGE_PRIVILEGED)) ==
    59                  (_PAGE_PRESENT | _PAGE_USER);
    60  #endif
    61  }
    62  
    63  static struct page *maybe_pte_to_page(pte_t pte)
    64  {
    65          unsigned long pfn = pte_pfn(pte);
    66          struct page *page;
    67  
    68          if (unlikely(!pfn_valid(pfn)))
    69                  return NULL;
    70          page = pfn_to_page(pfn);
    71          if (PageReserved(page))
    72                  return NULL;
    73          return page;
    74  }
    75  
    76  #if defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0
    77  
    78  /* Server-style MMU handles coherency when hashing if HW exec permission
    79   * is supposed per page (currently 64-bit only). If not, then, we always
    80   * flush the cache for valid PTEs in set_pte. Embedded CPU without HW exec
    81   * support falls into the same category.
    82   */
    83  
    84  static pte_t set_pte_filter(pte_t pte)
    85  {
    86          if (radix_enabled())
    87                  return pte;
    88  
    89          pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
    90          if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
    91                                         cpu_has_feature(CPU_FTR_NOEXECUTE))) {
    92                  struct page *pg = maybe_pte_to_page(pte);
    93                  if (!pg)
    94                          return pte;
    95                  if (!test_bit(PG_arch_1, &pg->flags)) {
    96                          flush_dcache_icache_page(pg);
    97                          set_bit(PG_arch_1, &pg->flags);
    98                  }
    99          }
   100          return pte;
   101  }
   102  
   103  static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
   104                                       int dirty)
   105  {
   106          return pte;
   107  }
   108  
   109  #else /* defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0 */
   110  
   111  /* Embedded type MMU with HW exec support. This is a bit more complicated
   112   * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
   113   * instead we "filter out" the exec permission for non clean pages.
   114   */
   115  static pte_t set_pte_filter(pte_t pte)
   116  {
   117          struct page *pg;
   118  
   119          /* No exec permission in the first place, move on */
   120          if (!(pte_val(pte) & _PAGE_EXEC) || !pte_looks_normal(pte))
   121                  return pte;
   122  
   123          /* If you set _PAGE_EXEC on weird pages you're on your own */
   124          pg = maybe_pte_to_page(pte);
   125          if (unlikely(!pg))
   126                  return pte;
   127  
   128          /* If the page clean, we move on */
   129          if (test_bit(PG_arch_1, &pg->flags))
   130                  return pte;
   131  
   132          /* If it's an exec fault, we flush the cache and make it clean */
   133          if (is_exec_fault()) {
   134                  flush_dcache_icache_page(pg);
   135                  set_bit(PG_arch_1, &pg->flags);
   136                  return pte;
   137          }
   138  
   139          /* Else, we filter out _PAGE_EXEC */
   140          return __pte(pte_val(pte) & ~_PAGE_EXEC);
   141  }
   142  
   143  static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
   144                                       int dirty)
   145  {
   146          struct page *pg;
   147  
   148          /* So here, we only care about exec faults, as we use them
   149           * to recover lost _PAGE_EXEC and perform I$/D$ coherency
   150           * if necessary. Also if _PAGE_EXEC is already set, same deal,
   151           * we just bail out
   152           */
   153          if (dirty || (pte_val(pte) & _PAGE_EXEC) || !is_exec_fault())
   154                  return pte;
   155  
   156  #ifdef CONFIG_DEBUG_VM
   157          /* So this is an exec fault, _PAGE_EXEC is not set. If it was
   158           * an error we would have bailed out earlier in do_page_fault()
   159           * but let's make sure of it
   160           */
   161          if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
   162                  return pte;
   163  #endif /* CONFIG_DEBUG_VM */
   164  
   165          /* If you set _PAGE_EXEC on weird pages you're on your own */
   166          pg = maybe_pte_to_page(pte);
   167          if (unlikely(!pg))
   168                  goto bail;
   169  
   170          /* If the page is already clean, we move on */
   171          if (test_bit(PG_arch_1, &pg->flags))
   172                  goto bail;
   173  
   174          /* Clean the page and set PG_arch_1 */
   175          flush_dcache_icache_page(pg);
   176          set_bit(PG_arch_1, &pg->flags);
   177  
   178   bail:
   179          return __pte(pte_val(pte) | _PAGE_EXEC);
   180  }
   181  
   182  #endif /* !(defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0) */
   183  
   184  /*
   185   * set_pte stores a linux PTE into the linux page table.
   186   */
   187  void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
   188                  pte_t pte)
   189  {
   190          /*
   191           * When handling numa faults, we already have the pte marked
   192           * _PAGE_PRESENT, but we can be sure that it is not in hpte.
   193           * Hence we can use set_pte_at for them.
   194           */
 > 195          VM_WARN_ON((pte_raw(*ptep) & cpu_to_be64(_PAGE_PRESENT)) &&
   196                  !pte_protnone(*ptep));
   197  
   198          /* Add the pte bit when trying to set a pte */
   199          pte = __pte(pte_val(pte) | _PAGE_PTE);
   200  
   201          /* Note: mm->context.id might not yet have been assigned as
   202           * this context might not have been activated yet when this
   203           * is called.
   204           */
   205          pte = set_pte_filter(pte);
   206  
   207          /* Perform the setting of the PTE */
   208          __set_pte_at(mm, addr, ptep, pte, 0);
   209  }
   210  
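
For what it's worth, one way to avoid the breakage (a sketch only, not necessarily the fix the author intends; the helper name below is invented) is to confine the pte_raw() usage to Book3S 64, where it is actually declared, behind a stub helper:

        #ifdef CONFIG_PPC_BOOK3S_64
        /*
         * Hypothetical helper: a NUMA fault legitimately leaves
         * _PAGE_PRESENT set on a PROT_NONE pte, so only warn when the
         * pte is present and not protnone.
         */
        static inline void assert_pte_not_present(pte_t *ptep)
        {
                VM_WARN_ON((pte_raw(*ptep) & cpu_to_be64(_PAGE_PRESENT)) &&
                           !pte_protnone(*ptep));
        }
        #else
        static inline void assert_pte_not_present(pte_t *ptep) { }
        #endif

set_pte_at() would then call assert_pte_not_present(ptep) in place of the open-coded VM_WARN_ON at line 195, so embedded builds like obs600_defconfig never reference pte_raw().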

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
