On Thu, Mar 10, 2022 at 11:26:31AM -0600, Alex Sierra wrote:
> @@ -606,7 +606,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
>   * PFNMAP mappings in order to support COWable mappings.
>   *
>   */
> -struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
> +struct page *vm_normal_any_page(struct vm_area_struct *vma, unsigned long addr,
>                           pte_t pte)
>  {
>       unsigned long pfn = pte_pfn(pte);
> @@ -620,8 +620,6 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
>                       return NULL;
>               if (is_zero_pfn(pfn))
>                       return NULL;
> -             if (pte_devmap(pte))
> -                     return NULL;
>  
>               print_bad_pte(vma, addr, pte, NULL);
>               return NULL;

... what?

Haven't you just made it so that a devmap page always prints a bad PTE
message, and then returns NULL anyway?

Surely this should be:

                if (pte_devmap(pte))
-                       return NULL;
+                       return pfn_to_page(pfn);

or maybe

+                       goto check_pfn;

But I don't know about that highest_memmap_pfn check.
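
For reference, the check_pfn path at the bottom of the current function
is just this (rough sketch from memory, not the exact source):

check_pfn:
	if (unlikely(pfn > highest_memmap_pfn)) {
		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

so the question with the goto variant is whether devmap pfns are always
covered by highest_memmap_pfn before we get to the pfn_to_page() at the
end.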

> @@ -661,6 +659,22 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
>       return pfn_to_page(pfn);
>  }
>  
> +/*
> + * vm_normal_lru_page -- This function gets the "struct page" associated
> + * with a pte only for page cache and anon page. These pages are LRU handled.
> + */
> +struct page *vm_normal_lru_page(struct vm_area_struct *vma, unsigned long addr,
> +                         pte_t pte)

It seems a shame to add a new function without proper kernel-doc.
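
Something like this (rough sketch, wording taken from your comment
above) would do:

/**
 * vm_normal_lru_page - get the LRU "struct page" associated with a pte
 * @vma: the VMA the pte was found in
 * @addr: the virtual address the pte maps
 * @pte: the pte itself
 *
 * Like vm_normal_any_page(), but only returns page cache and anonymous
 * pages, i.e. pages that are handled on the LRU.
 *
 * Return: the struct page, or NULL if this is not an LRU page.
 */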
