On Mon, 3 Jun 2019, Minchan Kim wrote:

> @@ -415,6 +416,128 @@ static long madvise_cold(struct vm_area_struct *vma,
>       return 0;
>  }
>
> +static int madvise_pageout_pte_range(pmd_t *pmd, unsigned long addr,
> +                             unsigned long end, struct mm_walk *walk)
> +{
> +     pte_t *orig_pte, *pte, ptent;
> +     spinlock_t *ptl;
> +     LIST_HEAD(page_list);
> +     struct page *page;
> +     int isolated = 0;
> +     struct vm_area_struct *vma = walk->vma;
> +     unsigned long next;
> +
> +     if (fatal_signal_pending(current))
> +             return -EINTR;
> +
> +     next = pmd_addr_end(addr, end);
> +     if (pmd_trans_huge(*pmd)) {
> +             ptl = pmd_trans_huge_lock(pmd, vma);
> +             if (!ptl)
> +                     return 0;
> +
> +             if (is_huge_zero_pmd(*pmd))
> +                     goto huge_unlock;
> +
> +             page = pmd_page(*pmd);
> +             if (page_mapcount(page) > 1)
> +                     goto huge_unlock;
> +
> +             if (next - addr != HPAGE_PMD_SIZE) {
> +                     int err;
> +
> +                     get_page(page);
> +                     spin_unlock(ptl);
> +                     lock_page(page);
> +                     err = split_huge_page(page);
> +                     unlock_page(page);
> +                     put_page(page);
> +                     if (!err)
> +                             goto regular_page;
> +                     return 0;
> +             }

I have seen this same THP split sequence replicated multiple times now.
Is there a way to avoid duplicating the whole shebang?
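
For illustration only, something like a shared helper might cut the
duplication down. This is just a rough sketch of the idea, not a patch:
the name madvise_lock_or_split_thp() and its return convention are my
own invention, not anything in the tree, and the callers would still
need their own per-page policy after it returns.

	/*
	 * Hypothetical helper (name and signature made up for this sketch):
	 * take the pmd lock, skip the huge zero page and shared THPs, and
	 * split the THP when the range does not cover all of it.
	 *
	 * Returns:
	 *   1  - caller should handle the pmd-mapped THP, *ptl is held
	 *   0  - not a THP, or it was split; fall through to the pte loop
	 *  -1  - skip this pmd entirely
	 */
	static int madvise_lock_or_split_thp(pmd_t *pmd,
			struct vm_area_struct *vma, unsigned long addr,
			unsigned long end, spinlock_t **ptl)
	{
		struct page *page;

		*ptl = pmd_trans_huge_lock(pmd, vma);
		if (!*ptl)
			return 0;

		if (is_huge_zero_pmd(*pmd))
			goto unlock_skip;

		page = pmd_page(*pmd);
		if (page_mapcount(page) > 1)
			goto unlock_skip;

		if (pmd_addr_end(addr, end) - addr != HPAGE_PMD_SIZE) {
			int err;

			get_page(page);
			spin_unlock(*ptl);
			lock_page(page);
			err = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			/* on success, let the caller walk the ptes instead */
			return err ? -1 : 0;
		}

		return 1;

	unlock_skip:
		spin_unlock(*ptl);
		return -1;
	}

Then each of the madvise walkers that wants this behaviour could open
its pmd_trans_huge() branch with one call and switch on the result,
rather than repeating the get_page/lock_page/split_huge_page dance.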
