On Tue, Jan 27, 2026 at 09:29:28PM +0200, Mike Rapoport wrote:
[...]
> -static int mfill_atomic_pte_zeroed_folio(pmd_t *dst_pmd,
> -					 struct vm_area_struct *dst_vma,
> -					 unsigned long dst_addr)
> +static int mfill_atomic_pte_copy(struct mfill_state *state)
>  {
> -	struct folio *folio;
> -	int ret = -ENOMEM;
> -
> -	folio = vma_alloc_zeroed_movable_folio(dst_vma, dst_addr);
> -	if (!folio)
> -		return ret;
> -
> -	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
> -		goto out_put;
> +	const struct vm_uffd_ops *ops = vma_uffd_ops(state->vma);
>
> -	/*
> -	 * The memory barrier inside __folio_mark_uptodate makes sure that
> -	 * zeroing out the folio become visible before mapping the page
> -	 * using set_pte_at(). See do_anonymous_page().
> -	 */
> -	__folio_mark_uptodate(folio);
> +	return __mfill_atomic_pte(state, ops);
> +}
>
> -	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
> -				       &folio->page, true, 0);
> -	if (ret)
> -		goto out_put;
> +static int mfill_atomic_pte_zeroed_folio(struct mfill_state *state)
> +{
> +	const struct vm_uffd_ops *ops = vma_uffd_ops(state->vma);
>
> -	return 0;
> -out_put:
> -	folio_put(folio);
> -	return ret;
> +	return __mfill_atomic_pte(state, ops);
>  }
>
>  static int mfill_atomic_pte_zeropage(struct mfill_state *state)
> @@ -542,7 +546,7 @@ static int mfill_atomic_pte_zeropage(struct mfill_state *state)
>  	int ret;
>
>  	if (mm_forbids_zeropage(dst_vma->vm_mm))
> -		return mfill_atomic_pte_zeroed_folio(dst_pmd, dst_vma, dst_addr);
> +		return mfill_atomic_pte_zeroed_folio(state);
After this patch, mfill_atomic_pte_zeroed_folio() has exactly the same
implementation as mfill_atomic_pte_copy(), so IIUC we can drop it.
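
A rough sketch on top of this patch (untested, and assuming nothing else
will grow a user of the separate zeroed-folio helper):

-static int mfill_atomic_pte_zeroed_folio(struct mfill_state *state)
-{
-	const struct vm_uffd_ops *ops = vma_uffd_ops(state->vma);
-
-	return __mfill_atomic_pte(state, ops);
-}
-
 static int mfill_atomic_pte_zeropage(struct mfill_state *state)
 ...
 	if (mm_forbids_zeropage(dst_vma->vm_mm))
-		return mfill_atomic_pte_zeroed_folio(state);
+		return mfill_atomic_pte_copy(state);

i.e. the zeropage fallback would call mfill_atomic_pte_copy() directly.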
>
>  	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
>  					 dst_vma->vm_page_prot));
> --
> 2.51.0
>
--
Peter Xu