On Tue, Jan 27, 2026 at 09:29:26PM +0200, Mike Rapoport wrote:
> From: "Mike Rapoport (Microsoft)" <[email protected]>
>
> Current userfaultfd implementation works only with memory managed by
> core MM: anonymous, shmem and hugetlb.
>
> First, there is no fundamental reason to limit userfaultfd support only
> to the core memory types and userfaults can be handled similarly to
> regular page faults provided a VMA owner implements appropriate
> callbacks.
>
> Second, historically various code paths were conditioned on
> vma_is_anonymous(), vma_is_shmem() and is_vm_hugetlb_page() and some of
> these conditions can be expressed as operations implemented by a
> particular memory type.
>
> Introduce vm_uffd_ops extension to vm_operations_struct that will
> delegate memory type specific operations to a VMA owner.
>
> Operations for anonymous memory are handled internally in userfaultfd
> using anon_uffd_ops that is implicitly assigned to anonymous VMAs.
>
> Start with a single operation, ->can_userfault() that will verify that a
> VMA meets requirements for userfaultfd support at registration time.
>
> Implement that method for anonymous, shmem and hugetlb and move relevant
> parts of vma_can_userfault() into the new callbacks.
>
> Signed-off-by: Mike Rapoport (Microsoft) <[email protected]>
> ---
> include/linux/mm.h | 5 +++++
> include/linux/userfaultfd_k.h | 6 +++++
> mm/hugetlb.c | 21 ++++++++++++++++++
> mm/shmem.c | 23 ++++++++++++++++++++
> mm/userfaultfd.c | 41 ++++++++++++++++++++++-------------
> 5 files changed, 81 insertions(+), 15 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 15076261d0c2..3c2caff646c3 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -732,6 +732,8 @@ struct vm_fault {
> */
> };
>
> +struct vm_uffd_ops;
> +
> /*
> * These are the virtual MM functions - opening of an area, closing and
> * unmapping it (needed to keep files on disk up-to-date etc), pointer
> @@ -817,6 +819,9 @@ struct vm_operations_struct {
> struct page *(*find_normal_page)(struct vm_area_struct *vma,
> unsigned long addr);
> #endif /* CONFIG_FIND_NORMAL_PAGE */
> +#ifdef CONFIG_USERFAULTFD
> + const struct vm_uffd_ops *uffd_ops;
> +#endif
> };
>
> #ifdef CONFIG_NUMA_BALANCING
> diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
> index a49cf750e803..56e85ab166c7 100644
> --- a/include/linux/userfaultfd_k.h
> +++ b/include/linux/userfaultfd_k.h
> @@ -80,6 +80,12 @@ struct userfaultfd_ctx {
>
> extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long
> reason);
>
> +/* VMA userfaultfd operations */
> +struct vm_uffd_ops {
> + /* Checks if a VMA can support userfaultfd */
> + bool (*can_userfault)(struct vm_area_struct *vma, vm_flags_t vm_flags);
> +};
> +
> /* A combined operation mode + behavior flags. */
> typedef unsigned int __bitwise uffd_flags_t;
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 51273baec9e5..909131910c43 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -4797,6 +4797,24 @@ static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
> return 0;
> }
>
> +#ifdef CONFIG_USERFAULTFD
> +static bool hugetlb_can_userfault(struct vm_area_struct *vma,
> + vm_flags_t vm_flags)
> +{
> + /*
> + * If user requested uffd-wp but not enabled pte markers for
> + * uffd-wp, then hugetlb is not supported.
> + */
> + if (!uffd_supports_wp_marker() && (vm_flags & VM_UFFD_WP))
> + return false;
IMHO we don't need to duplicate this check in every vm_uffd_ops driver. It
may also be unnecessary to make the driver aware of what role pte markers
play here, because pte markers are needed for all page cache file systems
anyway; there should be no outliers. Instead we can let can_userfault()
report whether the driver generically supports userfaultfd, leaving the
detailed checks to core mm.

I understand you also wanted to make anon a driver, so this check won't
apply to anon. However IMHO anon is special enough that we can still keep
this in the generic path.
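
To be a bit more explicit, below is roughly what I had in mind -- completely
untested, just a sketch reusing the helpers from this patch, with the wp
marker special case kept in the generic path:

bool vma_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags,
		       bool wp_async)
{
	const struct vm_uffd_ops *ops = vma_uffd_ops(vma);

	/* only VMAs that implement vm_uffd_ops are supported */
	if (!ops)
		return false;

	vm_flags &= __VM_UFFD_FLAGS;

	if (vma->vm_flags & VM_DROPPABLE)
		return false;

	if (wp_async && (vm_flags == VM_UFFD_WP))
		return true;

	/*
	 * If user requested uffd-wp but pte markers are not enabled for
	 * uffd-wp, only anonymous memory can support it.
	 */
	if (!uffd_supports_wp_marker() && (vm_flags & VM_UFFD_WP) &&
	    !vma_is_anonymous(vma))
		return false;

	return ops->can_userfault(vma, vm_flags);
}

Then hugetlb_can_userfault() and shmem_can_userfault() can simply return
true, since all they need to report is that the memory type supports
userfaultfd at all.
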
> + return true;
> +}
> +
> +static const struct vm_uffd_ops hugetlb_uffd_ops = {
> + .can_userfault = hugetlb_can_userfault,
> +};
> +#endif
> +
> /*
> * When a new function is introduced to vm_operations_struct and added
> * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
> @@ -4810,6 +4828,9 @@ const struct vm_operations_struct hugetlb_vm_ops = {
> .close = hugetlb_vm_op_close,
> .may_split = hugetlb_vm_op_split,
> .pagesize = hugetlb_vm_op_pagesize,
> +#ifdef CONFIG_USERFAULTFD
> + .uffd_ops = &hugetlb_uffd_ops,
> +#endif
> };
>
> static pte_t make_huge_pte(struct vm_area_struct *vma, struct folio *folio,
> diff --git a/mm/shmem.c b/mm/shmem.c
> index ec6c01378e9d..9b82cda271c4 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -5290,6 +5290,23 @@ static const struct super_operations shmem_ops = {
> #endif
> };
>
> +#ifdef CONFIG_USERFAULTFD
> +static bool shmem_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags)
> +{
> + /*
> + * If user requested uffd-wp but not enabled pte markers for
> + * uffd-wp, then shmem is not supported.
> + */
> + if (!uffd_supports_wp_marker() && (vm_flags & VM_UFFD_WP))
> + return false;
> + return true;
> +}
> +
> +static const struct vm_uffd_ops shmem_uffd_ops = {
> + .can_userfault = shmem_can_userfault,
> +};
> +#endif
> +
> static const struct vm_operations_struct shmem_vm_ops = {
> .fault = shmem_fault,
> .map_pages = filemap_map_pages,
> @@ -5297,6 +5314,9 @@ static const struct vm_operations_struct shmem_vm_ops = {
> .set_policy = shmem_set_policy,
> .get_policy = shmem_get_policy,
> #endif
> +#ifdef CONFIG_USERFAULTFD
> + .uffd_ops = &shmem_uffd_ops,
> +#endif
> };
>
> static const struct vm_operations_struct shmem_anon_vm_ops = {
> @@ -5306,6 +5326,9 @@ static const struct vm_operations_struct shmem_anon_vm_ops = {
> .set_policy = shmem_set_policy,
> .get_policy = shmem_get_policy,
> #endif
> +#ifdef CONFIG_USERFAULTFD
> + .uffd_ops = &shmem_uffd_ops,
> +#endif
> };
>
> int shmem_init_fs_context(struct fs_context *fc)
> diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> index 786f0a245675..d035f5e17f07 100644
> --- a/mm/userfaultfd.c
> +++ b/mm/userfaultfd.c
> @@ -34,6 +34,25 @@ struct mfill_state {
> pmd_t *pmd;
> };
>
> +static bool anon_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags)
> +{
> + /* anonymous memory does not support MINOR mode */
> + if (vm_flags & VM_UFFD_MINOR)
> + return false;
> + return true;
> +}
> +
> +static const struct vm_uffd_ops anon_uffd_ops = {
> + .can_userfault = anon_can_userfault,
> +};
> +
> +static const struct vm_uffd_ops *vma_uffd_ops(struct vm_area_struct *vma)
> +{
> + if (vma_is_anonymous(vma))
> + return &anon_uffd_ops;
> + return vma->vm_ops ? vma->vm_ops->uffd_ops : NULL;
> +}
> +
> static __always_inline
> bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)
> {
> @@ -2019,13 +2038,15 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
> bool vma_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags,
> bool wp_async)
> {
> - vm_flags &= __VM_UFFD_FLAGS;
> + const struct vm_uffd_ops *ops = vma_uffd_ops(vma);
>
> - if (vma->vm_flags & VM_DROPPABLE)
> + /* only VMAs that implement vm_uffd_ops are supported */
> + if (!ops)
> return false;
>
> - if ((vm_flags & VM_UFFD_MINOR) &&
> - (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
> + vm_flags &= __VM_UFFD_FLAGS;
> +
> + if (vma->vm_flags & VM_DROPPABLE)
> return false;
>
> /*
> @@ -2035,18 +2056,8 @@ bool vma_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags,
> if (wp_async && (vm_flags == VM_UFFD_WP))
> return true;
>
> - /*
> - * If user requested uffd-wp but not enabled pte markers for
> - * uffd-wp, then shmem & hugetlbfs are not supported but only
> - * anonymous.
> - */
> - if (!uffd_supports_wp_marker() && (vm_flags & VM_UFFD_WP) &&
> - !vma_is_anonymous(vma))
> - return false;
> -
> /* By default, allow any of anon|shmem|hugetlb */
> - return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
> - vma_is_shmem(vma);
> + return ops->can_userfault(vma, vm_flags);
> }
>
> static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
> --
> 2.51.0
>
--
Peter Xu