> From: Nicolin Chen <nicol...@nvidia.com>
> Sent: Friday, May 9, 2025 11:03 AM
> 
> +/*
> + * Kernel driver must first use the for-driver helpers to register an mmappable
> + * MMIO region to the iommufd core to allocate an offset. Then, it should report
> + * to user space this offset and the length of the MMIO region for mmap syscall,
> + * via a prior IOMMU_VIOMMU_ALLOC ioctl.
> + */

this comment better suits _iommufd_alloc_mmap()

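For readers of the archive, the flow that comment describes looks roughly like
the following from user space. This is only a sketch: mmap_offset/mmap_length
stand in for whatever the driver reports back through its IOMMU_VIOMMU_ALLOC
driver data, not real uAPI field names.

#include <linux/types.h>
#include <sys/mman.h>

/*
 * Hypothetical helper: map the MMIO window that the kernel driver
 * registered with the iommufd core.  mmap_offset/mmap_length come from
 * the driver-specific output of IOMMU_VIOMMU_ALLOC (placeholder names).
 */
void *map_viommu_mmio(int iommufd, __u64 mmap_offset, __u64 mmap_length)
{
	/* Must be MAP_SHARED and non-executable per iommufd_fops_mmap() */
	void *mmio = mmap(NULL, mmap_length, PROT_READ | PROT_WRITE,
			  MAP_SHARED, iommufd, mmap_offset);

	return mmio == MAP_FAILED ? NULL : mmio;
}
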
> +static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
> +{
> +     struct iommufd_ctx *ictx = filp->private_data;
> +     size_t length = vma->vm_end - vma->vm_start;
> +     struct iommufd_mmap *immap;
> +     int rc;
> +
> +     if (!PAGE_ALIGNED(length))
> +             return -EINVAL;
> +     if (!(vma->vm_flags & VM_SHARED))
> +             return -EINVAL;
> +     if (vma->vm_flags & VM_EXEC)
> +             return -EPERM;
> +
> +     /* vma->vm_pgoff carries an index to an mtree entry (immap) */
> +     immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff);
> +     if (!immap)
> +             return -ENXIO;
> +     if (length >> PAGE_SHIFT != immap->num_pfns)
> +             return -ENXIO;
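Also for the record, my mental model of how the index gets into mt_mmap in the
first place is roughly the following. This is only an illustration of the
scheme, not the actual _iommufd_alloc_mmap() from this series, which is not
quoted here:

/*
 * Illustration only.  The allocator reserves num_pfns consecutive
 * indexes in ictx->mt_mmap and hands the start index, shifted to a
 * byte offset, back to the driver so it can be reported to user space.
 * iommufd_fops_mmap() then finds the entry again via vma->vm_pgoff.
 */
static int example_alloc_mmap(struct iommufd_ctx *ictx,
			      struct iommufd_mmap *immap,
			      unsigned long *offset)
{
	unsigned long startp;
	int rc;

	/* Keep startp << PAGE_SHIFT from overflowing the byte offset */
	rc = mtree_alloc_range(&ictx->mt_mmap, &startp, immap,
			       immap->num_pfns, 0, ULONG_MAX >> PAGE_SHIFT,
			       GFP_KERNEL);
	if (rc)
		return rc;

	*offset = startp << PAGE_SHIFT;
	return 0;
}
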
> +
> +     vma->vm_pgoff = 0;
> +     vma->vm_private_data = immap;
> +     vma->vm_ops = &iommufd_vma_ops;
> +     vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
> +     vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
> +
> +     rc = remap_pfn_range(vma, vma->vm_start, immap->base_pfn, length,
> +                          vma->vm_page_prot);
> +     if (!rc) /* vm_ops.open won't be called for mmap itself. */
> +             refcount_inc(&immap->owner->users);
> +     return rc;

Let's add some words about this refcount handling in the commit msg.
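
The way I read it: mmap itself must take the users reference explicitly
because .open is only invoked when an existing VMA is duplicated or split
(e.g. across fork), and the open/close pair then keeps the count balanced for
the lifetime of every VMA. Below is a sketch of the vm_ops side I am assuming;
only iommufd_vma_ops itself appears in this hunk, so the handler names and
bodies are guesses rather than the patch's code:

/*
 * Sketch of the paired handlers assumed above.  Every VMA that still
 * references the immap pins the owning object via owner->users, so the
 * object cannot go away while its MMIO region is mapped.
 */
static void example_vma_open(struct vm_area_struct *vma)
{
	struct iommufd_mmap *immap = vma->vm_private_data;

	/* Called on VMA duplication/split; take another owner reference */
	refcount_inc(&immap->owner->users);
}

static void example_vma_close(struct vm_area_struct *vma)
{
	struct iommufd_mmap *immap = vma->vm_private_data;

	/* Drops the reference taken in mmap or in .open */
	refcount_dec(&immap->owner->users);
}

static const struct vm_operations_struct iommufd_vma_ops = {
	.open = example_vma_open,
	.close = example_vma_close,
};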

Reviewed-by: Kevin Tian <kevin.t...@intel.com>
