On Tue, Apr 30, 2024 at 10:57:06PM +0800, Lu Baolu wrote:
> +static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
> +                                    size_t count, loff_t *ppos)
> +{
> +     size_t fault_size = sizeof(struct iommu_hwpt_pgfault);
> +     struct iommufd_fault *fault = filep->private_data;
> +     struct iommu_hwpt_pgfault data;
> +     struct iommufd_device *idev;
> +     struct iopf_group *group;
> +     struct iopf_fault *iopf;
> +     size_t done = 0;
> +     int rc;
> +
> +     if (*ppos || count % fault_size)
> +             return -ESPIPE;
> +
> +     mutex_lock(&fault->mutex);
> +     while (!list_empty(&fault->deliver) && count > done) {
> +             group = list_first_entry(&fault->deliver,
> +                                      struct iopf_group, node);
> +
> +             if (list_count_nodes(&group->faults) * fault_size > count - done)
> +                     break;

Can this list_count be precomputed when we build the fault group?
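
Something like the below is what I had in mind - just a sketch, the
fault_count field name is made up and would have to be added to
struct iopf_group:

	/* in struct iopf_group, alongside the faults list */
	size_t fault_count;	/* number of entries on @faults, set when the group is built */

	/* wherever the group is assembled, count as each fault is linked in */
	list_add(&iopf->list, &group->faults);
	group->fault_count++;

	/* then the read side doesn't have to walk the list on every call */
	if (group->fault_count * fault_size > count - done)
		break;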

> +
> +             idev = group->attach_handle->idev;
> +             if (!idev)
> +                     break;

This check should be done before adding the fault to the linked
list. See my other note about the race.
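
Roughly, whatever path enqueues the group should validate it before it
becomes visible to readers, e.g. (sketch only, the error code is a
placeholder):

	if (!group->attach_handle->idev)
		return -ENODEV;

	mutex_lock(&fault->mutex);
	list_add_tail(&group->node, &fault->deliver);
	mutex_unlock(&fault->mutex);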

> +
> +             rc = xa_alloc(&idev->faults, &group->cookie, group,
> +                           xa_limit_32b, GFP_KERNEL);
> +             if (rc)
> +                     break;

This error handling is not quite right: if done == 0 then this should
return rc.
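
i.e. something along these lines (just a sketch of the pattern, with rc
initialized to 0 at the top and a made-up label name):

		rc = xa_alloc(&idev->faults, &group->cookie, group,
			      xa_limit_32b, GFP_KERNEL);
		if (rc)
			goto out;
		/* ... rest of the loop body ... */
	}
out:
	mutex_unlock(&fault->mutex);
	/* rc starts at 0, so a partial read still reports its progress */
	return done == 0 ? rc : done;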


> +
> +             list_for_each_entry(iopf, &group->faults, list) {
> +                     iommufd_compose_fault_message(&iopf->fault,
> +                                                   &data, idev,
> +                                                   group->cookie);
> +                     rc = copy_to_user(buf + done, &data, fault_size);
> +                     if (rc) {
> +                             xa_erase(&idev->faults, group->cookie);
> +                             break;

Same here

(same comment on the write side too)

Jason
