On Mon, Sep 18, 2023 at 01:51:43PM +0200, Niklas Schnelle wrote:
> Pull out the sync operation from viommu_map_pages() by implementing
> ops->iotlb_sync_map. This allows the common IOMMU code to map multiple
> elements of an sg with a single sync (see iommu_map_sg()). Furthermore,
> it is also a requirement for IOMMU_CAP_DEFERRED_FLUSH.
> 
> Link: https://lore.kernel.org/lkml/20230726111433.1105665-1-schne...@linux.ibm.com/
> Signed-off-by: Niklas Schnelle <schne...@linux.ibm.com>

Reviewed-by: Jean-Philippe Brucker <jean-phili...@linaro.org>
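
For anyone skimming: the payoff is in the generic sg path, which can now
queue one MAP request per element and pay a single hypervisor round-trip
at the end. Very roughly (a simplified sketch of the iommu_map_sg() flow
in drivers/iommu/iommu.c, not the literal code):

	ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			     struct scatterlist *sg, unsigned int nents,
			     int prot, gfp_t gfp)
	{
		/* with this patch, each map only queues a request */
		for_each_sg(...)
			ret = __iommu_map(domain, iova + mapped, ...);

		/* ... and one sync covers the whole scatterlist */
		if (ops->iotlb_sync_map)
			ret = ops->iotlb_sync_map(domain, iova, mapped);
		...
	}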

This must be merged after "iommu/dma: s390 DMA API conversion and
optimized IOTLB flushing" because of the updated iotlb_sync_map()
prototype.
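
If I remember correctly, that series changes the callback to return an
error, which viommu_iotlb_sync_map() below relies on:

	-	void (*iotlb_sync_map)(struct iommu_domain *domain,
	-			       unsigned long iova, size_t size);
	+	int (*iotlb_sync_map)(struct iommu_domain *domain,
	+			      unsigned long iova, size_t size);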

Thanks,
Jean

> ---
>  drivers/iommu/virtio-iommu.c | 17 ++++++++++++++++-
>  1 file changed, 16 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
> index 17dcd826f5c2..3649586f0e5c 100644
> --- a/drivers/iommu/virtio-iommu.c
> +++ b/drivers/iommu/virtio-iommu.c
> @@ -189,6 +189,12 @@ static int viommu_sync_req(struct viommu_dev *viommu)
>       int ret;
>       unsigned long flags;
>  
> +     /*
> +      * .iotlb_sync_map and .flush_iotlb_all may be called before the viommu
> +      * is initialized, e.g. via iommu_create_device_direct_mappings()
> +      */
> +     if (!viommu)
> +             return 0;
>       spin_lock_irqsave(&viommu->request_lock, flags);
>       ret = __viommu_sync_req(viommu);
>       if (ret)
> @@ -843,7 +849,7 @@ static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
>                       .flags          = cpu_to_le32(flags),
>               };
>  
> -             ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
> +             ret = viommu_add_req(vdomain->viommu, &map, sizeof(map));
>               if (ret) {
>                       viommu_del_mappings(vdomain, iova, end);
>                       return ret;
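
With this hunk the MAP request is only queued; nothing reaches the
device until the core calls .iotlb_sync_map below, so a burst of maps
collapses into one kick:

	viommu_map_pages()        /* viommu_add_req(): queue only */
	viommu_map_pages()        /* ... one per sg element ... */
	viommu_iotlb_sync_map()   /* single viommu_sync_req() */
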
> @@ -912,6 +918,14 @@ static void viommu_iotlb_sync(struct iommu_domain *domain,
>       viommu_sync_req(vdomain->viommu);
>  }
>  
> +static int viommu_iotlb_sync_map(struct iommu_domain *domain,
> +                              unsigned long iova, size_t size)
> +{
> +     struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +     return viommu_sync_req(vdomain->viommu);
> +}
> +
>  static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
>  {
>       struct iommu_resv_region *entry, *new_entry, *msi = NULL;
> @@ -1058,6 +1072,7 @@ static struct iommu_ops viommu_ops = {
>               .unmap_pages            = viommu_unmap_pages,
>               .iova_to_phys           = viommu_iova_to_phys,
>               .iotlb_sync             = viommu_iotlb_sync,
> +             .iotlb_sync_map         = viommu_iotlb_sync_map,
>               .free                   = viommu_domain_free,
>       }
>  };
> 
> -- 
> 2.39.2
> 