Add an argument to the kAPI that unmaps an IOVA from the attached
domains, so that it can also receive a dirty bitmap.

When an iommufd_dirty_data bitmap is passed, call the special dirty
unmap (iommu_unmap_read_dirty()) instead of the regular unmap. The
bitmap data is iterated in IOVA chunks, similarly to
read_and_clear_dirty(), using the previously added
iommufd_dirty_iter* helper functions.

Signed-off-by: Joao Martins <joao.m.mart...@oracle.com>
---
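Note: for context, here is a rough sketch (not part of the patch) of how a
dirty-aware unmap caller might drive the new argument. Everything other than
iommufd_dirty_data::iova, ::page_size and iopt_unmap_iova() itself -- i.e.
the remaining field names and the unmap_*/user_bitmap variables -- is an
illustrative assumption only:

        struct iommufd_dirty_data dirty = {};

        /* Range being unmapped and the user bitmap granularity */
        dirty.iova = unmap_iova;
        dirty.length = unmap_length;    /* assumed field */
        dirty.page_size = SZ_4K;
        dirty.data = user_bitmap;       /* assumed field: __u64 __user * */

        /* Unmap the exact range and collect which pages were dirtied */
        rc = iopt_unmap_iova(&ioas->iopt, unmap_iova, unmap_length, &dirty);

        /* Callers that do not need dirty data keep passing NULL */
        rc = iopt_unmap_iova(&ioas->iopt, unmap_iova, unmap_length, NULL);
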
 drivers/iommu/iommufd/io_pagetable.c    | 13 ++--
 drivers/iommu/iommufd/io_pagetable.h    |  3 +-
 drivers/iommu/iommufd/ioas.c            |  2 +-
 drivers/iommu/iommufd/iommufd_private.h |  4 +-
 drivers/iommu/iommufd/pages.c           | 79 +++++++++++++++++++++----
 drivers/iommu/iommufd/vfio_compat.c     |  2 +-
 6 files changed, 80 insertions(+), 23 deletions(-)

diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
index 835b5040fce9..6f4117c629d4 100644
--- a/drivers/iommu/iommufd/io_pagetable.c
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -542,13 +542,14 @@ struct iopt_pages *iopt_get_pages(struct io_pagetable *iopt, unsigned long iova,
 }
 
 static int __iopt_unmap_iova(struct io_pagetable *iopt, struct iopt_area *area,
-                            struct iopt_pages *pages)
+                            struct iopt_pages *pages,
+                            struct iommufd_dirty_data *bitmap)
 {
        /* Drivers have to unpin on notification. */
        if (WARN_ON(atomic_read(&area->num_users)))
                return -EBUSY;
 
-       iopt_area_unfill_domains(area, pages);
+       iopt_area_unfill_domains(area, pages, bitmap);
        WARN_ON(atomic_read(&area->num_users));
        iopt_abort_area(area);
        iopt_put_pages(pages);
@@ -560,12 +561,13 @@ static int __iopt_unmap_iova(struct io_pagetable *iopt, struct iopt_area *area,
  * @iopt: io_pagetable to act on
  * @iova: Starting iova to unmap
  * @length: Number of bytes to unmap
+ * @bitmap: Bitmap to receive the dirty state of the unmapped IOVAs, or NULL
  *
  * The requested range must exactly match an existing range.
  * Splitting/truncating IOVA mappings is not allowed.
  */
 int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
-                   unsigned long length)
+                   unsigned long length, struct iommufd_dirty_data *bitmap)
 {
        struct iopt_pages *pages;
        struct iopt_area *area;
@@ -590,7 +592,8 @@ int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
        area->pages = NULL;
        up_write(&iopt->iova_rwsem);
 
-       rc = __iopt_unmap_iova(iopt, area, pages);
+       rc = __iopt_unmap_iova(iopt, area, pages, bitmap);
+
        up_read(&iopt->domains_rwsem);
        return rc;
 }
@@ -614,7 +617,7 @@ int iopt_unmap_all(struct io_pagetable *iopt)
                area->pages = NULL;
                up_write(&iopt->iova_rwsem);
 
-               rc = __iopt_unmap_iova(iopt, area, pages);
+               rc = __iopt_unmap_iova(iopt, area, pages, NULL);
                if (rc)
                        goto out_unlock_domains;
 
diff --git a/drivers/iommu/iommufd/io_pagetable.h b/drivers/iommu/iommufd/io_pagetable.h
index c8b6a60ff24c..c8baab25ab08 100644
--- a/drivers/iommu/iommufd/io_pagetable.h
+++ b/drivers/iommu/iommufd/io_pagetable.h
@@ -48,7 +48,8 @@ struct iopt_area {
 };
 
 int iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages);
-void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages);
+void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages,
+                             struct iommufd_dirty_data *bitmap);
 
 int iopt_area_fill_domain(struct iopt_area *area, struct iommu_domain *domain);
 void iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages,
diff --git a/drivers/iommu/iommufd/ioas.c b/drivers/iommu/iommufd/ioas.c
index 48149988c84b..19d6591aa005 100644
--- a/drivers/iommu/iommufd/ioas.c
+++ b/drivers/iommu/iommufd/ioas.c
@@ -243,7 +243,7 @@ int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd)
                        rc = -EOVERFLOW;
                        goto out_put;
                }
-               rc = iopt_unmap_iova(&ioas->iopt, cmd->iova, cmd->length);
+               rc = iopt_unmap_iova(&ioas->iopt, cmd->iova, cmd->length, NULL);
        }
 
 out_put:
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 4c12b4a8f1a6..3e3a97f623a1 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -47,8 +47,6 @@ int iopt_map_user_pages(struct io_pagetable *iopt, unsigned long *iova,
 int iopt_map_pages(struct io_pagetable *iopt, struct iopt_pages *pages,
                   unsigned long *dst_iova, unsigned long start_byte,
                   unsigned long length, int iommu_prot, unsigned int flags);
-int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
-                   unsigned long length);
 int iopt_unmap_all(struct io_pagetable *iopt);
 
 struct iommufd_dirty_data {
@@ -63,6 +61,8 @@ int iopt_set_dirty_tracking(struct io_pagetable *iopt,
 int iopt_read_and_clear_dirty_data(struct io_pagetable *iopt,
                                   struct iommu_domain *domain,
                                   struct iommufd_dirty_data *bitmap);
+int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
+                   unsigned long length, struct iommufd_dirty_data *bitmap);
 
 struct iommufd_dirty_iter {
        struct iommu_dirty_bitmap dirty;
diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
index 3fd39e0201f5..722c77cbbe3a 100644
--- a/drivers/iommu/iommufd/pages.c
+++ b/drivers/iommu/iommufd/pages.c
@@ -144,16 +144,64 @@ static void iommu_unmap_nofail(struct iommu_domain *domain, unsigned long iova,
        WARN_ON(ret != size);
 }
 
+static void iommu_unmap_read_dirty_nofail(struct iommu_domain *domain,
+                                         unsigned long iova, size_t size,
+                                         struct iommufd_dirty_data *bitmap,
+                                         struct iommufd_dirty_iter *iter)
+{
+       ssize_t ret = 0;
+
+       ret = iommufd_dirty_iter_init(iter, bitmap);
+       WARN_ON(ret);
+
+       for (; iommufd_dirty_iter_done(iter);
+            iommufd_dirty_iter_advance(iter)) {
+               ret = iommufd_dirty_iter_get(iter);
+               if (ret < 0)
+                       break;
+
+               ret = iommu_unmap_read_dirty(domain,
+                       iommufd_dirty_iova(iter),
+                       iommufd_dirty_iova_length(iter), &iter->dirty);
+
+               iommufd_dirty_iter_put(iter);
+
+               /*
+                * It is a logic error in this code or a driver bug
+                * if the IOMMU unmaps something other than exactly
+                * as requested.
+                */
+               if (ret != size) {
+                       WARN_ONCE(1, "unmapped %ld instead of %ld", ret, size);
+                       break;
+               }
+       }
+
+       iommufd_dirty_iter_free(iter);
+}
+
 static void iopt_area_unmap_domain_range(struct iopt_area *area,
                                         struct iommu_domain *domain,
                                         unsigned long start_index,
-                                        unsigned long last_index)
+                                        unsigned long last_index,
+                                        struct iommufd_dirty_data *bitmap)
 {
        unsigned long start_iova = iopt_area_index_to_iova(area, start_index);
 
-       iommu_unmap_nofail(domain, start_iova,
-                          iopt_area_index_to_iova_last(area, last_index) -
-                                  start_iova + 1);
+       if (bitmap) {
+               struct iommufd_dirty_iter iter;
+
+               iommu_dirty_bitmap_init(&iter.dirty, bitmap->iova,
+                                       __ffs(bitmap->page_size), NULL);
+
+               iommu_unmap_read_dirty_nofail(domain, start_iova,
+                       iopt_area_index_to_iova_last(area, last_index) -
+                                          start_iova + 1, bitmap, &iter);
+       } else {
+               iommu_unmap_nofail(domain, start_iova,
+                                  iopt_area_index_to_iova_last(area, last_index) -
+                                          start_iova + 1);
+       }
 }
 
 static struct iopt_area *iopt_pages_find_domain_area(struct iopt_pages *pages,
@@ -808,7 +856,8 @@ static bool interval_tree_fully_covers_area(struct rb_root_cached *root,
 static void __iopt_area_unfill_domain(struct iopt_area *area,
                                      struct iopt_pages *pages,
                                      struct iommu_domain *domain,
-                                     unsigned long last_index)
+                                     unsigned long last_index,
+                                     struct iommufd_dirty_data *bitmap)
 {
        unsigned long unmapped_index = iopt_area_index(area);
        unsigned long cur_index = unmapped_index;
@@ -821,7 +870,8 @@ static void __iopt_area_unfill_domain(struct iopt_area *area,
        if (interval_tree_fully_covers_area(&pages->domains_itree, area) ||
            interval_tree_fully_covers_area(&pages->users_itree, area)) {
                iopt_area_unmap_domain_range(area, domain,
-                                            iopt_area_index(area), last_index);
+                                            iopt_area_index(area),
+                                            last_index, bitmap);
                return;
        }
 
@@ -837,7 +887,7 @@ static void __iopt_area_unfill_domain(struct iopt_area *area,
                batch_from_domain(&batch, domain, area, cur_index, last_index);
                cur_index += batch.total_pfns;
                iopt_area_unmap_domain_range(area, domain, unmapped_index,
-                                            cur_index - 1);
+                                            cur_index - 1, bitmap);
                unmapped_index = cur_index;
                iopt_pages_unpin(pages, &batch, batch_index, cur_index - 1);
                batch_clear(&batch);
@@ -852,7 +902,8 @@ static void iopt_area_unfill_partial_domain(struct iopt_area *area,
                                            unsigned long end_index)
 {
        if (end_index != iopt_area_index(area))
-               __iopt_area_unfill_domain(area, pages, domain, end_index - 1);
+               __iopt_area_unfill_domain(area, pages, domain,
+                                         end_index - 1, NULL);
 }
 
 /**
@@ -891,7 +942,7 @@ void iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages,
                             struct iommu_domain *domain)
 {
        __iopt_area_unfill_domain(area, pages, domain,
-                                 iopt_area_last_index(area));
+                                 iopt_area_last_index(area), NULL);
 }
 
 /**
@@ -1004,7 +1055,7 @@ int iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages)
                        if (end_index != iopt_area_index(area))
                                iopt_area_unmap_domain_range(
                                        area, domain, iopt_area_index(area),
-                                       end_index - 1);
+                                       end_index - 1, NULL);
                } else {
                        iopt_area_unfill_partial_domain(area, pages, domain,
                                                        end_index);
@@ -1025,7 +1076,8 @@ int iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages)
  * Called during area destruction. This unmaps the iova's covered by all the
  * area's domains and releases the PFNs.
  */
-void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages)
+void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages,
+                             struct iommufd_dirty_data *bitmap)
 {
        struct io_pagetable *iopt = area->iopt;
        struct iommu_domain *domain;
@@ -1041,10 +1093,11 @@ void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages)
                if (domain != area->storage_domain)
                        iopt_area_unmap_domain_range(
                                area, domain, iopt_area_index(area),
-                               iopt_area_last_index(area));
+                               iopt_area_last_index(area), bitmap);
 
        interval_tree_remove(&area->pages_node, &pages->domains_itree);
-       iopt_area_unfill_domain(area, pages, area->storage_domain);
+       __iopt_area_unfill_domain(area, pages, area->storage_domain,
+                                 iopt_area_last_index(area), bitmap);
        area->storage_domain = NULL;
 out_unlock:
        mutex_unlock(&pages->mutex);
diff --git a/drivers/iommu/iommufd/vfio_compat.c b/drivers/iommu/iommufd/vfio_compat.c
index 5b196de00ff9..dbe39404a105 100644
--- a/drivers/iommu/iommufd/vfio_compat.c
+++ b/drivers/iommu/iommufd/vfio_compat.c
@@ -148,7 +148,7 @@ static int iommufd_vfio_unmap_dma(struct iommufd_ctx *ictx, unsigned int cmd,
        if (unmap.flags & VFIO_DMA_UNMAP_FLAG_ALL)
                rc = iopt_unmap_all(&ioas->iopt);
        else
-               rc = iopt_unmap_iova(&ioas->iopt, unmap.iova, unmap.size);
+               rc = iopt_unmap_iova(&ioas->iopt, unmap.iova, unmap.size, NULL);
        iommufd_put_object(&ioas->obj);
        return rc;
 }
-- 
2.17.2
