When IOPF is enabled, pages are pinned and mapped on demand; add a
bitmap to struct vfio_dma to track which pages have been IOMMU-mapped.

Signed-off-by: Shenming Lu <lushenm...@huawei.com>
---
 drivers/vfio/vfio_iommu_type1.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 0b4dedaa9128..f1d4de5ab094 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -95,6 +95,7 @@ struct vfio_dma {
        struct task_struct      *task;
        struct rb_root          pfn_list;       /* Ex-user pinned pfn list */
        unsigned long           *bitmap;
+       unsigned long           *iommu_mapped_bitmap;   /* IOPF mapped bitmap */
 };
 
 struct vfio_group {
@@ -143,6 +144,8 @@ struct vfio_regions {
 #define DIRTY_BITMAP_PAGES_MAX  ((u64)INT_MAX)
 #define DIRTY_BITMAP_SIZE_MAX   DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX)
 
+#define IOMMU_MAPPED_BITMAP_BYTES(n) DIRTY_BITMAP_BYTES(n)
+
 static int put_pfn(unsigned long pfn, int prot);
 
 static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
@@ -949,6 +952,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
        vfio_unlink_dma(iommu, dma);
        put_task_struct(dma->task);
        vfio_dma_bitmap_free(dma);
+       kvfree(dma->iommu_mapped_bitmap);
        kfree(dma);
        iommu->dma_avail++;
 }
@@ -1354,6 +1358,14 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                goto out_unlock;
        }
 
+       dma->iommu_mapped_bitmap = kvzalloc(IOMMU_MAPPED_BITMAP_BYTES(size / PAGE_SIZE),
+                                           GFP_KERNEL);
+       if (!dma->iommu_mapped_bitmap) {
+               ret = -ENOMEM;
+               kfree(dma);
+               goto out_unlock;
+       }
+
        iommu->dma_avail--;
        dma->iova = iova;
        dma->vaddr = vaddr;
-- 
2.19.1

Reply via email to