Implement the map_pages() callback for the ARM LPAE io-pgtable
format, so that a physically contiguous range of pages of the same
size can be mapped in a single call rather than one call per page.

Signed-off-by: Isaac J. Manjarres <isa...@codeaurora.org>
Suggested-by: Will Deacon <w...@kernel.org>
---
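A rough sketch of how a consumer of io_pgtable_ops might drive the new
callback (the helper below is purely illustrative and not part of this
patch; only the ->map_pages() and ->unmap_pages() signatures are taken
from this series):

    static int example_map_range(struct io_pgtable_ops *ops,
                                 unsigned long iova, phys_addr_t paddr,
                                 size_t pgsize, size_t pgcount)
    {
            size_t mapped = 0;
            int ret;

            /* One call installs up to pgcount leaf entries of size pgsize */
            ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount,
                                 IOMMU_READ | IOMMU_WRITE, GFP_KERNEL,
                                 &mapped);

            /* On failure, 'mapped' reports how many bytes were installed */
            if (ret && mapped)
                    ops->unmap_pages(ops, iova, pgsize, mapped / pgsize,
                                     NULL);

            return ret;
    }
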
 drivers/iommu/io-pgtable-arm.c | 95 +++++++++++++++++++++++++++++++---
 1 file changed, 88 insertions(+), 7 deletions(-)

diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index fc63d57b8037..b8464305f1c2 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -355,20 +355,35 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
 }
 
 static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
-                         phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
-                         int lvl, arm_lpae_iopte *ptep, gfp_t gfp)
+                         phys_addr_t paddr, size_t size, size_t pgcount,
+                         arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
+                         gfp_t gfp, size_t *mapped)
 {
        arm_lpae_iopte *cptep, pte;
        size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
        size_t tblsz = ARM_LPAE_GRANULE(data);
        struct io_pgtable_cfg *cfg = &data->iop.cfg;
+       int ret = 0;
 
        /* Find our entry at the current level */
        ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
 
-       /* If we can install a leaf entry at this level, then do so */
-       if (size == block_size)
-               return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
+       /* If we can install leaf entries at this level, then do so */
+       if (size == block_size) {
+               while (pgcount--) {
+                       ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
+                       if (ret)
+                               return ret;
+
+                       iova += size;
+                       paddr += size;
+                       ptep++;
+                       if (mapped)
+                               *mapped += size;
+               }
+
+               return ret;
+       }
 
        /* We can't allocate tables at the final level */
        if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
@@ -397,7 +412,8 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
        }
 
        /* Rinse, repeat */
-       return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
+       return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1, cptep,
+                             gfp, mapped);
 }
 
 static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
@@ -487,7 +503,71 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
                return 0;
 
        prot = arm_lpae_prot_to_pte(data, iommu_prot);
-       ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp);
+       ret = __arm_lpae_map(data, iova, paddr, size, 1, prot, lvl, ptep, gfp,
+                            NULL);
+       /*
+        * Synchronise all PTE updates for the new mapping before there's
+        * a chance for anything to kick off a table walk for the new iova.
+        */
+       wmb();
+
+       return ret;
+}
+
+static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+                             phys_addr_t paddr, size_t pgsize, size_t pgcount,
+                             int iommu_prot, gfp_t gfp, size_t *mapped)
+{
+       struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+       struct io_pgtable_cfg *cfg = &data->iop.cfg;
+       arm_lpae_iopte *ptep = data->pgd;
+       int ret, lvl = data->start_level, last_lvl;
+       arm_lpae_iopte prot;
+       long iaext = (s64)iova >> cfg->ias;
+       size_t table_size, pages, tbl_offset, max_entries;
+
+       /* If no access, then nothing to do */
+       if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+               return 0;
+
+       if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
+               return -EINVAL;
+
+       if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
+               iaext = ~iaext;
+       if (WARN_ON(iaext || paddr >> cfg->oas))
+               return -ERANGE;
+
+       prot = arm_lpae_prot_to_pte(data, iommu_prot);
+
+       /*
+        * Calculating the page table size here avoids the case where a
+        * range of pages is mapped at the same level but spans more than
+        * one table. Allowing that would complicate the chunking logic,
+        * since __arm_lpae_map() assumes it stays within a single table.
+        */
+       last_lvl = ARM_LPAE_BLOCK_SIZE_LVL(pgsize, data);
+
+       if (last_lvl == data->start_level)
+               table_size = ARM_LPAE_PGD_SIZE(data);
+       else
+               table_size = ARM_LPAE_GRANULE(data);
+
+       max_entries = table_size / sizeof(*ptep);
+
+       while (pgcount) {
+               tbl_offset = ARM_LPAE_LVL_IDX(iova, last_lvl, data);
+               pages = min_t(size_t, pgcount, max_entries - tbl_offset);
+               ret = __arm_lpae_map(data, iova, paddr, pgsize, pages, prot,
+                                    lvl, ptep, gfp, mapped);
+               if (ret)
+                       break;
+
+               iova += pages * pgsize;
+               paddr += pages * pgsize;
+               pgcount -= pages;
+       }
+
        /*
         * Synchronise all PTE updates for the new mapping before there's
         * a chance for anything to kick off a table walk for the new iova.
@@ -833,6 +913,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
 
        data->iop.ops = (struct io_pgtable_ops) {
                .map            = arm_lpae_map,
+               .map_pages      = arm_lpae_map_pages,
                .unmap          = arm_lpae_unmap,
                .unmap_pages    = arm_lpae_unmap_pages,
                .iova_to_phys   = arm_lpae_iova_to_phys,
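
A worked example of the clamping arithmetic in arm_lpae_map_pages(),
assuming a 4K granule (4096 / 8-byte PTEs = 512 entries per last-level
table): a request for 1000 pages whose starting IOVA falls at index 200
within its table is split into chunks of 312, 512 and 176 pages, so each
call into __arm_lpae_map() stays within one table. A standalone model of
the split (userspace C, not kernel code):

    #include <stdio.h>

    int main(void)
    {
            size_t max_entries = 512; /* 4K granule / 8-byte PTEs */
            size_t pgcount = 1000;    /* pages requested */
            size_t tbl_offset = 200;  /* index of iova in first table */

            while (pgcount) {
                    size_t pages = pgcount < max_entries - tbl_offset ?
                                   pgcount : max_entries - tbl_offset;

                    printf("map %zu pages in this table\n", pages);
                    pgcount -= pages;
                    tbl_offset = 0; /* later chunks start table-aligned */
            }
            return 0;
    }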
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project
