On Wed, Jan 27, 2021 at 03:00:53PM -0500, Chuck Lever wrote: > From: Isaac J. Manjarres <[email protected]> > > Implement the map_sg io-pgtable op for the ARM LPAE io-pgtable > code, so that IOMMU drivers can call it when they need to map > a scatter-gather list. > > Signed-off-by: Isaac J. Manjarres <[email protected]> > Tested-by: Sai Prakash Ranjan <[email protected]> > Signed-off-by: Chuck Lever <[email protected]> > --- > drivers/iommu/io-pgtable-arm.c | 86 > ++++++++++++++++++++++++++++++++++++++++ > drivers/iommu/iommu.c | 12 +++--- > include/linux/iommu.h | 8 ++++ > 3 files changed, 101 insertions(+), 5 deletions(-) > > diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c > index 87def58e79b5..0c11529442b8 100644 > --- a/drivers/iommu/io-pgtable-arm.c > +++ b/drivers/iommu/io-pgtable-arm.c > @@ -473,6 +473,91 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, > unsigned long iova, > return ret; > } > > +static int arm_lpae_map_by_pgsize(struct io_pgtable_ops *ops, > + unsigned long iova, phys_addr_t paddr, > + size_t size, int iommu_prot, gfp_t gfp, > + size_t *mapped) > +{ > + struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); > + struct io_pgtable_cfg *cfg = &data->iop.cfg; > + arm_lpae_iopte *ptep = data->pgd; > + int ret, lvl = data->start_level; > + arm_lpae_iopte prot = arm_lpae_prot_to_pte(data, iommu_prot); > + unsigned int min_pagesz = 1 << __ffs(cfg->pgsize_bitmap); > + long iaext = (s64)(iova + size - 1) >> cfg->ias; > + size_t pgsize; > + > + if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { > + pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz > 0x%x\n", > + iova, &paddr, size, min_pagesz); > + return -EINVAL; > + } > + > + if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) > + iaext = ~iaext; > + if (WARN_ON(iaext || (paddr + size - 1) >> cfg->oas)) > + return -ERANGE; > + > + while (size) { > + pgsize = iommu_pgsize(cfg->pgsize_bitmap, iova | paddr, size); > + ret = __arm_lpae_map(data, iova, 
paddr, pgsize, prot, lvl, ptep, > + gfp); > + if (ret) > + return ret; > + > + iova += pgsize; > + paddr += pgsize; > + *mapped += pgsize; > + size -= pgsize; > + } > + > + return 0; > +} > + > +static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova, > + struct scatterlist *sg, unsigned int nents, > + int iommu_prot, gfp_t gfp, size_t *mapped) > +{ > + > + size_t len = 0; > + unsigned int i = 0; > + int ret; > + phys_addr_t start; > + > + *mapped = 0; > + > + /* If no access, then nothing to do */ > + if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) > + return 0; > + > + while (i <= nents) { > + phys_addr_t s_phys = sg_phys(sg); > + > + if (len && s_phys != start + len) { > + ret = arm_lpae_map_by_pgsize(ops, iova + *mapped, start, > + len, iommu_prot, gfp, > + mapped); > + > + if (ret) > + return ret; > + > + len = 0; > + } > + > + if (len) { > + len += sg->length; > + } else { > + len = sg->length; > + start = s_phys; > + } > + > + if (++i < nents) > + sg = sg_next(sg); > + } > + > + return 0; > +}
Although I really like the idea of reducing the layering here, I think we need to figure out a way to reduce the amount of boiler-plate that ends up in the pgtable code. Otherwise it's pretty unmaintainable.

Will

_______________________________________________
iommu mailing list
[email protected]
https://lists.linuxfoundation.org/mailman/listinfo/iommu
