static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t size, int lvl,
- arm_lpae_iopte *ptep);
+ unsigned long iova, size_t size, size_t pgcount,
+ int lvl, arm_lpae_iopte *ptep);
static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
phys_addr_t paddr, arm_lpae_iopte prot,
@@ -289,7 +305,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
- if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) {
+ if (__arm_lpae_unmap(data, NULL, iova, sz, 1, lvl, tblp) != sz) {
WARN_ON(1);
return -EINVAL;
}
@@ -516,14 +532,14 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size,
arm_lpae_iopte blk_pte, int lvl,
- arm_lpae_iopte *ptep)
+ arm_lpae_iopte *ptep, size_t pgcount)
{
struct io_pgtable_cfg *cfg = &data->iop.cfg;
arm_lpae_iopte pte, *tablep;
phys_addr_t blk_paddr;
size_t tablesz = ARM_LPAE_GRANULE(data);
size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
- int i, unmap_idx = -1;
+ int i, unmap_idx_start = -1;
if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
return 0;
@@ -533,14 +549,14 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
return 0; /* Bytes unmapped */
if (size == split_sz)
- unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);
+ unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
blk_paddr = iopte_to_paddr(blk_pte, data);
pte = iopte_prot(blk_pte);
for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
/* Unmap! */
- if (i == unmap_idx)
+ if (i >= unmap_idx_start && i < (unmap_idx_start + pgcount))
continue;
__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
@@ -558,20 +574,24 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
return 0;
tablep = iopte_deref(pte, data);
- } else if (unmap_idx >= 0) {
- io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
- return size;
+ } else if (unmap_idx_start >= 0) {
+ for (i = 0; i < pgcount; i++) {
+ io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
+ iova += size;
+ }
+ return pgcount * size;
}
- return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep);
+ return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t size, int lvl,
- arm_lpae_iopte *ptep)
+ unsigned long iova, size_t size, size_t pgcount,
+ int lvl, arm_lpae_iopte *ptep)
{
arm_lpae_iopte pte;
+ size_t i;
struct io_pgtable *iop = &data->iop;
/* Something went horribly wrong and we ran out of page table */
@@ -585,11 +605,11 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
/* If the size matches this level, we're in the right place */
if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
- __arm_lpae_set_pte(ptep, 0, &iop->cfg);
+ __arm_lpae_clear_ptes(ptep, pgcount, &iop->cfg);
if (!iopte_leaf(pte, lvl, iop->fmt)) {
/* Also flush any partial walks */
- io_pgtable_tlb_flush_walk(iop, iova, size,
+ io_pgtable_tlb_flush_walk(iop, iova, pgcount * size,
ARM_LPAE_GRANULE(data));
ptep = iopte_deref(pte, data);
__arm_lpae_free_pgtable(data, lvl + 1, ptep);
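The multi-entry clear helper called a few lines up, __arm_lpae_clear_ptes(), is introduced by an earlier hunk that is not part of this excerpt. A minimal sketch of the behaviour the call appears to rely on, assuming the helper zeroes pgcount consecutive PTEs and, for a non-coherent page-table walker, syncs them back to memory via the pre-existing single-entry __arm_lpae_sync_pte(); the memset and the per-entry sync loop are illustrative, not quoted from the patch:

static void __arm_lpae_clear_ptes(arm_lpae_iopte *ptep, size_t pgcount,
				  struct io_pgtable_cfg *cfg)
{
	size_t i;

	/* Zero the contiguous run of PTEs being unmapped. */
	memset(ptep, 0, pgcount * sizeof(*ptep));

	/*
	 * Assumed: a non-coherent walker must observe the cleared entries
	 * before the TLB is invalidated, so sync each one back to memory
	 * (the real helper may batch this into a single sync).
	 */
	if (!cfg->coherent_walk)
		for (i = 0; i < pgcount; i++)
			__arm_lpae_sync_pte(&ptep[i], cfg);
}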
@@ -601,22 +621,25 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
*/
smp_wmb();
} else {
- io_pgtable_tlb_add_page(iop, gather, iova, size);
+ for (i = 0; i < pgcount; i++) {
+ io_pgtable_tlb_add_page(iop, gather, iova, size);
+ iova += size;
+ }
}
- return size;
+ return pgcount * size;
} else if (iopte_leaf(pte, lvl, iop->fmt)) {
/*
* Insert a table at the next level to map the old region,
* minus the part we want to unmap
*/
return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
- lvl + 1, ptep);
+ lvl + 1, ptep, pgcount);
}
/* Keep on walkin' */
ptep = iopte_deref(pte, data);
- return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep);
+ return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}
static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
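The final hunk below adds arm_lpae_unmap_pages() alongside the existing single-page arm_lpae_unmap(). As a hedged sketch of the caller-side difference (iova, gather and the 16-page run are illustrative values, and the hookup of the new callback in struct io_pgtable_ops is assumed to live elsewhere in the series):

	size_t unmapped;
	int i;

	/* Existing pattern: one full page-table walk per 4KiB page. */
	for (i = 0, unmapped = 0; i < 16; i++)
		unmapped += ops->unmap(ops, iova + i * SZ_4K, SZ_4K, gather);

	/* With this patch: a single walk can unmap the whole run. */
	unmapped = ops->unmap_pages(ops, iova, SZ_4K, 16, gather);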
@@ -635,7 +658,57 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
if (WARN_ON(iaext))
return 0;
- return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
+ return __arm_lpae_unmap(data, gather, iova, size, 1, data->start_level, ptep);
+}
+
+static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ arm_lpae_iopte *ptep = data->pgd;
+ long iaext = (s64)iova >> cfg->ias;
+ size_t unmapped = 0, unmapped_page;
+ int last_lvl = data->start_level;
+ size_t table_size, pages, tbl_offset, max_entries;
+
+ if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
+ return 0;
+
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
+ iaext = ~iaext;
+ if (WARN_ON(iaext))
+ return 0;
+
+ /*
+ * Calculating the page table size here helps avoid situations where
+ * a page range that is being unmapped is mapped at the same level but
+ * not covered by the same table. Allowing such a scenario would
+ * complicate the logic in arm_lpae_split_blk_unmap().
+ */
+ while (ARM_LPAE_BLOCK_SIZE(last_lvl, data) != pgsize)
+ last_lvl++;
+
+ table_size = last_lvl == data->start_level ? ARM_LPAE_PGD_SIZE(data) :
+ ARM_LPAE_GRANULE(data);
+ max_entries = table_size / sizeof(*ptep);
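A worked example of the sizing logic above (editorial illustration, assuming a typical multi-level configuration): with the 4KiB granule and pgsize == SZ_4K, the while loop advances last_lvl to the leaf level, where ARM_LPAE_BLOCK_SIZE() equals the granule, so table_size == ARM_LPAE_GRANULE(data) == 4096 and, with 8-byte arm_lpae_iopte entries, max_entries == 512; a 64KiB granule would give 8192 entries per table. A pgcount larger than the number of entries remaining in the current table therefore has to be processed in per-table chunks, which is presumably what the remainder of the function (not shown in this excerpt) uses these bounds for.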