From: Rob Clark <robdcl...@chromium.org>

In situations where the mapping/unmapping sequence can be controlled by
userspace, attempting to map over a region that has not yet been
unmapped is an error, but not something that should spam dmesg.
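For example (a minimal, hypothetical sketch, not code from any in-tree
driver), a GPU driver whose userspace controls map/unmap ordering could
opt in when allocating its pagetable ops. IO_PGTABLE_QUIRK_NO_WARN_ON,
struct io_pgtable_cfg, alloc_io_pgtable_ops() and map_pages() are the
real io-pgtable interfaces; my_gpu_tlb_ops and the surrounding values
(dev, priv, iova, paddr) are made up for illustration:

  struct io_pgtable_cfg cfg = {
          .quirks         = IO_PGTABLE_QUIRK_NO_WARN_ON,
          .pgsize_bitmap  = SZ_4K | SZ_2M,
          .ias            = 48,
          .oas            = 48,
          .coherent_walk  = true,
          .tlb            = &my_gpu_tlb_ops,  /* driver TLB flush callbacks */
          .iommu_dev      = dev,
  };
  struct io_pgtable_ops *ops;
  size_t mapped;
  int ret;

  ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, priv);
  if (!ops)
          return -ENOMEM;

  /* A conflicting map now fails with -EEXIST, without a WARN_ON() splat */
  ret = ops->map_pages(ops, iova, paddr, SZ_4K, 1,
                       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL, &mapped);

With the quirk set, mapping over a still-live region is reported to the
caller as -EEXIST and can be passed back to userspace without filling
the kernel log.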
Now that there is a quirk, we can also drop the selftest_running flag
and use the quirk instead for selftests.

Signed-off-by: Rob Clark <robdcl...@chromium.org>
---
 drivers/iommu/io-pgtable-arm.c | 27 ++++++++++++++-------------
 include/linux/io-pgtable.h     |  8 ++++++++
 2 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index f27965caf6a1..a535d88f8943 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -253,8 +253,6 @@ static inline bool arm_lpae_concat_mandatory(struct io_pgtable_cfg *cfg,
 	       (data->start_level == 1) && (oas == 40);
 }
 
-static bool selftest_running = false;
-
 static dma_addr_t __arm_lpae_dma_addr(void *pages)
 {
 	return (dma_addr_t)virt_to_phys(pages);
@@ -373,7 +371,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 	for (i = 0; i < num_entries; i++)
 		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
 			/* We require an unmap first */
-			WARN_ON(!selftest_running);
+			WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN_ON));
 			return -EEXIST;
 		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
 			/*
@@ -475,7 +473,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
 		cptep = iopte_deref(pte, data);
 	} else if (pte) {
 		/* We require an unmap first */
-		WARN_ON(!selftest_running);
+		WARN_ON(!(cfg->quirks & IO_PGTABLE_QUIRK_NO_WARN_ON));
 		return -EEXIST;
 	}
 
@@ -649,8 +647,10 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
 	ptep += unmap_idx_start;
 	pte = READ_ONCE(*ptep);
-	if (WARN_ON(!pte))
-		return 0;
+	if (!pte) {
+		WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN_ON));
+		return -ENOENT;
+	}
 
 	/* If the size matches this level, we're in the right place */
 	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
@@ -660,8 +660,10 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 		/* Find and handle non-leaf entries */
 		for (i = 0; i < num_entries; i++) {
 			pte = READ_ONCE(ptep[i]);
-			if (WARN_ON(!pte))
+			if (!pte) {
+				WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN_ON));
 				break;
+			}
 
 			if (!iopte_leaf(pte, lvl, iop->fmt)) {
 				__arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1);
@@ -976,7 +978,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
 			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
 			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
-			    IO_PGTABLE_QUIRK_ARM_HD))
+			    IO_PGTABLE_QUIRK_ARM_HD |
+			    IO_PGTABLE_QUIRK_NO_WARN_ON))
 		return NULL;
 
 	data = arm_lpae_alloc_pgtable(cfg);
@@ -1079,7 +1082,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 	struct arm_lpae_io_pgtable *data;
 	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
 
-	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB))
+	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB |
+			    IO_PGTABLE_QUIRK_NO_WARN_ON))
 		return NULL;
 
 	data = arm_lpae_alloc_pgtable(cfg);
@@ -1320,7 +1324,6 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
 #define __FAIL(ops, i)	({						\
 		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
 		arm_lpae_dump_ops(ops);					\
-		selftest_running = false;				\
 		-EFAULT;						\
 })
 
@@ -1336,8 +1339,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
 	size_t size, mapped;
 	struct io_pgtable_ops *ops;
 
-	selftest_running = true;
-
 	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
 		cfg_cookie = cfg;
 		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
@@ -1426,7 +1427,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
 		free_io_pgtable_ops(ops);
 	}
 
-	selftest_running = false;
 	return 0;
 }
 
@@ -1448,6 +1448,7 @@ static int __init arm_lpae_do_selftests(void)
 		.tlb = &dummy_tlb_ops,
 		.coherent_walk = true,
 		.iommu_dev = &dev,
+		.quirks = IO_PGTABLE_QUIRK_NO_WARN_ON,
 	};
 
 	/* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index bba2a51c87d2..639b8f4fb87d 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -88,6 +88,13 @@ struct io_pgtable_cfg {
 	 *
 	 * IO_PGTABLE_QUIRK_ARM_HD: Enables dirty tracking in stage 1 pagetable.
 	 * IO_PGTABLE_QUIRK_ARM_S2FWB: Use the FWB format for the MemAttrs bits
+	 *
+	 * IO_PGTABLE_QUIRK_NO_WARN_ON: Do not WARN_ON() on conflicting
+	 *	mappings, but silently return -EEXIST.  Normally an attempt
+	 *	to map over an existing mapping would indicate some sort of
+	 *	kernel bug, which would justify the WARN_ON().  But for GPU
+	 *	drivers this can be under the control of userspace, which
+	 *	deserves an error return but should not spam dmesg.
 	 */
 	#define IO_PGTABLE_QUIRK_ARM_NS			BIT(0)
 	#define IO_PGTABLE_QUIRK_NO_PERMS		BIT(1)
@@ -97,6 +104,7 @@ struct io_pgtable_cfg {
 	#define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA		BIT(6)
 	#define IO_PGTABLE_QUIRK_ARM_HD			BIT(7)
 	#define IO_PGTABLE_QUIRK_ARM_S2FWB		BIT(8)
+	#define IO_PGTABLE_QUIRK_NO_WARN_ON		BIT(9)
 	unsigned long quirks;
 	unsigned long pgsize_bitmap;
 	unsigned int ias;
-- 
2.49.0