When using a virtual SMMU and running the driver in TLBI_ON_MAP
mode, we need to invalidate large IOVA ranges. This typically happens
in the DPDK use case, where hugepages are used. There, invalidating
page by page is really inefficient and we would rather invalidate by
IOVA range. Unfortunately no such command is specified in the SMMUv3
architecture spec, so let's add a new implementation defined command
that takes an address mask.

The CMD_TLBI_NH_VA_AM command format is inherited from CMD_TLBI_NH_VA's,
with the currently unused VMID field replaced by the AM (address mask)
field.

Signed-off-by: Eric Auger <eric.au...@redhat.com>
---
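Illustration only, not part of the diff below: a minimal sketch of the
intended AM encoding, assuming the address mask carries the size of the
range expressed in 4K pages (this mirrors the cmd.tlbi.am = size >> 12
assignment in arm_smmu_tlb_inv_range_nosync()); the helper name is made
up for the example.

#include <linux/types.h>

/* Encode an IOVA range size as an address mask (AM), assuming AM is the
 * number of 4K pages covered by the range. The am field is a u16, so this
 * encoding covers at most 64K pages (256MB) per command.
 */
static inline u16 iova_range_to_am(size_t size)
{
        /* e.g. a 2MB hugepage yields an AM of 0x200 (512 pages) */
        return size >> 12;
}
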
 drivers/iommu/arm-smmu-v3.c | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index a1c10af..9da2785 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -492,9 +492,15 @@ struct arm_smmu_cmdq_ent {
                #define CMDQ_OP_TLBI_S12_VMALL  0x28
                #define CMDQ_OP_TLBI_S2_IPA     0x2a
                #define CMDQ_OP_TLBI_NSNH_ALL   0x30
+
+               /* vIOMMU ASID/IOVA Range Invalidation */
+               #define CMDQ_OP_TLBI_NH_VA_AM   0x8F
                struct {
                        u16                     asid;
-                       u16                     vmid;
+                       union {
+                               u16             vmid;
+                               u16             am; /* address mask */
+                       };
                        bool                    leaf;
                        u64                     addr;
                } tlbi;
@@ -853,6 +859,12 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
                cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
                cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
                break;
+       case CMDQ_OP_TLBI_NH_VA_AM:
+               cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
+               cmd[0] |= (u64)ent->tlbi.am << CMDQ_TLBI_0_VMID_SHIFT;
+               cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
+               cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
+               break;
        case CMDQ_OP_TLBI_S2_IPA:
                cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
                cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
@@ -1402,8 +1414,14 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
        };
 
        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
-               cmd.opcode      = CMDQ_OP_TLBI_NH_VA;
                cmd.tlbi.asid   = smmu_domain->s1_cfg.cd.asid;
+               if (smmu->options & ARM_SMMU_OPT_TLBI_ON_MAP) {
+                       cmd.opcode      = CMDQ_OP_TLBI_NH_VA_AM;
+                       cmd.tlbi.am   = size >> 12;
+                       granule = size;
+               } else {
+                       cmd.opcode      = CMDQ_OP_TLBI_NH_VA;
+               }
        } else {
                cmd.opcode      = CMDQ_OP_TLBI_S2_IPA;
                cmd.tlbi.vmid   = smmu_domain->s2_cfg.vmid;
-- 
2.5.5
