From: Suravee Suthikulpanit <[email protected]>

This patch introduces a new IOMMU interface, amd_iommu_update_ga(),
which allows KVM (SVM) to update existing posted-interrupt IOMMU IRTEs
when a vCPU is loaded onto or unloaded from a physical CPU.

Signed-off-by: Suravee Suthikulpanit <[email protected]>
---
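Note: a minimal sketch of how a KVM (SVM) caller might use the new
interface from its vcpu load/unload paths. The helper name and the
avic_vm_tag / avic_backing_page fields below are illustrative
assumptions, not part of this patch:

/*
 * Illustrative caller only (not part of this patch). Assumes the SVM
 * AVIC code tracks a per-VM GA tag and a per-vCPU vAPIC backing page;
 * avic_vm_tag and avic_backing_page are hypothetical names here.
 */
static void avic_set_running(struct kvm_vcpu *vcpu, int cpu, bool is_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 vapic_base = page_to_phys(svm->avic_backing_page);

	/* Re-target every guest-mode IRTE tagged for this vCPU and
	 * publish whether the vCPU is currently running on @cpu.
	 */
	amd_iommu_update_ga(vcpu->vcpu_id, cpu, svm->avic_vm_tag,
			    vapic_base, is_run);
}

Since amd_iommu_update_ga() returns 0 early when guest virtual APIC
mode is not enabled, such a caller does not need to check for AVIC
support itself.
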
 drivers/iommu/amd_iommu.c | 70 +++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/amd-iommu.h |  8 ++++++
 2 files changed, 78 insertions(+)

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 8cdde339..1d17597 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4330,4 +4330,74 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
 
        return 0;
 }
+
+static int
+set_irte_ga(struct amd_iommu *iommu, unsigned int devid,
+           u64 base, int cpu, bool is_run)
+{
+       struct irq_remap_table *irt = get_irq_table(devid, false);
+       unsigned long flags;
+       int index;
+
+       if (!irt)
+               return -ENODEV;
+
+       spin_lock_irqsave(&irt->lock, flags);
+
+       for (index = irt->min_index; index < MAX_IRQS_PER_TABLE; ++index) {
+               struct irte_ga *irte = amd_iommu_get_irte(irt, index);
+
+               if (!irte->lo.fields_vapic.guest_mode)
+                       continue;
+
+               irte->hi.fields.ga_root_ptr = (base >> 12);
+               irte->lo.fields_vapic.destination = cpu;
+               irte->lo.fields_vapic.is_run = is_run;
+               barrier();
+       }
+
+       spin_unlock_irqrestore(&irt->lock, flags);
+
+       iommu_flush_irt(iommu, devid);
+       iommu_completion_wait(iommu);
+
+       return 0;
+}
+
+int amd_iommu_update_ga(u32 vcpu_id, u32 cpu, u32 ga_tag,
+                       u64 base, bool is_run)
+{
+       unsigned long flags;
+       struct amd_iommu *iommu;
+
+       if (amd_iommu_guest_ir < AMD_IOMMU_GUEST_IR_GA)
+               return 0;
+
+       for_each_iommu(iommu) {
+               struct amd_ir_data *ir_data;
+
+               spin_lock_irqsave(&iommu->ga_hash_lock, flags);
+
+               hash_for_each_possible(iommu->ga_hash, ir_data, hnode,
+                                      AMD_IOMMU_GATAG(ga_tag, vcpu_id)) {
+                       struct iommu_dev_data *dev_data;
+
+                       if (!ir_data)
+                               break;
+
+                       dev_data = search_dev_data(ir_data->irq_2_irte.devid);
+
+                       if (!dev_data || !dev_data->guest_mode)
+                               continue;
+
+                       set_irte_ga(iommu, ir_data->irq_2_irte.devid,
+                                   base, cpu, is_run);
+               }
+
+               spin_unlock_irqrestore(&iommu->ga_hash_lock, flags);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(amd_iommu_update_ga);
 #endif
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 36648fe..e52cee5 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -173,6 +173,9 @@ extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
 extern int
 amd_iommu_register_ga_log_notifier(int (*notifier)(int, int, int));
 
+extern int
+amd_iommu_update_ga(u32 vcpu_id, u32 cpu, u32 ga_tag, u64 base, bool is_run);
+
 #else
 
 static inline int amd_iommu_detect(void) { return -ENODEV; }
@@ -183,6 +186,11 @@ amd_iommu_register_ga_log_notifier(int (*notifier)(int, int, int))
        return 0;
 }
 
+static inline int
+amd_iommu_update_ga(u32 vcpu_id, u32 cpu, u32 ga_tag, u64 base, bool is_run)
+{
+       return 0;
+}
 #endif
 
 #endif /* _ASM_X86_AMD_IOMMU_H */
-- 
1.9.1