From: Suravee Suthikulpanit <[email protected]>

Introduce a new IOMMU driver parameter, amd_iommu_guest_ir, which can
be used to specify the interrupt remapping mode for devices passed
through to VM guests:
    * legacy: Legacy interrupt remapping mode (w/ 32-bit IRTE)
    * ga    : Guest vAPIC interrupt remapping mode (w/ 128-bit IRTE)

Note that GA mode also supports legacy interrupt remapping for
non-passthrough devices, using the same 128-bit IRTE format.
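
The mode can be selected at boot time via the new amd_iommu_intr=
kernel command-line parameter, e.g.:

    amd_iommu_intr=legacy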

Signed-off-by: Suravee Suthikulpanit <[email protected]>
---
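Note for reviewers (not part of the commit message): the GA modes switch
the IRTE layout from 32-bit to 128-bit entries. Below is a minimal sketch,
using a hypothetical helper name, of how a consumer of amd_iommu_guest_ir
could derive the IRTE entry size; the mode names and sizes come from this
patch:

    static inline size_t irte_entry_size(void)
    {
            /* Legacy mode keeps the 32-bit IRTE; */
            if (amd_iommu_guest_ir == AMD_IOMMU_GUEST_IR_LEGACY)
                    return sizeof(u32);
            /* both GA modes use the 128-bit IRTE. */
            return sizeof(u64) * 2;
    }
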
 drivers/iommu/amd_iommu_init.c  | 73 +++++++++++++++++++++++++++++++++++++----
 drivers/iommu/amd_iommu_proto.h |  1 +
 drivers/iommu/amd_iommu_types.h | 14 ++++++++
 3 files changed, 82 insertions(+), 6 deletions(-)

diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index bf4959f..83a5300 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -131,6 +131,7 @@ struct ivmd_header {
 bool amd_iommu_dump;
 bool amd_iommu_irq_remap __read_mostly;
 
+int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_GA;
 static bool amd_iommu_detected;
 static bool __initdata amd_iommu_disabled;
 
@@ -1087,6 +1088,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
                iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
        }
 
+       if ((h->efr & (0x1 << 6)) == 0)         /* IVHD EFR GASup bit */
+               amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+
        iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
                                                iommu->mmio_phys_end);
        if (!iommu->mmio_base)
@@ -1283,6 +1287,13 @@ static int iommu_init_pci(struct amd_iommu *iommu)
        if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
                return -ENOMEM;
 
+       /* Note: We have already checked GASup from the IVRS table.
+        *       Now, make sure that GAMSup is also set.
+        */
+       if (amd_iommu_guest_ir >= AMD_IOMMU_GUEST_IR_GA &&
+           !iommu_feature(iommu, FEATURE_GAM_VAPIC))
+               amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+
        if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
                amd_iommu_np_cache = true;
 
@@ -1340,16 +1352,23 @@ static void print_iommu_info(void)
                        dev_name(&iommu->dev->dev), iommu->cap_ptr);
 
                if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
-                       pr_info("AMD-Vi:  Extended features: ");
+                       pr_info("AMD-Vi: Extended features (%#llx):",
+                               iommu->features);
                        for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
                                if (iommu_feature(iommu, (1ULL << i)))
                                        pr_cont(" %s", feat_str[i]);
                        }
+
+                       if (iommu_feature(iommu, FEATURE_GAM_VAPIC))
+                               pr_cont(" GA_vAPIC");
+
                        pr_cont("\n");
                }
        }
        if (irq_remapping_enabled)
                pr_info("AMD-Vi: Interrupt remapping enabled\n");
+       if (amd_iommu_guest_ir >= AMD_IOMMU_GUEST_IR_GA)
+               pr_info("AMD-Vi: AVIC enabled (%#x)\n", amd_iommu_guest_ir);
 }
 
 static int __init amd_iommu_init_pci(void)
@@ -1647,6 +1666,20 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
                               iommu->stored_addr_lo | 1);
 }
 
+static void iommu_enable_ga(struct amd_iommu *iommu)
+{
+       switch (amd_iommu_guest_ir) {
+       case AMD_IOMMU_GUEST_IR_GA:
+               iommu_feature_enable(iommu, CONTROL_GAM_EN);
+               /* Fall through */
+       case AMD_IOMMU_GUEST_IR_LEGACY_GA:
+               iommu_feature_enable(iommu, CONTROL_GA_EN);
+               break;
+       default:
+               break;
+       }
+}
+
 /*
  * This function finally enables all IOMMUs found in the system after
  * they have been initialized
@@ -1662,9 +1695,13 @@ static void early_enable_iommus(void)
                iommu_enable_command_buffer(iommu);
                iommu_enable_event_buffer(iommu);
                iommu_set_exclusion_range(iommu);
+               iommu_enable_ga(iommu);
                iommu_enable(iommu);
                iommu_flush_all_caches(iommu);
        }
+
+       if (amd_iommu_guest_ir >= AMD_IOMMU_GUEST_IR_GA)
+               amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
 }
 
 static void enable_iommus_v2(void)
@@ -1690,6 +1727,9 @@ static void disable_iommus(void)
 
        for_each_iommu(iommu)
                iommu_disable(iommu);
+
+       if (amd_iommu_guest_ir >= AMD_IOMMU_GUEST_IR_GA)
+               amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
 }
 
 /*
@@ -1929,10 +1969,16 @@ static int __init early_amd_iommu_init(void)
                 * remapping tables.
                 */
                ret = -ENOMEM;
-               amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
-                               MAX_IRQS_PER_TABLE * sizeof(u32),
-                               IRQ_TABLE_ALIGNMENT,
-                               0, NULL);
+               if (amd_iommu_guest_ir == AMD_IOMMU_GUEST_IR_LEGACY)
+                       amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
+                                       MAX_IRQS_PER_TABLE * sizeof(u32),
+                                       IRQ_TABLE_ALIGNMENT,
+                                       0, NULL);
+               else
+                       amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
+                                       MAX_IRQS_PER_TABLE * (sizeof(u64) * 2),
+                                       IRQ_TABLE_ALIGNMENT,
+                                       0, NULL);
                if (!amd_iommu_irq_cache)
                        goto out;
 
@@ -2128,7 +2174,7 @@ static int __init amd_iommu_init(void)
        ret = iommu_go_to_state(IOMMU_INITIALIZED);
        if (ret) {
                free_dma_resources();
-               if (!irq_remapping_enabled) {
+               if (!irq_remapping_enabled && !amd_iommu_guest_ir) {
                        disable_iommus();
                        free_on_init_error();
                } else {
@@ -2185,6 +2231,21 @@ static int __init parse_amd_iommu_dump(char *str)
        return 1;
 }
 
+static int __init parse_amd_iommu_intr(char *str)
+{
+       for (; *str; ++str) {
+               if (strncmp(str, "legacy", 6) == 0) {
+                       amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+                       break;
+               }
+               if (strncmp(str, "ga", 2) == 0) {
+                       amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_GA;
+                       break;
+               }
+       }
+       return 1;
+}
+
 static int __init parse_amd_iommu_options(char *str)
 {
        for (; *str; ++str) {
@@ -2261,6 +2322,7 @@ static int __init parse_ivrs_hpet(char *str)
 
 __setup("amd_iommu_dump",      parse_amd_iommu_dump);
 __setup("amd_iommu=",          parse_amd_iommu_options);
+__setup("amd_iommu_intr=",     parse_amd_iommu_intr);
 __setup("ivrs_ioapic",         parse_ivrs_ioapic);
 __setup("ivrs_hpet",           parse_ivrs_hpet);
 
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 0bd9eb3..faa3b48 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -38,6 +38,7 @@ extern int amd_iommu_enable(void);
 extern void amd_iommu_disable(void);
 extern int amd_iommu_reenable(int);
 extern int amd_iommu_enable_faulting(void);
+extern int amd_iommu_guest_ir;
 
 /* IOMMUv2 specific functions */
 struct iommu_domain;
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 9d32b20..95414120 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -92,6 +92,7 @@
 #define FEATURE_GA             (1ULL<<7)
 #define FEATURE_HE             (1ULL<<8)
 #define FEATURE_PC             (1ULL<<9)
+#define FEATURE_GAM_VAPIC      (1ULL<<21)
 
 #define FEATURE_PASID_SHIFT    32
 #define FEATURE_PASID_MASK     (0x1fULL << FEATURE_PASID_SHIFT)
@@ -146,6 +147,8 @@
 #define CONTROL_PPFINT_EN       0x0eULL
 #define CONTROL_PPR_EN          0x0fULL
 #define CONTROL_GT_EN           0x10ULL
+#define CONTROL_GA_EN           0x11ULL
+#define CONTROL_GAM_EN          0x19ULL
 
 #define CTRL_INV_TO_MASK       (7 << CONTROL_INV_TIMEOUT)
 #define CTRL_INV_TO_NONE       0
@@ -694,4 +697,15 @@ struct __iommu_counter {
 
 #endif /* CONFIG_AMD_IOMMU_STATS */
 
+enum amd_iommu_intr_mode_type {
+       AMD_IOMMU_GUEST_IR_LEGACY,
+
+       /* This mode is not visible to users. It is used when
+        * GA cannot be fully enabled and we fall back to supporting
+        * only legacy interrupt remapping via the 128-bit IRTE.
+        */
+       AMD_IOMMU_GUEST_IR_LEGACY_GA,
+       AMD_IOMMU_GUEST_IR_GA,
+};
+
 #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
-- 
1.9.1
