From: Paul Greenwalt <paul.greenw...@intel.com>

Add malicious driver detection (MDD). Support enabling MDD, disabling
MDD, handling an MDD event, and restoring a VF that was disabled by an
MDD event.

Reviewed-by: Przemek Kitszel <przemyslaw.kits...@intel.com>
Reviewed-by: Jedrzej Jagielski <jedrzej.jagiel...@intel.com>
Reviewed-by: Marcin Szycik <marcin.szy...@linux.intel.com>
Signed-off-by: Paul Greenwalt <paul.greenw...@intel.com>
Signed-off-by: Michal Swiatkowski <michal.swiatkow...@linux.intel.com>
---
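Reviewer note, not part of the commit message: below is a minimal
sketch of how a PF driver might consume the new mac_ops callbacks,
e.g. from a service task. The wiring around them (where handle_mdd
gets called, and the policy applied to an offending VF) is outside
this patch, so the function name and use of adapter fields here are
illustrative only:

	/* Hypothetical caller of the new MDD hooks (sketch only).
	 * Assumes at most 64 VFs, matching the largest pool option.
	 */
	static void example_poll_mdd(struct ixgbe_adapter *adapter)
	{
		struct ixgbe_hw *hw = &adapter->hw;
		DECLARE_BITMAP(vf_bitmap, 64);
		u32 vf;

		if (!hw->mac.ops.handle_mdd)
			return;

		bitmap_zero(vf_bitmap, 64);
		hw->mac.ops.handle_mdd(hw, vf_bitmap);

		for_each_set_bit(vf, vf_bitmap, adapter->num_vfs) {
			dev_warn(&adapter->pdev->dev,
				 "MDD event on VF %u\n", vf);
			/* Policy choice: log only, or bring the VF back up */
			if (hw->mac.ops.restore_mdd_vf)
				hw->mac.ops.restore_mdd_vf(hw, vf);
		}
	}
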
 drivers/net/ethernet/intel/ixgbe/ixgbe_type.h |  28 ++++
 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h |   5 +
 drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c |   4 +
 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 120 ++++++++++++++++++
 4 files changed, 157 insertions(+)
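
Also as a reviewer note: a worked example of the queue/register math
introduced below. With the 32-VF pool each VF owns 4 queues, so for
vf = 9 the restore path computes:

	start_q = 9 * 4 = 36
	idx     = 36 / 32 = 1     (i.e. WQBR_TX(1)/WQBR_RX(1))
	val     = IXGBE_32VFS_BITMASK << (36 % 32) = 0xF << 4 = 0xF0

In the other direction, handle_mdd maps a set bit back to a VF: bit
j = 4 in register i = 1 gives q = 4 + 1 * 32 = 36, and with 4 queues
per VF, vf = 36 / 4 = 9.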

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 5fdf32d79d82..d446c375335a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2746,6 +2746,28 @@ enum ixgbe_fdir_pballoc_type {
 #define FW_PHY_INFO_ID_HI_MASK         0xFFFF0000u
 #define FW_PHY_INFO_ID_LO_MASK         0x0000FFFFu
 
+/* There are only 3 options for VF creation on this device:
+ * a 16-VF pool with 8 queues per VF
+ * a 32-VF pool with 4 queues per VF
+ * a 64-VF pool with 2 queues per VF
+ *
+ * Reading the VF registers that map a VF to its queues depends on the
+ * chosen option. Define values that help deal with each scenario.
+ */
+/* Number of queues based on VFs pool */
+#define IXGBE_16VFS_QUEUES             8
+#define IXGBE_32VFS_QUEUES             4
+#define IXGBE_64VFS_QUEUES             2
+/* Mask for getting queues bits based on VFs pool */
+#define IXGBE_16VFS_BITMASK            GENMASK(IXGBE_16VFS_QUEUES - 1, 0)
+#define IXGBE_32VFS_BITMASK            GENMASK(IXGBE_32VFS_QUEUES - 1, 0)
+#define IXGBE_64VFS_BITMASK            GENMASK(IXGBE_64VFS_QUEUES - 1, 0)
+/* Convert queue index to register number.
+ * We have 4 registers with 32 queues in each.
+ */
+#define IXGBE_QUEUES_PER_REG           32
+#define IXGBE_QUEUES_REG_AMOUNT                4
+
 /* Host Interface Command Structures */
 struct ixgbe_hic_hdr {
        u8 cmd;
@@ -3534,6 +3556,12 @@ struct ixgbe_mac_operations {
        int (*dmac_config_tcs)(struct ixgbe_hw *hw);
        int (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
        int (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
+
+       /* MDD events */
+       void (*enable_mdd)(struct ixgbe_hw *hw);
+       void (*disable_mdd)(struct ixgbe_hw *hw);
+       void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf);
+       void (*handle_mdd)(struct ixgbe_hw *hw, unsigned long *vf_bitmap);
 };
 
 struct ixgbe_phy_operations {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
index 3e4092f8da3e..2a11147fb1bc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
@@ -17,4 +17,9 @@ void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw,
 void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw,
                                            bool enable, int vf);
 
+void ixgbe_enable_mdd_x550(struct ixgbe_hw *hw);
+void ixgbe_disable_mdd_x550(struct ixgbe_hw *hw);
+void ixgbe_restore_mdd_vf_x550(struct ixgbe_hw *hw, u32 vf);
+void ixgbe_handle_mdd_x550(struct ixgbe_hw *hw, unsigned long *vf_bitmap);
+
 #endif /* _IXGBE_X550_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
index cb07ecd8937d..788f3372ebf1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -2630,6 +2630,10 @@ static const struct ixgbe_mac_operations mac_ops_e610 = {
        .prot_autoc_write               = prot_autoc_write_generic,
        .setup_fc                       = ixgbe_setup_fc_e610,
        .fc_autoneg                     = ixgbe_fc_autoneg_e610,
+       .enable_mdd                     = ixgbe_enable_mdd_x550,
+       .disable_mdd                    = ixgbe_disable_mdd_x550,
+       .restore_mdd_vf                 = ixgbe_restore_mdd_vf_x550,
+       .handle_mdd                     = ixgbe_handle_mdd_x550,
 };
 
 static const struct ixgbe_phy_operations phy_ops_e610 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 277ceaf8a793..b5cbfd1f71fd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -3800,6 +3800,122 @@ static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
        return status;
 }
 
+static void ixgbe_set_mdd_x550(struct ixgbe_hw *hw, bool ena)
+{
+       u32 reg_dma, reg_rdr;
+
+       reg_dma = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+       reg_rdr = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+
+       if (ena) {
+               reg_dma |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+               reg_rdr |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+       } else {
+               reg_dma &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+               reg_rdr &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+       }
+
+       IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_dma);
+       IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_rdr);
+}
+
+/**
+ * ixgbe_enable_mdd_x550 - enable malicious driver detection
+ * @hw: pointer to hardware structure
+ */
+void ixgbe_enable_mdd_x550(struct ixgbe_hw *hw)
+{
+       ixgbe_set_mdd_x550(hw, true);
+}
+
+/**
+ * ixgbe_disable_mdd_x550 - disable malicious driver detection
+ * @hw: pointer to hardware structure
+ */
+void ixgbe_disable_mdd_x550(struct ixgbe_hw *hw)
+{
+       ixgbe_set_mdd_x550(hw, false);
+}
+
+/**
+ * ixgbe_restore_mdd_vf_x550 - restore a VF disabled during an MDD event
+ * @hw: pointer to hardware structure
+ * @vf: VF index
+ */
+void ixgbe_restore_mdd_vf_x550(struct ixgbe_hw *hw, u32 vf)
+{
+       u32 idx, reg, val, num_qs, start_q, bitmask;
+
+       /* Map VF to queues */
+       reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+       switch (reg & IXGBE_MRQC_MRQE_MASK) {
+       case IXGBE_MRQC_VMDQRT8TCEN:
+               num_qs = IXGBE_16VFS_QUEUES;
+               bitmask = IXGBE_16VFS_BITMASK;
+               break;
+       case IXGBE_MRQC_VMDQRSS32EN:
+       case IXGBE_MRQC_VMDQRT4TCEN:
+               num_qs = IXGBE_32VFS_QUEUES;
+               bitmask = IXGBE_32VFS_BITMASK;
+               break;
+       default:
+               num_qs = IXGBE_64VFS_QUEUES;
+               bitmask = IXGBE_64VFS_BITMASK;
+               break;
+       }
+       start_q = vf * num_qs;
+
+       /* Release the VF's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
+       idx = start_q / IXGBE_QUEUES_PER_REG;
+       val = bitmask << (start_q % IXGBE_QUEUES_PER_REG);
+       IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), val);
+       IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), val);
+}
+
+/**
+ * ixgbe_handle_mdd_x550 - handle malicious driver detection event
+ * @hw: pointer to hardware structure
+ * @vf_bitmap: output bitmap of malicious VFs
+ */
+void ixgbe_handle_mdd_x550(struct ixgbe_hw *hw, unsigned long *vf_bitmap)
+{
+       u32 i, j, reg, q, div, vf;
+       unsigned long wqbr;
+
+       /* Figure out the pool size for mapping queues to VFs */
+       reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+       switch (reg & IXGBE_MRQC_MRQE_MASK) {
+       case IXGBE_MRQC_VMDQRT8TCEN:
+               div = IXGBE_16VFS_QUEUES;
+               break;
+       case IXGBE_MRQC_VMDQRSS32EN:
+       case IXGBE_MRQC_VMDQRT4TCEN:
+               div = IXGBE_32VFS_QUEUES;
+               break;
+       default:
+               div = IXGBE_64VFS_QUEUES;
+               break;
+       }
+
+       /* Read WQBR_TX and WQBR_RX and check for malicious queues */
+       for (i = 0; i < IXGBE_QUEUES_REG_AMOUNT; i++) {
+               wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i)) |
+                      IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
+               if (!wqbr)
+                       continue;
+
+               /* Get malicious queue */
+               for_each_set_bit(j, (unsigned long *)&wqbr,
+                                IXGBE_QUEUES_PER_REG) {
+                       /* Get queue from bitmask */
+                       q = j + (i * IXGBE_QUEUES_PER_REG);
+                       /* Map queue to VF */
+                       vf = q / div;
+                       set_bit(vf, vf_bitmap);
+               }
+       }
+}
+
 #define X550_COMMON_MAC \
        .init_hw                        = &ixgbe_init_hw_generic, \
        .start_hw                       = &ixgbe_start_hw_X540, \
@@ -3863,6 +3979,10 @@ static const struct ixgbe_mac_operations mac_ops_X550 = {
        .prot_autoc_write       = prot_autoc_write_generic,
        .setup_fc               = ixgbe_setup_fc_generic,
        .fc_autoneg             = ixgbe_fc_autoneg,
+       .enable_mdd             = ixgbe_enable_mdd_x550,
+       .disable_mdd            = ixgbe_disable_mdd_x550,
+       .restore_mdd_vf         = ixgbe_restore_mdd_vf_x550,
+       .handle_mdd             = ixgbe_handle_mdd_x550,
 };
 
 static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
-- 
2.42.0
