To support kernel VF, the PBA bit is always set. But on specific
Linux kernels, e.g. 4.4.0-116, this can cause a "too many
interrupts" issue.
The PF host should set auto clear, auto mask and throttling, as we
always set that register for kernel VF.

Fixes: 6b75183ac4d0 ("net/ixgbe: fix wrong PBA setting")

Signed-off-by: Michael Luo <michael....@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo...@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 49 +++++++++++++++++++++++-----------------
 1 file changed, 28 insertions(+), 21 deletions(-)
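
Note (illustration only, not part of this patch): the "auto clear, auto
mask and throttling" mentioned in the commit message maps to the EIAC,
EIAM and EITR registers. Below is a minimal sketch of what such PF-side
register writes could look like, assuming the usual definitions from
ixgbe_type.h / ixgbe_ethdev.h (IXGBE_WRITE_REG, IXGBE_EITR, IXGBE_EIAC,
IXGBE_EIAM, IXGBE_EIMS_ENABLE_MASK, IXGBE_EITR_CNT_WDIS,
IXGBE_EITR_INTERVAL_US, IXGBE_QUEUE_ITR_INTERVAL_DEFAULT). The helper
name and the nb_q_vectors parameter are made up for this sketch.

    /* Sketch of PF-side throttling/auto-clear/auto-mask setup.
     * Hypothetical helper, not the code added by this patch.
     */
    static void
    ixgbe_sketch_itr_autoclear(struct ixgbe_hw *hw, uint32_t nb_q_vectors)
    {
            uint32_t mask;
            uint32_t i;

            /* Throttle every queue vector; IXGBE_EITR_CNT_WDIS keeps the
             * write from also resetting the interval counter.
             */
            for (i = 0; i < nb_q_vectors; i++)
                    IXGBE_WRITE_REG(hw, IXGBE_EITR(i),
                            IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) |
                            IXGBE_EITR_CNT_WDIS);

            /* Auto-clear the queue interrupt causes, but keep the
             * slow-path causes (link, mailbox, other) out of auto-clear
             * so they are handled explicitly.
             */
            mask = IXGBE_EIMS_ENABLE_MASK;
            mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_MAILBOX | IXGBE_EIMS_LSC);
            IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);

            /* Program the auto-mask register for the same causes. */
            IXGBE_WRITE_REG(hw, IXGBE_EIAM, mask);
    }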

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index f5006bc..7219f02 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -5800,8 +5800,12 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 
        /* won't configure msix register if no mapping is done
         * between intr vector and event fd
+        * but if MSI-X has been enabled already, we need to configure
+        * auto clear, auto mask and throttling.
         */
-       if (!rte_intr_dp_is_en(intr_handle))
+       gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+       if (!rte_intr_dp_is_en(intr_handle) &&
+           !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT)))
                return;
 
        if (rte_intr_allow_others(intr_handle))
@@ -5825,27 +5829,30 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
        /* Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
-       for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
-            queue_id++) {
-               /* by default, 1:1 mapping */
-               ixgbe_set_ivar_map(hw, 0, queue_id, vec);
-               intr_handle->intr_vec[queue_id] = vec;
-               if (vec < base + intr_handle->nb_efd - 1)
-                       vec++;
-       }
+       if (rte_intr_dp_is_en(intr_handle)) {
+               for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+                       queue_id++) {
+                       /* by default, 1:1 mapping */
+                       ixgbe_set_ivar_map(hw, 0, queue_id, vec);
+                       intr_handle->intr_vec[queue_id] = vec;
+                       if (vec < base + intr_handle->nb_efd - 1)
+                               vec++;
+               }
 
-       switch (hw->mac.type) {
-       case ixgbe_mac_82598EB:
-               ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
-                                  IXGBE_MISC_VEC_ID);
-               break;
-       case ixgbe_mac_82599EB:
-       case ixgbe_mac_X540:
-       case ixgbe_mac_X550:
-               ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
-               break;
-       default:
-               break;
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
+                       ixgbe_set_ivar_map(hw, -1,
+                                          IXGBE_IVAR_OTHER_CAUSES_INDEX,
+                                          IXGBE_MISC_VEC_ID);
+                       break;
+               case ixgbe_mac_82599EB:
+               case ixgbe_mac_X540:
+               case ixgbe_mac_X550:
+                       ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
+                       break;
+               default:
+                       break;
+               }
        }
        IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
                        IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
-- 
1.9.3
