Queue management ops unconditionally enable netdev locking. The same
lock is taken by default by several NAPI configuration functions,
such as napi_enable() and netif_napi_set_irq().
Request ops locking in advance and make sure we use the _locked
counterparts of those functions to avoid deadlocks, taking the lock
manually where needed (suspend/resume, queue rebuild and resets).

Reviewed-by: Jacob Keller <[email protected]>
Reviewed-by: Aleksandr Loktionov <[email protected]>
Signed-off-by: Alexander Lobakin <[email protected]>
---
 drivers/net/ethernet/intel/ice/ice_base.h    |   2 +
 drivers/net/ethernet/intel/ice/ice_lib.h     |  10 +-
 drivers/net/ethernet/intel/ice/ice_base.c    |  63 ++++++-
 drivers/net/ethernet/intel/ice/ice_dcb_lib.c |  15 +-
 drivers/net/ethernet/intel/ice/ice_lib.c     | 183 ++++++++++++++++---
 drivers/net/ethernet/intel/ice/ice_main.c    |  69 +++----
 drivers/net/ethernet/intel/ice/ice_sf_eth.c  |   3 +
 drivers/net/ethernet/intel/ice/ice_xsk.c     |   4 +-
 8 files changed, 266 insertions(+), 83 deletions(-)

diff --git a/drivers/net/ethernet/intel/ice/ice_base.h 
b/drivers/net/ethernet/intel/ice/ice_base.h
index d28294247599..99b2c7232829 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -12,8 +12,10 @@ int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
 int
 ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool 
wait);
 int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
+int ice_vsi_alloc_q_vectors_locked(struct ice_vsi *vsi);
 int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
 void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
+void ice_vsi_free_q_vectors_locked(struct ice_vsi *vsi);
 void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
 int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
                           u16 q_idx);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h 
b/drivers/net/ethernet/intel/ice/ice_lib.h
index 49454d98dcfe..0565374b58a5 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -53,9 +53,11 @@ struct ice_vsi *
 ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
 
 void ice_vsi_set_napi_queues(struct ice_vsi *vsi);
-void ice_napi_add(struct ice_vsi *vsi);
-
+void ice_vsi_set_napi_queues_locked(struct ice_vsi *vsi);
 void ice_vsi_clear_napi_queues(struct ice_vsi *vsi);
+void ice_vsi_clear_napi_queues_locked(struct ice_vsi *vsi);
+
+void ice_napi_add(struct ice_vsi *vsi);
 
 int ice_vsi_release(struct ice_vsi *vsi);
 
@@ -64,8 +66,10 @@ void ice_vsi_close(struct ice_vsi *vsi);
 int ice_ena_vsi(struct ice_vsi *vsi, bool locked);
 
 void ice_vsi_decfg(struct ice_vsi *vsi);
-void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
+void ice_dis_vsi_locked(struct ice_vsi *vsi);
+void ice_dis_vsi(struct ice_vsi *vsi);
 
+int ice_vsi_rebuild_locked(struct ice_vsi *vsi, u32 vsi_flags);
 int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags);
 int ice_vsi_cfg(struct ice_vsi *vsi);
 struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c 
b/drivers/net/ethernet/intel/ice/ice_base.c
index 1b7d10fad4f2..f084b1c48e5d 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -153,8 +153,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 
v_idx)
         * handler here (i.e. resume, reset/rebuild, etc.)
         */
        if (vsi->netdev)
-               netif_napi_add_config(vsi->netdev, &q_vector->napi,
-                                     ice_napi_poll, v_idx);
+               netif_napi_add_config_locked(vsi->netdev, &q_vector->napi,
+                                            ice_napi_poll, v_idx);
 
 out:
        /* tie q_vector and VSI together */
@@ -196,7 +196,7 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int 
v_idx)
 
        /* only VSI with an associated netdev is set up with NAPI */
        if (vsi->netdev)
-               netif_napi_del(&q_vector->napi);
+               netif_napi_del_locked(&q_vector->napi);
 
        /* release MSIX interrupt if q_vector had interrupt allocated */
        if (q_vector->irq.index < 0)
@@ -896,13 +896,15 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool 
ena, u16 rxq_idx)
 }
 
 /**
- * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
+ * ice_vsi_alloc_q_vectors_locked - Allocate memory for interrupt vectors
  * @vsi: the VSI being configured
  *
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
+ * Should be called only under the netdev lock.
+ * We allocate one q_vector per queue interrupt.
+ *
+ * Return: 0 on success, -ENOMEM if allocation fails.
  */
-int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
+int ice_vsi_alloc_q_vectors_locked(struct ice_vsi *vsi)
 {
        struct device *dev = ice_pf_to_dev(vsi->back);
        u16 v_idx;
@@ -929,6 +931,30 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
        return v_idx ? 0 : err;
 }
 
+/**
+ * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * We allocate one q_vector per queue interrupt.
+ *
+ * Return: 0 on success, -ENOMEM if allocation fails.
+ */
+int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
+{
+       struct net_device *dev = vsi->netdev;
+       int ret;
+
+       if (dev)
+               netdev_lock(dev);
+
+       ret = ice_vsi_alloc_q_vectors_locked(vsi);
+
+       if (dev)
+               netdev_unlock(dev);
+
+       return ret;
+}
+
 /**
  * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
  * @vsi: the VSI being configured
@@ -992,10 +1018,12 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
 }
 
 /**
- * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
+ * ice_vsi_free_q_vectors_locked - Free memory allocated for interrupt vectors
  * @vsi: the VSI having memory freed
+ *
+ * Should be called only under the netdev lock.
  */
-void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
+void ice_vsi_free_q_vectors_locked(struct ice_vsi *vsi)
 {
        int v_idx;
 
@@ -1005,6 +1033,23 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
        vsi->num_q_vectors = 0;
 }
 
+/**
+ * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
+ * @vsi: the VSI having memory freed
+ */
+void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
+{
+       struct net_device *dev = vsi->netdev;
+
+       if (dev)
+               netdev_lock(dev);
+
+       ice_vsi_free_q_vectors_locked(vsi);
+
+       if (dev)
+               netdev_unlock(dev);
+}
+
 /**
  * ice_cfg_tstamp - Configure Tx time stamp queue
  * @tx_ring: Tx ring to be configured with timestamping
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c 
b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 9fc8681cc58e..eee1eb30dc6f 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -273,14 +273,13 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
  * ice_dcb_ena_dis_vsi - disable certain VSIs for DCB config/reconfig
  * @pf: pointer to the PF instance
  * @ena: true to enable VSIs, false to disable
- * @locked: true if caller holds RTNL lock, false otherwise
  *
  * Before a new DCB configuration can be applied, VSIs of type PF, SWITCHDEV
  * and CHNL need to be brought down. Following completion of DCB configuration
  * the VSIs that were downed need to be brought up again. This helper function
  * does both.
  */
-static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked)
+static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena)
 {
        int i;
 
@@ -294,9 +293,9 @@ static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool 
ena, bool locked)
                case ICE_VSI_CHNL:
                case ICE_VSI_PF:
                        if (ena)
-                               ice_ena_vsi(vsi, locked);
+                               ice_ena_vsi(vsi, true);
                        else
-                               ice_dis_vsi(vsi, locked);
+                               ice_dis_vsi_locked(vsi);
                        break;
                default:
                        continue;
@@ -416,7 +415,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg 
*new_cfg, bool locked)
                rtnl_lock();
 
        /* disable VSIs affected by DCB changes */
-       ice_dcb_ena_dis_vsi(pf, false, true);
+       ice_dcb_ena_dis_vsi(pf, false);
 
        memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
        memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
@@ -445,7 +444,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg 
*new_cfg, bool locked)
 
 out:
        /* enable previously downed VSIs */
-       ice_dcb_ena_dis_vsi(pf, true, true);
+       ice_dcb_ena_dis_vsi(pf, true);
        if (!locked)
                rtnl_unlock();
 free_cfg:
@@ -1107,7 +1106,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 
        rtnl_lock();
        /* disable VSIs affected by DCB changes */
-       ice_dcb_ena_dis_vsi(pf, false, true);
+       ice_dcb_ena_dis_vsi(pf, false);
 
        ret = ice_query_port_ets(pi, &buf, sizeof(buf), NULL);
        if (ret) {
@@ -1119,7 +1118,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
        ice_pf_dcb_recfg(pf, false);
 
        /* enable previously downed VSIs */
-       ice_dcb_ena_dis_vsi(pf, true, true);
+       ice_dcb_ena_dis_vsi(pf, true);
 unlock_rtnl:
        rtnl_unlock();
 out:
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c 
b/drivers/net/ethernet/intel/ice/ice_lib.c
index d921269e1fe7..936d3adf056f 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2308,10 +2308,14 @@ static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct 
ice_vsi *vsi)
 }
 
 /**
- * ice_vsi_cfg_def - configure default VSI based on the type
+ * ice_vsi_cfg_def_locked - configure default VSI based on the type
  * @vsi: pointer to VSI
+ *
+ * Should be called only with the netdev lock taken.
+ *
+ * Return: 0 on success, -errno on failure.
  */
-static int ice_vsi_cfg_def(struct ice_vsi *vsi)
+static int ice_vsi_cfg_def_locked(struct ice_vsi *vsi)
 {
        struct device *dev = ice_pf_to_dev(vsi->back);
        struct ice_pf *pf = vsi->back;
@@ -2354,7 +2358,7 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
        case ICE_VSI_CTRL:
        case ICE_VSI_SF:
        case ICE_VSI_PF:
-               ret = ice_vsi_alloc_q_vectors(vsi);
+               ret = ice_vsi_alloc_q_vectors_locked(vsi);
                if (ret)
                        goto unroll_vsi_init;
 
@@ -2404,7 +2408,7 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
                 * creates a VSI and corresponding structures for bookkeeping
                 * purpose
                 */
-               ret = ice_vsi_alloc_q_vectors(vsi);
+               ret = ice_vsi_alloc_q_vectors_locked(vsi);
                if (ret)
                        goto unroll_vsi_init;
 
@@ -2448,7 +2452,7 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
 unroll_vector_base:
        /* reclaim SW interrupts back to the common pool */
 unroll_alloc_q_vector:
-       ice_vsi_free_q_vectors(vsi);
+       ice_vsi_free_q_vectors_locked(vsi);
 unroll_vsi_init:
        ice_vsi_delete_from_hw(vsi);
 unroll_get_qs:
@@ -2460,6 +2464,28 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
        return ret;
 }
 
+/**
+ * ice_vsi_cfg_def - configure default VSI based on the type
+ * @vsi: pointer to VSI
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int ice_vsi_cfg_def(struct ice_vsi *vsi)
+{
+       struct net_device *dev = vsi->netdev;
+       int ret;
+
+       if (dev)
+               netdev_lock(dev);
+
+       ret = ice_vsi_cfg_def_locked(vsi);
+
+       if (dev)
+               netdev_unlock(dev);
+
+       return ret;
+}
+
 /**
  * ice_vsi_cfg - configure a previously allocated VSI
  * @vsi: pointer to VSI
@@ -2494,10 +2520,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
 }
 
 /**
- * ice_vsi_decfg - remove all VSI configuration
+ * ice_vsi_decfg_locked - remove all VSI configuration
  * @vsi: pointer to VSI
+ *
+ * Should be called only under the netdev lock.
  */
-void ice_vsi_decfg(struct ice_vsi *vsi)
+static void ice_vsi_decfg_locked(struct ice_vsi *vsi)
 {
        struct ice_pf *pf = vsi->back;
        int err;
@@ -2515,7 +2543,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
                ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
 
        ice_vsi_clear_rings(vsi);
-       ice_vsi_free_q_vectors(vsi);
+       ice_vsi_free_q_vectors_locked(vsi);
        ice_vsi_put_qs(vsi);
        ice_vsi_free_arrays(vsi);
 
@@ -2530,6 +2558,23 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
                vsi->agg_node->num_vsis--;
 }
 
+/**
+ * ice_vsi_decfg - remove all VSI configuration
+ * @vsi: pointer to VSI
+ */
+void ice_vsi_decfg(struct ice_vsi *vsi)
+{
+       struct net_device *dev = vsi->netdev;
+
+       if (dev)
+               netdev_lock(dev);
+
+       ice_vsi_decfg_locked(vsi);
+
+       if (dev)
+               netdev_unlock(dev);
+}
+
 /**
  * ice_vsi_setup - Set up a VSI by a given type
  * @pf: board private structure
@@ -2703,7 +2748,7 @@ void ice_vsi_close(struct ice_vsi *vsi)
        if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
                ice_down(vsi);
 
-       ice_vsi_clear_napi_queues(vsi);
+       ice_vsi_clear_napi_queues_locked(vsi);
        ice_vsi_free_irq(vsi);
        ice_vsi_free_tx_rings(vsi);
        ice_vsi_free_rx_rings(vsi);
@@ -2742,11 +2787,12 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
 }
 
 /**
- * ice_dis_vsi - pause a VSI
+ * ice_dis_vsi_locked - pause a VSI
  * @vsi: the VSI being paused
- * @locked: is the rtnl_lock already held
+ *
+ * The caller must always hold the netdev lock.
  */
-void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
+void ice_dis_vsi_locked(struct ice_vsi *vsi)
 {
        bool already_down = test_bit(ICE_VSI_DOWN, vsi->state);
 
@@ -2755,14 +2801,9 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
        if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
                            vsi->type == ICE_VSI_SF)) {
                if (netif_running(vsi->netdev)) {
-                       if (!locked)
-                               rtnl_lock();
                        already_down = test_bit(ICE_VSI_DOWN, vsi->state);
                        if (!already_down)
                                ice_vsi_close(vsi);
-
-                       if (!locked)
-                               rtnl_unlock();
                } else if (!already_down) {
                        ice_vsi_close(vsi);
                }
@@ -2772,12 +2813,30 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
 }
 
 /**
- * ice_vsi_set_napi_queues - associate netdev queues with napi
+ * ice_dis_vsi - pause a VSI
+ * @vsi: the VSI being paused
+ */
+void ice_dis_vsi(struct ice_vsi *vsi)
+{
+       struct net_device *dev = vsi->netdev;
+
+       if (dev)
+               netdev_lock(dev);
+
+       ice_dis_vsi_locked(vsi);
+
+       if (dev)
+               netdev_unlock(dev);
+}
+
+/**
+ * ice_vsi_set_napi_queues_locked - associate netdev queues with napi
  * @vsi: VSI pointer
  *
  * Associate queue[s] with napi for all vectors.
+ * Must be called only with the netdev_lock taken.
  */
-void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
+void ice_vsi_set_napi_queues_locked(struct ice_vsi *vsi)
 {
        struct net_device *netdev = vsi->netdev;
        int q_idx, v_idx;
@@ -2785,7 +2844,6 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
        if (!netdev)
                return;
 
-       ASSERT_RTNL();
        ice_for_each_rxq(vsi, q_idx)
                if (vsi->rx_rings[q_idx] && vsi->rx_rings[q_idx]->q_vector)
                        netif_queue_set_napi(netdev, q_idx, 
NETDEV_QUEUE_TYPE_RX,
@@ -2799,17 +2857,37 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
        ice_for_each_q_vector(vsi, v_idx) {
                struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
 
-               netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+               netif_napi_set_irq_locked(&q_vector->napi, q_vector->irq.virq);
        }
 }
 
 /**
- * ice_vsi_clear_napi_queues - dissociate netdev queues from napi
+ * ice_vsi_set_napi_queues - associate VSI queues with NAPIs
  * @vsi: VSI pointer
  *
+ * Version of ice_vsi_set_napi_queues_locked() that takes the netdev_lock,
+ * to use it outside of the net_device_ops context.
+ */
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
+{
+       struct net_device *netdev = vsi->netdev;
+
+       if (!netdev)
+               return;
+
+       netdev_lock(netdev);
+       ice_vsi_set_napi_queues_locked(vsi);
+       netdev_unlock(netdev);
+}
+
+/**
+ * ice_vsi_clear_napi_queues_locked - dissociate netdev queues from napi
+ * @vsi: VSI to process
+ *
  * Clear the association between all VSI queues queue[s] and napi.
+ * Must be called only with the netdev_lock taken.
  */
-void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
+void ice_vsi_clear_napi_queues_locked(struct ice_vsi *vsi)
 {
        struct net_device *netdev = vsi->netdev;
        int q_idx, v_idx;
@@ -2817,12 +2895,11 @@ void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
        if (!netdev)
                return;
 
-       ASSERT_RTNL();
        /* Clear the NAPI's interrupt number */
        ice_for_each_q_vector(vsi, v_idx) {
                struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
 
-               netif_napi_set_irq(&q_vector->napi, -1);
+               netif_napi_set_irq_locked(&q_vector->napi, -1);
        }
 
        ice_for_each_txq(vsi, q_idx)
@@ -2832,6 +2909,25 @@ void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
                netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, NULL);
 }
 
+/**
+ * ice_vsi_clear_napi_queues - dissociate VSI queues from NAPIs
+ * @vsi: VSI to process
+ *
+ * Version of ice_vsi_clear_napi_queues_locked() that takes the netdev lock,
+ * to use it outside of the net_device_ops context.
+ */
+void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
+{
+       struct net_device *netdev = vsi->netdev;
+
+       if (!netdev)
+               return;
+
+       netdev_lock(netdev);
+       ice_vsi_clear_napi_queues_locked(vsi);
+       netdev_unlock(netdev);
+}
+
 /**
  * ice_napi_add - register NAPI handler for the VSI
  * @vsi: VSI for which NAPI handler is to be registered
@@ -3069,16 +3165,17 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
 }
 
 /**
- * ice_vsi_rebuild - Rebuild VSI after reset
+ * ice_vsi_rebuild_locked - Rebuild VSI after reset
  * @vsi: VSI to be rebuild
  * @vsi_flags: flags used for VSI rebuild flow
  *
  * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or
  * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware.
+ * Should be called only under the netdev lock.
  *
  * Returns 0 on success and negative value on failure
  */
-int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
+int ice_vsi_rebuild_locked(struct ice_vsi *vsi, u32 vsi_flags)
 {
        struct ice_coalesce_stored *coalesce;
        int prev_num_q_vectors;
@@ -3099,8 +3196,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
        if (ret)
                goto unlock;
 
-       ice_vsi_decfg(vsi);
-       ret = ice_vsi_cfg_def(vsi);
+       ice_vsi_decfg_locked(vsi);
+       ret = ice_vsi_cfg_def_locked(vsi);
        if (ret)
                goto unlock;
 
@@ -3131,12 +3228,38 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
        kfree(coalesce);
 decfg:
        if (ret)
-               ice_vsi_decfg(vsi);
+               ice_vsi_decfg_locked(vsi);
 unlock:
        mutex_unlock(&vsi->xdp_state_lock);
        return ret;
 }
 
+/**
+ * ice_vsi_rebuild - Rebuild VSI after reset
+ * @vsi: VSI to be rebuilt
+ * @vsi_flags: flags used for VSI rebuild flow
+ *
+ * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or
+ * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
+{
+       struct net_device *dev = vsi->netdev;
+       int ret;
+
+       if (dev)
+               netdev_lock(dev);
+
+       ret = ice_vsi_rebuild_locked(vsi, vsi_flags);
+
+       if (dev)
+               netdev_unlock(dev);
+
+       return ret;
+}
+
 /**
  * ice_is_reset_in_progress - check for a reset in progress
  * @state: PF state field
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c 
b/drivers/net/ethernet/intel/ice/ice_main.c
index 4da37caa3ec9..ae7e9b332d72 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -507,16 +507,15 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
 /**
  * ice_pf_dis_all_vsi - Pause all VSIs on a PF
  * @pf: the PF
- * @locked: is the rtnl_lock already held
  */
-static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
+static void ice_pf_dis_all_vsi(struct ice_pf *pf)
 {
        int node;
        int v;
 
        ice_for_each_vsi(pf, v)
                if (pf->vsi[v])
-                       ice_dis_vsi(pf->vsi[v], locked);
+                       ice_dis_vsi(pf->vsi[v]);
 
        for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
                pf->pf_agg_node[node].num_vsis = 0;
@@ -605,7 +604,7 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req 
reset_type)
        ice_clear_hw_tbls(hw);
        /* disable the VSIs and their queues that are not already DOWN */
        set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state);
-       ice_pf_dis_all_vsi(pf, false);
+       ice_pf_dis_all_vsi(pf);
 
        if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
                ice_ptp_prepare_for_reset(pf, reset_type);
@@ -3525,6 +3524,7 @@ static void ice_set_ops(struct ice_vsi *vsi)
        }
 
        netdev->netdev_ops = &ice_netdev_ops;
+       netdev->request_ops_lock = true;
        netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
        netdev->xdp_metadata_ops = &ice_xdp_md_ops;
        ice_set_ethtool_ops(netdev);
@@ -4131,6 +4131,7 @@ bool ice_is_wol_supported(struct ice_hw *hw)
  * @locked: is adev device_lock held
  *
  * Only change the number of queues if new_tx, or new_rx is non-0.
+ * Note that it should be called only with the netdev lock taken.
  *
  * Returns 0 on success.
  */
@@ -4156,7 +4157,7 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int 
new_tx, bool locked)
 
        /* set for the next time the netdev is started */
        if (!netif_running(vsi->netdev)) {
-               err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+               err = ice_vsi_rebuild_locked(vsi, ICE_VSI_FLAG_NO_INIT);
                if (err)
                        goto rebuild_err;
                dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change 
happens when link is brought up\n");
@@ -4164,7 +4165,7 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int 
new_tx, bool locked)
        }
 
        ice_vsi_close(vsi);
-       err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+       err = ice_vsi_rebuild_locked(vsi, ICE_VSI_FLAG_NO_INIT);
        if (err)
                goto rebuild_err;
 
@@ -5498,7 +5499,7 @@ static void ice_prepare_for_shutdown(struct ice_pf *pf)
        dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for 
shutdown\n");
 
        /* disable the VSIs and their queues that are not already DOWN */
-       ice_pf_dis_all_vsi(pf, false);
+       ice_pf_dis_all_vsi(pf);
 
        ice_for_each_vsi(pf, v)
                if (pf->vsi[v])
@@ -5534,16 +5535,17 @@ static int ice_reinit_interrupt_scheme(struct ice_pf 
*pf)
 
        /* Remap vectors and rings, after successful re-init interrupts */
        ice_for_each_vsi(pf, v) {
-               if (!pf->vsi[v])
+               struct ice_vsi *vsi = pf->vsi[v];
+
+               if (!vsi)
                        continue;
 
-               ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
+               ret = ice_vsi_alloc_q_vectors(vsi);
                if (ret)
                        goto err_reinit;
-               ice_vsi_map_rings_to_vectors(pf->vsi[v]);
-               rtnl_lock();
-               ice_vsi_set_napi_queues(pf->vsi[v]);
-               rtnl_unlock();
+
+               ice_vsi_map_rings_to_vectors(vsi);
+               ice_vsi_set_napi_queues(vsi);
        }
 
        ret = ice_req_irq_msix_misc(pf);
@@ -5556,13 +5558,15 @@ static int ice_reinit_interrupt_scheme(struct ice_pf 
*pf)
        return 0;
 
 err_reinit:
-       while (v--)
-               if (pf->vsi[v]) {
-                       rtnl_lock();
-                       ice_vsi_clear_napi_queues(pf->vsi[v]);
-                       rtnl_unlock();
-                       ice_vsi_free_q_vectors(pf->vsi[v]);
-               }
+       while (v--) {
+               struct ice_vsi *vsi = pf->vsi[v];
+
+               if (!vsi)
+                       continue;
+
+               ice_vsi_clear_napi_queues(vsi);
+               ice_vsi_free_q_vectors(vsi);
+       }
 
        return ret;
 }
@@ -5624,14 +5628,17 @@ static int ice_suspend(struct device *dev)
         * to CPU0.
         */
        ice_free_irq_msix_misc(pf);
+
        ice_for_each_vsi(pf, v) {
-               if (!pf->vsi[v])
+               struct ice_vsi *vsi = pf->vsi[v];
+
+               if (!vsi)
                        continue;
-               rtnl_lock();
-               ice_vsi_clear_napi_queues(pf->vsi[v]);
-               rtnl_unlock();
-               ice_vsi_free_q_vectors(pf->vsi[v]);
+
+               ice_vsi_clear_napi_queues(vsi);
+               ice_vsi_free_q_vectors(vsi);
        }
+
        ice_clear_interrupt_scheme(pf);
 
        pci_save_state(pdev);
@@ -6759,7 +6766,7 @@ static void ice_napi_enable_all(struct ice_vsi *vsi)
                ice_init_moderation(q_vector);
 
                if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
-                       napi_enable(&q_vector->napi);
+                       napi_enable_locked(&q_vector->napi);
        }
 }
 
@@ -7258,7 +7265,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
                struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
 
                if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
-                       napi_disable(&q_vector->napi);
+                       napi_disable_locked(&q_vector->napi);
 
                cancel_work_sync(&q_vector->tx.dim.work);
                cancel_work_sync(&q_vector->rx.dim.work);
@@ -7558,7 +7565,7 @@ int ice_vsi_open(struct ice_vsi *vsi)
                if (err)
                        goto err_set_qs;
 
-               ice_vsi_set_napi_queues(vsi);
+               ice_vsi_set_napi_queues_locked(vsi);
        }
 
        err = ice_up_complete(vsi);
@@ -9250,7 +9257,7 @@ static int ice_setup_tc_mqprio_qdisc(struct net_device 
*netdev, void *type_data)
                return 0;
 
        /* Pause VSI queues */
-       ice_dis_vsi(vsi, true);
+       ice_dis_vsi_locked(vsi);
 
        if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
                ice_remove_q_channels(vsi, true);
@@ -9289,14 +9296,14 @@ static int ice_setup_tc_mqprio_qdisc(struct net_device 
*netdev, void *type_data)
        cur_rxq = vsi->num_rxq;
 
        /* proceed with rebuild main VSI using correct number of queues */
-       ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+       ret = ice_vsi_rebuild_locked(vsi, ICE_VSI_FLAG_NO_INIT);
        if (ret) {
                /* fallback to current number of queues */
                dev_info(dev, "Rebuild failed with new queues, try with current 
number of queues\n");
                vsi->req_txq = cur_txq;
                vsi->req_rxq = cur_rxq;
                clear_bit(ICE_RESET_FAILED, pf->state);
-               if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
+               if (ice_vsi_rebuild_locked(vsi, ICE_VSI_FLAG_NO_INIT)) {
                        dev_err(dev, "Rebuild of main VSI failed again\n");
                        return ret;
                }
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c 
b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
index 1a2c94375ca7..4f11b008fea5 100644
--- a/drivers/net/ethernet/intel/ice/ice_sf_eth.c
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
@@ -58,6 +58,7 @@ static int ice_sf_cfg_netdev(struct ice_dynamic_port 
*dyn_port,
        eth_hw_addr_set(netdev, dyn_port->hw_addr);
        ether_addr_copy(netdev->perm_addr, dyn_port->hw_addr);
        netdev->netdev_ops = &ice_sf_netdev_ops;
+       netdev->request_ops_lock = true;
        SET_NETDEV_DEVLINK_PORT(netdev, devlink_port);
 
        err = register_netdev(netdev);
@@ -183,7 +184,9 @@ static void ice_sf_dev_remove(struct auxiliary_device *adev)
        devlink = priv_to_devlink(sf_dev->priv);
        devl_lock(devlink);
 
+       netdev_lock(vsi->netdev);
        ice_vsi_close(vsi);
+       netdev_unlock(vsi->netdev);
 
        ice_sf_decfg_netdev(vsi);
        ice_devlink_destroy_sf_dev_port(sf_dev);
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c 
b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 953e68ed0f9a..6d08a11a86a8 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -33,9 +33,9 @@ ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector 
*q_vector,
                return;
 
        if (enable)
-               napi_enable(&q_vector->napi);
+               napi_enable_locked(&q_vector->napi);
        else
-               napi_disable(&q_vector->napi);
+               napi_disable_locked(&q_vector->napi);
 }
 
 /**
-- 
2.53.0

Reply via email to