Commit 6624e780a577 ("ice: split ice_vsi_setup into smaller functions")
has placed ice_vsi_free_q_vectors() after ice_destroy_xdp_rings() in
the rebuild process. The behaviour of the XDP rings config functions is
context-dependent, so this change of order caused ice_destroy_xdp_rings()
to do additional work and remove the XDP prog when it was supposed to be
preserved.

Also, the dependency on the PF state reset flags creates an additional,
fortunately less common, problem:

* PFR is requested, e.g. by the tx_timeout handler
* .ndo_bpf() is asked to delete the program and calls ice_destroy_xdp_rings(),
  but the reset flag is set, so the rings are destroyed without removing the
  program (see the pre-fix checks quoted below)
* ice_vsi_rebuild() tries to delete the non-existent XDP rings, because the
  program is still present on the VSI
* system crashes
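
The pre-fix shortcuts in ice_destroy_xdp_rings() (visible in the hunks
below) show why: with a reset pending, the function frees the ring
resources via the free_qmap path but returns before the program is
cleared from the VSI, roughly:

  /* ice_destroy_xdp_rings(), before this patch (abridged) */
  if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
          goto free_qmap;
  ...
  if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
          return 0;

  ice_vsi_assign_bpf_prog(vsi, NULL);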

With a similar race, when requested to attach a program,
ice_prepare_xdp_rings() can skip setting the program in the VSI and
nevertheless report success.
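
In ice_prepare_xdp_rings() the reset check sits in front of both the Tx
scheduler update and the final program assignment (the latter lies outside
the hunks below), so the early return reports success without the program
ever being set:

  /* ice_prepare_xdp_rings(), before this patch (abridged) */
  if (ice_is_reset_in_progress(pf->state))
          return 0;
  ...
  ice_vsi_assign_bpf_prog(vsi, prog);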

Instead of reverting to the old order of function calls, add an enum
argument to both ice_prepare_xdp_rings() and ice_destroy_xdp_rings() in
order to distinguish between calls from rebuild and .ndo_bpf().
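
With the new argument the intent is stated at the call site instead of
being inferred from pf->state, e.g. (as in the hunks below):

  /* .ndo_bpf(): fully apply or tear down the configuration */
  xdp_ring_err = ice_prepare_xdp_rings(vsi, prog, ICE_XDP_CFG_FULL);
  ...
  /* VSI rebuild: only save/restore the part handled outside the rebuild */
  ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);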

Fixes: efc2214b6047 ("ice: Add support for XDP")
Reviewed-by: Igor Bagnucki <igor.bagnu...@intel.com>
Signed-off-by: Larysa Zaremba <larysa.zare...@intel.com>
---
 drivers/net/ethernet/intel/ice/ice.h      | 11 +++++++++--
 drivers/net/ethernet/intel/ice/ice_lib.c  |  5 +++--
 drivers/net/ethernet/intel/ice/ice_main.c | 22 ++++++++++++----------
 3 files changed, 24 insertions(+), 14 deletions(-)

diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index d4d840729bda..b91b2594b29d 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -930,9 +930,16 @@ int ice_down(struct ice_vsi *vsi);
 int ice_down_up(struct ice_vsi *vsi);
 int ice_vsi_cfg_lan(struct ice_vsi *vsi);
 struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+
+enum ice_xdp_cfg {
+       ICE_XDP_CFG_FULL,       /* Fully apply new config in .ndo_bpf() */
+       ICE_XDP_CFG_PART,       /* Save/use part of config in VSI rebuild */
+};
+
 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
-int ice_destroy_xdp_rings(struct ice_vsi *vsi);
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+                         enum ice_xdp_cfg cfg_type);
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
 int
 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
             u32 flags);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index c0a7ff6c7e87..dd8b374823ee 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2285,7 +2285,8 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
                        ret = ice_vsi_determine_xdp_res(vsi);
                        if (ret)
                                goto unroll_vector_base;
-                       ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+                       ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
+                                                   ICE_XDP_CFG_PART);
                        if (ret)
                                goto unroll_vector_base;
                }
@@ -2429,7 +2430,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
                /* return value check can be skipped here, it always returns
                 * 0 if reset is in progress
                 */
-               ice_destroy_xdp_rings(vsi);
+               ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
 
        ice_vsi_clear_rings(vsi);
        ice_vsi_free_q_vectors(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index f60c022f7960..2a270aacd24a 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2711,10 +2711,12 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
  * @vsi: VSI to bring up Tx rings used by XDP
  * @prog: bpf program that will be assigned to VSI
+ * @cfg_type: create from scratch or restore the existing configuration
  *
  * Return 0 on success and negative value on error
  */
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+                         enum ice_xdp_cfg cfg_type)
 {
        u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
        int xdp_rings_rem = vsi->num_xdp_txq;
@@ -2790,7 +2792,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
         * taken into account at the end of ice_vsi_rebuild, where
         * ice_cfg_vsi_lan is being called
         */
-       if (ice_is_reset_in_progress(pf->state))
+       if (cfg_type == ICE_XDP_CFG_PART)
                return 0;
 
        /* tell the Tx scheduler that right now we have
@@ -2842,22 +2844,21 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
 /**
  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
  * @vsi: VSI to remove XDP rings
+ * @cfg_type: disable XDP permanently or allow it to be restored later
  *
  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
  * resources
  */
-int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
 {
        u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
        struct ice_pf *pf = vsi->back;
        int i, v_idx;
 
        /* q_vectors are freed in reset path so there's no point in detaching
-        * rings; in case of rebuild being triggered not from reset bits
-        * in pf->state won't be set, so additionally check first q_vector
-        * against NULL
+        * rings
         */
-       if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+       if (cfg_type == ICE_XDP_CFG_PART)
                goto free_qmap;
 
        ice_for_each_q_vector(vsi, v_idx) {
@@ -2898,7 +2899,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
        if (static_key_enabled(&ice_xdp_locking_key))
                static_branch_dec(&ice_xdp_locking_key);
 
-       if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+       if (cfg_type == ICE_XDP_CFG_PART)
                return 0;
 
        ice_vsi_assign_bpf_prog(vsi, NULL);
@@ -3009,7 +3010,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
                if (xdp_ring_err) {
                        NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for 
XDP");
                } else {
-                       xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+                       xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
+                                                            ICE_XDP_CFG_FULL);
                        if (xdp_ring_err)
                                NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx 
resources failed");
                }
@@ -3020,7 +3022,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
                        NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources 
failed");
        } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
                xdp_features_clear_redirect_target(vsi->netdev);
-               xdp_ring_err = ice_destroy_xdp_rings(vsi);
+               xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
                if (xdp_ring_err)
                        NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources 
failed");
                /* reallocate Rx queues that were used for zero-copy */
-- 
2.43.0
