An indirect table and an hrxq are created during rule creation when a
QUEUE or RSS action is used. When stopping a port, 'dev_started' is set
to 0 at the very beginning. mlx5_ind_table_obj_release() should still
dereference the queue(s) when it is called while polling for flow rule
deletion, because a flow with a Q/RSS action always refers to the
active Rx queues.

The destruction callback can only pass one input parameter, so a
per-device flag is used to indicate that the flushing of user flows is
in progress. While this flag is set, the reference count of the
queue(s) is still decreased.

Fixes: 3a2f674b6aa8 ("net/mlx5: add queue and RSS HW steering action")
Cc: sta...@dpdk.org

Signed-off-by: Bing Zhao <bi...@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnow...@nvidia.com>
---
 drivers/net/mlx5/mlx5.h      | 1 +
 drivers/net/mlx5/mlx5_flow.c | 3 +++
 drivers/net/mlx5/mlx5_rxq.c  | 8 +++++---
 3 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 503366580b..80e59a6cb5 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -2033,6 +2033,7 @@ struct mlx5_priv {
        RTE_ATOMIC(uint32_t) hws_mark_refcnt; /* HWS mark action reference 
counter. */
        struct rte_pmd_mlx5_flow_engine_mode_info mode_info; /* Process set 
flow engine info. */
        struct mlx5_flow_hw_attr *hw_attr; /* HW Steering port configuration. */
+       bool hws_rule_flushing; /**< Whether this port is in rules flushing 
stage. */
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
        /* Item template list. */
        LIST_HEAD(flow_hw_itt, rte_flow_pattern_template) flow_hw_itt;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f8cfa661ec..533ae7a63d 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -8118,7 +8118,10 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, enum 
mlx5_flow_type type,
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
        if (priv->sh->config.dv_flow_en == 2 &&
            type == MLX5_FLOW_TYPE_GEN) {
+               priv->hws_rule_flushing = true;
                flow_hw_q_flow_flush(dev, NULL);
+               priv->hws_rule_flushing = false;
+               return;
        }
 #endif
        MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) {
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 5eac224b76..3f492ef163 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -2894,6 +2894,7 @@ static void
 __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
+       bool deref_rxqs = true;
 
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
        if (hrxq->hws_flags)
@@ -2903,9 +2904,10 @@ __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct 
mlx5_hrxq *hrxq)
 #endif
        priv->obj_ops.hrxq_destroy(hrxq);
        if (!hrxq->standalone) {
-               mlx5_ind_table_obj_release(dev, hrxq->ind_table,
-                                          hrxq->hws_flags ?
-                                          (!!dev->data->dev_started) : true);
+               if (!dev->data->dev_started && hrxq->hws_flags &&
+                   !priv->hws_rule_flushing)
+                       deref_rxqs = false;
+               mlx5_ind_table_obj_release(dev, hrxq->ind_table, deref_rxqs);
        }
        mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
 }
-- 
2.34.1

Reply via email to