When the port is restarted, all the created flows should be flushed.
This commit adds the flow flush helper function.

Signed-off-by: Suanming Mou <suanmi...@nvidia.com>
---
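Note: not part of the patch, but for context, a minimal sketch of how an
application is expected to reach the new path on a port restart. It
assumes a valid port_id and HW steering enabled (dv_flow_en=2), so that
rte_flow_flush() lands in flow_hw_q_flow_flush() via
mlx5_flow_list_flush():

    struct rte_flow_error err;

    /* Destroy all the created flows before restarting the port. With
     * HW steering enabled this also drains the hardware flow queues.
     */
    if (rte_flow_flush(port_id, &err) < 0)
        printf("flow flush failed: %s\n",
               err.message ? err.message : "(none)");
    rte_eth_dev_stop(port_id);
    /* ... reconfigure as needed, then restart ... */
    rte_eth_dev_start(port_id);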
 drivers/net/mlx5/mlx5_flow.c    |   8 +++
 drivers/net/mlx5/mlx5_flow_hw.c | 117 ++++++++++++++++++++++++++++++++
 2 files changed, 125 insertions(+)
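As a reviewing aid, here is a self-contained model of the drain loop that
__flow_hw_pull_comp() below implements. The push_ops()/pull_comps()
helpers are hypothetical stand-ins for the driver's flow_hw_q_push()/
flow_hw_q_pull(); the burst size and the empty-poll bound mirror the
patch:

    #include <stdint.h>
    #include <unistd.h>

    #define BURST_THR 32u
    #define MAX_EMPTY_POLLS 5

    /* Toy stand-ins so the sketch runs; the real driver pushes and
     * pulls against hardware send queues.
     */
    static uint32_t queued; /* operations waiting for completion */

    static void push_ops(uint32_t queue) { (void)queue; }

    static int pull_comps(uint32_t queue, uint32_t max)
    {
        uint32_t n = queued < max ? queued : max;

        (void)queue;
        queued -= n;
        return (int)n;
    }

    /* Push the outstanding operations, then pull completions in bursts
     * until the expected count is consumed or polling comes up empty
     * too many times in a row.
     */
    static int drain(uint32_t queue, uint32_t pending)
    {
        int got, empty = 0;

        push_ops(queue);
        while (pending) {
            got = pull_comps(queue, BURST_THR);
            if (got < 0)
                return -1;
            if (got == 0) {
                usleep(200); /* back off before re-polling */
                if (++empty > MAX_EMPTY_POLLS)
                    break; /* nothing left to drain */
                continue;
            }
            if ((uint32_t)got > pending)
                return -1; /* more CQEs than expected */
            pending -= got;
            empty = 0;
        }
        return 0;
    }

    int main(void)
    {
        queued = 100;
        return drain(0, 100);
    }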

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index b48a3af0fb..9ac96ac979 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -6991,6 +6991,14 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
        uint32_t num_flushed = 0, fidx = 1;
        struct rte_flow *flow;
 
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+       if (priv->config.dv_flow_en == 2 &&
+           type == MLX5_FLOW_TYPE_GEN) {
+               flow_hw_q_flow_flush(dev, NULL);
+               return;
+       }
+#endif
+
        MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) {
                flow_list_destroy(dev, type, fidx);
                num_flushed++;
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index a74825312f..dcf72ab89f 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -377,6 +377,123 @@ flow_hw_q_push(struct rte_eth_dev *dev,
                                        MLX5DR_SEND_QUEUE_ACTION_DRAIN);
 }
 
+/**
+ * Drain the completions of the enqueued flows.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ *   The index of the queue to pull the completions from.
+ * @param[in] pending_rules
+ *   The number of pending flow rules.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    0 on success, negative value otherwise and rte_errno is set.
+ */
+static int
+__flow_hw_pull_comp(struct rte_eth_dev *dev,
+                   uint32_t queue,
+                   uint32_t pending_rules,
+                   struct rte_flow_error *error)
+{
+#define BURST_THR 32u
+       struct rte_flow_q_op_res comp[BURST_THR];
+       int ret, i, empty_loop = 0;
+
+       flow_hw_q_push(dev, queue, error);
+       while (pending_rules) {
+               ret = flow_hw_q_pull(dev, queue, comp, BURST_THR, error);
+               if (ret < 0)
+                       return -1;
+               if (!ret) {
+                       usleep(200);
+                       if (++empty_loop > 5) {
+                               DRV_LOG(WARNING, "No available dequeue, quit.");
+                               break;
+                       }
+                       continue;
+               }
+               for (i = 0; i < ret; i++) {
+                       if (comp[i].status == RTE_FLOW_Q_OP_ERROR)
+                               DRV_LOG(WARNING, "Flow flush get error CQE.");
+               }
+               if ((uint32_t)ret > pending_rules) {
+                       DRV_LOG(WARNING, "Flow flush get extra CQE.");
+                       return -1;
+               }
+               pending_rules -= ret;
+               empty_loop = 0;
+       }
+       return 0;
+}
+
+/**
+ * Flush created flows.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    0 on success, negative value otherwise and rte_errno is set.
+ */
+int
+flow_hw_q_flow_flush(struct rte_eth_dev *dev,
+                    struct rte_flow_error *error)
+{
+#define DEFAULT_QUEUE 0
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_hw_q *hw_q;
+       struct rte_flow_template_table *tbl;
+       struct rte_flow_hw *flow;
+       struct rte_flow_q_ops_attr attr = {
+               .postpone = 0,
+       };
+       uint32_t pending_rules = 0;
+       uint32_t queue;
+       uint32_t fidx;
+
+       /*
+        * Push and dequeue all the enqueued flow operations in case the
+        * user forgot to dequeue them; otherwise the enqueued created
+        * flows would leak. A missed dequeue also makes the flush receive
+        * more CQEs than expected, so pending_rules would underflow.
+        */
+       for (queue = 0; queue < priv->nb_queue; queue++) {
+               hw_q = &priv->hw_q[queue];
+               if (__flow_hw_pull_comp(dev, queue, hw_q->size - hw_q->job_idx,
+                                       error))
+                       return -1;
+       }
+       /* Flush the flows table by table from DEFAULT_QUEUE. */
+       hw_q = &priv->hw_q[DEFAULT_QUEUE];
+       LIST_FOREACH(tbl, &priv->flow_hw_tbl, next) {
+               MLX5_IPOOL_FOREACH(tbl->flow, fidx, flow) {
+                       if (flow_hw_q_flow_destroy(dev, DEFAULT_QUEUE, &attr,
+                                                  (struct rte_flow *)flow,
+                                                  error))
+                               return -1;
+                       pending_rules++;
+                       /* Drain completions once a queue-size batch is pending. */
+                       if (pending_rules >= hw_q->size) {
+                               if (__flow_hw_pull_comp(dev, DEFAULT_QUEUE,
+                                                       pending_rules, error))
+                                       return -1;
+                               pending_rules = 0;
+                       }
+               }
+       }
+       /* Drain the remaining completions. */
+       if (pending_rules &&
+           __flow_hw_pull_comp(dev, DEFAULT_QUEUE, pending_rules,
+                               error))
+               return -1;
+       return 0;
+}
+
 /**
  * Create flow table.
  *
-- 
2.25.1
