The new Flow API allows inserting flow rules at a specified index
in tables created with the index-based insertion type.
Implement the rte_flow_async_create_by_index API in the mlx5 PMD.
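
For illustration, a minimal application-side sketch of the new entry
point follows (port_id, queue_id, table, actions and the template index
are hypothetical placeholders; the table is assumed to have been created
with insertion_type = RTE_FLOW_TABLE_INSERTION_TYPE_INDEX and nb_flows
large enough to cover rule_index):

    #include <rte_flow.h>

    /* Hypothetical helper: enqueue one rule at a fixed slot of an
     * index-based template table. No item pattern or pattern template
     * index is passed; the slot is addressed by rule_index alone.
     * Completion must be polled later with rte_flow_pull().
     */
    static struct rte_flow *
    insert_rule_at(uint16_t port_id, uint32_t queue_id,
                   struct rte_flow_template_table *table,
                   uint32_t rule_index,
                   const struct rte_flow_action actions[],
                   uint8_t actions_template_index,
                   struct rte_flow_error *error)
    {
            const struct rte_flow_op_attr op_attr = {
                    .postpone = 0, /* apply to HW without an extra push */
            };

            return rte_flow_async_create_by_index(port_id, queue_id,
                                                  &op_attr, table,
                                                  rule_index, actions,
                                                  actions_template_index,
                                                  NULL /* user_data */,
                                                  error);
    }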

Signed-off-by: Alexander Kozyrev <akozy...@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c    |  61 +++++++++++++++++
 drivers/net/mlx5/mlx5_flow.h    |  12 ++++
 drivers/net/mlx5/mlx5_flow_hw.c | 114 ++++++++++++++++++++++++++++++++
 3 files changed, 187 insertions(+)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f5e2831480..ba1eb5309b 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1027,6 +1027,16 @@ mlx5_flow_async_flow_create(struct rte_eth_dev *dev,
                            uint8_t action_template_index,
                            void *user_data,
                            struct rte_flow_error *error);
+static struct rte_flow *
+mlx5_flow_async_flow_create_by_index(struct rte_eth_dev *dev,
+                           uint32_t queue,
+                           const struct rte_flow_op_attr *attr,
+                           struct rte_flow_template_table *table,
+                           uint32_t rule_index,
+                           const struct rte_flow_action actions[],
+                           uint8_t action_template_index,
+                           void *user_data,
+                           struct rte_flow_error *error);
 static int
 mlx5_flow_async_flow_destroy(struct rte_eth_dev *dev,
                             uint32_t queue,
@@ -1107,6 +1117,7 @@ static const struct rte_flow_ops mlx5_flow_ops = {
        .template_table_create = mlx5_flow_table_create,
        .template_table_destroy = mlx5_flow_table_destroy,
        .async_create = mlx5_flow_async_flow_create,
+       .async_create_by_index = mlx5_flow_async_flow_create_by_index,
        .async_destroy = mlx5_flow_async_flow_destroy,
        .pull = mlx5_flow_pull,
        .push = mlx5_flow_push,
@@ -8853,6 +8864,56 @@ mlx5_flow_async_flow_create(struct rte_eth_dev *dev,
                                       user_data, error);
 }
 
+/**
+ * Enqueue flow creation by index.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue_id
+ *   The queue to create the flow.
+ * @param[in] attr
+ *   Pointer to the flow operation attributes.
+ * @param[in] rule_index
+ *   The index at which the rule is inserted into the table.
+ * @param[in] actions
+ *   Action with flow spec value.
+ * @param[in] action_template_index
+ *   The action pattern flow follows from the table.
+ * @param[in] user_data
+ *   Pointer to the user_data.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    Flow pointer on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow *
+mlx5_flow_async_flow_create_by_index(struct rte_eth_dev *dev,
+                           uint32_t queue_id,
+                           const struct rte_flow_op_attr *attr,
+                           struct rte_flow_template_table *table,
+                           uint32_t rule_index,
+                           const struct rte_flow_action actions[],
+                           uint8_t action_template_index,
+                           void *user_data,
+                           struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+       struct rte_flow_attr fattr = {0};
+
+       if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
+               rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL,
+                               "flow_q create with incorrect steering mode");
+               return NULL;
+       }
+       fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+       return fops->async_flow_create_by_index(dev, queue_id, attr, table,
+                                      rule_index, actions, action_template_index,
+                                      user_data, error);
+}
+
 /**
  * Enqueue flow destruction.
  *
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index e376dcae93..c2f9ffd760 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1149,6 +1149,7 @@ struct rte_flow_hw {
        uint32_t age_idx;
        cnt_id_t cnt_id;
        uint32_t mtr_id;
+       uint32_t rule_idx;
        uint8_t rule[0]; /* HWS layer data struct. */
 } __rte_packed;
 
@@ -1810,6 +1811,16 @@ typedef struct rte_flow *(*mlx5_flow_async_flow_create_t)
                         uint8_t action_template_index,
                         void *user_data,
                         struct rte_flow_error *error);
+typedef struct rte_flow *(*mlx5_flow_async_flow_create_by_index_t)
+                       (struct rte_eth_dev *dev,
+                        uint32_t queue,
+                        const struct rte_flow_op_attr *attr,
+                        struct rte_flow_template_table *table,
+                        uint32_t rule_index,
+                        const struct rte_flow_action actions[],
+                        uint8_t action_template_index,
+                        void *user_data,
+                        struct rte_flow_error *error);
 typedef int (*mlx5_flow_async_flow_destroy_t)
                        (struct rte_eth_dev *dev,
                         uint32_t queue,
@@ -1912,6 +1923,7 @@ struct mlx5_flow_driver_ops {
        mlx5_flow_table_create_t template_table_create;
        mlx5_flow_table_destroy_t template_table_destroy;
        mlx5_flow_async_flow_create_t async_flow_create;
+       mlx5_flow_async_flow_create_by_index_t async_flow_create_by_index;
        mlx5_flow_async_flow_destroy_t async_flow_destroy;
        mlx5_flow_pull_t pull;
        mlx5_flow_push_t push;
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 8002c88e4a..b209b448c6 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -2586,6 +2586,118 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,
        return NULL;
 }
 
+/**
+ * Enqueue HW steering flow creation by index.
+ *
+ * The flow will be applied to the HW only if the postpone bit is not set or
+ * the extra push function is called.
+ * The flow creation status should be checked from dequeue result.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ *   The queue to create the flow.
+ * @param[in] attr
+ *   Pointer to the flow operation attributes.
+ * @param[in] rule_index
+ *   The index at which the rule is inserted into the table.
+ * @param[in] actions
+ *   Action with flow spec value.
+ * @param[in] action_template_index
+ *   The action pattern flow follows from the table.
+ * @param[in] user_data
+ *   Pointer to the user_data.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    Flow pointer on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow *
+flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,
+                         uint32_t queue,
+                         const struct rte_flow_op_attr *attr,
+                         struct rte_flow_template_table *table,
+                         uint32_t rule_index,
+                         const struct rte_flow_action actions[],
+                         uint8_t action_template_index,
+                         void *user_data,
+                         struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5dr_rule_attr rule_attr = {
+               .queue_id = queue,
+               .user_data = user_data,
+               .burst = attr->postpone,
+       };
+       struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
+       struct rte_flow_hw *flow;
+       struct mlx5_hw_q_job *job;
+       uint32_t flow_idx;
+       int ret;
+
+       if (unlikely(rule_index >= table->cfg.attr.nb_flows)) {
+               rte_errno = EINVAL;
+               goto error;
+       }
+       if (unlikely(!priv->hw_q[queue].job_idx)) {
+               rte_errno = ENOMEM;
+               goto error;
+       }
+       flow = mlx5_ipool_zmalloc(table->flow, &flow_idx);
+       if (!flow)
+               goto error;
+       /*
+        * Set the table here in order to know the destination table
+        * when freeing the flow afterwards.
+        */
+       flow->table = table;
+       flow->idx = flow_idx;
+       job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
+       /*
+        * Set the job type here in order to know if the flow memory
+        * should be freed or not when getting the result from dequeue.
+        */
+       job->type = MLX5_HW_Q_JOB_TYPE_CREATE;
+       job->flow = flow;
+       job->user_data = user_data;
+       rule_attr.user_data = job;
+       /*
+        * Set the rule index; it is also saved in the flow for destroy.
+        */
+       MLX5_ASSERT(flow_idx > 0);
+       rule_attr.rule_idx = rule_index;
+       flow->rule_idx = rule_index;
+       /*
+        * Construct the flow actions based on the input actions.
+        * The implicitly appended action is always fixed, like the
+        * metadata copy action from FDB to NIC Rx.
+        * The user's "actions" list is used as-is, without copying and
+        * constructing a new list, to save the cost.
+        */
+       if (flow_hw_actions_construct(dev, job,
+                                     &table->ats[action_template_index],
+                                     action_template_index, actions,
+                                     rule_acts, queue, error)) {
+               rte_errno = EINVAL;
+               goto free;
+       }
+       ret = mlx5dr_rule_create(table->matcher,
+                                0, NULL, action_template_index, rule_acts,
+                                &rule_attr, (struct mlx5dr_rule *)flow->rule);
+       if (likely(!ret))
+               return (struct rte_flow *)flow;
+free:
+       /* Flow creation failed, return the job descriptor and free the flow memory. */
+       mlx5_ipool_free(table->flow, flow_idx);
+       priv->hw_q[queue].job_idx++;
+error:
+       rte_flow_error_set(error, rte_errno,
+                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                          "fail to create rte flow");
+       return NULL;
+}
+
 /**
  * Enqueue HW steering flow destruction.
  *
@@ -2636,6 +2748,7 @@ flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
        job->user_data = user_data;
        job->flow = fh;
        rule_attr.user_data = job;
+       rule_attr.rule_idx = fh->rule_idx;
        ret = mlx5dr_rule_destroy((struct mlx5dr_rule *)fh->rule, &rule_attr);
        if (likely(!ret))
                return 0;
@@ -8345,6 +8458,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
        .template_table_create = flow_hw_template_table_create,
        .template_table_destroy = flow_hw_table_destroy,
        .async_flow_create = flow_hw_async_flow_create,
+       .async_flow_create_by_index = flow_hw_async_flow_create_by_index,
        .async_flow_destroy = flow_hw_async_flow_destroy,
        .pull = flow_hw_pull,
        .push = flow_hw_push,
-- 
2.18.2
