Support the template table resize API in the PMD.
The patch allows increasing the capacity of an existing template table;
flows created before the resize remain valid and can be moved to the
resized table asynchronously.

Signed-off-by: Gregory Etelson <getel...@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnow...@nvidia.com>
---
 drivers/net/mlx5/mlx5.h         |   5 +
 drivers/net/mlx5/mlx5_flow.c    |  51 ++++
 drivers/net/mlx5/mlx5_flow.h    |  84 ++++--
 drivers/net/mlx5/mlx5_flow_hw.c | 518 +++++++++++++++++++++++++++-----
 4 files changed, 553 insertions(+), 105 deletions(-)
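
For reviewers, a minimal sketch of the intended application-side sequence,
assuming the template table resize API added by the companion ethdev series.
The helper grow_table() and the flows[]/nb_flows bookkeeping are hypothetical
application state, not part of this patch:

    #include <rte_flow.h>

    static int
    grow_table(uint16_t port_id, uint32_t queue,
               struct rte_flow_template_table *table,
               struct rte_flow **flows, uint32_t nb_flows,
               uint32_t new_capacity)
    {
            struct rte_flow_op_attr op_attr = { .postpone = 0 };
            struct rte_flow_error error;
            uint32_t i;

            /* 1. Allocate the larger matcher; existing rules keep matching. */
            if (rte_flow_template_table_resize(port_id, table, new_capacity,
                                               &error))
                    return -1;
            /* 2. Move each pre-resize flow to the new matcher. */
            for (i = 0; i < nb_flows; i++)
                    if (rte_flow_async_update_resized(port_id, queue, &op_attr,
                                                      flows[i], NULL, &error))
                            return -1;
            rte_flow_push(port_id, queue, &error);
            /* ... drain the move completions with rte_flow_pull() ... */
            /* 3. Destroy the retired matcher once no flow references it. */
            return rte_flow_template_table_resize_complete(port_id, table,
                                                           &error);
    }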

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 99850a58af..bb1853e797 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -380,6 +380,9 @@ enum mlx5_hw_job_type {
        MLX5_HW_Q_JOB_TYPE_UPDATE, /* Flow update job type. */
        MLX5_HW_Q_JOB_TYPE_QUERY, /* Flow query job type. */
        MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY, /* Flow update and query job type. */
+       MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_CREATE, /* Non-optimized flow create job type. */
+       MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_DESTROY, /* Non-optimized flow destroy job type. */
+       MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_MOVE, /* Move flow after table resize. */
 };
 
 enum mlx5_hw_indirect_type {
@@ -422,6 +425,8 @@ struct mlx5_hw_q {
        struct mlx5_hw_q_job **job; /* LIFO header. */
        struct rte_ring *indir_cq; /* Indirect action SW completion queue. */
        struct rte_ring *indir_iq; /* Indirect action SW in progress queue. */
+       struct rte_ring *flow_transfer_pending;
+       struct rte_ring *flow_transfer_completed;
 } __rte_cache_aligned;
 
 
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 3e179110a0..477b13e04d 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1095,6 +1095,20 @@ mlx5_flow_calc_encap_hash(struct rte_eth_dev *dev,
                          uint8_t *hash,
                          struct rte_flow_error *error);
 
+static int
+mlx5_template_table_resize(struct rte_eth_dev *dev,
+                          struct rte_flow_template_table *table,
+                          uint32_t nb_rules, struct rte_flow_error *error);
+static int
+mlx5_flow_async_update_resized(struct rte_eth_dev *dev, uint32_t queue,
+                              const struct rte_flow_op_attr *attr,
+                              struct rte_flow *rule, void *user_data,
+                              struct rte_flow_error *error);
+static int
+mlx5_table_resize_complete(struct rte_eth_dev *dev,
+                          struct rte_flow_template_table *table,
+                          struct rte_flow_error *error);
+
 static const struct rte_flow_ops mlx5_flow_ops = {
        .validate = mlx5_flow_validate,
        .create = mlx5_flow_create,
@@ -1133,6 +1147,9 @@ static const struct rte_flow_ops mlx5_flow_ops = {
                mlx5_flow_action_list_handle_query_update,
        .flow_calc_table_hash = mlx5_flow_calc_table_hash,
        .flow_calc_encap_hash = mlx5_flow_calc_encap_hash,
+       .flow_template_table_resize = mlx5_template_table_resize,
+       .flow_update_resized = mlx5_flow_async_update_resized,
+       .flow_template_table_resize_complete = mlx5_table_resize_complete,
 };
 
 /* Tunnel information. */
@@ -10548,6 +10565,40 @@ mlx5_flow_calc_encap_hash(struct rte_eth_dev *dev,
        return fops->flow_calc_encap_hash(dev, pattern, dest_field, hash, error);
 }
 
+static int
+mlx5_template_table_resize(struct rte_eth_dev *dev,
+                          struct rte_flow_template_table *table,
+                          uint32_t nb_rules, struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       MLX5_DRV_FOPS_OR_ERR(dev, fops, table_resize, ENOTSUP);
+       return fops->table_resize(dev, table, nb_rules, error);
+}
+
+static int
+mlx5_table_resize_complete(struct rte_eth_dev *dev,
+                          struct rte_flow_template_table *table,
+                          struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       MLX5_DRV_FOPS_OR_ERR(dev, fops, table_resize_complete, ENOTSUP);
+       return fops->table_resize_complete(dev, table, error);
+}
+
+static int
+mlx5_flow_async_update_resized(struct rte_eth_dev *dev, uint32_t queue,
+                              const struct rte_flow_op_attr *op_attr,
+                              struct rte_flow *rule, void *user_data,
+                              struct rte_flow_error *error)
+{
+       const struct mlx5_flow_driver_ops *fops;
+
+       MLX5_DRV_FOPS_OR_ERR(dev, fops, flow_update_resized, ENOTSUP);
+       return fops->flow_update_resized(dev, queue, op_attr, rule, user_data, error);
+}
+
 /**
  * Destroy all indirect actions (shared RSS).
  *
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 9cc237c542..6c2944c21a 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1217,6 +1217,7 @@ struct rte_flow {
        uint32_t tunnel:1;
        uint32_t meter:24; /**< Holds flow meter id. */
        uint32_t indirect_type:2; /**< Indirect action type. */
+       uint32_t matcher_selector:1; /**< Matcher index in resizable table. */
        uint32_t rix_mreg_copy;
        /**< Index to metadata register copy table resource. */
        uint32_t counter; /**< Holds flow counter. */
@@ -1262,6 +1263,7 @@ struct rte_flow_hw {
        };
        struct rte_flow_template_table *table; /* The table flow allcated from. */
        uint8_t mt_idx;
+       uint8_t matcher_selector:1;
        uint32_t age_idx;
        cnt_id_t cnt_id;
        uint32_t mtr_id;
@@ -1489,6 +1491,11 @@ struct mlx5_flow_group {
 #define MLX5_MAX_TABLE_RESIZE_NUM 64
 
 struct mlx5_multi_pattern_segment {
+       /*
+        * Number of Modify Header Argument Objects allocated for actions
+        * in this segment.
+        * Capacity is always a power of 2.
+        */
        uint32_t capacity;
        uint32_t head_index;
        struct mlx5dr_action *mhdr_action;
@@ -1527,43 +1534,22 @@ mlx5_is_multi_pattern_active(const struct mlx5_tbl_multi_pattern_ctx *mpctx)
        return mpctx->segments[0].head_index == 1;
 }
 
-static __rte_always_inline struct mlx5_multi_pattern_segment *
-mlx5_multi_pattern_segment_get_next(struct mlx5_tbl_multi_pattern_ctx *mpctx)
-{
-       int i;
-
-       for (i = 0; i < MLX5_MAX_TABLE_RESIZE_NUM; i++) {
-               if (!mpctx->segments[i].capacity)
-                       return &mpctx->segments[i];
-       }
-       return NULL;
-}
-
-static __rte_always_inline struct mlx5_multi_pattern_segment *
-mlx5_multi_pattern_segment_find(struct mlx5_tbl_multi_pattern_ctx *mpctx,
-                               uint32_t flow_resource_ix)
-{
-       int i;
-
-       for (i = 0; i < MLX5_MAX_TABLE_RESIZE_NUM; i++) {
-               uint32_t limit = mpctx->segments[i].head_index +
-                                mpctx->segments[i].capacity;
-
-               if (flow_resource_ix < limit)
-                       return &mpctx->segments[i];
-       }
-       return NULL;
-}
-
 struct mlx5_flow_template_table_cfg {
        struct rte_flow_template_table_attr attr; /* Table attributes passed through flow API. */
        bool external; /* True if created by flow API, false if table is internal to PMD. */
 };
 
+struct mlx5_matcher_info {
+       struct mlx5dr_matcher *matcher; /* Template matcher. */
+       uint32_t refcnt;
+};
+
 struct rte_flow_template_table {
        LIST_ENTRY(rte_flow_template_table) next;
        struct mlx5_flow_group *grp; /* The group rte_flow_template_table uses. */
-       struct mlx5dr_matcher *matcher; /* Template matcher. */
+       struct mlx5_matcher_info matcher_info[2];
+       uint32_t matcher_selector;
+       rte_rwlock_t matcher_replace_rwlk; /* RW lock for resizable tables */
        /* Item templates bind to the table. */
        struct rte_flow_pattern_template *its[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
        /* Action templates bind to the table. */
@@ -1576,8 +1562,34 @@ struct rte_flow_template_table {
        uint8_t nb_action_templates; /* Action template number. */
        uint32_t refcnt; /* Table reference counter. */
        struct mlx5_tbl_multi_pattern_ctx mpctx;
+       struct mlx5dr_matcher_attr matcher_attr;
 };
 
+static __rte_always_inline struct mlx5dr_matcher *
+mlx5_table_matcher(const struct rte_flow_template_table *table)
+{
+       return table->matcher_info[table->matcher_selector].matcher;
+}
+
+static __rte_always_inline struct mlx5_multi_pattern_segment *
+mlx5_multi_pattern_segment_find(struct rte_flow_template_table *table,
+                               uint32_t flow_resource_ix)
+{
+       int i;
+       struct mlx5_tbl_multi_pattern_ctx *mpctx = &table->mpctx;
+
+       if (likely(!rte_flow_template_table_resizable(0, &table->cfg.attr)))
+               return &mpctx->segments[0];
+       for (i = 0; i < MLX5_MAX_TABLE_RESIZE_NUM; i++) {
+               uint32_t limit = mpctx->segments[i].head_index +
+                                mpctx->segments[i].capacity;
+
+               if (flow_resource_ix < limit)
+                       return &mpctx->segments[i];
+       }
+       return NULL;
+}
+
 #endif
 
 /*
@@ -2274,6 +2286,17 @@ typedef int
                         enum rte_flow_encap_hash_field dest_field,
                         uint8_t *hash,
                         struct rte_flow_error *error);
+typedef int (*mlx5_table_resize_t)(struct rte_eth_dev *dev,
+                                  struct rte_flow_template_table *table,
+                                  uint32_t nb_rules, struct rte_flow_error *error);
+typedef int (*mlx5_flow_update_resized_t)
+                       (struct rte_eth_dev *dev, uint32_t queue,
+                        const struct rte_flow_op_attr *attr,
+                        struct rte_flow *rule, void *user_data,
+                        struct rte_flow_error *error);
+typedef int (*table_resize_complete_t)(struct rte_eth_dev *dev,
+                                      struct rte_flow_template_table *table,
+                                      struct rte_flow_error *error);
 
 struct mlx5_flow_driver_ops {
        mlx5_flow_validate_t validate;
@@ -2348,6 +2371,9 @@ struct mlx5_flow_driver_ops {
                async_action_list_handle_query_update;
        mlx5_flow_calc_table_hash_t flow_calc_table_hash;
        mlx5_flow_calc_encap_hash_t flow_calc_encap_hash;
+       mlx5_table_resize_t table_resize;
+       mlx5_flow_update_resized_t flow_update_resized;
+       table_resize_complete_t table_resize_complete;
 };
 
 /* mlx5_flow.c */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 38aed03970..1bd29999f9 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -2904,7 +2904,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
        int ret;
        uint32_t age_idx = 0;
        struct mlx5_aso_mtr *aso_mtr;
-       struct mlx5_multi_pattern_segment *mp_segment;
+       struct mlx5_multi_pattern_segment *mp_segment = NULL;
 
        rte_memcpy(rule_acts, hw_acts->rule_acts, sizeof(*rule_acts) * at->dr_actions_num);
        attr.group = table->grp->group_id;
@@ -2918,17 +2918,20 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
        } else {
                attr.ingress = 1;
        }
-       if (hw_acts->mhdr && hw_acts->mhdr->mhdr_cmds_num > 0) {
+       if (hw_acts->mhdr && hw_acts->mhdr->mhdr_cmds_num > 0 && !hw_acts->mhdr->shared) {
                uint16_t pos = hw_acts->mhdr->pos;
 
-               if (!hw_acts->mhdr->shared) {
-                       rule_acts[pos].modify_header.offset =
-                                               job->flow->res_idx - 1;
-                       rule_acts[pos].modify_header.data =
-                                               (uint8_t *)job->mhdr_cmd;
-                       rte_memcpy(job->mhdr_cmd, hw_acts->mhdr->mhdr_cmds,
-                                  sizeof(*job->mhdr_cmd) * hw_acts->mhdr->mhdr_cmds_num);
-               }
+               mp_segment = mlx5_multi_pattern_segment_find(table, job->flow->res_idx);
+               if (!mp_segment || !mp_segment->mhdr_action)
+                       return -1;
+               rule_acts[pos].action = mp_segment->mhdr_action;
+               /* offset is relative to DR action */
+               rule_acts[pos].modify_header.offset =
+                                       job->flow->res_idx - mp_segment->head_index;
+               rule_acts[pos].modify_header.data =
+                                       (uint8_t *)job->mhdr_cmd;
+               rte_memcpy(job->mhdr_cmd, hw_acts->mhdr->mhdr_cmds,
+                          sizeof(*job->mhdr_cmd) * hw_acts->mhdr->mhdr_cmds_num);
        }
        LIST_FOREACH(act_data, &hw_acts->act_list, next) {
                uint32_t jump_group;
@@ -3035,10 +3038,6 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
                        MLX5_ASSERT(ipv6_push->size == act_data->ipv6_ext.len);
                        break;
                case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
-                       mp_segment = mlx5_multi_pattern_segment_find(&table->mpctx, job->flow->res_idx);
-                       if (!mp_segment || !mp_segment->mhdr_action)
-                               return -1;
-                       rule_acts[hw_acts->mhdr->pos].action = mp_segment->mhdr_action;
                        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
                                ret = flow_hw_set_vlan_vid_construct(dev, job,
                                                                     act_data,
@@ -3195,11 +3194,13 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
 
                if (ix < 0)
                        return -1;
-               mp_segment = mlx5_multi_pattern_segment_find(&table->mpctx, job->flow->res_idx);
+               if (!mp_segment)
+                       mp_segment = mlx5_multi_pattern_segment_find(table, job->flow->res_idx);
                if (!mp_segment || !mp_segment->reformat_action[ix])
                        return -1;
                ra->action = mp_segment->reformat_action[ix];
-               ra->reformat.offset = job->flow->res_idx - 1;
+               /* reformat offset is relative to selected DR action */
+               ra->reformat.offset = job->flow->res_idx - mp_segment->head_index;
                ra->reformat.data = buf;
        }
        if (hw_acts->push_remove && !hw_acts->push_remove->shared) {
@@ -3371,10 +3372,26 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,
                                            pattern_template_index, job);
        if (!rule_items)
                goto error;
-       ret = mlx5dr_rule_create(table->matcher,
-                                pattern_template_index, rule_items,
-                                action_template_index, rule_acts,
-                                &rule_attr, (struct mlx5dr_rule *)flow->rule);
+       if (likely(!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))) {
+               ret = mlx5dr_rule_create(table->matcher_info[0].matcher,
+                                        pattern_template_index, rule_items,
+                                        action_template_index, rule_acts,
+                                        &rule_attr,
+                                        (struct mlx5dr_rule *)flow->rule);
+       } else {
+               uint32_t selector;
+
+               job->type = MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_CREATE;
+               rte_rwlock_read_lock(&table->matcher_replace_rwlk);
+               selector = table->matcher_selector;
+               ret = mlx5dr_rule_create(table->matcher_info[selector].matcher,
+                                        pattern_template_index, rule_items,
+                                        action_template_index, rule_acts,
+                                        &rule_attr,
+                                        (struct mlx5dr_rule *)flow->rule);
+               rte_rwlock_read_unlock(&table->matcher_replace_rwlk);
+               flow->matcher_selector = selector;
+       }
        if (likely(!ret))
                return (struct rte_flow *)flow;
 error:
@@ -3491,9 +3508,23 @@ flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,
                rte_errno = EINVAL;
                goto error;
        }
-       ret = mlx5dr_rule_create(table->matcher,
-                                0, items, action_template_index, rule_acts,
-                                &rule_attr, (struct mlx5dr_rule *)flow->rule);
+       if (likely(!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))) {
+               ret = mlx5dr_rule_create(table->matcher_info[0].matcher,
+                                        0, items, action_template_index,
+                                        rule_acts, &rule_attr,
+                                        (struct mlx5dr_rule *)flow->rule);
+       } else {
+               uint32_t selector;
+
+               job->type = MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_CREATE;
+               rte_rwlock_read_lock(&table->matcher_replace_rwlk);
+               selector = table->matcher_selector;
+               ret = mlx5dr_rule_create(table->matcher_info[selector].matcher,
+                                        0, items, action_template_index,
+                                        rule_acts, &rule_attr,
+                                        (struct mlx5dr_rule *)flow->rule);
+               rte_rwlock_read_unlock(&table->matcher_replace_rwlk);
+       }
        if (likely(!ret))
                return (struct rte_flow *)flow;
 error:
@@ -3673,7 +3704,8 @@ flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "fail to destroy rte flow: flow queue 
full");
-       job->type = MLX5_HW_Q_JOB_TYPE_DESTROY;
+       job->type = !rte_flow_template_table_resizable(dev->data->port_id, &fh->table->cfg.attr) ?
+                   MLX5_HW_Q_JOB_TYPE_DESTROY : MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_DESTROY;
        job->user_data = user_data;
        job->flow = fh;
        rule_attr.user_data = job;
@@ -3785,6 +3817,26 @@ flow_hw_pull_legacy_indirect_comp(struct rte_eth_dev *dev, struct mlx5_hw_q_job
        }
 }
 
+static __rte_always_inline int
+mlx5_hw_pull_flow_transfer_comp(struct rte_eth_dev *dev,
+                               uint32_t queue, struct rte_flow_op_result res[],
+                               uint16_t n_res)
+{
+       uint32_t size, i;
+       struct mlx5_hw_q_job *job = NULL;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct rte_ring *ring = priv->hw_q[queue].flow_transfer_completed;
+
+       size = RTE_MIN(rte_ring_count(ring), n_res);
+       for (i = 0; i < size; i++) {
+               res[i].status = RTE_FLOW_OP_SUCCESS;
+               rte_ring_dequeue(ring, (void **)&job);
+               res[i].user_data = job->user_data;
+               flow_hw_job_put(priv, job, queue);
+       }
+       return (int)size;
+}
+
 static inline int
 __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,
                                 uint32_t queue,
@@ -3833,6 +3885,79 @@ __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,
        return ret_comp;
 }
 
+static __rte_always_inline void
+hw_cmpl_flow_update_or_destroy(struct rte_eth_dev *dev,
+                              struct mlx5_hw_q_job *job,
+                              uint32_t queue, struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
+       struct rte_flow_hw *flow = job->flow;
+       struct rte_flow_template_table *table = flow->table;
+       /* Release the original resource index in case of update. */
+       uint32_t res_idx = flow->res_idx;
+
+       if (flow->fate_type == MLX5_FLOW_FATE_JUMP)
+               flow_hw_jump_release(dev, flow->jump);
+       else if (flow->fate_type == MLX5_FLOW_FATE_QUEUE)
+               mlx5_hrxq_obj_release(dev, flow->hrxq);
+       if (mlx5_hws_cnt_id_valid(flow->cnt_id))
+               flow_hw_age_count_release(priv, queue,
+                                         flow, error);
+       if (flow->mtr_id) {
+               mlx5_ipool_free(pool->idx_pool, flow->mtr_id);
+               flow->mtr_id = 0;
+       }
+       if (job->type != MLX5_HW_Q_JOB_TYPE_UPDATE) {
+               if (table) {
+                       mlx5_ipool_free(table->resource, res_idx);
+                       mlx5_ipool_free(table->flow, flow->idx);
+               }
+       } else {
+               rte_memcpy(flow, job->upd_flow,
+                          offsetof(struct rte_flow_hw, rule));
+               mlx5_ipool_free(table->resource, res_idx);
+       }
+}
+
+static __rte_always_inline void
+hw_cmpl_resizable_tbl(struct rte_eth_dev *dev,
+                     struct mlx5_hw_q_job *job,
+                     uint32_t queue, enum rte_flow_op_status status,
+                     struct rte_flow_error *error)
+{
+       struct rte_flow_hw *flow = job->flow;
+       struct rte_flow_template_table *table = flow->table;
+       uint32_t selector = flow->matcher_selector;
+       uint32_t other_selector = (selector + 1) & 1;
+       uint32_t __rte_unused refcnt;
+
+       switch (job->type) {
+       case MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_CREATE:
+               __atomic_add_fetch(&table->matcher_info[selector].refcnt,
+                                  1, __ATOMIC_RELAXED);
+               break;
+       case MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_DESTROY:
+               refcnt = __atomic_sub_fetch(&table->matcher_info[selector].refcnt, 1,
+                                           __ATOMIC_RELAXED);
+               MLX5_ASSERT((int)refcnt >= 0);
+               hw_cmpl_flow_update_or_destroy(dev, job, queue, error);
+               break;
+       case MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_MOVE:
+               if (status == RTE_FLOW_OP_SUCCESS) {
+                       refcnt = __atomic_sub_fetch(&table->matcher_info[selector].refcnt,
+                                                   1, __ATOMIC_RELAXED);
+                       MLX5_ASSERT((int)refcnt >= 0);
+                       __atomic_add_fetch(&table->matcher_info[other_selector].refcnt,
+                                          1, __ATOMIC_RELAXED);
+                       flow->matcher_selector = other_selector;
+               }
+               break;
+       default:
+               break;
+       }
+}
+
 /**
  * Pull the enqueued flows.
  *
@@ -3861,9 +3986,7 @@ flow_hw_pull(struct rte_eth_dev *dev,
             struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
        struct mlx5_hw_q_job *job;
-       uint32_t res_idx;
        int ret, i;
 
        /* 1. Pull the flow completion. */
@@ -3874,31 +3997,20 @@ flow_hw_pull(struct rte_eth_dev *dev,
                                "fail to query flow queue");
        for (i = 0; i <  ret; i++) {
                job = (struct mlx5_hw_q_job *)res[i].user_data;
-               /* Release the original resource index in case of update. */
-               res_idx = job->flow->res_idx;
                /* Restore user data. */
                res[i].user_data = job->user_data;
-               if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY ||
-                   job->type == MLX5_HW_Q_JOB_TYPE_UPDATE) {
-                       if (job->flow->fate_type == MLX5_FLOW_FATE_JUMP)
-                               flow_hw_jump_release(dev, job->flow->jump);
-                       else if (job->flow->fate_type == MLX5_FLOW_FATE_QUEUE)
-                               mlx5_hrxq_obj_release(dev, job->flow->hrxq);
-                       if (mlx5_hws_cnt_id_valid(job->flow->cnt_id))
-                               flow_hw_age_count_release(priv, queue,
-                                                         job->flow, error);
-                       if (job->flow->mtr_id) {
-                               mlx5_ipool_free(pool->idx_pool, job->flow->mtr_id);
-                               job->flow->mtr_id = 0;
-                       }
-                       if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
-                               mlx5_ipool_free(job->flow->table->resource, res_idx);
-                               mlx5_ipool_free(job->flow->table->flow, job->flow->idx);
-                       } else {
-                               rte_memcpy(job->flow, job->upd_flow,
-                                       offsetof(struct rte_flow_hw, rule));
-                               mlx5_ipool_free(job->flow->table->resource, res_idx);
-                       }
+               switch (job->type) {
+               case MLX5_HW_Q_JOB_TYPE_DESTROY:
+               case MLX5_HW_Q_JOB_TYPE_UPDATE:
+                       hw_cmpl_flow_update_or_destroy(dev, job, queue, error);
+                       break;
+               case MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_CREATE:
+               case MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_MOVE:
+               case MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_DESTROY:
+                       hw_cmpl_resizable_tbl(dev, job, queue, res[i].status, error);
+                       break;
+               default:
+                       break;
                }
                flow_hw_job_put(priv, job, queue);
        }
@@ -3906,24 +4018,36 @@ flow_hw_pull(struct rte_eth_dev *dev,
        if (ret < n_res)
                ret += __flow_hw_pull_indir_action_comp(dev, queue, &res[ret],
                                                        n_res - ret);
+       if (ret < n_res)
+               ret += mlx5_hw_pull_flow_transfer_comp(dev, queue, &res[ret],
+                                                      n_res - ret);
+
        return ret;
 }
 
+static uint32_t
+mlx5_hw_push_queue(struct rte_ring *pending_q, struct rte_ring *cmpl_q)
+{
+       void *job = NULL;
+       uint32_t i, size = rte_ring_count(pending_q);
+
+       for (i = 0; i < size; i++) {
+               rte_ring_dequeue(pending_q, &job);
+               rte_ring_enqueue(cmpl_q, job);
+       }
+       return size;
+}
+
 static inline uint32_t
 __flow_hw_push_action(struct rte_eth_dev *dev,
                    uint32_t queue)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct rte_ring *iq = priv->hw_q[queue].indir_iq;
-       struct rte_ring *cq = priv->hw_q[queue].indir_cq;
-       void *job = NULL;
-       uint32_t ret, i;
+       struct mlx5_hw_q *hw_q = &priv->hw_q[queue];
 
-       ret = rte_ring_count(iq);
-       for (i = 0; i < ret; i++) {
-               rte_ring_dequeue(iq, &job);
-               rte_ring_enqueue(cq, job);
-       }
+       mlx5_hw_push_queue(hw_q->indir_iq, hw_q->indir_cq);
+       mlx5_hw_push_queue(hw_q->flow_transfer_pending,
+                          hw_q->flow_transfer_completed);
        if (!priv->shared_host) {
                if (priv->hws_ctpool)
                        mlx5_aso_push_wqe(priv->sh,
@@ -4332,6 +4456,7 @@ flow_hw_table_create(struct rte_eth_dev *dev,
        grp = container_of(ge, struct mlx5_flow_group, entry);
        tbl->grp = grp;
        /* Prepare matcher information. */
+       matcher_attr.resizable = !!rte_flow_template_table_resizable(dev->data->port_id, &table_cfg->attr);
        matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_ANY;
        matcher_attr.priority = attr->flow_attr.priority;
        matcher_attr.optimize_using_rule_idx = true;
@@ -4350,7 +4475,7 @@ flow_hw_table_create(struct rte_eth_dev *dev,
                               RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG;
 
                if ((attr->specialize & val) == val) {
-                       DRV_LOG(INFO, "Invalid hint value %x",
+                       DRV_LOG(ERR, "Invalid hint value %x",
                                attr->specialize);
                        rte_errno = EINVAL;
                        goto it_error;
@@ -4394,10 +4519,11 @@ flow_hw_table_create(struct rte_eth_dev *dev,
                i = nb_item_templates;
                goto it_error;
        }
-       tbl->matcher = mlx5dr_matcher_create
+       tbl->matcher_info[0].matcher = mlx5dr_matcher_create
                (tbl->grp->tbl, mt, nb_item_templates, at, nb_action_templates, &matcher_attr);
-       if (!tbl->matcher)
+       if (!tbl->matcher_info[0].matcher)
                goto at_error;
+       tbl->matcher_attr = matcher_attr;
        tbl->type = attr->flow_attr.transfer ? MLX5DR_TABLE_TYPE_FDB :
                    (attr->flow_attr.egress ? MLX5DR_TABLE_TYPE_NIC_TX :
                    MLX5DR_TABLE_TYPE_NIC_RX);
@@ -4405,6 +4531,7 @@ flow_hw_table_create(struct rte_eth_dev *dev,
                LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
        else
                LIST_INSERT_HEAD(&priv->flow_hw_tbl_ongo, tbl, next);
+       rte_rwlock_init(&tbl->matcher_replace_rwlk);
        return tbl;
 at_error:
        for (i = 0; i < nb_action_templates; i++) {
@@ -4576,6 +4703,11 @@ flow_hw_template_table_create(struct rte_eth_dev *dev,
 
        if (flow_hw_translate_group(dev, &cfg, group, &cfg.attr.flow_attr.group, error))
                return NULL;
+       if (!cfg.attr.flow_attr.group && rte_flow_template_table_resizable(dev->data->port_id, attr)) {
+               rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                  "table cannot be resized: invalid group");
+               return NULL;
+       }
        return flow_hw_table_create(dev, &cfg, item_templates, nb_item_templates,
                                    action_templates, nb_action_templates, error);
 }
@@ -4648,7 +4780,10 @@ flow_hw_table_destroy(struct rte_eth_dev *dev,
                                   1, __ATOMIC_RELAXED);
        }
        flow_hw_destroy_table_multi_pattern_ctx(table);
-       mlx5dr_matcher_destroy(table->matcher);
+       if (table->matcher_info[0].matcher)
+               mlx5dr_matcher_destroy(table->matcher_info[0].matcher);
+       if (table->matcher_info[1].matcher)
+               mlx5dr_matcher_destroy(table->matcher_info[1].matcher);
        mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
        mlx5_ipool_destroy(table->resource);
        mlx5_ipool_destroy(table->flow);
@@ -9642,6 +9777,16 @@ action_template_drop_init(struct rte_eth_dev *dev,
        return 0;
 }
 
+static __rte_always_inline struct rte_ring *
+mlx5_hwq_ring_create(uint16_t port_id, uint32_t queue, uint32_t size, const char *str)
+{
+       char mz_name[RTE_MEMZONE_NAMESIZE];
+
+       snprintf(mz_name, sizeof(mz_name), "port_%u_%s_%u", port_id, str, queue);
+       return rte_ring_create(mz_name, size, SOCKET_ID_ANY,
+                              RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
+}
+
 /**
  * Configure port HWS resources.
  *
@@ -9769,7 +9914,6 @@ flow_hw_configure(struct rte_eth_dev *dev,
                goto err;
        }
        for (i = 0; i < nb_q_updated; i++) {
-               char mz_name[RTE_MEMZONE_NAMESIZE];
                uint8_t *encap = NULL, *push = NULL;
                struct mlx5_modification_cmd *mhdr_cmd = NULL;
                struct rte_flow_item *items = NULL;
@@ -9803,22 +9947,23 @@ flow_hw_configure(struct rte_eth_dev *dev,
                        job[j].upd_flow = &upd_flow[j];
                        priv->hw_q[i].job[j] = &job[j];
                }
-               snprintf(mz_name, sizeof(mz_name), "port_%u_indir_act_cq_%u",
-                        dev->data->port_id, i);
-               priv->hw_q[i].indir_cq = rte_ring_create(mz_name,
-                               _queue_attr[i]->size, SOCKET_ID_ANY,
-                               RING_F_SP_ENQ | RING_F_SC_DEQ |
-                               RING_F_EXACT_SZ);
+               /* Notice ring name length is limited. */
+               priv->hw_q[i].indir_cq = mlx5_hwq_ring_create
+                       (dev->data->port_id, i, _queue_attr[i]->size, "indir_act_cq");
                if (!priv->hw_q[i].indir_cq)
                        goto err;
-               snprintf(mz_name, sizeof(mz_name), "port_%u_indir_act_iq_%u",
-                        dev->data->port_id, i);
-               priv->hw_q[i].indir_iq = rte_ring_create(mz_name,
-                               _queue_attr[i]->size, SOCKET_ID_ANY,
-                               RING_F_SP_ENQ | RING_F_SC_DEQ |
-                               RING_F_EXACT_SZ);
+               priv->hw_q[i].indir_iq = mlx5_hwq_ring_create
+                       (dev->data->port_id, i, _queue_attr[i]->size, "indir_act_iq");
                if (!priv->hw_q[i].indir_iq)
                        goto err;
+               priv->hw_q[i].flow_transfer_pending = mlx5_hwq_ring_create
+                       (dev->data->port_id, i, _queue_attr[i]->size, "tx_pending");
+               if (!priv->hw_q[i].flow_transfer_pending)
+                       goto err;
+               priv->hw_q[i].flow_transfer_completed = mlx5_hwq_ring_create
+                       (dev->data->port_id, i, _queue_attr[i]->size, "tx_done");
+               if (!priv->hw_q[i].flow_transfer_completed)
+                       goto err;
        }
        dr_ctx_attr.pd = priv->sh->cdev->pd;
        dr_ctx_attr.queues = nb_q_updated;
@@ -10039,6 +10184,8 @@ flow_hw_configure(struct rte_eth_dev *dev,
        for (i = 0; i < nb_q_updated; i++) {
                rte_ring_free(priv->hw_q[i].indir_iq);
                rte_ring_free(priv->hw_q[i].indir_cq);
+               rte_ring_free(priv->hw_q[i].flow_transfer_pending);
+               rte_ring_free(priv->hw_q[i].flow_transfer_completed);
        }
        mlx5_free(priv->hw_q);
        priv->hw_q = NULL;
@@ -10139,6 +10286,8 @@ flow_hw_resource_release(struct rte_eth_dev *dev)
        for (i = 0; i < priv->nb_queue; i++) {
                rte_ring_free(priv->hw_q[i].indir_iq);
                rte_ring_free(priv->hw_q[i].indir_cq);
+               rte_ring_free(priv->hw_q[i].flow_transfer_pending);
+               rte_ring_free(priv->hw_q[i].flow_transfer_completed);
        }
        mlx5_free(priv->hw_q);
        priv->hw_q = NULL;
@@ -11969,7 +12118,7 @@ flow_hw_calc_table_hash(struct rte_eth_dev *dev,
        items = flow_hw_get_rule_items(dev, table, pattern,
                                       pattern_template_index,
                                       &job);
-       res = mlx5dr_rule_hash_calculate(table->matcher, items,
+       res = mlx5dr_rule_hash_calculate(mlx5_table_matcher(table), items,
                                         pattern_template_index,
                                         MLX5DR_RULE_HASH_CALC_MODE_RAW,
                                         hash);
@@ -12046,6 +12195,220 @@ flow_hw_calc_encap_hash(struct rte_eth_dev *dev,
        return 0;
 }
 
+static int
+flow_hw_table_resize_multi_pattern_actions(struct rte_eth_dev *dev,
+                                          struct rte_flow_template_table *table,
+                                          uint32_t nb_flows,
+                                          struct rte_flow_error *error)
+{
+       struct mlx5_multi_pattern_segment *segment = table->mpctx.segments;
+       uint32_t bulk_size;
+       int i, ret;
+
+       /**
+        * A segment always allocates Modify Header Argument Objects in
+        * powers of 2.
+        * On resize, the PMD adds only the minimal required argument objects.
+        * For example, a table sized 10 allocated 16 argument objects;
+        * resizing it to 15 will not add new objects.
+        */
+       for (i = 1;
+            i < MLX5_MAX_TABLE_RESIZE_NUM && segment->capacity;
+            i++, segment++);
+       if (i == MLX5_MAX_TABLE_RESIZE_NUM)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         table, "too many resizes");
+       if (segment->head_index - 1 >= nb_flows)
+               return 0;
+       bulk_size = rte_align32pow2(nb_flows - segment->head_index + 1);
+       ret = mlx5_tbl_multi_pattern_process(dev, table, segment,
+                                            rte_log2_u32(bulk_size),
+                                            error);
+       if (ret)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         table, "too many resizes");
+       return i;
+}
+
+static int
+flow_hw_table_resize(struct rte_eth_dev *dev,
+                    struct rte_flow_template_table *table,
+                    uint32_t nb_flows,
+                    struct rte_flow_error *error)
+{
+       struct mlx5dr_action_template *at[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
+       struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
+       struct mlx5dr_matcher_attr matcher_attr = table->matcher_attr;
+       struct mlx5_multi_pattern_segment *segment = NULL;
+       struct mlx5dr_matcher *matcher = NULL;
+       uint32_t i, selector = table->matcher_selector;
+       uint32_t other_selector = (selector + 1) & 1;
+       int ret;
+
+       if (!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         table, "no resizable attribute");
+       if (table->matcher_info[other_selector].matcher)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         table, "last table resize was not 
completed");
+       if (nb_flows <= table->cfg.attr.nb_flows)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         table, "shrinking table is not 
supported");
+       ret = mlx5_ipool_resize(table->flow, nb_flows);
+       if (ret)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         table, "cannot resize flows pool");
+       ret = mlx5_ipool_resize(table->resource, nb_flows);
+       if (ret)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         table, "cannot resize resources 
pool");
+       if (mlx5_is_multi_pattern_active(&table->mpctx)) {
+               ret = flow_hw_table_resize_multi_pattern_actions(dev, table, nb_flows, error);
+               if (ret < 0)
+                       return ret;
+               if (ret > 0)
+                       segment = table->mpctx.segments + ret;
+       }
+       for (i = 0; i < table->nb_item_templates; i++)
+               mt[i] = table->its[i]->mt;
+       for (i = 0; i < table->nb_action_templates; i++)
+               at[i] = table->ats[i].action_template->tmpl;
+       nb_flows = rte_align32pow2(nb_flows);
+       matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
+       matcher = mlx5dr_matcher_create(table->grp->tbl, mt,
+                                       table->nb_item_templates, at,
+                                       table->nb_action_templates,
+                                       &matcher_attr);
+       if (!matcher) {
+               ret = rte_flow_error_set(error, rte_errno,
+                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                        table, "failed to create new matcher");
+               goto error;
+       }
+       rte_rwlock_write_lock(&table->matcher_replace_rwlk);
+       ret = mlx5dr_matcher_resize_set_target
+                       (table->matcher_info[selector].matcher, matcher);
+       if (ret) {
+               rte_rwlock_write_unlock(&table->matcher_replace_rwlk);
+               ret = rte_flow_error_set(error, rte_errno,
+                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                        table, "failed to initiate matcher 
swap");
+               goto error;
+       }
+       table->cfg.attr.nb_flows = nb_flows;
+       table->matcher_info[other_selector].matcher = matcher;
+       table->matcher_info[other_selector].refcnt = 0;
+       table->matcher_selector = other_selector;
+       rte_rwlock_write_unlock(&table->matcher_replace_rwlk);
+       return 0;
+error:
+       if (segment)
+               mlx5_destroy_multi_pattern_segment(segment);
+       if (matcher) {
+               ret = mlx5dr_matcher_destroy(matcher);
+               return rte_flow_error_set(error, rte_errno,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         table, "failed to destroy new 
matcher");
+       }
+       return ret;
+}
+
+static int
+flow_hw_table_resize_complete(__rte_unused struct rte_eth_dev *dev,
+                             struct rte_flow_template_table *table,
+                             struct rte_flow_error *error)
+{
+       int ret;
+       uint32_t selector = table->matcher_selector;
+       uint32_t other_selector = (selector + 1) & 1;
+       struct mlx5_matcher_info *matcher_info = &table->matcher_info[other_selector];
+
+       if (!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         table, "no resizable attribute");
+       if (!matcher_info->matcher || matcher_info->refcnt)
+               return rte_flow_error_set(error, EBUSY,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         table, "cannot complete table 
resize");
+       ret = mlx5dr_matcher_destroy(matcher_info->matcher);
+       if (ret)
+               return rte_flow_error_set(error, rte_errno,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         table, "failed to destroy retired 
matcher");
+       matcher_info->matcher = NULL;
+       return 0;
+}
+
+static int
+flow_hw_update_resized(struct rte_eth_dev *dev, uint32_t queue,
+                      const struct rte_flow_op_attr *attr,
+                      struct rte_flow *flow, void *user_data,
+                      struct rte_flow_error *error)
+{
+       int ret;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_hw_q_job *job;
+       struct rte_flow_hw *hw_flow = (struct rte_flow_hw *)flow;
+       struct rte_flow_template_table *table = hw_flow->table;
+       uint32_t table_selector = table->matcher_selector;
+       uint32_t rule_selector = hw_flow->matcher_selector;
+       uint32_t other_selector;
+       struct mlx5dr_matcher *other_matcher;
+       struct mlx5dr_rule_attr rule_attr = {
+               .queue_id = queue,
+               .burst = attr->postpone,
+       };
+
+       /**
+        * mlx5dr_matcher_resize_rule_move() accepts original table matcher -
+        * the one that was used BEFORE table resize.
+        * Since the function is called AFTER table resize,
+        * `table->matcher_selector` always points to the new matcher and
+        * `hw_flow->matcher_selector` points to a matcher used to create the flow.
+        */
+       other_selector = rule_selector == table_selector ?
+                        (rule_selector + 1) & 1 : rule_selector;
+       other_matcher = table->matcher_info[other_selector].matcher;
+       if (!other_matcher)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                         "no active table resize");
+       job = flow_hw_job_get(priv, queue);
+       if (!job)
+               return rte_flow_error_set(error, ENOMEM,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                         "queue is full");
+       job->type = MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_MOVE;
+       job->user_data = user_data;
+       job->flow = hw_flow;
+       rule_attr.user_data = job;
+       if (rule_selector == table_selector) {
+               struct rte_ring *ring = !attr->postpone ?
+                                       
priv->hw_q[queue].flow_transfer_completed :
+                                       priv->hw_q[queue].flow_transfer_pending;
+               rte_ring_enqueue(ring, job);
+               return 0;
+       }
+       ret = mlx5dr_matcher_resize_rule_move(other_matcher,
+                                             (struct mlx5dr_rule *)hw_flow->rule,
+                                             &rule_attr);
+       if (ret) {
+               flow_hw_job_put(priv, job, queue);
+               return rte_flow_error_set(error, rte_errno,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                         "flow transfer failed");
+       }
+       return 0;
+}
+
 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
        .info_get = flow_hw_info_get,
        .configure = flow_hw_configure,
@@ -12057,11 +12420,14 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
        .actions_template_destroy = flow_hw_actions_template_destroy,
        .template_table_create = flow_hw_template_table_create,
        .template_table_destroy = flow_hw_table_destroy,
+       .table_resize = flow_hw_table_resize,
        .group_set_miss_actions = flow_hw_group_set_miss_actions,
        .async_flow_create = flow_hw_async_flow_create,
        .async_flow_create_by_index = flow_hw_async_flow_create_by_index,
        .async_flow_update = flow_hw_async_flow_update,
        .async_flow_destroy = flow_hw_async_flow_destroy,
+       .flow_update_resized = flow_hw_update_resized,
+       .table_resize_complete = flow_hw_table_resize_complete,
        .pull = flow_hw_pull,
        .push = flow_hw_push,
        .async_action_create = flow_hw_action_handle_create,
-- 
2.39.2
