Reserve a push data buffer for each job; the maximum
length is set to 128 bytes for now.

Only the IPPROTO_ROUTING type is supported when translating the
rte_flow action.

Pop actions must be shared globally, and only TCP or UDP is
supported as the next layer.

Signed-off-by: Rongwei Liu <rongw...@nvidia.com>
---
 doc/guides/nics/mlx5.rst        |   9 +-
 drivers/net/mlx5/mlx5.h         |   1 +
 drivers/net/mlx5/mlx5_flow.h    |  25 ++-
 drivers/net/mlx5/mlx5_flow_hw.c | 268 ++++++++++++++++++++++++++++++--
 4 files changed, 291 insertions(+), 12 deletions(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 7a137d5f6a..11b7864d23 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -162,7 +162,7 @@ Features
 - Sub-Function.
 - Matching on represented port.
 - Matching on aggregated affinity.
-
+- Push or remove IPv6 routing extension.
 
 Limitations
 -----------
@@ -694,6 +694,13 @@ Limitations
   The flow engine of a process cannot move from active to standby mode
   if preceding active application rules are still present and vice versa.
 
+- IPv6 routing extension push or remove:
+
+  - Supported only with HW Steering enabled (``dv_flow_en`` = 2).
+  - Supported in non-zero group (no limit on the transfer domain if
``fdb_def_rule_en`` = 1, which is the default).
+  - Supports only TCP or UDP as the next layer.
+  - IPv6 routing header must be the only present extension.
+
 
 Statistics
 ----------
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 2cb6364957..5c568070a3 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -364,6 +364,7 @@ struct mlx5_hw_q_job {
        };
        void *user_data; /* Job user data. */
        uint8_t *encap_data; /* Encap data. */
+       uint8_t *push_data; /* IPv6 routing push data. */
        struct mlx5_modification_cmd *mhdr_cmd;
        struct rte_flow_item *items;
        union {
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 821c6ca281..97dc7c3b4d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -311,6 +311,8 @@ enum mlx5_feature_name {
 #define MLX5_FLOW_ACTION_SEND_TO_KERNEL (1ull << 42)
 #define MLX5_FLOW_ACTION_INDIRECT_COUNT (1ull << 43)
 #define MLX5_FLOW_ACTION_INDIRECT_AGE (1ull << 44)
+#define MLX5_FLOW_ACTION_IPV6_ROUTING_POP (1ull << 45)
+#define MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH (1ull << 46)
 
 #define MLX5_FLOW_DROP_INCLUSIVE_ACTIONS \
        (MLX5_FLOW_ACTION_COUNT | MLX5_FLOW_ACTION_SAMPLE | 
MLX5_FLOW_ACTION_AGE)
@@ -538,6 +540,7 @@ struct mlx5_flow_dv_matcher {
        struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
 };
 
+#define MLX5_PUSH_MAX_LEN 128
 #define MLX5_ENCAP_MAX_LEN 132
 
 /* Encap/decap resource structure. */
@@ -1167,6 +1170,8 @@ struct rte_flow_hw {
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
 
+#define MLX5_MHDR_MAX_CMD ((MLX5_MAX_MODIFY_NUM) * 2 + 1)
+
 /* rte flow action translate to DR action struct. */
 struct mlx5_action_construct_data {
        LIST_ENTRY(mlx5_action_construct_data) next;
@@ -1211,6 +1216,12 @@ struct mlx5_action_construct_data {
                struct {
                        cnt_id_t id;
                } shared_counter;
+               struct {
+                       /* IPv6 routing push data len. */
+                       uint16_t len;
+                       /* Modify header actions to keep valid checksum. */
+                       struct mlx5_modification_cmd cmd[MLX5_MHDR_MAX_CMD];
+               } recom;
                struct {
                        uint32_t id;
                } shared_meter;
@@ -1253,6 +1264,7 @@ struct rte_flow_actions_template {
        uint16_t *actions_off; /* DR action offset for given rte action offset. 
*/
        uint16_t reformat_off; /* Offset of DR reformat action. */
        uint16_t mhdr_off; /* Offset of DR modify header action. */
+       uint16_t recom_off;  /* Offset of DR IPv6 routing push pop action. */
        uint32_t refcnt; /* Reference counter. */
        uint16_t rx_cpy_pos; /* Action position of Rx metadata to be copied. */
        uint8_t flex_item; /* flex item index. */
@@ -1275,7 +1287,14 @@ struct mlx5_hw_encap_decap_action {
        uint8_t data[]; /* Action data. */
 };
 
-#define MLX5_MHDR_MAX_CMD ((MLX5_MAX_MODIFY_NUM) * 2 + 1)
+/* Push pop action struct. */
+struct mlx5_hw_push_pop_action {
+       struct mlx5dr_action *action; /* Action object. */
+       /* Is push_pop action shared across flows in table. */
+       uint8_t shared;
+       size_t data_size; /* Action metadata size. */
+       uint8_t data[]; /* Action data. */
+};
 
 /* Modify field action struct. */
 struct mlx5_hw_modify_header_action {
@@ -1304,6 +1323,9 @@ struct mlx5_hw_actions {
        /* Encap/Decap action. */
        struct mlx5_hw_encap_decap_action *encap_decap;
        uint16_t encap_decap_pos; /* Encap/Decap action position. */
+       /* Push/Pop action. */
+       struct mlx5_hw_push_pop_action *push_pop;
+       uint16_t push_pop_pos; /* Push/Pop action position. */
        uint32_t mark:1; /* Indicate the mark action. */
        cnt_id_t cnt_id; /* Counter id. */
        uint32_t mtr_id; /* Meter id. */
@@ -1329,7 +1351,6 @@ struct mlx5_flow_group {
        uint32_t idx; /* Group memory index. */
 };
 
-
 #define MLX5_HW_TBL_MAX_ITEM_TEMPLATE 2
 #define MLX5_HW_TBL_MAX_ACTION_TEMPLATE 32
 
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 7e0ee8d883..d6b2953d55 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -479,6 +479,12 @@ __flow_hw_action_template_destroy(struct rte_eth_dev *dev,
                mlx5_free(acts->encap_decap);
                acts->encap_decap = NULL;
        }
+       if (acts->push_pop) {
+               if (acts->push_pop->action)
+                       mlx5dr_action_destroy(acts->push_pop->action);
+               mlx5_free(acts->push_pop);
+               acts->push_pop = NULL;
+       }
        if (acts->mhdr) {
                if (acts->mhdr->action)
                        mlx5dr_action_destroy(acts->mhdr->action);
@@ -601,6 +607,53 @@ __flow_hw_act_data_encap_append(struct mlx5_priv *priv,
        return 0;
 }
 
+/**
+ * Append dynamic push action to the dynamic action list.
+ *
+ * @param[in] dev
+ *   Pointer to the port.
+ * @param[in] acts
+ *   Pointer to the template HW steering DR actions.
+ * @param[in] type
+ *   Action type.
+ * @param[in] action_src
+ *   Offset of source rte flow action.
+ * @param[in] action_dst
+ *   Offset of destination DR action.
+ * @param[in] len
+ *   Length of the data to be updated.
+ * @param[in] buf
+ *   Data to be updated.
+ *
+ * @return
+ *    Data pointer on success, NULL otherwise and rte_errno is set.
+ */
+static __rte_always_inline void *
+__flow_hw_act_data_push_append(struct rte_eth_dev *dev,
+                              struct mlx5_hw_actions *acts,
+                              enum rte_flow_action_type type,
+                              uint16_t action_src,
+                              uint16_t action_dst,
+                              uint16_t len, uint8_t *buf)
+{
+       struct mlx5_modification_cmd cmd[MLX5_MHDR_MAX_CMD];
+       struct mlx5_action_construct_data *act_data;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       int ret;
+
+       memset(cmd, 0, sizeof(cmd));
+       ret = flow_dv_generate_ipv6_routing_push_mhdr2(dev, NULL, cmd, 
MLX5_MHDR_MAX_CMD, buf);
+       if (ret < 0)
+               return NULL;
+       act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
+       if (!act_data)
+               return NULL;
+       act_data->recom.len = len;
+       memcpy(act_data->recom.cmd, cmd, ret * sizeof(struct 
mlx5_modification_cmd));
+       LIST_INSERT_HEAD(&acts->act_list, act_data, next);
+       return act_data;
+}
+
 static __rte_always_inline int
 __flow_hw_act_data_hdr_modify_append(struct mlx5_priv *priv,
                                     struct mlx5_hw_actions *acts,
@@ -1359,20 +1412,25 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
+       struct mlx5_hca_flex_attr *hca_attr = 
&priv->sh->cdev->config.hca_attr.flex;
        const struct rte_flow_attr *attr = &table_attr->flow_attr;
        struct rte_flow_action *actions = at->actions;
        struct rte_flow_action *action_start = actions;
        struct rte_flow_action *masks = at->masks;
-       enum mlx5dr_action_reformat_type refmt_type = 0;
+       enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
+       enum mlx5dr_action_type recom_type = (enum mlx5dr_action_type)0;
        const struct rte_flow_action_raw_encap *raw_encap_data;
+       const struct rte_flow_action_ipv6_ext_push *ipv6_ext_data;
        const struct rte_flow_item *enc_item = NULL, *enc_item_m = NULL;
-       uint16_t reformat_src = 0;
+       uint16_t reformat_src = 0, recom_src = 0;
        uint8_t *encap_data = NULL, *encap_data_m = NULL;
-       size_t data_size = 0;
+       uint8_t *push_data = NULL, *push_data_m = NULL;
+       size_t data_size = 0, push_size = 0;
        struct mlx5_hw_modify_header_action mhdr = { 0 };
        bool actions_end = false;
        uint32_t type;
        bool reformat_used = false;
+       bool recom_used = false;
        unsigned int of_vlan_offset;
        uint16_t action_pos;
        uint16_t jump_pos;
@@ -1564,6 +1622,36 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
                        reformat_used = true;
                        refmt_type = MLX5DR_ACTION_REFORMAT_TYPE_TNL_L2_TO_L2;
                        break;
+               case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
+                       if (!hca_attr->query_match_sample_info || 
!hca_attr->parse_graph_anchor ||
+                           !priv->sh->srh_flex_parser.flex.mapnum) {
+                               DRV_LOG(ERR, "SRv6 anchor is not supported.");
+                               goto err;
+                       }
+                       MLX5_ASSERT(!recom_used && !recom_type);
+                       recom_used = true;
+                       recom_type = MLX5DR_ACTION_TYP_IPV6_ROUTING_PUSH;
+                       if (masks) {
+                               ipv6_ext_data =
+                                       (const struct 
rte_flow_action_ipv6_ext_push *)masks->conf;
+                               if (ipv6_ext_data)
+                                       push_data_m = ipv6_ext_data->data;
+                       }
+                       ipv6_ext_data =
+                               (const struct rte_flow_action_ipv6_ext_push 
*)actions->conf;
+                       push_data = ipv6_ext_data->data;
+                       push_size = ipv6_ext_data->size;
+                       recom_src = actions - action_start;
+                       break;
+               case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
+                       if (!hca_attr->query_match_sample_info || 
!hca_attr->parse_graph_anchor ||
+                           !priv->sh->srh_flex_parser.flex.mapnum) {
+                               DRV_LOG(ERR, "SRv6 anchor is not supported.");
+                               goto err;
+                       }
+                       recom_used = true;
+                       recom_type = MLX5DR_ACTION_TYP_IPV6_ROUTING_POP;
+                       break;
                case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
                        DRV_LOG(ERR, "send to kernel action is not supported in 
HW steering.");
                        goto err;
@@ -1767,6 +1855,47 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
                acts->encap_decap->shared = shared_rfmt;
                acts->encap_decap_pos = at->reformat_off;
        }
+       if (recom_used) {
+               struct mlx5_action_construct_data *act_data;
+               uint32_t flag, bulk = 0;
+
+               flag = mlx5_hw_act_flag[!!attr->group][type];
+               if (push_data && !push_data_m)
+                       bulk = rte_log2_u32(table_attr->nb_flows);
+               else
+                       flag |= MLX5DR_ACTION_FLAG_SHARED;
+
+               MLX5_ASSERT(at->recom_off != UINT16_MAX);
+               acts->push_pop = mlx5_malloc(MLX5_MEM_ZERO,
+                                sizeof(*acts->push_pop) + push_size, 0, 
SOCKET_ID_ANY);
+               if (!acts->push_pop)
+                       goto err;
+               if (push_data && push_size) {
+                       acts->push_pop->data_size = push_size;
+                       memcpy(acts->push_pop->data, push_data, push_size);
+               }
+               acts->push_pop->action = 
mlx5dr_action_create_recombination(priv->dr_ctx,
+                                        recom_type, push_size, push_data, 
bulk, flag);
+               if (!acts->push_pop->action)
+                       goto err;
+               acts->rule_acts[at->recom_off].action = acts->push_pop->action;
+               acts->rule_acts[at->recom_off].recom.data = 
acts->push_pop->data;
+               acts->rule_acts[at->recom_off].recom.offset = 0;
+               acts->push_pop->shared = flag & MLX5DR_ACTION_FLAG_SHARED;
+               acts->push_pop_pos = at->recom_off;
+               if (!acts->push_pop->shared) {
+                       act_data = __flow_hw_act_data_push_append(dev, acts,
+                                       RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH,
+                                       recom_src, at->recom_off, push_size,
+                                       acts->push_pop->data);
+                       if (!act_data)
+                               goto err;
+                       /* Clear srv6 next header */
+                       *acts->push_pop->data = 0;
+                       acts->rule_acts[at->recom_off].recom.mhdr =
+                                               (uint8_t *)act_data->recom.cmd;
+               }
+       }
        return 0;
 err:
        err = rte_errno;
@@ -2143,11 +2272,13 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
        const struct mlx5_hw_actions *hw_acts = &hw_at->acts;
        const struct rte_flow_action *action;
        const struct rte_flow_action_raw_encap *raw_encap_data;
+       const struct rte_flow_action_ipv6_ext_push *ipv6_push;
        const struct rte_flow_item *enc_item = NULL;
        const struct rte_flow_action_ethdev *port_action = NULL;
        const struct rte_flow_action_meter *meter = NULL;
        const struct rte_flow_action_age *age = NULL;
        uint8_t *buf = job->encap_data;
+       uint8_t *push_buf = job->push_data;
        struct rte_flow_attr attr = {
                        .ingress = 1,
        };
@@ -2273,6 +2404,12 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
                        MLX5_ASSERT(raw_encap_data->size ==
                                    act_data->encap.len);
                        break;
+               case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
+                       ipv6_push =
+                               (const struct rte_flow_action_ipv6_ext_push 
*)action->conf;
+                       rte_memcpy((void *)push_buf, ipv6_push->data, 
act_data->recom.len);
+                       MLX5_ASSERT(ipv6_push->size == act_data->recom.len);
+                       break;
                case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
                        if (action->type == 
RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
                                ret = flow_hw_set_vlan_vid_construct(dev, job,
@@ -2428,6 +2565,32 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
                                job->flow->idx - 1;
                rule_acts[hw_acts->encap_decap_pos].reformat.data = buf;
        }
+       if (hw_acts->push_pop && !hw_acts->push_pop->shared) {
+               struct mlx5_modification_cmd *mhdr;
+               uint32_t data_ofs, rule_data;
+               int i;
+
+               rule_acts[hw_acts->push_pop_pos].recom.offset =
+                               job->flow->idx - 1;
+               mhdr = (struct mlx5_modification_cmd *)rule_acts
+                               [hw_acts->push_pop_pos].recom.mhdr;
+               /* Modify IPv6 dst address is in reverse order. */
+               data_ofs = sizeof(struct rte_ipv6_routing_ext) + *(push_buf + 
3) * 16;
+               data_ofs -= sizeof(uint32_t);
+               /* next_hop address. */
+               for (i = 0; i < 4; i++) {
+                       rule_data = flow_dv_fetch_field(push_buf + data_ofs,
+                                                       sizeof(uint32_t));
+                       mhdr[i].data1 = rte_cpu_to_be_32(rule_data);
+                       data_ofs -= sizeof(uint32_t);
+               }
+               /* next_hdr */
+               rule_data = flow_dv_fetch_field(push_buf, sizeof(uint8_t));
+               mhdr[i].data1 = rte_cpu_to_be_32(rule_data);
+               /* clear next_hdr for insert. */
+               *push_buf = 0;
+               rule_acts[hw_acts->push_pop_pos].recom.data = push_buf;
+       }
        if (mlx5_hws_cnt_id_valid(hw_acts->cnt_id))
                job->flow->cnt_id = hw_acts->cnt_id;
        return 0;
@@ -3864,6 +4027,38 @@ flow_hw_validate_action_indirect(struct rte_eth_dev *dev,
        return 0;
 }
 
+/**
+ * Validate ipv6_ext_push action.
+ *
+ * @param[in] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in] action
+ *   Pointer to the indirect action.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_hw_validate_action_ipv6_ext_push(struct rte_eth_dev *dev __rte_unused,
+                                     const struct rte_flow_action *action,
+                                     struct rte_flow_error *error)
+{
+       const struct rte_flow_action_ipv6_ext_push *raw_push_data = 
action->conf;
+
+       if (!raw_push_data || !raw_push_data->size || !raw_push_data->data)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
+                                         "invalid ipv6_ext_push data");
+       if (raw_push_data->type != IPPROTO_ROUTING ||
+           raw_push_data->size > MLX5_PUSH_MAX_LEN)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
+                                         "Unsupported ipv6_ext_push type or 
length");
+       return 0;
+}
+
 /**
  * Validate raw_encap action.
  *
@@ -4046,6 +4241,7 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
        uint16_t i;
        bool actions_end = false;
        int ret;
+       const struct rte_flow_action_ipv6_ext_remove *remove_data;
 
        /* FDB actions are only valid to proxy port. */
        if (attr->transfer && (!priv->sh->config.dv_esw_en || !priv->master))
@@ -4122,6 +4318,21 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
                        /* TODO: Validation logic */
                        action_flags |= MLX5_FLOW_ACTION_DECAP;
                        break;
+               case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
+                       ret = flow_hw_validate_action_ipv6_ext_push(dev, 
action, error);
+                       if (ret < 0)
+                               return ret;
+                       action_flags |= MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
+                       break;
+               case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
+                       remove_data = action->conf;
+                       /* Remove action must be shared. */
+                       if (remove_data->type != IPPROTO_ROUTING || !mask) {
+                               DRV_LOG(ERR, "Only supports shared IPv6 routing 
remove");
+                               return -EINVAL;
+                       }
+                       action_flags |= MLX5_FLOW_ACTION_IPV6_ROUTING_POP;
+                       break;
                case RTE_FLOW_ACTION_TYPE_METER:
                        /* TODO: Validation logic */
                        action_flags |= MLX5_FLOW_ACTION_METER;
@@ -4229,6 +4440,8 @@ static enum mlx5dr_action_type mlx5_hw_dr_action_types[] 
= {
        [RTE_FLOW_ACTION_TYPE_CONNTRACK] = MLX5DR_ACTION_TYP_ASO_CT,
        [RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = MLX5DR_ACTION_TYP_POP_VLAN,
        [RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = MLX5DR_ACTION_TYP_PUSH_VLAN,
+       [RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH] = 
MLX5DR_ACTION_TYP_IPV6_ROUTING_PUSH,
+       [RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE] = 
MLX5DR_ACTION_TYP_IPV6_ROUTING_POP,
 };
 
 static int
@@ -4285,6 +4498,8 @@ flow_hw_dr_actions_template_handle_shared(const struct 
rte_flow_action *mask,
 /**
  * Create DR action template based on a provided sequence of flow actions.
  *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
  * @param[in] at
  *   Pointer to flow actions template to be updated.
  *
@@ -4293,7 +4508,8 @@ flow_hw_dr_actions_template_handle_shared(const struct 
rte_flow_action *mask,
  *   NULL otherwise.
  */
 static struct mlx5dr_action_template *
-flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
+flow_hw_dr_actions_template_create(struct rte_eth_dev *dev,
+                                  struct rte_flow_actions_template *at)
 {
        struct mlx5dr_action_template *dr_template;
        enum mlx5dr_action_type action_types[MLX5_HW_MAX_ACTS] = { 
MLX5DR_ACTION_TYP_LAST };
@@ -4302,8 +4518,11 @@ flow_hw_dr_actions_template_create(struct 
rte_flow_actions_template *at)
        enum mlx5dr_action_type reformat_act_type = 
MLX5DR_ACTION_TYP_TNL_L2_TO_L2;
        uint16_t reformat_off = UINT16_MAX;
        uint16_t mhdr_off = UINT16_MAX;
+       uint16_t recom_off = UINT16_MAX;
        uint16_t cnt_off = UINT16_MAX;
+       enum mlx5dr_action_type recom_type = MLX5DR_ACTION_TYP_IPV6_ROUTING_POP;
        int ret;
+
        for (i = 0, curr_off = 0; at->actions[i].type != 
RTE_FLOW_ACTION_TYPE_END; ++i) {
                const struct rte_flow_action_raw_encap *raw_encap_data;
                size_t data_size;
@@ -4332,6 +4551,16 @@ flow_hw_dr_actions_template_create(struct 
rte_flow_actions_template *at)
                        reformat_off = curr_off++;
                        reformat_act_type = 
mlx5_hw_dr_action_types[at->actions[i].type];
                        break;
+               case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
+                       MLX5_ASSERT(recom_off == UINT16_MAX);
+                       recom_type = MLX5DR_ACTION_TYP_IPV6_ROUTING_PUSH;
+                       recom_off = curr_off++;
+                       break;
+               case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
+                       MLX5_ASSERT(recom_off == UINT16_MAX);
+                       recom_type = MLX5DR_ACTION_TYP_IPV6_ROUTING_POP;
+                       recom_off = curr_off++;
+                       break;
                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
                        raw_encap_data = at->actions[i].conf;
                        data_size = raw_encap_data->size;
@@ -4404,11 +4633,25 @@ flow_hw_dr_actions_template_create(struct 
rte_flow_actions_template *at)
                at->reformat_off = reformat_off;
                action_types[reformat_off] = reformat_act_type;
        }
+       if (recom_off != UINT16_MAX) {
+               at->recom_off = recom_off;
+               action_types[recom_off] = recom_type;
+       }
        dr_template = mlx5dr_action_template_create(action_types);
-       if (dr_template)
+       if (dr_template) {
                at->dr_actions_num = curr_off;
-       else
+       } else {
                DRV_LOG(ERR, "Failed to create DR action template: %d", 
rte_errno);
+               return NULL;
+       }
+       /* Create srh flex parser for pop anchor. */
+       if ((recom_type == MLX5DR_ACTION_TYP_IPV6_ROUTING_POP ||
+            recom_type == MLX5DR_ACTION_TYP_IPV6_ROUTING_PUSH) &&
+           mlx5_alloc_srh_flex_parser(dev)) {
+               DRV_LOG(ERR, "Failed to create srv6 flex parser");
+               claim_zero(mlx5dr_action_template_destroy(dr_template));
+               return NULL;
+       }
        return dr_template;
 err_actions_num:
        DRV_LOG(ERR, "Number of HW actions (%u) exceeded maximum (%u) allowed 
in template",
@@ -4706,6 +4949,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
                at->actions_off[i] = UINT16_MAX;
        at->reformat_off = UINT16_MAX;
        at->mhdr_off = UINT16_MAX;
+       at->recom_off = UINT16_MAX;
        at->rx_cpy_pos = pos;
        /*
         * mlx5 PMD hacks indirect action index directly to the action conf.
@@ -4734,7 +4978,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
                        }
                }
        }
-       at->tmpl = flow_hw_dr_actions_template_create(at);
+       at->tmpl = flow_hw_dr_actions_template_create(dev, at);
        if (!at->tmpl)
                goto error;
        at->action_flags = action_flags;
@@ -4779,6 +5023,8 @@ flow_hw_actions_template_destroy(struct rte_eth_dev *dev,
                                   NULL,
                                   "action template in using");
        }
+       if (template->tmpl && 
mlx5dr_action_template_contain_srv6(template->tmpl))
+               mlx5_free_srh_flex_parser(dev);
        LIST_REMOVE(template, next);
        flow_hw_flex_item_release(dev, &template->flex_item);
        if (template->tmpl)
@@ -7230,6 +7476,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
                mem_size += (sizeof(struct mlx5_hw_q_job *) +
                            sizeof(struct mlx5_hw_q_job) +
                            sizeof(uint8_t) * MLX5_ENCAP_MAX_LEN +
+                           sizeof(uint8_t) * MLX5_PUSH_MAX_LEN +
                            sizeof(struct mlx5_modification_cmd) *
                            MLX5_MHDR_MAX_CMD +
                            sizeof(struct rte_flow_item) *
@@ -7244,7 +7491,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
        }
        for (i = 0; i < nb_q_updated; i++) {
                char mz_name[RTE_MEMZONE_NAMESIZE];
-               uint8_t *encap = NULL;
+               uint8_t *encap = NULL, *push = NULL;
                struct mlx5_modification_cmd *mhdr_cmd = NULL;
                struct rte_flow_item *items = NULL;
 
@@ -7263,11 +7510,14 @@ flow_hw_configure(struct rte_eth_dev *dev,
                           &job[_queue_attr[i]->size];
                encap = (uint8_t *)
                         &mhdr_cmd[_queue_attr[i]->size * MLX5_MHDR_MAX_CMD];
-               items = (struct rte_flow_item *)
+               push = (uint8_t *)
                         &encap[_queue_attr[i]->size * MLX5_ENCAP_MAX_LEN];
+               items = (struct rte_flow_item *)
+                        &push[_queue_attr[i]->size * MLX5_PUSH_MAX_LEN];
                for (j = 0; j < _queue_attr[i]->size; j++) {
                        job[j].mhdr_cmd = &mhdr_cmd[j * MLX5_MHDR_MAX_CMD];
                        job[j].encap_data = &encap[j * MLX5_ENCAP_MAX_LEN];
+                       job[j].push_data = &push[j * MLX5_PUSH_MAX_LEN];
                        job[j].items = &items[j * MLX5_HW_MAX_ITEMS];
                        priv->hw_q[i].job[j] = &job[j];
                }
-- 
2.27.0

Reply via email to