Add basic actions validation before creating flow table.
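
Basic validation here means reusing the existing non-template (SW steering)
validators for actions templates in HW steering mode: MARK, DROP, JUMP,
QUEUE, RSS, L2/raw encap and decap and CONNTRACK actions are now checked
instead of the previous TODO stubs. In addition, template table creation
verifies that the table carries exactly one domain attribute (ingress,
egress or transfer) and that every pattern and actions template was
created for that same domain.

For illustration only, a minimal sketch of the new table-level check as
seen from the rte_flow template API (assuming a port already configured
for HW steering with rte_flow_configure(); the function name and the
group/size numbers below are hypothetical, not part of this patch):

    #include <rte_errno.h>
    #include <rte_flow.h>

    /* Illustrative only: expects a port already configured for HW steering
     * (dv_flow_en=2); identifiers and sizes are arbitrary.
     */
    static int
    mismatched_domain_example(uint16_t port_id)
    {
            struct rte_flow_error err = { 0 };
            /* Pattern template declared for the egress domain. */
            const struct rte_flow_pattern_template_attr pt_attr = {
                    .egress = 1,
            };
            const struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            /* Actions template and table declared for the ingress domain. */
            const struct rte_flow_actions_template_attr at_attr = {
                    .ingress = 1,
            };
            const struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_DROP },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };
            const struct rte_flow_template_table_attr tbl_attr = {
                    .flow_attr = { .group = 1, .ingress = 1 },
                    .nb_flows = 64,
            };
            struct rte_flow_pattern_template *pt;
            struct rte_flow_actions_template *at;
            struct rte_flow_template_table *tbl;

            pt = rte_flow_pattern_template_create(port_id, &pt_attr,
                                                  pattern, &err);
            at = rte_flow_actions_template_create(port_id, &at_attr,
                                                  actions, actions, &err);
            if (pt == NULL || at == NULL)
                    return -rte_errno;
            /* Now rejected up front with
             * "pattern template domain does not match table".
             */
            tbl = rte_flow_template_table_create(port_id, &tbl_attr, &pt, 1,
                                                 &at, 1, &err);
            return tbl == NULL ? -rte_errno : 0;
    }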

Signed-off-by: Gregory Etelson <getel...@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnow...@nvidia.com>
---
 drivers/net/mlx5/mlx5.h            |  13 +
 drivers/net/mlx5/mlx5_flow.c       |  15 +-
 drivers/net/mlx5/mlx5_flow.h       |  33 ++-
 drivers/net/mlx5/mlx5_flow_dv.c    |  20 +-
 drivers/net/mlx5/mlx5_flow_hw.c    | 431 +++++++++++++++++++++++++----
 drivers/net/mlx5/mlx5_flow_verbs.c |   2 +-
 6 files changed, 445 insertions(+), 69 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 9e4a5feb49..e2c22ffe97 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -2010,6 +2010,19 @@ struct mlx5_priv {
        RTE_ATOMIC(uint16_t) shared_refcnt; /* HW steering host reference counter. */
 };
 
+static __rte_always_inline bool
+mlx5_hws_active(const struct rte_eth_dev *dev)
+{
+#if defined(HAVE_MLX5_HWS_SUPPORT)
+       const struct mlx5_priv *priv = dev->data->dev_private;
+
+       return priv->sh->config.dv_flow_en == 2;
+#else
+       RTE_SET_USED(dev);
+       return false;
+#endif
+}
+
 #define PORT_ID(priv) ((priv)->dev_data->port_id)
 #define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
 #define CTRL_QUEUE_ID(priv) ((priv)->nb_queue - 1)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8eafceff37..c90b87c8ef 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1939,7 +1939,8 @@ mlx5_flow_validate_action_flag(uint64_t action_flags,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
+mlx5_flow_validate_action_mark(struct rte_eth_dev *dev,
+                              const struct rte_flow_action *action,
                               uint64_t action_flags,
                               const struct rte_flow_attr *attr,
                               struct rte_flow_error *error)
@@ -1971,6 +1972,10 @@ mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "mark action not supported for "
                                          "egress");
+       if (attr->transfer && mlx5_hws_active(dev))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+                                         "non-template mark action not supported for transfer");
        return 0;
 }
 
@@ -2039,6 +2044,10 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_action_queue *queue = action->conf;
 
+       if (!queue)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
+                                         "no QUEUE action configuration");
        if (action_flags & MLX5_FLOW_FATE_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
@@ -2152,6 +2161,10 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
        const char *message;
        uint32_t queue_idx;
 
+       if (!rss)
+               return rte_flow_error_set
+                       (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+                        action, "no RSS action configuration");
        if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
                DRV_LOG(WARNING, "port %u symmetric RSS supported with SORT",
                        dev->data->port_id);
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 8b4088e35e..dd5b30a8a4 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -2874,7 +2874,8 @@ int mlx5_flow_validate_action_drop(struct rte_eth_dev *dev,
 int mlx5_flow_validate_action_flag(uint64_t action_flags,
                                   const struct rte_flow_attr *attr,
                                   struct rte_flow_error *error);
-int mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
+int mlx5_flow_validate_action_mark(struct rte_eth_dev *dev,
+                                  const struct rte_flow_action *action,
                                   uint64_t action_flags,
                                   const struct rte_flow_attr *attr,
                                   struct rte_flow_error *error);
@@ -2895,6 +2896,33 @@ int mlx5_flow_validate_action_default_miss(uint64_t action_flags,
 int flow_validate_modify_field_level
                        (const struct rte_flow_field_data *data,
                         struct rte_flow_error *error);
+int
+flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
+                                uint64_t action_flags,
+                                const struct rte_flow_action *action,
+                                const struct rte_flow_attr *attr,
+                                struct rte_flow_error *error);
+int
+flow_dv_validate_action_decap(struct rte_eth_dev *dev,
+                             uint64_t action_flags,
+                             const struct rte_flow_action *action,
+                             const uint64_t item_flags,
+                             const struct rte_flow_attr *attr,
+                             struct rte_flow_error *error);
+int
+flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
+                              uint64_t action_flags,
+                              uint64_t item_flags,
+                              bool root,
+                              struct rte_flow_error *error);
+int
+flow_dv_validate_action_raw_encap_decap
+       (struct rte_eth_dev *dev,
+        const struct rte_flow_action_raw_decap *decap,
+        const struct rte_flow_action_raw_encap *encap,
+        const struct rte_flow_attr *attr, uint64_t *action_flags,
+        int *actions_n, const struct rte_flow_action *action,
+        uint64_t item_flags, struct rte_flow_error *error);
 int mlx5_flow_item_acceptable(const struct rte_flow_item *item,
                              const uint8_t *mask,
                              const uint8_t *nic_mask,
@@ -3348,5 +3376,8 @@ mlx5_destroy_legacy_indirect(struct rte_eth_dev *dev,
 void
 mlx5_hw_decap_encap_destroy(struct rte_eth_dev *dev,
                            struct mlx5_indirect_list *reformat);
+
+extern const struct rte_flow_action_raw_decap empty_decap;
+
 #endif
 #endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 6f72185916..06f5427abf 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -3659,7 +3659,7 @@ flow_dv_validate_action_mark(struct rte_eth_dev *dev,
                                          "if tunnel offload active");
        /* Fall back if no extended metadata register support. */
        if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
-               return mlx5_flow_validate_action_mark(action, action_flags,
+               return mlx5_flow_validate_action_mark(dev, action, action_flags,
                                                      attr, error);
        /* Extensive metadata mode requires registers. */
        if (!mlx5_flow_ext_mreg_supported(dev))
@@ -3898,7 +3898,7 @@ flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
+int
 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
                                 uint64_t action_flags,
                                 const struct rte_flow_action *action,
@@ -3943,7 +3943,7 @@ flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
+int
 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
                              uint64_t action_flags,
                              const struct rte_flow_action *action,
@@ -4016,7 +4016,7 @@ const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
+int
 flow_dv_validate_action_raw_encap_decap
        (struct rte_eth_dev *dev,
         const struct rte_flow_action_raw_decap *decap,
@@ -4105,7 +4105,7 @@ flow_dv_validate_action_raw_encap_decap
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
+int
 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
                               uint64_t action_flags,
                               uint64_t item_flags,
@@ -4124,10 +4124,12 @@ flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "CT cannot follow a fate action");
        if ((action_flags & MLX5_FLOW_ACTION_METER) ||
-           (action_flags & MLX5_FLOW_ACTION_AGE))
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
-                                         "Only one ASO action is supported");
+           (action_flags & MLX5_FLOW_ACTION_AGE)) {
+               if (!mlx5_hws_active(dev))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ACTION,
+                                                 NULL, "Only one ASO action is supported");
+       }
        if (action_flags & MLX5_FLOW_ACTION_ENCAP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 427d7f2359..a60d1e594e 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4609,6 +4609,25 @@ mlx5_hw_build_template_table(struct rte_eth_dev *dev,
        return rte_errno;
 }
 
+static bool
+flow_hw_validate_template_domain(const struct rte_flow_attr *table_attr,
+                                uint32_t ingress, uint32_t egress, uint32_t transfer)
+{
+       if (table_attr->ingress)
+               return ingress != 0;
+       else if (table_attr->egress)
+               return egress != 0;
+       else
+               return transfer;
+}
+
+static bool
+flow_hw_validate_table_domain(const struct rte_flow_attr *table_attr)
+{
+       return table_attr->ingress + table_attr->egress + table_attr->transfer
+               == 1;
+}
+
 /**
  * Create flow table.
  *
@@ -4679,6 +4698,38 @@ flow_hw_table_create(struct rte_eth_dev *dev,
        size_t tbl_mem_size;
        int err;
 
+       if (!flow_hw_validate_table_domain(&attr->flow_attr)) {
+               rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+                                  NULL, "invalid table domain attributes");
+               return NULL;
+       }
+       for (i = 0; i < nb_item_templates; i++) {
+               const struct rte_flow_pattern_template_attr *pt_attr =
+                       &item_templates[i]->attr;
+               bool match = flow_hw_validate_template_domain(&attr->flow_attr,
+                                                             pt_attr->ingress,
+                                                             pt_attr->egress,
+                                                             pt_attr->transfer);
+               if (!match) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                          NULL, "pattern template domain does not match table");
+                       return NULL;
+               }
+       }
+       for (i = 0; i < nb_action_templates; i++) {
+               const struct rte_flow_actions_template *at = action_templates[i];
+               bool match = flow_hw_validate_template_domain(&attr->flow_attr,
+                                                             at->attr.ingress,
+                                                             at->attr.egress,
+                                                             at->attr.transfer);
+               if (!match) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                          NULL, "action template domain does not match table");
+                       return NULL;
+               }
+       }
        /* HWS layer accepts only 1 item template with root table. */
        if (!attr->flow_attr.group)
                max_tpl = 1;
@@ -6026,42 +6077,6 @@ flow_hw_validate_action_ipv6_ext_push(struct rte_eth_dev *dev __rte_unused,
        return 0;
 }
 
-/**
- * Validate raw_encap action.
- *
- * @param[in] dev
- *   Pointer to rte_eth_dev structure.
- * @param[in] action
- *   Pointer to the indirect action.
- * @param[out] error
- *   Pointer to error structure.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-flow_hw_validate_action_raw_encap(const struct rte_flow_action *action,
-                                 const struct rte_flow_action *mask,
-                                 struct rte_flow_error *error)
-{
-       const struct rte_flow_action_raw_encap *mask_conf = mask->conf;
-       const struct rte_flow_action_raw_encap *action_conf = action->conf;
-
-       if (!mask_conf || !mask_conf->size)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ACTION, mask,
-                                         "raw_encap: size must be masked");
-       if (!action_conf || !action_conf->size)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
-                                         "raw_encap: invalid action configuration");
-       if (mask_conf->data && !action_conf->data)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
-                                         "raw_encap: masked data is missing");
-       return 0;
-}
-
 /**
  * Process `... / raw_decap / raw_encap / ...` actions sequence.
  * The PMD handles the sequence as a single encap or decap reformat action,
@@ -6378,6 +6393,278 @@ flow_hw_validate_action_nat64(struct rte_eth_dev *dev,
                                  NULL, "NAT64 action is not supported.");
 }
 
+static int
+flow_hw_validate_action_jump(struct rte_eth_dev *dev,
+                            const struct rte_flow_actions_template_attr *attr,
+                            const struct rte_flow_action *action,
+                            const struct rte_flow_action *mask,
+                            struct rte_flow_error *error)
+{
+       const struct rte_flow_action_jump *m = mask->conf;
+       const struct rte_flow_action_jump *v = action->conf;
+       struct mlx5_flow_template_table_cfg cfg = {
+               .external = true,
+               .attr = {
+                       .flow_attr = {
+                               .ingress = attr->ingress,
+                               .egress = attr->egress,
+                               .transfer = attr->transfer,
+                       },
+               },
+       };
+       uint32_t t_group = 0;
+
+       if (!m || !m->group)
+               return 0;
+       if (!v)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
+                                         "Invalid jump action configuration");
+       if (flow_hw_translate_group(dev, &cfg, v->group, &t_group, error))
+               return -rte_errno;
+       if (t_group == 0)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
+                                         "Unsupported action - jump to root table");
+       return 0;
+}
+
+static int
+mlx5_hw_validate_action_mark(struct rte_eth_dev *dev,
+                            const struct rte_flow_action *template_action,
+                            const struct rte_flow_action *template_mask,
+                            uint64_t action_flags,
+                            const struct rte_flow_actions_template_attr *template_attr,
+                            struct rte_flow_error *error)
+{
+       const struct rte_flow_action_mark *mark_mask = template_mask->conf;
+       const struct rte_flow_action *action =
+               mark_mask && mark_mask->id ? template_action :
+               &(const struct rte_flow_action) {
+               .type = RTE_FLOW_ACTION_TYPE_MARK,
+               .conf = &(const struct rte_flow_action_mark) {
+                       .id = MLX5_FLOW_MARK_MAX - 1
+               }
+       };
+       const struct rte_flow_attr attr = {
+               .ingress = template_attr->ingress,
+               .egress = template_attr->egress,
+               .transfer = template_attr->transfer
+       };
+
+       return mlx5_flow_validate_action_mark(dev, action, action_flags,
+                                             &attr, error);
+}
+
+#define MLX5_FLOW_DEFAULT_INGRESS_QUEUE 0
+
+static int
+mlx5_hw_validate_action_queue(struct rte_eth_dev *dev,
+                             const struct rte_flow_action *template_action,
+                             const struct rte_flow_action *template_mask,
+                             const struct rte_flow_actions_template_attr *template_attr,
+                             uint64_t action_flags,
+                             struct rte_flow_error *error)
+{
+       const struct rte_flow_action_queue *queue_mask = template_mask->conf;
+       const struct rte_flow_action *action =
+               queue_mask && queue_mask->index ? template_action :
+               &(const struct rte_flow_action) {
+               .type = RTE_FLOW_ACTION_TYPE_QUEUE,
+               .conf = &(const struct rte_flow_action_queue) {
+                       .index = MLX5_FLOW_DEFAULT_INGRESS_QUEUE
+               }
+       };
+       const struct rte_flow_attr attr = {
+               .ingress = template_attr->ingress,
+               .egress = template_attr->egress,
+               .transfer = template_attr->transfer
+       };
+
+       return mlx5_flow_validate_action_queue(action, action_flags,
+                                              dev, &attr, error);
+}
+
+static int
+mlx5_hw_validate_action_rss(struct rte_eth_dev *dev,
+                             const struct rte_flow_action *template_action,
+                             const struct rte_flow_action *template_mask,
+                             const struct rte_flow_actions_template_attr *template_attr,
+                             __rte_unused uint64_t action_flags,
+                             struct rte_flow_error *error)
+{
+       const struct rte_flow_action_rss *mask = template_mask->conf;
+       const struct rte_flow_action *action = mask ? template_action :
+               &(const struct rte_flow_action) {
+               .type = RTE_FLOW_ACTION_TYPE_RSS,
+               .conf = &(const struct rte_flow_action_rss) {
+                       .queue_num = 1,
+                       .queue = (uint16_t [1]) {
+                               MLX5_FLOW_DEFAULT_INGRESS_QUEUE
+                       }
+               }
+       };
+
+       if (template_attr->egress || template_attr->transfer)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ATTR, NULL,
+                                         "RSS action supported for ingress only");
+       return mlx5_validate_action_rss(dev, action, error);
+}
+
+static int
+mlx5_hw_validate_action_l2_encap(struct rte_eth_dev *dev,
+                                const struct rte_flow_action *template_action,
+                                const struct rte_flow_action *template_mask,
+                                const struct rte_flow_actions_template_attr *template_attr,
+                                uint64_t action_flags,
+                                struct rte_flow_error *error)
+{
+       const struct rte_flow_action_vxlan_encap default_action_conf = {
+               .definition = (struct rte_flow_item *)
+                       (struct rte_flow_item [1]) {
+                       [0] = { .type = RTE_FLOW_ITEM_TYPE_END }
+               }
+       };
+       const struct rte_flow_action *action = template_mask->conf ?
+               template_action : &(const struct rte_flow_action) {
+                       .type = template_mask->type,
+                       .conf = &default_action_conf
+       };
+       const struct rte_flow_attr attr = {
+               .ingress = template_attr->ingress,
+               .egress = template_attr->egress,
+               .transfer = template_attr->transfer
+       };
+
+       return flow_dv_validate_action_l2_encap(dev, action_flags, action,
+                                               &attr, error);
+}
+
+static int
+mlx5_hw_validate_action_l2_decap(struct rte_eth_dev *dev,
+                                const struct rte_flow_action *template_action,
+                                const struct rte_flow_action *template_mask,
+                                const struct rte_flow_actions_template_attr *template_attr,
+                                uint64_t action_flags,
+                                struct rte_flow_error *error)
+{
+       const struct rte_flow_action_vxlan_encap default_action_conf = {
+               .definition = (struct rte_flow_item *)
+                       (struct rte_flow_item [1]) {
+                               [0] = { .type = RTE_FLOW_ITEM_TYPE_END }
+                       }
+       };
+       const struct rte_flow_action *action = template_mask->conf ?
+                                              template_action : &(const struct rte_flow_action) {
+                       .type = template_mask->type,
+                       .conf = &default_action_conf
+               };
+       const struct rte_flow_attr attr = {
+               .ingress = template_attr->ingress,
+               .egress = template_attr->egress,
+               .transfer = template_attr->transfer
+       };
+       uint64_t item_flags =
+               action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
+               MLX5_FLOW_LAYER_VXLAN : 0;
+
+       return flow_dv_validate_action_decap(dev, action_flags, action,
+                                            item_flags, &attr, error);
+}
+
+static int
+mlx5_hw_validate_action_conntrack(struct rte_eth_dev *dev,
+                                 const struct rte_flow_action *template_action,
+                                 const struct rte_flow_action *template_mask,
+                                 const struct rte_flow_actions_template_attr *template_attr,
+                                 uint64_t action_flags,
+                                 struct rte_flow_error *error)
+{
+       RTE_SET_USED(template_action);
+       RTE_SET_USED(template_mask);
+       RTE_SET_USED(template_attr);
+       return flow_dv_validate_action_aso_ct(dev, action_flags,
+                                             MLX5_FLOW_LAYER_OUTER_L4_TCP,
+                                             false, error);
+}
+
+static int
+flow_hw_validate_action_raw_encap(const struct rte_flow_action *action,
+                                 const struct rte_flow_action *mask,
+                                 struct rte_flow_error *error)
+{
+       const struct rte_flow_action_raw_encap *mask_conf = mask->conf;
+       const struct rte_flow_action_raw_encap *action_conf = action->conf;
+
+       if (!mask_conf || !mask_conf->size)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, mask,
+                                         "raw_encap: size must be masked");
+       if (!action_conf || !action_conf->size)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
+                                         "raw_encap: invalid action configuration");
+       if (mask_conf->data && !action_conf->data)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION,
+                                         action, "raw_encap: masked data is missing");
+       return 0;
+}
+
+
+static int
+flow_hw_validate_action_raw_reformat(struct rte_eth_dev *dev,
+                                    const struct rte_flow_action *template_action,
+                                    const struct rte_flow_action *template_mask,
+                                    const struct
+                                    rte_flow_actions_template_attr *template_attr,
+                                    uint64_t *action_flags,
+                                    struct rte_flow_error *error)
+{
+       const struct rte_flow_action *encap_action = NULL;
+       const struct rte_flow_action *encap_mask = NULL;
+       const struct rte_flow_action_raw_decap *raw_decap = NULL;
+       const struct rte_flow_action_raw_encap *raw_encap = NULL;
+       const struct rte_flow_attr attr = {
+               .ingress = template_attr->ingress,
+               .egress = template_attr->egress,
+               .transfer = template_attr->transfer
+       };
+       uint64_t item_flags = 0;
+       int ret, actions_n = 0;
+
+       if (template_action->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP) {
+               raw_decap = template_mask->conf ?
+                           template_action->conf : &empty_decap;
+               if ((template_action + 1)->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
+                       if ((template_mask + 1)->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
+                               return rte_flow_error_set(error, EINVAL,
+                                                         RTE_FLOW_ERROR_TYPE_ACTION,
+                                                         template_mask + 1, "invalid mask type");
+                       encap_action = template_action + 1;
+                       encap_mask = template_mask + 1;
+               }
+       } else {
+               encap_action = template_action;
+               encap_mask = template_mask;
+       }
+       if (encap_action) {
+               raw_encap = encap_action->conf;
+               ret = flow_hw_validate_action_raw_encap(encap_action,
+                                                       encap_mask, error);
+               if (ret)
+                       return ret;
+       }
+       return flow_dv_validate_action_raw_encap_decap(dev, raw_decap,
+                                                      raw_encap, &attr,
+                                                      action_flags, &actions_n,
+                                                      template_action,
+                                                      item_flags, error);
+}
+
+
+
 static int
 mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
                              const struct rte_flow_actions_template_attr *attr,
@@ -6432,15 +6719,27 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
-                       /* TODO: Validation logic */
+                       ret = mlx5_hw_validate_action_mark(dev, action, mask,
+                                                          action_flags,
+                                                          attr, error);
+                       if (ret)
+                               return ret;
                        action_flags |= MLX5_FLOW_ACTION_MARK;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
-                       /* TODO: Validation logic */
+                       ret = mlx5_flow_validate_action_drop
+                               (dev, action_flags,
+                                &(struct rte_flow_attr){.egress = attr->egress},
+                                error);
+                       if (ret)
+                               return ret;
                        action_flags |= MLX5_FLOW_ACTION_DROP;
                        break;
                case RTE_FLOW_ACTION_TYPE_JUMP:
-                       /* TODO: Validation logic */
+                       /* Only validate the jump to root table in template stage. */
+                       ret = flow_hw_validate_action_jump(dev, attr, action, mask, error);
+                       if (ret)
+                               return ret;
                        action_flags |= MLX5_FLOW_ACTION_JUMP;
                        break;
 #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
@@ -6462,38 +6761,52 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
                        break;
 #endif
                case RTE_FLOW_ACTION_TYPE_QUEUE:
-                       /* TODO: Validation logic */
+                       ret = mlx5_hw_validate_action_queue(dev, action, mask,
+                                                           attr, action_flags,
+                                                           error);
+                       if (ret)
+                               return ret;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
-                       /* TODO: Validation logic */
+                       ret = mlx5_hw_validate_action_rss(dev, action, mask,
+                                                         attr, action_flags,
+                                                         error);
+                       if (ret)
+                               return ret;
                        action_flags |= MLX5_FLOW_ACTION_RSS;
                        break;
                case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
-                       /* TODO: Validation logic */
-                       action_flags |= MLX5_FLOW_ACTION_ENCAP;
-                       break;
                case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
-                       /* TODO: Validation logic */
+                       ret = mlx5_hw_validate_action_l2_encap(dev, action, mask,
+                                                              attr, action_flags,
+                                                              error);
+                       if (ret)
+                               return ret;
                        action_flags |= MLX5_FLOW_ACTION_ENCAP;
                        break;
                case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
-                       /* TODO: Validation logic */
-                       action_flags |= MLX5_FLOW_ACTION_DECAP;
-                       break;
                case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
-                       /* TODO: Validation logic */
+                       ret = mlx5_hw_validate_action_l2_decap(dev, action, mask,
+                                                              attr, action_flags,
+                                                              error);
+                       if (ret)
+                               return ret;
                        action_flags |= MLX5_FLOW_ACTION_DECAP;
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
-                       ret = flow_hw_validate_action_raw_encap(action, mask, error);
-                       if (ret < 0)
-                               return ret;
-                       action_flags |= MLX5_FLOW_ACTION_ENCAP;
-                       break;
                case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
-                       /* TODO: Validation logic */
-                       action_flags |= MLX5_FLOW_ACTION_DECAP;
+                       ret = flow_hw_validate_action_raw_reformat(dev, action,
+                                                                  mask, attr,
+                                                                  &action_flags,
+                                                                  error);
+                       if (ret)
+                               return ret;
+                       if (action->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
+                          (action + 1)->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
+                               action_flags |= MLX5_FLOW_XCAP_ACTIONS;
+                               i++;
+                       }
                        break;
                case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
                        ret = flow_hw_validate_action_ipv6_ext_push(dev, action, error);
@@ -6561,7 +6874,11 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
                        action_flags |= MLX5_FLOW_ACTION_COUNT;
                        break;
                case RTE_FLOW_ACTION_TYPE_CONNTRACK:
-                       /* TODO: Validation logic */
+                       ret = mlx5_hw_validate_action_conntrack(dev, action, mask,
+                                                               attr, action_flags,
+                                                               error);
+                       if (ret)
+                               return ret;
                        action_flags |= MLX5_FLOW_ACTION_CT;
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index fe9c818abc..9879f14213 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1522,7 +1522,7 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                        action_flags |= MLX5_FLOW_ACTION_FLAG;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
-                       ret = mlx5_flow_validate_action_mark(actions,
+                       ret = mlx5_flow_validate_action_mark(dev, actions,
                                                             action_flags,
                                                             attr,
                                                             error);
-- 
2.43.0
