The current PMD implementation successfully validates pattern
templates that will always be rejected later, during table template
creation.

The patch adds basic HWS verification to pattern template validation
to ensure that the pattern can be used in a table template.

The PMD updates `rte_errno` if pattern template validation fails
(see the usage sketch after the list):

E2BIG - pattern too big for PMD
ENOTSUP - pattern not supported by PMD
ENOMEM - PMD allocation failure
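
A minimal sketch (not part of this patch) of how an application might
check these codes after a failed pattern template creation; the port ID,
attributes, and items below are hypothetical placeholders, only the
rte_flow calls and errno values come from the API and this patch:

    #include <errno.h>
    #include <stdio.h>

    #include <rte_errno.h>
    #include <rte_flow.h>

    /* Hypothetical helper; port_id, attributes and items are placeholders. */
    static struct rte_flow_pattern_template *
    create_pattern_template(uint16_t port_id, struct rte_flow_error *err)
    {
        const struct rte_flow_pattern_template_attr attr = { .ingress = 1 };
        const struct rte_flow_item items[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_pattern_template *pt;

        pt = rte_flow_pattern_template_create(port_id, &attr, items, err);
        if (pt == NULL) {
            /* rte_errno carries the PMD verdict described above. */
            if (rte_errno == E2BIG)
                printf("pattern too big for the PMD\n");
            else if (rte_errno == ENOTSUP)
                printf("pattern not supported by the PMD\n");
            else if (rte_errno == ENOMEM)
                printf("PMD allocation failure\n");
        }
        return pt;
    }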

Signed-off-by: Gregory Etelson <getel...@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnow...@nvidia.com>
---
 drivers/net/mlx5/mlx5.h         |   1 +
 drivers/net/mlx5/mlx5_flow_hw.c | 116 ++++++++++++++++++++++++++++++++
 2 files changed, 117 insertions(+)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index f2e2e04429..e98db91888 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1965,6 +1965,7 @@ struct mlx5_priv {
        struct mlx5_aso_mtr_pool *hws_mpool; /* HW steering's Meter pool. */
        struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
        /**< HW steering templates used to create control flow rules. */
+       struct rte_flow_actions_template *action_template_drop[MLX5DR_TABLE_TYPE_MAX];
 #endif
        struct rte_eth_dev *shared_host; /* Host device for HW steering. */
        uint16_t shared_refcnt; /* HW steering host reference counter. */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index da873ae2e2..ebb2efb2e1 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -6840,6 +6840,45 @@ flow_hw_pattern_has_sq_match(const struct rte_flow_item *items)
        return false;
 }
 
+static int
+pattern_template_validate(struct rte_eth_dev *dev,
+                         struct rte_flow_pattern_template *pt[], uint32_t pt_num)
+{
+       uint32_t group = 0;
+       struct rte_flow_template_table_attr tbl_attr = {
+               .nb_flows = 64,
+               .insertion_type = RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN,
+               .hash_func = RTE_FLOW_TABLE_HASH_FUNC_DEFAULT,
+               .flow_attr = {
+                       .ingress = pt[0]->attr.ingress,
+                       .egress = pt[0]->attr.egress,
+                       .transfer = pt[0]->attr.transfer
+               }
+       };
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct rte_flow_actions_template *action_template;
+
+       if (pt[0]->attr.ingress)
+               action_template = priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX];
+       else if (pt[0]->attr.egress)
+               action_template = priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX];
+       else if (pt[0]->attr.transfer)
+               action_template = priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB];
+       else
+               return EINVAL;
+       do {
+               struct rte_flow_template_table *tmpl_tbl;
+
+               tbl_attr.flow_attr.group = group;
+               tmpl_tbl = flow_hw_table_create(dev, &tbl_attr, pt, pt_num,
+                                               &action_template, 1, NULL);
+               if (!tmpl_tbl)
+                       return rte_errno;
+               flow_hw_table_destroy(dev, tmpl_tbl, NULL);
+       } while (++group <= 1);
+       return 0;
+}
+
 /**
  * Create flow item template.
  *
@@ -6975,8 +7014,19 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
                }
        }
        __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
+       rte_errno = pattern_template_validate(dev, &it, 1);
+       if (rte_errno)
+               goto error;
        LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
        return it;
+error:
+       flow_hw_flex_item_release(dev, &it->flex_item);
+       claim_zero(mlx5dr_match_template_destroy(it->mt));
+       mlx5_free(it);
+       rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                          "Failed to create pattern template");
+       return NULL;
+
 }
 
 /**
@@ -9184,6 +9234,67 @@ flow_hw_compare_config(const struct mlx5_flow_hw_attr *hw_attr,
        return true;
 }
 
+/*
+ * No need to explicitly release drop action templates on port stop.
+ * Drop action templates are released together with the other action templates during
+ * mlx5_dev_close -> flow_hw_resource_release -> flow_hw_actions_template_destroy
+ */
+static void
+action_template_drop_release(struct rte_eth_dev *dev)
+{
+       int i;
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
+               if (!priv->action_template_drop[i])
+                       continue;
+               flow_hw_actions_template_destroy(dev,
+                                                priv->action_template_drop[i],
+                                                NULL);
+               priv->action_template_drop[i] = NULL;
+       }
+}
+
+static int
+action_template_drop_init(struct rte_eth_dev *dev,
+                         struct rte_flow_error *error)
+{
+       const struct rte_flow_action drop[2] = {
+               [0] = { .type = RTE_FLOW_ACTION_TYPE_DROP },
+               [1] = { .type = RTE_FLOW_ACTION_TYPE_END },
+       };
+       const struct rte_flow_action *actions = drop;
+       const struct rte_flow_action *masks = drop;
+       const struct rte_flow_actions_template_attr attr[MLX5DR_TABLE_TYPE_MAX] = {
+               [MLX5DR_TABLE_TYPE_NIC_RX] = { .ingress = 1 },
+               [MLX5DR_TABLE_TYPE_NIC_TX] = { .egress = 1 },
+               [MLX5DR_TABLE_TYPE_FDB] = { .transfer = 1 }
+       };
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX] =
+               flow_hw_actions_template_create(dev,
+                                               &attr[MLX5DR_TABLE_TYPE_NIC_RX],
+                                               actions, masks, error);
+       if (!priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX])
+               return -1;
+       priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX] =
+               flow_hw_actions_template_create(dev,
+                                               &attr[MLX5DR_TABLE_TYPE_NIC_TX],
+                                               actions, masks, error);
+       if (!priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX])
+               return -1;
+       if (priv->sh->config.dv_esw_en && priv->master) {
+               priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB] =
+                       flow_hw_actions_template_create(dev,
+                                                       &attr[MLX5DR_TABLE_TYPE_FDB],
+                                                       actions, masks, error);
+               if (!priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB])
+                       return -1;
+       }
+       return 0;
+}
+
 /**
  * Configure port HWS resources.
  *
@@ -9426,6 +9537,9 @@ flow_hw_configure(struct rte_eth_dev *dev,
        rte_spinlock_init(&priv->hw_ctrl_lock);
        LIST_INIT(&priv->hw_ctrl_flows);
        LIST_INIT(&priv->hw_ext_ctrl_flows);
+       ret = action_template_drop_init(dev, error);
+       if (ret)
+               goto err;
        ret = flow_hw_create_ctrl_rx_tables(dev);
        if (ret) {
                rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -9559,6 +9673,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
                mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
                priv->hws_cpool = NULL;
        }
+       action_template_drop_release(dev);
        mlx5_flow_quota_destroy(dev);
        flow_hw_destroy_send_to_kernel_action(priv);
        flow_hw_free_vport_actions(priv);
@@ -9621,6 +9736,7 @@ flow_hw_resource_release(struct rte_eth_dev *dev)
        flow_hw_flush_all_ctrl_flows(dev);
        flow_hw_cleanup_tx_repr_tagging(dev);
        flow_hw_cleanup_ctrl_rx_tables(dev);
+       action_template_drop_release(dev);
        while (!LIST_EMPTY(&priv->flow_hw_grp)) {
                grp = LIST_FIRST(&priv->flow_hw_grp);
                flow_hw_group_unset_miss_group(dev, grp, NULL);
-- 
2.39.2
