From: Bing Zhao <bi...@nvidia.com>

The RTE_FLOW_ACTION_TYPE_END action needs to be translated into
MLX5DR_ACTION_TYP_LAST for future use. This is only needed in the
hardware steering backward compatible API.

Signed-off-by: Bing Zhao <bi...@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnow...@nvidia.com>
---
 drivers/net/mlx5/mlx5.h         |  1 +
 drivers/net/mlx5/mlx5_flow_hw.c | 19 +++++++++++++++++++
 2 files changed, 20 insertions(+)
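
Note (not part of the diff below): a minimal sketch of how the backward
compatible flow path could terminate an mlx5dr rule action array with the
shared dummy "last" action created here, once RTE_FLOW_ACTION_TYPE_END is
reached. The function name example_terminate_rule_actions(), the rule_acts
array and the pos index are illustrative assumptions, not mlx5 symbols; it
assumes mlx5.h and the HWS headers are included.

    /*
     * Illustrative sketch only -- not part of the mlx5 code base.
     * Terminate an mlx5dr rule action array with the shared dummy
     * action once the rte_flow actions list reaches END.
     */
    static void
    example_terminate_rule_actions(struct mlx5_priv *priv,
                                   struct mlx5dr_rule_action *rule_acts,
                                   unsigned int pos)
    {
            /* RTE_FLOW_ACTION_TYPE_END maps to MLX5DR_ACTION_TYP_LAST. */
            rule_acts[pos].action = priv->sh->hw_dummy_last;
    }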

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a6ff1f8ac08..69e0dcff4c6 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1529,6 +1529,7 @@ struct mlx5_dev_ctx_shared {
        /* Direct Rules tables for FDB, NIC TX+RX */
        void *dr_drop_action; /* Pointer to DR drop action, any domain. */
        void *pop_vlan_action; /* Pointer to DR pop VLAN action. */
+       void *hw_dummy_last; /* Pointer to the DR dummy end action, any domain. */
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
        struct mlx5_send_to_kernel_action send_to_kernel_action[MLX5DR_TABLE_TYPE_MAX];
 #endif
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index d8a166ed4ec..5f1e93c3aad 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -11219,6 +11219,17 @@ flow_hw_configure(struct rte_eth_dev *dev,
                if (ret)
                        goto err;
        }
+       /*
+        * All domains will use the same dummy action, which is only used in the
+        * backward compatible API. Initialize it only once. No order dependency.
+        */
+       if (!priv->sh->hw_dummy_last) {
+               priv->sh->hw_dummy_last = mlx5dr_action_create_last(priv->dr_ctx,
+                                                                   MLX5DR_ACTION_FLAG_HWS_RX);
+               if (!priv->sh->hw_dummy_last)
+                       /* Do not overwrite the rte_errno. */
+                       goto err;
+       }
        if (!priv->shared_host)
                flow_hw_create_send_to_kernel_actions(priv);
        if (port_attr->nb_conn_tracks || (host_priv && host_priv->hws_ctpool)) {
@@ -11297,6 +11308,10 @@ flow_hw_configure(struct rte_eth_dev *dev,
                flow_hw_ct_mng_destroy(dev, priv->ct_mng);
                priv->ct_mng = NULL;
        }
+       if (priv->sh->hw_dummy_last) {
+               mlx5dr_action_destroy(priv->sh->hw_dummy_last);
+               priv->sh->hw_dummy_last = NULL;
+       }
        flow_hw_destroy_send_to_kernel_action(priv);
        flow_hw_cleanup_ctrl_fdb_tables(dev);
        flow_hw_free_vport_actions(priv);
@@ -11422,6 +11437,10 @@ flow_hw_resource_release(struct rte_eth_dev *dev)
                mlx5dr_action_destroy(priv->hw_def_miss);
        flow_hw_destroy_nat64_actions(priv);
        flow_hw_destroy_vlan(dev);
+       if (priv->sh->hw_dummy_last) {
+               mlx5dr_action_destroy(priv->sh->hw_dummy_last);
+               priv->sh->hw_dummy_last = NULL;
+       }
        flow_hw_destroy_send_to_kernel_action(priv);
        flow_hw_free_vport_actions(priv);
        if (priv->acts_ipool) {
-- 
2.21.0
