Whenever a unicast DMAC or unicast DMAC with VLAN ID control flow rule
is created while working with the Verbs or DV flow engine,
add this flow rule to the control flow rule list,
along with the information required to recognize it.

Signed-off-by: Dariusz Sosnowski <dsosnow...@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c    | 32 +++++++++++++++++++++++++++++---
 drivers/net/mlx5/mlx5_trigger.c | 26 ++++++++++++++++++++++++--
 2 files changed, 53 insertions(+), 5 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 463edae70e..2038f78481 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -8495,8 +8495,9 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
                        .type = RTE_FLOW_ACTION_TYPE_END,
                },
        };
-       uint32_t flow_idx;
+       uintptr_t flow_idx;
        struct rte_flow_error error;
+       struct mlx5_ctrl_flow_entry *entry;
        unsigned int i;
 
        if (!priv->reta_idx_n || !priv->rxqs_n) {
@@ -8506,11 +8507,36 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
                action_rss.types = 0;
        for (i = 0; i != priv->reta_idx_n; ++i)
                queue[i] = (*priv->reta_idx)[i];
+
+       entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry), alignof(typeof(*entry)), SOCKET_ID_ANY);
+       if (entry == NULL) {
+               rte_errno = ENOMEM;
+               goto err;
+       }
+
+       entry->owner_dev = dev;
+       if (vlan_spec == NULL) {
+               entry->info.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC;
+       } else {
+               entry->info.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN;
+               entry->info.uc.vlan = rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci);
+       }
+       entry->info.uc.dmac = eth_spec->hdr.dst_addr;
+
        flow_idx = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
                                    &attr, items, actions, false, &error);
-       if (!flow_idx)
-               return -rte_errno;
+       if (!flow_idx) {
+               mlx5_free(entry);
+               goto err;
+       }
+
+       entry->flow = (struct rte_flow *)flow_idx;
+       LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next);
+
        return 0;
+
+err:
+       return -rte_errno;
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index bf836c92fc..4fa9319c4d 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -20,6 +20,8 @@
 #include "mlx5_utils.h"
 #include "rte_pmd_mlx5.h"
 
+static void mlx5_traffic_disable_legacy(struct rte_eth_dev *dev);
+
 /**
  * Stop traffic on Tx queues.
  *
@@ -1736,11 +1738,31 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
        return 0;
 error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
-       mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
+       mlx5_traffic_disable_legacy(dev);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
 }
 
+static void
+mlx5_traffic_disable_legacy(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_ctrl_flow_entry *entry;
+       struct mlx5_ctrl_flow_entry *tmp;
+
+       /*
+        * Free registered control flow rules first,
+        * to free the memory allocated for list entries
+        */
+       entry = LIST_FIRST(&priv->hw_ctrl_flows);
+       while (entry != NULL) {
+               tmp = LIST_NEXT(entry, next);
+               mlx5_legacy_ctrl_flow_destroy(dev, entry);
+               entry = tmp;
+       }
+
+       mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
+}
 
 /**
  * Disable traffic flows configured by control plane
@@ -1758,7 +1780,7 @@ mlx5_traffic_disable(struct rte_eth_dev *dev)
                mlx5_flow_hw_flush_ctrl_flows(dev);
        else
 #endif
-               mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
+               mlx5_traffic_disable_legacy(dev);
 }
 
 /**
-- 
2.39.5

Reply via email to