Rework the code responsible for creation of unicast control flow rules,
to allow creation of:

- unicast DMAC flow rules and
- unicast DMAC with VLAN flow rules,

outside of mlx5_traffic_enable() called when port is started.

Signed-off-by: Dariusz Sosnowski <dsosnow...@nvidia.com>
---
 drivers/net/mlx5/meson.build          |   1 +
 drivers/net/mlx5/mlx5_flow.h          |   9 ++
 drivers/net/mlx5/mlx5_flow_hw.c       | 215 ++++++++++++++++++++------
 drivers/net/mlx5/mlx5_flow_hw_stubs.c |  41 +++++
 4 files changed, 219 insertions(+), 47 deletions(-)
 create mode 100644 drivers/net/mlx5/mlx5_flow_hw_stubs.c

diff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build
index eb5eb2cce7..0114673491 100644
--- a/drivers/net/mlx5/meson.build
+++ b/drivers/net/mlx5/meson.build
@@ -23,6 +23,7 @@ sources = files(
         'mlx5_flow_dv.c',
         'mlx5_flow_aso.c',
         'mlx5_flow_flex.c',
+        'mlx5_flow_hw_stubs.c',
         'mlx5_mac.c',
         'mlx5_rss.c',
         'mlx5_rx.c',
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 86a1476879..2ff0b25d4d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -2990,6 +2990,15 @@ struct mlx5_flow_hw_ctrl_fdb {
 #define MLX5_CTRL_VLAN_FILTER    (RTE_BIT32(6))
 
 int mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags);
+
+/** Create a control flow rule for matching unicast DMAC (HWS). */
+int mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
+
+/** Create a control flow rule for matching unicast DMAC with VLAN (HWS). */
+int mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
+                                    const struct rte_ether_addr *addr,
+                                    const uint16_t vlan);
+
 void mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev);
 
 int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index f6918825eb..afc9778b97 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -15896,12 +15896,14 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev,
 }
 
 static int
-__flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
-                            struct rte_flow_template_table *tbl,
-                            const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
-                            const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
+__flow_hw_ctrl_flows_unicast_create(struct rte_eth_dev *dev,
+                                   struct rte_flow_template_table *tbl,
+                                   const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type,
+                                   const struct rte_ether_addr *addr)
 {
-       struct rte_flow_item_eth eth_spec;
+       struct rte_flow_item_eth eth_spec = {
+               .hdr.dst_addr = *addr,
+       };
        struct rte_flow_item items[5];
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_RSS },
@@ -15909,15 +15911,11 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
        };
        struct mlx5_hw_ctrl_flow_info flow_info = {
                .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
+               .uc = {
+                       .dmac = *addr,
+               },
        };
-       const struct rte_ether_addr cmp = {
-               .addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
-       };
-       unsigned int i;
-
-       RTE_SET_USED(pattern_type);
 
-       memset(&eth_spec, 0, sizeof(eth_spec));
        memset(items, 0, sizeof(items));
        items[0] = (struct rte_flow_item){
                .type = RTE_FLOW_ITEM_TYPE_ETH,
@@ -15927,28 +15925,47 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
        items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
        items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
        items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
+
+       if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false))
+               return -rte_errno;
+
+       return 0;
+}
+
+static int
+__flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
+                            struct rte_flow_template_table *tbl,
+                            const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
+{
+       unsigned int i;
+       int ret;
+
        for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
                struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
 
-               if (!memcmp(mac, &cmp, sizeof(*mac)))
+               if (rte_is_zero_ether_addr(mac))
                        continue;
-               eth_spec.hdr.dst_addr = *mac;
-               flow_info.uc.dmac = *mac;
-               if (flow_hw_create_ctrl_flow(dev, dev,
-                                            tbl, items, 0, actions, 0, &flow_info, false))
-                       return -rte_errno;
+
+               ret = __flow_hw_ctrl_flows_unicast_create(dev, tbl, rss_type, mac);
+               if (ret < 0)
+                       return ret;
        }
        return 0;
 }
 
 static int
-__flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
-                                 struct rte_flow_template_table *tbl,
-                                 const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
-                                 const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
-{
-       struct mlx5_priv *priv = dev->data->dev_private;
-       struct rte_flow_item_eth eth_spec;
+__flow_hw_ctrl_flows_unicast_vlan_create(struct rte_eth_dev *dev,
+                                        struct rte_flow_template_table *tbl,
+                                        const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type,
+                                        const struct rte_ether_addr *addr,
+                                        const uint16_t vid)
+{
+       struct rte_flow_item_eth eth_spec = {
+               .hdr.dst_addr = *addr,
+       };
+       struct rte_flow_item_vlan vlan_spec = {
+               .hdr.vlan_tci = rte_cpu_to_be_16(vid),
+       };
        struct rte_flow_item items[5];
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_RSS },
@@ -15956,43 +15973,54 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
        };
        struct mlx5_hw_ctrl_flow_info flow_info = {
                .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
+               .uc = {
+                       .dmac = *addr,
+                       .vlan = vid,
+               },
        };
-       const struct rte_ether_addr cmp = {
-               .addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
-       };
-       unsigned int i;
-       unsigned int j;
-
-       RTE_SET_USED(pattern_type);
 
-       memset(&eth_spec, 0, sizeof(eth_spec));
        memset(items, 0, sizeof(items));
        items[0] = (struct rte_flow_item){
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .spec = &eth_spec,
        };
-       items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN };
+       items[1] = (struct rte_flow_item){
+               .type = RTE_FLOW_ITEM_TYPE_VLAN,
+               .spec = &vlan_spec,
+       };
        items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
        items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
        items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
+
+       if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false))
+               return -rte_errno;
+
+       return 0;
+}
+
+static int
+__flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
+                                 struct rte_flow_template_table *tbl,
+                                 const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       unsigned int i;
+       unsigned int j;
+
        for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
                struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
 
-               if (!memcmp(mac, &cmp, sizeof(*mac)))
+               if (rte_is_zero_ether_addr(mac))
                        continue;
-               eth_spec.hdr.dst_addr = *mac;
-               flow_info.uc.dmac = *mac;
+
                for (j = 0; j < priv->vlan_filter_n; ++j) {
                        uint16_t vlan = priv->vlan_filter[j];
-                       struct rte_flow_item_vlan vlan_spec = {
-                               .hdr.vlan_tci = rte_cpu_to_be_16(vlan),
-                       };
+                       int ret;
 
-                       flow_info.uc.vlan = vlan;
-                       items[1].spec = &vlan_spec;
-                       if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0,
-                                                    &flow_info, false))
-                               return -rte_errno;
+                       ret = __flow_hw_ctrl_flows_unicast_vlan_create(dev, tbl, rss_type,
+                                                                      mac, vlan);
+                       if (ret < 0)
+                               return ret;
                }
        }
        return 0;
@@ -16016,9 +16044,9 @@ __flow_hw_ctrl_flows(struct rte_eth_dev *dev,
        case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
                return __flow_hw_ctrl_flows_single_vlan(dev, tbl, pattern_type, rss_type);
        case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
-               return __flow_hw_ctrl_flows_unicast(dev, tbl, pattern_type, rss_type);
+               return __flow_hw_ctrl_flows_unicast(dev, tbl, rss_type);
        case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
-               return __flow_hw_ctrl_flows_unicast_vlan(dev, tbl, pattern_type, rss_type);
+               return __flow_hw_ctrl_flows_unicast_vlan(dev, tbl, rss_type);
        default:
                /* Should not reach here. */
                MLX5_ASSERT(false);
@@ -16099,6 +16127,99 @@ mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags)
        return 0;
 }
 
+static int
+mlx5_flow_hw_ctrl_flow_single(struct rte_eth_dev *dev,
+                             const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
+                             const struct rte_ether_addr *addr,
+                             const uint16_t vlan)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
+       unsigned int j;
+       int ret = 0;
+
+       if (!priv->dr_ctx) {
+               DRV_LOG(DEBUG, "port %u Control flow rules will not be created. "
+                              "HWS needs to be configured beforehand.",
+                              dev->data->port_id);
+               return 0;
+       }
+       if (!priv->hw_ctrl_rx) {
+               DRV_LOG(ERR, "port %u Control flow rules templates were not created.",
+                       dev->data->port_id);
+               rte_errno = EINVAL;
+               return -rte_errno;
+       }
+       hw_ctrl_rx = priv->hw_ctrl_rx;
+
+       /* TODO: this part should be somehow refactored. It's common with common flow creation. */
+       for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
+               const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
+               const unsigned int pti = eth_pattern_type;
+               struct rte_flow_actions_template *at;
+               struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[pti][j];
+               const struct mlx5_flow_template_table_cfg cfg = {
+                       .attr = tmpls->attr,
+                       .external = 0,
+               };
+
+               if (!hw_ctrl_rx->rss[rss_type]) {
+                       at = flow_hw_create_ctrl_rx_rss_template(dev, rss_type);
+                       if (!at)
+                               return -rte_errno;
+                       hw_ctrl_rx->rss[rss_type] = at;
+               } else {
+                       at = hw_ctrl_rx->rss[rss_type];
+               }
+               if (!rss_type_is_requested(priv, rss_type))
+                       continue;
+               if (!tmpls->tbl) {
+                       tmpls->tbl = flow_hw_table_create(dev, &cfg,
+                                                         &tmpls->pt, 1, &at, 1, NULL);
+                       if (!tmpls->tbl) {
+                               DRV_LOG(ERR, "port %u Failed to create template table "
+                                            "for control flow rules. Unable to create "
+                                            "control flow rules.",
+                                            dev->data->port_id);
+                               return -rte_errno;
+                       }
+               }
+
+               MLX5_ASSERT(eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC ||
+                           eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN);
+
+               if (eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC)
+                       ret = __flow_hw_ctrl_flows_unicast_create(dev, tmpls->tbl, rss_type, addr);
+               else
+                       ret = __flow_hw_ctrl_flows_unicast_vlan_create(dev, tmpls->tbl, rss_type,
+                                                                      addr, vlan);
+               if (ret) {
+                       DRV_LOG(ERR, "port %u Failed to create unicast control flow rule.",
+                               dev->data->port_id);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+int
+mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev,
+                           const struct rte_ether_addr *addr)
+{
+       return mlx5_flow_hw_ctrl_flow_single(dev, MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC,
+                                            addr, 0);
+}
+
+int
+mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
+                                const struct rte_ether_addr *addr,
+                                const uint16_t vlan)
+{
+       return mlx5_flow_hw_ctrl_flow_single(dev, MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN,
+                                            addr, vlan);
+}
+
 static __rte_always_inline uint32_t
 mlx5_reformat_domain_to_tbl_type(const struct rte_flow_indir_action_conf *domain)
 {
diff --git a/drivers/net/mlx5/mlx5_flow_hw_stubs.c b/drivers/net/mlx5/mlx5_flow_hw_stubs.c
new file mode 100644
index 0000000000..985c046056
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow_hw_stubs.c
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2024 NVIDIA Corporation & Affiliates
+ */
+
+/**
+ * @file
+ *
+ * mlx5_flow_hw.c source file is included in the build only on Linux.
+ * Functions defined there are compiled if and only if available rdma-core supports DV.
+ *
+ * This file contains stubs (through weak linking) for any functions exported from that file.
+ */
+
+#include "mlx5_flow.h"
+
+/*
+ * This is a stub for the real implementation of this function in mlx5_flow_hw.c in case:
+ * - PMD is compiled on Windows or
+ * - available rdma-core does not support HWS.
+ */
+__rte_weak int
+mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev __rte_unused,
+                           const struct rte_ether_addr *addr __rte_unused)
+{
+       rte_errno = ENOTSUP;
+       return -rte_errno;
+}
+
+/*
+ * This is a stub for the real implementation of this function in mlx5_flow_hw.c in case:
+ * - PMD is compiled on Windows or
+ * - available rdma-core does not support HWS.
+ */
+__rte_weak int
+mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev __rte_unused,
+                                const struct rte_ether_addr *addr __rte_unused,
+                                const uint16_t vlan __rte_unused)
+{
+       rte_errno = ENOTSUP;
+       return -rte_errno;
+}
-- 
2.39.5

Reply via email to