This patch implements the feature described in RFC [1], adding
support for the RSS action on L3 and/or L4 source or destination fields only.

[1] http://mails.dpdk.org/archives/dev/2019-December/152796.html
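
For illustration only (not part of the patch), a minimal sketch of an
rte_flow RSS action configuration that requests hashing on the IPv4
source address only; the queue array and count are placeholders chosen
for this example:

    /* Sketch: hash Rx traffic on the IPv4 source address only.
     * "queues" and "nb_queues" are application-defined placeholders. */
    uint16_t queues[] = { 0, 1 };
    uint16_t nb_queues = 2;
    struct rte_flow_action_rss rss_conf = {
            .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
            .level = 0,                  /* hash on outer headers */
            .types = ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY,
            .key = NULL,                 /* use the default RSS key */
            .key_len = 0,
            .queue = queues,
            .queue_num = nb_queues,
    };
    /* Pass &rss_conf as the conf of an RTE_FLOW_ACTION_TYPE_RSS action. */

Note that, per the validation added below, a partial L3 hash must be
combined with an L3 RSS type (here ETH_RSS_IPV4), and likewise a partial
L4 hash with ETH_RSS_UDP or ETH_RSS_TCP.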

Signed-off-by: Dekel Peled <dek...@mellanox.com>
Acked-by: Matan Azrad <ma...@mellanox.com>
---
 doc/guides/nics/mlx5.rst               |  2 +
 doc/guides/rel_notes/release_20_02.rst |  6 +++
 drivers/net/mlx5/mlx5_defs.h           |  7 ++-
 drivers/net/mlx5/mlx5_flow.c           | 12 +++++
 drivers/net/mlx5/mlx5_flow_dv.c        | 91 ++++++++++++++++++++++++++--------
 5 files changed, 97 insertions(+), 21 deletions(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 18573cf..92228d3 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -64,6 +64,8 @@ Features
 - Multiple TX and RX queues.
 - Support for scattered TX and RX frames.
 - IPv4, IPv6, TCPv4, TCPv6, UDPv4 and UDPv6 RSS on any number of queues.
+- RSS using different combinations of fields: L3 only, L4 only or both,
+  and source only, destination only or both.
 - Several RSS hash keys, one for each flow type.
 - Default RSS operation with no hash key specification.
 - Configurable RETA table.
diff --git a/doc/guides/rel_notes/release_20_02.rst b/doc/guides/rel_notes/release_20_02.rst
index 0eaa45a..8cd3470 100644
--- a/doc/guides/rel_notes/release_20_02.rst
+++ b/doc/guides/rel_notes/release_20_02.rst
@@ -56,6 +56,12 @@ New Features
      Also, make sure to start the actual text at the margin.
      =========================================================
 
+* **Updated Mellanox mlx5 driver.**
+
+  Updated Mellanox mlx5 driver with new features and improvements, including:
+
+  * Added support for RSS using L3/L4 source/destination only.
+
 
 Removed Items
 -------------
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index 042e1f3..dc9b965 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -104,8 +104,13 @@
 /* Number of packets vectorized Rx can simultaneously process in a loop. */
 #define MLX5_VPMD_DESCS_PER_LOOP      4
 
+/* Mask of RSS on source only or destination only. */
+#define MLX5_RSS_SRC_DST_ONLY (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | \
+                              ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)
+
 /* Supported RSS */
-#define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))
+#define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP | \
+                           MLX5_RSS_SRC_DST_ONLY))
 
 /* Timeout in seconds to get a valid link status. */
 #define MLX5_LINK_STATUS_TIMEOUT 10
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 0087163..cb9d265 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1150,6 +1150,18 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
                                          &rss->types,
                                          "some RSS protocols are not"
                                          " supported");
+       if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
+           !(rss->types & ETH_RSS_IP))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+                                         "L3 partial RSS requested but L3 RSS"
+                                         " type not specified");
+       if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
+           !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+                                         "L4 partial RSS requested but L4 RSS"
+                                         " type not specified");
        if (!priv->rxqs_n)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 73aaea4..12cbf17 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -6577,6 +6577,75 @@ struct field_modify_info modify_tcp[] = {
 }
 
 /**
+ * Set the hash fields according to the @p flow information.
+ *
+ * @param[in] dev_flow
+ *   Pointer to the mlx5_flow.
+ */
+static void
+flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
+{
+       struct rte_flow *flow = dev_flow->flow;
+       uint64_t items = dev_flow->layers;
+       int rss_inner = 0;
+       uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
+
+       dev_flow->hash_fields = 0;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+       if (flow->rss.level >= 2) {
+               dev_flow->hash_fields |= IBV_RX_HASH_INNER;
+               rss_inner = 1;
+       }
+#endif
+       if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
+           (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
+               if (rss_types & MLX5_IPV4_LAYER_TYPES) {
+                       if (rss_types & ETH_RSS_L3_SRC_ONLY)
+                               dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
+                       else if (rss_types & ETH_RSS_L3_DST_ONLY)
+                               dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
+                       else
+                               dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
+               }
+       } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
+                  (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
+               if (rss_types & MLX5_IPV6_LAYER_TYPES) {
+                       if (rss_types & ETH_RSS_L3_SRC_ONLY)
+                               dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
+                       else if (rss_types & ETH_RSS_L3_DST_ONLY)
+                               dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
+                       else
+                               dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
+               }
+       }
+       if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
+           (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
+               if (rss_types & ETH_RSS_UDP) {
+                       if (rss_types & ETH_RSS_L4_SRC_ONLY)
+                               dev_flow->hash_fields |=
+                                               IBV_RX_HASH_SRC_PORT_UDP;
+                       else if (rss_types & ETH_RSS_L4_DST_ONLY)
+                               dev_flow->hash_fields |=
+                                               IBV_RX_HASH_DST_PORT_UDP;
+                       else
+                               dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
+               }
+       } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
+                  (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
+               if (rss_types & ETH_RSS_TCP) {
+                       if (rss_types & ETH_RSS_L4_SRC_ONLY)
+                               dev_flow->hash_fields |=
+                                               IBV_RX_HASH_SRC_PORT_TCP;
+                       else if (rss_types & ETH_RSS_L4_DST_ONLY)
+                               dev_flow->hash_fields |=
+                                               IBV_RX_HASH_DST_PORT_TCP;
+                       else
+                               dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
+               }
+       }
+}
+
+/**
  * Fill the flow with DV spec, lock free
  * (mutex should be acquired by caller).
  *
@@ -7086,11 +7155,6 @@ struct field_modify_info modify_tcp[] = {
                                                    items, tunnel,
                                                    dev_flow->group);
                        matcher.priority = MLX5_PRIORITY_MAP_L3;
-                       dev_flow->hash_fields |=
-                               mlx5_flow_hashfields_adjust
-                                       (dev_flow, tunnel,
-                                        MLX5_IPV4_LAYER_TYPES,
-                                        MLX5_IPV4_IBV_RX_HASH);
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
                                             MLX5_FLOW_LAYER_OUTER_L3_IPV4;
                        if (items->mask != NULL &&
@@ -7114,11 +7178,6 @@ struct field_modify_info modify_tcp[] = {
                                                    items, tunnel,
                                                    dev_flow->group);
                        matcher.priority = MLX5_PRIORITY_MAP_L3;
-                       dev_flow->hash_fields |=
-                               mlx5_flow_hashfields_adjust
-                                       (dev_flow, tunnel,
-                                        MLX5_IPV6_LAYER_TYPES,
-                                        MLX5_IPV6_IBV_RX_HASH);
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
                                             MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                        if (items->mask != NULL &&
@@ -7139,11 +7198,6 @@ struct field_modify_info modify_tcp[] = {
                        flow_dv_translate_item_tcp(match_mask, match_value,
                                                   items, tunnel);
                        matcher.priority = MLX5_PRIORITY_MAP_L4;
-                       dev_flow->hash_fields |=
-                               mlx5_flow_hashfields_adjust
-                                       (dev_flow, tunnel, ETH_RSS_TCP,
-                                        IBV_RX_HASH_SRC_PORT_TCP |
-                                        IBV_RX_HASH_DST_PORT_TCP);
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
                                             MLX5_FLOW_LAYER_OUTER_L4_TCP;
                        break;
@@ -7151,11 +7205,6 @@ struct field_modify_info modify_tcp[] = {
                        flow_dv_translate_item_udp(match_mask, match_value,
                                                   items, tunnel);
                        matcher.priority = MLX5_PRIORITY_MAP_L4;
-                       dev_flow->hash_fields |=
-                               mlx5_flow_hashfields_adjust
-                                       (dev_flow, tunnel, ETH_RSS_UDP,
-                                        IBV_RX_HASH_SRC_PORT_UDP |
-                                        IBV_RX_HASH_DST_PORT_UDP);
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
                                             MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
@@ -7251,6 +7300,8 @@ struct field_modify_info modify_tcp[] = {
        assert(!flow_dv_check_valid_spec(matcher.mask.buf,
                                         dev_flow->dv.value.buf));
        dev_flow->layers = item_flags;
+       if (action_flags & MLX5_FLOW_ACTION_RSS)
+               flow_dv_hashfields_set(dev_flow);
        /* Register matcher. */
        matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
                                    matcher.mask.size);
-- 
1.8.3.1
