Add HWS support for matching on the IPv6 fragment extension header.
The supported field is next_header.

Signed-off-by: Maayan Kashani <mkash...@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnow...@nvidia.com>
---
 doc/guides/rel_notes/release_25_07.rst |  4 ++
 drivers/net/mlx5/hws/mlx5dr_definer.c  | 51 ++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_flow_hw.c        |  1 +
 3 files changed, 56 insertions(+)

diff --git a/doc/guides/rel_notes/release_25_07.rst b/doc/guides/rel_notes/release_25_07.rst
index cd1025aac09..6503f5a9f3c 100644
--- a/doc/guides/rel_notes/release_25_07.rst
+++ b/doc/guides/rel_notes/release_25_07.rst
@@ -55,6 +55,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated NVIDIA mlx5 driver.**
+
+  * Support matching on IPv6 frag extension header with rte_flow template API.
+
 
 Removed Items
 -------------
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 5272119bcbe..9c11d6c2cb9 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -176,6 +176,7 @@ struct mlx5dr_definer_conv_data {
        X(SET,          ip_fragmented,          !!v->fragment_offset,   rte_ipv4_hdr) \
        X(SET_BE16,     ipv6_payload_len,       v->hdr.payload_len,     rte_flow_item_ipv6) \
        X(SET,          ipv6_proto,             v->hdr.proto,           rte_flow_item_ipv6) \
+       X(SET,          ipv6_frag_proto,        v->hdr.next_header,     rte_flow_item_ipv6_frag_ext) \
        X(SET,          ipv6_routing_hdr,       IPPROTO_ROUTING,        rte_flow_item_ipv6) \
        X(SET,          ipv6_hop_limits,        v->hdr.hop_limits,      rte_flow_item_ipv6) \
        X(SET_BE32P,    ipv6_src_addr_127_96,   &v->hdr.src_addr.a[0],  rte_flow_item_ipv6) \
@@ -2553,6 +2554,51 @@ mlx5dr_definer_conv_item_ipv6_routing_ext(struct mlx5dr_definer_conv_data *cd,
        return 0;
 }
 
+static int
+mlx5dr_definer_conv_item_ipv6_frag_ext(struct mlx5dr_definer_conv_data *cd,
+                                         struct rte_flow_item *item,
+                                         int item_idx)
+{
+       const struct rte_flow_item_ipv6_frag_ext *m = item->mask;
+       struct mlx5dr_definer_fc *fc;
+       bool inner = cd->tunnel;
+
+       if (!cd->relaxed) {
+               fc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];
+               fc->item_idx = item_idx;
+               fc->tag_set = &mlx5dr_definer_ipv6_version_set;
+               fc->tag_mask_set = &mlx5dr_definer_ones_set;
+               DR_CALC_SET(fc, eth_l2, l3_type, inner);
+
+               /* Overwrite - Unset ethertype if present */
+               memset(&cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)], 0, sizeof(*fc));
+
+               fc = &cd->fc[DR_CALC_FNAME(IP_FRAG, inner)];
+               if (!fc->tag_set) {
+                       fc->item_idx = item_idx;
+                       fc->tag_set = &mlx5dr_definer_ones_set;
+                       fc->tag_mask_set = &mlx5dr_definer_ones_set;
+                       DR_CALC_SET(fc, eth_l4, ip_fragmented, inner);
+               }
+       }
+
+       if (!m)
+               return 0;
+
+       if (m->hdr.frag_data || m->hdr.id || m->hdr.reserved) {
+               rte_errno = ENOTSUP;
+               return rte_errno;
+       }
+
+       if (m->hdr.next_header) {
+               fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
+               fc->item_idx = item_idx;
+               fc->tag_set = &mlx5dr_definer_ipv6_frag_proto_set;
+               DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
+       }
+       return 0;
+}
+
 static int
 mlx5dr_definer_conv_item_random(struct mlx5dr_definer_conv_data *cd,
                                struct rte_flow_item *item,
@@ -3285,6 +3331,11 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
                        item_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
                                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                        break;
                        break;
+               case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+                       ret = mlx5dr_definer_conv_item_ipv6_frag_ext(&cd, items, i);
+                       item_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
+                                                 MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
+                       break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        ret = mlx5dr_definer_conv_item_udp(&cd, items, i);
                        item_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 20d38ce4141..9a281b67dc7 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -8899,6 +8899,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
                        *item_flags |= MLX5_FLOW_LAYER_ECPRI;
                        break;
                case RTE_FLOW_ITEM_TYPE_IB_BTH:
+               case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
                case RTE_FLOW_ITEM_TYPE_VOID:
                case RTE_FLOW_ITEM_TYPE_END:
                        break;
-- 
2.21.0

Reply via email to