Add mask support for FDIR, including eth/ipv4/ipv6/tcp/udp flow items.

Previously, fields of these items had to be either fully masked or not
masked at all; partially-masked fields were rejected. With this change,
the item spec and mask are passed to the PF through the new
virtchnl_proto_hdr_w_msk entries, so partial masks can be programmed.

This patch is based on DPDK v21.11 LTS
[45cef8185a4fcb3aea4279711850c16e580b9a36] and is intended for customer
cherry-pick.

Signed-off-by: Zhichao Zeng <zhichaox.z...@intel.com>
---
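Note (placed below the "---" marker, so it is not part of the commit
message): a minimal sketch of the kind of rule this change enables,
assuming an already-configured iavf port. It builds an rte_flow FDIR rule
whose IPv4 source address uses a partial /24 mask instead of an all-ones
mask. The port id, queue index and the helper name
create_masked_ipv4_rule() are illustrative assumptions, not part of this
patch.

#include <rte_flow.h>
#include <rte_ip.h>
#include <rte_byteorder.h>

/* Hypothetical helper: direct packets from 192.168.0.0/24 to queue 1. */
static int
create_masked_ipv4_rule(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 0)),
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		/* partial mask: accepted only with this patch applied */
		.hdr.src_addr = RTE_BE32(0xffffff00),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err) ?
	       0 : -1;
}

For such a rule the PMD copies the item spec and mask into a
virtchnl_proto_hdr_w_msk entry (buffer_spec/buffer_mask) and signals the
masked layout to the PF by adding VIRTCHNL_MAX_NUM_PROTO_HDRS to
proto_hdrs.count.
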
 drivers/common/iavf/virtchnl.h |  40 ++-
 drivers/net/iavf/iavf_fdir.c   | 435 +++++++++++++++++++--------------
 drivers/net/iavf/iavf_hash.c   | 112 ++++-----
 3 files changed, 346 insertions(+), 241 deletions(-)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 80e754a1b2..bc8f355db1 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -1482,6 +1482,8 @@ enum virtchnl_vfr_states {
 };
 
 #define VIRTCHNL_MAX_NUM_PROTO_HDRS    32
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK 16
+#define VIRTCHNL_MAX_SIZE_RAW_PACKET 1024
 #define PROTO_HDR_SHIFT                        5
 #define PROTO_HDR_FIELD_START(proto_hdr_type) \
                                        (proto_hdr_type << PROTO_HDR_SHIFT)
@@ -1669,6 +1671,22 @@ struct virtchnl_proto_hdr {
 
 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
 
+struct virtchnl_proto_hdr_w_msk {
+       /* see enum virtchnl_proto_hdr_type */
+       s32 type;
+       u32 pad;
+       /**
+        * binary buffer in network order for specific header type.
+        * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, a IPv4
+        * header is expected to be copied into the buffer.
+        */
+       u8 buffer_spec[64];
+       /* binary buffer for bit-mask applied to specific header type */
+       u8 buffer_mask[64];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(136, virtchnl_proto_hdr_w_msk);
+
 struct virtchnl_proto_hdrs {
        u8 tunnel_level;
        /**
@@ -1678,8 +1696,26 @@ struct virtchnl_proto_hdrs {
         * 2 - from the second inner layer
         * ....
         **/
-       int count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */
-       struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+       int count;
+       /**
+        * count must <=
+        * VIRTCHNL_MAX_NUM_PROTO_HDRS + VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK
+        * count = 0 :                                  select raw
+        * 1 <= count <= VIRTCHNL_MAX_NUM_PROTO_HDRS :  select proto_hdr
+        * count > VIRTCHNL_MAX_NUM_PROTO_HDRS :        select proto_hdr_w_msk
+        * last valid index = count - VIRTCHNL_MAX_NUM_PROTO_HDRS
+        */
+       union {
+               struct virtchnl_proto_hdr
+                       proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+               struct virtchnl_proto_hdr_w_msk
+                       proto_hdr_w_msk[VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK];
+               struct {
+                       u16 pkt_len;
+                       u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
+                       u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET];
+               } raw;
+       };
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index c30853dd94..0afb262262 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -696,6 +696,7 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
        const struct rte_flow_item_gre *gre_spec, *gre_mask;
        const struct rte_flow_item *item = pattern;
        struct virtchnl_proto_hdr *hdr, *hdr1 = NULL;
+       struct virtchnl_proto_hdr_w_msk *hdr_w_msk, *hdr1_w_msk = NULL;
        struct rte_ecpri_common_hdr ecpri_common;
        uint64_t input_set = IAVF_INSET_NONE;
        enum rte_flow_item_type item_type;
@@ -703,6 +704,7 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
        uint8_t tun_inner = 0;
        uint16_t ether_type;
        int layer = 0;
+       int with_mask = 0;
 
        uint8_t  ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
@@ -727,8 +729,10 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                        next_type = (item + 1)->type;
 
                        hdr1 = &hdrs->proto_hdr[layer];
+                       hdr1_w_msk = &hdrs->proto_hdr_w_msk[layer];
 
                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);
+                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1_w_msk, ETH);
 
                        if (next_type == RTE_FLOW_ITEM_TYPE_END &&
                            (!eth_spec || !eth_mask)) {
@@ -739,39 +743,59 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                        }
 
                        if (eth_spec && eth_mask) {
-                               if (!rte_is_zero_ether_addr(&eth_mask->src) ||
-                                   !rte_is_zero_ether_addr(&eth_mask->dst)) {
-                                       rte_flow_error_set(error, EINVAL,
-                                               RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                               "Invalid MAC_addr mask.");
-                                       return -rte_errno;
-                               }
-                       }
-
-                       if (eth_spec && eth_mask && eth_mask->type) {
-                               if (eth_mask->type != RTE_BE16(0xffff)) {
-                                       rte_flow_error_set(error, EINVAL,
-                                               RTE_FLOW_ERROR_TYPE_ITEM,
-                                               item, "Invalid type mask.");
-                                       return -rte_errno;
-                               }
-
-                               ether_type = rte_be_to_cpu_16(eth_spec->type);
-                               if (ether_type == RTE_ETHER_TYPE_IPV4 ||
-                                       ether_type == RTE_ETHER_TYPE_IPV6) {
-                                       rte_flow_error_set(error, EINVAL,
-                                               RTE_FLOW_ERROR_TYPE_ITEM,
-                                               item,
-                                               "Unsupported ether_type.");
-                                       return -rte_errno;
+                               if ((!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr) &&
+                                       !rte_is_broadcast_ether_addr(&eth_mask->hdr.dst_addr)) ||
+                                       (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr) &&
+                                       !rte_is_broadcast_ether_addr(&eth_mask->hdr.src_addr))) {
+                                       if (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr))
+                                               input_set |= IAVF_INSET_DMAC;
+                                       if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr))
+                                               input_set |= IAVF_INSET_SMAC;
+                                       if (eth_mask->hdr.ether_type)
+                                               input_set |= IAVF_INSET_ETHERTYPE;
+                                       rte_memcpy(hdr1_w_msk->buffer_spec, eth_spec,
+                                       sizeof(struct rte_ether_hdr));
+                                       rte_memcpy(hdr1_w_msk->buffer_mask, eth_mask,
+                                       sizeof(struct rte_ether_hdr));
+                                       with_mask = 1;
+                               } else {
+                                       if (!rte_is_zero_ether_addr(&eth_mask->hdr.dst_addr)) {
+                                               input_set |= IAVF_INSET_DMAC;
+                                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
+                                                                               ETH,
+                                                                               DST);
+                                       } else if (!rte_is_zero_ether_addr(&eth_mask->hdr.src_addr)) {
+                                               input_set |= IAVF_INSET_SMAC;
+                                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
+                                                                               ETH,
+                                                                               SRC);
+                                       }
+
+                                       if (eth_spec && eth_mask && eth_mask->type) {
+                                               if (eth_mask->type != RTE_BE16(0xffff)) {
+                                                       rte_flow_error_set(error, EINVAL,
+                                                               RTE_FLOW_ERROR_TYPE_ITEM,
+                                                               item, "Invalid type mask.");
+                                                       return -rte_errno;
+                                               }
+
+                                               ether_type = rte_be_to_cpu_16(eth_spec->type);
+                                               if (ether_type == RTE_ETHER_TYPE_IPV4 ||
+                                                       ether_type == RTE_ETHER_TYPE_IPV6) {
+                                                       rte_flow_error_set(error, EINVAL,
+                                                               RTE_FLOW_ERROR_TYPE_ITEM,
+                                                               item,
+                                                               "Unsupported ether_type.");
+                                                       return -rte_errno;
+                                               }
+
+                                               input_set |= IAVF_INSET_ETHERTYPE;
+                                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
+                                                                                ETHERTYPE);
+                                       }
+                                       rte_memcpy(hdr1->buffer, eth_spec,
+                                               sizeof(struct rte_ether_hdr));
                                }
-
-                               input_set |= IAVF_INSET_ETHERTYPE;
-                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
-                                                                ETHERTYPE);
-
-                               rte_memcpy(hdr1->buffer, eth_spec,
-                                          sizeof(struct rte_ether_hdr));
                        }
 
                        hdrs->count = ++layer;
@@ -785,8 +809,9 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                        next_type = (item + 1)->type;
 
                        hdr = &hdrs->proto_hdr[layer];
-
+                       hdr_w_msk = &hdrs->proto_hdr_w_msk[layer];
                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr_w_msk, IPV4);
 
                        if (!(ipv4_spec && ipv4_mask)) {
                                hdrs->count = ++layer;
@@ -817,79 +842,81 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                                return -rte_errno;
                        }
 
-                       /* Mask for IPv4 src/dst addrs not supported */
-                       if (ipv4_mask->hdr.src_addr &&
-                               ipv4_mask->hdr.src_addr != UINT32_MAX)
-                               return -rte_errno;
-                       if (ipv4_mask->hdr.dst_addr &&
-                               ipv4_mask->hdr.dst_addr != UINT32_MAX)
-                               return -rte_errno;
-
-                       if (ipv4_mask->hdr.type_of_service ==
-                           UINT8_MAX) {
-                               input_set |= IAVF_INSET_IPV4_TOS;
-                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
-                                                                DSCP);
-                       }
+                       if ((ipv4_mask->hdr.src_addr &&
+                               ipv4_mask->hdr.src_addr != UINT32_MAX) ||
+                               (ipv4_mask->hdr.dst_addr &&
+                               ipv4_mask->hdr.dst_addr != UINT32_MAX)) {
+                               if (ipv4_mask->hdr.src_addr)
+                                       input_set |= IAVF_INSET_IPV4_SRC;
+                               if (ipv4_mask->hdr.dst_addr)
+                                       input_set |= IAVF_INSET_IPV4_DST;
+                               rte_memcpy(hdr_w_msk->buffer_spec, &ipv4_spec->hdr,
+                                          sizeof(ipv4_spec->hdr));
+                               rte_memcpy(hdr_w_msk->buffer_mask, &ipv4_mask->hdr,
+                                          sizeof(ipv4_mask->hdr));
+                               with_mask = 1;
+                       } else {
+                               if (ipv4_mask->hdr.type_of_service ==
+                                   UINT8_MAX) {
+                                       input_set |= IAVF_INSET_IPV4_TOS;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+                                                                        DSCP);
+                               }
 
-                       if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
-                               input_set |= IAVF_INSET_IPV4_PROTO;
-                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
-                                                                PROT);
-                       }
+                               if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+                                       input_set |= IAVF_INSET_IPV4_PROTO;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+                                                                        PROT);
+                               }
 
-                       if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
-                               input_set |= IAVF_INSET_IPV4_TTL;
-                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
-                                                                TTL);
-                       }
+                               if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+                                       input_set |= IAVF_INSET_IPV4_TTL;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+                                                                        TTL);
+                               }
 
-                       if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
-                               input_set |= IAVF_INSET_IPV4_SRC;
-                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
-                                                                SRC);
-                       }
+                               if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+                                       input_set |= IAVF_INSET_IPV4_SRC;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+                                                                        SRC);
+                               }
 
-                       if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
-                               input_set |= IAVF_INSET_IPV4_DST;
-                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
-                                                                DST);
+                               if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+                                       input_set |= IAVF_INSET_IPV4_DST;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+                                                                        DST);
+                               }
+                               rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
+                                          sizeof(ipv4_spec->hdr));
+                               /* fragment Ipv4:
+                                * spec is 0x2000, mask is 0x2000
+                                */
+                               if (ipv4_spec->hdr.fragment_offset ==
+                                   rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
+                                   ipv4_mask->hdr.fragment_offset ==
+                                   rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG)) {
+                                       /* all IPv4 fragment packet has the same
+                                        * ethertype, if the spec and mask is valid,
+                                        * set ethertype into input set.
+                                        */
+                                       input_set |= IAVF_INSET_ETHERTYPE;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
+                                                                        ETHERTYPE);
+
+                                       /* add dummy header for IPv4 Fragment */
+                                       iavf_fdir_add_fragment_hdr(hdrs, layer);
+                               } else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
+                                       rte_flow_error_set(error, EINVAL,
+                                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                                          item, "Invalid IPv4 mask.");
+                                       return -rte_errno;
+                               }
                        }
-
                        if (tun_inner) {
                                input_set &= ~IAVF_PROT_IPV4_OUTER;
                                input_set |= IAVF_PROT_IPV4_INNER;
                        }
-
-                       rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
-                                  sizeof(ipv4_spec->hdr));
-
                        hdrs->count = ++layer;
-
-                       /* fragment Ipv4:
-                        * spec is 0x2000, mask is 0x2000
-                        */
-                       if (ipv4_spec->hdr.fragment_offset ==
-                           rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
-                           ipv4_mask->hdr.fragment_offset ==
-                           rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG)) {
-                               /* all IPv4 fragment packet has the same
-                                * ethertype, if the spec and mask is valid,
-                                * set ethertype into input set.
-                                */
-                               input_set |= IAVF_INSET_ETHERTYPE;
-                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
-                                                                ETHERTYPE);
-
-                               /* add dummy header for IPv4 Fragment */
-                               iavf_fdir_add_fragment_hdr(hdrs, layer);
-                       } else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
-                               rte_flow_error_set(error, EINVAL,
-                                                  RTE_FLOW_ERROR_TYPE_ITEM,
-                                                  item, "Invalid IPv4 mask.");
-                               return -rte_errno;
-                       }
-
                        break;
 
                case RTE_FLOW_ITEM_TYPE_IPV6:
@@ -898,8 +925,9 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                        ipv6_mask = item->mask;
 
                        hdr = &hdrs->proto_hdr[layer];
-
+                       hdr_w_msk = &hdrs->proto_hdr_w_msk[layer];
                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr_w_msk, IPV6);
 
                        if (!(ipv6_spec && ipv6_mask)) {
                                hdrs->count = ++layer;
@@ -913,46 +941,70 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                                return -rte_errno;
                        }
 
-                       if ((ipv6_mask->hdr.vtc_flow &
-                             rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
-                            == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
-                               input_set |= IAVF_INSET_IPV6_TC;
-                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
-                                                                TC);
-                       }
+                       if (memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
+                                       RTE_DIM(ipv6_mask->hdr.src_addr)) ||
+                               memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
+                                       RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+                               if (memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
+                                               RTE_DIM(ipv6_mask->hdr.src_addr)))
+                                       input_set |= IAVF_INSET_IPV6_SRC;
+                               if (memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
+                                               RTE_DIM(ipv6_mask->hdr.dst_addr)))
+                                       input_set |= IAVF_INSET_IPV6_DST;
+                               if (ipv6_mask->hdr.proto)
+                                       input_set |= IAVF_INSET_IPV6_NEXT_HDR;
+                               if (ipv6_mask->hdr.hop_limits)
+                                       input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
+                               if (ipv6_mask->hdr.vtc_flow &
+                                   rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
+                                       input_set |= IAVF_INSET_IPV6_TC;
+                               rte_memcpy(hdr_w_msk->buffer_spec, &ipv6_spec->hdr,
+                                               sizeof(ipv6_spec->hdr));
+                               rte_memcpy(hdr_w_msk->buffer_mask, &ipv6_mask->hdr,
+                                               sizeof(ipv6_mask->hdr));
+                               with_mask = 1;
+                       } else {
+                               if ((ipv6_mask->hdr.vtc_flow &
+                                     rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
+                                    == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
+                                       input_set |= IAVF_INSET_IPV6_TC;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+                                                                        TC);
+                               }
 
-                       if (ipv6_mask->hdr.proto == UINT8_MAX) {
-                               input_set |= IAVF_INSET_IPV6_NEXT_HDR;
-                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
-                                                                PROT);
-                       }
+                               if (ipv6_mask->hdr.proto == UINT8_MAX) {
+                                       input_set |= IAVF_INSET_IPV6_NEXT_HDR;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+                                                                        PROT);
+                               }
 
-                       if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
-                               input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
-                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
-                                                                HOP_LIMIT);
-                       }
+                               if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+                                       input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+                                                                        HOP_LIMIT);
+                               }
 
-                       if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
-                                   RTE_DIM(ipv6_mask->hdr.src_addr))) {
-                               input_set |= IAVF_INSET_IPV6_SRC;
-                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
-                                                                SRC);
-                       }
-                       if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
-                                   RTE_DIM(ipv6_mask->hdr.dst_addr))) {
-                               input_set |= IAVF_INSET_IPV6_DST;
-                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
-                                                                DST);
-                       }
+                               if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
+                                               RTE_DIM(ipv6_mask->hdr.src_addr))) {
+                                       input_set |= IAVF_INSET_IPV6_SRC;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+                                                                        SRC);
+                               }
+                               if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
+                                               RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+                                       input_set |= IAVF_INSET_IPV6_DST;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+                                                                        DST);
+                               }
 
-                       if (tun_inner) {
-                               input_set &= ~IAVF_PROT_IPV6_OUTER;
-                               input_set |= IAVF_PROT_IPV6_INNER;
-                       }
+                               if (tun_inner) {
+                                       input_set &= ~IAVF_PROT_IPV6_OUTER;
+                                       input_set |= IAVF_PROT_IPV6_INNER;
+                               }
 
-                       rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
-                                  sizeof(ipv6_spec->hdr));
+                               rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
+                                          sizeof(ipv6_spec->hdr));
+                       }
 
                        hdrs->count = ++layer;
                        break;
@@ -1003,8 +1055,9 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                        udp_mask = item->mask;
 
                        hdr = &hdrs->proto_hdr[layer];
-
+                       hdr_w_msk = &hdrs->proto_hdr_w_msk[layer];
                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr_w_msk, UDP);
 
                        if (udp_spec && udp_mask) {
                                if (udp_mask->hdr.dgram_len ||
@@ -1016,35 +1069,42 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                                }
 
                                /* Mask for UDP src/dst ports not supported */
-                               if (udp_mask->hdr.src_port &&
-                                       udp_mask->hdr.src_port != UINT16_MAX)
-                                       return -rte_errno;
-                               if (udp_mask->hdr.dst_port &&
-                                       udp_mask->hdr.dst_port != UINT16_MAX)
-                                       return -rte_errno;
-
-                               if (udp_mask->hdr.src_port == UINT16_MAX) {
-                                       input_set |= IAVF_INSET_UDP_SRC_PORT;
-                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
-                               }
-                               if (udp_mask->hdr.dst_port == UINT16_MAX) {
-                                       input_set |= IAVF_INSET_UDP_DST_PORT;
-                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
+                               if ((udp_mask->hdr.src_port &&
+                                       udp_mask->hdr.src_port != UINT16_MAX) ||
+                                       (udp_mask->hdr.dst_port &&
+                                       udp_mask->hdr.dst_port != UINT16_MAX)) {
+                                       if (udp_mask->hdr.src_port)
+                                               input_set |= IAVF_INSET_UDP_SRC_PORT;
+                                       if (udp_mask->hdr.dst_port)
+                                               input_set |= IAVF_INSET_UDP_DST_PORT;
+                                       rte_memcpy(hdr_w_msk->buffer_spec, &udp_spec->hdr,
+                                                  sizeof(udp_spec->hdr));
+                                       rte_memcpy(hdr_w_msk->buffer_mask, &udp_mask->hdr,
+                                                  sizeof(udp_mask->hdr));
+                                       with_mask = 1;
+                               } else {
+                                       if (udp_mask->hdr.src_port == UINT16_MAX) {
+                                               input_set |= IAVF_INSET_UDP_SRC_PORT;
+                                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
+                                       }
+                                       if (udp_mask->hdr.dst_port == UINT16_MAX) {
+                                               input_set |= IAVF_INSET_UDP_DST_PORT;
+                                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
+                                       }
+
+                                       if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+                                               rte_memcpy(hdr->buffer,
+                                                       &udp_spec->hdr,
+                                                       sizeof(udp_spec->hdr));
+                                       else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+                                               rte_memcpy(hdr->buffer,
+                                                       &udp_spec->hdr,
+                                                       sizeof(udp_spec->hdr));
                                }
-
                                if (tun_inner) {
                                        input_set &= ~IAVF_PROT_UDP_OUTER;
                                        input_set |= IAVF_PROT_UDP_INNER;
                                }
-
-                               if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-                                       rte_memcpy(hdr->buffer,
-                                               &udp_spec->hdr,
-                                               sizeof(udp_spec->hdr));
-                               else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-                                       rte_memcpy(hdr->buffer,
-                                               &udp_spec->hdr,
-                                               sizeof(udp_spec->hdr));
                        }
 
                        hdrs->count = ++layer;
@@ -1055,8 +1115,9 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                        tcp_mask = item->mask;
 
                        hdr = &hdrs->proto_hdr[layer];
-
+                       hdr_w_msk = &hdrs->proto_hdr_w_msk[layer];
                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr_w_msk, TCP);
 
                        if (tcp_spec && tcp_mask) {
                                if (tcp_mask->hdr.sent_seq ||
@@ -1072,36 +1133,41 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                                        return -rte_errno;
                                }
 
-                               /* Mask for TCP src/dst ports not supported */
-                               if (tcp_mask->hdr.src_port &&
-                                       tcp_mask->hdr.src_port != UINT16_MAX)
-                                       return -rte_errno;
-                               if (tcp_mask->hdr.dst_port &&
-                                       tcp_mask->hdr.dst_port != UINT16_MAX)
-                                       return -rte_errno;
-
-                               if (tcp_mask->hdr.src_port == UINT16_MAX) {
-                                       input_set |= IAVF_INSET_TCP_SRC_PORT;
-                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
+                               if ((tcp_mask->hdr.src_port &&
+                                       tcp_mask->hdr.src_port != UINT16_MAX) ||
+                                       (tcp_mask->hdr.dst_port &&
+                                       tcp_mask->hdr.dst_port != UINT16_MAX)) {
+                                       if (tcp_mask->hdr.src_port)
+                                               input_set |= IAVF_INSET_TCP_SRC_PORT;
+                                       if (tcp_mask->hdr.dst_port)
+                                               input_set |= IAVF_INSET_TCP_DST_PORT;
+                                       rte_memcpy(hdr_w_msk->buffer_spec, &tcp_spec->hdr,
+                                                       sizeof(tcp_spec->hdr));
+                                       rte_memcpy(hdr_w_msk->buffer_mask, &tcp_mask->hdr,
+                                                       sizeof(tcp_mask->hdr));
+                                       with_mask = 1;
+                               } else {
+                                       if (tcp_mask->hdr.src_port == UINT16_MAX) {
+                                               input_set |= IAVF_INSET_TCP_SRC_PORT;
+                                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
+                                       }
+                                       if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+                                               input_set |= IAVF_INSET_TCP_DST_PORT;
+                                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
+                                       }
+                                       if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+                                               rte_memcpy(hdr->buffer,
+                                                       &tcp_spec->hdr,
+                                                       sizeof(tcp_spec->hdr));
+                                       else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+                                               rte_memcpy(hdr->buffer,
+                                                       &tcp_spec->hdr,
+                                                       sizeof(tcp_spec->hdr));
                                }
-                               if (tcp_mask->hdr.dst_port == UINT16_MAX) {
-                                       input_set |= IAVF_INSET_TCP_DST_PORT;
-                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
-                               }
-
                                if (tun_inner) {
                                        input_set &= ~IAVF_PROT_TCP_OUTER;
                                        input_set |= IAVF_PROT_TCP_INNER;
                                }
-
-                               if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-                                       rte_memcpy(hdr->buffer,
-                                               &tcp_spec->hdr,
-                                               sizeof(tcp_spec->hdr));
-                               else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-                                       rte_memcpy(hdr->buffer,
-                                               &tcp_spec->hdr,
-                                               sizeof(tcp_spec->hdr));
                        }
 
                        hdrs->count = ++layer;
@@ -1376,6 +1442,9 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
                }
        }
 
+       if (with_mask)
+               hdrs->count += VIRTCHNL_MAX_NUM_PROTO_HDRS;
+
        if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index 5e0888ea68..e43ed412bf 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -178,218 +178,218 @@ iavf_hash_parse_pattern_action(struct iavf_adapter *ad,
 /* proto_hdrs template */
 struct virtchnl_proto_hdrs outer_ipv4_tmplt = {
        TUNNEL_LEVEL_OUTER, 4,
-       {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv4}
+       {{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv4}}
 };
 
 struct virtchnl_proto_hdrs outer_ipv4_udp_tmplt = {
        TUNNEL_LEVEL_OUTER, 5,
-       {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+       {{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
         proto_hdr_ipv4_with_prot,
-        proto_hdr_udp}
+        proto_hdr_udp}}
 };
 
 struct virtchnl_proto_hdrs outer_ipv4_tcp_tmplt = {
        TUNNEL_LEVEL_OUTER, 5,
-       {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+       {{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
         proto_hdr_ipv4_with_prot,
-        proto_hdr_tcp}
+        proto_hdr_tcp}}
 };
 
 struct virtchnl_proto_hdrs outer_ipv4_sctp_tmplt = {
        TUNNEL_LEVEL_OUTER, 5,
-       {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv4,
-        proto_hdr_sctp}
+       {{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv4,
+        proto_hdr_sctp}}
 };
 
 struct virtchnl_proto_hdrs outer_ipv6_tmplt = {
        TUNNEL_LEVEL_OUTER, 4,
-       {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv6}
+       {{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv6}}
 };
 
 struct virtchnl_proto_hdrs outer_ipv6_frag_tmplt = {
        TUNNEL_LEVEL_OUTER, 5,
-       {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
-        proto_hdr_ipv6, proto_hdr_ipv6_frag}
+       {{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+        proto_hdr_ipv6, proto_hdr_ipv6_frag}}
 };
 
 struct virtchnl_proto_hdrs outer_ipv6_udp_tmplt = {
        TUNNEL_LEVEL_OUTER, 5,
-       {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+       {{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
         proto_hdr_ipv6_with_prot,
-        proto_hdr_udp}
+        proto_hdr_udp}}
 };
 
 struct virtchnl_proto_hdrs outer_ipv6_tcp_tmplt = {
        TUNNEL_LEVEL_OUTER, 5,
-       {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+       {{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
         proto_hdr_ipv6_with_prot,
-        proto_hdr_tcp}
+        proto_hdr_tcp}}
 };
 
 struct virtchnl_proto_hdrs outer_ipv6_sctp_tmplt = {
        TUNNEL_LEVEL_OUTER, 5,
-       {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv6,
-        proto_hdr_sctp}
+       {{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv6,
+        proto_hdr_sctp}}
 };
 
 struct virtchnl_proto_hdrs inner_ipv4_tmplt = {
-       TUNNEL_LEVEL_INNER, 1, {proto_hdr_ipv4}
+       TUNNEL_LEVEL_INNER, 1, {{proto_hdr_ipv4}}
 };
 
 struct virtchnl_proto_hdrs inner_ipv4_udp_tmplt = {
-       TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4_with_prot, proto_hdr_udp}
+       TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv4_with_prot, proto_hdr_udp}}
 };
 
 struct virtchnl_proto_hdrs inner_ipv4_tcp_tmplt = {
-       TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4_with_prot, proto_hdr_tcp}
+       TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv4_with_prot, proto_hdr_tcp}}
 };
 
 struct virtchnl_proto_hdrs second_inner_ipv4_tmplt = {
-       2, 1, {proto_hdr_ipv4}
+       2, 1, {{proto_hdr_ipv4}}
 };
 
 struct virtchnl_proto_hdrs second_inner_ipv4_udp_tmplt = {
-       2, 2, {proto_hdr_ipv4_with_prot, proto_hdr_udp}
+       2, 2, {{proto_hdr_ipv4_with_prot, proto_hdr_udp}}
 };
 
 struct virtchnl_proto_hdrs second_inner_ipv4_tcp_tmplt = {
-       2, 2, {proto_hdr_ipv4_with_prot, proto_hdr_tcp}
+       2, 2, {{proto_hdr_ipv4_with_prot, proto_hdr_tcp}}
 };
 
 struct virtchnl_proto_hdrs second_inner_ipv6_tmplt = {
-       2, 1, {proto_hdr_ipv6}
+       2, 1, {{proto_hdr_ipv6}}
 };
 
 struct virtchnl_proto_hdrs second_inner_ipv6_udp_tmplt = {
-       2, 2, {proto_hdr_ipv6_with_prot, proto_hdr_udp}
+       2, 2, {{proto_hdr_ipv6_with_prot, proto_hdr_udp}}
 };
 
 struct virtchnl_proto_hdrs second_inner_ipv6_tcp_tmplt = {
-       2, 2, {proto_hdr_ipv6_with_prot, proto_hdr_tcp}
+       2, 2, {{proto_hdr_ipv6_with_prot, proto_hdr_tcp}}
 };
 
 struct virtchnl_proto_hdrs inner_ipv4_sctp_tmplt = {
-       TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4, proto_hdr_sctp}
+       TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv4, proto_hdr_sctp}}
 };
 
 struct virtchnl_proto_hdrs inner_ipv6_tmplt = {
-       TUNNEL_LEVEL_INNER, 1, {proto_hdr_ipv6}
+       TUNNEL_LEVEL_INNER, 1, {{proto_hdr_ipv6}}
 };
 
 struct virtchnl_proto_hdrs inner_ipv6_udp_tmplt = {
-       TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6_with_prot, proto_hdr_udp}
+       TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv6_with_prot, proto_hdr_udp}}
 };
 
 struct virtchnl_proto_hdrs inner_ipv6_tcp_tmplt = {
-       TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6_with_prot, proto_hdr_tcp}
+       TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv6_with_prot, proto_hdr_tcp}}
 };
 
 struct virtchnl_proto_hdrs inner_ipv6_sctp_tmplt = {
-       TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6, proto_hdr_sctp}
+       TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv6, proto_hdr_sctp}}
 };
 
 struct virtchnl_proto_hdrs ipv4_esp_tmplt = {
-       TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv4, proto_hdr_esp}
+       TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv4, proto_hdr_esp}}
 };
 
 struct virtchnl_proto_hdrs ipv4_udp_esp_tmplt = {
        TUNNEL_LEVEL_OUTER, 3,
-       {proto_hdr_ipv4, proto_hdr_udp, proto_hdr_esp}
+       {{proto_hdr_ipv4, proto_hdr_udp, proto_hdr_esp}}
 };
 
 struct virtchnl_proto_hdrs ipv4_ah_tmplt = {
-       TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv4, proto_hdr_ah}
+       TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv4, proto_hdr_ah}}
 };
 
 struct virtchnl_proto_hdrs ipv6_esp_tmplt = {
-       TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv6, proto_hdr_esp}
+       TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv6, proto_hdr_esp}}
 };
 
 struct virtchnl_proto_hdrs ipv6_udp_esp_tmplt = {
        TUNNEL_LEVEL_OUTER, 3,
-       {proto_hdr_ipv6, proto_hdr_udp, proto_hdr_esp}
+       {{proto_hdr_ipv6, proto_hdr_udp, proto_hdr_esp}}
 };
 
 struct virtchnl_proto_hdrs ipv6_ah_tmplt = {
-       TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv6, proto_hdr_ah}
+       TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv6, proto_hdr_ah}}
 };
 
 struct virtchnl_proto_hdrs ipv4_l2tpv3_tmplt = {
-       TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv4, proto_hdr_l2tpv3}
+       TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv4, proto_hdr_l2tpv3}}
 };
 
 struct virtchnl_proto_hdrs ipv6_l2tpv3_tmplt = {
-       TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv6, proto_hdr_l2tpv3}
+       TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv6, proto_hdr_l2tpv3}}
 };
 
 struct virtchnl_proto_hdrs ipv4_pfcp_tmplt = {
-       TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv4, proto_hdr_pfcp}
+       TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv4, proto_hdr_pfcp}}
 };
 
 struct virtchnl_proto_hdrs ipv6_pfcp_tmplt = {
-       TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv6, proto_hdr_pfcp}
+       TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv6, proto_hdr_pfcp}}
 };
 
 struct virtchnl_proto_hdrs ipv4_udp_gtpc_tmplt = {
-       TUNNEL_LEVEL_OUTER, 3, {proto_hdr_ipv4, proto_hdr_udp, proto_hdr_gtpc}
+       TUNNEL_LEVEL_OUTER, 3, {{proto_hdr_ipv4, proto_hdr_udp, proto_hdr_gtpc}}
 };
 
 struct virtchnl_proto_hdrs ipv6_udp_gtpc_tmplt = {
-       TUNNEL_LEVEL_OUTER, 3, {proto_hdr_ipv6, proto_hdr_udp, proto_hdr_gtpc}
+       TUNNEL_LEVEL_OUTER, 3, {{proto_hdr_ipv6, proto_hdr_udp, proto_hdr_gtpc}}
 };
 
 struct virtchnl_proto_hdrs eth_ecpri_tmplt = {
-       TUNNEL_LEVEL_OUTER, 2, {proto_hdr_eth, proto_hdr_ecpri}
+       TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_eth, proto_hdr_ecpri}}
 };
 
 struct virtchnl_proto_hdrs ipv4_ecpri_tmplt = {
-       TUNNEL_LEVEL_OUTER, 3, {proto_hdr_ipv4, proto_hdr_udp, proto_hdr_ecpri}
+       TUNNEL_LEVEL_OUTER, 3, {{proto_hdr_ipv4, proto_hdr_udp, proto_hdr_ecpri}}
 };
 
 struct virtchnl_proto_hdrs udp_l2tpv2_ppp_ipv4_tmplt = {
        TUNNEL_LEVEL_INNER, 3,
-       {proto_hdr_l2tpv2,
+       {{proto_hdr_l2tpv2,
         proto_hdr_ppp,
-        proto_hdr_ipv4}
+        proto_hdr_ipv4}}
 };
 
 struct virtchnl_proto_hdrs udp_l2tpv2_ppp_ipv6_tmplt = {
        TUNNEL_LEVEL_INNER, 3,
-       {proto_hdr_l2tpv2,
+       {{proto_hdr_l2tpv2,
         proto_hdr_ppp,
-        proto_hdr_ipv6}
+        proto_hdr_ipv6}}
 };
 
 struct virtchnl_proto_hdrs udp_l2tpv2_ppp_ipv4_udp_tmplt = {
        TUNNEL_LEVEL_INNER, 4,
-       {proto_hdr_l2tpv2,
+       {{proto_hdr_l2tpv2,
         proto_hdr_ppp,
         proto_hdr_ipv4_with_prot,
-        proto_hdr_udp}
+        proto_hdr_udp}}
 };
 
 struct virtchnl_proto_hdrs udp_l2tpv2_ppp_ipv4_tcp_tmplt = {
        TUNNEL_LEVEL_INNER, 4,
-       {proto_hdr_l2tpv2,
+       {{proto_hdr_l2tpv2,
         proto_hdr_ppp,
         proto_hdr_ipv4_with_prot,
-        proto_hdr_tcp}
+        proto_hdr_tcp}}
 };
 
 struct virtchnl_proto_hdrs udp_l2tpv2_ppp_ipv6_udp_tmplt = {
        TUNNEL_LEVEL_INNER, 4,
-       {proto_hdr_l2tpv2,
+       {{proto_hdr_l2tpv2,
         proto_hdr_ppp,
         proto_hdr_ipv6_with_prot,
-        proto_hdr_udp}
+        proto_hdr_udp}}
 };
 
 struct virtchnl_proto_hdrs udp_l2tpv2_ppp_ipv6_tcp_tmplt = {
        TUNNEL_LEVEL_INNER, 4,
-       {proto_hdr_l2tpv2,
+       {{proto_hdr_l2tpv2,
         proto_hdr_ppp,
         proto_hdr_ipv6_with_prot,
-        proto_hdr_tcp}
+        proto_hdr_tcp}}
 };
 
 /* rss type super set */
-- 
2.34.1
