Although currently only the GTPU inner hash is enabled (not the GTPU outer hash), the outer protocol still needs to co-exist with the inner protocol when a GTPU inner hash rule is configured, so that GTPU inner hash can be supported for different outer protocols.
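For context, below is a minimal sketch (not part of this patch; it uses the 20.08-era rte_flow API and a hypothetical helper name) of the kind of rule this change enables: an outer IPv6 / GTPU / inner IPv4 pattern hashing on the inner IPv4 addresses, where the outer IPv6 header now co-exists with the inner headers in the protocol header list sent to the PF.

/* Hypothetical application helper, shown only to illustrate the rule shape. */
#include <rte_ethdev.h>
#include <rte_flow.h>

static struct rte_flow *
create_gtpu_inner_ipv4_hash_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };

	/* Outer IPv6 carrying a GTP-U tunnel with an inner IPv4 packet. */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },	/* outer protocol */
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTPU },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* inner protocol */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	/* Hash on the inner IPv4 source/destination addresses. */
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.types = ETH_RSS_IPV4,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

The same pattern with RTE_FLOW_ITEM_TYPE_IPV4 as the outer item sets a different pattern hint bit, which is how the inner hash rule is distinguished per outer protocol.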
Signed-off-by: Jeff Guo <jia....@intel.com>
---
v2->v1:
use phint instead of template for outer_inner
---
 drivers/net/iavf/iavf_hash.c | 46 +++++++++++++++++++++++++++++++++---
 1 file changed, 43 insertions(+), 3 deletions(-)

diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index 3152218dc..0f51e100e 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -29,11 +29,21 @@
 #define IAVF_PHINT_GTPU_EH_DWN		BIT_ULL(2)
 #define IAVF_PHINT_GTPU_EH_UP		BIT_ULL(3)
 
+#define IAVF_PHINT_OUTER_IPV4_INNER_IPV4	BIT_ULL(4)
+#define IAVF_PHINT_OUTER_IPV4_INNER_IPV6	BIT_ULL(5)
+#define IAVF_PHINT_OUTER_IPV6_INNER_IPV4	BIT_ULL(6)
+#define IAVF_PHINT_OUTER_IPV6_INNER_IPV6	BIT_ULL(7)
+
 #define IAVF_PHINT_GTPU_MSK	(IAVF_PHINT_GTPU	| \
 				 IAVF_PHINT_GTPU_EH	| \
 				 IAVF_PHINT_GTPU_EH_DWN	| \
 				 IAVF_PHINT_GTPU_EH_UP)
 
+#define IAVF_PHINT_LAYERS_MSK	(IAVF_PHINT_OUTER_IPV4_INNER_IPV4 | \
+				 IAVF_PHINT_OUTER_IPV4_INNER_IPV6 | \
+				 IAVF_PHINT_OUTER_IPV6_INNER_IPV4 | \
+				 IAVF_PHINT_OUTER_IPV6_INNER_IPV6)
+
 #define IAVF_GTPU_EH_DWNLINK	0
 #define IAVF_GTPU_EH_UPLINK	1
 
@@ -505,6 +515,8 @@ iavf_hash_parse_pattern(struct iavf_pattern_match_item *pattern_match_item,
 {
 	const struct rte_flow_item *item = pattern;
 	const struct rte_flow_item_gtp_psc *psc;
+	bool outer_ipv4 = false;
+	bool outer_ipv6 = false;
 
 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->last) {
@@ -515,6 +527,22 @@ iavf_hash_parse_pattern(struct iavf_pattern_match_item *pattern_match_item,
 		}
 
 		switch (item->type) {
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			if (outer_ipv4)
+				*phint |= IAVF_PHINT_OUTER_IPV4_INNER_IPV4;
+			else if (outer_ipv6)
+				*phint |= IAVF_PHINT_OUTER_IPV6_INNER_IPV4;
+			else
+				outer_ipv4 = true;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			if (outer_ipv4)
+				*phint |= IAVF_PHINT_OUTER_IPV4_INNER_IPV6;
+			else if (outer_ipv6)
+				*phint |= IAVF_PHINT_OUTER_IPV6_INNER_IPV6;
+			else
+				outer_ipv6 = true;
+			break;
 		case RTE_FLOW_ITEM_TYPE_GTPU:
 			*phint |= IAVF_PHINT_GTPU;
 			break;
@@ -533,9 +561,6 @@ iavf_hash_parse_pattern(struct iavf_pattern_match_item *pattern_match_item,
 		}
 	}
 
-	/* update and restore pattern hint */
-	*phint |= *(uint64_t *)(pattern_match_item->meta);
-
 	return 0;
 }
 
@@ -712,6 +737,7 @@ static void
 iavf_refine_proto_hdrs_by_pattern(struct virtchnl_proto_hdrs *proto_hdrs,
 				  uint64_t phint)
 {
+	struct virtchnl_proto_hdr *hdr_outer;
 	struct virtchnl_proto_hdr *hdr1;
 	struct virtchnl_proto_hdr *hdr2;
 	int i;
@@ -720,6 +746,20 @@ iavf_refine_proto_hdrs_by_pattern(struct virtchnl_proto_hdrs *proto_hdrs,
 		return;
 
 	if (proto_hdrs->tunnel_level == TUNNEL_LEVEL_INNER) {
+		if (phint & IAVF_PHINT_LAYERS_MSK) {
+			/* add the outer IP header of the GTPU tunnel */
+			hdr_outer = &proto_hdrs->proto_hdr[proto_hdrs->count];
+			hdr_outer->field_selector = 0;
+			proto_hdrs->count++;
+
+			if (phint & (IAVF_PHINT_OUTER_IPV4_INNER_IPV4 |
+				     IAVF_PHINT_OUTER_IPV4_INNER_IPV6))
+				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr_outer, IPV4);
+			else if (phint & (IAVF_PHINT_OUTER_IPV6_INNER_IPV4 |
+					  IAVF_PHINT_OUTER_IPV6_INNER_IPV6))
+				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr_outer, IPV6);
+		}
+
 		/* shift headers 1 layer */
 		for (i = proto_hdrs->count; i > 0; i--) {
 			hdr1 = &proto_hdrs->proto_hdr[i];
--
2.20.1
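To illustrate the classification added to iavf_hash_parse_pattern() above, here is a standalone sketch (not driver code; the names and main() are made up for illustration): the first IP item found in the pattern is taken as the outer header, the second as the inner one, and the combination is recorded as a pattern hint bit mirroring the new IAVF_PHINT_OUTER_*_INNER_* values.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PHINT_OUT4_IN4	(1ULL << 4)	/* mirrors IAVF_PHINT_OUTER_IPV4_INNER_IPV4 */
#define PHINT_OUT4_IN6	(1ULL << 5)
#define PHINT_OUT6_IN4	(1ULL << 6)
#define PHINT_OUT6_IN6	(1ULL << 7)

enum ip_item { ITEM_IPV4, ITEM_IPV6 };

static uint64_t
classify_ip_layers(const enum ip_item *items, int n)
{
	bool outer_ipv4 = false, outer_ipv6 = false;
	uint64_t phint = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (items[i] == ITEM_IPV4) {
			if (outer_ipv4)		/* second IP item: inner IPv4 */
				phint |= PHINT_OUT4_IN4;
			else if (outer_ipv6)
				phint |= PHINT_OUT6_IN4;
			else			/* first IP item: outer IPv4 */
				outer_ipv4 = true;
		} else {
			if (outer_ipv4)		/* second IP item: inner IPv6 */
				phint |= PHINT_OUT4_IN6;
			else if (outer_ipv6)
				phint |= PHINT_OUT6_IN6;
			else			/* first IP item: outer IPv6 */
				outer_ipv6 = true;
		}
	}
	return phint;
}

int
main(void)
{
	/* eth / ipv6 / udp / gtpu / ipv4: outer IPv6, inner IPv4 */
	enum ip_item pat[] = { ITEM_IPV6, ITEM_IPV4 };

	printf("phint = 0x%llx\n",
	       (unsigned long long)classify_ip_layers(pat, 2));	/* 0x40 */
	return 0;
}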