Enable the GTPU pattern for the CVL switch filter. This patch supports outer L3/L4 filtering only.
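For illustration, a rule matching the outer GTPU TEID could be created through the generic rte_flow API roughly as follows. This is a minimal sketch, not part of this patch; the port id, TEID value and queue index are placeholders, and the QUEUE action stands in for whatever action the application actually needs:

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Create an ingress rule ETH / IPV4 / UDP / GTPU that matches the
 * outer TEID only and steers hits to the given Rx queue. */
static struct rte_flow *
create_gtpu_teid_rule(uint16_t port_id, rte_be32_t teid,
		      uint16_t queue_idx, struct rte_flow_error *err)
{
	/* Match on TEID only: all other GTPU fields are left unmasked. */
	struct rte_flow_item_gtp gtp_spec = { .teid = teid };
	struct rte_flow_item_gtp gtp_mask = { .teid = RTE_BE32(0xffffffff) };
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTPU,
		  .spec = &gtp_spec, .mask = &gtp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_idx };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

The testpmd equivalent would be along the lines of:
flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / end actions queue index 2 / end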
Signed-off-by: Yuying <yuying.zh...@intel.com>
---
 doc/guides/rel_notes/release_21_05.rst |  3 +
 drivers/net/ice/ice_switch_filter.c    | 91 ++++++++++++++++++++++++++
 2 files changed, 94 insertions(+)

diff --git a/doc/guides/rel_notes/release_21_05.rst b/doc/guides/rel_notes/release_21_05.rst
index 88e7607a08..8507dc948f 100644
--- a/doc/guides/rel_notes/release_21_05.rst
+++ b/doc/guides/rel_notes/release_21_05.rst
@@ -91,6 +91,9 @@ New Features
   * Added a command line option to configure forced speed for Ethernet port.
     ``dpdk-testpmd -c 0xff -- -i --eth-link-speed N``
 
+* **Updated Intel ice driver.**
+
+  * Added GTP TEID support for DCF switch filter.
 
 Removed Items
 -------------
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index ada3ecf60b..9147a5fdbe 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -137,6 +137,17 @@
 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
 	ICE_SW_INSET_MAC_IPV6 | \
 	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
+#define ICE_SW_INSET_MAC_IPV4_GTPU ( \
+	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_GTPU_TEID)
+#define ICE_SW_INSET_MAC_IPV4_GTPU_EH ( \
+	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_GTPU_TEID | \
+	ICE_INSET_GTPU_QFI)
+#define ICE_SW_INSET_MAC_IPV6_GTPU ( \
+	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_GTPU_TEID)
+#define ICE_SW_INSET_MAC_IPV6_GTPU_EH ( \
+	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_GTPU_TEID | \
+	ICE_INSET_GTPU_QFI)
+
 
 struct sw_meta {
 	struct ice_adv_lkup_elem *list;
@@ -198,6 +209,10 @@ ice_pattern_match_item ice_switch_pattern_dist_list[] = {
 	{pattern_eth_qinq_pppoes_proto,	ICE_SW_INSET_MAC_PPPOE_PROTO,	ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_qinq_pppoes_ipv4,	ICE_SW_INSET_MAC_PPPOE_IPV4,	ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_qinq_pppoes_ipv6,	ICE_SW_INSET_MAC_PPPOE_IPV6,	ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_ipv4_gtpu,	ICE_SW_INSET_MAC_IPV4_GTPU,	ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_ipv4_gtpu_eh,	ICE_SW_INSET_MAC_IPV4_GTPU_EH,	ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_ipv6_gtpu,	ICE_SW_INSET_MAC_IPV6_GTPU,	ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_ipv6_gtpu_eh,	ICE_SW_INSET_MAC_IPV6_GTPU_EH,	ICE_INSET_NONE, ICE_INSET_NONE},
 };
 
 static struct
@@ -251,6 +266,10 @@ ice_pattern_match_item ice_switch_pattern_perm_list[] = {
 	{pattern_eth_qinq_pppoes_proto,	ICE_SW_INSET_MAC_PPPOE_PROTO,	ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_qinq_pppoes_ipv4,	ICE_SW_INSET_MAC_PPPOE_IPV4,	ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_qinq_pppoes_ipv6,	ICE_SW_INSET_MAC_PPPOE_IPV6,	ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_ipv4_gtpu,	ICE_SW_INSET_MAC_IPV4_GTPU,	ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_ipv4_gtpu_eh,	ICE_SW_INSET_MAC_IPV4_GTPU_EH,	ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_ipv6_gtpu,	ICE_SW_INSET_MAC_IPV6_GTPU,	ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_ipv6_gtpu_eh,	ICE_SW_INSET_MAC_IPV6_GTPU_EH,	ICE_INSET_NONE, ICE_INSET_NONE},
 };
 
 static int
@@ -378,6 +397,8 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	const struct rte_flow_item_ah *ah_spec, *ah_mask;
 	const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
 	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
+	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
+	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
 	uint64_t input_set = ICE_INSET_NONE;
 	uint16_t input_set_byte = 0;
 	bool pppoe_elem_valid = 0;
@@ -1255,6 +1276,76 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			}
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			gtp_spec = item->spec;
+			gtp_mask = item->mask;
+			if (gtp_spec && !gtp_mask) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"Invalid GTPU item");
+				return 0;
+			}
+			if (gtp_spec && gtp_mask) {
+				if (gtp_mask->v_pt_rsv_flags ||
+				    gtp_mask->msg_type ||
+				    gtp_mask->msg_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid GTPU mask");
+					return 0;
+				}
+				if (gtp_mask->teid)
+					input_set |= ICE_INSET_GTPU_TEID;
+				list[t].type = ICE_GTP;
+				list[t].h_u.gtp_hdr.teid =
+					gtp_spec->teid;
+				list[t].m_u.gtp_hdr.teid =
+					gtp_mask->teid;
+				input_set_byte += 4;
+				t++;
+			}
+			if (ipv4_valid)
+				*tun_type = ICE_SW_TUN_GTP_IPV4;
+			else if (ipv6_valid)
+				*tun_type = ICE_SW_TUN_GTP_IPV6;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+			gtp_psc_spec = item->spec;
+			gtp_psc_mask = item->mask;
+			if (gtp_psc_spec && !gtp_psc_mask) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"Invalid GTPU_EH item");
+				return 0;
+			}
+			if (gtp_psc_spec && gtp_psc_mask) {
+				if (gtp_psc_mask->pdu_type) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid GTPU_EH mask");
+					return 0;
+				}
+				if (gtp_psc_mask->qfi)
+					input_set |= ICE_INSET_GTPU_QFI;
+				list[t].type = ICE_GTP;
+				list[t].h_u.gtp_hdr.qfi =
+					gtp_psc_spec->qfi;
+				list[t].m_u.gtp_hdr.qfi =
+					gtp_psc_mask->qfi;
+				input_set_byte += 1;
+				t++;
+			}
+			if (ipv4_valid)
+				*tun_type = ICE_SW_TUN_GTP_IPV4_EH;
+			else if (ipv6_valid)
+				*tun_type = ICE_SW_TUN_GTP_IPV6_EH;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
-- 
2.25.1