Hi, Guojia > -----Original Message----- > From: Guo, Jia <jia....@intel.com> > Sent: Friday, July 24, 2020 3:13 PM > To: Su, Simei <simei...@intel.com>; Zhang, Qi Z <qi.z.zh...@intel.com>; Xing, > Beilei <beilei.x...@intel.com> > Cc: dev@dpdk.org > Subject: Re: [PATCH] net/ice: fix GTPU down/uplink and extension conflict > > hi, simei > > On 7/24/2020 10:10 AM, Simei Su wrote: > > When adding a RSS rule with GTPU_DWN/UP, it will search profile table > > from the top index. If a RSS rule with GTPU_EH already exists, then > > GTPU_DWN/UP packet will match GTPU_EH profile. This patch solves this > > issue by removing existed GTPU_EH rule before creating a new > > GTPU_DWN/UP rule. > > > Suggest interpreting the relationship between GTPU_EH_UPLINK/DWNLINK and > GTPU_EH to help explain the reason.
Ok, I will refine the commit message. > > > > Fixes: 2e2810fc1868 ("net/ice: fix GTPU RSS") > > > > Signed-off-by: Simei Su <simei...@intel.com> > > --- > > drivers/net/ice/ice_ethdev.c | 47 +++++++++++++++ > > drivers/net/ice/ice_ethdev.h | 15 +++++ > > drivers/net/ice/ice_hash.c | 139 > +++++++++++++++++++++++++++++++++++++++++++ > > 3 files changed, 201 insertions(+) > > > > diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c > > index a4a0390..8839146 100644 > > --- a/drivers/net/ice/ice_ethdev.c > > +++ b/drivers/net/ice/ice_ethdev.c > > @@ -2538,6 +2538,11 @@ static int ice_parse_devargs(struct rte_eth_dev > *dev) > > if (ret) > > PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d", > > __func__, ret); > > > A blank line need. Ok. > > > > + /* Store hash field and header for gtpu_eh ipv4 */ > > + pf->gtpu_eh.ipv4.hash_fld = ICE_FLOW_HASH_IPV4; > > + pf->gtpu_eh.ipv4.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH | > > + ICE_FLOW_SEG_HDR_IPV4 | > > + ICE_FLOW_SEG_HDR_IPV_OTHER; > > > > ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, > > ICE_FLOW_SEG_HDR_PPPOE | > > @@ -2564,6 +2569,11 @@ static int ice_parse_devargs(struct rte_eth_dev > *dev) > > if (ret) > > PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d", > > __func__, ret); > > + /* Store hash field and header for gtpu_eh ipv6 */ > > + pf->gtpu_eh.ipv6.hash_fld = ICE_FLOW_HASH_IPV6; > > + pf->gtpu_eh.ipv6.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH | > > + ICE_FLOW_SEG_HDR_IPV6 | > > + ICE_FLOW_SEG_HDR_IPV_OTHER; > > > > ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, > > ICE_FLOW_SEG_HDR_PPPOE | > > @@ -2586,6 +2596,9 @@ static int ice_parse_devargs(struct rte_eth_dev > *dev) > > if (ret) > > PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d", > > __func__, ret); > > + /* Store hash field and header for gtpu_eh ipv4_udp */ > > + pf->gtpu_eh.ipv4_udp.hash_fld = ICE_HASH_UDP_IPV4; > > + pf->gtpu_eh.ipv4_udp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; > > > > ret = ice_add_rss_cfg(hw, 
vsi->idx, ICE_HASH_UDP_IPV4, > > ICE_FLOW_SEG_HDR_PPPOE, 0); > > @@ -2606,6 +2619,9 @@ static int ice_parse_devargs(struct rte_eth_dev > *dev) > > if (ret) > > PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d", > > __func__, ret); > > + /* Store hash field and header for gtpu_eh ipv6_udp */ > > + pf->gtpu_eh.ipv6_udp.hash_fld = ICE_HASH_UDP_IPV6; > > + pf->gtpu_eh.ipv6_udp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; > > > > ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, > > ICE_FLOW_SEG_HDR_PPPOE, 0); > > @@ -2626,6 +2642,9 @@ static int ice_parse_devargs(struct rte_eth_dev > *dev) > > if (ret) > > PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d", > > __func__, ret); > > + /* Store hash field and header for gtpu_eh ipv4_tcp */ > > + pf->gtpu_eh.ipv4_tcp.hash_fld = ICE_HASH_TCP_IPV4; > > + pf->gtpu_eh.ipv4_tcp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; > > > > ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, > > ICE_FLOW_SEG_HDR_PPPOE, 0); > > @@ -2646,6 +2665,9 @@ static int ice_parse_devargs(struct rte_eth_dev > *dev) > > if (ret) > > PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d", > > __func__, ret); > > + /* Store hash field and header for gtpu_eh ipv6_tcp */ > > + pf->gtpu_eh.ipv6_tcp.hash_fld = ICE_HASH_TCP_IPV6; > > + pf->gtpu_eh.ipv6_tcp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; > > > > ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, > > ICE_FLOW_SEG_HDR_PPPOE, 0); > > @@ -2695,6 +2717,28 @@ static int ice_parse_devargs(struct rte_eth_dev > *dev) > > } > > } > > > > +static void > > +ice_rss_ctx_init(struct ice_pf *pf) > > +{ > > + pf->gtpu_eh.ipv4.hash_fld = 0; > > + pf->gtpu_eh.ipv4.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv6.hash_fld = 0; > > + pf->gtpu_eh.ipv6.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv4_udp.hash_fld = 0; > > + pf->gtpu_eh.ipv4_udp.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv6_udp.hash_fld = 0; > > + pf->gtpu_eh.ipv6_udp.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv4_tcp.hash_fld = 0; > > + pf->gtpu_eh.ipv4_tcp.pkt_hdr = 0; > > + 
> > + pf->gtpu_eh.ipv6_tcp.hash_fld = 0; > > + pf->gtpu_eh.ipv6_tcp.pkt_hdr = 0; > > +} > > + > > static int ice_init_rss(struct ice_pf *pf) > > { > > struct ice_hw *hw = ICE_PF_TO_HW(pf); > > @@ -2755,6 +2799,9 @@ static int ice_init_rss(struct ice_pf *pf) > > (1 << VSIQF_HASH_CTL_HASH_SCHEME_S); > > ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg); > > > > + /* Initialize RSS context for gtpu_eh */ > > + ice_rss_ctx_init(pf); > > + > > /* RSS hash configuration */ > > ice_rss_hash_set(pf, rss_conf->rss_hf); > > > > diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h > > index 87984ef..1baf0b4 100644 > > --- a/drivers/net/ice/ice_ethdev.h > > +++ b/drivers/net/ice/ice_ethdev.h > > @@ -358,6 +358,20 @@ struct ice_fdir_info { > > struct ice_fdir_counter_pool_container counter; > > }; > > > > +struct ice_gtpu_eh { > > + uint32_t pkt_hdr; > > + uint64_t hash_fld; > > +}; > > + > > > The naming "ice_gtpu_eh" is not clear, ice_hash_gtpu_eh_ctx? > Ok, I will rename it to make it clear. > > > +struct ice_hash_gtpu_eh { > > + struct ice_gtpu_eh ipv4; > > + struct ice_gtpu_eh ipv6; > > + struct ice_gtpu_eh ipv4_udp; > > + struct ice_gtpu_eh ipv6_udp; > > + struct ice_gtpu_eh ipv4_tcp; > > + struct ice_gtpu_eh ipv6_tcp; > > +}; > > + > > > I think you don't need to define struct for each pattern and set/unset > value for all of them, what you considate just 3 item, that are hdr, > hash filed and the pattern type, so you could just defined as below > > struct ice_hash_gtpu_eh_ctx { > > uint32_t pkt_hdr; > > uint64_t hash_fld; > > uint64_t hdr_hint; // for example: hdr = > ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP > > } > > and then only check this hdr_hint when set uplink/dwnlink > > if(hdr & pf->gtpu_eh_ctx->hdr_hint) > > ice_rem_rss_cfg Because we should also remember default rule context, so we still need to define structure for each pattern. 
> > > struct ice_pf { > > struct ice_adapter *adapter; /* The adapter this PF associate to */ > > struct ice_vsi *main_vsi; /* pointer to main VSI structure */ > > @@ -381,6 +395,7 @@ struct ice_pf { > > uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */ > > uint16_t fdir_qp_offset; > > struct ice_fdir_info fdir; /* flow director info */ > > + struct ice_hash_gtpu_eh gtpu_eh; > > uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; > > uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; > > struct ice_hw_port_stats stats_offset; > > diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c > > index e535e4b..dd70353 100644 > > --- a/drivers/net/ice/ice_hash.c > > +++ b/drivers/net/ice/ice_hash.c > > @@ -1232,6 +1232,117 @@ struct ice_hash_match_type > ice_hash_type_list[] = { > > } > > > > static int > > +ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr, uint64_t fld) > > +{ > > + struct ice_hw *hw = ICE_PF_TO_HW(pf); > > + struct ice_vsi *vsi = pf->main_vsi; > > + int ret; > > + > > + /** > > + * If header field contains GTPU_EH, store gtpu_eh context. > > + * If header field contains GTPU_DWN/UP, remove existed gtpu_eh. > > + */ > > + if ((hdr & ICE_FLOW_SEG_HDR_GTPU_EH) && > > + (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN | > > + ICE_FLOW_SEG_HDR_GTPU_UP)) == 0) { > > > No need to check !=DWN/UP here, EH/DWN/UP are mutual exclusion, they > also handler when parse pattern. Ok. I thought EH and DWN/UP could co-exist. > > > > + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && > > + (hdr & ICE_FLOW_SEG_HDR_UDP)) { > > > Alignment should match open parenthesis. Below is the same. OK. 
> > > > + pf->gtpu_eh.ipv4_udp.pkt_hdr = hdr; > > + pf->gtpu_eh.ipv4_udp.hash_fld = fld; > > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && > > + (hdr & ICE_FLOW_SEG_HDR_UDP)) { > > + pf->gtpu_eh.ipv6_udp.pkt_hdr = hdr; > > + pf->gtpu_eh.ipv6_udp.hash_fld = fld; > > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && > > + (hdr & ICE_FLOW_SEG_HDR_TCP)) { > > + pf->gtpu_eh.ipv4_tcp.pkt_hdr = hdr; > > + pf->gtpu_eh.ipv4_tcp.hash_fld = fld; > > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && > > + (hdr & ICE_FLOW_SEG_HDR_TCP)) { > > + pf->gtpu_eh.ipv6_tcp.pkt_hdr = hdr; > > + pf->gtpu_eh.ipv6_tcp.hash_fld = fld; > > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && > > + (hdr & (ICE_FLOW_SEG_HDR_UDP | > > + ICE_FLOW_SEG_HDR_TCP)) == 0) { > > + pf->gtpu_eh.ipv4.pkt_hdr = hdr; > > + pf->gtpu_eh.ipv4.hash_fld = fld; > > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && > > + (hdr & (ICE_FLOW_SEG_HDR_UDP | > > + ICE_FLOW_SEG_HDR_TCP)) == 0) { > > + pf->gtpu_eh.ipv6.pkt_hdr = hdr; > > + pf->gtpu_eh.ipv6.hash_fld = fld; > > + } > > + } else if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN | > > + ICE_FLOW_SEG_HDR_GTPU_UP)) { > > + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && > > + (hdr & ICE_FLOW_SEG_HDR_UDP)) { > > + if (pf->gtpu_eh.ipv4_udp.hash_fld && > > + pf->gtpu_eh.ipv4_udp.pkt_hdr) { > > + ret = ice_rem_rss_cfg(hw, vsi->idx, > > + pf->gtpu_eh.ipv4_udp.hash_fld, > > + pf->gtpu_eh.ipv4_udp.pkt_hdr); > > + if (ret) > > + return -rte_errno; > > + } > > > Is it better to use a local variable and then call ice_rem_rss_cfg one > time after if-else? Yes, you are right. I will clean code in next version. Thanks. 
Br Simei > > > > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && > > + (hdr & ICE_FLOW_SEG_HDR_UDP)) { > > + if (pf->gtpu_eh.ipv6_udp.hash_fld && > > + pf->gtpu_eh.ipv6_udp.pkt_hdr) { > > + ret = ice_rem_rss_cfg(hw, vsi->idx, > > + pf->gtpu_eh.ipv6_udp.hash_fld, > > + pf->gtpu_eh.ipv6_udp.pkt_hdr); > > + if (ret) > > + return -rte_errno; > > + } > > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && > > + (hdr & ICE_FLOW_SEG_HDR_TCP)) { > > + if (pf->gtpu_eh.ipv4_tcp.hash_fld && > > + pf->gtpu_eh.ipv4_tcp.pkt_hdr) { > > + ret = ice_rem_rss_cfg(hw, vsi->idx, > > + pf->gtpu_eh.ipv4_tcp.hash_fld, > > + pf->gtpu_eh.ipv4_tcp.pkt_hdr); > > + if (ret) > > + return -rte_errno; > > + } > > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && > > + (hdr & ICE_FLOW_SEG_HDR_TCP)) { > > + if (pf->gtpu_eh.ipv6_tcp.hash_fld && > > + pf->gtpu_eh.ipv6_tcp.pkt_hdr) { > > + ret = ice_rem_rss_cfg(hw, vsi->idx, > > + pf->gtpu_eh.ipv6_tcp.hash_fld, > > + pf->gtpu_eh.ipv6_tcp.pkt_hdr); > > + if (ret) > > + return -rte_errno; > > + } > > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && > > + (hdr & (ICE_FLOW_SEG_HDR_UDP | > > + ICE_FLOW_SEG_HDR_TCP)) == 0) { > > + if (pf->gtpu_eh.ipv4.hash_fld && > > + pf->gtpu_eh.ipv4.pkt_hdr) { > > + ret = ice_rem_rss_cfg(hw, vsi->idx, > > + pf->gtpu_eh.ipv4.hash_fld, > > + pf->gtpu_eh.ipv4.pkt_hdr); > > + if (ret) > > + return -rte_errno; > > + } > > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && > > + (hdr & (ICE_FLOW_SEG_HDR_UDP | > > + ICE_FLOW_SEG_HDR_TCP)) == 0) { > > + if (pf->gtpu_eh.ipv6.hash_fld && > > + pf->gtpu_eh.ipv6.pkt_hdr) { > > + ret = ice_rem_rss_cfg(hw, vsi->idx, > > + pf->gtpu_eh.ipv6.hash_fld, > > + pf->gtpu_eh.ipv6.pkt_hdr); > > + if (ret) > > + return -rte_errno; > > + } > > + } > > + } > > + > > + return 0; > > +} > > + > > +static int > > ice_hash_create(struct ice_adapter *ad, > > struct rte_flow *flow, > > void *meta, > > @@ -1248,6 +1359,10 @@ struct ice_hash_match_type ice_hash_type_list[] > = { > > uint64_t hash_field = ((struct rss_meta 
*)meta)->hash_flds; > > uint8_t hash_function = ((struct rss_meta *)meta)->hash_function; > > > > + ret = ice_add_rss_cfg_pre(pf, headermask, hash_field); > > + if (ret) > > + return -rte_errno; > > + > > filter_ptr = rte_zmalloc("ice_rss_filter", > > sizeof(struct ice_hash_flow_cfg), 0); > > if (!filter_ptr) { > > @@ -1297,6 +1412,28 @@ struct ice_hash_match_type ice_hash_type_list[] > = { > > return -rte_errno; > > } > > > > +static void > > +ice_rem_rss_cfg_post(struct ice_pf *pf) > > +{ > > + pf->gtpu_eh.ipv4.hash_fld = 0; > > + pf->gtpu_eh.ipv4.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv6.hash_fld = 0; > > + pf->gtpu_eh.ipv6.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv4_udp.hash_fld = 0; > > + pf->gtpu_eh.ipv4_udp.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv6_udp.hash_fld = 0; > > + pf->gtpu_eh.ipv6_udp.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv4_tcp.hash_fld = 0; > > + pf->gtpu_eh.ipv4_tcp.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv6_tcp.hash_fld = 0; > > + pf->gtpu_eh.ipv6_tcp.pkt_hdr = 0; > > +} > > + > > static int > > ice_hash_destroy(struct ice_adapter *ad, > > struct rte_flow *flow, > > @@ -1334,6 +1471,8 @@ struct ice_hash_match_type ice_hash_type_list[] > = { > > } > > } > > > > + ice_rem_rss_cfg_post(pf); > > + > > rte_free(filter_ptr); > > return 0; > >