Acked-by: Jeff Guo <jia....@intel.com>
> -----Original Message-----
> From: Wang, Haiyue <haiyue.w...@intel.com>
> Sent: Tuesday, September 22, 2020 2:40 PM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zh...@intel.com>; Jiang, JunyuX
> <junyux.ji...@intel.com>; Rong, Leyi <leyi.r...@intel.com>; Yang, Qiming
> <qiming.y...@intel.com>; Sun, GuinanX <guinanx....@intel.com>; Guo,
> Junfeng <junfeng....@intel.com>; Guo, Jia <jia....@intel.com>; Wang,
> Haiyue <haiyue.w...@intel.com>
> Subject: [PATCH v4] net/ice: refactor the Rx FlexiMD handling
>
> The hardware supports many kinds of FlexiMDs set into the Rx descriptor,
> and the FlexiMDs can have different offsets in the descriptor according to
> the DDP package setting.
>
> The FlexiMD type and offset are identified by the RXDID, which is used to
> set up the queue.
>
> To make it easy to support different RXDIDs in the future, refactor the Rx
> FlexiMD handling into handler functions mapped to the related RXDIDs.
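
The dispatch boils down to a per-queue function pointer that is selected once
from the RXDID at queue setup time, so the per-packet Rx path just makes an
indirect call instead of re-deriving the FlexiMD layout. A minimal sketch of
the pattern, condensed from the diff below (only the VLAN and default cases
are shown, handler bodies elided):

    /* one handler per FlexiMD layout, keyed by RXDID */
    typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,
                                            struct rte_mbuf *mb,
                                            volatile union ice_rx_flex_desc *rxdp);

    /* chosen once when the queue is programmed with its RXDID */
    static void
    ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
    {
        switch (rxdid) {
        case ICE_RXDID_COMMS_AUX_VLAN:
            rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
            rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
            break;
        /* ... other RXDIDs map to their own handler ... */
        default:
            rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
            break;
        }
    }

    /* per packet, the Rx paths then do */
    rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
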
>
> Signed-off-by: Haiyue Wang <haiyue.w...@intel.com>
> ---
> v4: Add comment for the new FlexiMD handler type.
> v3: remove the typedef's __rte_unused, and rewrite the commit title and
> message.
> v2: assign the handler for ICE_RXDID_COMMS_OVS directly instead of using
> fall-through.
> ---
> drivers/net/ice/ice_rxtx.c | 263 ++++++++++++++++++++++---------------
> drivers/net/ice/ice_rxtx.h | 5 +
> 2 files changed, 163 insertions(+), 105 deletions(-)
>
> diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
> index fef6ad454..93a0ac691 100644
> --- a/drivers/net/ice/ice_rxtx.c
> +++ b/drivers/net/ice/ice_rxtx.c
> @@ -25,40 +25,6 @@ uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
> uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
> uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
>
> -static inline uint64_t
> -ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid, bool *chk_valid)
> -{
> - static struct {
> - uint64_t *ol_flag;
> - bool chk_valid;
> - } ol_flag_map[] = {
> - [ICE_RXDID_COMMS_AUX_VLAN] = {
> - &rte_net_ice_dynflag_proto_xtr_vlan_mask, true },
> - [ICE_RXDID_COMMS_AUX_IPV4] = {
> - &rte_net_ice_dynflag_proto_xtr_ipv4_mask, true },
> - [ICE_RXDID_COMMS_AUX_IPV6] = {
> - &rte_net_ice_dynflag_proto_xtr_ipv6_mask, true },
> - [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = {
> - &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask, true },
> - [ICE_RXDID_COMMS_AUX_TCP] = {
> - &rte_net_ice_dynflag_proto_xtr_tcp_mask, true },
> - [ICE_RXDID_COMMS_AUX_IP_OFFSET] = {
> - &rte_net_ice_dynflag_proto_xtr_ip_offset_mask, false },
> - };
> - uint64_t *ol_flag;
> -
> - if (rxdid < RTE_DIM(ol_flag_map)) {
> - ol_flag = ol_flag_map[rxdid].ol_flag;
> - if (!ol_flag)
> - return 0ULL;
> -
> - *chk_valid = ol_flag_map[rxdid].chk_valid;
> - return *ol_flag;
> - }
> -
> - return 0ULL;
> -}
> -
> static inline uint8_t
> ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
> {
> @@ -76,6 +42,159 @@ ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
> rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
> }
>
> +static inline void
> +ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
> + struct rte_mbuf *mb,
> + volatile union ice_rx_flex_desc *rxdp)
> +{
> + volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
> + (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
> +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> + uint16_t stat_err;
> +#endif
> +
> + if (desc->flow_id != 0xFFFFFFFF) {
> + mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
> + mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
> + }
> +
> +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> + stat_err = rte_le_to_cpu_16(desc->status_error0);
> + if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> + mb->ol_flags |= PKT_RX_RSS_HASH;
> + mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> + }
> +#endif
> +}
> +
> +static inline void
> +ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
> + struct rte_mbuf *mb,
> + volatile union ice_rx_flex_desc *rxdp)
> +{
> + volatile struct ice_32b_rx_flex_desc_comms *desc =
> + (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
> + uint16_t stat_err;
> +
> + stat_err = rte_le_to_cpu_16(desc->status_error0);
> + if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> + mb->ol_flags |= PKT_RX_RSS_HASH;
> + mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> + }
> +
> +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> + if (desc->flow_id != 0xFFFFFFFF) {
> + mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
> + mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
> + }
> +
> + if (rxq->xtr_ol_flag) {
> + uint32_t metadata = 0;
> +
> + stat_err = rte_le_to_cpu_16(desc->status_error1);
> +
> + if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
> + metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
> +
> + if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
> + metadata |=
> + rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
> +
> + if (metadata) {
> + mb->ol_flags |= rxq->xtr_ol_flag;
> +
> + *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
> + }
> + }
> +#endif
> +}
> +
> +static inline void
> +ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
> + struct rte_mbuf *mb,
> + volatile union ice_rx_flex_desc *rxdp)
> +{
> + volatile struct ice_32b_rx_flex_desc_comms *desc =
> + (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
> + uint16_t stat_err;
> +
> + stat_err = rte_le_to_cpu_16(desc->status_error0);
> + if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> + mb->ol_flags |= PKT_RX_RSS_HASH;
> + mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> + }
> +
> +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> + if (desc->flow_id != 0xFFFFFFFF) {
> + mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
> + mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
> + }
> +
> + if (rxq->xtr_ol_flag) {
> + uint32_t metadata = 0;
> +
> + if (desc->flex_ts.flex.aux0 != 0xFFFF)
> + metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
> + else if (desc->flex_ts.flex.aux1 != 0xFFFF)
> + metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
> +
> + if (metadata) {
> + mb->ol_flags |= rxq->xtr_ol_flag;
> +
> + *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
> + }
> + }
> +#endif
> +}
> +
> +static void
> +ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
> +{
> + switch (rxdid) {
> + case ICE_RXDID_COMMS_AUX_VLAN:
> + rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
> + rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> + break;
> +
> + case ICE_RXDID_COMMS_AUX_IPV4:
> + rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
> + rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> + break;
> +
> + case ICE_RXDID_COMMS_AUX_IPV6:
> + rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
> + rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> + break;
> +
> + case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
> + rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
> + rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> + break;
> +
> + case ICE_RXDID_COMMS_AUX_TCP:
> + rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
> + rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> + break;
> +
> + case ICE_RXDID_COMMS_AUX_IP_OFFSET:
> + rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
> + rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
> + break;
> +
> + case ICE_RXDID_COMMS_OVS:
> + rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
> + break;
> +
> + default:
> + /* update this according to the RXDID for PROTO_XTR_NONE */
> + rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
> + break;
> + }
> +
> + if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
> + rxq->xtr_ol_flag = 0;
> +}
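
With this structure, supporting another descriptor format later should only
need one more ice_rxd_to_pkt_fields_by_* handler plus a case in this switch,
roughly (hypothetical RXDID and handler name, only to show the extension
point):

    case ICE_RXDID_COMMS_NEW:                        /* hypothetical */
        rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_new;
        break;
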
> +
> static enum ice_status
> ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
> {
> @@ -158,6 +277,8 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
> return -EINVAL;
> }
>
> + ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
> +
> /* Enable Flexible Descriptors in the queue context which
> * allows this driver to select a specific receive descriptor format
> */
> @@ -1338,74 +1459,6 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
> mb->vlan_tci, mb->vlan_tci_outer);
> }
>
> -#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> -#define ICE_RX_PROTO_XTR_VALID \
> - ((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \
> - (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
> -
> -static void
> -ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
> - volatile struct ice_32b_rx_flex_desc_comms_ovs *desc)
> -{
> - uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
> - uint32_t metadata = 0;
> - uint64_t ol_flag;
> - bool chk_valid;
> -
> - ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid, &chk_valid);
> - if (unlikely(!ol_flag))
> - return;
> -
> - if (chk_valid) {
> - if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
> - metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
> -
> - if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
> - metadata |=
> - rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
> - } else {
> - if (rte_le_to_cpu_16(desc->flex_ts.flex.aux0) != 0xFFFF)
> - metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
> - else if (rte_le_to_cpu_16(desc->flex_ts.flex.aux1) != 0xFFFF)
> - metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
> - }
> -
> - if (!metadata)
> - return;
> -
> - mb->ol_flags |= ol_flag;
> -
> - *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
> -}
> -#endif
> -
> -static inline void
> -ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
> - volatile union ice_rx_flex_desc *rxdp)
> -{
> - volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
> - (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
> -#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> - uint16_t stat_err;
> -
> - stat_err = rte_le_to_cpu_16(desc->status_error0);
> - if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> - mb->ol_flags |= PKT_RX_RSS_HASH;
> - mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> - }
> -#endif
> -
> - if (desc->flow_id != 0xFFFFFFFF) {
> - mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
> - mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
> - }
> -
> -#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> - if (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail()))
> - ice_rxd_to_proto_xtr(mb, desc);
> -#endif
> -}
> -
> #define ICE_LOOK_AHEAD 8
> #if (ICE_LOOK_AHEAD != 8)
> #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
> @@ -1463,7 +1516,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
> mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
> ice_rxd_to_vlan_tci(mb, &rxdp[j]);
> - ice_rxd_to_pkt_fields(mb, &rxdp[j]);
> + rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
>
> mb->ol_flags |= pkt_flags;
> }
> @@ -1760,7 +1813,7 @@ ice_recv_scattered_pkts(void *rx_queue,
> first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
> ice_rxd_to_vlan_tci(first_seg, &rxd);
> - ice_rxd_to_pkt_fields(first_seg, &rxd);
> + rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
> pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
> first_seg->ol_flags |= pkt_flags;
> /* Prefetch data of first segment, if configured to do so. */
> @@ -2160,7 +2213,7 @@ ice_recv_pkts(void *rx_queue,
> rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
> ice_rxd_to_vlan_tci(rxm, &rxd);
> - ice_rxd_to_pkt_fields(rxm, &rxd);
> + rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
> pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
> rxm->ol_flags |= pkt_flags;
> /* copy old mbuf to rx_pkts */
> diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
> index 9fa57b3b2..6937faec3 100644
> --- a/drivers/net/ice/ice_rxtx.h
> +++ b/drivers/net/ice/ice_rxtx.h
> @@ -42,6 +42,9 @@
>
> typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
> typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
> +typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,
> + struct rte_mbuf *mb,
> + volatile union ice_rx_flex_desc *rxdp);
>
> struct ice_rx_entry {
> struct rte_mbuf *mbuf;
> @@ -82,6 +85,8 @@ struct ice_rx_queue {
> bool q_set; /* indicate if rx queue has been configured */
> bool rx_deferred_start; /* don't start this queue in dev start */
> uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
> + uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
> + ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
> ice_rx_release_mbufs_t rx_rel_mbufs;
> };
>
> --
> 2.28.0