> -----Original Message-----
> From: Wu, Wenjun1 <wenjun1...@intel.com>
> Sent: Monday, February 28, 2022 3:36 PM
> To: dev@dpdk.org; Zhang, Qi Z <qi.z.zh...@intel.com>; Yang, Qiming
> <qiming.y...@intel.com>
> Cc: Van Haaren, Harry <harry.van.haa...@intel.com>; Su, Simei
> <simei...@intel.com>; Wu, Wenjun1 <wenjun1...@intel.com>
> Subject: [PATCH v4] net/ice: improve performance of RX timestamp offload
> 
> Previously, each time a burst of packets is received, SW reads HW register
> and assembles it and the timestamp from descriptor together to get the
> complete 64 bits timestamp.
> 
> This patch optimizes the algorithm. The SW only needs to check the
> monotonicity of the low 32-bit timestamp to detect wrap-around.
> Each time before the SW receives a burst of packets, it should check the
> time difference between the current time and the last update time to
> ensure the low 32-bit timestamp cannot have wrapped around twice.

Overall, the patch looks good to me, and we can Cc stable for the LTS releases,
but I'd like to defer it to the next release, as we are close to the release
date and I don't want to risk merging complex changes at this point.

Regards
Qi
> 
> Signed-off-by: Wenjun Wu <wenjun1...@intel.com>
> 
> ---
> v4: rework initialization behavior
> v3: add missing conditional compilation
> v2: add conditional compilation
> ---
>  drivers/net/ice/ice_ethdev.h |   3 +
>  drivers/net/ice/ice_rxtx.c   | 118 +++++++++++++++++++++++++----------
>  2 files changed, 88 insertions(+), 33 deletions(-)
> 
> diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index
> 3ed580d438..6778941d7d 100644
> --- a/drivers/net/ice/ice_ethdev.h
> +++ b/drivers/net/ice/ice_ethdev.h
> @@ -554,6 +554,9 @@ struct ice_adapter {
>       struct rte_timecounter tx_tstamp_tc;
>       bool ptp_ena;
>       uint64_t time_hw;
> +     uint32_t hw_time_high; /* high 32 bits of timestamp */
> +     uint32_t hw_time_low; /* low 32 bits of timestamp */
> +     uint64_t hw_time_update; /* SW time of HW record updating */
>       struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
>       struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
>       /* True if DCF state of the associated PF is on */ diff --git
> a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index
> 4f218bcd0d..4b0bcd4863 100644
> --- a/drivers/net/ice/ice_rxtx.c
> +++ b/drivers/net/ice/ice_rxtx.c
> @@ -1574,9 +1574,10 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
>       uint64_t pkt_flags = 0;
>       uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;  #ifndef
> RTE_LIBRTE_ICE_16BYTE_RX_DESC
> +     bool is_tsinit = false;
> +     uint64_t ts_ns;
>       struct ice_vsi *vsi = rxq->vsi;
>       struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
> -     uint64_t ts_ns;
>       struct ice_adapter *ad = rxq->vsi->adapter;  #endif
>       rxdp = &rxq->rx_ring[rxq->rx_tail];
> @@ -1588,8 +1589,14 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
>       if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
>               return 0;
> 
> -     if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
> -             rxq->hw_register_set = 1;
> +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> +     if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
> +             uint64_t sw_cur_time = rte_get_timer_cycles() /
> (rte_get_timer_hz() /
> +1000);
> +
> +             if (unlikely(sw_cur_time - ad->hw_time_update > 4))
> +                     is_tsinit = 1;
> +     }
> +#endif
> 
>       /**
>        * Scan LOOK_AHEAD descriptors at a time to determine which @@ -
> 1625,14 +1632,26 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
>                       rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
> #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
>                       if (ice_timestamp_dynflag > 0) {
> -                             ts_ns = ice_tstamp_convert_32b_64b(hw, ad,
> -                                     rxq->hw_register_set,
> -
>       rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
> -                             rxq->hw_register_set = 0;
> +                             rxq->time_high =
> +                             rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
> +                             if (unlikely(is_tsinit)) {
> +                                     ts_ns =
> ice_tstamp_convert_32b_64b(hw, ad, 1,
> +                                                                        rxq-
> >time_high);
> +                                     ad->hw_time_low = (uint32_t)ts_ns;
> +                                     ad->hw_time_high =
> (uint32_t)(ts_ns >> 32);
> +                                     is_tsinit = false;
> +                             } else {
> +                                     if (rxq->time_high < ad-
> >hw_time_low)
> +                                             ad->hw_time_high += 1;
> +                                     ts_ns = (uint64_t)ad->hw_time_high
> << 32 | rxq->time_high;
> +                                     ad->hw_time_low = rxq->time_high;
> +                             }
> +                             ad->hw_time_update = rte_get_timer_cycles()
> /
> +                                                  (rte_get_timer_hz() /
> 1000);
>                               *RTE_MBUF_DYNFIELD(mb,
> -                                     ice_timestamp_dynfield_offset,
> -                                     rte_mbuf_timestamp_t *) = ts_ns;
> -                             mb->ol_flags |= ice_timestamp_dynflag;
> +
> ice_timestamp_dynfield_offset,
> +                                                rte_mbuf_timestamp_t *) =
> ts_ns;
> +                             pkt_flags |= ice_timestamp_dynflag;
>                       }
> 
>                       if (ad->ptp_ena && ((mb->packet_type & @@ -
> 1831,14 +1850,19 @@ ice_recv_scattered_pkts(void *rx_queue,
>       uint64_t pkt_flags;
>       uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;  #ifndef
> RTE_LIBRTE_ICE_16BYTE_RX_DESC
> +     bool is_tsinit = false;
> +     uint64_t ts_ns;
>       struct ice_vsi *vsi = rxq->vsi;
>       struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
> -     uint64_t ts_ns;
>       struct ice_adapter *ad = rxq->vsi->adapter; -#endif
> 
> -     if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
> -             rxq->hw_register_set = 1;
> +     if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
> +             uint64_t sw_cur_time = rte_get_timer_cycles() /
> (rte_get_timer_hz() /
> +1000);
> +
> +             if (unlikely(sw_cur_time - ad->hw_time_update > 4))
> +                     is_tsinit = true;
> +     }
> +#endif
> 
>       while (nb_rx < nb_pkts) {
>               rxdp = &rx_ring[rx_id];
> @@ -1951,14 +1975,25 @@ ice_recv_scattered_pkts(void *rx_queue,
>               pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
>  #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
>               if (ice_timestamp_dynflag > 0) {
> -                     ts_ns = ice_tstamp_convert_32b_64b(hw, ad,
> -                             rxq->hw_register_set,
> -                             rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
> -                     rxq->hw_register_set = 0;
> -                     *RTE_MBUF_DYNFIELD(first_seg,
> -                             ice_timestamp_dynfield_offset,
> -                             rte_mbuf_timestamp_t *) = ts_ns;
> -                     first_seg->ol_flags |= ice_timestamp_dynflag;
> +                     rxq->time_high =
> +                        rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
> +                     if (unlikely(is_tsinit)) {
> +                             ts_ns = ice_tstamp_convert_32b_64b(hw, ad,
> 1, rxq->time_high);
> +                             ad->hw_time_low = (uint32_t)ts_ns;
> +                             ad->hw_time_high = (uint32_t)(ts_ns >> 32);
> +                             is_tsinit = false;
> +                     } else {
> +                             if (rxq->time_high < ad->hw_time_low)
> +                                     ad->hw_time_high += 1;
> +                             ts_ns = (uint64_t)ad->hw_time_high << 32 |
> rxq->time_high;
> +                             ad->hw_time_low = rxq->time_high;
> +                     }
> +                     ad->hw_time_update = rte_get_timer_cycles() /
> +                                          (rte_get_timer_hz() / 1000);
> +                     *RTE_MBUF_DYNFIELD(rxm,
> +                                        (ice_timestamp_dynfield_offset),
> +                                        rte_mbuf_timestamp_t *) = ts_ns;
> +                     pkt_flags |= ice_timestamp_dynflag;
>               }
> 
>               if (ad->ptp_ena && ((first_seg->packet_type &
> RTE_PTYPE_L2_MASK) @@ -2325,14 +2360,19 @@ ice_recv_pkts(void
> *rx_queue,
>       uint64_t pkt_flags;
>       uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;  #ifndef
> RTE_LIBRTE_ICE_16BYTE_RX_DESC
> +     bool is_tsinit = false;
> +     uint64_t ts_ns;
>       struct ice_vsi *vsi = rxq->vsi;
>       struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
> -     uint64_t ts_ns;
>       struct ice_adapter *ad = rxq->vsi->adapter; -#endif
> 
> -     if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
> -             rxq->hw_register_set = 1;
> +     if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
> +             uint64_t sw_cur_time = rte_get_timer_cycles() /
> (rte_get_timer_hz() /
> +1000);
> +
> +             if (unlikely(sw_cur_time - ad->hw_time_update > 4))
> +                     is_tsinit = 1;
> +     }
> +#endif
> 
>       while (nb_rx < nb_pkts) {
>               rxdp = &rx_ring[rx_id];
> @@ -2386,14 +2426,25 @@ ice_recv_pkts(void *rx_queue,
>               pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
>  #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
>               if (ice_timestamp_dynflag > 0) {
> -                     ts_ns = ice_tstamp_convert_32b_64b(hw, ad,
> -                             rxq->hw_register_set,
> -                             rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
> -                     rxq->hw_register_set = 0;
> +                     rxq->time_high =
> +                        rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
> +                     if (unlikely(is_tsinit)) {
> +                             ts_ns = ice_tstamp_convert_32b_64b(hw, ad,
> 1, rxq->time_high);
> +                             ad->hw_time_low = (uint32_t)ts_ns;
> +                             ad->hw_time_high = (uint32_t)(ts_ns >> 32);
> +                             is_tsinit = false;
> +                     } else {
> +                             if (rxq->time_high < ad->hw_time_low)
> +                                     ad->hw_time_high += 1;
> +                             ts_ns = (uint64_t)ad->hw_time_high << 32 |
> rxq->time_high;
> +                             ad->hw_time_low = rxq->time_high;
> +                     }
> +                     ad->hw_time_update = rte_get_timer_cycles() /
> +                                          (rte_get_timer_hz() / 1000);
>                       *RTE_MBUF_DYNFIELD(rxm,
> -                             ice_timestamp_dynfield_offset,
> -                             rte_mbuf_timestamp_t *) = ts_ns;
> -                     rxm->ol_flags |= ice_timestamp_dynflag;
> +                                        (ice_timestamp_dynfield_offset),
> +                                        rte_mbuf_timestamp_t *) = ts_ns;
> +                     pkt_flags |= ice_timestamp_dynflag;
>               }
> 
>               if (ad->ptp_ena && ((rxm->packet_type &
> RTE_PTYPE_L2_MASK) == @@ -2408,6 +2459,7 @@ ice_recv_pkts(void
> *rx_queue,
>               /* copy old mbuf to rx_pkts */
>               rx_pkts[nb_rx++] = rxm;
>       }
> +
>       rxq->rx_tail = rx_id;
>       /**
>        * If the number of free RX descriptors is greater than the RX free
> --
> 2.25.1

Reply via email to