This patch enables Rx timestamp offload on the AVX512 data path.

Enable timestamp offload with the command-line option '--enable-rx-timestamp'.
Note that enabling Rx timestamp offload reduces performance, as the Rx path
then needs to load the second 16 bytes of each descriptor.
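
For reference, a minimal sketch (not part of this patch) of how an application
can request this offload through the ethdev API; the helper name and the
omitted error handling/port setup are illustrative only:

#include <rte_ethdev.h>

/* Illustrative helper: request Rx timestamp offload at configure time.
 * The vector Rx path later checks this flag through rxq->offloads.
 */
static int
enable_rx_timestamp(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
        struct rte_eth_conf conf = {0};

        conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;

        return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}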

Signed-off-by: Wenjun Wu <wenjun1...@intel.com>
Signed-off-by: Zhichao Zeng <zhichaox.z...@intel.com>

---
v4: rework avx2 patch based on offload path
---
v3: logging with driver dedicated macro
---
v2: fix compile warning
---
 drivers/net/iavf/iavf_rxtx.h            |   3 +-
 drivers/net/iavf/iavf_rxtx_vec_avx512.c | 203 +++++++++++++++++++++++-
 drivers/net/iavf/iavf_rxtx_vec_common.h |   3 -
 3 files changed, 200 insertions(+), 9 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 547b68f441..0345a6a51d 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -47,7 +47,8 @@
                RTE_ETH_RX_OFFLOAD_CHECKSUM |            \
                RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |          \
                RTE_ETH_RX_OFFLOAD_VLAN |                \
-               RTE_ETH_RX_OFFLOAD_RSS_HASH)
+               RTE_ETH_RX_OFFLOAD_RSS_HASH |    \
+               RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 
 /**
  * According to the vlan capabilities returned by the driver and FW, the vlan tci
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 4fe9b97278..f9961e53b8 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -16,18 +16,20 @@
 /******************************************************************************
  * If user knows a specific offload is not enabled by APP,
  * the macro can be commented to save the effort of fast path.
- * Currently below 2 features are supported in RX path,
+ * Currently below 6 features are supported in RX path,
  * 1, checksum offload
  * 2, VLAN/QINQ stripping
  * 3, RSS hash
  * 4, packet type analysis
  * 5, flow director ID report
+ * 6, timestamp offload
  ******************************************************************************/
 #define IAVF_RX_CSUM_OFFLOAD
 #define IAVF_RX_VLAN_OFFLOAD
 #define IAVF_RX_RSS_OFFLOAD
 #define IAVF_RX_PTYPE_OFFLOAD
 #define IAVF_RX_FDIR_OFFLOAD
+#define IAVF_RX_TS_OFFLOAD
 
 static __rte_always_inline void
 iavf_rxq_rearm(struct iavf_rx_queue *rxq)
@@ -587,9 +589,9 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
                                        bool offload)
 {
        struct iavf_adapter *adapter = rxq->vsi->adapter;
-
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
        uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
-
+#endif
 #ifdef IAVF_RX_PTYPE_OFFLOAD
        const uint32_t *type_table = adapter->ptype_tbl;
 #endif
@@ -618,6 +620,25 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
              rte_cpu_to_le_32(1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
                return 0;
 
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+#ifdef IAVF_RX_TS_OFFLOAD
+       uint8_t inflection_point = 0;
+       bool is_tsinit = false;
+       __m256i hw_low_last = _mm256_set_epi32(0, 0, 0, 0, 0, 0, 0, (uint32_t)rxq->phc_time);
+
+       if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+               uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+               if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) {
+                       hw_low_last = _mm256_setzero_si256();
+                       is_tsinit = 1;
+               } else {
+                       hw_low_last = _mm256_set_epi32(0, 0, 0, 0, 0, 0, 0, (uint32_t)rxq->phc_time);
+               }
+       }
+#endif
+#endif
+
        /* constants used in processing loop */
        const __m512i crc_adjust =
                _mm512_set_epi32
@@ -1081,12 +1102,13 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
                if (offload) {
-#ifdef IAVF_RX_RSS_OFFLOAD
+#if defined(IAVF_RX_RSS_OFFLOAD) || defined(IAVF_RX_TS_OFFLOAD)
                        /**
                         * needs to load 2nd 16B of each desc for RSS hash parsing,
                         * will cause performance drop to get into this context.
                         */
                        if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
+                               offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP ||
                            rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
                                /* load bottom half of every 32B desc */
                                const __m128i raw_desc_bh7 =
@@ -1138,6 +1160,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
                                                (_mm256_castsi128_si256(raw_desc_bh0),
                                                 raw_desc_bh1, 1);
 
+#ifdef IAVF_RX_RSS_OFFLOAD
                                if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
                                        /**
                                         * to shift the 32b RSS hash value to the
@@ -1275,7 +1298,125 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
                                        mb0_1 = _mm256_or_si256
                                                        (mb0_1, vlan_tci0_1);
                                }
-                       } /* if() on RSS hash parsing */
+#endif /* IAVF_RX_RSS_OFFLOAD */
+
+#ifdef IAVF_RX_TS_OFFLOAD
+                               if (offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+                                       uint32_t mask = 0xFFFFFFFF;
+                                       __m256i ts;
+                                       __m256i ts_low = _mm256_setzero_si256();
+                                       __m256i ts_low1;
+                                       __m256i ts_low2;
+                                       __m256i max_ret;
+                                       __m256i cmp_ret;
+                                       uint8_t ret = 0;
+                                       uint8_t shift = 8;
+                                       __m256i ts_desp_mask = _mm256_set_epi32(mask, 0, 0, 0, mask, 0, 0, 0);
+                                       __m256i cmp_mask = _mm256_set1_epi32(mask);
+                                       __m256i ts_permute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
+
+                                       ts = _mm256_and_si256(raw_desc_bh0_1, ts_desp_mask);
+                                       ts_low = _mm256_or_si256(ts_low, _mm256_srli_si256(ts, 3 * 4));
+                                       ts = _mm256_and_si256(raw_desc_bh2_3, ts_desp_mask);
+                                       ts_low = _mm256_or_si256(ts_low, _mm256_srli_si256(ts, 2 * 4));
+                                       ts = _mm256_and_si256(raw_desc_bh4_5, ts_desp_mask);
+                                       ts_low = _mm256_or_si256(ts_low, _mm256_srli_si256(ts, 4));
+                                       ts = _mm256_and_si256(raw_desc_bh6_7, ts_desp_mask);
+                                       ts_low = _mm256_or_si256(ts_low, ts);
+
+                                       ts_low1 = _mm256_permutevar8x32_epi32(ts_low, ts_permute_mask);
+                                       ts_low2 = _mm256_permutevar8x32_epi32(ts_low1,
+                                                               _mm256_set_epi32(6, 5, 4, 3, 2, 1, 0, 7));
+                                       ts_low2 = _mm256_and_si256(ts_low2,
+                                                               _mm256_set_epi32(mask, mask, mask, mask, mask, mask, mask, 0));
+                                       ts_low2 = _mm256_or_si256(ts_low2, hw_low_last);
+                                       hw_low_last = _mm256_and_si256(ts_low1,
+                                                               _mm256_set_epi32(0, 0, 0, 0, 0, 0, 0, mask));
+
+                                       *RTE_MBUF_DYNFIELD(rx_pkts[i + 0],
+                                               iavf_timestamp_dynfield_offset, uint32_t *) = _mm256_extract_epi32(ts_low1, 0);
+                                       *RTE_MBUF_DYNFIELD(rx_pkts[i + 1],
+                                               iavf_timestamp_dynfield_offset, uint32_t *) = _mm256_extract_epi32(ts_low1, 1);
+                                       *RTE_MBUF_DYNFIELD(rx_pkts[i + 2],
+                                               iavf_timestamp_dynfield_offset, uint32_t *) = _mm256_extract_epi32(ts_low1, 2);
+                                       *RTE_MBUF_DYNFIELD(rx_pkts[i + 3],
+                                               iavf_timestamp_dynfield_offset, uint32_t *) = _mm256_extract_epi32(ts_low1, 3);
+                                       *RTE_MBUF_DYNFIELD(rx_pkts[i + 4],
+                                               iavf_timestamp_dynfield_offset, uint32_t *) = _mm256_extract_epi32(ts_low1, 4);
+                                       *RTE_MBUF_DYNFIELD(rx_pkts[i + 5],
+                                               iavf_timestamp_dynfield_offset, uint32_t *) = _mm256_extract_epi32(ts_low1, 5);
+                                       *RTE_MBUF_DYNFIELD(rx_pkts[i + 6],
+                                               iavf_timestamp_dynfield_offset, uint32_t *) = _mm256_extract_epi32(ts_low1, 6);
+                                       *RTE_MBUF_DYNFIELD(rx_pkts[i + 7],
+                                               iavf_timestamp_dynfield_offset, uint32_t *) = _mm256_extract_epi32(ts_low1, 7);
+
+                                       if (unlikely(is_tsinit)) {
+                                               uint32_t in_timestamp;
+
+                                               if (iavf_get_phc_time(rxq))
+                                                       PMD_DRV_LOG(ERR, "get physical time failed");
+                                               in_timestamp = *RTE_MBUF_DYNFIELD(rx_pkts[i + 0],
+                                                                               iavf_timestamp_dynfield_offset, uint32_t *);
+                                               rxq->phc_time = iavf_tstamp_convert_32b_64b(rxq->phc_time, in_timestamp);
+                                       }
+
+                                       *RTE_MBUF_DYNFIELD(rx_pkts[i + 0],
+                                               iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+                                       *RTE_MBUF_DYNFIELD(rx_pkts[i + 1],
+                                               iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+                                       *RTE_MBUF_DYNFIELD(rx_pkts[i + 2],
+                                               iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+                                       *RTE_MBUF_DYNFIELD(rx_pkts[i + 3],
+                                               iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+                                       *RTE_MBUF_DYNFIELD(rx_pkts[i + 4],
+                                               iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+                                       *RTE_MBUF_DYNFIELD(rx_pkts[i + 5],
+                                               iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+                                       *RTE_MBUF_DYNFIELD(rx_pkts[i + 6],
+                                               iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+                                       *RTE_MBUF_DYNFIELD(rx_pkts[i + 7],
+                                               iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+
+                                       max_ret = _mm256_max_epu32(ts_low2, ts_low1);
+                                       cmp_ret = _mm256_andnot_si256(_mm256_cmpeq_epi32(max_ret, ts_low1), cmp_mask);
+
+                                       if (_mm256_testz_si256(cmp_ret, cmp_mask)) {
+                                               inflection_point = 0;
+                                       } else {
+                                               inflection_point = 1;
+                                               while (shift > 1) {
+                                                       shift = shift >> 1;
+                                                       __m256i mask_low;
+                                                       __m256i mask_high;
+                                                       switch (shift) {
+                                                       case 4:
+                                                               mask_low = _mm256_set_epi32(0, 0, 0, 0, mask, mask, mask, mask);
+                                                               mask_high = _mm256_set_epi32(mask, mask, mask, mask, 0, 0, 0, 0);
+                                                               break;
+                                                       case 2:
+                                                               mask_low = _mm256_srli_si256(cmp_mask, 2 * 4);
+                                                               mask_high = _mm256_slli_si256(cmp_mask, 2 * 4);
+                                                               break;
+                                                       case 1:
+                                                               mask_low = _mm256_srli_si256(cmp_mask, 1 * 4);
+                                                               mask_high = _mm256_slli_si256(cmp_mask, 1 * 4);
+                                                               break;
+                                                       }
+                                                       ret = _mm256_testz_si256(cmp_ret, mask_low);
+                                                       if (ret) {
+                                                               ret = _mm256_testz_si256(cmp_ret, mask_high);
+                                                               inflection_point += ret ? 0 : shift;
+                                                               cmp_mask = mask_high;
+                                                       } else {
+                                                               cmp_mask = mask_low;
+                                                       }
+                                               }
+                                       }
+                                       mbuf_flags = _mm256_or_si256(mbuf_flags,
+                                               _mm256_set1_epi32(iavf_timestamp_dynflag));
+                               }
+#endif /* IAVF_RX_TS_OFFLOAD */
+                       } /* if() on RSS hash or RX timestamp parsing */
 #endif
                }
 #endif
@@ -1408,10 +1549,62 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
                                (_mm_cvtsi128_si64
                                        (_mm256_castsi256_si128(status0_7)));
                received += burst;
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+#ifdef IAVF_RX_TS_OFFLOAD
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
+               if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+                       inflection_point = (inflection_point <= burst) ? inflection_point : 0;
+                       switch (inflection_point) {
+                       case 1:
+                               *RTE_MBUF_DYNFIELD(rx_pkts[i + 0],
+                                       iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+                       case 2:
+                               *RTE_MBUF_DYNFIELD(rx_pkts[i + 1],
+                                       iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+                       case 3:
+                               *RTE_MBUF_DYNFIELD(rx_pkts[i + 2],
+                                       iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+                       case 4:
+                               *RTE_MBUF_DYNFIELD(rx_pkts[i + 3],
+                                       iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+                       case 5:
+                               *RTE_MBUF_DYNFIELD(rx_pkts[i + 4],
+                                       iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+                       case 6:
+                               *RTE_MBUF_DYNFIELD(rx_pkts[i + 5],
+                                       iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+                       case 7:
+                               *RTE_MBUF_DYNFIELD(rx_pkts[i + 6],
+                                       iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+                       case 8:
+                               *RTE_MBUF_DYNFIELD(rx_pkts[i + 7],
+                                       iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+                               rxq->phc_time += (uint64_t)1 << 32;
+                       case 0:
+                               break;
+                       default:
+                               PMD_DRV_LOG(ERR, "invalid inflection point for rx timestamp");
+                               break;
+                       }
+
+                       rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+               }
+#pragma GCC diagnostic pop
+#endif
+#endif
                if (burst != IAVF_DESCS_PER_LOOP_AVX)
                        break;
        }
 
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+#ifdef IAVF_RX_TS_OFFLOAD
+       if (received > 0 && (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
+               rxq->phc_time = *RTE_MBUF_DYNFIELD(rx_pkts[received - 1],
+                       iavf_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
+#endif
+#endif
+
        /* update tail pointers */
        rxq->rx_tail += received;
        rxq->rx_tail &= (rxq->nb_rx_desc - 1);
diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h b/drivers/net/iavf/iavf_rxtx_vec_common.h
index cc38f70ce2..ddb13ce8c3 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -231,9 +231,6 @@ iavf_rx_vec_queue_default(struct iavf_rx_queue *rxq)
        if (rxq->proto_xtr != IAVF_PROTO_XTR_NONE)
                return -1;
 
-       if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
-               return -1;
-
        if (rxq->offloads & IAVF_RX_VECTOR_OFFLOAD)
                return IAVF_VECTOR_OFFLOAD_PATH;
 
-- 
2.34.1
