Split the AVX512 Rx data path in two: a basic path, and a second path
that additionally supports Rx offload features, including Rx checksum
offload, Rx VLAN offload and RSS offload.

Signed-off-by: Leyi Rong <leyi.r...@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo...@intel.com>
---
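A minimal sketch (not part of this patch) of the application-side
configuration that routes Rx onto the new offload path; port ids, queue
counts and the rest of device setup are assumed to be handled elsewhere:

	#include <rte_ethdev.h>

	static int
	configure_rx_offloads(uint16_t port_id, uint16_t nb_rxq,
			      uint16_t nb_txq)
	{
		struct rte_eth_conf conf = {0};

		/* enable RSS so the delivered hash is meaningful */
		conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
		/* any offload in ICE_RX_VECTOR_OFFLOAD selects the
		 * *_offload Rx burst functions; with none of them set,
		 * the basic vector path is kept
		 */
		conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM |
				       DEV_RX_OFFLOAD_VLAN |
				       DEV_RX_OFFLOAD_RSS_HASH;

		return rte_eth_dev_configure(port_id, nb_rxq, nb_txq,
					     &conf);
	}
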
 drivers/net/ice/ice_rxtx.c            |  46 ++-
 drivers/net/ice/ice_rxtx.h            |   7 +
 drivers/net/ice/ice_rxtx_vec_avx512.c | 496 ++++++++++++++++++++++++++
 drivers/net/ice/ice_rxtx_vec_common.h |  49 ++-
 4 files changed, 567 insertions(+), 31 deletions(-)
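
The resulting selection contract, in sketch form (ice_set_rx_function()
in the diff below implements this; use_scalar_rx() here is a
hypothetical stand-in for the existing non-vector fallback):

	int ret = ice_rx_vec_dev_check(dev);

	if (ret < 0)
		use_scalar_rx(dev);	/* hypothetical fallback helper */
	else if (ret == ICE_VECTOR_OFFLOAD_PATH)
		dev->rx_pkt_burst = ice_recv_pkts_vec_avx512_offload;
	else	/* ICE_VECTOR_PATH (0): basic vector path */
		dev->rx_pkt_burst = ice_recv_pkts_vec_avx512;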

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 73f4342187..16b374ac8d 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -1059,6 +1059,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
        uint32_t ring_size;
        uint16_t len;
        int use_def_burst_func = 1;
+       uint64_t offloads;
 
        if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
            nb_desc > ICE_MAX_RING_DESC ||
@@ -1068,6 +1069,8 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
                return -EINVAL;
        }
 
+       offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
        /* Free memory if needed */
        if (dev->data->rx_queues[queue_idx]) {
                ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
@@ -1088,6 +1091,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->nb_rx_desc = nb_desc;
        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
        rxq->queue_id = queue_idx;
+       rxq->offloads = offloads;
 
        rxq->reg_idx = vsi->base_queue + queue_idx;
        rxq->port_id = dev->data->port_id;
@@ -3052,12 +3056,14 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 #ifdef RTE_ARCH_X86
        struct ice_rx_queue *rxq;
        int i;
+       int rx_check_ret = -1;
        bool use_avx512 = false;
        bool use_avx2 = false;
 
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-               if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed &&
-                               rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+               rx_check_ret = ice_rx_vec_dev_check(dev);
+               if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
+                   rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
                        ad->rx_vec_allowed = true;
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
@@ -3091,11 +3097,19 @@ ice_set_rx_function(struct rte_eth_dev *dev)
                if (dev->data->scattered_rx) {
                        if (use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-                               PMD_DRV_LOG(NOTICE,
-                                       "Using AVX512 Vector Scattered Rx (port %d).",
-                                       dev->data->port_id);
-                               dev->rx_pkt_burst =
-                                       ice_recv_scattered_pkts_vec_avx512;
+                               if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+                                       PMD_DRV_LOG(NOTICE,
+                                               "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
+                                               dev->data->port_id);
+                                       dev->rx_pkt_burst =
+                                               ice_recv_scattered_pkts_vec_avx512_offload;
+                               } else {
+                                       PMD_DRV_LOG(NOTICE,
+                                               "Using AVX512 Vector Scattered Rx (port %d).",
+                                               dev->data->port_id);
+                                       dev->rx_pkt_burst =
+                                               ice_recv_scattered_pkts_vec_avx512;
+                               }
 #endif
                        } else {
                                PMD_DRV_LOG(DEBUG,
@@ -3109,11 +3123,19 @@ ice_set_rx_function(struct rte_eth_dev *dev)
                } else {
                        if (use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-                               PMD_DRV_LOG(NOTICE,
-                                       "Using AVX512 Vector Rx (port %d).",
-                                       dev->data->port_id);
-                               dev->rx_pkt_burst =
-                                       ice_recv_pkts_vec_avx512;
+                               if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+                                       PMD_DRV_LOG(NOTICE,
+                                               "Using AVX512 OFFLOAD Vector Rx (port %d).",
+                                               dev->data->port_id);
+                                       dev->rx_pkt_burst =
+                                               ice_recv_pkts_vec_avx512_offload;
+                               } else {
+                                       PMD_DRV_LOG(NOTICE,
+                                               "Using AVX512 Vector Rx (port %d).",
+                                               dev->data->port_id);
+                                       dev->rx_pkt_burst =
+                                               ice_recv_pkts_vec_avx512;
+                               }
 #endif
                        } else {
                                PMD_DRV_LOG(DEBUG,
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index f72fad0255..b29387ca0f 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -88,6 +88,7 @@ struct ice_rx_queue {
        uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
        ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
        ice_rx_release_mbufs_t rx_rel_mbufs;
+       uint64_t offloads;
 };
 
 struct ice_tx_entry {
@@ -256,9 +257,15 @@ uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
                                uint16_t nb_pkts);
 uint16_t ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
                                  uint16_t nb_pkts);
+uint16_t ice_recv_pkts_vec_avx512_offload(void *rx_queue,
+                                         struct rte_mbuf **rx_pkts,
+                                         uint16_t nb_pkts);
 uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue,
                                            struct rte_mbuf **rx_pkts,
                                            uint16_t nb_pkts);
+uint16_t ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
+                                                   struct rte_mbuf **rx_pkts,
+                                                   uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
                                  uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue,
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index a112d7838e..bf0ac04be6 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -221,6 +221,422 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
                        offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
 
+       uint16_t i, received;
+
+       for (i = 0, received = 0; i < nb_pkts;
+            i += ICE_DESCS_PER_LOOP_AVX,
+            rxdp += ICE_DESCS_PER_LOOP_AVX) {
+               /* step 1, copy over 8 mbuf pointers to rx_pkts array */
+               _mm256_storeu_si256((void *)&rx_pkts[i],
+                                   _mm256_loadu_si256((void *)&sw_ring[i]));
+#ifdef RTE_ARCH_X86_64
+               _mm256_storeu_si256
+                       ((void *)&rx_pkts[i + 4],
+                        _mm256_loadu_si256((void *)&sw_ring[i + 4]));
+#endif
+
+               __m512i raw_desc0_3, raw_desc4_7;
+               __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
+
+               /* load in descriptors, in reverse order */
+               const __m128i raw_desc7 =
+                       _mm_load_si128((void *)(rxdp + 7));
+               rte_compiler_barrier();
+               const __m128i raw_desc6 =
+                       _mm_load_si128((void *)(rxdp + 6));
+               rte_compiler_barrier();
+               const __m128i raw_desc5 =
+                       _mm_load_si128((void *)(rxdp + 5));
+               rte_compiler_barrier();
+               const __m128i raw_desc4 =
+                       _mm_load_si128((void *)(rxdp + 4));
+               rte_compiler_barrier();
+               const __m128i raw_desc3 =
+                       _mm_load_si128((void *)(rxdp + 3));
+               rte_compiler_barrier();
+               const __m128i raw_desc2 =
+                       _mm_load_si128((void *)(rxdp + 2));
+               rte_compiler_barrier();
+               const __m128i raw_desc1 =
+                       _mm_load_si128((void *)(rxdp + 1));
+               rte_compiler_barrier();
+               const __m128i raw_desc0 =
+                       _mm_load_si128((void *)(rxdp + 0));
+
+               raw_desc6_7 =
+                       _mm256_inserti128_si256
+                               (_mm256_castsi128_si256(raw_desc6),
+                                raw_desc7, 1);
+               raw_desc4_5 =
+                       _mm256_inserti128_si256
+                               (_mm256_castsi128_si256(raw_desc4),
+                                raw_desc5, 1);
+               raw_desc2_3 =
+                       _mm256_inserti128_si256
+                               (_mm256_castsi128_si256(raw_desc2),
+                                raw_desc3, 1);
+               raw_desc0_1 =
+                       _mm256_inserti128_si256
+                               (_mm256_castsi128_si256(raw_desc0),
+                                raw_desc1, 1);
+
+               raw_desc4_7 =
+                       _mm512_inserti64x4
+                               (_mm512_castsi256_si512(raw_desc4_5),
+                                raw_desc6_7, 1);
+               raw_desc0_3 =
+                       _mm512_inserti64x4
+                               (_mm512_castsi256_si512(raw_desc0_1),
+                                raw_desc2_3, 1);
+
+               if (split_packet) {
+                       int j;
+
+                       for (j = 0; j < ICE_DESCS_PER_LOOP_AVX; j++)
+                               rte_mbuf_prefetch_part2(rx_pkts[i + j]);
+               }
+
+               /**
+                * convert descriptors 0-7 into mbufs, re-arrange fields.
+                * Then write into the mbuf.
+                */
+               __m512i mb4_7 = _mm512_shuffle_epi8(raw_desc4_7, shuf_msk);
+               __m512i mb0_3 = _mm512_shuffle_epi8(raw_desc0_3, shuf_msk);
+
+               mb4_7 = _mm512_add_epi32(mb4_7, crc_adjust);
+               mb0_3 = _mm512_add_epi32(mb0_3, crc_adjust);
+
+               /**
+                * to get packet types, the ptype field is located in
+                * bits 16-25 of each 128-bit descriptor
+                */
+               const __m512i ptype_mask =
+                       _mm512_set1_epi16(ICE_RX_FLEX_DESC_PTYPE_M);
+
+               /* mask everything except the ptype field in each 128 bits */
+               const __m512i ptypes4_7 =
+                       _mm512_and_si512(raw_desc4_7, ptype_mask);
+               const __m512i ptypes0_3 =
+                       _mm512_and_si512(raw_desc0_3, ptype_mask);
+
+               const __m256i ptypes6_7 =
+                       _mm512_extracti64x4_epi64(ptypes4_7, 1);
+               const __m256i ptypes4_5 =
+                       _mm512_extracti64x4_epi64(ptypes4_7, 0);
+               const __m256i ptypes2_3 =
+                       _mm512_extracti64x4_epi64(ptypes0_3, 1);
+               const __m256i ptypes0_1 =
+                       _mm512_extracti64x4_epi64(ptypes0_3, 0);
+               const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9);
+               const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1);
+               const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9);
+               const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1);
+               const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9);
+               const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1);
+               const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9);
+               const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1);
+
+               const __m512i ptype4_7 = _mm512_set_epi32
+                       (0, 0, 0, ptype_tbl[ptype7],
+                        0, 0, 0, ptype_tbl[ptype6],
+                        0, 0, 0, ptype_tbl[ptype5],
+                        0, 0, 0, ptype_tbl[ptype4]);
+               const __m512i ptype0_3 = _mm512_set_epi32
+                       (0, 0, 0, ptype_tbl[ptype3],
+                        0, 0, 0, ptype_tbl[ptype2],
+                        0, 0, 0, ptype_tbl[ptype1],
+                        0, 0, 0, ptype_tbl[ptype0]);
+
+               mb4_7 = _mm512_mask_blend_epi32(0x1111, mb4_7, ptype4_7);
+               mb0_3 = _mm512_mask_blend_epi32(0x1111, mb0_3, ptype0_3);
+
+               __m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0);
+               __m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1);
+               __m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0);
+               __m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1);
+
+               /**
+                * use permute/extract to get status content
+                * After the operations, the packets status flags are in the
+                * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
+                */
+               /* merge the status bits into one register */
+               const __m512i status_permute_msk = _mm512_set_epi32
+                       (0, 0, 0, 0,
+                        0, 0, 0, 0,
+                        22, 30, 6, 14,
+                        18, 26, 2, 10);
+               const __m512i raw_status0_7 = _mm512_permutex2var_epi32
+                       (raw_desc4_7, status_permute_msk, raw_desc0_3);
+               __m256i status0_7 = _mm512_extracti64x4_epi64
+                       (raw_status0_7, 0);
+
+               __m256i mbuf_flags = _mm256_set1_epi32(0);
+
+               if (rxq->fdir_enabled) {
+                       const __m256i fdir_id4_7 =
+                               _mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5);
+
+                       const __m256i fdir_id0_3 =
+                               _mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1);
+
+                       const __m256i fdir_id0_7 =
+                               _mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
+
+                       mbuf_flags =
+                               ice_flex_rxd_to_fdir_flags_vec_avx512
+                                       (fdir_id0_7);
+
+                       /* write to mbuf: have to use scalar store here */
+                       rx_pkts[i + 0]->hash.fdir.hi =
+                               _mm256_extract_epi32(fdir_id0_7, 3);
+
+                       rx_pkts[i + 1]->hash.fdir.hi =
+                               _mm256_extract_epi32(fdir_id0_7, 7);
+
+                       rx_pkts[i + 2]->hash.fdir.hi =
+                               _mm256_extract_epi32(fdir_id0_7, 2);
+
+                       rx_pkts[i + 3]->hash.fdir.hi =
+                               _mm256_extract_epi32(fdir_id0_7, 6);
+
+                       rx_pkts[i + 4]->hash.fdir.hi =
+                               _mm256_extract_epi32(fdir_id0_7, 1);
+
+                       rx_pkts[i + 5]->hash.fdir.hi =
+                               _mm256_extract_epi32(fdir_id0_7, 5);
+
+                       rx_pkts[i + 6]->hash.fdir.hi =
+                               _mm256_extract_epi32(fdir_id0_7, 0);
+
+                       rx_pkts[i + 7]->hash.fdir.hi =
+                               _mm256_extract_epi32(fdir_id0_7, 4);
+               } /* if() on fdir_enabled */
+
+               /**
+                * At this point, we have the 8 sets of flags in the low 16-bits
+                * of each 32-bit value in mbuf_flags.
+                * We want to extract these, and merge them with the mbuf init
+                * data so we can do a single write to the mbuf to set the flags
+                * and all the other initialization fields. Extracting the
+                * appropriate flags means that we have to do a shift and blend
+                * for each mbuf before we do the write. However, we can also
+                * add in the previously computed rx_descriptor fields to
+                * make a single 256-bit write per mbuf
+                */
+               /* check the structure matches expectations */
+               RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+                                offsetof(struct rte_mbuf, rearm_data) + 8);
+               RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+                                RTE_ALIGN(offsetof(struct rte_mbuf,
+                                                   rearm_data),
+                                          16));
+               /* build up data and do writes */
+               __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
+                       rearm6, rearm7;
+
+               rearm6 = _mm256_blend_epi32(mbuf_init,
+                                           _mm256_slli_si256(mbuf_flags, 8),
+                                           0x04);
+               rearm4 = _mm256_blend_epi32(mbuf_init,
+                                           _mm256_slli_si256(mbuf_flags, 4),
+                                           0x04);
+               rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
+               rearm0 = _mm256_blend_epi32(mbuf_init,
+                                           _mm256_srli_si256(mbuf_flags, 4),
+                                           0x04);
+
+               /* permute to add in the rx_descriptor e.g. rss fields */
+               rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
+               rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
+               rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
+               rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
+
+               /* write to mbuf */
+               _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
+                                   rearm6);
+               _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
+                                   rearm4);
+               _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
+                                   rearm2);
+               _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
+                                   rearm0);
+
+               /* repeat for the odd mbufs */
+               const __m256i odd_flags =
+                       _mm256_castsi128_si256
+                               (_mm256_extracti128_si256(mbuf_flags, 1));
+               rearm7 = _mm256_blend_epi32(mbuf_init,
+                                           _mm256_slli_si256(odd_flags, 8),
+                                           0x04);
+               rearm5 = _mm256_blend_epi32(mbuf_init,
+                                           _mm256_slli_si256(odd_flags, 4),
+                                           0x04);
+               rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
+               rearm1 = _mm256_blend_epi32(mbuf_init,
+                                           _mm256_srli_si256(odd_flags, 4),
+                                           0x04);
+
+               /* since odd mbufs are already in hi 128-bits use blend */
+               rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
+               rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
+               rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
+               rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
+               /* again write to mbufs */
+               _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
+                                   rearm7);
+               _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
+                                   rearm5);
+               _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
+                                   rearm3);
+               _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
+                                   rearm1);
+
+               /* extract and record EOP bit */
+               if (split_packet) {
+                       const __m128i eop_mask =
+                               _mm_set1_epi16(1 << ICE_RX_DESC_STATUS_EOF_S);
+                       const __m256i eop_bits256 = _mm256_and_si256(status0_7,
+                                                                    eop_check);
+                       /* pack status bits into a single 128-bit register */
+                       const __m128i eop_bits =
+                               _mm_packus_epi32
+                                       (_mm256_castsi256_si128(eop_bits256),
+                                        _mm256_extractf128_si256(eop_bits256,
+                                                                 1));
+                       /**
+                        * flip bits, and mask out the EOP bit, which is now
+                        * a split-packet bit, i.e. !EOP rather than EOP.
+                        */
+                       __m128i split_bits = _mm_andnot_si128(eop_bits,
+                                       eop_mask);
+                       /**
+                        * eop bits are out of order, so we need to shuffle them
+                        * back into order again. In doing so, only use low 8
+                        * bits, which acts like another pack instruction
+                        * The original order is (hi->lo): 1,3,5,7,0,2,4,6
+                        * [Since we use epi8, the 16-bit positions are
+                        * multiplied by 2 in the eop_shuffle value.]
+                        */
+                       __m128i eop_shuffle =
+                               _mm_set_epi8(/* zero hi 64b */
+                                            0xFF, 0xFF, 0xFF, 0xFF,
+                                            0xFF, 0xFF, 0xFF, 0xFF,
+                                            /* move values to lo 64b */
+                                            8, 0, 10, 2,
+                                            12, 4, 14, 6);
+                       split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
+                       *(uint64_t *)split_packet =
+                               _mm_cvtsi128_si64(split_bits);
+                       split_packet += ICE_DESCS_PER_LOOP_AVX;
+               }
+
+               /* perform dd_check */
+               status0_7 = _mm256_and_si256(status0_7, dd_check);
+               status0_7 = _mm256_packs_epi32(status0_7,
+                                              _mm256_setzero_si256());
+
+               uint64_t burst = __builtin_popcountll
+                                       (_mm_cvtsi128_si64
+                                               (_mm256_extracti128_si256
+                                                       (status0_7, 1)));
+               burst += __builtin_popcountll
+                               (_mm_cvtsi128_si64
+                                       (_mm256_castsi256_si128(status0_7)));
+               received += burst;
+               if (burst != ICE_DESCS_PER_LOOP_AVX)
+                       break;
+       }
+
+       /* update tail pointers */
+       rxq->rx_tail += received;
+       rxq->rx_tail &= (rxq->nb_rx_desc - 1);
+       if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
+               rxq->rx_tail--;
+               received--;
+       }
+       rxq->rxrearm_nb += received;
+       return received;
+}
+
+static inline uint16_t
+_ice_recv_raw_pkts_vec_avx512_offload(struct ice_rx_queue *rxq,
+                                     struct rte_mbuf **rx_pkts,
+                                     uint16_t nb_pkts, uint8_t *split_packet)
+{
+       const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+       const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
+                       0, rxq->mbuf_initializer);
+       struct ice_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail];
+       volatile union ice_rx_flex_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
+
+       rte_prefetch0(rxdp);
+
+       /* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP_AVX */
+       nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP_AVX);
+
+       /* See if we need to rearm the RX queue - gives the prefetch a bit
+        * of time to act
+        */
+       if (rxq->rxrearm_nb > ICE_RXQ_REARM_THRESH)
+               ice_rxq_rearm(rxq);
+
+       /* Before we start moving massive data around, check to see if
+        * there is actually a packet available
+        */
+       if (!(rxdp->wb.status_error0 &
+                       rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
+               return 0;
+
+       /* constants used in processing loop */
+       const __m512i crc_adjust =
+               _mm512_set4_epi32
+                       (0,             /* ignore non-length fields */
+                        -rxq->crc_len, /* sub crc on data_len */
+                        -rxq->crc_len, /* sub crc on pkt_len */
+                        0              /* ignore non-length fields */
+                       );
+
+       /* 8 packets DD mask, LSB in each 32-bit value */
+       const __m256i dd_check = _mm256_set1_epi32(1);
+
+       /* 8 packets EOP mask, second-LSB in each 32-bit value */
+       const __m256i eop_check = _mm256_slli_epi32(dd_check,
+                       ICE_RX_DESC_STATUS_EOF_S);
+
+       /* mask to shuffle from desc. to mbuf (4 descriptors) */
+       const __m512i shuf_msk =
+               _mm512_set4_epi32
+                       (/* rss hash parsed separately */
+                        0xFFFFFFFF,
+                        /* octet 10~11, 16 bits vlan_macip */
+                        /* octet 4~5, 16 bits data_len */
+                        11 << 24 | 10 << 16 | 5 << 8 | 4,
+                        /* skip hi 16 bits pkt_len, zero out */
+                        /* octet 4~5, 16 bits pkt_len */
+                        0xFFFF << 16 | 5 << 8 | 4,
+                        /* pkt_type set as unknown */
+                        0xFFFFFFFF
+                       );
+
+       /**
+        * compile-time check the above crc and shuffle layout is correct.
+        * NOTE: the first field (lowest address) is given last in set_epi
+        * calls above.
+        */
+       RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+                       offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+       RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+                       offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+       RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+                       offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+       RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+                       offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+
+       /* following code block is for Rx Checksum Offload */
        /* Status/Error flag masks */
        /**
         * mask everything except Checksum Reports, RSS indication
@@ -806,6 +1222,18 @@ ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
        return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
+/**
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+ice_recv_pkts_vec_avx512_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+                                uint16_t nb_pkts)
+{
+       return _ice_recv_raw_pkts_vec_avx512_offload(rx_queue, rx_pkts,
+                                                    nb_pkts, NULL);
+}
+
 /**
  * vPMD receive routine that reassembles single burst of 32 scattered packets
  * Notice:
@@ -847,6 +1275,48 @@ ice_recv_scattered_burst_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
                                             &split_flags[i]);
 }
 
+/**
+ * vPMD receive routine that reassembles single burst of 32 scattered packets
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+static uint16_t
+ice_recv_scattered_burst_vec_avx512_offload(void *rx_queue,
+                                           struct rte_mbuf **rx_pkts,
+                                           uint16_t nb_pkts)
+{
+       struct ice_rx_queue *rxq = rx_queue;
+       uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};
+
+       /* get some new buffers */
+       uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512_offload(rxq,
+                               rx_pkts, nb_pkts, split_flags);
+       if (nb_bufs == 0)
+               return 0;
+
+       /* happy day case, full burst + no packets to be joined */
+       const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+       if (!rxq->pkt_first_seg &&
+           split_fl64[0] == 0 && split_fl64[1] == 0 &&
+           split_fl64[2] == 0 && split_fl64[3] == 0)
+               return nb_bufs;
+
+       /* reassemble any packets that need reassembly */
+       unsigned int i = 0;
+
+       if (!rxq->pkt_first_seg) {
+               /* find the first split flag, and only reassemble then */
+               while (i < nb_bufs && !split_flags[i])
+                       i++;
+               if (i == nb_bufs)
+                       return nb_bufs;
+               rxq->pkt_first_seg = rx_pkts[i];
+       }
+       return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+                                            &split_flags[i]);
+}
+
 /**
  * vPMD receive routine that reassembles scattered packets.
  * Main receive routine that can handle arbitrary burst sizes
@@ -871,6 +1341,32 @@ ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
                                rx_pkts + retval, nb_pkts);
 }
 
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ * Main receive routine that can handle arbitrary burst sizes
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
+                                          struct rte_mbuf **rx_pkts,
+                                          uint16_t nb_pkts)
+{
+       uint16_t retval = 0;
+
+       while (nb_pkts > ICE_VPMD_RX_BURST) {
+               uint16_t burst =
+                       ice_recv_scattered_burst_vec_avx512_offload(rx_queue,
+                               rx_pkts + retval, ICE_VPMD_RX_BURST);
+               retval += burst;
+               nb_pkts -= burst;
+               if (burst < ICE_VPMD_RX_BURST)
+                       return retval;
+       }
+       return retval + ice_recv_scattered_burst_vec_avx512_offload(rx_queue,
+                               rx_pkts + retval, nb_pkts);
+}
+
 static __rte_always_inline int
 ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 {
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index d5419e6ba4..e555f797e1 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -243,6 +243,27 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
        return 0;
 }
 
+#define ICE_TX_NO_VECTOR_FLAGS (                       \
+               DEV_TX_OFFLOAD_MULTI_SEGS |             \
+               DEV_TX_OFFLOAD_TCP_TSO)
+
+#define ICE_TX_VECTOR_OFFLOAD (                                \
+               DEV_TX_OFFLOAD_VLAN_INSERT |            \
+               DEV_TX_OFFLOAD_QINQ_INSERT |            \
+               DEV_TX_OFFLOAD_IPV4_CKSUM |             \
+               DEV_TX_OFFLOAD_SCTP_CKSUM |             \
+               DEV_TX_OFFLOAD_UDP_CKSUM |              \
+               DEV_TX_OFFLOAD_TCP_CKSUM)
+
+#define ICE_RX_VECTOR_OFFLOAD (                                \
+               DEV_RX_OFFLOAD_CHECKSUM |               \
+               DEV_RX_OFFLOAD_SCTP_CKSUM |             \
+               DEV_RX_OFFLOAD_VLAN |                   \
+               DEV_RX_OFFLOAD_RSS_HASH)
+
+#define ICE_VECTOR_PATH                0
+#define ICE_VECTOR_OFFLOAD_PATH        1
+
 static inline int
 ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 {
@@ -261,23 +282,11 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
        if (rxq->proto_xtr != PROTO_XTR_NONE)
                return -1;
 
-       return 0;
-}
-
-#define ICE_TX_NO_VECTOR_FLAGS (                       \
-               DEV_TX_OFFLOAD_MULTI_SEGS |             \
-               DEV_TX_OFFLOAD_TCP_TSO)
-
-#define ICE_TX_VECTOR_OFFLOAD (                                \
-               DEV_TX_OFFLOAD_VLAN_INSERT |            \
-               DEV_TX_OFFLOAD_QINQ_INSERT |            \
-               DEV_TX_OFFLOAD_IPV4_CKSUM |             \
-               DEV_TX_OFFLOAD_SCTP_CKSUM |             \
-               DEV_TX_OFFLOAD_UDP_CKSUM |              \
-               DEV_TX_OFFLOAD_TCP_CKSUM)
+       if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
+               return ICE_VECTOR_OFFLOAD_PATH;
 
-#define ICE_VECTOR_PATH                0
-#define ICE_VECTOR_OFFLOAD_PATH        1
+       return ICE_VECTOR_PATH;
+}
 
 static inline int
 ice_tx_vec_queue_default(struct ice_tx_queue *txq)
@@ -303,14 +312,16 @@ ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
        int i;
        struct ice_rx_queue *rxq;
+       int ret = 0;
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
-               if (ice_rx_vec_queue_default(rxq))
-                       return -1;
+               ret = ice_rx_vec_queue_default(rxq);
+               if (ret < 0)
+                       break;
        }
 
-       return 0;
+       return ret;
 }
 
 static inline int
-- 
2.17.1
