From: "Chen Jing D(Mark)" <jing.d.c...@intel.com>

Add function fm10k_recv_raw_pkts_vec to parse raw packets, which may
include chained (multi-segment) packets.
Add function fm10k_recv_pkts_vec to receive single-mbuf packets.
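
For illustration, a minimal polling-loop sketch of the single-mbuf
path (not part of this patch; "rxq" and the burst size of 32 are
placeholders):

    struct rte_mbuf *bufs[32];
    uint16_t i, nb_rx;

    /* rxq is an initialized fm10k RX queue */
    nb_rx = fm10k_recv_pkts_vec(rxq, bufs, 32);
    for (i = 0; i < nb_rx; i++) {
            /* each bufs[i] is a single, complete packet */
            rte_pktmbuf_free(bufs[i]);
    }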

Signed-off-by: Chen Jing D(Mark) <jing.d.chen at intel.com>
---
 drivers/net/fm10k/fm10k.h          |    1 +
 drivers/net/fm10k/fm10k_rxtx_vec.c |  201 ++++++++++++++++++++++++++++++++++++
 2 files changed, 202 insertions(+), 0 deletions(-)

diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
index 5df7960..f04ba2c 100644
--- a/drivers/net/fm10k/fm10k.h
+++ b/drivers/net/fm10k/fm10k.h
@@ -327,4 +327,5 @@ uint16_t fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts);

 int fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq);
+uint16_t fm10k_recv_pkts_vec(void *, struct rte_mbuf **, uint16_t);
 #endif
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index 581a309..6127cf9 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -281,3 +281,204 @@ fm10k_rxq_rearm(struct fm10k_rx_queue *rxq)
        /* Update the tail pointer on the NIC */
        FM10K_PCI_REG_WRITE(rxq->tail_ptr, rx_id);
 }
+
+static inline uint16_t
+fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+               uint16_t nb_pkts, uint8_t *split_packet)
+{
+       volatile union fm10k_rx_desc *rxdp;
+       struct rte_mbuf **mbufp;
+       uint16_t nb_pkts_recd;
+       int pos;
+       struct fm10k_rx_queue *rxq = rx_queue;
+       uint64_t var;
+       __m128i shuf_msk;
+       __m128i dd_check, eop_check;
+       uint16_t next_dd;
+
+       next_dd = rxq->next_dd;
+
+       /* Just the act of getting into the function from the application is
+        * going to cost about 7 cycles
+        */
+       rxdp = rxq->hw_ring + next_dd;
+
+       _mm_prefetch((const void *)rxdp, _MM_HINT_T0);
+
+       /* See if we need to rearm the RX queue - gives the prefetch a bit
+        * of time to act
+        */
+       if (rxq->rxrearm_nb > RTE_FM10K_RXQ_REARM_THRESH)
+               fm10k_rxq_rearm(rxq);
+
+       /* Before we start moving massive data around, check to see if
+        * there is actually a packet available
+        */
+       if (!(rxdp->d.staterr & FM10K_RXD_STATUS_DD))
+               return 0;
+
+       /* Vector RX will process 4 packets at a time, strip the unaligned
+        * tail in case nb_pkts is not a multiple of 4.
+        */
+       nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_FM10K_DESCS_PER_LOOP);
+
+       /* 4 packets DD mask */
+       dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
+
+       /* 4 packets EOP mask */
+       eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
+
+       /* mask to shuffle from desc. to mbuf */
+       shuf_msk = _mm_set_epi8(
+               7, 6, 5, 4,  /* octet 4~7, 32bits rss */
+               15, 14,      /* octet 14~15, 16 bits vlan_tci */
+               13, 12,      /* octet 12~13, 16 bits data_len */
+               0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
+               13, 12,      /* octet 12~13, low 16 bits pkt_len */
+               0xFF, 0xFF,  /* skip high 16 bits pkt_type */
+               0xFF, 0xFF   /* Skip pkt_type field in shuffle operation */
+               );
+
+       /* Cache is empty -> need to scan the buffer rings, but first move
+        * the next 'n' mbufs into the cache
+        */
+       mbufp = &rxq->sw_ring[next_dd];
+
+       /* A. load 4 packets in one loop
+        * [A*. mask out 4 unused dirty fields in desc]
+        * B. copy 4 mbuf pointers from sw_ring to rx_pkts
+        * C. calc the number of DD bits among the 4 packets
+        * [C*. extract the end-of-packet bit, if requested]
+        * D. fill info. from desc to mbuf
+        */
+       for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+                       pos += RTE_FM10K_DESCS_PER_LOOP,
+                       rxdp += RTE_FM10K_DESCS_PER_LOOP) {
+               __m128i descs0[RTE_FM10K_DESCS_PER_LOOP];
+               __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+               __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
+               __m128i mbp1, mbp2; /* two mbuf pointers in one XMM reg. */
+
+               /* B.1 load 2 mbuf pointers */
+               mbp1 = _mm_loadu_si128((__m128i *)&mbufp[pos]);
+
+               /* Read desc statuses backwards to avoid race condition */
+               /* A.1 load 4 pkts desc */
+               descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+
+               /* B.2 copy 2 mbuf pointers into rx_pkts */
+               _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+               /* B.1 load 2 mbuf pointers */
+               mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]);
+
+               descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+               /* A.1 load remaining 2 pkts desc */
+               descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+               descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
+
+               /* B.2 copy 2 mbuf pointers into rx_pkts */
+               _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+
+               /* avoid compiler reorder optimization */
+               rte_compiler_barrier();
+
+               if (split_packet) {
+                       rte_prefetch0(&rx_pkts[pos]->cacheline1);
+                       rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
+                       rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
+                       rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
+               }
+
+               /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+               pkt_mb4 = _mm_shuffle_epi8(descs0[3], shuf_msk);
+               pkt_mb3 = _mm_shuffle_epi8(descs0[2], shuf_msk);
+
+               /* C.1 4=>2 filter staterr info only */
+               sterr_tmp2 = _mm_unpackhi_epi32(descs0[3], descs0[2]);
+               /* C.1 4=>2 filter staterr info only */
+               sterr_tmp1 = _mm_unpackhi_epi32(descs0[1], descs0[0]);
+
+               /* set ol_flags with vlan packet type */
+               fm10k_desc_to_olflags_v(descs0, &rx_pkts[pos]);
+
+               /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+               pkt_mb2 = _mm_shuffle_epi8(descs0[1], shuf_msk);
+               pkt_mb1 = _mm_shuffle_epi8(descs0[0], shuf_msk);
+
+               /* C.2 get 4 pkts staterr value  */
+               zero = _mm_xor_si128(dd_check, dd_check);
+               staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
+
+               /* D.3 copy final 3,4 data to rx_pkts */
+               _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
+                               pkt_mb4);
+               _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
+                               pkt_mb3);
+
+               /* C* extract and record EOP bit */
+               if (split_packet) {
+                       __m128i eop_shuf_mask = _mm_set_epi8(
+                                       0xFF, 0xFF, 0xFF, 0xFF,
+                                       0xFF, 0xFF, 0xFF, 0xFF,
+                                       0xFF, 0xFF, 0xFF, 0xFF,
+                                       0x04, 0x0C, 0x00, 0x08
+                                       );
+
+                       /* and with mask to extract bits, flipping 1-0 */
+                       __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+                       /* the staterr values are not in order; for counting
+                        * the DD bits the order doesn't matter. However, for
+                        * end-of-packet tracking we do care, so shuffle. This
+                        * also compresses the 32-bit values to 8-bit
+                        */
+                       eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+                       /* store the resulting 32-bit value */
+                       *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+                       split_packet += RTE_FM10K_DESCS_PER_LOOP;
+
+                       /* zero-out next pointers */
+                       rx_pkts[pos]->next = NULL;
+                       rx_pkts[pos + 1]->next = NULL;
+                       rx_pkts[pos + 2]->next = NULL;
+                       rx_pkts[pos + 3]->next = NULL;
+               }
+
+               /* C.3 calc available number of desc */
+               staterr = _mm_and_si128(staterr, dd_check);
+               staterr = _mm_packs_epi32(staterr, zero);
+
+               /* D.3 copy final 1,2 data to rx_pkts */
+               _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
+                               pkt_mb2);
+               _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+                               pkt_mb1);
+
+               fm10k_desc_to_pktype_v(descs0, &rx_pkts[pos]);
+
+               /* C.4 calc available number of desc */
+               var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+               nb_pkts_recd += var;
+               if (likely(var != RTE_FM10K_DESCS_PER_LOOP))
+                       break;
+       }
+
+       /* Update our internal tail pointer */
+       rxq->next_dd = (uint16_t)(rxq->next_dd + nb_pkts_recd);
+       rxq->next_dd = (uint16_t)(rxq->next_dd & (rxq->nb_desc - 1));
+       rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+       return nb_pkts_recd;
+}
+
+/* vPMD receive routine
+ *
+ * Notice:
+ * - doesn't support ol_flags for rss and csum err
+ */
+uint16_t
+fm10k_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+               uint16_t nb_pkts)
+{
+       return fm10k_recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
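
For reviewers, a standalone sketch of what shuf_msk does to a single
write-back descriptor. The rx_fields struct is a stand-in assumed to
mirror the mbuf's rx_descriptor_fields1 layout (packet_type stays
zero here; fm10k_desc_to_pktype_v fills it separately). Build with
gcc -mssse3:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <tmmintrin.h>

    /* assumed mirror of rx_descriptor_fields1 */
    struct rx_fields {
            uint32_t packet_type;
            uint32_t pkt_len;
            uint16_t data_len;
            uint16_t vlan_tci;
            uint32_t rss;
    };

    int main(void)
    {
            uint8_t desc[16] = {0};
            struct rx_fields out;
            uint32_t rss = 0xdeadbeef;
            uint16_t len = 60, vlan = 100;
            __m128i shuf_msk = _mm_set_epi8(
                            7, 6, 5, 4, 15, 14, 13, 12,
                            0xFF, 0xFF, 13, 12,
                            0xFF, 0xFF, 0xFF, 0xFF);

            /* fake descriptor: rss at octets 4~7, length at
             * octets 12~13, vlan tag at octets 14~15 */
            memcpy(desc + 4, &rss, 4);
            memcpy(desc + 12, &len, 2);
            memcpy(desc + 14, &vlan, 2);

            __m128i mb = _mm_shuffle_epi8(
                            _mm_loadu_si128((const __m128i *)desc),
                            shuf_msk);
            _mm_storeu_si128((__m128i *)&out, mb);

            printf("pkt_len=%u data_len=%u vlan=%u rss=0x%08x\n",
                   out.pkt_len, out.data_len, out.vlan_tci, out.rss);
            /* -> pkt_len=60 data_len=60 vlan=100 rss=0xdeadbeef */
            return 0;
    }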
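
Likewise, a self-contained sketch of the C*/C.3/C.4 staterr handling
on hand-built status words. Assumptions: bit 0 is DD and bit 1 is EOP
(matching dd_check/eop_check), and after the two unpacks lanes 0..3
hold the status words of descriptors 1, 3, 0, 2, which is why the
byte shuffle restores packet order:

    #include <stdio.h>
    #include <tmmintrin.h>

    int main(void)
    {
            /* lanes 0..3 = staterr of desc 1, 3, 0, 2; simulate desc 0
             * done but chained (DD), desc 1 done with EOP (DD|EOP),
             * desc 2 and 3 not done */
            __m128i staterr = _mm_set_epi32(0, 1, 0, 3);
            __m128i dd_check = _mm_set_epi64x(0x0000000100000001LL,
                            0x0000000100000001LL);
            __m128i eop_check = _mm_set_epi64x(0x0000000200000002LL,
                            0x0000000200000002LL);
            __m128i eop_shuf_mask = _mm_set_epi8(
                            0xFF, 0xFF, 0xFF, 0xFF,
                            0xFF, 0xFF, 0xFF, 0xFF,
                            0xFF, 0xFF, 0xFF, 0xFF,
                            0x04, 0x0C, 0x00, 0x08);

            /* C*: invert+mask the EOP bits, shuffle them back into
             * packet order, one byte per descriptor; non-zero means
             * "more fragments follow" */
            __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
            eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
            printf("split flags: 0x%08x\n", _mm_cvtsi128_si32(eop_bits));
            /* -> 0x02020002: desc 0 is split, desc 1 ends a packet;
             * bytes past the DD run are dead, the burst stops there */

            /* C.3/C.4: keep DD bits, narrow 4x32 to 4x16 in the low
             * half, popcount the low 64 bits */
            staterr = _mm_and_si128(staterr, dd_check);
            staterr = _mm_packs_epi32(staterr, _mm_setzero_si128());
            printf("descs done: %d\n",
                   (int)__builtin_popcountll(_mm_cvtsi128_si64(staterr)));
            /* -> 2, so the receive loop breaks with var != 4 */
            return 0;
    }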
-- 
1.7.7.6
