vPMD checks 4 descriptors at a time, but their statuses are not guaranteed to be
consistent because the memory allocated for RX descriptors is a cacheable hugepage.
This patch calculates the number of received packets by scanning the DD bits
sequentially, stopping at the first descriptor whose DD bit is unset.

Signed-off-by: Jianbo Liu <jianbo....@linaro.org>
---
 drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)
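
Note for reviewers (not part of the commit message): below is a minimal
standalone sketch of the sequential DD-bit scan this patch introduces, for
illustration only. The helper name count_dd() and the DESC_DD_MASK value are
assumptions standing in for the driver's IXGBE_VPMD_DESC_DD_MASK, not actual
driver code.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for IXGBE_VPMD_DESC_DD_MASK: the DD bit is the lowest bit of each
 * descriptor's status byte, four descriptors packed into one 32-bit word. */
#define DESC_DD_MASK 0x01010101u

/* Count done descriptors, stopping at the first one whose DD bit is unset,
 * mirroring the loop added by this patch. */
static unsigned int count_dd(uint32_t stat)
{
        unsigned int var = 0;

        stat &= DESC_DD_MASK;
        while (stat & 0x01) {
                ++var;
                stat >>= 8;
        }
        return var;
}

int main(void)
{
        /* Descriptors 0 and 1 done, descriptor 2 not done, descriptor 3 done:
         * only the first two may be returned to the caller. */
        printf("%u\n", count_dd(0x01000101));   /* prints 2 */
        printf("%u\n", count_dd(0x01010101));   /* prints 4 */
        return 0;
}

The 0x01000101 case is exactly where a popcount over the DD mask over-reports
(it would return 3 and hand up a descriptor that is not yet done), which is why
the scan stops at the first unset DD bit instead.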

diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index f96cc85..0b1338d 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -196,7 +196,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
        struct ixgbe_rx_entry *sw_ring;
        uint16_t nb_pkts_recd;
        int pos;
-       uint64_t var;
        uint8x16_t shuf_msk = {
                0xFF, 0xFF,
                0xFF, 0xFF,  /* skip 32 bits pkt_type */
@@ -255,6 +254,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                uint64x2_t mbp1, mbp2;
                uint8x16_t staterr;
                uint16x8_t tmp;
+               uint32_t var = 0;
                uint32_t stat;
 
                /* B.1 load 1 mbuf point */
@@ -349,11 +349,19 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                vst1q_u8((uint8_t *)&rx_pkts[pos]->rx_descriptor_fields1,
                         pkt_mb1);
 
+               stat &= IXGBE_VPMD_DESC_DD_MASK;
+
                /* C.4 calc avaialbe number of desc */
-               var =  __builtin_popcount(stat & IXGBE_VPMD_DESC_DD_MASK);
-               nb_pkts_recd += var;
-               if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
+               if (likely(stat != IXGBE_VPMD_DESC_DD_MASK)) {
+                       while (stat & 0x01) {
+                               ++var;
+                               stat = stat >> 8;
+                       }
+                       nb_pkts_recd += var;
                        break;
+               } else {
+                       nb_pkts_recd += RTE_IXGBE_DESCS_PER_LOOP;
+               }
        }
 
        /* Update our internal tail pointer */
-- 
2.4.11
