From: Beilei Xing <beilei.x...@intel.com>

Move some static inline functions to the header file.

Signed-off-by: Beilei Xing <beilei.x...@intel.com>
---
 drivers/common/idpf/idpf_common_rxtx.c | 246 -------------------------
 drivers/common/idpf/idpf_common_rxtx.h | 246 +++++++++++++++++++++++++
 drivers/common/idpf/version.map        |   3 +
 3 files changed, 249 insertions(+), 246 deletions(-)
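
Note below the cut line (not applied by git am): with these helpers in
idpf_common_rxtx.h, other compilation units in the driver data path can
inline them simply by including the header. The snippet below is only a
minimal sketch of such a caller; the function name and the
status_err0_qw1 field access are illustrative assumptions, only the two
helpers and the descriptor type come from this patch.

#include <rte_mbuf.h>
#include "idpf_common_rxtx.h"

/* Hypothetical caller that inlines the moved splitq Rx helpers. */
static inline void
example_splitq_rx_offload(struct rte_mbuf *mb,
                          volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
{
        uint64_t flags;

        /* Error byte fed to the checksum helper; assumed to mirror the
         * existing splitq Rx path in idpf_dp_splitq_recv_pkts().
         */
        flags = idpf_splitq_rx_csum_offload(rxd->status_err0_qw1);
        flags |= idpf_splitq_rx_rss_offload(mb, rxd);
        mb->ol_flags |= flags;
}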

diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c
index fc87e3e243..50465e76ea 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -442,188 +442,6 @@ idpf_qc_split_rxq_mbufs_alloc(struct idpf_rx_queue *rxq)
        return 0;
 }
 
-#define IDPF_TIMESYNC_REG_WRAP_GUARD_BAND  10000
-/* Helper function to convert a 32b nanoseconds timestamp to 64b. */
-static inline uint64_t
-idpf_tstamp_convert_32b_64b(struct idpf_adapter *ad, uint32_t flag,
-                           uint32_t in_timestamp)
-{
-#ifdef RTE_ARCH_X86_64
-       struct idpf_hw *hw = &ad->hw;
-       const uint64_t mask = 0xFFFFFFFF;
-       uint32_t hi, lo, lo2, delta;
-       uint64_t ns;
-
-       if (flag != 0) {
-               IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
-               IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_EXEC_CMD_M |
-                              PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
-               lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
-               hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
-               /*
-                * On typical system, the delta between lo and lo2 is ~1000ns,
-                * so 10000 seems a large-enough but not overly-big guard band.
-                */
-               if (lo > (UINT32_MAX - IDPF_TIMESYNC_REG_WRAP_GUARD_BAND))
-                       lo2 = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
-               else
-                       lo2 = lo;
-
-               if (lo2 < lo) {
-                       lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
-                       hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
-               }
-
-               ad->time_hw = ((uint64_t)hi << 32) | lo;
-       }
-
-       delta = (in_timestamp - (uint32_t)(ad->time_hw & mask));
-       if (delta > (mask / 2)) {
-               delta = ((uint32_t)(ad->time_hw & mask) - in_timestamp);
-               ns = ad->time_hw - delta;
-       } else {
-               ns = ad->time_hw + delta;
-       }
-
-       return ns;
-#else /* !RTE_ARCH_X86_64 */
-       RTE_SET_USED(ad);
-       RTE_SET_USED(flag);
-       RTE_SET_USED(in_timestamp);
-       return 0;
-#endif /* RTE_ARCH_X86_64 */
-}
-
-#define IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S                           \
-       (RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S) |     \
-        RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S) |     \
-        RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S) |    \
-        RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S))
-
-static inline uint64_t
-idpf_splitq_rx_csum_offload(uint8_t err)
-{
-       uint64_t flags = 0;
-
-       if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S)) == 0))
-               return flags;
-
-       if (likely((err & IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S) == 0)) {
-               flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
-                         RTE_MBUF_F_RX_L4_CKSUM_GOOD);
-               return flags;
-       }
-
-       if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S)) != 0))
-               flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
-       else
-               flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
-
-       if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S)) != 0))
-               flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
-       else
-               flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
-
-       if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S)) != 0))
-               flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
-
-       if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S)) != 0))
-               flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
-       else
-               flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
-
-       return flags;
-}
-
-#define IDPF_RX_FLEX_DESC_ADV_HASH1_S  0
-#define IDPF_RX_FLEX_DESC_ADV_HASH2_S  16
-#define IDPF_RX_FLEX_DESC_ADV_HASH3_S  24
-
-static inline uint64_t
-idpf_splitq_rx_rss_offload(struct rte_mbuf *mb,
-                          volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
-{
-       uint8_t status_err0_qw0;
-       uint64_t flags = 0;
-
-       status_err0_qw0 = rx_desc->status_err0_qw0;
-
-       if ((status_err0_qw0 & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S)) != 0) {
-               flags |= RTE_MBUF_F_RX_RSS_HASH;
-               mb->hash.rss = (rte_le_to_cpu_16(rx_desc->hash1) <<
-                               IDPF_RX_FLEX_DESC_ADV_HASH1_S) |
-                       ((uint32_t)(rx_desc->ff2_mirrid_hash2.hash2) <<
-                        IDPF_RX_FLEX_DESC_ADV_HASH2_S) |
-                       ((uint32_t)(rx_desc->hash3) <<
-                        IDPF_RX_FLEX_DESC_ADV_HASH3_S);
-       }
-
-       return flags;
-}
-
-static void
-idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)
-{
-       volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_ring;
-       volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_desc;
-       uint16_t nb_refill = rx_bufq->rx_free_thresh;
-       uint16_t nb_desc = rx_bufq->nb_rx_desc;
-       uint16_t next_avail = rx_bufq->rx_tail;
-       struct rte_mbuf *nmb[rx_bufq->rx_free_thresh];
-       uint64_t dma_addr;
-       uint16_t delta;
-       int i;
-
-       if (rx_bufq->nb_rx_hold < rx_bufq->rx_free_thresh)
-               return;
-
-       rx_buf_ring = rx_bufq->rx_ring;
-       delta = nb_desc - next_avail;
-       if (unlikely(delta < nb_refill)) {
-               if (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, delta) == 0)) {
-                       for (i = 0; i < delta; i++) {
-                               rx_buf_desc = &rx_buf_ring[next_avail + i];
-                               rx_bufq->sw_ring[next_avail + i] = nmb[i];
-                               dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
-                               rx_buf_desc->hdr_addr = 0;
-                               rx_buf_desc->pkt_addr = dma_addr;
-                       }
-                       nb_refill -= delta;
-                       next_avail = 0;
-                       rx_bufq->nb_rx_hold -= delta;
-               } else {
-                       __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
-                                          nb_desc - next_avail, __ATOMIC_RELAXED);
-                       RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
-                              rx_bufq->port_id, rx_bufq->queue_id);
-                       return;
-               }
-       }
-
-       if (nb_desc - next_avail >= nb_refill) {
-               if (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, nb_refill) == 0)) {
-                       for (i = 0; i < nb_refill; i++) {
-                               rx_buf_desc = &rx_buf_ring[next_avail + i];
-                               rx_bufq->sw_ring[next_avail + i] = nmb[i];
-                               dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
-                               rx_buf_desc->hdr_addr = 0;
-                               rx_buf_desc->pkt_addr = dma_addr;
-                       }
-                       next_avail += nb_refill;
-                       rx_bufq->nb_rx_hold -= nb_refill;
-               } else {
-                       __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
-                                          nb_desc - next_avail, __ATOMIC_RELAXED);
-                       RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
-                              rx_bufq->port_id, rx_bufq->queue_id);
-               }
-       }
-
-       IDPF_PCI_REG_WRITE(rx_bufq->qrx_tail, next_avail);
-
-       rx_bufq->rx_tail = next_avail;
-}
-
 uint16_t
 idpf_dp_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
@@ -749,70 +567,6 @@ idpf_dp_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        return nb_rx;
 }
 
-static inline void
-idpf_split_tx_free(struct idpf_tx_queue *cq)
-{
-       volatile struct idpf_splitq_tx_compl_desc *compl_ring = cq->compl_ring;
-       volatile struct idpf_splitq_tx_compl_desc *txd;
-       uint16_t next = cq->tx_tail;
-       struct idpf_tx_entry *txe;
-       struct idpf_tx_queue *txq;
-       uint16_t gen, qid, q_head;
-       uint16_t nb_desc_clean;
-       uint8_t ctype;
-
-       txd = &compl_ring[next];
-       gen = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
-              IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;
-       if (gen != cq->expected_gen_id)
-               return;
-
-       ctype = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
-                IDPF_TXD_COMPLQ_COMPL_TYPE_M) >> IDPF_TXD_COMPLQ_COMPL_TYPE_S;
-       qid = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
-              IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S;
-       q_head = rte_le_to_cpu_16(txd->q_head_compl_tag.compl_tag);
-       txq = cq->txqs[qid - cq->tx_start_qid];
-
-       switch (ctype) {
-       case IDPF_TXD_COMPLT_RE:
-               /* clean to q_head which indicates be fetched txq desc id + 1.
-                * TODO: need to refine and remove the if condition.
-                */
-               if (unlikely(q_head % 32)) {
-                       TX_LOG(ERR, "unexpected desc (head = %u) completion.",
-                              q_head);
-                       return;
-               }
-               if (txq->last_desc_cleaned > q_head)
-                       nb_desc_clean = (txq->nb_tx_desc - txq->last_desc_cleaned) +
-                               q_head;
-               else
-                       nb_desc_clean = q_head - txq->last_desc_cleaned;
-               txq->nb_free += nb_desc_clean;
-               txq->last_desc_cleaned = q_head;
-               break;
-       case IDPF_TXD_COMPLT_RS:
-               /* q_head indicates sw_id when ctype is 2 */
-               txe = &txq->sw_ring[q_head];
-               if (txe->mbuf != NULL) {
-                       rte_pktmbuf_free_seg(txe->mbuf);
-                       txe->mbuf = NULL;
-               }
-               break;
-       default:
-               TX_LOG(ERR, "unknown completion type.");
-               return;
-       }
-
-       if (++next == cq->nb_tx_desc) {
-               next = 0;
-               cq->expected_gen_id ^= 1;
-       }
-
-       cq->tx_tail = next;
-}
-
 /* Check if the context descriptor is needed for TX offloading */
 static inline uint16_t
 idpf_calc_context_desc(uint64_t flags)
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
index 6cb83fc0a6..a53335616a 100644
--- a/drivers/common/idpf/idpf_common_rxtx.h
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -229,6 +229,252 @@ struct idpf_txq_ops {
 extern int idpf_timestamp_dynfield_offset;
 extern uint64_t idpf_timestamp_dynflag;
 
+static inline void
+idpf_split_tx_free(struct idpf_tx_queue *cq)
+{
+       volatile struct idpf_splitq_tx_compl_desc *compl_ring = cq->compl_ring;
+       volatile struct idpf_splitq_tx_compl_desc *txd;
+       uint16_t next = cq->tx_tail;
+       struct idpf_tx_entry *txe;
+       struct idpf_tx_queue *txq;
+       uint16_t gen, qid, q_head;
+       uint16_t nb_desc_clean;
+       uint8_t ctype;
+
+       txd = &compl_ring[next];
+       gen = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
+              IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;
+       if (gen != cq->expected_gen_id)
+               return;
+
+       ctype = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
+                IDPF_TXD_COMPLQ_COMPL_TYPE_M) >> IDPF_TXD_COMPLQ_COMPL_TYPE_S;
+       qid = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
+              IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S;
+       q_head = rte_le_to_cpu_16(txd->q_head_compl_tag.compl_tag);
+       txq = cq->txqs[qid - cq->tx_start_qid];
+
+       switch (ctype) {
+       case IDPF_TXD_COMPLT_RE:
+               /* clean to q_head which indicates be fetched txq desc id + 1.
+                * TODO: need to refine and remove the if condition.
+                */
+               if (unlikely(q_head % 32)) {
+                       TX_LOG(ERR, "unexpected desc (head = %u) completion.",
+                              q_head);
+                       return;
+               }
+               if (txq->last_desc_cleaned > q_head)
+                       nb_desc_clean = (txq->nb_tx_desc - txq->last_desc_cleaned) +
+                               q_head;
+               else
+                       nb_desc_clean = q_head - txq->last_desc_cleaned;
+               txq->nb_free += nb_desc_clean;
+               txq->last_desc_cleaned = q_head;
+               break;
+       case IDPF_TXD_COMPLT_RS:
+               /* q_head indicates sw_id when ctype is 2 */
+               txe = &txq->sw_ring[q_head];
+               if (txe->mbuf != NULL) {
+                       rte_pktmbuf_free_seg(txe->mbuf);
+                       txe->mbuf = NULL;
+               }
+               break;
+       default:
+               TX_LOG(ERR, "unknown completion type.");
+               return;
+       }
+
+       if (++next == cq->nb_tx_desc) {
+               next = 0;
+               cq->expected_gen_id ^= 1;
+       }
+
+       cq->tx_tail = next;
+}
+
+#define IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S                           \
+       (RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S) |     \
+        RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S) |     \
+        RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S) |    \
+        RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S))
+
+static inline uint64_t
+idpf_splitq_rx_csum_offload(uint8_t err)
+{
+       uint64_t flags = 0;
+
+       if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S)) == 0))
+               return flags;
+
+       if (likely((err & IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S) == 0)) {
+               flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+                         RTE_MBUF_F_RX_L4_CKSUM_GOOD);
+               return flags;
+       }
+
+       if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S)) != 0))
+               flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+       else
+               flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+
+       if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S)) != 0))
+               flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+       else
+               flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+
+       if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S)) != 0))
+               flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
+
+       if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S)) != 0))
+               flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
+       else
+               flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
+
+       return flags;
+}
+
+#define IDPF_RX_FLEX_DESC_ADV_HASH1_S  0
+#define IDPF_RX_FLEX_DESC_ADV_HASH2_S  16
+#define IDPF_RX_FLEX_DESC_ADV_HASH3_S  24
+
+static inline uint64_t
+idpf_splitq_rx_rss_offload(struct rte_mbuf *mb,
+                          volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
+{
+       uint8_t status_err0_qw0;
+       uint64_t flags = 0;
+
+       status_err0_qw0 = rx_desc->status_err0_qw0;
+
+       if ((status_err0_qw0 & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S)) != 0) {
+               flags |= RTE_MBUF_F_RX_RSS_HASH;
+               mb->hash.rss = (rte_le_to_cpu_16(rx_desc->hash1) <<
+                               IDPF_RX_FLEX_DESC_ADV_HASH1_S) |
+                       ((uint32_t)(rx_desc->ff2_mirrid_hash2.hash2) <<
+                        IDPF_RX_FLEX_DESC_ADV_HASH2_S) |
+                       ((uint32_t)(rx_desc->hash3) <<
+                        IDPF_RX_FLEX_DESC_ADV_HASH3_S);
+       }
+
+       return flags;
+}
+
+#define IDPF_TIMESYNC_REG_WRAP_GUARD_BAND  10000
+/* Helper function to convert a 32b nanoseconds timestamp to 64b. */
+static inline uint64_t
+idpf_tstamp_convert_32b_64b(struct idpf_adapter *ad, uint32_t flag,
+                           uint32_t in_timestamp)
+{
+#ifdef RTE_ARCH_X86_64
+       struct idpf_hw *hw = &ad->hw;
+       const uint64_t mask = 0xFFFFFFFF;
+       uint32_t hi, lo, lo2, delta;
+       uint64_t ns;
+
+       if (flag != 0) {
+               IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
+               IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_EXEC_CMD_M |
+                              PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
+               lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
+               hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
+               /*
+                * On typical system, the delta between lo and lo2 is ~1000ns,
+                * so 10000 seems a large-enough but not overly-big guard band.
+                */
+               if (lo > (UINT32_MAX - IDPF_TIMESYNC_REG_WRAP_GUARD_BAND))
+                       lo2 = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
+               else
+                       lo2 = lo;
+
+               if (lo2 < lo) {
+                       lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
+                       hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
+               }
+
+               ad->time_hw = ((uint64_t)hi << 32) | lo;
+       }
+
+       delta = (in_timestamp - (uint32_t)(ad->time_hw & mask));
+       if (delta > (mask / 2)) {
+               delta = ((uint32_t)(ad->time_hw & mask) - in_timestamp);
+               ns = ad->time_hw - delta;
+       } else {
+               ns = ad->time_hw + delta;
+       }
+
+       return ns;
+#else /* !RTE_ARCH_X86_64 */
+       RTE_SET_USED(ad);
+       RTE_SET_USED(flag);
+       RTE_SET_USED(in_timestamp);
+       return 0;
+#endif /* RTE_ARCH_X86_64 */
+}
+
+static inline void
+idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)
+{
+       volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_ring;
+       volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_desc;
+       uint16_t nb_refill = rx_bufq->rx_free_thresh;
+       uint16_t nb_desc = rx_bufq->nb_rx_desc;
+       uint16_t next_avail = rx_bufq->rx_tail;
+       struct rte_mbuf *nmb[rx_bufq->rx_free_thresh];
+       uint64_t dma_addr;
+       uint16_t delta;
+       int i;
+
+       if (rx_bufq->nb_rx_hold < rx_bufq->rx_free_thresh)
+               return;
+
+       rx_buf_ring = rx_bufq->rx_ring;
+       delta = nb_desc - next_avail;
+       if (unlikely(delta < nb_refill)) {
+               if (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, delta) == 0)) {
+                       for (i = 0; i < delta; i++) {
+                               rx_buf_desc = &rx_buf_ring[next_avail + i];
+                               rx_bufq->sw_ring[next_avail + i] = nmb[i];
+                               dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
+                               rx_buf_desc->hdr_addr = 0;
+                               rx_buf_desc->pkt_addr = dma_addr;
+                       }
+                       nb_refill -= delta;
+                       next_avail = 0;
+                       rx_bufq->nb_rx_hold -= delta;
+               } else {
+                       __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
+                                          nb_desc - next_avail, __ATOMIC_RELAXED);
+                       RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
+                              rx_bufq->port_id, rx_bufq->queue_id);
+                       return;
+               }
+       }
+
+       if (nb_desc - next_avail >= nb_refill) {
+               if (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, nb_refill) == 0)) {
+                       for (i = 0; i < nb_refill; i++) {
+                               rx_buf_desc = &rx_buf_ring[next_avail + i];
+                               rx_bufq->sw_ring[next_avail + i] = nmb[i];
+                               dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
+                               rx_buf_desc->hdr_addr = 0;
+                               rx_buf_desc->pkt_addr = dma_addr;
+                       }
+                       next_avail += nb_refill;
+                       rx_bufq->nb_rx_hold -= nb_refill;
+               } else {
+                       __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
+                                          nb_desc - next_avail, __ATOMIC_RELAXED);
+                       RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
+                              rx_bufq->port_id, rx_bufq->queue_id);
+               }
+       }
+
+       IDPF_PCI_REG_WRITE(rx_bufq->qrx_tail, next_avail);
+
+       rx_bufq->rx_tail = next_avail;
+}
+
 __rte_internal
 int idpf_qc_rx_thresh_check(uint16_t nb_desc, uint16_t thresh);
 __rte_internal
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 0729f6b912..8a637b3a0d 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -74,5 +74,8 @@ INTERNAL {
        idpf_vport_rss_config;
        idpf_vport_stats_update;
 
+       idpf_timestamp_dynfield_offset;
+       idpf_timestamp_dynflag;
+
        local: *;
 };
-- 
2.34.1
