The variable mbuf_alloc_failed is updated by more than one thread, so
change its type to rte_atomic64_t and access it through the
rte_atomic64_xx() functions to avoid races between threads.

Signed-off-by: Mingxia Liu <mingxia....@intel.com>
---
 drivers/common/idpf/idpf_common_rxtx.c        | 10 ++++++----
 drivers/common/idpf/idpf_common_rxtx.h        |  2 +-
 drivers/common/idpf/idpf_common_rxtx_avx512.c | 12 ++++++++----
 drivers/net/idpf/idpf_ethdev.c                |  5 +++--
 4 files changed, 18 insertions(+), 11 deletions(-)
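
Note (illustrative, not part of the patch): below is a minimal sketch of
the rte_atomic64 counter pattern this change adopts. The struct and
function names (demo_rx_stats, demo_record_failures, demo_read_and_clear)
are made up for the example; only the rte_atomic64_*() calls correspond
to the DPDK API used in the diff.

#include <rte_atomic.h>

struct demo_rx_stats {
        rte_atomic64_t mbuf_alloc_failed;
};

/* Called from data-path threads: atomic read-modify-write. */
static void
demo_record_failures(struct demo_rx_stats *stats, int64_t n)
{
        rte_atomic64_add(&stats->mbuf_alloc_failed, n);
}

/* Called from the stats/control path: read the counter, then clear it. */
static int64_t
demo_read_and_clear(struct demo_rx_stats *stats)
{
        int64_t val = rte_atomic64_read(&stats->mbuf_alloc_failed);

        rte_atomic64_set(&stats->mbuf_alloc_failed, 0);
        return val;
}

The read-then-set pair mirrors the separate get/reset helpers touched in
idpf_ethdev.c; it is not a single atomic exchange.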

diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c
index cec99d2951..dd8e761834 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -592,7 +592,8 @@ idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)
                        next_avail = 0;
                        rx_bufq->nb_rx_hold -= delta;
                } else {
-                       rx_bufq->rx_stats.mbuf_alloc_failed += nb_desc - next_avail;
+                       rte_atomic64_add(&(rx_bufq->rx_stats.mbuf_alloc_failed),
+                                        nb_desc - next_avail);
                        RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
                               rx_bufq->port_id, rx_bufq->queue_id);
                        return;
@@ -611,7 +612,8 @@ idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)
                        next_avail += nb_refill;
                        rx_bufq->nb_rx_hold -= nb_refill;
                } else {
-                       rx_bufq->rx_stats.mbuf_alloc_failed += nb_desc - next_avail;
+                       rte_atomic64_add(&(rx_bufq->rx_stats.mbuf_alloc_failed),
+                                        nb_desc - next_avail);
                        RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
                               rx_bufq->port_id, rx_bufq->queue_id);
                }
@@ -1088,7 +1090,7 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
                nmb = rte_mbuf_raw_alloc(rxq->mp);
                if (unlikely(nmb == NULL)) {
-                       rxq->rx_stats.mbuf_alloc_failed++;
+                       rte_atomic64_inc(&(rxq->rx_stats.mbuf_alloc_failed));
                        RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
                               "queue_id=%u", rxq->port_id, rxq->queue_id);
                        break;
@@ -1197,7 +1199,7 @@ idpf_singleq_recv_scatter_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
                nmb = rte_mbuf_raw_alloc(rxq->mp);
                if (unlikely(!nmb)) {
-                       rxq->rx_stats.mbuf_alloc_failed++;
+                       rte_atomic64_inc(&(rxq->rx_stats.mbuf_alloc_failed));
                        RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
                               "queue_id=%u", rxq->port_id, rxq->queue_id);
                        break;
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
index eee9fdbd9e..0209750187 100644
--- a/drivers/common/idpf/idpf_common_rxtx.h
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -91,7 +91,7 @@
 #define PF_GLTSYN_SHTIME_H_5   (PF_TIMESYNC_BAR4_BASE + 0x13C)
 
 struct idpf_rx_stats {
-       uint64_t mbuf_alloc_failed;
+       rte_atomic64_t mbuf_alloc_failed;
 };
 
 struct idpf_rx_queue {
diff --git a/drivers/common/idpf/idpf_common_rxtx_avx512.c b/drivers/common/idpf/idpf_common_rxtx_avx512.c
index 5a91ed610e..1fc110cc94 100644
--- a/drivers/common/idpf/idpf_common_rxtx_avx512.c
+++ b/drivers/common/idpf/idpf_common_rxtx_avx512.c
@@ -38,7 +38,8 @@ idpf_singleq_rearm_common(struct idpf_rx_queue *rxq)
                                                dma_addr0);
                        }
                }
-               rxq->rx_stats.mbuf_alloc_failed += IDPF_RXQ_REARM_THRESH;
+               rte_atomic64_add(&(rxq->rx_stats.mbuf_alloc_failed),
+                                IDPF_RXQ_REARM_THRESH);
                return;
        }
        struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
@@ -167,7 +168,8 @@ idpf_singleq_rearm(struct idpf_rx_queue *rxq)
                                                         dma_addr0);
                                }
                        }
-                       rxq->rx_stats.mbuf_alloc_failed += IDPF_RXQ_REARM_THRESH;
+                       rte_atomic64_add(&(rxq->rx_stats.mbuf_alloc_failed),
+                                        IDPF_RXQ_REARM_THRESH);
                        return;
                }
        }
@@ -562,7 +564,8 @@ idpf_splitq_rearm_common(struct idpf_rx_queue *rx_bufq)
                                                dma_addr0);
                        }
                }
-               rx_bufq->rx_stats.mbuf_alloc_failed += IDPF_RXQ_REARM_THRESH;
+               rte_atomic64_add(&(rx_bufq->rx_stats.mbuf_alloc_failed),
+                                IDPF_RXQ_REARM_THRESH);
                return;
        }
 
@@ -635,7 +638,8 @@ idpf_splitq_rearm(struct idpf_rx_queue *rx_bufq)
                                                         dma_addr0);
                                }
                        }
-                       rx_bufq->rx_stats.mbuf_alloc_failed += IDPF_RXQ_REARM_THRESH;
+                       rte_atomic64_add(&(rx_bufq->rx_stats.mbuf_alloc_failed),
+                                        IDPF_RXQ_REARM_THRESH);
                        return;
                }
        }
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 97c03118e0..1a7dab1844 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -256,7 +256,8 @@ idpf_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
-               mbuf_alloc_failed += rxq->rx_stats.mbuf_alloc_failed;
+               mbuf_alloc_failed +=
+                   rte_atomic64_read(&(rxq->rx_stats.mbuf_alloc_failed));
        }
 
        return mbuf_alloc_failed;
@@ -303,7 +304,7 @@ idpf_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
-               rxq->rx_stats.mbuf_alloc_failed = 0;
+               rte_atomic64_set(&(rxq->rx_stats.mbuf_alloc_failed), 0);
        }
 }
 
-- 
2.25.1
