From: Pavan Nikhilesh <pbhagavat...@marvell.com>

Remove the recalculation of SQB thresholds from the Tx queue buffer adjustment;
the adjustment is already applied during Tx queue setup.

Signed-off-by: Pavan Nikhilesh <pbhagavat...@marvell.com>
---
Depends-on: series-27660
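
Note to reviewers: the depth arithmetic shared by these paths now computes the
usable entries as (avail << sqes_per_sqb_log2) - avail, i.e.
avail * (sqes_per_sqb - 1), which keeps back one SQE per SQB (the slot used to
chain to the next SQB). Below is a minimal stand-alone sketch of that
arithmetic; it is illustrative only, and the names are local stand-ins for
txq->nb_sqb_bufs_adj, *txq->fc_mem and txq->sqes_per_sqb_log2, not the driver
structures themselves.

  #include <stdint.h>
  #include <stdio.h>

  static int32_t
  sq_depth_pkts(int32_t nb_sqb_bufs_adj, int32_t fc_mem_val,
                uint16_t sqes_per_sqb_log2)
  {
          /* SQBs still available to software. */
          int32_t avail = nb_sqb_bufs_adj - fc_mem_val;

          /* Each SQB holds 2^sqes_per_sqb_log2 SQEs; one entry per SQB is
           * kept back, so the usable count is avail * (sqes_per_sqb - 1),
           * written here as a shift and a subtraction.
           */
          return (avail << sqes_per_sqb_log2) - avail;
  }

  int
  main(void)
  {
          /* 100 free SQBs, 32 SQEs per SQB (log2 = 5) -> 3100 usable SQEs. */
          printf("%d\n", sq_depth_pkts(100, 0, 5));
          return 0;
  }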

 drivers/event/cnxk/cn10k_eventdev.c  |  9 +--------
 drivers/event/cnxk/cn10k_tx_worker.h |  6 +++---
 drivers/event/cnxk/cn9k_eventdev.c   |  9 +--------
 drivers/event/cnxk/cn9k_worker.h     | 12 +++++++++---
 drivers/net/cnxk/cn10k_tx.h          | 12 ++++++------
 drivers/net/cnxk/cn9k_tx.h           |  5 +++--
 6 files changed, 23 insertions(+), 30 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 89f32c4d1e..f7c6a83ff0 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -840,16 +840,9 @@ cn10k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
                sq = &cnxk_eth_dev->sqs[tx_queue_id];
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
-               sq->nb_sqb_bufs_adj =
-                       sq->nb_sqb_bufs -
-                       RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
-                               sqes_per_sqb;
                if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
-                       sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
-                                               (sqes_per_sqb - 1));
+                       sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc / sqes_per_sqb);
                txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
-               txq->nb_sqb_bufs_adj =
-                       ((100 - ROC_NIX_SQB_THRESH) * txq->nb_sqb_bufs_adj) / 100;
        }
 }

diff --git a/drivers/event/cnxk/cn10k_tx_worker.h b/drivers/event/cnxk/cn10k_tx_worker.h
index c18786a14c..7b2798ad2e 100644
--- a/drivers/event/cnxk/cn10k_tx_worker.h
+++ b/drivers/event/cnxk/cn10k_tx_worker.h
@@ -32,9 +32,9 @@ cn10k_sso_txq_fc_wait(const struct cn10k_eth_txq *txq)
 static __rte_always_inline int32_t
 cn10k_sso_sq_depth(const struct cn10k_eth_txq *txq)
 {
-       return (txq->nb_sqb_bufs_adj -
-               __atomic_load_n((int16_t *)txq->fc_mem, __ATOMIC_RELAXED))
-              << txq->sqes_per_sqb_log2;
+       int32_t avail = (int32_t)txq->nb_sqb_bufs_adj -
+                       (int32_t)__atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED);
+       return (avail << txq->sqes_per_sqb_log2) - avail;
 }

 static __rte_always_inline uint16_t
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index df23219f14..a9d603c22f 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -893,16 +893,9 @@ cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
                sq = &cnxk_eth_dev->sqs[tx_queue_id];
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
-               sq->nb_sqb_bufs_adj =
-                       sq->nb_sqb_bufs -
-                       RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
-                               sqes_per_sqb;
                if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
-                       sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
-                                               (sqes_per_sqb - 1));
+                       sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc / sqes_per_sqb);
                txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
-               txq->nb_sqb_bufs_adj =
-                       ((100 - ROC_NIX_SQB_THRESH) * txq->nb_sqb_bufs_adj) / 100;
        }
 }

diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 988cb3acb6..d15dd309fe 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -711,6 +711,14 @@ cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
 }
 #endif

+static __rte_always_inline int32_t
+cn9k_sso_sq_depth(const struct cn9k_eth_txq *txq)
+{
+       int32_t avail = (int32_t)txq->nb_sqb_bufs_adj -
+                       (int32_t)__atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED);
+       return (avail << txq->sqes_per_sqb_log2) - avail;
+}
+
 static __rte_always_inline uint16_t
 cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
                      uint64_t *txq_data, const uint32_t flags)
@@ -734,9 +742,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
        if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
                handle_tx_completion_pkts(txq, 1, 1);

-       if (((txq->nb_sqb_bufs_adj -
-             __atomic_load_n((int16_t *)txq->fc_mem, __ATOMIC_RELAXED))
-            << txq->sqes_per_sqb_log2) <= 0)
+       if (cn9k_sso_sq_depth(txq) <= 0)
                return 0;
        cn9k_nix_tx_skeleton(txq, cmd, flags, 0);
        cn9k_nix_xmit_prepare(txq, m, cmd, flags, txq->lso_tun_fmt, txq->mark_flag,
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index c9ec01cd9d..bab08a2d3b 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -35,12 +35,13 @@

 #define NIX_XMIT_FC_OR_RETURN(txq, pkts)                                       \
        do {                                                                   \
+               int64_t avail;                                                 \
                /* Cached value is low, Update the fc_cache_pkts */            \
                if (unlikely((txq)->fc_cache_pkts < (pkts))) {                 \
+                       avail = txq->nb_sqb_bufs_adj - *txq->fc_mem;           \
                        /* Multiply with sqe_per_sqb to express in pkts */     \
                        (txq)->fc_cache_pkts =                                 \
-                               ((txq)->nb_sqb_bufs_adj - *(txq)->fc_mem)      \
-                               << (txq)->sqes_per_sqb_log2;                   \
+                               (avail << (txq)->sqes_per_sqb_log2) - avail;   \
                        /* Check it again for the room */                      \
                        if (unlikely((txq)->fc_cache_pkts < (pkts)))           \
                                return 0;                                      \
@@ -113,10 +114,9 @@ cn10k_nix_vwqe_wait_fc(struct cn10k_eth_txq *txq, int64_t req)
        if (cached < 0) {
                /* Check if we have space else retry. */
                do {
-                       refill =
-                               (txq->nb_sqb_bufs_adj -
-                                __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
-                               << txq->sqes_per_sqb_log2;
+                       refill = txq->nb_sqb_bufs_adj -
+                                __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED);
+                       refill = (refill << txq->sqes_per_sqb_log2) - refill;
                } while (refill <= 0);
                __atomic_compare_exchange(&txq->fc_cache_pkts, &cached, &refill,
                                          0, __ATOMIC_RELEASE,
diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
index e956c1ad2a..8efb75b505 100644
--- a/drivers/net/cnxk/cn9k_tx.h
+++ b/drivers/net/cnxk/cn9k_tx.h
@@ -32,12 +32,13 @@

 #define NIX_XMIT_FC_OR_RETURN(txq, pkts)                                       \
        do {                                                                   \
+               int64_t avail;                                                 \
                /* Cached value is low, Update the fc_cache_pkts */            \
                if (unlikely((txq)->fc_cache_pkts < (pkts))) {                 \
+                       avail = txq->nb_sqb_bufs_adj - *txq->fc_mem;           \
                        /* Multiply with sqe_per_sqb to express in pkts */     \
                        (txq)->fc_cache_pkts =                                 \
-                               ((txq)->nb_sqb_bufs_adj - *(txq)->fc_mem)      \
-                               << (txq)->sqes_per_sqb_log2;                   \
+                               (avail << (txq)->sqes_per_sqb_log2) - avail;   \
                        /* Check it again for the room */                      \
                        if (unlikely((txq)->fc_cache_pkts < (pkts)))           \
                                return 0;                                      \
--
2.39.1
