From: Pavan Nikhilesh <pbhagavat...@marvell.com>

Update the event vector transmit routine to honor elem_offset.
Use ``rte_event_vector::elem_offset`` to report partial vector
transmission back to the application when there is not enough
space in the SQ.

Signed-off-by: Pavan Nikhilesh <pbhagavat...@marvell.com>
---
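Notes:

The fast path treats the first 64 bits of ``struct rte_event_vector``
as a single ``meta`` word. For reference, the bit layout this patch
relies on, assuming the upstream ``rte_event_vector`` definition:

    /* meta = *(uint64_t *)ev->vec
     *   bits  0..15  nb_elem      - mbufs left to transmit
     *   bits 16..27  elem_offset  - index of the first pending mbuf
     *   bits 28..30  rsvd
     *   bit  31      attr_valid   - per-vector port/queue are valid
     *   bits 32..47  port         - valid when attr_valid is set
     *   bits 48..63  queue        - valid when attr_valid is set
     */

On partial transmission the low 28 bits are rewritten in place:
``nb_elem`` becomes the count still pending and ``elem_offset`` is
advanced past the mbufs already sent.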
 drivers/event/cnxk/cn10k_worker.h | 145 ++++++++++++++++++------------
 1 file changed, 89 insertions(+), 56 deletions(-)
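
With this change ``cn10k_sso_hws_event_tx()`` returns 0 for a vector
event that could only be partially transmitted, leaving the updated
vector with the caller so that a retry resumes at the first pending
mbuf. A minimal caller-side sketch (``tx_event_retry()`` and the
busy-wait policy are illustrative only, not part of this patch):

    #include <rte_event_eth_tx_adapter.h>
    #include <rte_pause.h>

    /* Re-enqueue a vector event until the PMD reports it fully sent.
     * After each failed attempt, elem_offset/nb_elem in the vector
     * have already been advanced past the mbufs that did go out.
     */
    static inline void
    tx_event_retry(uint8_t dev_id, uint8_t port_id, struct rte_event *ev)
    {
            while (rte_event_eth_tx_adapter_enqueue(dev_id, port_id,
                                                    ev, 1, 0) != 1)
                    rte_pause();
    }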

diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 55d9e56766..23b5d7ca7d 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -534,12 +534,21 @@ cn10k_sso_txq_fc_wait(const struct cn10k_eth_txq *txq)
                ;
 }
 
-static __rte_always_inline void
+static __rte_always_inline int32_t
+cn10k_sso_sq_depth(const struct cn10k_eth_txq *txq)
+{
+       return (txq->nb_sqb_bufs_adj -
+               __atomic_load_n((int16_t *)txq->fc_mem, __ATOMIC_RELAXED))
+              << txq->sqes_per_sqb_log2;
+}
+
+static __rte_always_inline uint16_t
 cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
                 uint16_t lmt_id, uintptr_t lmt_addr, uint8_t sched_type,
                 const uint64_t *txq_data, const uint32_t flags)
 {
        uint8_t lnum = 0, loff = 0, shft = 0;
+       uint16_t ref_cnt = m->refcnt;
        struct cn10k_eth_txq *txq;
        uintptr_t laddr;
        uint16_t segdw;
@@ -547,6 +556,9 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
        bool sec;
 
        txq = cn10k_sso_hws_xtract_meta(m, txq_data);
+       if (cn10k_sso_sq_depth(txq) <= 0)
+               return 0;
+
        cn10k_nix_tx_skeleton(txq, cmd, flags, 0);
        /* Perform header writes before barrier
         * for TSO
@@ -584,55 +596,66 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
 
        cn10k_sso_txq_fc_wait(txq);
        roc_lmt_submit_steorl(lmt_id, pa);
+
+       if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+               if (ref_cnt > 1)
+                       rte_io_wmb();
+       }
+       return 1;
 }
 
-static __rte_always_inline void
+static __rte_always_inline uint16_t
 cn10k_sso_vwqe_split_tx(struct cn10k_sso_hws *ws, struct rte_mbuf **mbufs,
-                       uint16_t nb_mbufs, uint64_t *cmd, uint16_t lmt_id,
-                       uintptr_t lmt_addr, uint8_t sched_type,
+                       uint16_t nb_mbufs, uint64_t *cmd,
                        const uint64_t *txq_data, const uint32_t flags)
 {
-       uint16_t port[4], queue[4];
-       uint16_t i, j, pkts, scalar;
+       uint16_t count = 0, port, queue, ret = 0, last_idx = 0;
        struct cn10k_eth_txq *txq;
+       int32_t space;
+       int i;
 
-       scalar = nb_mbufs & (NIX_DESCS_PER_LOOP - 1);
-       pkts = RTE_ALIGN_FLOOR(nb_mbufs, NIX_DESCS_PER_LOOP);
-
-       for (i = 0; i < pkts; i += NIX_DESCS_PER_LOOP) {
-               port[0] = mbufs[i]->port;
-               port[1] = mbufs[i + 1]->port;
-               port[2] = mbufs[i + 2]->port;
-               port[3] = mbufs[i + 3]->port;
-
-               queue[0] = rte_event_eth_tx_adapter_txq_get(mbufs[i]);
-               queue[1] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 1]);
-               queue[2] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 2]);
-               queue[3] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 3]);
-
-               if (((port[0] ^ port[1]) & (port[2] ^ port[3])) ||
-                   ((queue[0] ^ queue[1]) & (queue[2] ^ queue[3]))) {
-                       for (j = 0; j < 4; j++)
-                               cn10k_sso_tx_one(ws, mbufs[i + j], cmd, lmt_id,
-                                                lmt_addr, sched_type, txq_data,
-                                                flags);
-               } else {
-                       txq = (struct cn10k_eth_txq
-                                      *)(txq_data[(txq_data[port[0]] >> 48) +
-                                                  queue[0]] &
-                                         (BIT_ULL(48) - 1));
-                       cn10k_nix_xmit_pkts_vector(txq, (uint64_t *)ws,
-                                                  &mbufs[i], 4, cmd,
-                                                  flags | NIX_TX_VWQE_F);
+       port = mbufs[0]->port;
+       queue = rte_event_eth_tx_adapter_txq_get(mbufs[0]);
+       for (i = 0; i < nb_mbufs; i++) {
+               if (port != mbufs[i]->port ||
+                   queue != rte_event_eth_tx_adapter_txq_get(mbufs[i])) {
+                       if (count) {
+                               txq = (struct cn10k_eth_txq
+                                              *)(txq_data[(txq_data[port] >>
+                                                           48) +
+                                                          queue] &
+                                                 (BIT_ULL(48) - 1));
+                               /* Transmit based on queue depth */
+                               space = cn10k_sso_sq_depth(txq);
+                               if (space < count)
+                                       goto done;
+                               cn10k_nix_xmit_pkts_vector(
+                                       txq, (uint64_t *)ws, &mbufs[last_idx],
+                                       count, cmd, flags | NIX_TX_VWQE_F);
+                               ret += count;
+                               count = 0;
+                       }
+                       port = mbufs[i]->port;
+                       queue = rte_event_eth_tx_adapter_txq_get(mbufs[i]);
+                       last_idx = i;
                }
+               count++;
        }
-
-       mbufs += i;
-
-       for (i = 0; i < scalar; i++) {
-               cn10k_sso_tx_one(ws, mbufs[i], cmd, lmt_id, lmt_addr,
-                                sched_type, txq_data, flags);
+       if (count) {
+               txq = (struct cn10k_eth_txq
+                              *)(txq_data[(txq_data[port] >> 48) + queue] &
+                                 (BIT_ULL(48) - 1));
+               /* Transmit based on queue depth */
+               space = cn10k_sso_sq_depth(txq);
+               if (space < count)
+                       goto done;
+               cn10k_nix_xmit_pkts_vector(txq, (uint64_t *)ws,
+                                          &mbufs[last_idx], count, cmd,
+                                          flags | NIX_TX_VWQE_F);
+               ret += count;
        }
+done:
+       return ret;
 }
 
 static __rte_always_inline uint16_t
@@ -651,7 +674,11 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
        if (ev->event_type & RTE_EVENT_TYPE_VECTOR) {
                struct rte_mbuf **mbufs = ev->vec->mbufs;
                uint64_t meta = *(uint64_t *)ev->vec;
+               uint16_t offset, nb_pkts, left;
+               int32_t space;
 
+               nb_pkts = meta & 0xFFFF;
+               offset = (meta >> 16) & 0xFFF;
                if (meta & BIT(31)) {
                        txq = (struct cn10k_eth_txq
                                       *)(txq_data[(txq_data[meta >> 32] >>
@@ -659,29 +686,35 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
                                                   (meta >> 48)] &
                                          (BIT_ULL(48) - 1));
 
-                       cn10k_nix_xmit_pkts_vector(txq, (uint64_t *)ws, mbufs,
-                                                  meta & 0xFFFF, cmd,
+                       /* Transmit based on queue depth */
+                       space = cn10k_sso_sq_depth(txq);
+                       if (space <= 0)
+                               return 0;
+                       nb_pkts = nb_pkts < space ? nb_pkts : (uint16_t)space;
+                       cn10k_nix_xmit_pkts_vector(txq, (uint64_t *)ws,
+                                                  mbufs + offset, nb_pkts, cmd,
                                                   flags | NIX_TX_VWQE_F);
                } else {
-                       cn10k_sso_vwqe_split_tx(
-                               ws, mbufs, meta & 0xFFFF, cmd, lmt_id, lmt_addr,
-                               ev->sched_type, txq_data, flags);
+                       nb_pkts = cn10k_sso_vwqe_split_tx(ws, mbufs + offset,
+                                                         nb_pkts, cmd,
+                                                         txq_data, flags);
+               }
+               left = (meta & 0xFFFF) - nb_pkts;
+
+               if (!left) {
+                       rte_mempool_put(rte_mempool_from_obj(ev->vec), ev->vec);
+               } else {
+                       *(uint64_t *)ev->vec =
+                               (meta & ~0xFFFFFFFUL) |
+                               (((uint32_t)nb_pkts + offset) << 16) | left;
                }
-               rte_mempool_put(rte_mempool_from_obj(ev->vec), ev->vec);
                rte_prefetch0(ws);
-               return 1;
+               return !left;
        }
 
        m = ev->mbuf;
-       txq = cn10k_sso_hws_xtract_meta(m, txq_data);
-       if (((txq->nb_sqb_bufs_adj -
-             __atomic_load_n((int16_t *)txq->fc_mem, __ATOMIC_RELAXED))
-            << txq->sqes_per_sqb_log2) <= 0)
-               return 0;
-       cn10k_sso_tx_one(ws, m, cmd, lmt_id, lmt_addr, ev->sched_type, txq_data,
-                        flags);
-
-       return 1;
+       return cn10k_sso_tx_one(ws, m, cmd, lmt_id, lmt_addr, ev->sched_type,
+                               txq_data, flags);
 }
 
 #define T(name, sz, flags)                                                     \
-- 
2.25.1
