Optimized kni_allocate_mbufs and kni_free_mbufs by using the mbuf bulk
functions. This can improve performance by more than a factor of two.

Signed-off-by: Sergey Vyazmitinov <s.vyazmiti...@brain4net.com>
---
 lib/librte_kni/rte_kni.c         | 44 +++++++++++++++++-----------------------
 lib/librte_kni/rte_kni_fifo.h    | 18 ++++++++++++++++
 lib/librte_mbuf/rte_mbuf.h       | 32 +++++++++++++++++++++++++++++
 lib/librte_mempool/rte_mempool.h |  6 ++++++
 4 files changed, 75 insertions(+), 25 deletions(-)

diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index a80cefd..cb4cfa6 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -590,22 +590,21 @@ rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
 static void
 kni_free_mbufs(struct rte_kni *kni)
 {
-       int i, ret;
+       unsigned freeing;
        struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
 
-       ret = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
-       if (likely(ret > 0)) {
-               for (i = 0; i < ret; i++)
-                       rte_pktmbuf_free(pkts[i]);
+       freeing = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
+       if (likely(freeing > 0)) {
+               rte_pktmbuf_free_bulk(kni->pktmbuf_pool, pkts, freeing);
        }
 }
 
 static void
 kni_allocate_mbufs(struct rte_kni *kni)
 {
-       int i, ret;
-       struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
-       void *phys[MAX_MBUF_BURST_NUM];
+       unsigned count, allocated, put;
+       struct rte_mbuf *pkts[KNI_FIFO_COUNT_MAX];
+       void *phys[KNI_FIFO_COUNT_MAX];
 
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
                         offsetof(struct rte_kni_mbuf, pool));
@@ -628,28 +627,23 @@ kni_allocate_mbufs(struct rte_kni *kni)
                return;
        }
 
-       for (i = 0; i < MAX_MBUF_BURST_NUM; i++) {
-               pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
-               if (unlikely(pkts[i] == NULL)) {
-                       /* Out of memory */
-                       RTE_LOG(ERR, KNI, "Out of memory\n");
-                       break;
-               }
-               phys[i] = va2pa(pkts[i]);
-       }
+       /* Calculate alloc queue free space */
+       count = kni_fifo_free_count(kni->alloc_q);
 
-       /* No pkt mbuf alocated */
-       if (i <= 0)
-               return;
+       /* Get buffers from mempool */
+       allocated = rte_pktmbuf_alloc_bulk(kni->pktmbuf_pool, pkts, count);
+       for (unsigned i = 0; i < allocated; i++)
+               phys[i] = va2pa(pkts[i]);
 
-       ret = kni_fifo_put(kni->alloc_q, phys, i);
+       /* Put buffers into alloc queue */
+       put = kni_fifo_put(kni->alloc_q, (void **)phys, allocated);
 
        /* Check if any mbufs not put into alloc_q, and then free them */
-       if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
-               int j;
-
-               for (j = ret; j < i; j++)
+       if (unlikely(put < allocated)) {
+               for (unsigned j = put; j < allocated; j++) {
+                       RTE_LOG(ERR, KNI, "Freeing mbuf not enqueued into alloc_q\n");
                        rte_pktmbuf_free(pkts[j]);
+               }
        }
 }
 
diff --git a/lib/librte_kni/rte_kni_fifo.h b/lib/librte_kni/rte_kni_fifo.h
index 8cb8587..361ddb0 100644
--- a/lib/librte_kni/rte_kni_fifo.h
+++ b/lib/librte_kni/rte_kni_fifo.h
@@ -91,3 +91,21 @@ kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
        fifo->read = new_read;
        return i;
 }
+
+/**
+ * Get the number of elements currently stored in the fifo
+ */
+static inline unsigned
+kni_fifo_count(struct rte_kni_fifo *fifo)
+{
+       return (fifo->len + fifo->write - fifo->read) & (fifo->len - 1);
+}
+
+/**
+ * Get the number of free slots in the fifo
+ */
+static inline unsigned
+kni_fifo_free_count(struct rte_kni_fifo *fifo)
+{
+       return (fifo->read - fifo->write - 1) & (fifo->len - 1);
+}
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 4476d75..707c300 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -1261,6 +1261,38 @@ static inline void rte_pktmbuf_free(struct rte_mbuf *m)
 }
 
 /**
+ * Free a bulk of n packet mbufs back into their original mempool.
+ *
+ * Free each mbuf, and all its segments in case of chained buffers. Each
+ * segment is added back into its original mempool.
+ *
+ * @param mp
+ *   The mempool from which the packet mbufs were allocated.
+ * @param mbufs
+ *   Array of packet mbufs to be freed.
+ * @param n
+ *   Number of packet mbufs in the array.
+ */
+static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
+               struct rte_mbuf **mbufs, unsigned n)
+{
+       struct rte_mbuf *mbuf, *m_next;
+       unsigned i;
+       for (i = 0; i < n; ++i) {
+               mbuf = mbufs[i];
+               __rte_mbuf_sanity_check(mbuf, 1);
+
+               mbuf = mbuf->next;
+               while (mbuf != NULL) {
+                       m_next = mbuf->next;
+                       rte_pktmbuf_free_seg(mbuf);
+                       mbuf = m_next;
+               }
+       }
+       rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
+}
+
+/**
  * Creates a "clone" of the given packet mbuf.
  *
  * Walks through all segments of the given packet mbuf, and for each of them:
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index d315d42..e612a0a 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -1497,6 +1497,12 @@ rte_mempool_get(struct rte_mempool *mp, void **obj_p)
        return rte_mempool_get_bulk(mp, obj_p, 1);
 }
 
+static inline int __attribute__((always_inline))
+rte_mempool_get_n(struct rte_mempool *mp, void **obj_p, int n)
+{
+       return rte_mempool_get_bulk(mp, obj_p, n);
+}
+
 /**
  * Return the number of entries in the mempool.
  *
-- 
2.7.4
