Optimize kni_allocate_mbufs() and kni_free_mbufs() by using the mbuf bulk alloc/free functions. This can improve performance by more than a factor of two.
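For context, the change replaces per-mbuf alloc/free loops with single bulk calls per burst. A minimal sketch of that pattern (illustrative only, not the KNI code itself; it assumes the usual rte_pktmbuf_alloc_bulk()/rte_pktmbuf_free_bulk() signatures and a hypothetical EXAMPLE_BURST size):

  #include <rte_mbuf.h>
  #include <rte_mempool.h>

  #define EXAMPLE_BURST 32	/* hypothetical burst size, for illustration only */

  static void
  example_bulk_cycle(struct rte_mempool *pool)
  {
  	struct rte_mbuf *pkts[EXAMPLE_BURST];

  	/* one mempool operation instead of EXAMPLE_BURST rte_pktmbuf_alloc() calls */
  	if (rte_pktmbuf_alloc_bulk(pool, pkts, EXAMPLE_BURST) != 0)
  		return;

  	/* one call instead of freeing each mbuf with its own rte_pktmbuf_free() */
  	rte_pktmbuf_free_bulk(pkts, EXAMPLE_BURST);
  }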
Signed-off-by: Sergey Vyazmitinov <s.vyazmiti...@brain4net.com>
---
v5:
 * use rte_pktmbuf_free_bulk for removing packets which were not put in the alloc queue.
v6:
 * fix c99 compilation error
---
 lib/librte_kni/rte_kni.c      | 47 ++++++++++++++++++++-----------------------
 lib/librte_kni/rte_kni_fifo.h | 18 +++++++++++++++++
 2 files changed, 40 insertions(+), 25 deletions(-)

diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index a80cefd..6591f4f 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -590,22 +590,21 @@ rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
 static void
 kni_free_mbufs(struct rte_kni *kni)
 {
-	int i, ret;
+	unsigned int freeing;
 	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
 
-	ret = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
-	if (likely(ret > 0)) {
-		for (i = 0; i < ret; i++)
-			rte_pktmbuf_free(pkts[i]);
+	freeing = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
+	if (likely(freeing > 0)) {
+		rte_pktmbuf_free_bulk(pkts, freeing);
 	}
 }
 
 static void
 kni_allocate_mbufs(struct rte_kni *kni)
 {
-	int i, ret;
-	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
-	void *phys[MAX_MBUF_BURST_NUM];
+	unsigned int count, put, i;
+	struct rte_mbuf *pkts[KNI_FIFO_COUNT_MAX];
+	void *phys[KNI_FIFO_COUNT_MAX];
 
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
 			 offsetof(struct rte_kni_mbuf, pool));
@@ -628,28 +627,26 @@ kni_allocate_mbufs(struct rte_kni *kni)
 		return;
 	}
 
-	for (i = 0; i < MAX_MBUF_BURST_NUM; i++) {
-		pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
-		if (unlikely(pkts[i] == NULL)) {
-			/* Out of memory */
-			RTE_LOG(ERR, KNI, "Out of memory\n");
-			break;
-		}
-		phys[i] = va2pa(pkts[i]);
-	}
+	/* Calculate alloc queue free space */
+	count = kni_fifo_free_count(kni->alloc_q);
 
-	/* No pkt mbuf alocated */
-	if (i <= 0)
+	/* Get buffers from mempool */
+	if (rte_pktmbuf_alloc_bulk(kni->pktmbuf_pool, pkts, count) != 0) {
+		RTE_LOG(ERR, KNI, "Can't allocate %u mbufs\n", count);
 		return;
+	}
+
+	for (i = 0; i < count; i++)
+		phys[i] = va2pa(pkts[i]);
 
-	ret = kni_fifo_put(kni->alloc_q, phys, i);
+	/* Put buffers into alloc queue */
+	put = kni_fifo_put(kni->alloc_q, (void **)phys, count);
 
 	/* Check if any mbufs not put into alloc_q, and then free them */
-	if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
-		int j;
-
-		for (j = ret; j < i; j++)
-			rte_pktmbuf_free(pkts[j]);
+	if (unlikely(put < count)) {
+		RTE_LOG(ERR, KNI, "Free %u of %u allocated buffers\n",
+			count - put, count);
+		rte_pktmbuf_free_bulk(pkts + put, count - put);
 	}
 }
 
diff --git a/lib/librte_kni/rte_kni_fifo.h b/lib/librte_kni/rte_kni_fifo.h
index 8cb8587..361ddb0 100644
--- a/lib/librte_kni/rte_kni_fifo.h
+++ b/lib/librte_kni/rte_kni_fifo.h
@@ -91,3 +91,21 @@ kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
 	fifo->read = new_read;
 	return i;
 }
+
+/**
+ * Get the number of elements in the fifo
+ */
+static inline unsigned
+kni_fifo_count(struct rte_kni_fifo *fifo)
+{
+	return (fifo->len + fifo->write - fifo->read) & (fifo->len - 1);
+}
+
+/**
+ * Get the number of available elements in the fifo
+ */
+static inline unsigned
+kni_fifo_free_count(struct rte_kni_fifo *fifo)
+{
+	return (fifo->read - fifo->write - 1) & (fifo->len - 1);
+}
-- 
2.7.4
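A side note on the new FIFO helpers: both expressions assume the ring length is a power of two (as it is for the KNI queues), so the masked subtraction wraps correctly and one slot is always left unused to distinguish a full ring from an empty one. A small standalone sketch of the same arithmetic, using a hypothetical toy_fifo with an 8-slot ring rather than the real struct rte_kni_fifo:

  #include <stdio.h>

  /* toy stand-in for struct rte_kni_fifo; only the index arithmetic matters */
  struct toy_fifo {
  	unsigned int len;	/* ring size, must be a power of two */
  	unsigned int read;
  	unsigned int write;
  };

  /* same expression as kni_fifo_count(): entries currently queued */
  static unsigned int
  toy_count(const struct toy_fifo *f)
  {
  	return (f->len + f->write - f->read) & (f->len - 1);
  }

  /* same expression as kni_fifo_free_count(): one slot stays empty */
  static unsigned int
  toy_free_count(const struct toy_fifo *f)
  {
  	return (f->read - f->write - 1) & (f->len - 1);
  }

  int main(void)
  {
  	struct toy_fifo f = { .len = 8, .read = 6, .write = 2 };

  	/* write has wrapped past read: prints "used=4 free=3" */
  	printf("used=%u free=%u\n", toy_count(&f), toy_free_count(&f));
  	return 0;
  }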