The KNI alloc queue is filled with the physical addresses of mbufs for kernel consumption. Any unused mbufs remaining in the alloc queue are freed during the shutdown sequence in rte_kni_release().
In the existing implementation, freeing one entry of the alloc queue requires traversing all the objects of the mempool to find a match. This is repeated for every object in the alloc queue, which consumes a lot of CPU cycles. Instead of iterating over the mempool objects, use the ``rte_mem_iova2virt()`` API to get the virtual address for the physical address of each alloc_q object. This speeds up the freeing process.

Signed-off-by: Naga Harish K S V <s.v.naga.haris...@intel.com>
---
v2:
* fix checkpatch errors

v3:
* fix commit message as per review comments

v4:
* update commit message as per review comments
---
 lib/kni/rte_kni.c | 23 ++++++++---------------
 1 file changed, 8 insertions(+), 15 deletions(-)

diff --git a/lib/kni/rte_kni.c b/lib/kni/rte_kni.c
index 7971c56bb4..f443e5b2fc 100644
--- a/lib/kni/rte_kni.c
+++ b/lib/kni/rte_kni.c
@@ -375,26 +375,19 @@ va2pa_all(struct rte_mbuf *mbuf)
 }
 
 static void
-obj_free(struct rte_mempool *mp __rte_unused, void *opaque, void *obj,
-        unsigned obj_idx __rte_unused)
-{
-        struct rte_mbuf *m = obj;
-        void *mbuf_phys = opaque;
-
-        if (va2pa(m) == mbuf_phys)
-                rte_pktmbuf_free(m);
-}
-
-static void
-kni_free_fifo_phy(struct rte_mempool *mp, struct rte_kni_fifo *fifo)
+kni_free_fifo_phy(struct rte_kni_fifo *fifo)
 {
         void *mbuf_phys;
         int ret;
+        struct rte_mbuf *m;
 
         do {
                 ret = kni_fifo_get(fifo, &mbuf_phys, 1);
-                if (ret)
-                        rte_mempool_obj_iter(mp, obj_free, mbuf_phys);
+                if (ret) {
+                        m = (struct rte_mbuf *)
+                                rte_mem_iova2virt((rte_iova_t)mbuf_phys);
+                        rte_pktmbuf_free(m);
+                }
         } while (ret);
 }
 
@@ -440,7 +433,7 @@ rte_kni_release(struct rte_kni *kni)
         if (kni_fifo_count(kni->rx_q))
                 RTE_LOG(ERR, KNI, "Fail to free all Rx-q items\n");
 
-        kni_free_fifo_phy(kni->pktmbuf_pool, kni->alloc_q);
+        kni_free_fifo_phy(kni->alloc_q);
         kni_free_fifo(kni->tx_q);
         kni_free_fifo(kni->free_q);
 
--
2.23.0
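
For reference (not part of the patch), below is a minimal sketch of the drain loop this change implements. It assumes the internal KNI helpers struct rte_kni_fifo and kni_fifo_get() are in scope, as they are inside lib/kni/rte_kni.c, and the function name drain_phys_fifo is made up for illustration:

#include <rte_mbuf.h>
#include <rte_memory.h>

/* Drain a fifo holding mbuf physical addresses and free the backing mbufs. */
static void
drain_phys_fifo(struct rte_kni_fifo *fifo)
{
        void *mbuf_phys;
        struct rte_mbuf *m;

        /* Pop one physical address at a time until the fifo is empty. */
        while (kni_fifo_get(fifo, &mbuf_phys, 1)) {
                /*
                 * Reverse lookup IOVA -> virtual address in one call,
                 * instead of scanning every object in the mempool.
                 */
                m = (struct rte_mbuf *)rte_mem_iova2virt((rte_iova_t)mbuf_phys);
                rte_pktmbuf_free(m);
        }
}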