Check whether a device's rx_q fifo is empty before entering the receive path, and move kni_net_poll_resp() out of the inner polling loop. This significantly reduces packet processing latency.

Signed-off-by: Sergey Vyazmitinov <s.vyazmiti...@brain4net.com>
---
 .../linuxapp/eal/include/exec-env/rte_kni_common.h |  6 ++++
 lib/librte_eal/linuxapp/kni/kni_misc.c             | 33 ++++++++++++++++------
 2 files changed, 30 insertions(+), 9 deletions(-)
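
Note (not part of the patch): a minimal, self-contained sketch of why a single
index comparison is a valid emptiness test for a single-producer/single-consumer
ring such as rx_q. The toy_fifo type below is hypothetical and only mirrors the
read/write index convention of rte_kni_fifo; it is not the DPDK structure.

#include <stdio.h>

/* Hypothetical toy ring, single producer / single consumer. */
struct toy_fifo {
	unsigned write;      /* advanced only by the producer */
	unsigned read;       /* advanced only by the consumer */
	void *buffer[8];
};

/* Empty exactly when the consumer has caught up with the producer;
 * with one producer and one consumer this test needs no lock, which
 * is what lets the kernel thread skip idle devices cheaply. */
static inline int
toy_fifo_empty(struct toy_fifo *f)
{
	return f->write == f->read;
}

int main(void)
{
	struct toy_fifo f = { 0, 0, { NULL } };

	printf("empty: %d\n", toy_fifo_empty(&f)); /* 1 */
	f.buffer[f.write++ % 8] = (void *)0x1;     /* producer enqueues */
	printf("empty: %d\n", toy_fifo_empty(&f)); /* 0 */
	f.read++;                                  /* consumer dequeues */
	printf("empty: %d\n", toy_fifo_empty(&f)); /* 1 */
	return 0;
}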

diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
index 09713b0..8183a8e 100644
--- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
@@ -109,6 +109,12 @@ struct rte_kni_fifo {
        void *volatile buffer[];     /**< The buffer contains mbuf pointers */
 };
 
+static inline int
+kni_fifo_empty(struct rte_kni_fifo *fifo)
+{
+       return fifo->write == fifo->read;
+}
+
 /*
  * The kernel image of the rte_mbuf struct, with only the relevant fields.
  * Padding is necessary to assure the offsets of these fields
diff --git a/lib/librte_eal/linuxapp/kni/kni_misc.c b/lib/librte_eal/linuxapp/kni/kni_misc.c
index 497db9b..4bf9bfa 100644
--- a/lib/librte_eal/linuxapp/kni/kni_misc.c
+++ b/lib/librte_eal/linuxapp/kni/kni_misc.c
@@ -45,6 +45,7 @@ MODULE_AUTHOR("Intel Corporation");
 MODULE_DESCRIPTION("Kernel Module for managing kni devices");
 
 #define KNI_RX_LOOP_NUM 1000
+#define KNI_RX_DATA_LOOP_NUM 2500
 
 #define KNI_MAX_DEVICES 32
 
@@ -129,25 +130,39 @@ static struct pernet_operations kni_net_ops = {
 #endif
 };
 
-static int
-kni_thread_single(void *data)
+static inline void
+kni_thread_single_rx_data_loop(struct kni_net *knet)
 {
-       struct kni_net *knet = data;
-       int j;
        struct kni_dev *dev;
+       int i;
 
-       while (!kthread_should_stop()) {
-               down_read(&knet->kni_list_lock);
-               for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
-                       list_for_each_entry(dev, &knet->kni_list_head, list) {
+       for (i = 0; i < KNI_RX_DATA_LOOP_NUM; ++i) {
+               list_for_each_entry(dev, &knet->kni_list_head, list) {
+                       /* Burst dequeue from rx_q */
+                       if (!kni_fifo_empty((struct rte_kni_fifo *)dev->rx_q)) {
 #ifdef RTE_KNI_VHOST
                                kni_chk_vhost_rx(dev);
 #else
                                kni_net_rx(dev);
 #endif
-                               kni_net_poll_resp(dev);
                        }
                }
+       }
+       list_for_each_entry(dev, &knet->kni_list_head, list) {
+               kni_net_poll_resp(dev);
+       }
+}
+
+static int
+kni_thread_single(void *data)
+{
+       struct kni_net *knet = data;
+       int j;
+
+       while (!kthread_should_stop()) {
+               down_read(&knet->kni_list_lock);
+               for (j = 0; j < KNI_RX_LOOP_NUM; j++)
+                       kni_thread_single_rx_data_loop(knet);
                up_read(&knet->kni_list_lock);
 #ifdef RTE_KNI_PREEMPT_DEFAULT
                /* reschedule out for a while */
-- 
2.7.4
