Reorder functions so that forward declarations are no longer needed; function implementations are kept the same.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
 lib/librte_eal/linuxapp/kni/kni_misc.c | 301 ++++++++++++++++-----------------
 lib/librte_eal/linuxapp/kni/kni_net.c  | 293 ++++++++++++++++----------------
 2 files changed, 287 insertions(+), 307 deletions(-)

diff --git a/lib/librte_eal/linuxapp/kni/kni_misc.c b/lib/librte_eal/linuxapp/kni/kni_misc.c
index 6947483..235ce1a 100644
--- a/lib/librte_eal/linuxapp/kni/kni_misc.c
+++ b/lib/librte_eal/linuxapp/kni/kni_misc.c
@@ -50,35 +50,6 @@ MODULE_DESCRIPTION("Kernel Module for managing kni devices");
 extern const struct pci_device_id ixgbe_pci_tbl[];
 extern const struct pci_device_id igb_pci_tbl[];

-static int kni_open(struct inode *inode, struct file *file);
-static int kni_release(struct inode *inode, struct file *file);
-static int kni_ioctl(struct inode *inode, unsigned int ioctl_num,
-                                       unsigned long ioctl_param);
-static int kni_compat_ioctl(struct inode *inode, unsigned int ioctl_num,
-                                               unsigned long ioctl_param);
-static int kni_dev_remove(struct kni_dev *dev);
-
-static int __init kni_parse_kthread_mode(void);
-
-/* KNI processing for single kernel thread mode */
-static int kni_thread_single(void *unused);
-/* KNI processing for multiple kernel thread mode */
-static int kni_thread_multiple(void *param);
-
-static const struct file_operations kni_fops = {
-       .owner = THIS_MODULE,
-       .open = kni_open,
-       .release = kni_release,
-       .unlocked_ioctl = (void *)kni_ioctl,
-       .compat_ioctl = (void *)kni_compat_ioctl,
-};
-
-static struct miscdevice kni_misc = {
-       .minor = MISC_DYNAMIC_MINOR,
-       .name = KNI_DEVICE,
-       .fops = &kni_fops,
-};
-
 /* loopback mode */
 static char *lo_mode;

@@ -149,72 +120,56 @@ static struct pernet_operations kni_net_ops = {
 #endif
 };

-static int __init
-kni_init(void)
+static int
+kni_thread_single(void *data)
 {
-       int rc;
-
-       pr_debug("######## DPDK kni module loading ########\n");
-
-       if (kni_parse_kthread_mode() < 0) {
-               pr_err("Invalid parameter for kthread_mode\n");
-               return -EINVAL;
-       }
+       struct kni_net *knet = data;
+       int j;
+       struct kni_dev *dev;

-#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
-       rc = register_pernet_subsys(&kni_net_ops);
+       while (!kthread_should_stop()) {
+               down_read(&knet->kni_list_lock);
+               for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
+                       list_for_each_entry(dev, &knet->kni_list_head, list) {
+#ifdef RTE_KNI_VHOST
+                               kni_chk_vhost_rx(dev);
 #else
-       rc = register_pernet_gen_subsys(&kni_net_id, &kni_net_ops);
+                               kni_net_rx(dev);
+#endif
+                               kni_net_poll_resp(dev);
+                       }
+               }
+               up_read(&knet->kni_list_lock);
+#ifdef RTE_KNI_PREEMPT_DEFAULT
+               /* reschedule out for a while */
+               schedule_timeout_interruptible(
+                       usecs_to_jiffies(KNI_KTHREAD_RESCHEDULE_INTERVAL));
 #endif
-       if (rc)
-               return -EPERM;
-
-       rc = misc_register(&kni_misc);
-       if (rc != 0) {
-               pr_err("Misc registration failed\n");
-               goto out;
        }

-       /* Configure the lo mode according to the input parameter */
-       kni_net_config_lo_mode(lo_mode);
-
-       pr_debug("######## DPDK kni module loaded  ########\n");
-
        return 0;
-
-out:
-#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
-       unregister_pernet_subsys(&kni_net_ops);
-#else
-       unregister_pernet_gen_subsys(kni_net_id, &kni_net_ops);
-#endif
-       return rc;
 }

-static void __exit
-kni_exit(void)
+static int
+kni_thread_multiple(void *param)
 {
-       misc_deregister(&kni_misc);
-#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
-       unregister_pernet_subsys(&kni_net_ops);
+       int j;
+       struct kni_dev *dev = (struct kni_dev *)param;
+
+       while (!kthread_should_stop()) {
+               for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
+#ifdef RTE_KNI_VHOST
+                       kni_chk_vhost_rx(dev);
 #else
-       unregister_pernet_gen_subsys(kni_net_id, &kni_net_ops);
+                       kni_net_rx(dev);
 #endif
-       pr_debug("####### DPDK kni module unloaded  #######\n");
-}
-
-static int __init
-kni_parse_kthread_mode(void)
-{
-       if (!kthread_mode)
-               return 0;
-
-       if (strcmp(kthread_mode, "single") == 0)
-               return 0;
-       else if (strcmp(kthread_mode, "multiple") == 0)
-               multiple_kthread_on = 1;
-       else
-               return -1;
+                       kni_net_poll_resp(dev);
+               }
+#ifdef RTE_KNI_PREEMPT_DEFAULT
+               schedule_timeout_interruptible(
+                       usecs_to_jiffies(KNI_KTHREAD_RESCHEDULE_INTERVAL));
+#endif
+       }

        return 0;
 }
@@ -249,6 +204,27 @@ kni_open(struct inode *inode, struct file *file)
 }

 static int
+kni_dev_remove(struct kni_dev *dev)
+{
+       if (!dev)
+               return -ENODEV;
+
+       if (dev->pci_dev) {
+               if (pci_match_id(ixgbe_pci_tbl, dev->pci_dev))
+                       ixgbe_kni_remove(dev->pci_dev);
+               else if (pci_match_id(igb_pci_tbl, dev->pci_dev))
+                       igb_kni_remove(dev->pci_dev);
+       }
+
+       if (dev->net_dev) {
+               unregister_netdev(dev->net_dev);
+               free_netdev(dev->net_dev);
+       }
+
+       return 0;
+}
+
+static int
 kni_release(struct inode *inode, struct file *file)
 {
        struct net *net = file->private_data;
@@ -288,81 +264,6 @@ kni_release(struct inode *inode, struct file *file)
 }

 static int
-kni_thread_single(void *data)
-{
-       struct kni_net *knet = data;
-       int j;
-       struct kni_dev *dev;
-
-       while (!kthread_should_stop()) {
-               down_read(&knet->kni_list_lock);
-               for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
-                       list_for_each_entry(dev, &knet->kni_list_head, list) {
-#ifdef RTE_KNI_VHOST
-                               kni_chk_vhost_rx(dev);
-#else
-                               kni_net_rx(dev);
-#endif
-                               kni_net_poll_resp(dev);
-                       }
-               }
-               up_read(&knet->kni_list_lock);
-#ifdef RTE_KNI_PREEMPT_DEFAULT
-               /* reschedule out for a while */
-               schedule_timeout_interruptible(
-                       usecs_to_jiffies(KNI_KTHREAD_RESCHEDULE_INTERVAL));
-#endif
-       }
-
-       return 0;
-}
-
-static int
-kni_thread_multiple(void *param)
-{
-       int j;
-       struct kni_dev *dev = (struct kni_dev *)param;
-
-       while (!kthread_should_stop()) {
-               for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
-#ifdef RTE_KNI_VHOST
-                       kni_chk_vhost_rx(dev);
-#else
-                       kni_net_rx(dev);
-#endif
-                       kni_net_poll_resp(dev);
-               }
-#ifdef RTE_KNI_PREEMPT_DEFAULT
-               schedule_timeout_interruptible(
-                       usecs_to_jiffies(KNI_KTHREAD_RESCHEDULE_INTERVAL));
-#endif
-       }
-
-       return 0;
-}
-
-static int
-kni_dev_remove(struct kni_dev *dev)
-{
-       if (!dev)
-               return -ENODEV;
-
-       if (dev->pci_dev) {
-               if (pci_match_id(ixgbe_pci_tbl, dev->pci_dev))
-                       ixgbe_kni_remove(dev->pci_dev);
-               else if (pci_match_id(igb_pci_tbl, dev->pci_dev))
-                       igb_kni_remove(dev->pci_dev);
-       }
-
-       if (dev->net_dev) {
-               unregister_netdev(dev->net_dev);
-               free_netdev(dev->net_dev);
-       }
-
-       return 0;
-}
-
-static int
 kni_check_param(struct kni_dev *kni, struct rte_kni_device_info *dev)
 {
        if (!kni || !dev)
@@ -660,6 +561,90 @@ kni_compat_ioctl(struct inode *inode,
        return -EINVAL;
 }

+static const struct file_operations kni_fops = {
+       .owner = THIS_MODULE,
+       .open = kni_open,
+       .release = kni_release,
+       .unlocked_ioctl = (void *)kni_ioctl,
+       .compat_ioctl = (void *)kni_compat_ioctl,
+};
+
+static struct miscdevice kni_misc = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = KNI_DEVICE,
+       .fops = &kni_fops,
+};
+
+static int __init
+kni_parse_kthread_mode(void)
+{
+       if (!kthread_mode)
+               return 0;
+
+       if (strcmp(kthread_mode, "single") == 0)
+               return 0;
+       else if (strcmp(kthread_mode, "multiple") == 0)
+               multiple_kthread_on = 1;
+       else
+               return -1;
+
+       return 0;
+}
+
+static int __init
+kni_init(void)
+{
+       int rc;
+
+       pr_debug("######## DPDK kni module loading ########\n");
+
+       if (kni_parse_kthread_mode() < 0) {
+               pr_err("Invalid parameter for kthread_mode\n");
+               return -EINVAL;
+       }
+
+#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
+       rc = register_pernet_subsys(&kni_net_ops);
+#else
+       rc = register_pernet_gen_subsys(&kni_net_id, &kni_net_ops);
+#endif
+       if (rc)
+               return -EPERM;
+
+       rc = misc_register(&kni_misc);
+       if (rc != 0) {
+               pr_err("Misc registration failed\n");
+               goto out;
+       }
+
+       /* Configure the lo mode according to the input parameter */
+       kni_net_config_lo_mode(lo_mode);
+
+       pr_debug("######## DPDK kni module loaded  ########\n");
+
+       return 0;
+
+out:
+#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
+       unregister_pernet_subsys(&kni_net_ops);
+#else
+       unregister_pernet_gen_subsys(kni_net_id, &kni_net_ops);
+#endif
+       return rc;
+}
+
+static void __exit
+kni_exit(void)
+{
+       misc_deregister(&kni_misc);
+#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
+       unregister_pernet_subsys(&kni_net_ops);
+#else
+       unregister_pernet_gen_subsys(kni_net_id, &kni_net_ops);
+#endif
+       pr_debug("####### DPDK kni module unloaded  #######\n");
+}
+
 module_init(kni_init);
 module_exit(kni_exit);

diff --git a/lib/librte_eal/linuxapp/kni/kni_net.c b/lib/librte_eal/linuxapp/kni/kni_net.c
index 9585879..a732cbd 100644
--- a/lib/librte_eal/linuxapp/kni/kni_net.c
+++ b/lib/librte_eal/linuxapp/kni/kni_net.c
@@ -51,17 +51,61 @@
 /* typedef for rx function */
 typedef void (*kni_net_rx_t)(struct kni_dev *kni);

-static int kni_net_tx(struct sk_buff *skb, struct net_device *dev);
 static void kni_net_rx_normal(struct kni_dev *kni);
-static void kni_net_rx_lo_fifo(struct kni_dev *kni);
-static void kni_net_rx_lo_fifo_skb(struct kni_dev *kni);
-static int kni_net_process_request(struct kni_dev *kni,
-                       struct rte_kni_request *req);

 /* kni rx function pointer, with default to normal rx */
 static kni_net_rx_t kni_net_rx_func = kni_net_rx_normal;

 /*
+ * It can be called to process the request.
+ */
+static int
+kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req)
+{
+       int ret = -1;
+       void *resp_va;
+       unsigned int num;
+       int ret_val;
+
+       if (!kni || !req) {
+               pr_err("No kni instance or request\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&kni->sync_lock);
+
+       /* Construct data */
+       memcpy(kni->sync_kva, req, sizeof(struct rte_kni_request));
+       num = kni_fifo_put(kni->req_q, &kni->sync_va, 1);
+       if (num < 1) {
+               pr_err("Cannot send to req_q\n");
+               ret = -EBUSY;
+               goto fail;
+       }
+
+       ret_val = wait_event_interruptible_timeout(kni->wq,
+                       kni_fifo_count(kni->resp_q), 3 * HZ);
+       if (signal_pending(current) || ret_val <= 0) {
+               ret = -ETIME;
+               goto fail;
+       }
+       num = kni_fifo_get(kni->resp_q, (void **)&resp_va, 1);
+       if (num != 1 || resp_va != kni->sync_va) {
+               /* This should never happen */
+               pr_err("No data in resp_q\n");
+               ret = -ENODATA;
+               goto fail;
+       }
+
+       memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request));
+       ret = 0;
+
+fail:
+       mutex_unlock(&kni->sync_lock);
+       return ret;
+}
+
+/*
  * Open and close
  */
 static int
@@ -116,6 +160,101 @@ kni_net_config(struct net_device *dev, struct ifmap *map)
 }

 /*
+ * Transmit a packet (called by the kernel)
+ */
+#ifdef RTE_KNI_VHOST
+static int
+kni_net_tx(struct sk_buff *skb, struct net_device *dev)
+{
+       struct kni_dev *kni = netdev_priv(dev);
+
+       dev_kfree_skb(skb);
+       kni->stats.tx_dropped++;
+
+       return NETDEV_TX_OK;
+}
+#else
+static int
+kni_net_tx(struct sk_buff *skb, struct net_device *dev)
+{
+       int len = 0;
+       unsigned int ret;
+       struct kni_dev *kni = netdev_priv(dev);
+       struct rte_kni_mbuf *pkt_kva = NULL;
+       struct rte_kni_mbuf *pkt_va = NULL;
+
+       /* save the timestamp */
+#ifdef HAVE_TRANS_START_HELPER
+       netif_trans_update(dev);
+#else
+       dev->trans_start = jiffies;
+#endif
+
+       /* Check if the length of skb is less than mbuf size */
+       if (skb->len > kni->mbuf_size)
+               goto drop;
+
+       /**
+        * Check if it has at least one free entry in tx_q and
+        * one entry in alloc_q.
+        */
+       if (kni_fifo_free_count(kni->tx_q) == 0 ||
+                       kni_fifo_count(kni->alloc_q) == 0) {
+               /**
+                * If no free entry in tx_q or no entry in alloc_q,
+                * drops skb and goes out.
+                */
+               goto drop;
+       }
+
+       /* dequeue a mbuf from alloc_q */
+       ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
+       if (likely(ret == 1)) {
+               void *data_kva;
+
+               pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
+               data_kva = pkt_kva->buf_addr + pkt_kva->data_off - kni->mbuf_va
+                               + kni->mbuf_kva;
+
+               len = skb->len;
+               memcpy(data_kva, skb->data, len);
+               if (unlikely(len < ETH_ZLEN)) {
+                       memset(data_kva + len, 0, ETH_ZLEN - len);
+                       len = ETH_ZLEN;
+               }
+               pkt_kva->pkt_len = len;
+               pkt_kva->data_len = len;
+
+               /* enqueue mbuf into tx_q */
+               ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
+               if (unlikely(ret != 1)) {
+                       /* Failing should not happen */
+                       pr_err("Fail to enqueue mbuf into tx_q\n");
+                       goto drop;
+               }
+       } else {
+               /* Failing should not happen */
+               pr_err("Fail to dequeue mbuf from alloc_q\n");
+               goto drop;
+       }
+
+       /* Free skb and update statistics */
+       dev_kfree_skb(skb);
+       kni->stats.tx_bytes += len;
+       kni->stats.tx_packets++;
+
+       return NETDEV_TX_OK;
+
+drop:
+       /* Free skb and update statistics */
+       dev_kfree_skb(skb);
+       kni->stats.tx_dropped++;
+
+       return NETDEV_TX_OK;
+}
+#endif
+
+/*
  * RX: normal working mode
  */
 static void
@@ -401,101 +540,6 @@ kni_net_rx(struct kni_dev *kni)
 }

 /*
- * Transmit a packet (called by the kernel)
- */
-#ifdef RTE_KNI_VHOST
-static int
-kni_net_tx(struct sk_buff *skb, struct net_device *dev)
-{
-       struct kni_dev *kni = netdev_priv(dev);
-
-       dev_kfree_skb(skb);
-       kni->stats.tx_dropped++;
-
-       return NETDEV_TX_OK;
-}
-#else
-static int
-kni_net_tx(struct sk_buff *skb, struct net_device *dev)
-{
-       int len = 0;
-       unsigned int ret;
-       struct kni_dev *kni = netdev_priv(dev);
-       struct rte_kni_mbuf *pkt_kva = NULL;
-       struct rte_kni_mbuf *pkt_va = NULL;
-
-       /* save the timestamp */
-#ifdef HAVE_TRANS_START_HELPER
-       netif_trans_update(dev);
-#else
-       dev->trans_start = jiffies;
-#endif
-
-       /* Check if the length of skb is less than mbuf size */
-       if (skb->len > kni->mbuf_size)
-               goto drop;
-
-       /**
-        * Check if it has at least one free entry in tx_q and
-        * one entry in alloc_q.
-        */
-       if (kni_fifo_free_count(kni->tx_q) == 0 ||
-                       kni_fifo_count(kni->alloc_q) == 0) {
-               /**
-                * If no free entry in tx_q or no entry in alloc_q,
-                * drops skb and goes out.
-                */
-               goto drop;
-       }
-
-       /* dequeue a mbuf from alloc_q */
-       ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
-       if (likely(ret == 1)) {
-               void *data_kva;
-
-               pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
-               data_kva = pkt_kva->buf_addr + pkt_kva->data_off - kni->mbuf_va
-                               + kni->mbuf_kva;
-
-               len = skb->len;
-               memcpy(data_kva, skb->data, len);
-               if (unlikely(len < ETH_ZLEN)) {
-                       memset(data_kva + len, 0, ETH_ZLEN - len);
-                       len = ETH_ZLEN;
-               }
-               pkt_kva->pkt_len = len;
-               pkt_kva->data_len = len;
-
-               /* enqueue mbuf into tx_q */
-               ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
-               if (unlikely(ret != 1)) {
-                       /* Failing should not happen */
-                       pr_err("Fail to enqueue mbuf into tx_q\n");
-                       goto drop;
-               }
-       } else {
-               /* Failing should not happen */
-               pr_err("Fail to dequeue mbuf from alloc_q\n");
-               goto drop;
-       }
-
-       /* Free skb and update statistics */
-       dev_kfree_skb(skb);
-       kni->stats.tx_bytes += len;
-       kni->stats.tx_packets++;
-
-       return NETDEV_TX_OK;
-
-drop:
-       /* Free skb and update statistics */
-       dev_kfree_skb(skb);
-       kni->stats.tx_dropped++;
-
-       return NETDEV_TX_OK;
-}
-#endif
-
-/*
  * Deal with a transmit timeout.
  */
 static void
@@ -557,55 +601,6 @@ kni_net_poll_resp(struct kni_dev *kni)
 }

 /*
- * It can be called to process the request.
- */
-static int
-kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req)
-{
-       int ret = -1;
-       void *resp_va;
-       unsigned int num;
-       int ret_val;
-
-       if (!kni || !req) {
-               pr_err("No kni instance or request\n");
-               return -EINVAL;
-       }
-
-       mutex_lock(&kni->sync_lock);
-
-       /* Construct data */
-       memcpy(kni->sync_kva, req, sizeof(struct rte_kni_request));
-       num = kni_fifo_put(kni->req_q, &kni->sync_va, 1);
-       if (num < 1) {
-               pr_err("Cannot send to req_q\n");
-               ret = -EBUSY;
-               goto fail;
-       }
-
-       ret_val = wait_event_interruptible_timeout(kni->wq,
-                       kni_fifo_count(kni->resp_q), 3 * HZ);
-       if (signal_pending(current) || ret_val <= 0) {
-               ret = -ETIME;
-               goto fail;
-       }
-       num = kni_fifo_get(kni->resp_q, (void **)&resp_va, 1);
-       if (num != 1 || resp_va != kni->sync_va) {
-               /* This should never happen */
-               pr_err("No data in resp_q\n");
-               ret = -ENODATA;
-               goto fail;
-       }
-
-       memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request));
-       ret = 0;
-
-fail:
-       mutex_unlock(&kni->sync_lock);
-       return ret;
-}
-
-/*
  * Return statistics to the caller
  */
 static struct net_device_stats *
-- 
2.7.4
