Set the number of rx queues on a dpdk iface to the number of dpdk ifaces
on its cpu socket, and create one tx queue for each pmd thread
(rte_lcore_count()).  This will be the default setting for dpdk ifaces;
a minimal sketch of the resulting defaults follows the diffstat below.

Signed-off-by: Alex Wang <al...@nicira.com>
---
 lib/netdev-dpdk.c |   47 ++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 40 insertions(+), 7 deletions(-)
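
For context, a minimal sketch (not part of the patch) of how the defaults
are derived for one port.  It uses only DPDK calls that the patch itself
relies on; the helper name and the port_id parameter are placeholders for
illustration:

    #include <rte_ethdev.h>
    #include <rte_lcore.h>

    /* Sketch only: how this change sizes the queues of one dpdk port. */
    static void
    sketch_queue_defaults(uint8_t port_id, int *n_rx_q, int *n_tx_q)
    {
        int socket_id = rte_eth_dev_socket_id(port_id);
        int devs_on_socket = 0;
        int i;

        /* One rx queue per dpdk iface on the same cpu socket. */
        for (i = 0; i < rte_eth_dev_count(); i++) {
            if (rte_eth_dev_socket_id(i) == socket_id) {
                devs_on_socket++;
            }
        }
        *n_rx_q = devs_on_socket;

        /* One tx queue per possible pmd thread; at transmit time each
         * thread indexes its queue directly with rte_lcore_id(). */
        *n_tx_q = rte_lcore_count();
    }

For example, with two dpdk ports on socket 0 and four lcores handed to
OVS, each of those ports would get 2 rx queues and 4 tx queues by default.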

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index e2ce1ea..6268c10 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -36,6 +36,7 @@
 #include "odp-util.h"
 #include "ofp-print.h"
 #include "ofpbuf.h"
+#include "ovs-numa.h"
 #include "ovs-thread.h"
 #include "ovs-rcu.h"
 #include "packet-dpif.h"
@@ -154,7 +155,9 @@ struct netdev_dpdk {
     int port_id;
     int max_packet_len;
 
-    struct dpdk_tx_queue tx_q[NR_QUEUE];
+    struct dpdk_tx_queue *tx_q;
+    int n_tx_q;
+    int n_rx_q;
 
     struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);
 
@@ -353,6 +356,25 @@ dpdk_watchdog(void *dummy OVS_UNUSED)
     return NULL;
 }
 
+/* Returns the number of dpdk ifaces on the cpu socket. */
+static int
+dpdk_get_n_devs(uint32_t socket_id)
+{
+    int count = 0;
+    int i;
+
+    CPU_SOCKET_ID_ASSERT(socket_id);
+
+    for (i = 0; i < rte_eth_dev_count(); i++) {
+        if (rte_eth_dev_socket_id(i) == socket_id) {
+            count++;
+        }
+    }
+    ovs_assert(count);
+
+    return count;
+}
+
 static int
 dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
 {
@@ -365,13 +387,14 @@ dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
         return -ENODEV;
     }
 
-    diag = rte_eth_dev_configure(dev->port_id, NR_QUEUE, NR_QUEUE, &port_conf);
+    diag = rte_eth_dev_configure(dev->port_id, dev->n_rx_q, dev->n_tx_q,
+                                 &port_conf);
     if (diag) {
         VLOG_ERR("eth dev config error %d",diag);
         return diag;
     }
 
-    for (i = 0; i < NR_QUEUE; i++) {
+    for (i = 0; i < dev->n_tx_q; i++) {
         diag = rte_eth_tx_queue_setup(dev->port_id, i, MAX_TX_QUEUE_LEN,
                                       dev->socket_id, &tx_conf);
         if (diag) {
@@ -380,7 +403,7 @@ dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
         }
     }
 
-    for (i = 0; i < NR_QUEUE; i++) {
+    for (i = 0; i < dev->n_rx_q; i++) {
         diag = rte_eth_rx_queue_setup(dev->port_id, i, MAX_RX_QUEUE_LEN,
                                       dev->socket_id,
                                       &rx_conf, dev->dpdk_mp->mp);
@@ -450,7 +473,13 @@ netdev_dpdk_construct(struct netdev *netdev_)
 
     port_no = strtol(cport, 0, 0); /* string must be null terminated */
 
-    for (i = 0; i < NR_QUEUE; i++) {
+    netdev->n_rx_q = dpdk_get_n_devs(netdev->socket_id);
+
+    /* There can only be rte_lcore_count() pmd threads, so create a tx_q
+     * for each of them. */
+    netdev->n_tx_q = rte_lcore_count();
+    netdev->tx_q = dpdk_rte_mzalloc(netdev->n_tx_q * sizeof *netdev->tx_q);
+    for (i = 0; i < netdev->n_tx_q; i++) {
         rte_spinlock_init(&netdev->tx_q[i].tx_lock);
     }
 
@@ -476,11 +505,14 @@ netdev_dpdk_construct(struct netdev *netdev_)
     if (err) {
         goto unlock_dev;
     }
-    netdev_->n_rxq = NR_QUEUE;
+    netdev_->n_rxq = netdev->n_rx_q;
 
     list_push_back(&dpdk_list, &netdev->list_node);
 
 unlock_dev:
+    if (err) {
+        rte_free(netdev->tx_q);
+    }
     ovs_mutex_unlock(&netdev->mutex);
 unlock_dpdk:
     ovs_mutex_unlock(&dpdk_mutex);
@@ -497,6 +529,7 @@ netdev_dpdk_destruct(struct netdev *netdev_)
     ovs_mutex_unlock(&dev->mutex);
 
     ovs_mutex_lock(&dpdk_mutex);
+    rte_free(dev->tx_q);
     list_remove(&dev->list_node);
     dpdk_mp_put(dev->dpdk_mp);
     ovs_mutex_unlock(&dpdk_mutex);
@@ -725,7 +758,7 @@ netdev_dpdk_send(struct netdev *netdev, struct dpif_packet **pkts, int cnt,
         int next_tx_idx = 0;
         int dropped = 0;
 
-        qid = rte_lcore_id() % NR_QUEUE;
+        qid = rte_lcore_id();
 
         for (i = 0; i < cnt; i++) {
             int size = ofpbuf_size(&pkts[i]->ofpbuf);
-- 
1.7.9.5
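
On the send side there is now exactly one tx queue per lcore, so the old
"% NR_QUEUE" wrap-around is gone and a pmd thread's queue id is simply its
lcore id.  Roughly, as a sketch of the per-queue locking pattern only (the
helper name is made up; dev, burst and n stand in for the real variables
in the send path):

    /* Sketch only: per-lcore tx queue selection after this change. */
    static void
    sketch_send_burst(struct netdev_dpdk *dev, struct rte_mbuf **burst, int n)
    {
        int qid = rte_lcore_id();    /* one tx_q per lcore, no modulo */
        int sent;

        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
        sent = rte_eth_tx_burst(dev->port_id, qid, burst, n);
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);

        /* The real code retries or drops the (n - sent) packets that the
         * NIC did not accept; omitted here. */
        (void) sent;
    }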
