With the upcoming XPS implementation, the dpif-netdev layer will
distribute TX queues between PMD threads dynamically, and the netdev
layer will no longer know whether a TX queue is shared. So, we need
to lock TX queues unconditionally. Each TX queue still has its own
lock, so the impact on performance should be minimal.
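
For illustration, a minimal sketch (not the actual OVS code) of the
unconditional per-queue locking pattern that netdev_dpdk_send__() uses
after this patch. The structs are trimmed to the relevant fields, and
sketch_send() is a hypothetical stand-in for the send path; the field
names follow the patch below.

    /* Sketch only: simplified versions of the OVS structs. */
    #include <rte_spinlock.h>

    struct dpdk_tx_queue {
        rte_spinlock_t tx_lock;     /* Protects the NIC TX queue from
                                     * concurrent access. */
    };

    struct netdev_dpdk_sketch {
        struct dpdk_tx_queue *tx_q; /* One element per real TX queue. */
        int real_n_txq;             /* Number of TX queues actually
                                     * available on the device. */
    };

    static void
    sketch_send(struct netdev_dpdk_sketch *dev, int qid)
    {
        /* Always remap and lock: the caller may no longer know whether
         * this queue is shared among PMD threads. */
        qid = qid % dev->real_n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);

        /* ... burst packets to the NIC queue here ... */

        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }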

Signed-off-by: Ilya Maximets <i.maxim...@samsung.com>
---
 INSTALL.DPDK.md   |  9 ++++-----
 lib/netdev-dpdk.c | 22 +++++-----------------
 2 files changed, 9 insertions(+), 22 deletions(-)

diff --git a/INSTALL.DPDK.md b/INSTALL.DPDK.md
index 630c68d..bb14bb5 100644
--- a/INSTALL.DPDK.md
+++ b/INSTALL.DPDK.md
@@ -989,11 +989,10 @@ Restrictions:
     a system as described above, an error will be reported that initialization
     failed for the 65th queue. OVS will then roll back to the previous
     successful queue initialization and use that value as the total number of
-    TX queues available with queue locking. If a user wishes to use more than
-    64 queues and avoid locking, then the
-    `CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF` config parameter in DPDK must be
-    increased to the desired number of queues. Both DPDK and OVS must be
-    recompiled for this change to take effect.
+    TX queues available. If a user wishes to use more than
+    64 queues, then the `CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF` config
+    parameter in DPDK must be increased to the desired number of queues.
+    Both DPDK and OVS must be recompiled for this change to take effect.
 
 Bug Reporting:
 --------------
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index d86926c..32a15fd 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -298,9 +298,7 @@ struct dpdk_mp {
  * each cpu core. */
 struct dpdk_tx_queue {
     rte_spinlock_t tx_lock;        /* Protects the members and the NIC queue
-                                    * from concurrent access.  It is used only
-                                    * if the queue is shared among different
-                                    * pmd threads (see 'txq_needs_locking'). */
+                                    * from concurrent access. */
     int map;                       /* Mapping of configured vhost-user queues
                                     * to enabled by guest. */
 };
@@ -347,12 +345,9 @@ struct netdev_dpdk {
 
     /* dpif-netdev might request more txqs than the NIC has, also, number of tx
      * queues may be changed via database ('options:n_txq').
-     * We remap requested by dpif-netdev number on 'real_n_txq'.
-     * If the numbers match, 'txq_needs_locking' is false, otherwise it is
-     * true and we will take a spinlock on transmission */
+     * We remap requested by dpif-netdev number on 'real_n_txq'. */
     int real_n_txq;
     int real_n_rxq;
-    bool txq_needs_locking;
 
     /* virtio-net structure for vhost device */
     OVSRCU_TYPE(struct virtio_net *) virtio_dev;
@@ -1414,10 +1409,8 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
 {
     int i;
 
-    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
-        qid = qid % dev->real_n_txq;
-        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
-    }
+    qid = qid % dev->real_n_txq;
+    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
 
     if (OVS_UNLIKELY(!may_steal ||
                      pkts[0]->source != DPBUF_DPDK)) {
@@ -1479,9 +1472,7 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
         }
     }
 
-    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
-        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
-    }
+    rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
 }
 
 static int
@@ -2069,7 +2060,6 @@ netdev_dpdk_vhost_set_queues(struct netdev_dpdk *dev, struct virtio_net *virtio_
 
     dev->real_n_rxq = qp_num;
     dev->real_n_txq = qp_num;
-    dev->txq_needs_locking = true;
     /* Enable TX queue 0 by default if it wasn't disabled. */
     if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
         dev->tx_q[0].map = 0;
@@ -2683,7 +2673,6 @@ netdev_dpdk_reconfigure(struct netdev *netdev)
 
     rte_free(dev->tx_q);
     err = dpdk_eth_dev_init(dev);
-    dev->txq_needs_locking = dev->real_n_txq < ovs_numa_get_n_cores() + 1;
     netdev_dpdk_alloc_txq(dev, dev->real_n_txq);
 
 out:
@@ -2721,7 +2710,6 @@ netdev_dpdk_vhost_cuse_reconfigure(struct netdev *netdev)
     netdev->n_txq = dev->requested_n_txq;
     dev->real_n_txq = 1;
     netdev->n_rxq = 1;
-    dev->txq_needs_locking = true;
 
     ovs_mutex_unlock(&dev->mutex);
     ovs_mutex_unlock(&dpdk_mutex);
-- 
2.5.0
