The previous commit makes OVS create one tx queue for each cpu core, so each pmd thread uses a separate tx queue. Also, tx from non-pmd threads on a dpdk interface all goes through 'NON_PMD_THREAD_TX_QUEUE' and is protected by the 'nonpmd_mempool_mutex'. Therefore, the per-queue spinlock is no longer needed, and this commit removes it from 'struct dpdk_tx_queue'.
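To make the single-writer argument concrete, here is a minimal standalone sketch (not OVS code; 'tx_queue', 'queue_pkt', 'N_CORES' and the flush stand-in are illustrative assumptions): because each thread only ever indexes the queue array with its own core id, every queue has exactly one writer and a per-queue lock would protect nothing.

#include <stdint.h>
#include <stdio.h>

#define N_CORES          4
#define MAX_TX_QUEUE_LEN 32

/* One queue per core; the thread pinned to core 'qid' is the only
 * writer of tx_q[qid], so no tx_lock is required. */
struct tx_queue {
    int count;
    uint32_t pkts[MAX_TX_QUEUE_LEN];
};

static struct tx_queue tx_q[N_CORES];

/* Each thread passes its own core id as 'qid' and never another
 * thread's; that ownership rule is what makes lock-free access safe. */
static void
queue_pkt(int qid, uint32_t pkt)
{
    struct tx_queue *txq = &tx_q[qid];

    if (txq->count == MAX_TX_QUEUE_LEN) {
        txq->count = 0;            /* Stand-in for a queue flush. */
    }
    txq->pkts[txq->count++] = pkt;
}

int
main(void)
{
    queue_pkt(0, 0xdeadbeef);      /* e.g. called from pmd thread 0. */
    printf("queue 0 holds %d pkt(s)\n", tx_q[0].count);
    return 0;
}

The non-pmd path keeps its own serialization ('nonpmd_mempool_mutex' around 'NON_PMD_THREAD_TX_QUEUE'), so the ownership rule holds for every writer.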
Signed-off-by: Alex Wang <al...@nicira.com>
---
PATCH -> V2:
- rebase and refactor the code.
---
 lib/netdev-dpdk.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index d6bf0bd..26b1591 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -157,7 +157,6 @@ struct dpdk_mp {
 
 /* There will one 'struct dpdk_tx_queue' created for each cpu core.*/
 struct dpdk_tx_queue {
-    rte_spinlock_t tx_lock;
     bool flush_tx;              /* Set to true to flush queue everytime */
                                 /* pkts are queued. */
     int count;
@@ -512,7 +511,6 @@ netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no) OVS_REQUIRES(dpdk
     for (i = 0; i < n_cores; i++) {
         int core_id = ovs_numa_get_numa_id(i);
 
-        rte_spinlock_init(&netdev->tx_q[i].tx_lock);
         /* If the corresponding core is not on the same numa node
          * as 'netdev', flags the 'flush_tx'. */
         netdev->tx_q[i].flush_tx = netdev->socket_id == core_id;
@@ -716,9 +714,7 @@ dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
     if (txq->count == 0) {
         return;
     }
-    rte_spinlock_lock(&txq->tx_lock);
     dpdk_queue_flush__(dev, qid);
-    rte_spinlock_unlock(&txq->tx_lock);
 }
 
 static int
@@ -754,7 +750,6 @@ dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
 
     int i = 0;
 
-    rte_spinlock_lock(&txq->tx_lock);
     while (i < cnt) {
         int freeslots = MAX_TX_QUEUE_LEN - txq->count;
         int tocopy = MIN(freeslots, cnt-i);
@@ -773,7 +768,6 @@ dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
             dpdk_queue_flush__(dev, qid);
         }
     }
-    rte_spinlock_unlock(&txq->tx_lock);
 }
 
 /* Tx function. Transmit packets indefinitely */
-- 
1.7.9.5