Right now ethernet and ring devices use a mutex, while vhost devices use
either a mutex or a spinlock to protect statistics.  This commit introduces
a single spinlock that is always used for stats updates, both in the rx/tx
paths and in the stats retrieval functions.
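The locking pattern applied throughout is a short critical section around
each counter update.  As a rough illustration of the idea (the struct and
function below are made up for this example, they are not the actual
netdev_dpdk code):

    #include <stdint.h>
    #include <rte_spinlock.h>

    struct example_dev {
        rte_spinlock_t stats_lock;  /* Protects 'tx_dropped'. */
        uint64_t tx_dropped;
    };

    static void
    example_count_drops(struct example_dev *dev, unsigned int dropped)
    {
        /* Hold the spinlock only for the counter update itself, keeping
         * the critical section as short as possible. */
        rte_spinlock_lock(&dev->stats_lock);
        dev->tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

The lock itself only needs to be initialized once, at device setup time,
with rte_spinlock_init().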

Signed-off-by: Daniele Di Proietto <diproiet...@vmware.com>
---
 lib/netdev-dpdk.c | 29 +++++++++++++++++++++--------
 1 file changed, 21 insertions(+), 8 deletions(-)

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index f69154b..16bdae3 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -221,6 +221,8 @@ struct netdev_dpdk {
     int socket_id;
     int buf_size;
     struct netdev_stats stats;
+    /* Protects stats */
+    rte_spinlock_t stats_lock;
 
     uint8_t hwaddr[ETH_ADDR_LEN];
     enum netdev_flags flags;
@@ -531,6 +533,8 @@ netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
     ovs_mutex_init(&netdev->mutex);
     ovs_mutex_lock(&netdev->mutex);
 
+    rte_spinlock_init(&netdev->stats_lock);
+
     /* If the 'sid' is negative, it means that the kernel fails
      * to obtain the pci numa info.  In that situation, always
      * use 'SOCKET0'. */
@@ -812,9 +816,9 @@ dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
         for (i = nb_tx; i < txq->count; i++) {
             rte_pktmbuf_free_seg(txq->burst_pkts[i]);
         }
-        ovs_mutex_lock(&dev->mutex);
+        rte_spinlock_lock(&dev->stats_lock);
         dev->stats.tx_dropped += txq->count-nb_tx;
-        ovs_mutex_unlock(&dev->mutex);
+        rte_spinlock_unlock(&dev->stats_lock);
     }
 
     txq->count = 0;
@@ -864,7 +868,10 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
         return EAGAIN;
     }
 
+    rte_spinlock_lock(&vhost_dev->stats_lock);
     vhost_dev->stats.rx_packets += (uint64_t)nb_rx;
+    rte_spinlock_unlock(&vhost_dev->stats_lock);
+
     *c = (int) nb_rx;
     return 0;
 }
@@ -906,9 +913,9 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, struct dp_packet **pkts,
     int tx_pkts, i;
 
     if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
-       ovs_mutex_lock(&vhost_dev->mutex);
+        rte_spinlock_lock(&vhost_dev->stats_lock);
        vhost_dev->stats.tx_dropped+= cnt;
-       ovs_mutex_unlock(&vhost_dev->mutex);
+        rte_spinlock_unlock(&vhost_dev->stats_lock);
        goto out;
     }
 
@@ -917,8 +924,10 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, struct dp_packet **pkts,
     tx_pkts = rte_vhost_enqueue_burst(virtio_dev, VIRTIO_RXQ,
                                       (struct rte_mbuf **)pkts, cnt);
 
+    rte_spinlock_lock(&vhost_dev->stats_lock);
     vhost_dev->stats.tx_packets += tx_pkts;
     vhost_dev->stats.tx_dropped += (cnt - tx_pkts);
+    rte_spinlock_unlock(&vhost_dev->stats_lock);
     rte_spinlock_unlock(&vhost_dev->txq_lock);
 
 out:
@@ -1005,9 +1014,9 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
     }
 
     if (OVS_UNLIKELY(dropped)) {
-        ovs_mutex_lock(&dev->mutex);
+        rte_spinlock_lock(&dev->stats_lock);
         dev->stats.tx_dropped += dropped;
-        ovs_mutex_unlock(&dev->mutex);
+        rte_spinlock_unlock(&dev->stats_lock);
     }
 
     if (dev->type == DPDK_DEV_VHOST) {
@@ -1086,9 +1095,9 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
         }
 
         if (OVS_UNLIKELY(dropped)) {
-            ovs_mutex_lock(&dev->mutex);
+            rte_spinlock_lock(&dev->stats_lock);
             dev->stats.tx_dropped += dropped;
-            ovs_mutex_unlock(&dev->mutex);
+            rte_spinlock_unlock(&dev->stats_lock);
         }
     }
 }
@@ -1223,10 +1232,12 @@ netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
     stats->rx_dropped += UINT64_MAX;
     stats->tx_bytes += UINT64_MAX;
 
+    rte_spinlock_lock(&dev->stats_lock);
     /* Supported Stats */
     stats->rx_packets += dev->stats.rx_packets;
     stats->tx_packets += dev->stats.tx_packets;
     stats->tx_dropped += dev->stats.tx_dropped;
+    rte_spinlock_unlock(&dev->stats_lock);
     ovs_mutex_unlock(&dev->mutex);
 
     return 0;
@@ -1253,7 +1264,9 @@ netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
     stats->tx_errors = rte_stats.oerrors;
     stats->multicast = rte_stats.imcasts;
 
+    rte_spinlock_lock(&dev->stats_lock);
     stats->tx_dropped = dev->stats.tx_dropped;
+    rte_spinlock_unlock(&dev->stats_lock);
     ovs_mutex_unlock(&dev->mutex);
 
     return 0;
-- 
2.1.4
