With a large number of ports, one of the performance bottlenecks is that
the main loop iterates over every netdev. Miimon contributes to this,
since it runs even when it is not enabled on any device.

This patch introduces a counter of the netdevs that have miimon
configured. If the counter is zero, we skip netdev_linux_miimon_run()
and netdev_linux_miimon_wait(). In a test environment of 5000 internal
ports and 50 tunnel ports running BFD, this reduces CPU usage from
about 50% to about 45%.

Signed-off-by: Joe Stringer <joestrin...@nicira.com>
---
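Note for reviewers: below is a minimal standalone sketch of the gating
pattern this patch applies. It uses standard C11 <stdatomic.h> rather
than lib/ovs-atomic.h, and the names (miimon_users, miimon_run) are
illustrative only, not OVS symbols; the actual change is in the diff.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int miimon_users = ATOMIC_VAR_INIT(0);

static void
miimon_run(void)
{
    /* Stands in for netdev_linux_miimon_run()/_wait(): bail out early
     * while no device has miimon configured. */
    if (!atomic_load(&miimon_users)) {
        return;                          /* Fast path: nothing to poll. */
    }
    printf("polling MII status for miimon devices\n");
}

int
main(void)
{
    miimon_run();                        /* Skipped: counter is zero. */
    atomic_fetch_add(&miimon_users, 1);  /* A device enables miimon. */
    miimon_run();                        /* Now actually polls. */
    return 0;
}

The fast path costs a single atomic load per main-loop iteration, which
is negligible compared to walking thousands of netdevs.
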
 lib/netdev-linux.c |   26 ++++++++++++++++++++++++--
 vswitchd/bridge.c  |    2 ++
 2 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/lib/netdev-linux.c b/lib/netdev-linux.c
index 2752623..82e1358 100644
--- a/lib/netdev-linux.c
+++ b/lib/netdev-linux.c
@@ -61,6 +61,7 @@
 #include "netlink.h"
 #include "ofpbuf.h"
 #include "openflow/openflow.h"
+#include "ovs-atomic.h"
 #include "packets.h"
 #include "poll-loop.h"
 #include "rtnetlink-link.h"
@@ -402,6 +403,11 @@ struct netdev_rx_linux {
  * additional log messages. */
 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
 
+/* Polling miimon status for all ports causes performance degradation when
+ * handling a large number of ports. If there are no devices using miimon, then
+ * we skip netdev_linux_miimon_run() and netdev_linux_miimon_wait(). */
+static atomic_int miimon_cnt = ATOMIC_VAR_INIT(0);
+
 static void netdev_linux_run(void);
 
 static int netdev_linux_do_ethtool(const char *name, struct ethtool_cmd *,
@@ -490,8 +496,12 @@ netdev_linux_run(void)
 {
     struct nl_sock *sock;
     int error;
+    int miimon;
 
-    netdev_linux_miimon_run();
+    atomic_read(&miimon_cnt, &miimon);
+    if (miimon) {
+        netdev_linux_miimon_run();
+    }
 
     sock = netdev_linux_notify_sock();
     if (!sock) {
@@ -552,8 +562,12 @@ static void
 netdev_linux_wait(void)
 {
     struct nl_sock *sock;
+    int miimon;
 
-    netdev_linux_miimon_wait();
+    atomic_read(&miimon_cnt, &miimon);
+    if (miimon) {
+        netdev_linux_miimon_wait();
+    }
     sock = netdev_linux_notify_sock();
     if (sock) {
         nl_sock_wait(sock, POLLIN);
@@ -1222,6 +1236,14 @@ netdev_linux_set_miimon_interval(struct netdev *netdev_,
     ovs_mutex_lock(&netdev->mutex);
     interval = interval > 0 ? MAX(interval, 100) : 0;
     if (netdev->miimon_interval != interval) {
+        int junk;
+
+        if (interval && !netdev->miimon_interval) {
+            atomic_add(&miimon_cnt, 1, &junk);
+        } else if (!interval && netdev->miimon_interval) {
+            atomic_sub(&miimon_cnt, 1, &junk);
+        }
+
         netdev->miimon_interval = interval;
         timer_set_expired(&netdev->miimon_timer);
     }
diff --git a/vswitchd/bridge.c b/vswitchd/bridge.c
index ec3633c..1e1ef10 100644
--- a/vswitchd/bridge.c
+++ b/vswitchd/bridge.c
@@ -3470,6 +3470,8 @@ iface_destroy(struct iface *iface)
         list_remove(&iface->port_elem);
         hmap_remove(&br->iface_by_name, &iface->name_node);
 
+        /* Ensure that miimon netdevs are counted correctly. */
+        netdev_set_miimon_interval(iface->netdev, 0);
         netdev_close(iface->netdev);
 
         free(iface->name);
-- 
1.7.9.5
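
P.S. One subtlety worth calling out: the bridge.c hunk resets the miimon
interval to zero in iface_destroy() so that a device destroyed with
miimon still enabled does not leave the counter permanently elevated.
A standalone sketch of that balance, again with C11 atomics and
hypothetical fake_* names rather than the real OVS functions:

#include <assert.h>
#include <stdatomic.h>

static atomic_int miimon_users = ATOMIC_VAR_INIT(0);

struct fake_netdev {
    int miimon_interval;
};

static void
fake_set_miimon_interval(struct fake_netdev *dev, int interval)
{
    /* The counter only moves when the interval crosses between zero and
     * non-zero, mirroring netdev_linux_set_miimon_interval(). */
    if (interval && !dev->miimon_interval) {
        atomic_fetch_add(&miimon_users, 1);
    } else if (!interval && dev->miimon_interval) {
        atomic_fetch_sub(&miimon_users, 1);
    }
    dev->miimon_interval = interval;
}

static void
fake_iface_destroy(struct fake_netdev *dev)
{
    /* Mirrors the iface_destroy() hunk: drop this device's contribution
     * to the counter before the device goes away.  The real code calls
     * netdev_close() right after this. */
    fake_set_miimon_interval(dev, 0);
}

int
main(void)
{
    struct fake_netdev dev = { .miimon_interval = 0 };

    fake_set_miimon_interval(&dev, 100);  /* Counter goes to 1. */
    fake_iface_destroy(&dev);             /* Counter drops back to 0. */
    assert(atomic_load(&miimon_users) == 0);
    return 0;
}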
