The poll_block() function already has some state in thread-specific storage that's used as a registry for all of the fds to poll() on the next time poll_block() gets called. poll_block() was calling malloc() and free() every time it was called to create the pollfd and wevents arrays needed to pass to poll(). We can optimize this a bit by caching these two allocations in the existing thread-specific storage and reallocating them to a larger size only when necessary.
Signed-off-by: Russell Bryant <rbry...@redhat.com> --- lib/poll-loop.c | 40 ++++++++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 12 deletions(-) v1->v2: - Actually free the two allocations in free_poll_loop(). Oops. v2->v3: - Simplify call to realloc() since it can take NULL as suggested by Jarno Rajahalme. diff --git a/lib/poll-loop.c b/lib/poll-loop.c index 3c4b55c..65366b2 100644 --- a/lib/poll-loop.c +++ b/lib/poll-loop.c @@ -53,6 +53,16 @@ struct poll_loop { * wake up immediately, or LLONG_MAX to wait forever. */ long long int timeout_when; /* In msecs as returned by time_msec(). */ const char *timeout_where; /* Where 'timeout_when' was set. */ + + /* A cached array of pollfds that gets filled in each time poll_block() is + * called and is expanded as needed. */ + struct pollfd *pollfds; + size_t n_pollfds; + + /* A cached array of wevents that gets filled in each time poll_block() is + * called and is expanded as needed. */ + HANDLE *wevents; + size_t n_wevents; }; static struct poll_loop *poll_loop(void); @@ -314,8 +324,6 @@ poll_block(void) { struct poll_loop *loop = poll_loop(); struct poll_node *node; - struct pollfd *pollfds; - HANDLE *wevents = NULL; int elapsed; int retval; int i; @@ -329,18 +337,26 @@ poll_block(void) } timewarp_run(); - pollfds = xmalloc(hmap_count(&loop->poll_nodes) * sizeof *pollfds); + if (loop->n_pollfds < hmap_count(&loop->poll_nodes)) { + size_t bytes = hmap_count(&loop->poll_nodes) * sizeof *loop->pollfds; + loop->pollfds = xrealloc(loop->pollfds, bytes); + loop->n_pollfds = hmap_count(&loop->poll_nodes); + } #ifdef _WIN32 - wevents = xmalloc(hmap_count(&loop->poll_nodes) * sizeof *wevents); + if (loop->n_wevents < hmap_count(&loop->poll_nodes)) { + size_t bytes = hmap_count(&loop->poll_nodes) * sizeof *loop->wevents; + loop->wevents = xrealloc(loop->wevents, bytes); + loop->n_wevents = hmap_count(&loop->poll_nodes); + } #endif /* Populate with all the fds and events. 
*/ i = 0; HMAP_FOR_EACH (node, hmap_node, &loop->poll_nodes) { - pollfds[i] = node->pollfd; + loop->pollfds[i] = node->pollfd; #ifdef _WIN32 - wevents[i] = node->wevent; + loop->wevents[i] = node->wevent; if (node->pollfd.fd && node->wevent) { short int wsa_events = 0; if (node->pollfd.events & POLLIN) { @@ -355,8 +371,8 @@ poll_block(void) i++; } - retval = time_poll(pollfds, hmap_count(&loop->poll_nodes), wevents, - loop->timeout_when, &elapsed); + retval = time_poll(loop->pollfds, hmap_count(&loop->poll_nodes), + loop->wevents, loop->timeout_when, &elapsed); if (retval < 0) { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); VLOG_ERR_RL(&rl, "poll: %s", ovs_strerror(-retval)); @@ -365,8 +381,8 @@ poll_block(void) } else if (get_cpu_usage() > 50 || VLOG_IS_DBG_ENABLED()) { i = 0; HMAP_FOR_EACH (node, hmap_node, &loop->poll_nodes) { - if (pollfds[i].revents) { - log_wakeup(node->where, &pollfds[i], 0); + if (loop->pollfds[i].revents) { + log_wakeup(node->where, &loop->pollfds[i], 0); } i++; } @@ -375,8 +391,6 @@ poll_block(void) free_poll_nodes(loop); loop->timeout_when = LLONG_MAX; loop->timeout_where = NULL; - free(pollfds); - free(wevents); /* Handle any pending signals before doing anything else. */ fatal_signal_run(); @@ -391,6 +405,8 @@ free_poll_loop(void *loop_) free_poll_nodes(loop); hmap_destroy(&loop->poll_nodes); + free(loop->pollfds); + free(loop->wevents); free(loop); } -- 2.1.0 _______________________________________________ dev mailing list dev@openvswitch.org http://openvswitch.org/mailman/listinfo/dev