Most per-cpu stats are never used and remain at zero. Avoid the locking overhead for such stats. In the unlikely event that a concurrent writer holds the lock for the first write on the stats, we skip spinning on the lock and will not get those stats at this time, but will pick them up the next time the stats are read.
Proposed by Ben Pfaff as an optimization for stats reading overhead. Signed-off-by: Jarno Rajahalme <jrajaha...@nicira.com> --- datapath/flow.c | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/datapath/flow.c b/datapath/flow.c index 8be3801..b1b2596 100644 --- a/datapath/flow.c +++ b/datapath/flow.c @@ -91,21 +91,23 @@ static void stats_read(struct flow_stats *stats, bool lock_bh, struct ovs_flow_stats *ovs_stats, unsigned long *used, __be16 *tcp_flags) { - if (lock_bh) - spin_lock_bh(&stats->lock); - else - spin_lock(&stats->lock); + if (stats->packet_count) { + if (lock_bh) + spin_lock_bh(&stats->lock); + else + spin_lock(&stats->lock); - if (time_after(stats->used, *used)) - *used = stats->used; - *tcp_flags |= stats->tcp_flags; - ovs_stats->n_packets += stats->packet_count; - ovs_stats->n_bytes += stats->byte_count; + if (time_after(stats->used, *used)) + *used = stats->used; + *tcp_flags |= stats->tcp_flags; + ovs_stats->n_packets += stats->packet_count; + ovs_stats->n_bytes += stats->byte_count; - if (lock_bh) - spin_unlock_bh(&stats->lock); - else - spin_unlock(&stats->lock); + if (lock_bh) + spin_unlock_bh(&stats->lock); + else + spin_unlock(&stats->lock); + } } void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats, -- 1.7.10.4 _______________________________________________ dev mailing list dev@openvswitch.org http://openvswitch.org/mailman/listinfo/dev