While the values are written only by a single thread, reading them concurrently can produce incorrect results, because 32-bit systems have no atomic 64-bit loads. This fix prevents inconsistent reads of 64-bit values on 32-bit systems.
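For reference, the new helpers follow the classic sequence-counter
(seqlock) pattern.  Below is a minimal sketch of the idea, assuming a
plausible shape for the u64-stats-sync.h interface: the header itself is
not part of this diff, so the struct, names, and memory ordering here are
illustrative only, not the actual implementation.

    /* Hypothetical sketch, not the real u64-stats-sync.h.  A writer makes
     * the counter odd while it updates the stats; a reader retries until
     * it obtains a copy bracketed by the same even counter value.
     * Sequentially consistent C11 atomics are used for clarity; a real
     * implementation would likely relax them to acquire/release. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct stats_sync_sketch {
        atomic_uint seq;            /* Odd while a write is in progress. */
    };

    static inline uint32_t
    sketch_write_begin(struct stats_sync_sketch *s)
    {
        return atomic_fetch_add(&s->seq, 1) + 1;  /* Counter becomes odd. */
    }

    static inline void
    sketch_write_end(struct stats_sync_sketch *s, uint32_t c)
    {
        atomic_store(&s->seq, c + 1);             /* Counter becomes even. */
    }

    static inline uint32_t
    sketch_read_begin(struct stats_sync_sketch *s)
    {
        return atomic_load(&s->seq);
    }

    static inline bool
    sketch_read_correct(struct stats_sync_sketch *s, uint32_t c)
    {
        /* The snapshot is usable only if no write was in progress when it
         * started ('c' is even) and none completed since (counter still
         * equals 'c'). */
        return !(c & 1) && atomic_load(&s->seq) == c;
    }

A reader then loops exactly as the hunks below do:

    do {
        c = sketch_read_begin(&sync);
        snapshot = stats;           /* Plain copy; retried if it tore. */
    } while (!sketch_read_correct(&sync, c));

Since each set of stats is written by a single thread, the write side needs
no lock, and on 64-bit systems the helpers could compile to nothing.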
Signed-off-by: Daniele Di Proietto <diproiet...@vmware.com>
---
 lib/dpif-netdev.c | 48 +++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 41 insertions(+), 7 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 2637e8d..ba677eb 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -64,6 +64,7 @@
 #include "sset.h"
 #include "timeval.h"
 #include "tnl-arp-cache.h"
+#include "u64-stats-sync.h"
 #include "unixctl.h"
 #include "util.h"
 #include "openvswitch/vlog.h"
@@ -301,6 +302,9 @@ struct dp_netdev_flow {
 
     /* Statistics. */
     struct dp_netdev_flow_stats stats;
+    /* Used to protect 'stats'. Only guarantees consistency of single stats
+     * members, not of the structure as a whole */
+    struct u64_stats_sync stats_lock;
 
     /* Actions. */
     OVSRCU_TYPE(struct dp_netdev_actions *) actions;
@@ -382,6 +386,9 @@ struct dp_netdev_pmd_thread {
 
     /* Statistics. */
     struct dp_netdev_pmd_stats stats;
+    /* Used to protect 'stats'. Only guarantees consistency of single stats
+     * members, not of the structure as a whole */
+    struct u64_stats_sync stats_lock;
 
     struct latch exit_latch;        /* For terminating the pmd thread. */
     atomic_uint change_seq;         /* For reloading pmd ports. */
@@ -754,10 +761,19 @@ dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
 
     stats->n_flows = stats->n_hit = stats->n_missed = stats->n_lost = 0;
     CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
+        struct dp_netdev_pmd_stats s;
+        uint32_t c;
+
+        do {
+            c = u64_stats_read_begin(&pmd->stats_lock);
+            s = pmd->stats;
+        } while (!u64_stats_read_correct(&pmd->stats_lock, c));
+
         stats->n_flows += cmap_count(&pmd->flow_table);
-        stats->n_hit += pmd->stats.n[DP_STAT_HIT];
-        stats->n_missed += pmd->stats.n[DP_STAT_MISS];
-        stats->n_lost += pmd->stats.n[DP_STAT_LOST];
+
+        stats->n_hit += s.n[DP_STAT_HIT];
+        stats->n_missed += s.n[DP_STAT_MISS];
+        stats->n_lost += s.n[DP_STAT_LOST];
     }
     stats->n_masks = UINT32_MAX;
     stats->n_mask_hit = UINT64_MAX;
@@ -1548,10 +1564,15 @@ static void
 get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow,
                     struct dpif_flow_stats *stats)
 {
-    stats->n_packets = netdev_flow->stats.packet_count;
-    stats->n_bytes = netdev_flow->stats.byte_count;
-    stats->used = netdev_flow->stats.used;
-    stats->tcp_flags = netdev_flow->stats.tcp_flags;
+    uint32_t c;
+    do {
+        c = u64_stats_read_begin(&netdev_flow->stats_lock);
+
+        stats->n_packets = netdev_flow->stats.packet_count;
+        stats->n_bytes = netdev_flow->stats.byte_count;
+        stats->used = netdev_flow->stats.used;
+        stats->tcp_flags = netdev_flow->stats.tcp_flags;
+    } while (!u64_stats_read_correct(&netdev_flow->stats_lock, c));
 }
 
 /* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for
@@ -1735,6 +1756,7 @@ dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
     /* Do not allocate extra space. */
     flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len);
     memset(&flow->stats, 0, sizeof flow->stats);
+    memset(&flow->stats_lock, 0, sizeof flow->stats_lock);
     flow->dead = false;
     *CONST_CAST(int *, &flow->pmd_id) = pmd->core_id;
     *CONST_CAST(struct flow *, &flow->flow) = match->flow;
@@ -2670,18 +2692,30 @@ dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, int cnt, int size,
                     uint16_t tcp_flags)
 {
     long long int now = time_msec();
+    uint32_t c;
+
+    c = u64_stats_write_begin(&netdev_flow->stats_lock);
 
     netdev_flow->stats.used = MAX(now, netdev_flow->stats.used);
     netdev_flow->stats.packet_count += cnt;
     netdev_flow->stats.byte_count += size;
     netdev_flow->stats.tcp_flags |= tcp_flags;
+
+    u64_stats_write_end(&netdev_flow->stats_lock, c);
 }
 
 static void
 dp_netdev_count_packet(struct dp_netdev_pmd_thread *pmd,
                        enum dp_stat_type type, int cnt)
 {
+    uint32_t c;
+
+    if (!cnt)
+        return;
+
+    c = u64_stats_write_begin(&pmd->stats_lock);
     pmd->stats.n[type] += cnt;
+    u64_stats_write_end(&pmd->stats_lock, c);
 }
 
 static int
-- 
2.1.4