Prefetch flow stats as early as possible.  This may have an impact on
throughput when many cores hammer on the same flows.

From: Jarno Rajahalme <jrajaha...@nicira.com>
Signed-off-by: Jarno Rajahalme <jrajaha...@nicira.com>
---
 datapath/datapath.c   |   13 +++++++++++++
 datapath/flow_table.c |   15 ++++++++++++++-
 2 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/datapath/datapath.c b/datapath/datapath.c
index b42fd8b..c80abe0 100644
--- a/datapath/datapath.c
+++ b/datapath/datapath.c
@@ -645,6 +645,17 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
                + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
 }
 
+static void ovs_flow_stats_prefetch_all(struct sw_flow *flow)
+{
+       if (!flow->stats.per_numa_mem)
+               spin_lock_prefetch(&flow->stats.stat->lock);
+       else {
+               int node;
+               for (node = 0; node < ovs_numa_nodes; node++)
+                       spin_lock_prefetch(&flow->stats.numa_stats[node].lock);
+       }
+}
+
 /* Called with ovs_mutex. */
 static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
                                  struct sk_buff *skb, u32 portid,
@@ -665,6 +676,8 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 
        ovs_header->dp_ifindex = get_dpifindex(dp);
 
+       ovs_flow_stats_prefetch_all(flow);
+
        /* Fill flow key. */
        nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
        if (!nla)
diff --git a/datapath/flow_table.c b/datapath/flow_table.c
index ec8e5a4..aec96cb 100644
--- a/datapath/flow_table.c
+++ b/datapath/flow_table.c
@@ -435,6 +435,17 @@ bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
        return cmp_key(&flow->unmasked_key, key, key_start, key_end);
 }
 
+static void ovs_flow_stats_prefetch(struct sw_flow *flow)
+{
+       struct flow_stats *stats;
+
+       if (!flow->stats.per_numa_mem)
+               stats = flow->stats.stat;
+       else
+               stats = &flow->stats.numa_stats[numa_node_id()];
+       spin_lock_prefetch(&stats->lock);
+}
+
 static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
                                          const struct sw_flow_key *unmasked,
                                          struct sw_flow_mask *mask)
@@ -452,8 +463,10 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
        hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
                if (flow->mask == mask && flow->hash == hash &&
                    flow_cmp_masked_key(flow, &masked_key,
-                                         key_start, key_end))
+                                       key_start, key_end)) {
+                       ovs_flow_stats_prefetch(flow);
                        return flow;
+               }
        }
        return NULL;
 }
-- 
1.7.10.4

_______________________________________________
dev mailing list
dev@openvswitch.org
http://openvswitch.org/mailman/listinfo/dev

Reply via email to