On 04/08/2014 12:00 AM, Pravin wrote:
+struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
+					   const struct sw_flow_key *key,
+					   u32 skb_hash,
+					   u32 *n_mask_hit)
+{
+	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+	struct mask_cache_entry *entries, *ce, *del;
+	struct sw_flow *flow;
+	u32 hash = skb_hash;
+	int seg;
+
+	del = NULL;
+	entries = this_cpu_ptr(tbl->mask_cache);
+
+	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
+		int index;
+
+		index = hash & (MC_HASH_ENTRIES - 1);
+		ce = &entries[index];
+
+		if (ce->skb_hash == skb_hash) {
+			struct sw_flow_mask *mask;
+			int i;
+
+			i = 0;
+			list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
+				if (ce->mask_index == i++) {
+					flow = masked_flow_lookup(ti, key, mask);
+					if (flow)  /* Found */
+						return flow;
+
+					ce->skb_hash = 0;
One more comment: perhaps break out of the loop here, since it should be impossible to hit another cache entry at this point? Setting del = ce before breaking out would allow the flow mask that applies to be cached again right away; ce is guaranteed to be unused at this point.
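Roughly what I have in mind, as an untested sketch (the "lookup" label is only for illustration and would sit right before the flow_lookup() call further down):

			i = 0;
			list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
				if (ce->mask_index == i++) {
					flow = masked_flow_lookup(ti, key, mask);
					if (flow)  /* Found */
						return flow;

					/* Cached mask is stale: free the entry,
					 * keep it as the slot to refill, and skip
					 * probing the remaining hash segments. */
					ce->skb_hash = 0;
					del = ce;
					goto lookup;
				}
			}

That way the entry that just went stale is the one refilled with the mask index found by the full lookup below.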
+				}
+			}
+		}
+
+		if (!del || (del->skb_hash && !ce->skb_hash)) {
+			del = ce;
+		}
+
+		hash >>= MC_HASH_SHIFT;
+	}
+	flow = flow_lookup(tbl, ti, key, n_mask_hit);
+
+	if (flow) {
+		del->skb_hash = skb_hash;
+		del->mask_index = (*n_mask_hit - 1);
+	}
+	return flow;
+}
+