Signed-off-by: Andy Zhou <az...@nicira.com>
---
 datapath/flow_table.c | 77 +++++++++++++++++++++++++++++----------------------
 1 file changed, 44 insertions(+), 33 deletions(-)
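
For context: the lookup fast path touched below keeps a small per-CPU cache
that maps skb_hash to the index of the megaflow mask that matched last time,
probing one slot per 8-bit segment of the hash. What follows is a minimal
userspace sketch of that probe-and-replace scheme, not the datapath code
itself: the constants mirror MC_HASH_SHIFT/MC_HASH_ENTRIES/MC_HASH_SEGS,
full_lookup() is a stub standing in for flow_lookup(), and the eviction rule
shown (prefer the slot with the smaller skb_hash, so zeroed slots are reused
first) is one plausible reading of the replacement-candidate logic.

/* Standalone model of the per-CPU mask cache probe; compiles with any
 * C99 compiler.  All names here are illustrative stand-ins.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MC_HASH_SHIFT   8
#define MC_HASH_ENTRIES (1u << MC_HASH_SHIFT)
#define MC_HASH_SEGS    ((int)(sizeof(uint32_t) * 8 / MC_HASH_SHIFT))

struct mask_cache_entry {
        uint32_t skb_hash;   /* 0 means "invalid entry". */
        uint32_t mask_index; /* Mask that matched this flow last time. */
};

static struct mask_cache_entry cache[MC_HASH_ENTRIES];

/* Stub for the full flow_lookup(): pretend mask 7 always matches. */
static int full_lookup(uint32_t *mask_index)
{
        *mask_index = 7;
        return 1;
}

/* Returns 1 on a cache hit, 0 on a miss that was filled in. */
static int cached_lookup(uint32_t skb_hash)
{
        struct mask_cache_entry *ce = NULL;
        uint32_t hash = skb_hash;
        int seg;

        /* Probe one slot per MC_HASH_SHIFT-bit segment of the hash. */
        for (seg = 0; seg < MC_HASH_SEGS; seg++) {
                struct mask_cache_entry *e;

                e = &cache[hash & (MC_HASH_ENTRIES - 1)];
                if (e->skb_hash == skb_hash)
                        return 1; /* Hit: reuse e->mask_index directly. */

                if (!ce || e->skb_hash < ce->skb_hash)
                        ce = e; /* Prefer evicting an invalid (zero) slot. */

                hash >>= MC_HASH_SHIFT;
        }

        /* Miss: do the full lookup, then remember the winning mask. */
        full_lookup(&ce->mask_index);
        ce->skb_hash = skb_hash;
        return 0;
}

int main(void)
{
        memset(cache, 0, sizeof(cache));
        printf("first:  %s\n", cached_lookup(0xdeadbeef) ? "hit" : "miss");
        printf("second: %s\n", cached_lookup(0xdeadbeef) ? "hit" : "miss");
        return 0;
}
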
diff --git a/datapath/flow_table.c b/datapath/flow_table.c
index 58a25c7..97b3283 100644
--- a/datapath/flow_table.c
+++ b/datapath/flow_table.c
@@ -554,8 +554,9 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 {
         struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
         struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
-        struct mask_cache_entry *entries, *ce, *del;
+        struct mask_cache_entry *entries, *ce;
         struct sw_flow *flow;
+        struct sw_flow_mask *cache;
         u32 hash = skb_hash;
         int seg;
 
@@ -566,42 +567,53 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
                 return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
         }
 
-        del = NULL;
+        ce = NULL;
+        cache = NULL;
         entries = this_cpu_ptr(tbl->mask_cache);
 
+        /* Find the cache entry 'ce' to operate on. */
         for (seg = 0; seg < MC_HASH_SEGS; seg++) {
-                int index;
-
-                index = hash & (MC_HASH_ENTRIES - 1);
-                ce = &entries[index];
-
-                if (ce->skb_hash == skb_hash) {
-                        struct sw_flow_mask *mask;
+                struct mask_cache_entry *e;
+                int index = hash & (MC_HASH_ENTRIES - 1);
+
+                e = &entries[index];
+                if (e->skb_hash == skb_hash) {
+                        int i = e->mask_index;
+
+                        if (i < ma->count)
+                                cache = rcu_dereference_ovsl(ma->masks[i]);
+                        else if (i < ma->max) {
+                                cache = rcu_dereference_ovsl(ma->masks[i]);
+                                i = tbl_mask_array_find_idx(ma, cache);
+                                if (i < 0)
+                                        cache = NULL;
+                        }
 
-                        mask = rcu_dereference_ovsl(ma->masks[ce->mask_index]);
-                        if (mask) {
-                                flow = masked_flow_lookup(ti, key, mask,
-                                                          n_mask_hit);
-                                if (flow)  /* Found */
-                                        return flow;
+                        if (!cache)
+                                e->skb_hash = 0; /* Not a valid cache entry. */
 
-                        }
-                        del = ce;
+                        ce = e;  /* The best cache candidate. */
                         break;
                 }
 
-                if (!del || (del->skb_hash && !ce->skb_hash) ||
-                    (rcu_dereference_ovsl(ma->masks[del->mask_index]) &&
-                     !rcu_dereference_ovsl(ma->masks[ce->mask_index]))) {
-                        del = ce;
-                }
+                if (!ce || e->skb_hash > ce->skb_hash)
+                        ce = e;  /* A better replacement cache candidate. */
 
                 hash >>= MC_HASH_SHIFT;
         }
 
-        flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &del->mask_index);
+        /* Try cached mask first if a cache entry is found. */
+        if (cache) {
+                flow = masked_flow_lookup(ti, key, cache, n_mask_hit);
+                if (flow)
+                        /* Cache hit. */
+                        return flow;
+        }
+
+        /* Cache miss, do full lookup. */
+        flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
         if (flow)
-                del->skb_hash = skb_hash;
+                ce->skb_hash = skb_hash;
 
         return flow;
 }
@@ -644,18 +656,17 @@ static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
 
         if (!mask->ref_count) {
                 struct mask_array *ma;
-                int i;
 
                 ma = ovsl_dereference(tbl->mask_array);
-                for (i = 0; i < ma->max; i++) {
-                        if (mask == ovsl_dereference(ma->masks[i])) {
-                                RCU_INIT_POINTER(ma->masks[i], NULL);
-                                ma->count--;
-                                goto free;
-                        }
+                /* Shrink the mask array if necessary. */
+                if (ma->max > MASK_ARRAY_SIZE_MIN * 2
+                        && ma->count <= ma->max / 4) {
+
+                        tbl_mask_array_realloc(tbl, ma->max / 2);
+                        ma = ovsl_dereference(tbl->mask_array);
                 }
-                BUG();
-free:
+
+                tbl_mask_array_delete_mask(ma, mask);
                 call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
         }
 }
-- 
1.9.1
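
The flow_mask_remove() hunk replaces the open-coded linear scan (and the
BUG() on a missing mask) with tbl_mask_array_delete_mask() and adds an
opportunistic shrink: once the array is at most a quarter full and still
larger than twice the minimum size, it is reallocated at half capacity.
Below is a toy model of that halving policy, assuming MASK_ARRAY_SIZE_MIN
is 16 and replacing the RCU-managed tbl_mask_array_realloc() with a plain
field update.

#include <stdio.h>

#define MASK_ARRAY_SIZE_MIN 16 /* Assumed minimum size for this sketch. */

struct mask_array {
        int count; /* Masks currently in use. */
        int max;   /* Allocated slots. */
};

/* Mirror of the shrink test in flow_mask_remove() above. */
static void maybe_shrink(struct mask_array *ma)
{
        if (ma->max > MASK_ARRAY_SIZE_MIN * 2 && ma->count <= ma->max / 4)
                ma->max /= 2; /* Stands in for tbl_mask_array_realloc(). */
}

int main(void)
{
        struct mask_array ma = { .count = 60, .max = 256 };
        int prev = ma.max;

        while (ma.count > 0) {
                ma.count--; /* A mask was just deleted. */
                maybe_shrink(&ma);
                if (ma.max != prev) {
                        printf("shrunk: count=%d max=%d\n", ma.count, ma.max);
                        prev = ma.max;
                }
        }
        return 0;
}

Shrinking only at a quarter full (rather than at half) gives the array
hysteresis: a mask count oscillating around a power of two does not trigger
repeated realloc/free cycles.
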