When a flow table is copied, the mask list from the old table is not properly copied into the new table. The corrupted mask list in the new table can lead to a kernel crash. This patch fixes the bug.
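As an aside (illustrative only, not part of this patch), the hazard is the usual one with an embedded list head: a structure that contains a list head cannot simply be copied by value, because existing entries still point back at the old head, so the copy's list is not self-consistent. Below is a minimal user-space sketch of that effect; the struct and function names are hypothetical stand-ins, not the kernel's list API.

#include <stdio.h>
#include <string.h>

/* Minimal doubly linked list head, in the style of the kernel's list_head. */
struct node {
	struct node *prev, *next;
};

static void init_head(struct node *h)
{
	h->prev = h->next = h;
}

static void add_node(struct node *h, struct node *n)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

int main(void)
{
	struct table {
		struct node mask_list;	/* embedded list head */
	} old_tbl, new_tbl;
	struct node entry;

	init_head(&old_tbl.mask_list);
	add_node(&old_tbl.mask_list, &entry);

	/*
	 * Copying the table by value copies the head's prev/next pointers,
	 * but "entry" still points back at old_tbl.mask_list, so the list
	 * reachable from new_tbl.mask_list is corrupted.
	 */
	memcpy(&new_tbl, &old_tbl, sizeof(old_tbl));

	printf("entry->prev points at the copied head? %s\n",
	       entry.prev == &new_tbl.mask_list ? "yes" : "no, still the old head");
	return 0;
}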
Bug #18110
Reported-by: Justin Pettit <jpet...@nicira.com>
-------
v1 -> v2:
Jesse pointed out a race condition in v1: the fast path can be walking the
list while the head is being replaced. In v2, instead of embedding the
mask_list head in the flow table data structure, the table only holds a
pointer to the head. The actual head is allocated at run time and stays
consistent with the mask list.

Signed-off-by: Andy Zhou <az...@nicira.com>
---
 datapath/flow.c |   36 ++++++++++++++++++++++++++++--------
 datapath/flow.h |    2 +-
 2 files changed, 29 insertions(+), 9 deletions(-)

diff --git a/datapath/flow.c b/datapath/flow.c
index 38b9502..864c31c 100644
--- a/datapath/flow.c
+++ b/datapath/flow.c
@@ -461,7 +461,7 @@ struct flow_table *ovs_flow_tbl_alloc(int new_size)
 	table->node_ver = 0;
 	table->keep_flows = false;
 	get_random_bytes(&table->hash_seed, sizeof(u32));
-	INIT_LIST_HEAD(&table->mask_list);
+	table->mask_list = NULL;
 
 	return table;
 }
@@ -485,6 +485,11 @@ static void __flow_tbl_destroy(struct flow_table *table)
 		}
 	}
 
+	if (table->mask_list) {
+		BUG_ON(!list_empty(table->mask_list));
+		kfree(table->mask_list);
+	}
+
 skip_flows:
 	free_buckets(table->buckets);
 	kfree(table);
@@ -1028,12 +1033,15 @@ struct sw_flow *ovs_flow_lookup(struct flow_table *tbl,
 				const struct sw_flow_key *key)
 {
 	struct sw_flow *flow = NULL;
-	struct sw_flow_mask *mask;
 
-	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
-		flow = ovs_masked_flow_lookup(tbl, key, mask);
-		if (flow) /* Found */
-			break;
+	if (tbl->mask_list) {
+		struct sw_flow_mask *mask;
+
+		list_for_each_entry_rcu(mask, tbl->mask_list, list) {
+			flow = ovs_masked_flow_lookup(tbl, key, mask);
+			if (flow) /* Found */
+				break;
+		}
 	}
 
 	return flow;
@@ -1843,7 +1851,10 @@ struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
 {
 	struct list_head *ml;
 
-	list_for_each(ml, &tbl->mask_list) {
+	if (!tbl->mask_list)
+		return NULL;
+
+	list_for_each(ml, tbl->mask_list) {
 		struct sw_flow_mask *m;
 		m = container_of(ml, struct sw_flow_mask, list);
 		if (ovs_sw_flow_mask_equal(mask, m))
@@ -1860,7 +1871,16 @@ struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
  */
 void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
 {
-	list_add_rcu(&mask->list, &tbl->mask_list);
+	if (!tbl->mask_list) {
+		tbl->mask_list = (struct list_head *)
+			kmalloc(sizeof(struct list_head), GFP_KERNEL);
+
+		if (tbl->mask_list)
+			INIT_LIST_HEAD(tbl->mask_list);
+	}
+
+	if (tbl->mask_list)
+		list_add_rcu(&mask->list, tbl->mask_list);
 }
 
 /**
diff --git a/datapath/flow.h b/datapath/flow.h
index 45eba03..a31dab0 100644
--- a/datapath/flow.h
+++ b/datapath/flow.h
@@ -191,7 +191,7 @@ struct flow_table {
 	struct flex_array *buckets;
 	unsigned int count, n_buckets;
 	struct rcu_head rcu;
-	struct list_head mask_list;
+	struct list_head *mask_list;
 	int node_ver;
 	u32 hash_seed;
 	bool keep_flows;
-- 
1.7.9.5

_______________________________________________
dev mailing list
dev@openvswitch.org
http://openvswitch.org/mailman/listinfo/dev