Both the mega flow mask's reference counter and the per-flow-table mask list
should only be accessed while holding ovs_mutex. However, this was not
true of ovs_flow_tbl_flush(). This patch fixes that bug.

Signed-off-by: Andy Zhou <az...@nicira.com>
---
 datapath/datapath.c   |    8 +++++--
 datapath/flow_table.c |   60 ++++++++++++++++++++++++-------------------------
 2 files changed, 36 insertions(+), 32 deletions(-)

diff --git a/datapath/datapath.c b/datapath/datapath.c
index b42fd8b..6ae4ecf 100644
--- a/datapath/datapath.c
+++ b/datapath/datapath.c
@@ -56,6 +56,7 @@
 
 #include "datapath.h"
 #include "flow.h"
+#include "flow_table.h"
 #include "flow_netlink.h"
 #include "vlan.h"
 #include "vport-internal_dev.h"
@@ -1305,10 +1306,13 @@ static void __dp_destroy(struct datapath *dp)
        list_del_rcu(&dp->list_node);
 
        /* OVSP_LOCAL is datapath internal port. We need to make sure that
-        * all port in datapath are destroyed first before freeing datapath.
-        */
+        * all ports in datapath are destroyed first before freeing datapath.
+        */
        ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
 
+       /* Remove all flows while holding ovs_mutex */
+       ovs_flow_tbl_flush(&dp->table);
+
        call_rcu(&dp->rcu, destroy_dp_rcu);
 }
 
diff --git a/datapath/flow_table.c b/datapath/flow_table.c
index 4232b82..3ebea78 100644
--- a/datapath/flow_table.c
+++ b/datapath/flow_table.c
@@ -162,29 +162,27 @@ static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
        kfree(mask);
 }
 
-static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
+void ovs_flow_free(struct sw_flow *flow, bool deferred)
 {
-       if (!mask)
+       if (!flow)
                return;
 
-       BUG_ON(!mask->ref_count);
-       mask->ref_count--;
+       ASSERT_OVSL();
 
-       if (!mask->ref_count) {
-               list_del_rcu(&mask->list);
-               if (deferred)
-                       call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
-               else
-                       kfree(mask);
-       }
-}
+       if (flow->mask) {
+               struct sw_flow_mask *mask = flow->mask;
 
-void ovs_flow_free(struct sw_flow *flow, bool deferred)
-{
-       if (!flow)
-               return;
+               BUG_ON(!mask->ref_count);
+               mask->ref_count--;
 
-       flow_mask_del_ref(flow->mask, deferred);
+               if (!mask->ref_count) {
+                       list_del_rcu(&mask->list);
+                       if (deferred)
+                               call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
+                       else
+                               kfree(mask);
+               }
+       }
 
        if (deferred)
                call_rcu(&flow->rcu, rcu_free_flow_callback);
@@ -197,12 +195,12 @@ static void free_buckets(struct flex_array *buckets)
        flex_array_free(buckets);
 }
 
-static void __table_instance_destroy(struct table_instance *ti)
+static void table_instance_free_flows(struct table_instance *ti, bool deferred)
 {
        int i;
 
        if (ti->keep_flows)
-               goto skip_flows;
+               return;
 
        for (i = 0; i < ti->n_buckets; i++) {
                struct sw_flow *flow;
@@ -211,12 +209,14 @@ static void __table_instance_destroy(struct table_instance *ti)
                int ver = ti->node_ver;
 
                hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
-                       hlist_del(&flow->hash_node[ver]);
-                       ovs_flow_free(flow, false);
+                       hlist_del_rcu(&flow->hash_node[ver]);
+                       ovs_flow_free(flow, deferred);
                }
        }
+}
 
-skip_flows:
+static void __table_instance_destroy(struct table_instance *ti)
+{
        free_buckets(ti->buckets);
        kfree(ti);
 }
@@ -262,7 +262,8 @@ static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
 {
        struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
 
-       __table_instance_destroy(ti);
+       free_buckets(ti->buckets);
+       kfree(ti);
 }
 
 static void table_instance_destroy(struct table_instance *ti, bool deferred)
@@ -270,6 +271,8 @@ static void table_instance_destroy(struct table_instance *ti, bool deferred)
        if (!ti)
                return;
 
+       table_instance_free_flows(ti, deferred);
+
        if (deferred)
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
        else
@@ -513,16 +516,11 @@ static struct sw_flow_mask *mask_alloc(void)
 
        mask = kmalloc(sizeof(*mask), GFP_KERNEL);
        if (mask)
-               mask->ref_count = 0;
+               mask->ref_count = 1;
 
        return mask;
 }
 
-static void mask_add_ref(struct sw_flow_mask *mask)
-{
-       mask->ref_count++;
-}
-
 static bool mask_equal(const struct sw_flow_mask *a,
                       const struct sw_flow_mask *b)
 {
@@ -563,9 +561,11 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
                mask->key = new->key;
                mask->range = new->range;
                list_add_rcu(&mask->list, &tbl->mask_list);
+       } else {
+               BUG_ON(!mask->ref_count);
+               mask->ref_count++;
        }
 
-       mask_add_ref(mask);
        flow->mask = mask;
        return 0;
 }
-- 
1.7.9.5

_______________________________________________
dev mailing list
dev@openvswitch.org
http://openvswitch.org/mailman/listinfo/dev

Reply via email to