This requires less locking and makes lockless use of the classifier simpler.

Signed-off-by: Jarno Rajahalme <jrajaha...@nicira.com>
---
 lib/dpif-netdev.c | 51 ++++++++++++++++++++++++---------------------------
 1 file changed, 24 insertions(+), 27 deletions(-)
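Editor's note (not part of the patch): the point of moving 'flow_table' from
hmap to cmap is that cmap readers take no lock at all; writers stay
serialized by 'flow_mutex', and a removed node is freed only after all
current readers quiesce (RCU).  Below is a minimal sketch of that pattern,
using the same cmap calls the diff exercises; 'struct my_flow', 'flows', and
the helper functions are hypothetical, for illustration only.

/* Sketch only, not part of the patch.  Assumes OVS's lib/cmap.h,
 * lib/ovs-rcu.h, and lib/ovs-thread.h. */
#include <stdint.h>
#include <stdlib.h>
#include "cmap.h"
#include "ovs-rcu.h"
#include "ovs-thread.h"

struct my_flow {
    struct cmap_node node;          /* Within 'flows'. */
    uint32_t key;
};

static struct cmap flows;           /* cmap_init(&flows) at startup. */
static struct ovs_mutex flows_mutex = OVS_MUTEX_INITIALIZER;

/* Reader: takes no lock.  cmap iteration is safe against concurrent
 * inserts and removals. */
static struct my_flow *
my_flow_find(uint32_t key, uint32_t hash)
{
    struct my_flow *f;

    CMAP_FOR_EACH_WITH_HASH (f, node, hash, &flows) {
        if (f->key == key) {
            return f;
        }
    }
    return NULL;
}

/* Writer: mutations are still serialized by a mutex, and the removed node
 * may only be freed after every reader that might hold it is done. */
static void
my_flow_remove(struct my_flow *f, uint32_t hash)
{
    ovs_mutex_lock(&flows_mutex);
    cmap_remove(&flows, &f->node, hash);
    ovs_mutex_unlock(&flows_mutex);
    ovsrcu_postpone(free, f);
}

The diff below applies exactly this split to dp_netdev's 'flow_table':
readers drop the 'cls->rwlock', writers keep 'flow_mutex'.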
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index fce2650..629414c 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -39,7 +39,7 @@
 #include "dummy.h"
 #include "dynamic-string.h"
 #include "flow.h"
-#include "hmap.h"
+#include "cmap.h"
 #include "latch.h"
 #include "list.h"
 #include "meta-flow.h"
@@ -136,15 +136,17 @@ struct dp_netdev {
 
     /* Flows.
      *
-     * Readers of 'cls' and 'flow_table' must take a 'cls->rwlock' read lock.
+     * Readers of 'cls' must take a 'cls->rwlock' read lock.
+     *
+     * Writers of 'flow_table' must take the 'flow_mutex'.
      *
-     * Writers of 'cls' and 'flow_table' must take the 'flow_mutex' and then
-     * the 'cls->rwlock' write lock.  (The outer 'flow_mutex' allows writers to
-     * atomically perform multiple operations on 'cls' and 'flow_table'.)
+     * Writers of 'cls' must take the 'flow_mutex' and then the 'cls->rwlock'
+     * write lock.  (The outer 'flow_mutex' allows writers to atomically
+     * perform multiple operations on 'cls' and 'flow_table'.)
      */
     struct ovs_mutex flow_mutex;
     struct classifier cls;      /* Classifier.  Protected by cls.rwlock. */
-    struct hmap flow_table OVS_GUARDED; /* Flow table. */
+    struct cmap flow_table OVS_GUARDED; /* Flow table. */
 
     /* Queues.
      *
@@ -262,7 +264,7 @@ struct dp_netdev_flow {
     const struct cls_rule cr;   /* In owning dp_netdev's 'cls'. */
 
     /* Hash table index by unmasked flow. */
-    const struct hmap_node node; /* In owning dp_netdev's 'flow_table'. */
+    const struct cmap_node node; /* In owning dp_netdev's 'flow_table'. */
     const struct flow flow;     /* The flow that created this entry. */
 
     /* Statistics.
@@ -486,7 +488,7 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
 
     ovs_mutex_init(&dp->flow_mutex);
     classifier_init(&dp->cls, NULL);
-    hmap_init(&dp->flow_table);
+    cmap_init(&dp->flow_table);
 
     fat_rwlock_init(&dp->queue_rwlock);
 
@@ -587,7 +589,7 @@ dp_netdev_free(struct dp_netdev *dp)
     fat_rwlock_destroy(&dp->queue_rwlock);
 
     classifier_destroy(&dp->cls);
-    hmap_destroy(&dp->flow_table);
+    cmap_destroy(&dp->flow_table);
     ovs_mutex_destroy(&dp->flow_mutex);
     seq_destroy(dp->port_seq);
     cmap_destroy(&dp->ports);
@@ -641,9 +643,7 @@ dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
     struct dp_netdev_stats *bucket;
     size_t i;
 
-    fat_rwlock_rdlock(&dp->cls.rwlock);
-    stats->n_flows = hmap_count(&dp->flow_table);
-    fat_rwlock_unlock(&dp->cls.rwlock);
+    stats->n_flows = cmap_count(&dp->flow_table);
 
     stats->n_hit = stats->n_missed = stats->n_lost = 0;
     OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &dp->stats) {
@@ -959,10 +959,10 @@ dp_netdev_remove_flow(struct dp_netdev *dp, struct dp_netdev_flow *flow)
     OVS_REQUIRES(dp->flow_mutex)
 {
     struct cls_rule *cr = CONST_CAST(struct cls_rule *, &flow->cr);
-    struct hmap_node *node = CONST_CAST(struct hmap_node *, &flow->node);
+    struct cmap_node *node = CONST_CAST(struct cmap_node *, &flow->node);
 
     classifier_remove(&dp->cls, cr);
-    hmap_remove(&dp->flow_table, node);
+    cmap_remove(&dp->flow_table, node, flow_hash(&flow->flow, 0));
 
     ovsrcu_postpone(dp_netdev_flow_free, flow);
 }
@@ -973,7 +973,7 @@ dp_netdev_flow_flush(struct dp_netdev *dp)
 
     ovs_mutex_lock(&dp->flow_mutex);
     fat_rwlock_wrlock(&dp->cls.rwlock);
-    HMAP_FOR_EACH_SAFE (netdev_flow, next, node, &dp->flow_table) {
+    CMAP_FOR_EACH_SAFE (netdev_flow, next, node, &dp->flow_table) {
         dp_netdev_remove_flow(dp, netdev_flow);
     }
     fat_rwlock_unlock(&dp->cls.rwlock);
@@ -1092,7 +1092,7 @@ dp_netdev_find_flow(const struct dp_netdev *dp, const struct flow *flow)
 {
     struct dp_netdev_flow *netdev_flow;
 
-    HMAP_FOR_EACH_WITH_HASH (netdev_flow, node, flow_hash(flow, 0),
+    CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, flow_hash(flow, 0),
                              &dp->flow_table) {
         if (flow_equal(&netdev_flow->flow, flow)) {
             return netdev_flow;
@@ -1270,12 +1270,12 @@ dp_netdev_flow_add(struct dp_netdev *dp, const struct flow *flow,
     match_init(&match, flow, wc);
     cls_rule_init(CONST_CAST(struct cls_rule *, &netdev_flow->cr),
                   &match, NETDEV_RULE_PRIORITY);
+    cmap_insert(&dp->flow_table,
+                CONST_CAST(struct cmap_node *, &netdev_flow->node),
+                flow_hash(flow, 0));
     fat_rwlock_wrlock(&dp->cls.rwlock);
     classifier_insert(&dp->cls,
                       CONST_CAST(struct cls_rule *, &netdev_flow->cr));
-    hmap_insert(&dp->flow_table,
-                CONST_CAST(struct hmap_node *, &netdev_flow->node),
-                flow_hash(flow, 0));
     fat_rwlock_unlock(&dp->cls.rwlock);
 
     return 0;
@@ -1323,7 +1323,7 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
     netdev_flow = dp_netdev_lookup_flow(dp, &miniflow);
     if (!netdev_flow) {
         if (put->flags & DPIF_FP_CREATE) {
-            if (hmap_count(&dp->flow_table) < MAX_FLOWS) {
+            if (cmap_count(&dp->flow_table) < MAX_FLOWS) {
                 if (put->stats) {
                     memset(put->stats, 0, sizeof *put->stats);
                 }
@@ -1400,8 +1400,7 @@ dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
 
 struct dpif_netdev_flow_dump {
     struct dpif_flow_dump up;
-    uint32_t bucket;
-    uint32_t offset;
+    struct cmap_position pos;
     int status;
     struct ovs_mutex mutex;
 };
@@ -1419,8 +1418,7 @@ dpif_netdev_flow_dump_create(const struct dpif *dpif_)
 
     dump = xmalloc(sizeof *dump);
     dpif_flow_dump_init(&dump->up, dpif_);
-    dump->bucket = 0;
-    dump->offset = 0;
+    memset(&dump->pos, 0, sizeof dump->pos);
     dump->status = 0;
     ovs_mutex_init(&dump->mutex);
 
@@ -1490,10 +1488,9 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
     fat_rwlock_rdlock(&dp->cls.rwlock);
     for (n_flows = 0; n_flows < MIN(max_flows, FLOW_DUMP_MAX_BATCH);
          n_flows++) {
-        struct hmap_node *node;
+        struct cmap_node *node;
 
-        node = hmap_at_position(&dp->flow_table, &dump->bucket,
-                                &dump->offset);
+        node = cmap_next_position(&dp->flow_table, &dump->pos);
         if (!node) {
             dump->status = EOF;
             break;
-- 
1.7.10.4
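Editor's note (not part of the patch): 'struct cmap_position' replaces the
hmap's separate bucket/offset cursors with a single opaque resume point, so
a flow dump can pick up where the previous batch stopped.  A hypothetical
resumable walk in the same style as the dump loop above; 'flows' and
'struct my_flow' are the illustrative definitions from the earlier sketch:

/* Sketch only: drain 'flows' in batches of up to 'max' entries, resuming
 * from '*pos' on each call.  Returns EOF once the walk is complete.
 * Assumes <stdio.h> for EOF and OVS's lib/util.h for CONTAINER_OF. */
static int
dump_batch(struct cmap_position *pos, int max)
{
    int n;

    for (n = 0; n < max; n++) {
        struct cmap_node *node = cmap_next_position(&flows, pos);
        if (!node) {
            return EOF;     /* No more entries; '*pos' stays exhausted. */
        }
        /* CONTAINER_OF(node, struct my_flow, node) recovers the entry. */
    }
    return 0;               /* Batch full; call again to continue. */
}

To start a fresh dump, zero the position first, just as the patch does with
memset(&dump->pos, 0, sizeof dump->pos).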