Ben,

Thanks for the reminder; somehow I lost track of this.
This is actually a nice cleanup :-)

Acked-by: Jarno Rajahalme <jrajaha...@nicira.com>

On Jul 21, 2014, at 9:38 PM, Ben Pfaff <b...@nicira.com> wrote:

> There isn't any significant downside to making cmap iteration "safe" all
> the time, so this drops the _SAFE variant.
>
> Similar changes to CMAP_CURSOR_FOR_EACH and CMAP_CURSOR_FOR_EACH_CONTINUE.
>
> Signed-off-by: Ben Pfaff <b...@nicira.com>
> ---
> lib/classifier.c  |  4 ++--
> lib/cmap.h        | 57 ++++++++++++++++++++-----------------------------------
> lib/dpif-netdev.c |  2 +-
> 3 files changed, 24 insertions(+), 39 deletions(-)
>
> diff --git a/lib/classifier.c b/lib/classifier.c
> index af28070..b8b5e64 100644
> --- a/lib/classifier.c
> +++ b/lib/classifier.c
> @@ -485,12 +485,12 @@ classifier_destroy(struct classifier *cls)
>              trie_destroy(&cls->tries[i].root);
>          }
>
> -        CMAP_FOR_EACH_SAFE (subtable, cmap_node, &cls->subtables_map) {
> +        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
>              destroy_subtable(cls, subtable);
>          }
>          cmap_destroy(&cls->subtables_map);
>
> -        CMAP_FOR_EACH_SAFE (partition, cmap_node, &cls->partitions) {
> +        CMAP_FOR_EACH (partition, cmap_node, &cls->partitions) {
>              ovsrcu_postpone(free, partition);
>          }
>          cmap_destroy(&cls->partitions);
> diff --git a/lib/cmap.h b/lib/cmap.h
> index 87a1d53..038db6c 100644
> --- a/lib/cmap.h
> +++ b/lib/cmap.h
> @@ -163,33 +163,29 @@ struct cmap_node *cmap_find_protected(const struct cmap *, uint32_t hash);
>   *         ...operate on my_node...
>   *     }
>   *
> - * CMAP_FOR_EACH_SAFE variant is useful only in deallocation code already
> - * executing at postponed time, when it is known that the RCU grace period
> - * has already expired.
> + * CMAP_FOR_EACH is "safe" in the sense of HMAP_FOR_EACH_SAFE.  That is, it is
> + * safe to free the current node before going on to the next iteration.  Most
> + * of the time, though, this doesn't matter for a cmap because node
> + * deallocation has to be postponed until the next grace period.  This means
> + * that this guarantee is useful only in deallocation code already executing at
> + * postponed time, when it is known that the RCU grace period has already
> + * expired.
>   */
>
> -#define CMAP_CURSOR_FOR_EACH(NODE, MEMBER, CURSOR, CMAP)               \
> -    for (*(CURSOR) = cmap_cursor_start(CMAP);                          \
> -         ((CURSOR)->node                                               \
> -          ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), true)     \
> -          : false);                                                    \
> -         cmap_cursor_advance(CURSOR))
> -
> -#define CMAP_CURSOR_FOR_EACH_SAFE(NODE, MEMBER, CURSOR, CMAP)  \
> -    for (*(CURSOR) = cmap_cursor_start(CMAP);                  \
> -         ((CURSOR)->node                                       \
> -          ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER),   \
> -             cmap_cursor_advance(CURSOR),                      \
> -             true)                                             \
> -          : false);                                            \
> +#define CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER)       \
> +    ((CURSOR)->node                                         \
> +     ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER),     \
> +        cmap_cursor_advance(CURSOR),                        \
> +        true)                                               \
> +     : false)
> +
> +#define CMAP_CURSOR_FOR_EACH(NODE, MEMBER, CURSOR, CMAP)    \
> +    for (*(CURSOR) = cmap_cursor_start(CMAP);               \
> +         CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER);      \
>          )
>
> -#define CMAP_CURSOR_FOR_EACH_CONTINUE(NODE, MEMBER, CURSOR)            \
> -    for (cmap_cursor_advance(CURSOR);                                  \
> -         ((CURSOR)->node                                               \
> -          ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), true)     \
> -          : false);                                                    \
> -         cmap_cursor_advance(CURSOR))
> +#define CMAP_CURSOR_FOR_EACH_CONTINUE(NODE, MEMBER, CURSOR)    \
> +    while (CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER))
>
>  struct cmap_cursor {
>      const struct cmap_impl *impl;
> @@ -201,20 +197,9 @@ struct cmap_cursor {
>  struct cmap_cursor cmap_cursor_start(const struct cmap *);
>  void cmap_cursor_advance(struct cmap_cursor *);
>
> -#define CMAP_FOR_EACH(NODE, MEMBER, CMAP)                       \
> -    for (struct cmap_cursor cursor__ = cmap_cursor_start(CMAP); \
> -         (cursor__.node                                           \
> -          ? (ASSIGN_CONTAINER(NODE, cursor__.node, MEMBER), true) \
> -          : false);                                             \
> -         cmap_cursor_advance(&cursor__))
> -
> -#define CMAP_FOR_EACH_SAFE(NODE, MEMBER, CMAP)                  \
> +#define CMAP_FOR_EACH(NODE, MEMBER, CMAP)                       \
>      for (struct cmap_cursor cursor__ = cmap_cursor_start(CMAP); \
> -         (cursor__.node                                         \
> -          ? (ASSIGN_CONTAINER(NODE, cursor__.node, MEMBER),     \
> -             cmap_cursor_advance(&cursor__),                    \
> -             true)                                              \
> -          : false);                                             \
> +         CMAP_CURSOR_FOR_EACH__(NODE, &cursor__, MEMBER);       \
>          )
>
>  static inline struct cmap_node *cmap_first(const struct cmap *);
> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> index cfd7539..b4d9741 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -965,7 +965,7 @@ dp_netdev_flow_flush(struct dp_netdev *dp)
>      struct dp_netdev_flow *netdev_flow;
>
>      ovs_mutex_lock(&dp->flow_mutex);
> -    CMAP_FOR_EACH_SAFE (netdev_flow, node, &dp->flow_table) {
> +    CMAP_FOR_EACH (netdev_flow, node, &dp->flow_table) {
>          dp_netdev_remove_flow(dp, netdev_flow);
>      }
>      ovs_mutex_unlock(&dp->flow_mutex);
> --
> 1.9.1
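
For anyone reading along, here is a minimal sketch (mine, not part of the
patch) of the teardown pattern that plain CMAP_FOR_EACH now supports, modeled
on classifier_destroy() and dp_netdev_flow_flush() above.  The names
"struct my_entry", "my_map", and "my_table_flush" are hypothetical; the cmap
and RCU calls are the same OVS APIs the patch touches:

    #include "cmap.h"
    #include "ovs-rcu.h"

    /* Hypothetical node type; only the embedded 'struct cmap_node' and the
     * hash used at cmap_insert() time matter here. */
    struct my_entry {
        struct cmap_node node;      /* Inside 'my_map'. */
        uint32_t hash;              /* Hash passed to cmap_insert(). */
    };

    /* Empties 'my_map', freeing every entry.  With this patch, CMAP_FOR_EACH
     * advances its cursor before handing 'entry' to the loop body, so
     * removing the current node and scheduling its free is fine without a
     * separate _SAFE variant.  The free itself must still be postponed to
     * the next RCU grace period, as the new cmap.h comment explains. */
    static void
    my_table_flush(struct cmap *my_map)
    {
        struct my_entry *entry;

        CMAP_FOR_EACH (entry, node, my_map) {
            cmap_remove(my_map, &entry->node, entry->hash);
            ovsrcu_postpone(free, entry);   /* Freed after the grace period. */
        }
    }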