This is a prerequisite step in making the classifier lookups lockless. If taking a reference fails (i.e., the rule's reference count has already dropped to zero because the rule is being destroyed concurrently), we do the lookup again, as a different — possibly lower priority — rule may now match instead.
Also remove unwildcarding dl_type and nw_frag, as these are already taken care of by xlate_actions(). Signed-off-by: Jarno Rajahalme <jrajaha...@nicira.com> --- ofproto/ofproto-dpif.c | 41 ++++++++++++++++++++++++----------------- ofproto/ofproto-dpif.h | 9 +++++++++ ofproto/ofproto-provider.h | 1 + ofproto/ofproto.c | 10 ++++++++++ 4 files changed, 44 insertions(+), 17 deletions(-) diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c index 1fa6b0c..c94a0d2 100644 --- a/ofproto/ofproto-dpif.c +++ b/ofproto/ofproto-dpif.c @@ -3357,39 +3357,46 @@ rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, uint8_t table_id, struct classifier *cls = &ofproto->up.tables[table_id].cls; const struct cls_rule *cls_rule; struct rule_dpif *rule; + struct flow ofpc_normal_flow; - fat_rwlock_rdlock(&cls->rwlock); if (ofproto->up.frag_handling != OFPC_FRAG_NX_MATCH) { - if (wc) { - memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type); - if (is_ip_any(flow)) { - wc->masks.nw_frag |= FLOW_NW_FRAG_MASK; - } - } + /* We always unwildcard dl_type and nw_frag (for IP), so they + * need not be unwildcarded here. */ if (flow->nw_frag & FLOW_NW_FRAG_ANY) { if (ofproto->up.frag_handling == OFPC_FRAG_NORMAL) { /* We must pretend that transport ports are unavailable. */ - struct flow ofpc_normal_flow = *flow; + ofpc_normal_flow = *flow; ofpc_normal_flow.tp_src = htons(0); ofpc_normal_flow.tp_dst = htons(0); - cls_rule = classifier_lookup(cls, &ofpc_normal_flow, wc); + flow = &ofpc_normal_flow; } else { - /* Must be OFPC_FRAG_DROP (we don't have OFPC_FRAG_REASM). */ + /* Must be OFPC_FRAG_DROP (we don't have OFPC_FRAG_REASM). + * Use the drop_frags_rule (which cannot disappear). 
*/ cls_rule = &ofproto->drop_frags_rule->up.cr; + rule = rule_dpif_cast(rule_from_cls_rule(cls_rule)); + if (take_ref) { + rule_dpif_ref(rule); + } + return rule; } - } else { - cls_rule = classifier_lookup(cls, flow, wc); } - } else { - cls_rule = classifier_lookup(cls, flow, wc); } +retry: + fat_rwlock_rdlock(&cls->rwlock); + cls_rule = classifier_lookup(cls, flow, wc); + fat_rwlock_unlock(&cls->rwlock); + rule = rule_dpif_cast(rule_from_cls_rule(cls_rule)); - if (take_ref) { - rule_dpif_ref(rule); + if (rule && take_ref) { + if (!rule_dpif_try_ref(rule)) { + /* The rule was released before we got the ref, so it + * cannot be in the classifier any more. Do another + * lookup to find another rule, if any. */ + goto retry; + } } - fat_rwlock_unlock(&cls->rwlock); return rule; } diff --git a/ofproto/ofproto-dpif.h b/ofproto/ofproto-dpif.h index 2038d75..2f150b5 100644 --- a/ofproto/ofproto-dpif.h +++ b/ofproto/ofproto-dpif.h @@ -261,6 +261,15 @@ static inline void rule_dpif_ref(struct rule_dpif *rule) } } +static inline bool rule_dpif_try_ref(struct rule_dpif *rule) +{ + if (rule) { + return ofproto_rule_try_ref(RULE_CAST(rule)); + } + return false; +} + + static inline void rule_dpif_unref(struct rule_dpif *rule) { if (rule) { diff --git a/ofproto/ofproto-provider.h b/ofproto/ofproto-provider.h index af4409e..309ebd2 100644 --- a/ofproto/ofproto-provider.h +++ b/ofproto/ofproto-provider.h @@ -384,6 +384,7 @@ struct rule { }; void ofproto_rule_ref(struct rule *); +bool ofproto_rule_try_ref(struct rule *); void ofproto_rule_unref(struct rule *); static inline const struct rule_actions * rule_get_actions(const struct rule *); diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c index 5399c9f..3ddeeed 100644 --- a/ofproto/ofproto.c +++ b/ofproto/ofproto.c @@ -2536,6 +2536,16 @@ ofproto_rule_ref(struct rule *rule) } } +bool +ofproto_rule_try_ref(struct rule *rule) +{ + if (rule) { + return ovs_refcount_try_ref(&rule->ref_count); + } + return false; +} + + /* Decrements 
'rule''s ref_count and schedules 'rule' to be destroyed if the * ref_count reaches 0. * -- 1.7.10.4 _______________________________________________ dev mailing list dev@openvswitch.org http://openvswitch.org/mailman/listinfo/dev