Earlier I had seen the frag checks show up suspiciously high in perf reports, but did not understand why. Maybe this explains it:
Previously we checked frag handling before each flow table lookup. As the
nw_frag field is not writable, it suffices to check it once, before the
first table lookup. Also, ofproto-dpif-xlate already has code for this,
but it was run after the first table lookup. The check is now done only
once, before the first table lookup.

Signed-off-by: Jarno Rajahalme <jrajaha...@nicira.com>
---
 ofproto/ofproto-dpif-xlate.c |   71 +++++++++++++++++++++---------------
 ofproto/ofproto-dpif.c       |   82 ++++++++++++++++++++++++++++--------------
 ofproto/ofproto-dpif.h       |   10 ++++--
 3 files changed, 104 insertions(+), 59 deletions(-)

diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c
index 1d46456..9bd72ca 100644
--- a/ofproto/ofproto-dpif-xlate.c
+++ b/ofproto/ofproto-dpif-xlate.c
@@ -4187,6 +4187,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
     ctx.xout->has_learn = false;
     ctx.xout->has_normal = false;
     ctx.xout->has_fin_timeout = false;
+    ctx.xout->fail_open = false;
     ctx.xout->nf_output_iface = NF_OUT_DROP;
     ctx.xout->mirrors = 0;
@@ -4231,35 +4232,6 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
     ctx.use_recirc = false;
     ctx.was_mpls = false;
 
-    if (!xin->ofpacts && !ctx.rule) {
-        ctx.table_id = rule_dpif_lookup(ctx.xbridge->ofproto, flow,
-                                        !xin->skip_wildcards ? wc : NULL,
-                                        &rule, ctx.xin->xcache != NULL,
-                                        ctx.xin->resubmit_stats);
-        if (ctx.xin->resubmit_stats) {
-            rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
-        }
-        if (ctx.xin->xcache) {
-            struct xc_entry *entry;
-
-            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
-            entry->u.rule = rule;
-        }
-        ctx.rule = rule;
-    }
-    xout->fail_open = ctx.rule && rule_dpif_is_fail_open(ctx.rule);
-
-    if (xin->ofpacts) {
-        ofpacts = xin->ofpacts;
-        ofpacts_len = xin->ofpacts_len;
-    } else if (ctx.rule) {
-        actions = rule_dpif_get_actions(ctx.rule);
-        ofpacts = actions->ofpacts;
-        ofpacts_len = actions->ofpacts_len;
-    } else {
-        OVS_NOT_REACHED();
-    }
-
     ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);
     ofpbuf_use_stub(&ctx.action_set,
                     ctx.action_set_stub, sizeof ctx.action_set_stub);
@@ -4279,6 +4251,16 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
             break;
 
         case OFPC_FRAG_DROP:
+            if (!xin->ofpacts && !ctx.rule) {
+                rule = ofproto_dpif_get_drop_frags_rule(ctx.xbridge->ofproto,
+                                                        xin->resubmit_stats);
+                if (xin->xcache) {
+                    struct xc_entry *entry;
+
+                    entry = xlate_cache_add_entry(xin->xcache, XC_RULE);
+                    entry->u.rule = rule_dpif_ref(rule);
+                }
+            }
             return;
 
         case OFPC_FRAG_REASM:
@@ -4293,6 +4275,37 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
         }
     }
 
+    if (!xin->ofpacts && !ctx.rule) {
+        ctx.table_id = rule_dpif_lookup(ctx.xbridge->ofproto, flow,
+                                        !xin->skip_wildcards ? wc : NULL,
+                                        &rule, xin->xcache != NULL,
+                                        xin->resubmit_stats);
+        if (xin->resubmit_stats) {
+            rule_dpif_credit_stats(rule, xin->resubmit_stats);
+        }
+        if (xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(xin->xcache, XC_RULE);
+            entry->u.rule = rule;
+        }
+        ctx.rule = rule;
+        if (ctx.rule) {
+            xout->fail_open = rule_dpif_is_fail_open(ctx.rule);
+        }
+    }
+
+    if (xin->ofpacts) {
+        ofpacts = xin->ofpacts;
+        ofpacts_len = xin->ofpacts_len;
+    } else if (ctx.rule) {
+        actions = rule_dpif_get_actions(ctx.rule);
+        ofpacts = actions->ofpacts;
+        ofpacts_len = actions->ofpacts_len;
+    } else {
+        OVS_NOT_REACHED();
+    }
+
     in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
     if (in_port && in_port->is_tunnel) {
         if (ctx.xin->resubmit_stats) {
diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index 6cc9789..95d817d 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -3562,6 +3562,25 @@ rule_set_recirc_id(struct rule *rule_, uint32_t id)
     ovs_mutex_unlock(&rule->up.mutex);
 }
 
+struct rule_dpif *
+ofproto_dpif_get_drop_frags_rule(struct ofproto_dpif *ofproto,
+                                 const struct dpif_flow_stats *stats)
+{
+    const struct cls_rule *cls_rule;
+    struct rule_dpif *rule;
+
+    cls_rule = &ofproto->drop_frags_rule->up.cr;
+    rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
+    if (stats) {
+        struct oftable *tbl = &ofproto->up.tables[0];
+        unsigned long orig;
+
+        atomic_add_relaxed(&tbl->n_matched, stats->n_packets, &orig);
+        rule_dpif_credit_stats(rule, stats);
+    }
+    return rule;
+}
+
 /* Lookup 'flow' in table 0 of 'ofproto''s classifier.
  * If 'wc' is non-null, sets the fields that were relevant as part of
  * the lookup. Returns the table_id where a match or miss occurred.
@@ -3575,7 +3594,7 @@ rule_set_recirc_id(struct rule *rule_, uint32_t id)
  * a non-zero 'take_ref' must be passed in to cause a reference to be taken
  * on it before this returns. */
 uint8_t
-rule_dpif_lookup(struct ofproto_dpif *ofproto, struct flow *flow,
+rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow,
                  struct flow_wildcards *wc, struct rule_dpif **rule,
                  bool take_ref, const struct dpif_flow_stats *stats)
 {
@@ -3637,6 +3656,40 @@ rule_dpif_lookup(struct ofproto_dpif *ofproto, struct flow *flow,
     return table_id;
 }
 
+/* Same as rule_dpif_lookup, but temporarily modifies the ports on 'flow'
+ * if necessary, and does not return a table_id. */
+static void
+rule_dpif_lookup_do_frag(struct ofproto_dpif *ofproto, struct flow *flow,
+                         struct flow_wildcards *wc, struct rule_dpif **rule,
+                         bool take_ref, const struct dpif_flow_stats *stats)
+{
+    ovs_be16 tp_src = htons(0), tp_dst = htons(0);
+
+    if (flow->nw_frag & FLOW_NW_FRAG_ANY
+        && ofproto->up.frag_handling != OFPC_FRAG_NX_MATCH) {
+
+        if (ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
+            /* We must pretend that transport ports are unavailable. */
+            tp_src = flow->tp_src;
+            flow->tp_src = htons(0);
+            tp_dst = flow->tp_dst;
+            flow->tp_dst = htons(0);
+        } else {
+            /* Must be OFPC_FRAG_DROP (we don't have OFPC_FRAG_REASM).
+             * Use the drop_frags_rule (which cannot disappear). */
+            *rule = ofproto_dpif_get_drop_frags_rule(ofproto, NULL);
+            return;
+        }
+    }
+
+    rule_dpif_lookup(ofproto, flow, wc, rule, take_ref, stats);
+
+    if (tp_src || tp_dst) {
+        flow->tp_src = tp_src;
+        flow->tp_dst = tp_dst;
+    }
+}
+
 /* The returned rule is valid at least until the next RCU quiescent period.
  * If the '*rule' needs to stay around longer, a non-zero 'take_ref' must be
  * passed in to cause a reference to be taken on it before this returns. */
@@ -3648,31 +3701,6 @@ rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, uint8_t table_id,
     struct classifier *cls = &ofproto->up.tables[table_id].cls;
     const struct cls_rule *cls_rule;
     struct rule_dpif *rule;
-    struct flow ofpc_normal_flow;
-
-    if (ofproto->up.frag_handling != OFPC_FRAG_NX_MATCH) {
-        /* We always unwildcard dl_type and nw_frag (for IP), so they
-         * need not be unwildcarded here. */
-
-        if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
-            if (ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
-                /* We must pretend that transport ports are unavailable. */
-                ofpc_normal_flow = *flow;
-                ofpc_normal_flow.tp_src = htons(0);
-                ofpc_normal_flow.tp_dst = htons(0);
-                flow = &ofpc_normal_flow;
-            } else {
-                /* Must be OFPC_FRAG_DROP (we don't have OFPC_FRAG_REASM).
-                 * Use the drop_frags_rule (which cannot disappear). */
-                cls_rule = &ofproto->drop_frags_rule->up.cr;
-                rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
-                if (take_ref) {
-                    rule_dpif_ref(rule);
-                }
-                return rule;
-            }
-        }
-    }
-
     do {
         cls_rule = classifier_lookup(cls, flow, wc);
@@ -4705,7 +4733,7 @@ ofproto_trace(struct ofproto_dpif *ofproto, struct flow *flow,
     if (ofpacts) {
         rule = NULL;
     } else {
-        rule_dpif_lookup(ofproto, flow, &trace.wc, &rule, false, NULL);
+        rule_dpif_lookup_do_frag(ofproto, flow, &trace.wc, &rule, false, NULL);
 
         trace_format_rule(ds, 0, rule);
         if (rule == ofproto->miss_rule) {
diff --git a/ofproto/ofproto-dpif.h b/ofproto/ofproto-dpif.h
index a8c5d48..d1ee530 100644
--- a/ofproto/ofproto-dpif.h
+++ b/ofproto/ofproto-dpif.h
@@ -88,7 +88,10 @@ enum rule_dpif_lookup_verdict {
 size_t ofproto_dpif_get_max_mpls_depth(const struct ofproto_dpif *);
 bool ofproto_dpif_get_enable_recirc(const struct ofproto_dpif *);
 
-uint8_t rule_dpif_lookup(struct ofproto_dpif *, struct flow *,
+struct rule_dpif *ofproto_dpif_get_drop_frags_rule(struct ofproto_dpif *,
+                                                   const struct dpif_flow_stats *);
+
+uint8_t rule_dpif_lookup(struct ofproto_dpif *, const struct flow *,
                          struct flow_wildcards *, struct rule_dpif **rule,
                          bool take_ref, const struct dpif_flow_stats *);
@@ -101,7 +104,7 @@ enum rule_dpif_lookup_verdict rule_dpif_lookup_from_table(struct ofproto_dpif *,
                                                            bool take_ref,
                                                            const struct dpif_flow_stats *);
 
-static inline void rule_dpif_ref(struct rule_dpif *);
+static inline struct rule_dpif * rule_dpif_ref(struct rule_dpif *);
 static inline void rule_dpif_unref(struct rule_dpif *);
 
 void rule_dpif_credit_stats(struct rule_dpif *rule ,
@@ -255,11 +258,12 @@ static inline void group_dpif_unref(struct group_dpif *group)
     }
 }
 
-static inline void rule_dpif_ref(struct rule_dpif *rule)
+static inline struct rule_dpif * rule_dpif_ref(struct rule_dpif *rule)
 {
     if (rule) {
         ofproto_rule_ref(RULE_CAST(rule));
    }
+    return rule;
 }
 
 static inline bool rule_dpif_try_ref(struct rule_dpif *rule)
-- 
1.7.10.4
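
An aside, and nothing below is from the patch itself: if I read the change right, the fragment handling mode used to be consulted inside every per-table lookup (the block removed from rule_dpif_lookup_in_table), and is now consulted once before the first lookup. A minimal stand-alone sketch of that "check once, then look up freely" shape, with made-up names (frag_mode, pkt, check_frags_once()), would be roughly:

#include <stdbool.h>

/* Hypothetical sketch only -- not code from the patch.  It mirrors the
 * structure the commit message describes: consult the fragment handling
 * mode once, before the first table lookup, instead of on every lookup. */

enum frag_mode { FRAG_NORMAL, FRAG_DROP, FRAG_NX_MATCH };

struct pkt {
    bool is_frag;           /* True if the packet is an IP fragment. */
    unsigned short tp_src;  /* Transport ports; zeroed under FRAG_NORMAL. */
    unsigned short tp_dst;
};

/* Returns true if the packet must be dropped without any table lookup.
 * Otherwise adjusts the packet once so that every subsequent lookup can
 * proceed without re-checking the fragment mode. */
static bool
check_frags_once(struct pkt *pkt, enum frag_mode mode)
{
    if (pkt->is_frag && mode != FRAG_NX_MATCH) {
        if (mode == FRAG_NORMAL) {
            /* Pretend the transport ports are unavailable, then fall
             * through to the normal lookup path. */
            pkt->tp_src = 0;
            pkt->tp_dst = 0;
        } else {
            /* FRAG_DROP: handled by the drop-frags rule, no lookup. */
            return true;
        }
    }
    return false;
}

int
main(void)
{
    struct pkt p = { .is_frag = true, .tp_src = 80, .tp_dst = 1234 };

    /* Under FRAG_NORMAL the fragment is kept, but its ports are zeroed
     * by the one-time check before any lookup happens. */
    bool drop = check_frags_once(&p, FRAG_NORMAL);
    return (drop || p.tp_src != 0) ? 1 : 0;
}

If that reading is correct, it would also explain the perf reports: the old placement paid for the check on every per-table lookup, while the new placement pays once per translation.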