On Jun 7, 2013, at 12:03, ext Justin Pettit wrote:

...

> diff --git a/lib/flow.c b/lib/flow.c
> index 6476029..db4ce1d 100644
> --- a/lib/flow.c
> +++ b/lib/flow.c
> @@ -759,6 +759,32 @@ flow_hash_symmetric_l4(const struct flow *flow, uint32_t basis)
>      return jhash_bytes(&fields, sizeof fields, basis);
>  }
>
> +/* Masks the fields in 'wc' that are used by the flow hash 'fields'. */
> +void
> +flow_mask_hash_fields(struct flow_wildcards *wc, enum nx_hash_fields fields)
> +{
> +    switch (fields) {
> +    case NX_HASH_FIELDS_ETH_SRC:
> +        memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
> +        break;
> +
> +    case NX_HASH_FIELDS_SYMMETRIC_L4:
> +        memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
> +        memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
> +        memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
> +        memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
> +        memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
> +        memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
> +        memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
> +        memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
> +        memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
> +        break;
> +
> +    default:
> +        NOT_REACHED();
> +    }
> +}
> +
>  /* Hashes the portions of 'flow' designated by 'fields'. */
>  uint32_t
>  flow_hash_fields(const struct flow *flow, enum nx_hash_fields fields,

...

> @@ -3764,7 +3880,7 @@ handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet,
>      struct ofpbuf *packet;
>
>      subfacet = subfacet_create(facet, miss, now);
> -    want_path = subfacet->facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
> +    want_path = subfacet->facet->xc->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
>
Since "facet" is already in scope here, facet->xc->xout.slow works as well.

...

> @@ -5013,8 +5149,10 @@ facet_revalidate(struct facet *facet)
>      struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
>      struct rule_dpif *new_rule;
>      struct subfacet *subfacet;
> -    struct xlate_out xout;
> +    struct flow_wildcards wc;
> +    struct xout_cache *xc;
>      struct xlate_in xin;
> +    bool refresh_subfacets = false;
>
>      COVERAGE_INC(facet_revalidate);
>
> @@ -5037,63 +5175,58 @@ facet_revalidate(struct facet *facet)
>          }
>      }
>
> -    new_rule = rule_dpif_lookup(ofproto, &facet->flow);
> +    flow_wildcards_init_catchall(&wc);
> +    new_rule = rule_dpif_lookup(ofproto, &facet->flow, &wc);
>
> -    /* Calculate new datapath actions.
> -     *
> -     * We do not modify any 'facet' state yet, because we might need to, e.g.,
> -     * emit a NetFlow expiration and, if so, we need to have the old state
> -     * around to properly compose it. */
> -    xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals, new_rule,
> -                  0, NULL);
> -    xlate_actions(&xin, &xout);
> +    xc = lookup_xc(ofproto, &facet->flow);
> +    if (!xc) {
> +        ovs_assert(!facet->xc);
> +        xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals,
> +                      new_rule, 0, NULL);
> +        xc = create_xc(&wc, &facet->initial_vals, facet);
> +        xlate_actions(&xin, &xc->xout);
> +        insert_xc(ofproto, xc, &facet->flow);
> +        refresh_subfacets = true;
> +    } else if (!facet->xc) {
> +        add_facet_to_xc(xc, facet);
> +        refresh_subfacets = true;
> +    }

Is "refresh_subfacets = true" intentional in both of these branches?  (See the sketch below, after the quoted code.)

>
>      /* A facet's slow path reason should only change under dramatic
>       * circumstances. Rather than try to update everything, it's simpler to
>       * remove the facet and start over. */
> -    if (facet->xout.slow != xout.slow) {
> +    if (facet->xc->xout.slow != xc->xout.slow) {
>          facet_remove(facet);
> -        xlate_out_uninit(&xout);
>          return false;
>      }
>
> -    if (!ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)) {
> +    if (refresh_subfacets || facet->xc != xc) {
>          LIST_FOR_EACH(subfacet, list_node, &facet->subfacets) {
>              if (subfacet->path == SF_FAST_PATH) {
>                  struct dpif_flow_stats stats;
>
> -                subfacet_install(subfacet, &xout.odp_actions, &stats);
> +                subfacet_install(subfacet, &xc->xout.odp_actions, &stats);
>                  subfacet_update_stats(subfacet, &stats);
>              }
>          }
> -
>          facet_flush_stats(facet);
> -
> -        ofpbuf_clear(&facet->xout.odp_actions);
> -        ofpbuf_put(&facet->xout.odp_actions, xout.odp_actions.data,
> -                   xout.odp_actions.size);
>      }
>
> -    /* Update 'facet' now that we've taken care of all the old state. */
> -    facet->xout.tags = xout.tags;
> -    facet->xout.slow = xout.slow;
> -    facet->xout.has_learn = xout.has_learn;
> -    facet->xout.has_normal = xout.has_normal;
> -    facet->xout.has_fin_timeout = xout.has_fin_timeout;
> -    facet->xout.nf_output_iface = xout.nf_output_iface;
> -    facet->xout.mirrors = xout.mirrors;
> -    facet->nf_flow.output_iface = facet->xout.nf_output_iface;
> +    if (facet->xc != xc) {
> +        del_facet_from_xc(facet);
> +        add_facet_to_xc(xc, facet);
> +    }
> +    facet->nf_flow.output_iface = facet->xc->xout.nf_output_iface;
>
>      if (facet->rule != new_rule) {
>          COVERAGE_INC(facet_changed_rule);
> -        list_remove(&facet->list_node);
> -        list_push_back(&new_rule->facets, &facet->list_node);
> +        list_remove(&facet->rule_list_node);
> +        list_push_back(&new_rule->facets, &facet->rule_list_node);
>          facet->rule = new_rule;
>          facet->used = new_rule->up.created;
>          facet->prev_used = facet->used;
>      }
>
> -    xlate_out_uninit(&xout);
>      return true;
>  }

...
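On the refresh_subfacets question: both branches that set it only run when facet->xc is null, so the flag could presumably be derived up front instead.  Untested sketch only, reusing the helpers this patch introduces:

    refresh_subfacets = !facet->xc;

    xc = lookup_xc(ofproto, &facet->flow);
    if (!xc) {
        ovs_assert(!facet->xc);
        xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals,
                      new_rule, 0, NULL);
        xc = create_xc(&wc, &facet->initial_vals, facet);
        xlate_actions(&xin, &xc->xout);
        insert_xc(ofproto, xc, &facet->flow);
    } else if (!facet->xc) {
        /* A cache entry already exists but this facet isn't attached yet. */
        add_facet_to_xc(xc, facet);
    }

Not a big deal either way; I mainly want to confirm the duplication is intentional.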
> @@ -6231,6 +6429,13 @@ execute_mpls_push_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
>  {
>      ovs_assert(eth_type_mpls(eth_type));
>
> +    memset(&ctx->xout->wc.masks.dl_type, 0xff,
> +           sizeof ctx->xout->wc.masks.dl_type);
> +    memset(&ctx->xout->wc.masks.mpls_lse, 0xff,
> +           sizeof ctx->xout->wc.masks.mpls_lse);
> +    memset(&ctx->xout->wc.masks.mpls_depth, 0xff,
> +           sizeof ctx->xout->wc.masks.mpls_depth);
> +
>      if (ctx->base_flow.mpls_depth) {
>          ctx->xin->flow.mpls_lse &= ~htonl(MPLS_BOS_MASK);
>          ctx->xin->flow.mpls_depth++;
> @@ -6257,6 +6462,13 @@ execute_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
>      ovs_assert(eth_type_mpls(ctx->xin->flow.dl_type));
>      ovs_assert(!eth_type_mpls(eth_type));
>
> +    memset(&ctx->xout->wc.masks.dl_type, 0xff,
> +           sizeof ctx->xout->wc.masks.dl_type);
> +    memset(&ctx->xout->wc.masks.mpls_lse, 0xff,
> +           sizeof ctx->xout->wc.masks.mpls_lse);
> +    memset(&ctx->xout->wc.masks.mpls_depth, 0xff,
> +           sizeof ctx->xout->wc.masks.mpls_depth);
> +
>      if (ctx->xin->flow.mpls_depth) {
>          ctx->xin->flow.mpls_depth--;
>          ctx->xin->flow.mpls_lse = htonl(0);
> @@ -6377,6 +6589,10 @@ xlate_output_reg_action(struct xlate_ctx *ctx,
>  {
>      uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
>      if (port <= UINT16_MAX) {
> +        union mf_subvalue value;
> +
> +        memset(&value, 0xff, sizeof value);
> +        mf_write_subfield_flow(&or->src, &value, &ctx->xout->wc.masks);
>          xlate_output_action(ctx, port, or->max_len, false);
>      }
>  }
> @@ -6462,8 +6678,8 @@ xlate_bundle_action(struct xlate_ctx *ctx,
>  {
>      uint16_t port;
>
> -    port = bundle_execute(bundle, &ctx->xin->flow, slave_enabled_cb,
> -                          ctx->ofproto);
> +    port = bundle_execute(bundle, &ctx->xin->flow, &ctx->xout->wc,
> +                          slave_enabled_cb, ctx->ofproto);
>      if (bundle->dst.field) {
>          nxm_reg_load(&bundle->dst, port, &ctx->xin->flow);
>      } else {
> @@ -6481,6 +6697,14 @@ xlate_learn_action(struct xlate_ctx *ctx,
>      struct ofpbuf ofpacts;
>      int error;
>
> +    ctx->xout->has_learn = true;
> +
> +    learn_mask(learn, &ctx->xout->wc);
> +
> +    if (!ctx->xin->may_learn) {
> +        return;
> +    }
> +
>      ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
>      learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
>
> @@ -6646,12 +6870,16 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
>              break;
>
>          case OFPACT_SET_IPV4_SRC:
> +            memset(&ctx->xout->wc.masks.dl_type, 0xff,
> +                   sizeof ctx->xout->wc.masks.dl_type);
>              if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
>                  ctx->xin->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
>              }
>              break;
>
>          case OFPACT_SET_IPV4_DST:
> +            memset(&ctx->xout->wc.masks.dl_type, 0xff,
> +                   sizeof ctx->xout->wc.masks.dl_type);
>              if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
>                  ctx->xin->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
>              }
> @@ -6659,6 +6887,8 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
>
>          case OFPACT_SET_IPV4_DSCP:
>              /* OpenFlow 1.0 only supports IPv4. */
> +            memset(&ctx->xout->wc.masks.dl_type, 0xff,
> +                   sizeof ctx->xout->wc.masks.dl_type);
>              if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
>                  ctx->xin->flow.nw_tos &= ~IP_DSCP_MASK;
>                  ctx->xin->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
> @@ -6666,6 +6896,10 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
>              break;
>
>          case OFPACT_SET_L4_SRC_PORT:
> +            memset(&ctx->xout->wc.masks.dl_type, 0xff,
> +                   sizeof ctx->xout->wc.masks.dl_type);
> +            memset(&ctx->xout->wc.masks.nw_proto, 0xff,
> +                   sizeof ctx->xout->wc.masks.nw_proto);
>              if (is_ip_any(&ctx->xin->flow)) {
>                  ctx->xin->flow.tp_src =
>                      htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
> @@ -6673,6 +6907,10 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
>              break;
>
>          case OFPACT_SET_L4_DST_PORT:
> +            memset(&ctx->xout->wc.masks.dl_type, 0xff,
> +                   sizeof ctx->xout->wc.masks.dl_type);
> +            memset(&ctx->xout->wc.masks.nw_proto, 0xff,
> +                   sizeof ctx->xout->wc.masks.nw_proto);
>              if (is_ip_any(&ctx->xin->flow)) {
>                  ctx->xin->flow.tp_dst =
>                      htons(ofpact_get_SET_L4_DST_PORT(a)->port);
> @@ -6693,11 +6931,15 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
>              break;
>
>          case OFPACT_POP_QUEUE:
> +            memset(&ctx->xout->wc.masks.skb_priority, 0xff,
> +                   sizeof ctx->xout->wc.masks.skb_priority);
> +
>              ctx->xin->flow.skb_priority = ctx->orig_skb_priority;
>              break;
>
>          case OFPACT_REG_MOVE:
> -            nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->xin->flow);
> +            nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->xin->flow,
> +                                 &ctx->xout->wc);
>              break;
>
>          case OFPACT_REG_LOAD:
> @@ -6706,7 +6948,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
>
>          case OFPACT_STACK_PUSH:
>              nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), &ctx->xin->flow,
> -                                   &ctx->stack);
> +                                   &ctx->xout->wc, &ctx->stack);
>              break;
>
>          case OFPACT_STACK_POP:
> @@ -6736,6 +6978,8 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
>              break;
>
>          case OFPACT_DEC_TTL:
> +            memset(&ctx->xout->wc.masks.dl_type, 0xff,
> +                   sizeof ctx->xout->wc.masks.dl_type);
>              if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
>                  goto out;
>              }
> @@ -6746,7 +6990,8 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
>              break;
>
>          case OFPACT_MULTIPATH:
> -            multipath_execute(ofpact_get_MULTIPATH(a), &ctx->xin->flow);
> +            multipath_execute(ofpact_get_MULTIPATH(a), &ctx->xin->flow,
> +                              &ctx->xout->wc);
>              break;
>
>          case OFPACT_BUNDLE:
> @@ -6759,10 +7004,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
>              break;
>
>          case OFPACT_LEARN:
> -            ctx->xout->has_learn = true;
> -            if (ctx->xin->may_learn) {
> -                xlate_learn_action(ctx, ofpact_get_LEARN(a));
> -            }
> +            xlate_learn_action(ctx, ofpact_get_LEARN(a));
>              break;
>
>          case OFPACT_EXIT:
> @@ -6770,6 +7012,10 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
>              break;
>
>          case OFPACT_FIN_TIMEOUT:
> +            memset(&ctx->xout->wc.masks.dl_type, 0xff,
> +                   sizeof ctx->xout->wc.masks.dl_type);
> +            memset(&ctx->xout->wc.masks.nw_proto, 0xff,
> +                   sizeof ctx->xout->wc.masks.nw_proto);
>              ctx->xout->has_fin_timeout = true;
>              xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
>              break;
> @@ -6798,7 +7044,8 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
>              ctx->table_id = ogt->table_id;
>
>              /* Look up a flow from the new table. */
> -            rule = rule_dpif_lookup__(ctx->ofproto, &ctx->xin->flow, ctx->table_id);
> +            rule = rule_dpif_lookup__(ctx->ofproto, &ctx->xin->flow,
> +                                      &ctx->xout->wc, ctx->table_id);
>
>              tag_the_flow(ctx, rule);
>
> @@ -6867,7 +7114,12 @@ xlate_out_uninit(struct xlate_out *xout)
>  }
>
>  /* Translates the 'ofpacts_len' bytes of "struct ofpacts" starting at 'ofpacts'
> - * into datapath actions in 'odp_actions', using 'ctx'. */
> + * into datapath actions in 'odp_actions', using 'ctx'.
> + *
> + * The caller is responsible for initializing 'xout->wc'.  It will
> + * likely either be the wildcards from a call to rule_dpif_lookup() or
> + * flow_wildcards_init_catchall() if no rule was used.
> + */
>  static void
>  xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
>  {
> @@ -6917,6 +7169,36 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
>      memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
>      ctx.orig_tunnel_ip_dst = ctx.xin->flow.tunnel.ip_dst;
>
> +    memset(&ctx.xout->wc.masks.in_port, 0xff,
> +           sizeof ctx.xout->wc.masks.in_port);
> +
> +    if (tnl_port_should_receive(&ctx.xin->flow)) {
> +        memset(&ctx.xout->wc.masks.tunnel, 0xff,
> +               sizeof ctx.xout->wc.masks.tunnel);
> +    }
> +
> +    /* Disable most wildcarding for NetFlow. */
> +    if (xin->ofproto->netflow) {
> +        memset(&ctx.xout->wc.masks.dl_src, 0xff,
> +               sizeof ctx.xout->wc.masks.dl_src);
> +        memset(&ctx.xout->wc.masks.dl_dst, 0xff,
> +               sizeof ctx.xout->wc.masks.dl_dst);
> +        memset(&ctx.xout->wc.masks.dl_type, 0xff,
> +               sizeof ctx.xout->wc.masks.dl_type);
> +        memset(&ctx.xout->wc.masks.vlan_tci, 0xff,
> +               sizeof ctx.xout->wc.masks.vlan_tci);
> +        memset(&ctx.xout->wc.masks.nw_proto, 0xff,
> +               sizeof ctx.xout->wc.masks.nw_proto);
> +        memset(&ctx.xout->wc.masks.nw_src, 0xff,
> +               sizeof ctx.xout->wc.masks.nw_src);
> +        memset(&ctx.xout->wc.masks.nw_dst, 0xff,
> +               sizeof ctx.xout->wc.masks.nw_dst);
> +        memset(&ctx.xout->wc.masks.tp_src, 0xff,
> +               sizeof ctx.xout->wc.masks.tp_src);
> +        memset(&ctx.xout->wc.masks.tp_dst, 0xff,
> +               sizeof ctx.xout->wc.masks.tp_dst);
> +    }
> +

Could this just use the new flow_mask_hash_fields(&ctx.xout->wc, NX_HASH_FIELDS_SYMMETRIC_L4)?  Also, new NX_HASH_FIELDS_* values could be defined for MPLS, NW_PROTO, etc. and used to set the masks above.  (There's a sketch at the end of this message.)

>      ctx.xout->tags = 0;
>      ctx.xout->slow = 0;
>      ctx.xout->has_learn = false;
> @@ -7033,6 +7315,12 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
>      }
>
>      ofpbuf_uninit(&ctx.stack);
> +
> +    /* Clear the metadata and register wildcard masks, because we won't
> +     * use non-header fields as part of the cache. */
> +    memset(&ctx.xout->wc.masks.metadata, 0,
> +           sizeof ctx.xout->wc.masks.metadata);
> +    memset(&ctx.xout->wc.masks.regs, 0, sizeof ctx.xout->wc.masks.regs);
>  }
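To make the flow_mask_hash_fields() suggestion concrete, the NetFlow block could collapse to something like this.  Untested sketch; it relies on flow_mask_hash_fields() being exported from lib/flow.c as in the first hunk of this patch, and NX_HASH_FIELDS_SYMMETRIC_L4 masks exactly the same set of fields as the nine memsets above:

    /* Disable most wildcarding for NetFlow: NetFlow accounting needs the
     * full symmetric L4 tuple, which is the field set covered by
     * NX_HASH_FIELDS_SYMMETRIC_L4. */
    if (xin->ofproto->netflow) {
        flow_mask_hash_fields(&ctx.xout->wc, NX_HASH_FIELDS_SYMMETRIC_L4);
    }

Something similar could follow for the MPLS and dl_type/nw_proto cases if new NX_HASH_FIELDS_* values were added.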