Until now, struct xlate_out has embedded an ofpbuf for datapath actions, plus a large stub for it, which xlate_actions() filled in during flow translation. This commit removes the embedded ofpbuf and stub and instead puts a pointer to an ofpbuf into struct xlate_in, which the caller may point at a buffer of its own if desired. (If none is supplied, xlate_actions() uses an internal scratch buffer and destroys it before returning.)
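For reviewers, here is a minimal sketch of the resulting calling convention. The example_xlate() wrapper below is hypothetical and not part of the patch (it assumes the usual ofproto-dpif-xlate.h and ofpbuf.h includes); the buffer handling mirrors the callers updated below, e.g. ofproto_dpif_execute_actions() and revalidate_ukey():

    /* Hypothetical caller, illustrating the post-patch
     * xlate_in_init()/xlate_actions() interface only. */
    static void
    example_xlate(struct ofproto_dpif *ofproto, const struct flow *flow,
                  const struct dp_packet *packet)
    {
        uint64_t stub[1024 / 8];
        struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(stub);
        struct xlate_in xin;
        struct xlate_out xout;

        /* Hand the caller-owned buffer to xlate_in_init().  Passing NULL
         * here instead (as push_ukey_ops__() does) makes xlate_actions()
         * translate into an internal scratch buffer and discard it. */
        xlate_in_init(&xin, ofproto, flow, flow->in_port.ofp_port, NULL,
                      0, packet, NULL, &odp_actions);
        xlate_actions(&xin, &xout);

        /* The datapath actions are now in odp_actions.data / .size,
         * no longer in 'xout'. */

        xlate_out_uninit(&xout);
        ofpbuf_uninit(&odp_actions);
    }
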
This commit eliminates the last large data structure from struct xlate_out, making the initialization of an entire xlate_out at the beginning of xlate_actions() now reasonable. More members will be eliminated in upcoming commits, but this is no longer essential. Signed-off-by: Ben Pfaff <b...@nicira.com> --- ofproto/ofproto-dpif-upcall.c | 44 +++++++++--------- ofproto/ofproto-dpif-xlate.c | 101 +++++++++++++++++++++--------------------- ofproto/ofproto-dpif-xlate.h | 11 ++--- ofproto/ofproto-dpif.c | 20 ++++++--- 4 files changed, 91 insertions(+), 85 deletions(-) diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c index 6a02d60..59010c2 100644 --- a/ofproto/ofproto-dpif-upcall.c +++ b/ofproto/ofproto-dpif-upcall.c @@ -170,6 +170,7 @@ struct upcall { bool xout_initialized; /* True if 'xout' must be uninitialized. */ struct xlate_out xout; /* Result of xlate_actions(). */ + struct ofpbuf odp_actions; /* Datapath actions from xlate_actions(). */ struct flow_wildcards wc; /* Dependencies that megaflow must match. */ struct ofpbuf put_actions; /* Actions 'put' in the fastpath. */ @@ -190,6 +191,8 @@ struct upcall { const struct nlattr *key; /* Datapath flow key. */ size_t key_len; /* Datapath flow key length. */ const struct nlattr *out_tun_key; /* Datapath output tunnel key. */ + + uint64_t odp_actions_stub[1024 / 8]; /* Stub for odp_actions. */ }; /* 'udpif_key's are responsible for tracking the little bit of state udpif @@ -706,7 +709,8 @@ recv_upcalls(struct handler *handler) pkt_metadata_from_flow(&dupcall->packet.md, flow); flow_extract(&dupcall->packet, flow); - error = process_upcall(udpif, upcall, NULL, &upcall->wc); + error = process_upcall(udpif, upcall, + &upcall->odp_actions, &upcall->wc); if (error) { goto cleanup; } @@ -927,6 +931,8 @@ upcall_receive(struct upcall *upcall, const struct dpif_backer *backer, upcall->pmd_id = pmd_id; upcall->type = type; upcall->userdata = userdata; + ofpbuf_use_stub(&upcall->odp_actions, upcall->odp_actions_stub, + sizeof upcall->odp_actions_stub); ofpbuf_init(&upcall->put_actions, 0); upcall->xout_initialized = false; @@ -956,8 +962,7 @@ upcall_xlate(struct udpif *udpif, struct upcall *upcall, stats.tcp_flags = ntohs(upcall->flow->tcp_flags); xlate_in_init(&xin, upcall->ofproto, upcall->flow, upcall->in_port, NULL, - stats.tcp_flags, upcall->packet, wc); - xin.odp_actions = odp_actions; + stats.tcp_flags, upcall->packet, wc, odp_actions); if (upcall->type == DPIF_UC_MISS) { xin.resubmit_stats = &stats; @@ -1011,8 +1016,7 @@ upcall_xlate(struct udpif *udpif, struct upcall *upcall, if (!upcall->xout.slow) { ofpbuf_use_const(&upcall->put_actions, - upcall->xout.odp_actions->data, - upcall->xout.odp_actions->size); + odp_actions->data, odp_actions->size); } else { ofpbuf_init(&upcall->put_actions, 0); compose_slow_path(udpif, &upcall->xout, upcall->flow, @@ -1035,6 +1039,7 @@ upcall_uninit(struct upcall *upcall) if (upcall->xout_initialized) { xlate_out_uninit(&upcall->xout); } + ofpbuf_uninit(&upcall->odp_actions); ofpbuf_uninit(&upcall->put_actions); if (upcall->ukey) { if (!upcall->ukey_persists) { @@ -1234,7 +1239,7 @@ handle_upcalls(struct udpif *udpif, struct upcall *upcalls, * actions were composed assuming that the packet contained no * VLAN. So, we must remove the VLAN header from the packet before * trying to execute the actions. 
*/ - if (upcall->xout.odp_actions->size) { + if (upcall->odp_actions.size) { eth_pop_vlan(CONST_CAST(struct dp_packet *, upcall->packet)); } @@ -1272,15 +1277,15 @@ handle_upcalls(struct udpif *udpif, struct upcall *upcalls, op->dop.u.flow_put.actions_len = ukey->actions->size; } - if (upcall->xout.odp_actions->size) { + if (upcall->odp_actions.size) { op = &ops[n_ops++]; op->ukey = NULL; op->dop.type = DPIF_OP_EXECUTE; op->dop.u.execute.packet = CONST_CAST(struct dp_packet *, packet); odp_key_to_pkt_metadata(upcall->key, upcall->key_len, &op->dop.u.execute.packet->md); - op->dop.u.execute.actions = upcall->xout.odp_actions->data; - op->dop.u.execute.actions_len = upcall->xout.odp_actions->size; + op->dop.u.execute.actions = upcall->odp_actions.data; + op->dop.u.execute.actions_len = upcall->odp_actions.size; op->dop.u.execute.needs_help = (upcall->xout.slow & SLOW_ACTION) != 0; op->dop.u.execute.probe = false; } @@ -1663,12 +1668,13 @@ revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey, const struct dpif_flow_stats *stats, uint64_t reval_seq) OVS_REQUIRES(ukey->mutex) { - uint64_t slow_path_buf[128 / 8]; + uint64_t odp_actions_stub[1024 / 8]; + struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub); + struct xlate_out xout, *xoutp; struct netflow *netflow; struct ofproto_dpif *ofproto; struct dpif_flow_stats push; - struct ofpbuf xout_actions; struct flow flow, dp_mask; struct flow_wildcards wc; uint64_t *dp64, *xout64; @@ -1733,7 +1739,7 @@ revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey, } xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL, push.tcp_flags, - NULL, need_revalidate ? &wc : NULL); + NULL, need_revalidate ? &wc : NULL, &odp_actions); if (push.n_packets) { xin.resubmit_stats = &push; xin.may_learn = true; @@ -1747,16 +1753,13 @@ revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey, goto exit; } - if (!xout.slow) { - ofpbuf_use_const(&xout_actions, xout.odp_actions->data, - xout.odp_actions->size); - } else { - ofpbuf_use_stack(&xout_actions, slow_path_buf, sizeof slow_path_buf); + if (xout.slow) { + ofpbuf_clear(&odp_actions); compose_slow_path(udpif, &xout, &flow, flow.in_port.odp_port, - &xout_actions); + &odp_actions); } - if (!ofpbuf_equal(&xout_actions, ukey->actions)) { + if (!ofpbuf_equal(&odp_actions, ukey->actions)) { goto exit; } @@ -1788,6 +1791,7 @@ exit: netflow_flow_clear(netflow, &flow); } xlate_out_uninit(xoutp); + ofpbuf_uninit(&odp_actions); return ok; } @@ -1880,7 +1884,7 @@ push_ukey_ops__(struct udpif *udpif, struct ukey_op *ops, size_t n_ops) struct xlate_in xin; xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL, - push->tcp_flags, NULL, NULL); + push->tcp_flags, NULL, NULL, NULL); xin.resubmit_stats = push->n_packets ? push : NULL; xin.may_learn = push->n_packets > 0; xlate_actions_for_side_effects(&xin); diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c index a219936..eb1e17a 100644 --- a/ofproto/ofproto-dpif-xlate.c +++ b/ofproto/ofproto-dpif-xlate.c @@ -187,6 +187,13 @@ struct xlate_ctx { * caller really wants wildcards. */ struct flow_wildcards *wc; + /* Output buffer for datapath actions. When 'xin->odp_actions' is nonnull, + * this is the same pointer. When 'xin->odp_actions' is null, this points + * to a scratch ofpbuf. This allows code to add actions to + * 'ctx->odp_actions' without worrying about whether the caller really + * wants actions. */ + struct ofpbuf *odp_actions; + /* Resubmit statistics, via xlate_table_action(). 
*/ int recurse; /* Current resubmit nesting depth. */ int resubmits; /* Total number of resubmits. */ @@ -1548,7 +1555,7 @@ add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow) "%s, which is reserved exclusively for mirroring", ctx->xbridge->name, in_xbundle->name); } - ofpbuf_clear(ctx->xout->odp_actions); + ofpbuf_clear(ctx->odp_actions); return; } @@ -2597,7 +2604,7 @@ static void add_sflow_action(struct xlate_ctx *ctx) { ctx->user_cookie_offset = compose_sflow_action(ctx->xbridge, - ctx->xout->odp_actions, + ctx->odp_actions, &ctx->xin->flow, ODPP_NONE); ctx->sflow_odp_port = 0; ctx->sflow_n_outputs = 0; @@ -2608,14 +2615,14 @@ add_sflow_action(struct xlate_ctx *ctx) static void add_ipfix_action(struct xlate_ctx *ctx) { - compose_ipfix_action(ctx->xbridge, ctx->xout->odp_actions, + compose_ipfix_action(ctx->xbridge, ctx->odp_actions, &ctx->xin->flow, ODPP_NONE); } static void add_ipfix_output_action(struct xlate_ctx *ctx, odp_port_t port) { - compose_ipfix_action(ctx->xbridge, ctx->xout->odp_actions, + compose_ipfix_action(ctx->xbridge, ctx->odp_actions, &ctx->xin->flow, port); } @@ -2632,7 +2639,7 @@ fix_sflow_action(struct xlate_ctx *ctx) return; } - cookie = ofpbuf_at(ctx->xout->odp_actions, ctx->user_cookie_offset, + cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset, sizeof cookie->sflow); ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW); @@ -2817,7 +2824,7 @@ build_tunnel_send(struct xlate_ctx *ctx, const struct xport *xport, } tnl_push_data.tnl_port = odp_to_u32(tunnel_odp_port); tnl_push_data.out_port = odp_to_u32(out_dev->odp_port); - odp_put_tnl_push_action(ctx->xout->odp_actions, &tnl_push_data); + odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data); return 0; } @@ -2924,13 +2931,13 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, /* Forwarding is disabled by STP and RSTP. Let OFPP_NORMAL and * the learning action look at the packet, then drop it. */ struct flow old_base_flow = ctx->base_flow; - size_t old_size = ctx->xout->odp_actions->size; + size_t old_size = ctx->odp_actions->size; mirror_mask_t old_mirrors = ctx->xout->mirrors; xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true); ctx->xout->mirrors = old_mirrors; ctx->base_flow = old_base_flow; - ctx->xout->odp_actions->size = old_size; + ctx->odp_actions->size = old_size; /* Undo changes that may have been done for recirculation. */ if (exit_recirculates(ctx)) { @@ -3023,8 +3030,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, tnl_push_pop_send = true; } else { xlate_report(ctx, "output to kernel tunnel"); - commit_odp_tunnel_action(flow, &ctx->base_flow, - ctx->xout->odp_actions); + commit_odp_tunnel_action(flow, &ctx->base_flow, ctx->odp_actions); flow->tunnel = flow_tnl; /* Restore tunnel metadata */ } } else { @@ -3047,21 +3053,21 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, bool use_masked = ctx->xbridge->support.masked_set_action; ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow, - ctx->xout->odp_actions, + ctx->odp_actions, wc, use_masked); if (xr) { struct ovs_action_hash *act_hash; /* Hash action. */ - act_hash = nl_msg_put_unspec_uninit(ctx->xout->odp_actions, + act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions, OVS_ACTION_ATTR_HASH, sizeof *act_hash); act_hash->hash_alg = xr->hash_alg; act_hash->hash_basis = xr->hash_basis; /* Recirc action. 
*/ - nl_msg_put_u32(ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC, + nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, xr->recirc_id); } else { @@ -3080,14 +3086,14 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, } if (odp_tnl_port != ODPP_NONE) { - nl_msg_put_odp_port(ctx->xout->odp_actions, + nl_msg_put_odp_port(ctx->odp_actions, OVS_ACTION_ATTR_TUNNEL_POP, odp_tnl_port); } else { /* Tunnel push-pop action is not compatible with * IPFIX action. */ add_ipfix_output_action(ctx, out_port); - nl_msg_put_odp_port(ctx->xout->odp_actions, + nl_msg_put_odp_port(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port); } @@ -3145,7 +3151,7 @@ xlate_resubmit_resource_check(struct xlate_ctx *ctx) MAX_RESUBMIT_RECURSION); } else if (ctx->resubmits >= MAX_RESUBMITS + MAX_INTERNAL_RESUBMITS) { VLOG_ERR_RL(&rl, "over %d resubmit actions", MAX_RESUBMITS); - } else if (ctx->xout->odp_actions->size > UINT16_MAX) { + } else if (ctx->odp_actions->size > UINT16_MAX) { VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of actions"); } else if (ctx->stack.size >= 65536) { VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of stack"); @@ -3504,12 +3510,11 @@ execute_controller_action(struct xlate_ctx *ctx, int len, use_masked = ctx->xbridge->support.masked_set_action; ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow, - ctx->xout->odp_actions, + ctx->odp_actions, ctx->wc, use_masked); odp_execute_actions(NULL, &packet, 1, false, - ctx->xout->odp_actions->data, - ctx->xout->odp_actions->size, NULL); + ctx->odp_actions->data, ctx->odp_actions->size, NULL); pin = xmalloc(sizeof *pin); pin->up.packet_len = dp_packet_size(packet); @@ -3552,7 +3557,7 @@ compose_recirculate_action(struct xlate_ctx *ctx) use_masked = ctx->xbridge->support.masked_set_action; ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow, - ctx->xout->odp_actions, + ctx->odp_actions, ctx->wc, use_masked); recirc_metadata_from_flow(&md, &ctx->xin->flow); @@ -3584,7 +3589,7 @@ compose_recirculate_action(struct xlate_ctx *ctx) * fail all revalidations as zero is not a valid recirculation ID. */ } - nl_msg_put_u32(ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC, id); + nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, id); /* Undo changes done by recirculation. 
*/ ctx->action_set.size = ctx->recirc_action_offset; @@ -3605,7 +3610,7 @@ compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls) bool use_masked = ctx->xbridge->support.masked_set_action; ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow, - ctx->xout->odp_actions, + ctx->odp_actions, ctx->wc, use_masked); } else if (n >= FLOW_MAX_MPLS_LABELS) { if (ctx->xin->packet != NULL) { @@ -3641,7 +3646,7 @@ compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type) ctx->xbridge->name, FLOW_MAX_MPLS_LABELS); } ctx->exit = true; - ofpbuf_clear(ctx->xout->odp_actions); + ofpbuf_clear(ctx->odp_actions); } } @@ -3963,12 +3968,12 @@ xlate_sample_action(struct xlate_ctx *ctx, use_masked = ctx->xbridge->support.masked_set_action; ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow, - ctx->xout->odp_actions, + ctx->odp_actions, ctx->wc, use_masked); compose_flow_sample_cookie(os->probability, os->collector_set_id, os->obs_domain_id, os->obs_point_id, &cookie); - compose_sample_action(ctx->xbridge, ctx->xout->odp_actions, + compose_sample_action(ctx->xbridge, ctx->odp_actions, &ctx->xin->flow, probability, &cookie, sizeof cookie.flow_sample, ODPP_NONE, false); @@ -4533,7 +4538,8 @@ void xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto, const struct flow *flow, ofp_port_t in_port, struct rule_dpif *rule, uint16_t tcp_flags, - const struct dp_packet *packet, struct flow_wildcards *wc) + const struct dp_packet *packet, struct flow_wildcards *wc, + struct ofpbuf *odp_actions) { xin->ofproto = ofproto; xin->flow = *flow; @@ -4550,7 +4556,7 @@ xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto, xin->report_hook = NULL; xin->resubmit_stats = NULL; xin->wc = wc; - xin->odp_actions = NULL; + xin->odp_actions = odp_actions; /* Do recirc lookup. */ xin->recirc = flow->recirc_id @@ -4562,9 +4568,6 @@ void xlate_out_uninit(struct xlate_out *xout) { if (xout) { - if (xout->odp_actions == &xout->odp_actions_buf) { - ofpbuf_uninit(xout->odp_actions); - } xlate_out_free_recircs(xout); } } @@ -4628,8 +4631,8 @@ actions_output_to_local_port(const struct xlate_ctx *ctx) const struct nlattr *a; unsigned int left; - NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->xout->odp_actions->data, - ctx->xout->odp_actions->size) { + NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->odp_actions->data, + ctx->odp_actions->size) { if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT && nl_attr_get_odp_port(a) == local_odp_port) { return true; @@ -4740,6 +4743,8 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) union mf_subvalue stack_stub[1024 / sizeof(union mf_subvalue)]; uint64_t action_set_stub[1024 / 8]; struct flow_wildcards scratch_wc; + uint64_t actions_stub[256 / 8]; + struct ofpbuf scratch_actions = OFPBUF_STUB_INITIALIZER(actions_stub); struct xlate_ctx ctx = { .xin = xin, .xout = xout, @@ -4749,6 +4754,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) .stack = OFPBUF_STUB_INITIALIZER(stack_stub), .rule = xin->rule, .wc = xin->wc ? xin->wc : &scratch_wc, + .odp_actions = xin->odp_actions ? 
xin->odp_actions : &scratch_actions, .recurse = 0, .resubmits = 0, @@ -4772,6 +4778,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) .action_set = OFPBUF_STUB_INITIALIZER(action_set_stub), }; memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel); + ofpbuf_reserve(ctx.odp_actions, NL_A_U32_SIZE); enum slow_path_reason special; const struct ofpact *ofpacts; @@ -4804,14 +4811,6 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) * kernel does. If we wish to maintain the original values an action * needs to be generated. */ - xout->odp_actions = xin->odp_actions; - if (!xout->odp_actions) { - xout->odp_actions = &xout->odp_actions_buf; - ofpbuf_use_stub(xout->odp_actions, xout->odp_actions_stub, - sizeof xout->odp_actions_stub); - } - ofpbuf_reserve(xout->odp_actions, NL_A_U32_SIZE); - if (xin->wc) { flow_wildcards_init_catchall(ctx.wc); memset(&ctx.wc->masks.in_port, 0xff, sizeof ctx.wc->masks.in_port); @@ -4845,7 +4844,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) xin->ofpacts_len > 0 ? "actions" : "rule"); - return; + goto exit; } /* Set the bridge for post-recirculation processing if needed. */ @@ -4858,7 +4857,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) /* Drop the packet if the bridge cannot be found. */ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1); VLOG_WARN_RL(&rl, "Recirculation bridge no longer exists."); - return; + goto exit; } ctx.xbridge = new_bridge; } @@ -4904,7 +4903,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) VLOG_WARN_RL(&rl, "Recirculation context not found for ID %"PRIx32, flow->recirc_id); - return; + goto exit; } /* The bridge is now known so obtain its table version. */ ctx.tables_version = ofproto_dpif_get_tables_version(ctx.xbridge->ofproto); @@ -4989,7 +4988,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) if (!xin->recirc) { add_sflow_action(&ctx); add_ipfix_action(&ctx); - sample_actions_len = ctx.xout->odp_actions->size; + sample_actions_len = ctx.odp_actions->size; } else { sample_actions_len = 0; } @@ -5002,7 +5001,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) if (in_port && (!xport_stp_forward_state(in_port) || !xport_rstp_forward_state(in_port))) { /* Drop all actions added by do_xlate_actions() above. */ - ctx.xout->odp_actions->size = sample_actions_len; + ctx.odp_actions->size = sample_actions_len; /* Undo changes that may have been done for recirculation. */ if (exit_recirculates(&ctx)) { @@ -5041,14 +5040,14 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) } } - if (nl_attr_oversized(ctx.xout->odp_actions->size)) { + if (nl_attr_oversized(ctx.odp_actions->size)) { /* These datapath actions are too big for a Netlink attribute, so we * can't hand them to the kernel directly. dpif_execute() can execute * them one by one with help, so just mark the result as SLOW_ACTION to * prevent the flow from being installed. */ COVERAGE_INC(xlate_actions_oversize); ctx.xout->slow |= SLOW_ACTION; - } else if (too_many_output_actions(ctx.xout->odp_actions)) { + } else if (too_many_output_actions(ctx.odp_actions)) { COVERAGE_INC(xlate_actions_too_many_output); ctx.xout->slow |= SLOW_ACTION; } @@ -5094,9 +5093,6 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) } } - ofpbuf_uninit(&ctx.stack); - ofpbuf_uninit(&ctx.action_set); - if (xin->wc) { /* Clear the metadata and register wildcard masks, because we won't * use non-header fields as part of the cache. 
*/ @@ -5121,6 +5117,11 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) ctx.wc->masks.vlan_tci |= htons(VLAN_CFI); } } + +exit: + ofpbuf_uninit(&ctx.stack); + ofpbuf_uninit(&ctx.action_set); + ofpbuf_uninit(&scratch_actions); } /* Sends 'packet' out 'ofport'. diff --git a/ofproto/ofproto-dpif-xlate.h b/ofproto/ofproto-dpif-xlate.h index c5648d6..9c9d2f2 100644 --- a/ofproto/ofproto-dpif-xlate.h +++ b/ofproto/ofproto-dpif-xlate.h @@ -52,10 +52,6 @@ struct xlate_out { uint32_t recirc[2]; /* When n_recircs == 1 or 2 */ uint32_t *recircs; /* When 'n_recircs' > 2 */ }; - - uint64_t odp_actions_stub[256 / 8]; - struct ofpbuf odp_actions_buf; - struct ofpbuf *odp_actions; }; /* Helpers to abstract the recirculation union away. */ @@ -181,9 +177,8 @@ struct xlate_in { * calling xlate_in_init(). */ struct xlate_cache *xcache; - /* Allows callers to optionally supply their own buffer for the resulting - * odp_actions stored in xlate_out. If NULL, the default buffer will be - * used. */ + /* If nonnull, flow translation puts the resulting datapath actions in this + * buffer. If null, flow translation will not produce datapath actions. */ struct ofpbuf *odp_actions; /* If nonnull, flow translation populates this with wildcards relevant in @@ -239,7 +234,7 @@ void xlate_actions(struct xlate_in *, struct xlate_out *); void xlate_in_init(struct xlate_in *, struct ofproto_dpif *, const struct flow *, ofp_port_t in_port, struct rule_dpif *, uint16_t tcp_flags, const struct dp_packet *packet, - struct flow_wildcards *); + struct flow_wildcards *, struct ofpbuf *odp_actions); void xlate_out_uninit(struct xlate_out *); void xlate_actions_for_side_effects(struct xlate_in *); diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c index ab0bf09..078eb90 100644 --- a/ofproto/ofproto-dpif.c +++ b/ofproto/ofproto-dpif.c @@ -3660,15 +3660,17 @@ ofproto_dpif_execute_actions(struct ofproto_dpif *ofproto, rule_dpif_credit_stats(rule, &stats); } + uint64_t odp_actions_stub[1024 / 8]; + struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub); xlate_in_init(&xin, ofproto, flow, flow->in_port.ofp_port, rule, - stats.tcp_flags, packet, NULL); + stats.tcp_flags, packet, NULL, &odp_actions); xin.ofpacts = ofpacts; xin.ofpacts_len = ofpacts_len; xin.resubmit_stats = &stats; xlate_actions(&xin, &xout); - execute.actions = xout.odp_actions->data; - execute.actions_len = xout.odp_actions->size; + execute.actions = odp_actions.data; + execute.actions_len = odp_actions.size; pkt_metadata_from_flow(&packet->md, flow); execute.packet = packet; @@ -3685,6 +3687,7 @@ ofproto_dpif_execute_actions(struct ofproto_dpif *ofproto, error = dpif_execute(ofproto->backer->dpif, &execute); xlate_out_uninit(&xout); + ofpbuf_uninit(&odp_actions); return error; } @@ -4466,6 +4469,7 @@ struct trace_ctx { struct flow flow; struct flow_wildcards wc; struct ds *result; + struct ofpbuf odp_actions; }; static void @@ -4532,7 +4536,7 @@ static void trace_format_odp(struct ds *result, int level, const char *title, struct trace_ctx *trace) { - struct ofpbuf *odp_actions = trace->xout.odp_actions; + struct ofpbuf *odp_actions = &trace->odp_actions; ds_put_char_multiple(result, '\t', level); ds_put_format(result, "%s: ", title); @@ -4891,12 +4895,14 @@ ofproto_trace(struct ofproto_dpif *ofproto, struct flow *flow, ds_put_char(ds, '\n'); flow_wildcards_init_catchall(&trace.wc); + ofpbuf_init(&trace.odp_actions, 0); trace.result = ds; trace.key = flow; /* Original flow key, used for megaflow. 
*/ trace.flow = *flow; /* May be modified by actions. */ xlate_in_init(&trace.xin, ofproto, flow, flow->in_port.ofp_port, NULL, - ntohs(flow->tcp_flags), packet, &trace.wc); + ntohs(flow->tcp_flags), packet, &trace.wc, + &trace.odp_actions); trace.xin.ofpacts = ofpacts; trace.xin.ofpacts_len = ofpacts_len; trace.xin.resubmit_hook = trace_resubmit; @@ -4909,8 +4915,7 @@ ofproto_trace(struct ofproto_dpif *ofproto, struct flow *flow, trace_format_megaflow(ds, 0, "Megaflow", &trace); ds_put_cstr(ds, "Datapath actions: "); - format_odp_actions(ds, trace.xout.odp_actions->data, - trace.xout.odp_actions->size); + format_odp_actions(ds, trace.odp_actions.data, trace.odp_actions.size); if (trace.xout.slow) { enum slow_path_reason slow; @@ -4930,6 +4935,7 @@ ofproto_trace(struct ofproto_dpif *ofproto, struct flow *flow, } xlate_out_uninit(&trace.xout); + ofpbuf_uninit(&trace.odp_actions); } /* Store the current ofprotos in 'ofproto_shash'. Returns a sorted list -- 2.1.3