From: Ben Pfaff <b...@nicira.com>

Until now, struct xlate_out has embedded an ofpbuf for datapath
actions, along with a large stub for it, which xlate_actions() filled
in during flow translation.  This commit removes the embedded ofpbuf
and stub and instead adds an ofpbuf pointer to struct xlate_in, which
the caller may point at its own buffer if it wants the translated
actions.  (If none is supplied, xlate_actions() uses an internal
scratch buffer and destroys it before returning.)

This patch is a backport of commit 1520ef4 to branch-2.3.

Signed-off-by: Ben Pfaff <b...@nicira.com>
Signed-off-by: Jarno Rajahalme <ja...@ovn.org>
Acked-by: Ben Pfaff <b...@ovn.org>
---
 ofproto/ofproto-dpif-upcall.c |  47 +++++++++++---------
 ofproto/ofproto-dpif-xlate.c  | 101 +++++++++++++++++++-----------------------
 ofproto/ofproto-dpif-xlate.h  |  11 ++---
 ofproto/ofproto-dpif.c        |  24 ++++++----
 4 files changed, 94 insertions(+), 89 deletions(-)

diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c
index 193e6b7..f8f19c8 100644
--- a/ofproto/ofproto-dpif-upcall.c
+++ b/ofproto/ofproto-dpif-upcall.c
@@ -202,6 +202,9 @@ struct flow_miss {
     struct xlate_out xout;
 
     bool put;
+
+    struct ofpbuf odp_actions;     /* Datapath actions from xlate_actions(). */
+    uint64_t odp_actions_stub[1024 / 8]; /* Stub for odp_actions. */
 };
 
 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
@@ -533,6 +536,7 @@ udpif_upcall_handler(void *arg)
 
             HMAP_FOR_EACH (miss, hmap_node, &misses) {
                 xlate_out_uninit(&miss->xout);
+                ofpbuf_uninit(&miss->odp_actions);
             }
             hmap_clear(&misses);
             for (i = 0; i < n_upcalls; i++) {
@@ -827,6 +831,8 @@ read_upcalls(struct handler *handler,
                 miss->stats.tcp_flags = 0;
                 miss->odp_in_port = odp_in_port;
                 miss->put = false;
+                ofpbuf_use_stub(&miss->odp_actions, miss->odp_actions_stub,
+                                sizeof miss->odp_actions_stub);
                 n_misses++;
             } else {
                 miss = existing_miss;
@@ -918,7 +924,7 @@ handle_upcalls(struct handler *handler, struct hmap *misses,
         struct xlate_in xin;
 
         xlate_in_init(&xin, miss->ofproto, &miss->flow, NULL,
-                      miss->stats.tcp_flags, NULL);
+                      miss->stats.tcp_flags, NULL, &miss->odp_actions);
         xin.may_learn = true;
 
         if (miss->upcall_type == DPIF_UC_MISS) {
@@ -960,7 +966,8 @@ handle_upcalls(struct handler *handler, struct hmap *misses,
         if (miss->xout.slow) {
             struct xlate_in xin;
 
-            xlate_in_init(&xin, miss->ofproto, &miss->flow, NULL, 0, packet);
+            xlate_in_init(&xin, miss->ofproto, &miss->flow, NULL, 0, packet,
+                          NULL);
             xlate_actions_for_side_effects(&xin);
         }
 
@@ -974,7 +981,7 @@ handle_upcalls(struct handler *handler, struct hmap *misses,
              * the packet contained no VLAN.  So, we must remove the
              * VLAN header from the packet before trying to execute the
              * actions. */
-            if (ofpbuf_size(&miss->xout.odp_actions)) {
+            if (ofpbuf_size(&miss->odp_actions)) {
                 eth_pop_vlan(packet);
             }
 
@@ -1019,8 +1026,8 @@ handle_upcalls(struct handler *handler, struct hmap *misses,
             op->u.flow_put.stats = NULL;
 
             if (!miss->xout.slow) {
-                op->u.flow_put.actions = ofpbuf_data(&miss->xout.odp_actions);
-                op->u.flow_put.actions_len = ofpbuf_size(&miss->xout.odp_actions);
+                op->u.flow_put.actions = ofpbuf_data(&miss->odp_actions);
+                op->u.flow_put.actions_len = ofpbuf_size(&miss->odp_actions);
             } else {
                 struct ofpbuf buf;
 
@@ -1039,15 +1046,15 @@ handle_upcalls(struct handler *handler, struct hmap *misses,
          * upcall. */
         miss->flow.vlan_tci = flow_vlan_tci;
 
-        if (ofpbuf_size(&miss->xout.odp_actions)) {
+        if (ofpbuf_size(&miss->odp_actions)) {
 
             op = &ops[n_ops++];
             op->type = DPIF_OP_EXECUTE;
             op->u.execute.packet = packet;
             odp_key_to_pkt_metadata(miss->key, miss->key_len,
                                     &op->u.execute.md);
-            op->u.execute.actions = ofpbuf_data(&miss->xout.odp_actions);
-            op->u.execute.actions_len = ofpbuf_size(&miss->xout.odp_actions);
+            op->u.execute.actions = ofpbuf_data(&miss->odp_actions);
+            op->u.execute.actions_len = ofpbuf_size(&miss->odp_actions);
             op->u.execute.needs_help = (miss->xout.slow & SLOW_ACTION) != 0;
         }
     }
@@ -1215,12 +1222,12 @@ revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
                 const struct dpif_flow_stats *stats)
     OVS_REQUIRES(ukey->mutex)
 {
-    uint64_t slow_path_buf[128 / 8];
+    uint64_t odp_actions_stub[1024 / 8];
+    struct ofpbuf odp_actions;
     struct xlate_out xout, *xoutp;
     struct netflow *netflow;
     struct ofproto_dpif *ofproto;
     struct dpif_flow_stats push;
-    struct ofpbuf xout_actions;
     struct flow flow, dp_mask;
     uint32_t *dp32, *xout32;
     odp_port_t odp_in_port;
@@ -1230,6 +1237,7 @@ revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
     size_t i;
     bool may_learn, ok;
 
+    ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof(odp_actions_stub));
     ok = false;
     xoutp = NULL;
     netflow = NULL;
@@ -1277,7 +1285,8 @@ revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
         ukey->xcache = xlate_cache_new();
     }
 
-    xlate_in_init(&xin, ofproto, &flow, NULL, push.tcp_flags, NULL);
+    xlate_in_init(&xin, ofproto, &flow, NULL, push.tcp_flags, NULL,
+                  &odp_actions);
     xin.resubmit_stats = push.n_packets ? &push : NULL;
     xin.xcache = ukey->xcache;
     xin.may_learn = may_learn;
@@ -1290,16 +1299,13 @@ revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
         goto exit;
     }
 
-    if (!xout.slow) {
-        ofpbuf_use_const(&xout_actions, ofpbuf_data(&xout.odp_actions),
-                         ofpbuf_size(&xout.odp_actions));
-    } else {
-        ofpbuf_use_stack(&xout_actions, slow_path_buf, sizeof slow_path_buf);
-        compose_slow_path(udpif, &xout, &flow, odp_in_port, &xout_actions);
+    if (xout.slow) {
+        ofpbuf_clear(&odp_actions);
+        compose_slow_path(udpif, &xout, &flow, odp_in_port, &odp_actions);
     }
 
-    if (actions_len != ofpbuf_size(&xout_actions)
-        || memcmp(ofpbuf_data(&xout_actions), actions, actions_len)) {
+    if (actions_len != ofpbuf_size(&odp_actions)
+        || memcmp(ofpbuf_data(&odp_actions), actions, actions_len)) {
         goto exit;
     }
 
@@ -1330,6 +1336,7 @@ exit:
         netflow_unref(netflow);
     }
     xlate_out_uninit(xoutp);
+    ofpbuf_uninit(&odp_actions);
     return ok;
 }
 
@@ -1402,7 +1409,7 @@ push_dump_ops__(struct udpif *udpif, struct dump_op *ops, size_t n_ops)
                 struct xlate_in xin;
 
                 xlate_in_init(&xin, ofproto, &flow, NULL, push->tcp_flags,
-                              NULL);
+                              NULL, NULL);
                 xin.resubmit_stats = push->n_packets ? push : NULL;
                 xin.may_learn = may_learn;
                 xin.skip_wildcards = true;
diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c
index df848e5..c9f5f41 100644
--- a/ofproto/ofproto-dpif-xlate.c
+++ b/ofproto/ofproto-dpif-xlate.c
@@ -181,6 +181,13 @@ struct xlate_ctx {
     /* The rule that we are currently translating, or NULL. */
     struct rule_dpif *rule;
 
+    /* Output buffer for datapath actions.  When 'xin->odp_actions' is nonnull,
+     * this is the same pointer.  When 'xin->odp_actions' is null, this points
+     * to a scratch ofpbuf.  This allows code to add actions to
+     * 'ctx->odp_actions' without worrying about whether the caller really
+     * wants actions. */
+    struct ofpbuf *odp_actions;
+
     /* Resubmit statistics, via xlate_table_action(). */
     int recurse;                /* Current resubmit nesting depth. */
     int resubmits;              /* Total number of resubmits. */
@@ -1084,7 +1091,7 @@ add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
                          "%s, which is reserved exclusively for mirroring",
                          ctx->xbridge->name, in_xbundle->name);
         }
-        ofpbuf_clear(&ctx->xout->odp_actions);
+        ofpbuf_clear(ctx->odp_actions);
         return;
     }
 
@@ -1775,7 +1782,7 @@ static void
 add_sflow_action(struct xlate_ctx *ctx)
 {
     ctx->user_cookie_offset = compose_sflow_action(ctx->xbridge,
-                                                   &ctx->xout->odp_actions,
+                                                   ctx->odp_actions,
                                                    &ctx->xin->flow, ODPP_NONE);
     ctx->sflow_odp_port = 0;
     ctx->sflow_n_outputs = 0;
@@ -1786,7 +1793,7 @@ add_sflow_action(struct xlate_ctx *ctx)
 static void
 add_ipfix_action(struct xlate_ctx *ctx)
 {
-    compose_ipfix_action(ctx->xbridge, &ctx->xout->odp_actions,
+    compose_ipfix_action(ctx->xbridge, ctx->odp_actions,
                          &ctx->xin->flow);
 }
 
@@ -1803,7 +1810,7 @@ fix_sflow_action(struct xlate_ctx *ctx)
         return;
     }
 
-    cookie = ofpbuf_at(&ctx->xout->odp_actions, ctx->user_cookie_offset,
+    cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
                        sizeof cookie->sflow);
     ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
 
@@ -1916,13 +1923,13 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                 /* Forwarding is disabled by STP.  Let OFPP_NORMAL and the
                  * learning action look at the packet, then drop it. */
                 struct flow old_base_flow = ctx->base_flow;
-                size_t old_size = ofpbuf_size(&ctx->xout->odp_actions);
+                size_t old_size = ofpbuf_size(ctx->odp_actions);
                 mirror_mask_t old_mirrors = ctx->xout->mirrors;
                 xlate_table_action(ctx, flow->in_port.ofp_port, table_id,
                                    true, true);
                 ctx->xout->mirrors = old_mirrors;
                 ctx->base_flow = old_base_flow;
-                ofpbuf_set_size(&ctx->xout->odp_actions, old_size);
+                ofpbuf_set_size(ctx->odp_actions, old_size);
             }
         }
 
@@ -1983,8 +1990,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
             entry->u.dev.tx = netdev_ref(xport->netdev);
         }
         out_port = odp_port;
-        commit_odp_tunnel_action(flow, &ctx->base_flow,
-                                 &ctx->xout->odp_actions);
+        commit_odp_tunnel_action(flow, &ctx->base_flow, ctx->odp_actions);
         flow->tunnel = flow_tnl; /* Restore tunnel metadata */
     } else {
         odp_port = xport->odp_port;
@@ -2004,24 +2010,23 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
 
     if (out_port != ODPP_NONE) {
         ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
-                                              &ctx->xout->odp_actions,
+                                              ctx->odp_actions,
                                               &ctx->xout->wc);
-
         if (xr) {
             struct ovs_action_hash *act_hash;
 
             /* Hash action. */
-            act_hash = nl_msg_put_unspec_uninit(&ctx->xout->odp_actions,
+            act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
                                                 OVS_ACTION_ATTR_HASH,
                                                 sizeof *act_hash);
             act_hash->hash_alg = xr->hash_alg;
             act_hash->hash_basis = xr->hash_basis;
 
             /* Recirc action. */
-            nl_msg_put_u32(&ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC,
+            nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC,
                            xr->recirc_id);
         } else {
-            nl_msg_put_odp_port(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT,
+            nl_msg_put_odp_port(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT,
                                 out_port);
         }
 
@@ -2073,7 +2078,7 @@ xlate_resubmit_resource_check(struct xlate_ctx *ctx)
                     MAX_RESUBMIT_RECURSION);
     } else if (ctx->resubmits >= MAX_RESUBMITS + MAX_INTERNAL_RESUBMITS) {
         VLOG_ERR_RL(&rl, "over %d resubmit actions", MAX_RESUBMITS);
-    } else if (ofpbuf_size(&ctx->xout->odp_actions) > UINT16_MAX) {
+    } else if (ofpbuf_size(ctx->odp_actions) > UINT16_MAX) {
         VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of actions");
     } else if (ofpbuf_size(&ctx->stack) >= 65536) {
         VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of stack");
@@ -2370,8 +2375,7 @@ execute_controller_action(struct xlate_ctx *ctx, int len,
 
     ctx->xout->slow |= SLOW_CONTROLLER;
     ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
-                                          &ctx->xout->odp_actions,
-                                          &ctx->xout->wc);
+                                          ctx->odp_actions, &ctx->xout->wc);
 
     if (!ctx->xin->packet) {
         return;
@@ -2380,9 +2384,8 @@ execute_controller_action(struct xlate_ctx *ctx, int len,
     packet = ofpbuf_clone(ctx->xin->packet);
 
     odp_execute_actions(NULL, packet, false, &md,
-                        ofpbuf_data(&ctx->xout->odp_actions),
-                        ofpbuf_size(&ctx->xout->odp_actions), NULL);
-
+                        ofpbuf_data(ctx->odp_actions),
+                        ofpbuf_size(ctx->odp_actions), NULL);
     pin = xmalloc(sizeof *pin);
     pin->up.packet_len = ofpbuf_size(packet);
     pin->up.packet = ofpbuf_steal_data(packet);
@@ -2428,7 +2431,7 @@ compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
     n = flow_count_mpls_labels(flow, wc);
     if (!n) {
         ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
-                                              &ctx->xout->odp_actions,
+                                              ctx->odp_actions,
                                               &ctx->xout->wc);
     } else if (n >= FLOW_MAX_MPLS_LABELS) {
         if (ctx->xin->packet != NULL) {
@@ -2464,7 +2467,7 @@ compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
                          ctx->xbridge->name, FLOW_MAX_MPLS_LABELS);
         }
         ctx->exit = true;
-        ofpbuf_clear(&ctx->xout->odp_actions);
+        ofpbuf_clear(ctx->odp_actions);
     }
 }
 
@@ -2784,12 +2787,11 @@ xlate_sample_action(struct xlate_ctx *ctx,
   }
 
   ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
-                                        &ctx->xout->odp_actions,
-                                        &ctx->xout->wc);
+                                        ctx->odp_actions, &ctx->xout->wc);
 
   compose_flow_sample_cookie(os->probability, os->collector_set_id,
                              os->obs_domain_id, os->obs_point_id, &cookie);
-  compose_sample_action(ctx->xbridge, &ctx->xout->odp_actions, &ctx->xin->flow,
+  compose_sample_action(ctx->xbridge, ctx->odp_actions, &ctx->xin->flow,
                         probability, &cookie, sizeof cookie.flow_sample);
 }
 
@@ -3126,7 +3128,8 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
 void
 xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
               const struct flow *flow, struct rule_dpif *rule,
-              uint16_t tcp_flags, const struct ofpbuf *packet)
+              uint16_t tcp_flags, const struct ofpbuf *packet,
+              struct ofpbuf *odp_actions)
 {
     xin->ofproto = ofproto;
     xin->flow = *flow;
@@ -3141,14 +3144,13 @@ xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
     xin->report_hook = NULL;
     xin->resubmit_stats = NULL;
     xin->skip_wildcards = false;
+    xin->odp_actions = odp_actions;
 }
 
 void
-xlate_out_uninit(struct xlate_out *xout)
+xlate_out_uninit(struct xlate_out * xout OVS_UNUSED)
 {
-    if (xout) {
-        ofpbuf_uninit(&xout->odp_actions);
-    }
+    /* Nothing to do. */
 }
 
 /* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
@@ -3169,23 +3171,6 @@ xlate_report(struct xlate_ctx *ctx, const char *s)
         ctx->xin->report_hook(ctx->xin, s, ctx->recurse);
     }
 }
-
-void
-xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src)
-{
-    dst->wc = src->wc;
-    dst->slow = src->slow;
-    dst->has_learn = src->has_learn;
-    dst->has_normal = src->has_normal;
-    dst->has_fin_timeout = src->has_fin_timeout;
-    dst->nf_output_iface = src->nf_output_iface;
-    dst->mirrors = src->mirrors;
-
-    ofpbuf_use_stub(&dst->odp_actions, dst->odp_actions_stub,
-                    sizeof dst->odp_actions_stub);
-    ofpbuf_put(&dst->odp_actions, ofpbuf_data(&src->odp_actions),
-               ofpbuf_size(&src->odp_actions));
-}
 
 static struct skb_priority_to_dscp *
 get_skb_priority(const struct xport *xport, uint32_t skb_priority)
@@ -3229,8 +3214,8 @@ actions_output_to_local_port(const struct xlate_ctx *ctx)
     const struct nlattr *a;
     unsigned int left;
 
-    NL_ATTR_FOR_EACH_UNSAFE (a, left, ofpbuf_data(&ctx->xout->odp_actions),
-                             ofpbuf_size(&ctx->xout->odp_actions)) {
+    NL_ATTR_FOR_EACH_UNSAFE (a, left, ofpbuf_data(ctx->odp_actions),
+                             ofpbuf_size(ctx->odp_actions)) {
         if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
             && nl_attr_get_odp_port(a) == local_odp_port) {
             return true;
@@ -3343,6 +3328,11 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
     bool tnl_may_send;
     bool is_icmp;
 
+    uint64_t actions_stub[256 / 8];
+    struct ofpbuf scratch_actions;
+
+    ofpbuf_use_stub(&scratch_actions, actions_stub, sizeof(actions_stub));
+
     COVERAGE_INC(xlate_actions);
 
     /* Flow initialization rules:
@@ -3374,9 +3364,8 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
     ctx.xout->has_fin_timeout = false;
     ctx.xout->nf_output_iface = NF_OUT_DROP;
     ctx.xout->mirrors = 0;
-    ofpbuf_use_stub(&ctx.xout->odp_actions, ctx.xout->odp_actions_stub,
-                    sizeof ctx.xout->odp_actions_stub);
-    ofpbuf_reserve(&ctx.xout->odp_actions, NL_A_U32_SIZE);
+    ctx.odp_actions = xin->odp_actions ? xin->odp_actions : &scratch_actions;
+    ofpbuf_reserve(ctx.odp_actions, NL_A_U32_SIZE);
 
     ctx.xbridge = xbridge_lookup(xin->ofproto);
     if (!ctx.xbridge) {
@@ -3504,7 +3493,7 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
 
         add_sflow_action(&ctx);
         add_ipfix_action(&ctx);
-        sample_actions_len = ofpbuf_size(&ctx.xout->odp_actions);
+        sample_actions_len = ofpbuf_size(ctx.odp_actions);
 
         if (tnl_may_send && (!in_port || may_receive(in_port, &ctx))) {
             do_xlate_actions(ofpacts, ofpacts_len, &ctx);
@@ -3512,7 +3501,7 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
             /* We've let OFPP_NORMAL and the learning action look at the
              * packet, so drop it now if forwarding is disabled. */
             if (in_port && !xport_stp_forward_state(in_port)) {
-                ofpbuf_set_size(&ctx.xout->odp_actions, sample_actions_len);
+                ofpbuf_set_size(ctx.odp_actions, sample_actions_len);
             }
         }
 
@@ -3533,14 +3522,14 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
         }
     }
 
-    if (nl_attr_oversized(ofpbuf_size(&ctx.xout->odp_actions))) {
+    if (nl_attr_oversized(ofpbuf_size(ctx.odp_actions))) {
         /* These datapath actions are too big for a Netlink attribute, so we
          * can't hand them to the kernel directly.  dpif_execute() can execute
          * them one by one with help, so just mark the result as SLOW_ACTION to
          * prevent the flow from being installed. */
         COVERAGE_INC(xlate_actions_oversize);
         ctx.xout->slow |= SLOW_ACTION;
-    } else if (too_many_output_actions(&ctx.xout->odp_actions)) {
+    } else if (too_many_output_actions(ctx.odp_actions)) {
         COVERAGE_INC(xlate_actions_too_many_output);
         ctx.xout->slow |= SLOW_ACTION;
     }
@@ -3586,7 +3575,7 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
 
     ofpbuf_uninit(&ctx.stack);
     ofpbuf_uninit(&ctx.action_set);
-
+    ofpbuf_uninit(&scratch_actions);
 
     /* Clear the metadata and register wildcard masks, because we won't
      * use non-header fields as part of the cache. */
diff --git a/ofproto/ofproto-dpif-xlate.h b/ofproto/ofproto-dpif-xlate.h
index 8c7642e..3bc0826 100644
--- a/ofproto/ofproto-dpif-xlate.h
+++ b/ofproto/ofproto-dpif-xlate.h
@@ -50,9 +50,6 @@ struct xlate_out {
     bool has_fin_timeout;       /* Actions include NXAST_FIN_TIMEOUT? */
     ofp_port_t nf_output_iface; /* Output interface index for NetFlow. */
     mirror_mask_t mirrors;      /* Bitmap of associated mirrors. */
-
-    uint64_t odp_actions_stub[256 / 8];
-    struct ofpbuf odp_actions;
 };
 
 struct xlate_in {
@@ -128,6 +125,10 @@ struct xlate_in {
      * This is normally null so the client has to set it manually after
      * calling xlate_in_init(). */
     struct xlate_cache *xcache;
+
+    /* If nonnull, flow translation puts the resulting datapath actions in this
+     * buffer.  If null, flow translation will not produce datapath actions. */
+    struct ofpbuf *odp_actions;
 };
 
 extern struct fat_rwlock xlate_rwlock;
@@ -173,10 +174,10 @@ void xlate_actions(struct xlate_in *, struct xlate_out *)
     OVS_EXCLUDED(xlate_rwlock);
 void xlate_in_init(struct xlate_in *, struct ofproto_dpif *,
                    const struct flow *, struct rule_dpif *,
-                   uint16_t tcp_flags, const struct ofpbuf *packet);
+                   uint16_t tcp_flags, const struct ofpbuf *packet,
+                   struct ofpbuf *odp_actions);
 void xlate_out_uninit(struct xlate_out *);
 void xlate_actions_for_side_effects(struct xlate_in *);
-void xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src);
 
 int xlate_send_packet(const struct ofport_dpif *, struct ofpbuf *);
 
diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index 9b87248..a7ad88b 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -3146,6 +3146,10 @@ ofproto_dpif_execute_actions(struct ofproto_dpif *ofproto,
     ofp_port_t in_port;
     struct dpif_execute execute;
     int error;
+    uint64_t odp_actions_stub[1024 / 8];
+    struct ofpbuf odp_actions;
+
+    ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof(odp_actions_stub));
 
     ovs_assert((rule != NULL) != (ofpacts != NULL));
 
@@ -3155,14 +3159,15 @@ ofproto_dpif_execute_actions(struct ofproto_dpif *ofproto,
         rule_dpif_credit_stats(rule, &stats);
     }
 
-    xlate_in_init(&xin, ofproto, flow, rule, stats.tcp_flags, packet);
+    xlate_in_init(&xin, ofproto, flow, rule, stats.tcp_flags, packet,
+                  &odp_actions);
     xin.ofpacts = ofpacts;
     xin.ofpacts_len = ofpacts_len;
     xin.resubmit_stats = &stats;
     xlate_actions(&xin, &xout);
 
-    execute.actions = ofpbuf_data(&xout.odp_actions);
-    execute.actions_len = ofpbuf_size(&xout.odp_actions);
+    execute.actions = ofpbuf_data(&odp_actions);
+    execute.actions_len = ofpbuf_size(&odp_actions);
     execute.packet = packet;
     execute.md = pkt_metadata_from_flow(flow);
     execute.needs_help = (xout.slow & SLOW_ACTION) != 0;
@@ -3177,6 +3182,7 @@ ofproto_dpif_execute_actions(struct ofproto_dpif *ofproto,
     error = dpif_execute(ofproto->backer->dpif, &execute);
 
     xlate_out_uninit(&xout);
+    ofpbuf_uninit(&odp_actions);
 
     return error;
 }
@@ -3841,6 +3847,7 @@ struct trace_ctx {
     struct flow flow;
     struct flow_wildcards wc;
     struct ds *result;
+    struct ofpbuf odp_actions;
 };
 
 static void
@@ -3907,7 +3914,7 @@ static void
 trace_format_odp(struct ds *result, int level, const char *title,
                  struct trace_ctx *trace)
 {
-    struct ofpbuf *odp_actions = &trace->xout.odp_actions;
+    struct ofpbuf *odp_actions = &trace->odp_actions;
 
     ds_put_char_multiple(result, '\t', level);
     ds_put_format(result, "%s: ", title);
@@ -4227,6 +4234,7 @@ ofproto_trace(struct ofproto_dpif *ofproto, struct flow *flow,
     flow_format(ds, flow);
     ds_put_char(ds, '\n');
 
+    ofpbuf_init(&trace.odp_actions, 0);
     flow_wildcards_init_catchall(&trace.wc);
     if (ofpacts) {
         rule = NULL;
@@ -4250,7 +4258,7 @@ ofproto_trace(struct ofproto_dpif *ofproto, struct flow *flow,
         trace.key = flow; /* Original flow key, used for megaflow. */
         trace.flow = *flow; /* May be modified by actions. */
         xlate_in_init(&trace.xin, ofproto, flow, rule, ntohs(flow->tcp_flags),
-                      packet);
+                      packet, &trace.odp_actions);
         if (ofpacts) {
             trace.xin.ofpacts = ofpacts;
             trace.xin.ofpacts_len = ofpacts_len;
@@ -4265,9 +4273,8 @@ ofproto_trace(struct ofproto_dpif *ofproto, struct flow *flow,
         trace_format_megaflow(ds, 0, "Megaflow", &trace);
 
         ds_put_cstr(ds, "Datapath actions: ");
-        format_odp_actions(ds, ofpbuf_data(&trace.xout.odp_actions),
-                           ofpbuf_size(&trace.xout.odp_actions));
-
+        format_odp_actions(ds, ofpbuf_data(&trace.odp_actions),
+                           ofpbuf_size(&trace.odp_actions));
         if (trace.xout.slow) {
             enum slow_path_reason slow;
 
@@ -4287,6 +4294,7 @@ ofproto_trace(struct ofproto_dpif *ofproto, struct flow *flow,
 
         xlate_out_uninit(&trace.xout);
     }
+    ofpbuf_uninit(&trace.odp_actions);
 }
 
 /* Store the current ofprotos in 'ofproto_shash'.  Returns a sorted list
-- 
2.1.4
