This will make later patches easier to follow.
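
For illustration only (this sketch is not an excerpt from the patch, and
'example_action' is a made-up name), the pattern throughout is to take a
local pointer to the flow once and use it for every subsequent access:

    static void
    example_action(struct xlate_ctx *ctx)
    {
        struct flow *flow = &ctx->xin->flow;   /* Take the pointer once. */

        /* Was: ctx->xin->flow.vlan_tci = htons(0); */
        flow->vlan_tci = htons(0);

        /* Was: ctx->xin->flow.skb_mark &= ~IPSEC_MARK; */
        flow->skb_mark &= ~IPSEC_MARK;
    }

The substitution is mechanical; only the way the flow is referenced changes.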
Signed-off-by: Jarno Rajahalme <[email protected]>
---
ofproto/ofproto-dpif.c | 316 ++++++++++++++++++++++++++----------------------
1 file changed, 171 insertions(+), 145 deletions(-)
diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index f78d60b..23ea2c6 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -5668,7 +5668,7 @@ send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
/* OpenFlow to datapath action translation. */
-static bool may_receive(const struct ofport_dpif *, struct xlate_ctx *);
+static bool may_receive(const struct ofport_dpif *, struct flow *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
struct xlate_ctx *);
static void xlate_normal(struct xlate_ctx *);
@@ -5849,9 +5849,11 @@ compose_ipfix_action(const struct ofproto_dpif *ofproto,
static void
add_sflow_action(struct xlate_ctx *ctx)
{
+ struct flow *flow = &ctx->xin->flow;
+
ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto,
&ctx->xout->odp_actions,
- &ctx->xin->flow, OVSP_NONE);
+ flow, OVSP_NONE);
ctx->sflow_odp_port = 0;
ctx->sflow_n_outputs = 0;
}
@@ -5861,8 +5863,10 @@ add_sflow_action(struct xlate_ctx *ctx)
static void
add_ipfix_action(struct xlate_ctx *ctx)
{
+ struct flow *flow = &ctx->xin->flow;
+
compose_ipfix_action(ctx->ofproto, &ctx->xout->odp_actions,
- &ctx->xin->flow);
+ flow);
}
/* Fix SAMPLE action according to data collected while composing ODP actions.
@@ -5890,6 +5894,7 @@ static void
compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port,
bool check_stp)
{
+ struct flow *flow = &ctx->xin->flow;
const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
ovs_be16 flow_vlan_tci;
uint32_t flow_skb_mark;
@@ -5914,7 +5919,7 @@ compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port,
if (netdev_vport_is_patch(ofport->up.netdev)) {
struct ofport_dpif *peer = ofport_get_peer(ofport);
- struct flow old_flow = ctx->xin->flow;
+ struct flow old_flow = *flow;
const struct ofproto_dpif *peer_ofproto;
enum slow_path_reason special;
struct ofport_dpif *in_port;
@@ -5931,31 +5936,31 @@ compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port,
}
ctx->ofproto = ofproto_dpif_cast(peer->up.ofproto);
- ctx->xin->flow.in_port = peer->up.ofp_port;
- ctx->xin->flow.metadata = htonll(0);
- memset(&ctx->xin->flow.tunnel, 0, sizeof ctx->xin->flow.tunnel);
- memset(ctx->xin->flow.regs, 0, sizeof ctx->xin->flow.regs);
+ flow->in_port = peer->up.ofp_port;
+ flow->metadata = htonll(0);
+ memset(&flow->tunnel, 0, sizeof flow->tunnel);
+ memset(flow->regs, 0, sizeof flow->regs);
- in_port = get_ofp_port(ctx->ofproto, ctx->xin->flow.in_port);
- special = process_special(ctx->ofproto, &ctx->xin->flow, in_port,
+ in_port = get_ofp_port(ctx->ofproto, flow->in_port);
+ special = process_special(ctx->ofproto, flow, in_port,
ctx->xin->packet);
if (special) {
ctx->xout->slow = special;
- } else if (!in_port || may_receive(in_port, ctx)) {
+ } else if (!in_port || may_receive(in_port, flow)) {
if (!in_port || stp_forward_in_state(in_port->stp_state)) {
- xlate_table_action(ctx, ctx->xin->flow.in_port, 0, true);
+ xlate_table_action(ctx, flow->in_port, 0, true);
} else {
/* Forwarding is disabled by STP. Let OFPP_NORMAL and the
* learning action look at the packet, then drop it. */
struct flow old_base_flow = ctx->base_flow;
size_t old_size = ctx->xout->odp_actions.size;
- xlate_table_action(ctx, ctx->xin->flow.in_port, 0, true);
+ xlate_table_action(ctx, flow->in_port, 0, true);
ctx->base_flow = old_base_flow;
ctx->xout->odp_actions.size = old_size;
}
}
- ctx->xin->flow = old_flow;
+ *flow = old_flow;
ctx->ofproto = ofproto_dpif_cast(ofport->up.ofproto);
if (ctx->xin->resubmit_stats) {
@@ -5966,14 +5971,14 @@ compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port,
return;
}
- flow_vlan_tci = ctx->xin->flow.vlan_tci;
- flow_skb_mark = ctx->xin->flow.skb_mark;
- flow_nw_tos = ctx->xin->flow.nw_tos;
+ flow_vlan_tci = flow->vlan_tci;
+ flow_skb_mark = flow->skb_mark;
+ flow_nw_tos = flow->nw_tos;
- pdscp = get_priority(ofport, ctx->xin->flow.skb_priority);
+ pdscp = get_priority(ofport, flow->skb_priority);
if (pdscp) {
- ctx->xin->flow.nw_tos &= ~IP_DSCP_MASK;
- ctx->xin->flow.nw_tos |= pdscp->dscp;
+ flow->nw_tos &= ~IP_DSCP_MASK;
+ flow->nw_tos |= pdscp->dscp;
}
if (ofport->tnl_port) {
@@ -5981,13 +5986,13 @@ compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port,
* the Logical (tunnel) Port are not visible for any further
* matches, while explicit set actions on tunnel metadata are.
*/
- struct flow_tnl flow_tnl = ctx->xin->flow.tunnel;
- odp_port = tnl_port_send(ofport->tnl_port, &ctx->xin->flow);
+ struct flow_tnl flow_tnl = flow->tunnel;
+ odp_port = tnl_port_send(ofport->tnl_port, flow);
if (odp_port == OVSP_NONE) {
xlate_report(ctx, "Tunneling decided against output");
goto out; /* restore flow_nw_tos */
}
- if (ctx->xin->flow.tunnel.ip_dst == ctx->orig_tunnel_ip_dst) {
+ if (flow->tunnel.ip_dst == ctx->orig_tunnel_ip_dst) {
xlate_report(ctx, "Not tunneling to our own address");
goto out; /* restore flow_nw_tos */
}
@@ -5995,24 +6000,23 @@ compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port,
netdev_vport_inc_tx(ofport->up.netdev, ctx->xin->resubmit_stats);
}
out_port = odp_port;
- commit_odp_tunnel_action(&ctx->xin->flow, &ctx->base_flow,
+ commit_odp_tunnel_action(flow, &ctx->base_flow,
&ctx->xout->odp_actions);
- ctx->xin->flow.tunnel = flow_tnl; /* Restore tunnel metadata */
+ flow->tunnel = flow_tnl; /* Restore tunnel metadata */
} else {
uint16_t vlandev_port;
odp_port = ofport->odp_port;
vlandev_port = vsp_realdev_to_vlandev(ctx->ofproto, ofp_port,
- ctx->xin->flow.vlan_tci);
+ flow->vlan_tci);
if (vlandev_port == ofp_port) {
out_port = odp_port;
} else {
out_port = ofp_port_to_odp_port(ctx->ofproto, vlandev_port);
- ctx->xin->flow.vlan_tci = htons(0);
+ flow->vlan_tci = htons(0);
}
- ctx->xin->flow.skb_mark &= ~IPSEC_MARK;
+ flow->skb_mark &= ~IPSEC_MARK;
}
- commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
- &ctx->xout->odp_actions);
+ commit_odp_actions(flow, &ctx->base_flow, &ctx->xout->odp_actions);
nl_msg_put_u32(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);
ctx->sflow_odp_port = odp_port;
@@ -6020,10 +6024,10 @@ compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port,
ctx->xout->nf_output_iface = ofp_port;
/* Restore flow */
- ctx->xin->flow.vlan_tci = flow_vlan_tci;
- ctx->xin->flow.skb_mark = flow_skb_mark;
+ flow->vlan_tci = flow_vlan_tci;
+ flow->skb_mark = flow_skb_mark;
out:
- ctx->xin->flow.nw_tos = flow_nw_tos;
+ flow->nw_tos = flow_nw_tos;
}
static void
@@ -6033,7 +6037,7 @@ compose_output_action(struct xlate_ctx *ctx, uint16_t ofp_port)
}
static void
-tag_the_flow(struct xlate_ctx *ctx, struct rule_dpif *rule)
+tag_the_flow(struct xlate_ctx *ctx, struct flow *flow, struct rule_dpif *rule)
{
struct ofproto_dpif *ofproto = ctx->ofproto;
uint8_t table_id = ctx->table_id;
@@ -6043,7 +6047,7 @@ tag_the_flow(struct xlate_ctx *ctx, struct rule_dpif *rule)
if (table->other_table) {
ctx->xout->tags |= (rule && rule->tag
? rule->tag
- : rule_calculate_tag(&ctx->xin->flow,
+ : rule_calculate_tag(flow,
&table->other_table->mask,
table->basis));
}
@@ -6055,6 +6059,8 @@ static struct rule_dpif *
ctx_rule_hooks(struct xlate_ctx *ctx, struct rule_dpif *rule,
bool may_packet_in)
{
+ struct flow *flow = &ctx->xin->flow;
+
if (ctx->xin->resubmit_hook) {
ctx->xin->resubmit_hook(ctx, rule);
}
@@ -6066,7 +6072,7 @@ ctx_rule_hooks(struct xlate_ctx *ctx, struct rule_dpif *rule,
* OFPTC_TABLE_MISS_DROP
* When OF1.0, OFPTC_TABLE_MISS_CONTINUE is used. What to do?
*/
- rule = rule_dpif_miss_rule(ctx->ofproto, &ctx->xin->flow);
+ rule = rule_dpif_miss_rule(ctx->ofproto, flow);
}
if (rule && ctx->xin->resubmit_stats) {
rule_credit_stats(rule, ctx->xin->resubmit_stats);
@@ -6078,22 +6084,24 @@ static void
xlate_table_action(struct xlate_ctx *ctx,
uint16_t in_port, uint8_t table_id, bool may_packet_in)
{
+ struct flow *flow = &ctx->xin->flow;
+
if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
struct rule_dpif *rule;
- uint16_t old_in_port = ctx->xin->flow.in_port;
+ uint16_t old_in_port = flow->in_port;
uint8_t old_table_id = ctx->table_id;
ctx->table_id = table_id;
/* Look up a flow with 'in_port' as the input port. */
- ctx->xin->flow.in_port = in_port;
- rule = rule_dpif_lookup__(ctx->ofproto, &ctx->xin->flow, table_id);
+ flow->in_port = in_port;
+ rule = rule_dpif_lookup__(ctx->ofproto, flow, table_id);
- tag_the_flow(ctx, rule);
+ tag_the_flow(ctx, flow, rule);
/* Restore the original input port. Otherwise OFPP_NORMAL and
* OFPP_IN_PORT will have surprising behavior. */
- ctx->xin->flow.in_port = old_in_port;
+ flow->in_port = old_in_port;
rule = ctx_rule_hooks(ctx, rule, may_packet_in);
@@ -6121,12 +6129,13 @@ static void
xlate_ofpact_resubmit(struct xlate_ctx *ctx,
const struct ofpact_resubmit *resubmit)
{
+ struct flow *flow = &ctx->xin->flow;
uint16_t in_port;
uint8_t table_id;
in_port = resubmit->in_port;
if (in_port == OFPP_IN_PORT) {
- in_port = ctx->xin->flow.in_port;
+ in_port = flow->in_port;
}
table_id = resubmit->table_id;
@@ -6140,12 +6149,13 @@ xlate_ofpact_resubmit(struct xlate_ctx *ctx,
static void
flood_packets(struct xlate_ctx *ctx, bool all)
{
+ struct flow *flow = &ctx->xin->flow;
struct ofport_dpif *ofport;
HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
uint16_t ofp_port = ofport->up.ofp_port;
- if (ofp_port == ctx->xin->flow.in_port) {
+ if (ofp_port == flow->in_port) {
continue;
}
@@ -6164,6 +6174,7 @@ execute_controller_action(struct xlate_ctx *ctx, int len,
enum ofp_packet_in_reason reason,
uint16_t controller_id)
{
+ struct flow *flow = &ctx->xin->flow;
struct ofputil_packet_in pin;
struct ofpbuf *packet;
@@ -6182,37 +6193,37 @@ execute_controller_action(struct xlate_ctx *ctx, int len,
eth_pop_vlan(packet);
eh = packet->l2;
- memcpy(eh->eth_src, ctx->xin->flow.dl_src, sizeof eh->eth_src);
- memcpy(eh->eth_dst, ctx->xin->flow.dl_dst, sizeof eh->eth_dst);
+ memcpy(eh->eth_src, flow->dl_src, sizeof eh->eth_src);
+ memcpy(eh->eth_dst, flow->dl_dst, sizeof eh->eth_dst);
- if (ctx->xin->flow.vlan_tci & htons(VLAN_CFI)) {
- eth_push_vlan(packet, ctx->xin->flow.vlan_tci);
+ if (flow->vlan_tci & htons(VLAN_CFI)) {
+ eth_push_vlan(packet, flow->vlan_tci);
}
mpls_depth = eth_mpls_depth(packet);
- if (mpls_depth < ctx->xin->flow.mpls_depth) {
- push_mpls(packet, ctx->xin->flow.dl_type, ctx->xin->flow.mpls_lse);
- } else if (mpls_depth > ctx->xin->flow.mpls_depth) {
- pop_mpls(packet, ctx->xin->flow.dl_type);
+ if (mpls_depth < flow->mpls_depth) {
+ push_mpls(packet, flow->dl_type, flow->mpls_lse);
+ } else if (mpls_depth > flow->mpls_depth) {
+ pop_mpls(packet, flow->dl_type);
} else if (mpls_depth) {
- set_mpls_lse(packet, ctx->xin->flow.mpls_lse);
+ set_mpls_lse(packet, flow->mpls_lse);
}
if (packet->l4) {
- if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
- packet_set_ipv4(packet, ctx->xin->flow.nw_src,
- ctx->xin->flow.nw_dst, ctx->xin->flow.nw_tos,
- ctx->xin->flow.nw_ttl);
+ if (flow->dl_type == htons(ETH_TYPE_IP)) {
+ packet_set_ipv4(packet, flow->nw_src,
+ flow->nw_dst, flow->nw_tos,
+ flow->nw_ttl);
}
if (packet->l7) {
- if (ctx->xin->flow.nw_proto == IPPROTO_TCP) {
- packet_set_tcp_port(packet, ctx->xin->flow.tp_src,
- ctx->xin->flow.tp_dst);
- } else if (ctx->xin->flow.nw_proto == IPPROTO_UDP) {
- packet_set_udp_port(packet, ctx->xin->flow.tp_src,
- ctx->xin->flow.tp_dst);
+ if (flow->nw_proto == IPPROTO_TCP) {
+ packet_set_tcp_port(packet, flow->tp_src,
+ flow->tp_dst);
+ } else if (flow->nw_proto == IPPROTO_UDP) {
+ packet_set_udp_port(packet, flow->tp_src,
+ flow->tp_dst);
}
}
}
@@ -6226,7 +6237,7 @@ execute_controller_action(struct xlate_ctx *ctx, int len,
pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;
pin.send_len = len;
- flow_get_metadata(&ctx->xin->flow, &pin.fmd);
+ flow_get_metadata(flow, &pin.fmd);
connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin);
ofpbuf_delete(packet);
@@ -6323,16 +6334,17 @@ static void
xlate_output_action(struct xlate_ctx *ctx,
uint16_t port, uint16_t max_len, bool may_packet_in)
{
+ struct flow *flow = &ctx->xin->flow;
uint16_t prev_nf_output_iface = ctx->xout->nf_output_iface;
ctx->xout->nf_output_iface = NF_OUT_DROP;
switch (port) {
case OFPP_IN_PORT:
- compose_output_action(ctx, ctx->xin->flow.in_port);
+ compose_output_action(ctx, flow->in_port);
break;
case OFPP_TABLE:
- xlate_table_action(ctx, ctx->xin->flow.in_port, 0, may_packet_in);
+ xlate_table_action(ctx, flow->in_port, 0, may_packet_in);
break;
case OFPP_NORMAL:
xlate_normal(ctx);
@@ -6350,7 +6362,7 @@ xlate_output_action(struct xlate_ctx *ctx,
break;
case OFPP_LOCAL:
default:
- if (port != ctx->xin->flow.in_port) {
+ if (port != flow->in_port) {
compose_output_action(ctx, port);
} else {
xlate_report(ctx, "skipping output to input port");
@@ -6372,7 +6384,9 @@ static void
xlate_output_reg_action(struct xlate_ctx *ctx,
const struct ofpact_output_reg *or)
{
- uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
+ struct flow *flow = &ctx->xin->flow;
+
+ uint64_t port = mf_get_subfield(&or->src, flow);
if (port <= UINT16_MAX) {
xlate_output_action(ctx, port, or->max_len, false);
}
@@ -6382,6 +6396,8 @@ static void
xlate_enqueue_action(struct xlate_ctx *ctx,
const struct ofpact_enqueue *enqueue)
{
+ struct flow *flow = &ctx->xin->flow;
+
uint16_t ofp_port = enqueue->port;
uint32_t queue_id = enqueue->queue;
uint32_t flow_priority, priority;
@@ -6398,16 +6414,16 @@ xlate_enqueue_action(struct xlate_ctx *ctx,
/* Check output port. */
if (ofp_port == OFPP_IN_PORT) {
- ofp_port = ctx->xin->flow.in_port;
- } else if (ofp_port == ctx->xin->flow.in_port) {
+ ofp_port = flow->in_port;
+ } else if (ofp_port == flow->in_port) {
return;
}
/* Add datapath actions. */
- flow_priority = ctx->xin->flow.skb_priority;
- ctx->xin->flow.skb_priority = priority;
+ flow_priority = flow->skb_priority;
+ flow->skb_priority = priority;
compose_output_action(ctx, ofp_port);
- ctx->xin->flow.skb_priority = flow_priority;
+ flow->skb_priority = flow_priority;
/* Update NetFlow output port. */
if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
@@ -6420,11 +6436,13 @@ xlate_enqueue_action(struct xlate_ctx *ctx,
static void
xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
{
+ struct flow *flow = &ctx->xin->flow;
+
uint32_t skb_priority;
if (!dpif_queue_to_priority(ctx->ofproto->backer->dpif,
queue_id, &skb_priority)) {
- ctx->xin->flow.skb_priority = skb_priority;
+ flow->skb_priority = skb_priority;
} else {
/* Couldn't translate queue to a priority. Nothing to do. A warning
* has already been logged. */
@@ -6457,12 +6475,13 @@ static void
xlate_bundle_action(struct xlate_ctx *ctx,
const struct ofpact_bundle *bundle)
{
+ struct flow *flow = &ctx->xin->flow;
+
uint16_t port;
- port = bundle_execute(bundle, &ctx->xin->flow, slave_enabled_cb,
- ctx->ofproto);
+ port = bundle_execute(bundle, flow, slave_enabled_cb, ctx->ofproto);
if (bundle->dst.field) {
- nxm_reg_load(&bundle->dst, port, &ctx->xin->flow);
+ nxm_reg_load(&bundle->dst, port, flow);
} else {
xlate_output_action(ctx, port, 0, false);
}
@@ -6472,6 +6491,8 @@ static void
xlate_learn_action(struct xlate_ctx *ctx,
const struct ofpact_learn *learn)
{
+ struct flow *flow = &ctx->xin->flow;
+
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
struct ofputil_flow_mod fm;
uint64_t ofpacts_stub[1024 / 8];
@@ -6479,7 +6500,7 @@ xlate_learn_action(struct xlate_ctx *ctx,
int error;
ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
- learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
+ learn_execute(learn, flow, &fm, &ofpacts);
error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
if (error && !VLOG_DROP_WARN(&rl)) {
@@ -6516,24 +6537,25 @@ static void
xlate_sample_action(struct xlate_ctx *ctx,
const struct ofpact_sample *os)
{
- union user_action_cookie cookie;
- /* Scale the probability from 16-bit to 32-bit while representing
- * the same percentage. */
- uint32_t probability = (os->probability << 16) | os->probability;
+ struct flow *flow = &ctx->xin->flow;
- commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
- &ctx->xout->odp_actions);
+ union user_action_cookie cookie;
+ /* Scale the probability from 16-bit to 32-bit while representing
+ * the same percentage. */
+ uint32_t probability = (os->probability << 16) | os->probability;
- compose_flow_sample_cookie(os->probability, os->collector_set_id,
- os->obs_domain_id, os->obs_point_id, &cookie);
- compose_sample_action(ctx->ofproto, &ctx->xout->odp_actions, &ctx->xin->flow,
- probability, &cookie, sizeof cookie.flow_sample);
+ commit_odp_actions(flow, &ctx->base_flow, &ctx->xout->odp_actions);
+
+ compose_flow_sample_cookie(os->probability, os->collector_set_id,
+ os->obs_domain_id, os->obs_point_id, &cookie);
+ compose_sample_action(ctx->ofproto, &ctx->xout->odp_actions, flow,
+ probability, &cookie, sizeof cookie.flow_sample);
}
static bool
-may_receive(const struct ofport_dpif *port, struct xlate_ctx *ctx)
+may_receive(const struct ofport_dpif *port, struct flow *flow)
{
- if (port->up.pp.config & (eth_addr_equals(ctx->xin->flow.dl_dst,
+ if (port->up.pp.config & (eth_addr_equals(flow->dl_dst,
eth_addr_stp)
? OFPUTIL_PC_NO_RECV_STP
: OFPUTIL_PC_NO_RECV)) {
@@ -6555,15 +6577,17 @@ may_receive(const struct ofport_dpif *port, struct xlate_ctx *ctx)
static bool
tunnel_ecn_ok(struct xlate_ctx *ctx)
{
+ struct flow *flow = &ctx->xin->flow;
+
if (is_ip_any(&ctx->base_flow)
- && (ctx->xin->flow.tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) {
+ && (flow->tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) {
if ((ctx->base_flow.nw_tos & IP_ECN_MASK) == IP_ECN_NOT_ECT) {
VLOG_WARN_RL(&rl, "dropping tunnel packet marked ECN CE"
" but is not ECN capable");
return false;
} else {
/* Set the ECN CE value in the tunneled packet. */
- ctx->xin->flow.nw_tos |= IP_ECN_CE;
+ flow->nw_tos |= IP_ECN_CE;
}
}
@@ -6574,6 +6598,7 @@ static void
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
struct xlate_ctx *ctx)
{
+ struct flow *flow = &ctx->xin->flow;
bool was_evictable = true;
const struct ofpact *a;
@@ -6610,69 +6635,67 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
break;
case OFPACT_SET_VLAN_VID:
- ctx->xin->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
- ctx->xin->flow.vlan_tci |=
+ flow->vlan_tci &= ~htons(VLAN_VID_MASK);
+ flow->vlan_tci |=
(htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
| htons(VLAN_CFI));
break;
case OFPACT_SET_VLAN_PCP:
- ctx->xin->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
- ctx->xin->flow.vlan_tci |=
+ flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
+ flow->vlan_tci |=
htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp << VLAN_PCP_SHIFT)
| VLAN_CFI);
break;
case OFPACT_STRIP_VLAN:
- ctx->xin->flow.vlan_tci = htons(0);
+ flow->vlan_tci = htons(0);
break;
case OFPACT_PUSH_VLAN:
/* XXX 802.1AD(QinQ) */
- ctx->xin->flow.vlan_tci = htons(VLAN_CFI);
+ flow->vlan_tci = htons(VLAN_CFI);
break;
case OFPACT_SET_ETH_SRC:
- memcpy(ctx->xin->flow.dl_src, ofpact_get_SET_ETH_SRC(a)->mac,
+ memcpy(flow->dl_src, ofpact_get_SET_ETH_SRC(a)->mac,
ETH_ADDR_LEN);
break;
case OFPACT_SET_ETH_DST:
- memcpy(ctx->xin->flow.dl_dst, ofpact_get_SET_ETH_DST(a)->mac,
+ memcpy(flow->dl_dst, ofpact_get_SET_ETH_DST(a)->mac,
ETH_ADDR_LEN);
break;
case OFPACT_SET_IPV4_SRC:
- if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
- ctx->xin->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
+ if (flow->dl_type == htons(ETH_TYPE_IP)) {
+ flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
}
break;
case OFPACT_SET_IPV4_DST:
- if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
- ctx->xin->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
+ if (flow->dl_type == htons(ETH_TYPE_IP)) {
+ flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
}
break;
case OFPACT_SET_IPV4_DSCP:
/* OpenFlow 1.0 only supports IPv4. */
- if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
- ctx->xin->flow.nw_tos &= ~IP_DSCP_MASK;
- ctx->xin->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
+ if (flow->dl_type == htons(ETH_TYPE_IP)) {
+ flow->nw_tos &= ~IP_DSCP_MASK;
+ flow->nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
}
break;
case OFPACT_SET_L4_SRC_PORT:
- if (is_ip_any(&ctx->xin->flow)) {
- ctx->xin->flow.tp_src =
- htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
+ if (is_ip_any(flow)) {
+ flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
}
break;
case OFPACT_SET_L4_DST_PORT:
- if (is_ip_any(&ctx->xin->flow)) {
- ctx->xin->flow.tp_dst =
- htons(ofpact_get_SET_L4_DST_PORT(a)->port);
+ if (is_ip_any(flow)) {
+ flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
}
break;
@@ -6681,8 +6704,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
break;
case OFPACT_SET_TUNNEL:
- ctx->xin->flow.tunnel.tun_id =
- htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
+ flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
break;
case OFPACT_SET_QUEUE:
@@ -6690,46 +6712,44 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
break;
case OFPACT_POP_QUEUE:
- ctx->xin->flow.skb_priority = ctx->orig_skb_priority;
+ flow->skb_priority = ctx->orig_skb_priority;
break;
case OFPACT_REG_MOVE:
- nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->xin->flow);
+ nxm_execute_reg_move(ofpact_get_REG_MOVE(a), flow);
break;
case OFPACT_REG_LOAD:
- nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->xin->flow);
+ nxm_execute_reg_load(ofpact_get_REG_LOAD(a), flow);
break;
case OFPACT_STACK_PUSH:
- nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), &ctx->xin->flow,
+ nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow,
&ctx->stack);
break;
case OFPACT_STACK_POP:
- nxm_execute_stack_pop(ofpact_get_STACK_POP(a), &ctx->xin->flow,
+ nxm_execute_stack_pop(ofpact_get_STACK_POP(a), flow,
&ctx->stack);
break;
case OFPACT_PUSH_MPLS:
- compose_mpls_push_action(&ctx->xin->flow,
- ofpact_get_PUSH_MPLS(a)->ethertype);
+ compose_mpls_push_action(flow, ofpact_get_PUSH_MPLS(a)->ethertype);
break;
case OFPACT_POP_MPLS:
- compose_mpls_pop_action(&ctx->xin->flow,
- ofpact_get_POP_MPLS(a)->ethertype);
+ compose_mpls_pop_action(flow, ofpact_get_POP_MPLS(a)->ethertype);
break;
case OFPACT_SET_MPLS_TTL:
- if (compose_set_mpls_ttl_action(&ctx->xin->flow,
+ if (compose_set_mpls_ttl_action(flow,
ofpact_get_SET_MPLS_TTL(a)->ttl)) {
goto out;
}
break;
case OFPACT_DEC_MPLS_TTL:
- if (compose_dec_mpls_ttl_action(&ctx->xin->flow)) {
+ if (compose_dec_mpls_ttl_action(flow)) {
execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
0);
goto out;
@@ -6737,7 +6757,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
break;
case OFPACT_DEC_TTL:
- if (compose_dec_ttl(&ctx->xin->flow)) {
+ if (compose_dec_ttl(flow)) {
struct ofpact_cnt_ids *ids = ofpact_get_DEC_TTL(a);
size_t i;
@@ -6755,7 +6775,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
break;
case OFPACT_MULTIPATH:
- multipath_execute(ofpact_get_MULTIPATH(a), &ctx->xin->flow);
+ multipath_execute(ofpact_get_MULTIPATH(a), flow);
break;
case OFPACT_BUNDLE:
@@ -6793,8 +6813,8 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
case OFPACT_WRITE_METADATA:
metadata = ofpact_get_WRITE_METADATA(a);
- ctx->xin->flow.metadata &= ~metadata->mask;
- ctx->xin->flow.metadata |= metadata->metadata & metadata->mask;
+ flow->metadata &= ~metadata->mask;
+ flow->metadata |= metadata->metadata & metadata->mask;
break;
case OFPACT_GOTO_TABLE: {
@@ -6807,9 +6827,9 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
ctx->table_id = ogt->table_id;
/* Look up a flow from the new table. */
- rule = rule_dpif_lookup__(ctx->ofproto, &ctx->xin->flow, ctx->table_id);
+ rule = rule_dpif_lookup__(ctx->ofproto, flow, ctx->table_id);
- tag_the_flow(ctx, rule);
+ tag_the_flow(ctx, flow, rule);
rule = ctx_rule_hooks(ctx, rule, true);
@@ -6863,7 +6883,7 @@ xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
if (initial_vals) {
xin->initial_vals = *initial_vals;
} else {
- xin->initial_vals.vlan_tci = xin->flow.vlan_tci;
+ xin->initial_vals.vlan_tci = flow->vlan_tci;
}
}
@@ -7002,7 +7022,9 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
add_ipfix_action(&ctx);
sample_actions_len = ctx.xout->odp_actions.size;
- if (tunnel_ecn_ok(&ctx) && (!in_port || may_receive(in_port, &ctx))) {
+ if (tunnel_ecn_ok(&ctx)
+ && (!in_port || may_receive(in_port, &ctx.xin->flow))) {
+
do_xlate_actions(ofpacts, ofpacts_len, &ctx);
/* We've let OFPP_NORMAL and the learning action look at the
@@ -7180,6 +7202,7 @@ static void
output_normal(struct xlate_ctx *ctx, const struct ofbundle *out_bundle,
uint16_t vlan)
{
+ struct flow *flow = &ctx->xin->flow;
struct ofport_dpif *port;
uint16_t vid;
ovs_be16 tci, old_tci;
@@ -7188,7 +7211,7 @@ output_normal(struct xlate_ctx *ctx, const struct ofbundle *out_bundle,
if (!out_bundle->bond) {
port = ofbundle_get_a_port(out_bundle);
} else {
- port = bond_choose_output_slave(out_bundle->bond, &ctx->xin->flow,
+ port = bond_choose_output_slave(out_bundle->bond, flow,
vid, &ctx->xout->tags);
if (!port) {
/* No slaves enabled, so drop packet. */
@@ -7196,18 +7219,18 @@ output_normal(struct xlate_ctx *ctx, const struct ofbundle *out_bundle,
}
}
- old_tci = ctx->xin->flow.vlan_tci;
+ old_tci = flow->vlan_tci;
tci = htons(vid);
if (tci || out_bundle->use_priority_tags) {
- tci |= ctx->xin->flow.vlan_tci & htons(VLAN_PCP_MASK);
+ tci |= flow->vlan_tci & htons(VLAN_PCP_MASK);
if (tci) {
tci |= htons(VLAN_CFI);
}
}
- ctx->xin->flow.vlan_tci = tci;
+ flow->vlan_tci = tci;
compose_output_action(ctx, port->up.ofp_port);
- ctx->xin->flow.vlan_tci = old_tci;
+ flow->vlan_tci = old_tci;
}
static int
@@ -7476,8 +7499,9 @@ static bool
is_admissible(struct xlate_ctx *ctx, struct ofport_dpif *in_port,
uint16_t vlan)
{
- struct ofproto_dpif *ofproto = ctx->ofproto;
struct flow *flow = &ctx->xin->flow;
+
+ struct ofproto_dpif *ofproto = ctx->ofproto;
struct ofbundle *in_bundle = in_port->bundle;
/* Drop frames for reserved multicast addresses
@@ -7518,6 +7542,8 @@ is_admissible(struct xlate_ctx *ctx, struct ofport_dpif *in_port,
static void
xlate_normal(struct xlate_ctx *ctx)
{
+ struct flow *flow = &ctx->xin->flow;
+
struct ofport_dpif *in_port;
struct ofbundle *in_bundle;
struct mac_entry *mac;
@@ -7526,7 +7552,7 @@ xlate_normal(struct xlate_ctx *ctx)
ctx->xout->has_normal = true;
- in_bundle = lookup_input_bundle(ctx->ofproto, ctx->xin->flow.in_port,
+ in_bundle = lookup_input_bundle(ctx->ofproto, flow->in_port,
ctx->xin->packet != NULL, &in_port);
if (!in_bundle) {
xlate_report(ctx, "no input bundle, dropping");
@@ -7534,8 +7560,8 @@ xlate_normal(struct xlate_ctx *ctx)
}
/* Drop malformed frames. */
- if (ctx->xin->flow.dl_type == htons(ETH_TYPE_VLAN) &&
- !(ctx->xin->flow.vlan_tci & htons(VLAN_CFI))) {
+ if (flow->dl_type == htons(ETH_TYPE_VLAN) &&
+ !(flow->vlan_tci & htons(VLAN_CFI))) {
if (ctx->xin->packet != NULL) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
@@ -7559,7 +7585,7 @@ xlate_normal(struct xlate_ctx *ctx)
}
/* Check VLAN. */
- vid = vlan_tci_to_vid(ctx->xin->flow.vlan_tci);
+ vid = vlan_tci_to_vid(flow->vlan_tci);
if (!input_vid_is_valid(vid, in_bundle, ctx->xin->packet != NULL)) {
xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");
return;
@@ -7573,11 +7599,11 @@ xlate_normal(struct xlate_ctx *ctx)
/* Learn source MAC. */
if (ctx->xin->may_learn) {
- update_learning_table(ctx->ofproto, &ctx->xin->flow, vlan, in_bundle);
+ update_learning_table(ctx->ofproto, flow, vlan, in_bundle);
}
/* Determine output bundle. */
- mac = mac_learning_lookup(ctx->ofproto->ml, ctx->xin->flow.dl_dst, vlan,
+ mac = mac_learning_lookup(ctx->ofproto->ml, flow->dl_dst, vlan,
&ctx->xout->tags);
if (mac) {
if (mac->port.p != in_bundle) {
--
1.7.10.4