Masked set actions allow more megaflow wildcarding. Masked set actions are now supported for all writable key types, except for the tunnel key.
It is not clear whether masked set is useful for skb_priority. However, we already use the LSB of pkt_mark for IPSec in tunnels, so it might be useful to be able to set individual bits on pkt_mark. Signed-off-by: Jarno Rajahalme <jrajaha...@nicira.com> --- datapath/actions.c | 374 ++++++++++++++++++++++++++++++++++++------- datapath/flow_netlink.c | 63 ++++++-- include/linux/openvswitch.h | 8 +- 3 files changed, 371 insertions(+), 74 deletions(-) diff --git a/datapath/actions.c b/datapath/actions.c index 0b66e7c..7cbd73c 100644 --- a/datapath/actions.c +++ b/datapath/actions.c @@ -125,8 +125,20 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla return 0; } +/* 'src' is already properly masked. */ +static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_) +{ + u16 *dst = (u16 *)dst_; + const u16 *src = (const u16 *)src_; + const u16 *mask = (const u16 *)mask_; + + dst[0] = src[0] | (dst[0] & ~mask[0]); + dst[1] = src[1] | (dst[1] & ~mask[1]); + dst[2] = src[2] | (dst[2] & ~mask[2]); +} + static int set_eth_addr(struct sk_buff *skb, - const struct ovs_key_ethernet *eth_key) + const struct ovs_key_ethernet *key) { int err; err = make_writable(skb, ETH_HLEN); @@ -135,16 +147,35 @@ static int set_eth_addr(struct sk_buff *skb, skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); - ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src); - ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst); + ether_addr_copy(eth_hdr(skb)->h_source, key->eth_src); + ether_addr_copy(eth_hdr(skb)->h_dest, key->eth_dst); ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); + return 0; +} +static int set_eth_addr_masked(struct sk_buff *skb, + const struct ovs_key_ethernet *key, + const struct ovs_key_ethernet *mask) +{ + int err; + err = make_writable(skb, ETH_HLEN); + if (unlikely(err)) + return err; + + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); + + ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src, + 
mask->eth_src); + ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst, + mask->eth_dst); + + ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); return 0; } static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, - __be32 *addr, __be32 new_addr) + __be32 *addr, __be32 new_addr) { int transport_len = skb->len - skb_transport_offset(skb); @@ -197,21 +228,28 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto, __be32 addr[4], const __be32 new_addr[4], bool recalculate_csum) { - if (recalculate_csum) + if (likely(recalculate_csum)) update_ipv6_checksum(skb, l4_proto, addr, new_addr); skb_clear_rxhash(skb); memcpy(addr, new_addr, sizeof(__be32[4])); } -static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc) +static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc, u8 mask) { + /* Keep the unmasked bits, if any. */ + if (unlikely(~mask)) + tc |= (nh->priority << 4 | (nh->flow_lbl[0] & 0xF0) >> 4) & ~mask; nh->priority = tc >> 4; nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4); } -static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl) +static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask) { + /* Keep the unmasked bits, if any. 
*/ + if (unlikely(~mask)) + fl |= ((u32)(nh->flow_lbl[0] & 0x0F) << 16 + | nh->flow_lbl[1] << 8 | nh->flow_lbl[2]) & ~mask; nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16; nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8; nh->flow_lbl[2] = fl & 0x000000FF; @@ -223,7 +261,35 @@ static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl) nh->ttl = new_ttl; } -static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key) +static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *key) +{ + struct iphdr *nh; + int err; + + err = make_writable(skb, skb_network_offset(skb) + + sizeof(struct iphdr)); + if (unlikely(err)) + return err; + + nh = ip_hdr(skb); + + if (unlikely(key->ipv4_src != nh->saddr)) + set_ip_addr(skb, nh, &nh->saddr, key->ipv4_src); + + if (unlikely(key->ipv4_dst != nh->daddr)) + set_ip_addr(skb, nh, &nh->daddr, key->ipv4_dst); + + if (key->ipv4_tos != nh->tos) + ipv4_change_dsfield(nh, 0, key->ipv4_tos); + + if (key->ipv4_ttl != nh->ttl) + set_ip_ttl(skb, nh, key->ipv4_ttl); + + return 0; +} + +static int set_ipv4_masked(struct sk_buff *skb, const struct ovs_key_ipv4 *key, + const struct ovs_key_ipv4 *mask) { struct iphdr *nh; int err; @@ -235,22 +301,26 @@ static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key) nh = ip_hdr(skb); - if (ipv4_key->ipv4_src != nh->saddr) - set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src); + if (unlikely(mask->ipv4_src)) + set_ip_addr(skb, nh, &nh->saddr, + key->ipv4_src | (nh->saddr & ~mask->ipv4_src)); - if (ipv4_key->ipv4_dst != nh->daddr) - set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst); + if (unlikely(mask->ipv4_dst)) + set_ip_addr(skb, nh, &nh->daddr, + key->ipv4_dst | (nh->daddr & ~mask->ipv4_dst)); - if (ipv4_key->ipv4_tos != nh->tos) - ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos); + if (mask->ipv4_tos) + ipv4_change_dsfield(nh, 0, + key->ipv4_tos | (nh->tos & ~mask->ipv4_tos)); - if (ipv4_key->ipv4_ttl != nh->ttl) - 
set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl); + if (mask->ipv4_ttl) + set_ip_ttl(skb, nh, + key->ipv4_ttl | (nh->ttl & ~mask->ipv4_ttl)); return 0; } -static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key) +static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *key) { struct ipv6hdr *nh; int err; @@ -266,11 +336,10 @@ static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key) saddr = (__be32 *)&nh->saddr; daddr = (__be32 *)&nh->daddr; - if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) - set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr, - ipv6_key->ipv6_src, true); + if (unlikely(memcmp(key->ipv6_src, saddr, sizeof(key->ipv6_src)))) + set_ipv6_addr(skb, key->ipv6_proto, saddr, key->ipv6_src, true); - if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) { + if (unlikely(memcmp(key->ipv6_dst, daddr, sizeof(key->ipv6_dst)))) { unsigned int offset = 0; int flags = OVS_IP6T_FH_F_SKIP_RH; bool recalc_csum = true; @@ -280,13 +349,71 @@ static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key) NEXTHDR_ROUTING, NULL, &flags) != NEXTHDR_ROUTING; - set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr, - ipv6_key->ipv6_dst, recalc_csum); + set_ipv6_addr(skb, key->ipv6_proto, daddr, key->ipv6_dst, + recalc_csum); } - set_ipv6_tc(nh, ipv6_key->ipv6_tclass); - set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label)); - nh->hop_limit = ipv6_key->ipv6_hlimit; + set_ipv6_tc(nh, key->ipv6_tclass, 0xff); + set_ipv6_fl(nh, ntohl(key->ipv6_label), UINT_MAX); + nh->hop_limit = key->ipv6_hlimit; + + return 0; +} + +static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4], + const __be32 mask[4], __be32 masked[4]) +{ + masked[0] = addr[0] | (old[0] & ~mask[0]); + masked[1] = addr[1] | (old[1] & ~mask[1]); + masked[2] = addr[2] | (old[2] & ~mask[2]); + masked[3] = addr[3] | (old[3] & ~mask[3]); +} + +static bool is_ipv6_addr_any(const __be32 addr[4]) +{ + return !(addr[0] | addr[1] | addr[2] | 
addr[3]); +} + +static int set_ipv6_masked(struct sk_buff *skb, const struct ovs_key_ipv6 *key, + const struct ovs_key_ipv6 *mask) +{ + struct ipv6hdr *nh; + int err; + + err = make_writable(skb, skb_network_offset(skb) + + sizeof(struct ipv6hdr)); + if (unlikely(err)) + return err; + + nh = ipv6_hdr(skb); + + if (unlikely(!is_ipv6_addr_any(mask->ipv6_src))) { + __be32 masked[4]; + __be32 *saddr = (__be32 *)&nh->saddr; + + mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked); + set_ipv6_addr(skb, key->ipv6_proto, saddr, masked, true); + } + if (unlikely(!is_ipv6_addr_any(mask->ipv6_dst))) { + __be32 masked[4]; + __be32 *daddr = (__be32 *)&nh->daddr; + unsigned int offset = 0; + int flags = OVS_IP6T_FH_F_SKIP_RH; + bool recalc_csum = true; + + if (ipv6_ext_hdr(nh->nexthdr)) + recalc_csum = ipv6_find_hdr(skb, &offset, + NEXTHDR_ROUTING, NULL, + &flags) != NEXTHDR_ROUTING; + + mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked); + set_ipv6_addr(skb, key->ipv6_proto, daddr, masked, recalc_csum); + } + if (mask->ipv6_tclass) + set_ipv6_tc(nh, key->ipv6_tclass, mask->ipv6_tclass); + if (mask->ipv6_label) + set_ipv6_fl(nh, ntohl(key->ipv6_label), ntohl(mask->ipv6_label)); + nh->hop_limit = key->ipv6_hlimit | (nh->hop_limit & ~mask->ipv6_hlimit); return 0; } @@ -315,7 +442,7 @@ static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port) } } -static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key) +static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *key) { struct udphdr *uh; int err; @@ -326,16 +453,41 @@ static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key) return err; uh = udp_hdr(skb); - if (udp_port_key->udp_src != uh->source) - set_udp_port(skb, &uh->source, udp_port_key->udp_src); - if (udp_port_key->udp_dst != uh->dest) - set_udp_port(skb, &uh->dest, udp_port_key->udp_dst); + if (likely(key->udp_src != uh->source)) + set_udp_port(skb, &uh->source, key->udp_src); + + if 
(likely(key->udp_dst != uh->dest)) + set_udp_port(skb, &uh->dest, key->udp_dst); return 0; } -static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key) +static int set_udp_masked(struct sk_buff *skb, const struct ovs_key_udp *key, + const struct ovs_key_udp *mask) +{ + struct udphdr *uh; + int err; + + err = make_writable(skb, skb_transport_offset(skb) + + sizeof(struct udphdr)); + if (unlikely(err)) + return err; + + uh = udp_hdr(skb); + + if (likely(mask->udp_src)) + set_udp_port(skb, &uh->source, + key->udp_src | (uh->source & ~mask->udp_src)); + + if (likely(mask->udp_dst)) + set_udp_port(skb, &uh->dest, + key->udp_dst | (uh->dest & ~mask->udp_dst)); + + return 0; +} + +static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *key) { struct tcphdr *th; int err; @@ -346,48 +498,87 @@ static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key) return err; th = tcp_hdr(skb); - if (tcp_port_key->tcp_src != th->source) - set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check); - if (tcp_port_key->tcp_dst != th->dest) - set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check); + if (likely(key->tcp_src != th->source)) + set_tp_port(skb, &th->source, key->tcp_src, &th->check); + + if (likely(key->tcp_dst != th->dest)) + set_tp_port(skb, &th->dest, key->tcp_dst, &th->check); return 0; } -static int set_sctp(struct sk_buff *skb, - const struct ovs_key_sctp *sctp_port_key) +static int set_tcp_masked(struct sk_buff *skb, const struct ovs_key_tcp *key, + const struct ovs_key_tcp *mask) +{ + struct tcphdr *th; + int err; + + err = make_writable(skb, skb_transport_offset(skb) + + sizeof(struct tcphdr)); + if (unlikely(err)) + return err; + + th = tcp_hdr(skb); + + if (likely(mask->tcp_src)) + set_tp_port(skb, &th->source, + key->tcp_src | (th->source & ~mask->tcp_src), + &th->check); + + if (likely(mask->tcp_dst)) + set_tp_port(skb, &th->dest, + key->tcp_dst | (th->dest & ~mask->tcp_dst), + &th->check); + + 
return 0; +} + +static int set_sctp_ports(struct sk_buff *skb, __be16 src, __be16 dst) { struct sctphdr *sh; int err; unsigned int sctphoff = skb_transport_offset(skb); + __le32 old_correct_csum, new_csum, old_csum; err = make_writable(skb, sctphoff + sizeof(struct sctphdr)); if (unlikely(err)) return err; sh = sctp_hdr(skb); - if (sctp_port_key->sctp_src != sh->source || - sctp_port_key->sctp_dst != sh->dest) { - __le32 old_correct_csum, new_csum, old_csum; - old_csum = sh->checksum; - old_correct_csum = sctp_compute_cksum(skb, sctphoff); + old_csum = sh->checksum; + old_correct_csum = sctp_compute_cksum(skb, sctphoff); - sh->source = sctp_port_key->sctp_src; - sh->dest = sctp_port_key->sctp_dst; + sh->source = src; + sh->dest = dst; - new_csum = sctp_compute_cksum(skb, sctphoff); + new_csum = sctp_compute_cksum(skb, sctphoff); - /* Carry any checksum errors through. */ - sh->checksum = old_csum ^ old_correct_csum ^ new_csum; + /* Carry any checksum errors through. */ + sh->checksum = old_csum ^ old_correct_csum ^ new_csum; - skb_clear_rxhash(skb); - } + skb_clear_rxhash(skb); return 0; } +static int set_sctp(struct sk_buff *skb, + const struct ovs_key_sctp *key) +{ + return set_sctp_ports(skb, key->sctp_src, key->sctp_dst); +} + +static int set_sctp_masked(struct sk_buff *skb, + const struct ovs_key_sctp *key, + const struct ovs_key_sctp *mask) +{ + struct sctphdr *sh = sctp_hdr(skb); + return set_sctp_ports(skb, + key->sctp_src | (sh->source & ~mask->sctp_src), + key->sctp_dst | (sh->dest & ~mask->sctp_dst)); +} + static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port) { struct vport *vport; @@ -460,46 +651,101 @@ static int sample(struct datapath *dp, struct sk_buff *skb, nla_len(acts_list), true); } -static int execute_set_action(struct sk_buff *skb, - const struct nlattr *nested_attr) +static int execute_set_action(struct sk_buff *skb, const struct nlattr *a) { int err = 0; - switch (nla_type(nested_attr)) { + switch (nla_type(a)) { case 
OVS_KEY_ATTR_PRIORITY: - skb->priority = nla_get_u32(nested_attr); + skb->priority = nla_get_u32(a); break; case OVS_KEY_ATTR_SKB_MARK: - skb->mark = nla_get_u32(nested_attr); + skb->mark = nla_get_u32(a); break; case OVS_KEY_ATTR_IPV4_TUNNEL: - OVS_CB(skb)->tun_key = nla_data(nested_attr); + OVS_CB(skb)->tun_key = nla_data(a); break; case OVS_KEY_ATTR_ETHERNET: - err = set_eth_addr(skb, nla_data(nested_attr)); + err = set_eth_addr(skb, nla_data(a)); break; case OVS_KEY_ATTR_IPV4: - err = set_ipv4(skb, nla_data(nested_attr)); + err = set_ipv4(skb, nla_data(a)); break; case OVS_KEY_ATTR_IPV6: - err = set_ipv6(skb, nla_data(nested_attr)); + err = set_ipv6(skb, nla_data(a)); break; case OVS_KEY_ATTR_TCP: - err = set_tcp(skb, nla_data(nested_attr)); + err = set_tcp(skb, nla_data(a)); break; case OVS_KEY_ATTR_UDP: - err = set_udp(skb, nla_data(nested_attr)); + err = set_udp(skb, nla_data(a)); break; case OVS_KEY_ATTR_SCTP: - err = set_sctp(skb, nla_data(nested_attr)); + err = set_sctp(skb, nla_data(a)); + break; + } + + return err; +} + +#define get_mask(a, type) ((const type *)nla_data(a) + 1) + +static int execute_masked_set_action(struct sk_buff *skb, + const struct nlattr *a) +{ + int err = 0; + + switch (nla_type(a)) { + case OVS_KEY_ATTR_PRIORITY: + skb->priority = nla_get_u32(a) + | (skb->priority & ~*get_mask(a, u32)); + break; + + case OVS_KEY_ATTR_SKB_MARK: + skb->mark = nla_get_u32(a) | (skb->mark & ~*get_mask(a, u32)); + break; + + case OVS_KEY_ATTR_IPV4_TUNNEL: + /* Masked data not supported for tunnel. 
*/ + err = -EINVAL; + break; + + case OVS_KEY_ATTR_ETHERNET: + err = set_eth_addr_masked(skb, nla_data(a), + get_mask(a, struct ovs_key_ethernet)); + break; + + case OVS_KEY_ATTR_IPV4: + err = set_ipv4_masked(skb, nla_data(a), + get_mask(a, struct ovs_key_ipv4)); + break; + + case OVS_KEY_ATTR_IPV6: + err = set_ipv6_masked(skb, nla_data(a), + get_mask(a, struct ovs_key_ipv6)); + break; + + case OVS_KEY_ATTR_TCP: + err = set_tcp_masked(skb, nla_data(a), + get_mask(a, struct ovs_key_tcp)); + break; + + case OVS_KEY_ATTR_UDP: + err = set_udp_masked(skb, nla_data(a), + get_mask(a, struct ovs_key_udp)); + break; + + case OVS_KEY_ATTR_SCTP: + err = set_sctp_masked(skb, nla_data(a), + get_mask(a, struct ovs_key_sctp)); break; } @@ -550,6 +796,10 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, err = execute_set_action(skb, nla_data(a)); break; + case OVS_ACTION_ATTR_SET_MASKED: + err = execute_masked_set_action(skb, nla_data(a)); + break; + case OVS_ACTION_ATTR_SAMPLE: err = sample(dp, skb, a); break; diff --git a/datapath/flow_netlink.c b/datapath/flow_netlink.c index 5c32cd0..c94e3d6 100644 --- a/datapath/flow_netlink.c +++ b/datapath/flow_netlink.c @@ -1274,23 +1274,43 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, return err; } +/* Return false if there are any non-masked bits set. + * Mask follows data immediately, before any netlink padding. 
*/ +static bool validate_masked(u8 *data, int len) +{ + u8 *mask = data + len; + + while (len--) + if (*data++ & ~*mask++) + return false; + return true; +} + static int validate_set(const struct nlattr *a, const struct sw_flow_key *flow_key, struct sw_flow_actions **sfa, - bool *set_tun) + bool *set_tun, bool masked) { const struct nlattr *ovs_key = nla_data(a); int key_type = nla_type(ovs_key); + size_t key_len; /* There can be only one key in a action */ if (nla_total_size(nla_len(ovs_key)) != nla_len(a)) return -EINVAL; + key_len = nla_len(ovs_key); + if (masked) + key_len /= 2; + if (key_type > OVS_KEY_ATTR_MAX || - (ovs_key_lens[key_type] != nla_len(ovs_key) && + (ovs_key_lens[key_type] != key_len && ovs_key_lens[key_type] != -1)) return -EINVAL; + if (masked && !validate_masked(nla_data(ovs_key), key_len)) + return -EINVAL; + switch (key_type) { const struct ovs_key_ipv4 *ipv4_key; const struct ovs_key_ipv6 *ipv6_key; @@ -1316,12 +1336,19 @@ static int validate_set(const struct nlattr *a, return -EINVAL; ipv4_key = nla_data(ovs_key); - if (ipv4_key->ipv4_proto != flow_key->ip.proto) - return -EINVAL; + if (masked) { + const struct ovs_key_ipv4 *mask = ipv4_key + 1; - if (ipv4_key->ipv4_frag != flow_key->ip.frag) - return -EINVAL; + /* Non-writeable fields. */ + if (mask->ipv4_proto || mask->ipv4_frag) + return -EINVAL; + } else { + if (ipv4_key->ipv4_proto != flow_key->ip.proto) + return -EINVAL; + if (ipv4_key->ipv4_frag != flow_key->ip.frag) + return -EINVAL; + } break; case OVS_KEY_ATTR_IPV6: @@ -1332,12 +1359,19 @@ static int validate_set(const struct nlattr *a, return -EINVAL; ipv6_key = nla_data(ovs_key); - if (ipv6_key->ipv6_proto != flow_key->ip.proto) - return -EINVAL; + if (masked) { + const struct ovs_key_ipv6 *mask = ipv6_key + 1; - if (ipv6_key->ipv6_frag != flow_key->ip.frag) - return -EINVAL; + /* Non-writeable fields. 
*/ + if (mask->ipv6_proto || mask->ipv6_frag) + return -EINVAL; + } else { + if (ipv6_key->ipv6_proto != flow_key->ip.proto) + return -EINVAL; + if (ipv6_key->ipv6_frag != flow_key->ip.frag) + return -EINVAL; + } if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000) return -EINVAL; @@ -1422,6 +1456,7 @@ int ovs_nla_copy_actions(const struct nlattr *attr, [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan), [OVS_ACTION_ATTR_POP_VLAN] = 0, [OVS_ACTION_ATTR_SET] = (u32)-1, + [OVS_ACTION_ATTR_SET_MASKED] = (u32)-1, [OVS_ACTION_ATTR_SAMPLE] = (u32)-1 }; const struct ovs_action_push_vlan *vlan; @@ -1462,7 +1497,13 @@ int ovs_nla_copy_actions(const struct nlattr *attr, break; case OVS_ACTION_ATTR_SET: - err = validate_set(a, key, sfa, &skip_copy); + err = validate_set(a, key, sfa, &skip_copy, false); + if (err) + return err; + break; + + case OVS_ACTION_ATTR_SET_MASKED: + err = validate_set(a, key, sfa, &skip_copy, true); if (err) return err; break; diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h index ea3cb79..ef6b569 100644 --- a/include/linux/openvswitch.h +++ b/include/linux/openvswitch.h @@ -585,7 +585,13 @@ struct ovs_action_recirc { * indicate the new packet contents. This could potentially still be * %ETH_P_MPLS if the resulting MPLS label stack is not empty. If there * is no MPLS label stack, as determined by ethertype, no action is taken. - * @OVS_ACTION_RECIRC: Recirculate within the data path. + * @OVS_ACTION_ATTR_RECIRC: Recirculate within the data path. + * @OVS_ACTION_ATTR_SET_MASKED: Replaces the contents of an existing header. A + * nested %OVS_KEY_ATTR_* attribute specifies a header to modify, its value, + * and a mask. For every bit set to one in the mask, the corresponding header + * field bit is set to the one in value, rest of the bits are left unchanged. + * These non-significant bits must be passed in as zeroes, though. Masking is + * not supported for the %OVS_KEY_ATTR_TUNNEL attribute. 
* * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all * fields within a header are modifiable, e.g. the IPv4 protocol and fragment -- 1.7.10.4 _______________________________________________ dev mailing list dev@openvswitch.org http://openvswitch.org/mailman/listinfo/dev