OVS already has compat functions to handle GSO packets.
The following patch gets rid of GSO packet handling in the LISP
vport and uses the OVS iptunnel_xmit() function instead.
CC: Lori Jakab <[email protected]>
Signed-off-by: Pravin B Shelar <[email protected]>
---
datapath/vport-lisp.c | 209 ++++++++++++-------------------------------------
1 files changed, 50 insertions(+), 159 deletions(-)
diff --git a/datapath/vport-lisp.c b/datapath/vport-lisp.c
index 847cb39..9ffa74f 100644
--- a/datapath/vport-lisp.c
+++ b/datapath/vport-lisp.c
@@ -35,6 +35,7 @@
#include <net/xfrm.h>
#include "datapath.h"
+#include "gso.h"
#include "vport.h"
/*
@@ -177,35 +178,6 @@ static u16 ovs_tnl_get_src_port(struct sk_buff *skb)
return (((u64) hash * range) >> 32) + low;
}
-static void lisp_build_header(const struct vport *vport,
- struct sk_buff *skb,
- int tunnel_hlen)
-{
- struct lisp_port *lisp_port = lisp_vport(vport);
- struct udphdr *udph = udp_hdr(skb);
- struct lisphdr *lisph = (struct lisphdr *)(udph + 1);
- const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;
-
- udph->dest = lisp_port->dst_port;
- udph->source = htons(ovs_tnl_get_src_port(skb));
- udph->check = 0;
- udph->len = htons(skb->len - skb_transport_offset(skb));
-
- lisph->nonce_present = 0; /* We don't support echo nonce algorithm */
- lisph->locator_status_bits_present = 1; /* Set LSB */
- lisph->solicit_echo_nonce = 0; /* No echo noncing */
- lisph->map_version_present = 0; /* No mapping versioning, nonce
instead */
- lisph->instance_id_present = 1; /* Store the tun_id as Instance ID
*/
- lisph->reserved_flags = 0; /* Reserved flags, set to 0 */
-
- lisph->u1.nonce[0] = 0;
- lisph->u1.nonce[1] = 0;
- lisph->u1.nonce[2] = 0;
-
- tunnel_id_to_instance_id(tun_key->tun_id,
&lisph->u2.word2.instance_id[0]);
- lisph->u2.word2.locator_status_bits = 1;
-}
-
/* Called with rcu_read_lock and BH disabled. */
static int lisp_rcv(struct sock *sk, struct sk_buff *skb)
{
@@ -376,86 +348,56 @@ error:
return ERR_PTR(err);
}
-static bool need_linearize(const struct sk_buff *skb)
+static void lisp_build_header(struct sk_buff *skb)
{
- int i;
+ struct udphdr *udph = udp_hdr(skb);
+ struct lisphdr *lisph = (struct lisphdr *)(udph + 1);
+ const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;
- if (unlikely(skb_shinfo(skb)->frag_list))
- return true;
+ lisph->nonce_present = 0; /* We don't support echo nonce algorithm */
+ lisph->locator_status_bits_present = 1; /* Set LSB */
+ lisph->solicit_echo_nonce = 0; /* No echo noncing */
+ lisph->map_version_present = 0; /* No mapping versioning, nonce
instead */
+ lisph->instance_id_present = 1; /* Store the tun_id as Instance ID
*/
+ lisph->reserved_flags = 0; /* Reserved flags, set to 0 */
- /*
- * Generally speaking we should linearize if there are paged frags.
- * However, if all of the refcounts are 1 we know nobody else can
- * change them from underneath us and we can skip the linearization.
- */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb)->frags[i]))
> 1))
- return true;
+ lisph->u1.nonce[0] = 0;
+ lisph->u1.nonce[1] = 0;
+ lisph->u1.nonce[2] = 0;
- return false;
+ tunnel_id_to_instance_id(tun_key->tun_id,
&lisph->u2.word2.instance_id[0]);
+ lisph->u2.word2.locator_status_bits = 1;
}
-static struct sk_buff *handle_offloads(struct sk_buff *skb)
+static void handle_offloads(struct sk_buff *skb)
{
- int err;
+ if (skb_is_gso(skb))
+ OVS_GSO_CB(skb)->fix_segment = lisp_build_header;
+ else if (get_ip_summed(skb) != OVS_CSUM_PARTIAL)
+ set_ip_summed(skb, OVS_CSUM_NONE);
forward_ip_summed(skb, true);
-
- if (skb_is_gso(skb)) {
- struct sk_buff *nskb;
-
- nskb = __skb_gso_segment(skb, 0, false);
- if (IS_ERR(nskb)) {
- err = PTR_ERR(nskb);
- goto error;
- }
-
- consume_skb(skb);
- skb = nskb;
- } else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
- /* Pages aren't locked and could change at any time.
- * If this happens after we compute the checksum, the
- * checksum will be wrong. We linearize now to avoid
- * this problem.
- */
- if (unlikely(need_linearize(skb))) {
- err = __skb_linearize(skb);
- if (unlikely(err))
- goto error;
- }
-
- err = skb_checksum_help(skb);
- if (unlikely(err))
- goto error;
- }
-
- set_ip_summed(skb, OVS_CSUM_NONE);
-
- return skb;
-
-error:
- return ERR_PTR(err);
+ skb_reset_inner_headers(skb);
}
-static int ovs_tnl_send(struct vport *vport, struct sk_buff *skb,
- u8 ipproto, int tunnel_hlen,
- void (*build_header)(const struct vport *,
- struct sk_buff *,
- int tunnel_hlen))
+static int tnl_send(struct vport *vport, struct sk_buff *skb)
{
- int min_headroom;
+ struct net *net = ovs_dp_get_net(vport->dp);
+ struct lisp_port *lisp_port = lisp_vport(vport);
+ struct udphdr *udph;
struct rtable *rt;
+ int min_headroom;
+ int tunnel_hlen;
__be32 saddr;
- int sent_len = 0;
+ __be16 df;
int err;
- struct sk_buff *nskb;
/* Route lookup */
saddr = OVS_CB(skb)->tun_key->ipv4_src;
rt = find_route(ovs_dp_get_net(vport->dp),
&saddr,
OVS_CB(skb)->tun_key->ipv4_dst,
- ipproto,
+ IPPROTO_UDP,
OVS_CB(skb)->tun_key->ipv4_tos,
skb_get_mark(skb));
if (IS_ERR(rt)) {
@@ -463,7 +405,7 @@ static int ovs_tnl_send(struct vport *vport, struct sk_buff
*skb,
goto error;
}
- tunnel_hlen += sizeof(struct iphdr);
+ tunnel_hlen = sizeof(struct iphdr) + LISP_HLEN;
min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
+ tunnel_hlen
@@ -480,77 +422,27 @@ static int ovs_tnl_send(struct vport *vport, struct
sk_buff *skb,
goto err_free_rt;
}
+ __skb_push(skb, LISP_HLEN);
+ skb_reset_transport_header(skb);
+
+ lisp_build_header(skb);
+ udph = udp_hdr(skb);
+ udph->dest = lisp_port->dst_port;
+ udph->source = htons(ovs_tnl_get_src_port(skb));
+ udph->check = 0;
+ udph->len = htons(skb->len - skb_transport_offset(skb));
+
/* Offloading */
- nskb = handle_offloads(skb);
- if (IS_ERR(nskb)) {
- err = PTR_ERR(nskb);
- goto err_free_rt;
- }
- skb = nskb;
-
- /* Reset SKB */
- nf_reset(skb);
- secpath_reset(skb);
- skb_dst_drop(skb);
- skb_clear_rxhash(skb);
-
- while (skb) {
- struct sk_buff *next_skb = skb->next;
- struct iphdr *iph;
- int frag_len;
-
- skb->next = NULL;
-
- if (unlikely(vlan_deaccel_tag(skb)))
- goto next;
-
- frag_len = skb->len;
- skb_push(skb, tunnel_hlen);
- skb_reset_network_header(skb);
- skb_set_transport_header(skb, sizeof(struct iphdr));
-
- if (next_skb)
- skb_dst_set(skb, dst_clone(&rt_dst(rt)));
- else
- skb_dst_set(skb, &rt_dst(rt));
-
- /* Push Tunnel header. */
- build_header(vport, skb, tunnel_hlen);
-
- /* Push IP header. */
- iph = ip_hdr(skb);
- iph->version = 4;
- iph->ihl = sizeof(struct iphdr) >> 2;
- iph->protocol = ipproto;
- iph->daddr = OVS_CB(skb)->tun_key->ipv4_dst;
- iph->saddr = saddr;
- iph->tos = OVS_CB(skb)->tun_key->ipv4_tos;
- iph->ttl = OVS_CB(skb)->tun_key->ipv4_ttl;
- iph->frag_off = OVS_CB(skb)->tun_key->tun_flags &
+ handle_offloads(skb);
+ skb->local_df = 1;
+
+ df = OVS_CB(skb)->tun_key->tun_flags &
TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
- /*
- * Allow our local IP stack to fragment the outer packet even
- * if the DF bit is set as a last resort. We also need to
- * force selection of an IP ID here with __ip_select_ident(),
- * as ip_select_ident() assumes a proper ID is not needed when
- * when the DF bit is set.
- */
- skb->local_df = 1;
- __ip_select_ident(iph, skb_dst(skb), 0);
-
- memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-
- err = ip_local_out(skb);
- if (unlikely(net_xmit_eval(err)))
- goto next;
-
- sent_len += frag_len;
-
-next:
- skb = next_skb;
- }
- return sent_len;
+ return iptunnel_xmit(net, rt, skb,
+ saddr, OVS_CB(skb)->tun_key->ipv4_dst,
+ IPPROTO_UDP, OVS_CB(skb)->tun_key->ipv4_tos,
+ OVS_CB(skb)->tun_key->ipv4_ttl, df);
err_free_rt:
ip_rt_put(rt);
@@ -572,8 +464,7 @@ static int lisp_tnl_send(struct vport *vport, struct
sk_buff *skb)
case htons(ETH_P_IPV6):
/* Pop off "inner" Ethernet header */
skb_pull(skb, network_offset);
- tnl_len = ovs_tnl_send(vport, skb, IPPROTO_UDP,
- LISP_HLEN, lisp_build_header);
+ tnl_len = tnl_send(vport, skb);
return tnl_len > 0 ? tnl_len + network_offset : tnl_len;
default:
kfree_skb(skb);