This patch extends UDP GRO to support fraglist GRO/GSO
by using the previously introduced infrastructure.
All UDP packets that are not targeted to a GRO capable
UDP socket now go through fraglist GRO (local input
and forward).

Signed-off-by: Steffen Klassert <steffen.klass...@secunet.com>
---
 net/ipv4/udp_offload.c | 57 ++++++++++++++++++++++++++++++++++++------
 1 file changed, 49 insertions(+), 8 deletions(-)

diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 0646d61f4fa8..9d77cc44da6b 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -187,6 +187,20 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 }
 EXPORT_SYMBOL(skb_udp_tunnel_segment);
 
+static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
+                                             netdev_features_t features)
+{
+       unsigned int mss = skb_shinfo(skb)->gso_size;
+
+       skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
+       if (IS_ERR(skb))
+               return skb;
+
+       udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);
+
+       return skb;
+}
+
 struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
                                  netdev_features_t features)
 {
@@ -199,6 +213,9 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
        __sum16 check;
        __be16 newlen;
 
+       if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
+               return __udp_gso_segment_list(gso_skb, features);
+
        mss = skb_shinfo(gso_skb)->gso_size;
        if (gso_skb->len <= sizeof(*uh) + mss)
                return ERR_PTR(-EINVAL);
@@ -351,16 +368,15 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
        struct sk_buff *pp = NULL;
        struct udphdr *uh2;
        struct sk_buff *p;
+       int ret;
 
        /* requires non zero csum, for symmetry with GSO */
        if (!uh->check) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }
-
        /* pull encapsulating udp header */
        skb_gro_pull(skb, sizeof(struct udphdr));
-       skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
 
        list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
@@ -378,8 +394,17 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
                 * Under small packet flood GRO count could elsewhere grow a lot
                 * leading to execessive truesize values
                 */
-               if (!skb_gro_receive(p, skb) &&
-                   NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
+               if (NAPI_GRO_CB(skb)->is_flist) {
+                       if (!pskb_may_pull(skb, skb_gro_offset(skb)))
+                               return NULL;
+                       ret = skb_gro_receive_list(p, skb);
+               } else {
+                       skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
+
+                       ret = skb_gro_receive(p, skb);
+               }
+
+               if (!ret && NAPI_GRO_CB(p)->count > UDP_GRO_CNT_MAX)
                        pp = p;
                else if (uh->len != uh2->len)
                        pp = p;
@@ -403,10 +428,17 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
 
        rcu_read_lock();
        sk = (*lookup)(skb, uh->source, uh->dest);
-       if (!sk)
-               goto out_unlock;
+       if (!sk) {
+               NAPI_GRO_CB(skb)->is_flist = 1;
+               pp = call_gro_receive(udp_gro_receive_segment, head, skb);
+               rcu_read_unlock();
+               return pp;
+       }
+
+       if (!udp_sk(sk)->gro_receive) {
+               if (!udp_sk(sk)->gro_enabled)
+                       NAPI_GRO_CB(skb)->is_flist = 1;
 
-       if (udp_sk(sk)->gro_enabled) {
                pp = call_gro_receive(udp_gro_receive_segment, head, skb);
                rcu_read_unlock();
                return pp;
@@ -456,7 +488,7 @@ static struct sk_buff *udp4_gro_receive(struct list_head *head,
 {
        struct udphdr *uh = udp_gro_udphdr(skb);
 
-       if (unlikely(!uh) || !static_branch_unlikely(&udp_encap_needed_key))
+       if (unlikely(!uh))
                goto flush;
 
        /* Don't bother verifying checksum if we're going to flush anyway. */
@@ -530,6 +562,15 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
        const struct iphdr *iph = ip_hdr(skb);
        struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
 
+       if (NAPI_GRO_CB(skb)->is_flist) {
+               uh->len = htons(skb->len - nhoff);
+
+               skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
+               skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+
+               return 0;
+       }
+
        if (uh->check)
                uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
                                          iph->daddr, 0);
-- 
2.17.1

Reply via email to