hi all,

while testing some local patches I observed that the TCP tput in the
following scenario:

# the following enable napi on veth0, so that we can trigger the
# GRO path with namespaces
ip netns add test
ip link add type veth
ip link set dev veth0 netns test
ip -n test link set lo up
ip -n test link set veth0 up
ip -n test addr add dev veth0 172.16.1.2/24
ip link set dev veth1 up
ip addr add dev veth1 172.16.1.1/24
IDX=`ip netns exec test cat /sys/class/net/veth0/ifindex`

# 'xdp_pass' is a NO-OP XDP program that simply returns XDP_PASS
ip netns exec test ./xdp_pass $IDX &
taskset 0x2 ip netns exec test iperf3 -s -i 60 &
taskset 0x1 iperf3 -c 172.16.1.2 -t 60 -i 60

is quite a bit lower than expected (~800Mbps). 'perf' shows a weird topmost
offender:

  80.42%  [kernel]           [k] find_bug

I *think* skb_gro_receive() does not behave correctly if !skb->sk, and
I experimented with the following patch, so far with success (in the
above scenario tput is now ~11Gbps). Am I missing something? Is that
overkill?

Thank you,

Paolo

---
diff --git a/net/core/dev.c b/net/core/dev.c
index ca78dc5..94723b1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5554,6 +5554,11 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
 
 static void napi_skb_free_stolen_head(struct sk_buff *skb)
 {
+       if (skb->destructor) {
+               WARN_ON(in_irq());
+               skb->destructor(skb);
+       }
+
        skb_dst_drop(skb);
        secpath_reset(skb);
        kmem_cache_free(skbuff_head_cache, skb);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c996c09..19f2fd9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3827,6 +3827,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
        unsigned int offset = skb_gro_offset(skb);
        unsigned int headlen = skb_headlen(skb);
        unsigned int len = skb_gro_len(skb);
+       unsigned int new_skb_truesize;
        unsigned int delta_truesize;
        struct sk_buff *lp;
 
@@ -3858,11 +3859,13 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
                frag->page_offset += offset;
                skb_frag_size_sub(frag, offset);
 
-               /* all fragments truesize : remove (head size + sk_buff) */
-               delta_truesize = skb->truesize -
-                                SKB_TRUESIZE(skb_end_offset(skb));
+               /* all fragments truesize : remove (head size + sk_buff);
+                * keep unchanged the amount of memory accounted to skb->sk
+                */
+               new_skb_truesize = SKB_TRUESIZE(skb_end_offset(skb));
+               delta_truesize = skb->truesize - new_skb_truesize;
 
-               skb->truesize -= skb->data_len;
+               skb->truesize -= new_skb_truesize;
                skb->len -= skb->data_len;
                skb->data_len = 0;
 
@@ -3891,12 +3894,24 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
                memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
                /* We dont need to clear skbinfo->nr_frags here */
 
-               delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
+               /* keep unchanged the amount of memory accounted to skb->sk */
+               new_skb_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
+               delta_truesize = skb->truesize - new_skb_truesize;
+               skb->truesize = new_skb_truesize;
                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
                goto done;
        }
 
 merge:
+       if (skb->destructor) {
+               /* skb's truesize ownership is transferred to p, avoid
+                * releasing it twice when p is freed
+                */
+               WARN_ON_ONCE(p->sk != skb->sk || p->destructor != skb->destructor);
+               skb->sk = 0;
+               skb->destructor = NULL;
+       }
+
        delta_truesize = skb->truesize;
        if (offset > headlen) {
                unsigned int eat = offset - headlen;



Reply via email to