From: "David S. Miller" <[EMAIL PROTECTED]>
Date: Sun, 21 Aug 2005 16:35:06 -0700 (PDT)

> Attached are two patches dealing with two issues I think should be
> addressed in 2.6.13 if possible.

Sorry, I attached the wrong patches, let's try this instead.

diff-tree b94c07bcb646075b82302616c369b1f3006682d0 (from 
f6fdd7d9c273bb2a20ab467cb57067494f932fa3)
Author: Dmitry Yusupov <[EMAIL PROTECTED]>
Date:   Sun Aug 21 15:49:45 2005 -0700

    [TCP]: Do TSO deferral even if tail SKB can go out now.
    
    If the tail SKB fits into the window, it is still
    beneficial to defer until the goal percentage of
    the window is available.  This gives the application
    time to feed more data into the send queue and thus
    results in larger TSO frames going out.
    
    Patch from Dmitry Yusupov <[EMAIL PROTECTED]>.
    
    Signed-off-by: David S. Miller <[EMAIL PROTECTED]>

diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -925,10 +925,6 @@ static int tcp_tso_should_defer(struct s
 
        limit = min(send_win, cong_win);
 
-       /* If sk_send_head can be sent fully now, just do it.  */
-       if (skb->len <= limit)
-               return 0;
-
        if (sysctl_tcp_tso_win_divisor) {
                u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
 
diff-tree 76d62a93fe420491267ef2312bbcfe11e1816da4 (from 
b94c07bcb646075b82302616c369b1f3006682d0)
Author: David S. Miller <[EMAIL PROTECTED]>
Date:   Sun Aug 21 16:21:10 2005 -0700

    [TCP]: Keep TSO enabled even during loss events.
    
    All we need to do is resegment the queue so that
    we record SACK information accurately.  The edges
    of the SACK blocks guide our resegmenting decisions.
    
    Signed-off-by: David S. Miller <[EMAIL PROTECTED]>
    

diff --git a/include/net/tcp.h b/include/net/tcp.h
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -855,6 +855,7 @@ extern int tcp_retransmit_skb(struct soc
 extern void tcp_xmit_retransmit_queue(struct sock *);
 extern void tcp_simple_retransmit(struct sock *);
 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
+extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
 
 extern void tcp_send_probe0(struct sock *);
 extern void tcp_send_partial(struct sock *);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -909,14 +909,6 @@ tcp_sacktag_write_queue(struct sock *sk,
        int flag = 0;
        int i;
 
-       /* So, SACKs for already sent large segments will be lost.
-        * Not good, but alternative is to resegment the queue. */
-       if (sk->sk_route_caps & NETIF_F_TSO) {
-               sk->sk_route_caps &= ~NETIF_F_TSO;
-               sock_set_flag(sk, SOCK_NO_LARGESEND);
-               tp->mss_cache = tp->mss_cache;
-       }
-
        if (!tp->sacked_out)
                tp->fackets_out = 0;
        prior_fackets = tp->fackets_out;
@@ -964,20 +956,40 @@ tcp_sacktag_write_queue(struct sock *sk,
                        flag |= FLAG_DATA_LOST;
 
                sk_stream_for_retrans_queue(skb, sk) {
-                       u8 sacked = TCP_SKB_CB(skb)->sacked;
-                       int in_sack;
+                       int in_sack, pcount;
+                       u8 sacked;
 
                        /* The retransmission queue is always in order, so
                         * we can short-circuit the walk early.
                         */
-                       if(!before(TCP_SKB_CB(skb)->seq, end_seq))
+                       if (!before(TCP_SKB_CB(skb)->seq, end_seq))
                                break;
 
-                       fack_count += tcp_skb_pcount(skb);
+                       pcount = tcp_skb_pcount(skb);
+
+                       if (pcount > 1 &&
+                           (after(start_seq, TCP_SKB_CB(skb)->seq) ||
+                            before(end_seq, TCP_SKB_CB(skb)->end_seq))) {
+                               unsigned int pkt_len;
+
+                               if (after(start_seq, TCP_SKB_CB(skb)->seq))
+                                       pkt_len = (start_seq -
+                                                  TCP_SKB_CB(skb)->seq);
+                               else
+                                       pkt_len = (end_seq -
+                                                  TCP_SKB_CB(skb)->seq);
+                               if (tcp_fragment(sk, skb, pkt_len, pkt_len))
+                                       break;
+                               pcount = tcp_skb_pcount(skb);
+                       }
+
+                       fack_count += pcount;
 
                        in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
                                !before(end_seq, TCP_SKB_CB(skb)->end_seq);
 
+                       sacked = TCP_SKB_CB(skb)->sacked;
+
                        /* Account D-SACK for retransmitted packet. */
                        if ((dup_sack && in_sack) &&
                            (sacked & TCPCB_RETRANS) &&
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -427,7 +427,7 @@ static void tcp_set_skb_tso_segs(struct 
  * packet to the list.  This won't be called frequently, I hope. 
  * Remember, these are still headerless SKBs at this point.
  */
-static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, 
unsigned int mss_now)
+int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int 
mss_now)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *buff;
@@ -1346,12 +1346,6 @@ int tcp_retransmit_skb(struct sock *sk, 
        if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
                if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
                        BUG();
-
-               if (sk->sk_route_caps & NETIF_F_TSO) {
-                       sk->sk_route_caps &= ~NETIF_F_TSO;
-                       sock_set_flag(sk, SOCK_NO_LARGESEND);
-               }
-
                if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
                        return -ENOMEM;
        }
@@ -1980,12 +1974,6 @@ int tcp_write_wakeup(struct sock *sk)
                                TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
                                if (tcp_fragment(sk, skb, seg_size, mss))
                                        return -1;
-                               /* SWS override triggered forced fragmentation.
-                                * Disable TSO, the connection is too sick. */
-                               if (sk->sk_route_caps & NETIF_F_TSO) {
-                                       sock_set_flag(sk, SOCK_NO_LARGESEND);
-                                       sk->sk_route_caps &= ~NETIF_F_TSO;
-                               }
                        } else if (!tcp_skb_pcount(skb))
                                tcp_set_skb_tso_segs(sk, skb, mss);
 

Reply via email to