Though not very efficient, the previous way of maintaining a
stale SACK cache past reneging protected better against misuse,
because the receiver couldn't cause a set-clear ping-pong of
SACKED_ACKED over a large number of skbs.
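
To illustrate the trade-off, here is a toy standalone model (not
the in-tree sacktag code; sack_block, tag_blocks and CACHE_SLOTS
are made-up names for this sketch): while a stale cache is kept,
an identical re-sent SACK block matches its cache slot and gets
skipped, so a reneging receiver cannot force a full re-tagging
walk on every ACK; once the cache is flushed, the same block
counts as new again.

    #include <stdio.h>
    #include <string.h>

    /* Toy model of the recv_sack_cache fast path; not kernel code. */
    struct sack_block { unsigned int start_seq, end_seq; };

    #define CACHE_SLOTS 4

    static struct sack_block cache[CACHE_SLOTS];

    /* Return how many blocks would require a costly write queue walk. */
    static int tag_blocks(const struct sack_block *sp, int n)
    {
            int i, walks = 0;

            for (i = 0; i < n && i < CACHE_SLOTS; i++) {
                    if (sp[i].start_seq == cache[i].start_seq &&
                        sp[i].end_seq == cache[i].end_seq)
                            continue;  /* seen on an earlier ACK */
                    walks++;           /* would set SACKED_ACKED on skbs */
                    cache[i] = sp[i];
            }
            return walks;
    }

    int main(void)
    {
            struct sack_block sp[] = { { 1000, 9000 } };

            printf("first ACK:  %d walk(s)\n", tag_blocks(sp, 1)); /* 1 */
            printf("same again: %d walk(s)\n", tag_blocks(sp, 1)); /* 0 */

            /* Reneging with this patch: flush cache, block counts again */
            memset(cache, 0, sizeof(cache));
            printf("post-flush: %d walk(s)\n", tag_blocks(sp, 1)); /* 1 */
            return 0;
    }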

Mostly copy-pasted from the sacktag clearer. Compile tested.

Signed-off-by: Ilpo Järvinen <[EMAIL PROTECTED]>
---
 net/ipv4/tcp_input.c |   11 +++++++++--
 1 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 97b9be2..ea196de 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1509,7 +1509,7 @@ void tcp_enter_loss(struct sock *sk, int how)
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       int cnt = 0;
+       int cnt = 0, i;
 
        /* Reduce ssthresh if it has not yet been made inside this window. */
        if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq ||
@@ -1527,8 +1527,15 @@ void tcp_enter_loss(struct sock *sk, int how)
 
        /* Push undo marker, if it was plain RTO and nothing
         * was retransmitted. */
-       if (!how)
+       if (!how) {
                tp->undo_marker = tp->snd_una;
+       } else {
+               /* We're going to flush SACKed state, every SACK counts again */
+               for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache); i++) {
+                       tp->recv_sack_cache[i].start_seq = 0;
+                       tp->recv_sack_cache[i].end_seq = 0;
+               }
+       }
 
        tcp_for_write_queue(skb, sk) {
                if (skb == tcp_send_head(sk))
-- 
1.4.2
