Refactor tcp_enter_loss() using a new helper, tcp_timeout_mark_lost(),
that marks packets lost upon RTO and detects SACK reneging.
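
After the refactor, the RTO path reduces to the following shape (a
simplified sketch of the code in this patch, with the unrelated
bookkeeping in tcp_enter_loss() elided):

    /* Marks every packet in the retransmit queue lost. If the head of
     * the queue was SACKed, the receiver is reneging, so also clear the
     * SACK tags and account the reneging event.
     */
    static void tcp_timeout_mark_lost(struct sock *sk);

    void tcp_enter_loss(struct sock *sk)
    {
            /* ... reduce ssthresh, reset cwnd, init undo state ... */
            tcp_timeout_mark_lost(sk);
            /* ... reordering heuristics, enter TCP_CA_Loss ... */
    }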

Signed-off-by: Yuchung Cheng <ych...@google.com>
Signed-off-by: Neal Cardwell <ncardw...@google.com>
Reviewed-by: Eric Dumazet <eduma...@google.com>
Reviewed-by: Soheil Hassas Yeganeh <soh...@google.com>
Reviewed-by: Priyaranjan Jha <priyar...@google.com>
---
 net/ipv4/tcp_input.c | 50 +++++++++++++++++++++++++-------------------
 1 file changed, 29 insertions(+), 21 deletions(-)

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6fb0a28977a0..af32accda2a9 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1917,18 +1917,43 @@ static inline void tcp_init_undo(struct tcp_sock *tp)
        tp->undo_retrans = tp->retrans_out ? : -1;
 }
 
-/* Enter Loss state. If we detect SACK reneging, forget all SACK information
+/* If we detect SACK reneging, forget all SACK information
  * and reset tags completely, otherwise preserve SACKs. If receiver
  * dropped its ofo queue, we will know this due to reneging detection.
  */
+static void tcp_timeout_mark_lost(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *skb;
+       bool is_reneg;                  /* is receiver reneging on SACKs? */
+
+       skb = tcp_rtx_queue_head(sk);
+       is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED);
+       if (is_reneg) {
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
+               tp->sacked_out = 0;
+               /* Mark SACK reneging until we recover from this loss event. */
+               tp->is_sack_reneg = 1;
+       } else if (tcp_is_reno(tp)) {
+               tcp_reset_reno_sack(tp);
+       }
+
+       skb_rbtree_walk_from(skb) {
+               if (is_reneg)
+                       TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
+               tcp_mark_skb_lost(sk, skb);
+       }
+       tcp_verify_left_out(tp);
+       tcp_clear_all_retrans_hints(tp);
+}
+
+/* Enter Loss state. */
 void tcp_enter_loss(struct sock *sk)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
-       struct sk_buff *skb;
        bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
-       bool is_reneg;                  /* is receiver reneging on SACKs? */
 
        /* Reduce ssthresh if it has not yet been made inside this window. */
        if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
@@ -1944,24 +1969,7 @@ void tcp_enter_loss(struct sock *sk)
        tp->snd_cwnd_cnt   = 0;
        tp->snd_cwnd_stamp = tcp_jiffies32;
 
-       if (tcp_is_reno(tp))
-               tcp_reset_reno_sack(tp);
-
-       skb = tcp_rtx_queue_head(sk);
-       is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED);
-       if (is_reneg) {
-               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
-               tp->sacked_out = 0;
-               /* Mark SACK reneging until we recover from this loss event. */
-               tp->is_sack_reneg = 1;
-       }
-       skb_rbtree_walk_from(skb) {
-               if (is_reneg)
-                       TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
-               tcp_mark_skb_lost(sk, skb);
-       }
-       tcp_verify_left_out(tp);
-       tcp_clear_all_retrans_hints(tp);
+       tcp_timeout_mark_lost(sk);
 
        /* Timeout in disordered state after receiving substantial DUPACKs
         * suggests that the degree of reordering is over-estimated.
-- 
2.17.0.441.gb46fe60e1d-goog
