xref: /openbmc/linux/net/ipv4/tcp_recovery.c (revision 4f6cce39)
#include <linux/tcp.h>
#include <net/tcp.h>

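/* Controlled by the net.ipv4.tcp_recovery sysctl; the
 * TCP_RACK_LOSS_DETECTION bit enables RACK loss detection (on by
 * default).
 */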
int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOSS_DETECTION;

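/* Mark @skb lost unconditionally. If it had also been retransmitted,
 * clear TCPCB_SACKED_RETRANS and deduct it from retrans_out, since the
 * retransmission is now presumed lost as well.
 */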
static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_skb_mark_lost_uncond_verify(tp, skb);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		/* Account for retransmits that are lost again */
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
	}
}

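/* Returns true if t1 was sent after t2, with ties on the microsecond
 * timestamp broken by the higher ending sequence number (packets sent
 * in one burst can carry identical skb_mstamp values).
 */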
static bool tcp_rack_sent_after(const struct skb_mstamp *t1,
				const struct skb_mstamp *t2,
				u32 seq1, u32 seq2)
{
	return skb_mstamp_after(t1, t2) ||
	       (t1->v64 == t2->v64 && after(seq1, seq2));
}

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and therefore is robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
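 *
 * Illustrative numbers (an assumed example, not from the drafts): with
 * rack.rtt_us = 50ms and reo_wnd = 5ms, a packet sent before the most
 * recently delivered one is marked lost once it has been outstanding
 * for more than 55ms without being (s)acked.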
 */
static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now,
				 u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	u32 reo_wnd;

	*reo_timeout = 0;
	/* To be more reordering resilient, allow min_rtt/4 settling delay
	 * (lower-bounded to 1000 usec). We use min_rtt instead of the
	 * smoothed RTT because reordering is often a path property and less
	 * related to queuing or delayed ACKs.
	 */
	reo_wnd = 1000;
	if ((tp->rack.reord || !tp->lost_out) && tcp_min_rtt(tp) != ~0U)
		reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
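	/* Example (assumed numbers): min_rtt = 40ms gives reo_wnd = 10ms;
	 * min_rtt = 2ms leaves reo_wnd at the 1000 usec floor.
	 */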

	tcp_for_write_queue(skb, sk) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);

		if (skb == tcp_send_head(sk))
			break;

		/* Skip ones already (s)acked */
		if (!after(scb->end_seq, tp->snd_una) ||
		    scb->sacked & TCPCB_SACKED_ACKED)
			continue;

		if (tcp_rack_sent_after(&tp->rack.mstamp, &skb->skb_mstamp,
					tp->rack.end_seq, scb->end_seq)) {
			/* Step 3 in draft-cheng-tcpm-rack-00.txt:
			 * A packet is lost if its elapsed time is beyond
			 * the recent RTT plus the reordering window.
			 */
			u32 elapsed = skb_mstamp_us_delta(now,
							  &skb->skb_mstamp);
			s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;
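			/* remaining > 0 means the skb can still be
			 * (s)acked within the reordering window, so do
			 * not mark it lost yet.
			 */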

			if (remaining < 0) {
				tcp_rack_mark_skb_lost(sk, skb);
				continue;
			}

			/* Skip ones marked lost but not yet retransmitted */
			if ((scb->sacked & TCPCB_LOST) &&
			    !(scb->sacked & TCPCB_SACKED_RETRANS))
				continue;

			/* Record maximum wait time (+1 to avoid 0) */
			*reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);

		} else if (!(scb->sacked & TCPCB_RETRANS)) {
			/* Original data are sent sequentially so stop early
			 * because the rest are all sent after rack_sent
			 */
			break;
		}
	}
}

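/* Entry point on ACK processing: run RACK loss detection if the RACK
 * state advanced since the last scan, and arm the reordering timer when
 * some packets are still waiting out the reordering window.
 */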
void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, now, &timeout);
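	/* A non-zero timeout means some packets are still within the
	 * reordering window; arm the reordering timer to re-check them,
	 * padded by TCP_REO_TIMEOUT_MIN.
	 */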
	if (timeout) {
		timeout = usecs_to_jiffies(timeout + TCP_REO_TIMEOUT_MIN);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      const struct skb_mstamp *xmit_time,
		      const struct skb_mstamp *ack_time)
{
	u32 rtt_us;

	if (tp->rack.mstamp.v64 &&
	    !tcp_rack_sent_after(xmit_time, &tp->rack.mstamp,
				 end_seq, tp->rack.end_seq))
		return;

	rtt_us = skb_mstamp_us_delta(ack_time, xmit_time);
	if (sacked & TCPCB_RETRANS) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or the prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., retransmission is at least
		 * an RTT later).
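		 *
		 * E.g. (assumed numbers): with min_rtt = 20ms, a measured
		 * rtt_us of 5ms against the retransmission's send time
		 * most likely means the (s)ack was for an earlier
		 * transmission, so the update below is skipped.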
		 */
		if (rtt_us < tcp_min_rtt(tp))
			return;
	}
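	/* Advance the RACK state to this (s)acked packet's transmission
	 * time and sequence, and flag the advance so the next ACK runs
	 * a loss-detection scan.
	 */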
	tp->rack.rtt_us = rtt_us;
	tp->rack.mstamp = *xmit_time;
	tp->rack.end_seq = end_seq;
	tp->rack.advanced = 1;
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct skb_mstamp now;
	u32 timeout, prior_inflight;

	skb_mstamp_get(&now);
	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &now, &timeout);
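	/* Fewer packets in flight means tcp_rack_detect_loss() marked
	 * fresh losses: enter recovery if needed and retransmit them.
	 */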
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
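	/* If no retransmission timer is pending (e.g. nothing was
	 * retransmitted above), re-arm the RTO so the connection keeps a
	 * timer running.
	 */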
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}
184