// SPDX-License-Identifier: GPL-2.0-only
/*
 * TCP Westwood+: end-to-end bandwidth estimation for TCP
 *
 *      Angelo Dell'Aera: author of the first version of TCP Westwood+ in Linux 2.4
 *
 * Support at http://c3lab.poliba.it/index.php/Westwood
 * Main references in literature:
 *
 * - S. Mascolo, C. Casetti, M. Gerla et al.
 *   "TCP Westwood: bandwidth estimation for TCP" Proc. ACM Mobicom 2001
 *
 * - A. Grieco, S. Mascolo
 *   "Performance evaluation of New Reno, Vegas, Westwood+ TCP" ACM Computer
 *   Comm. Review, 2004
 *
 * - A. Dell'Aera, L. Grieco, S. Mascolo
 *   "Linux 2.4 Implementation of Westwood+ TCP with Rate-Halving:
 *    A Performance Evaluation Over the Internet" (ICC 2004), Paris, June 2004
 *
 * Westwood+ employs end-to-end bandwidth measurement to set cwnd and
 * ssthresh after packet loss. The probing phase is the same as in the
 * original Reno.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>

/* TCP Westwood structure */
struct westwood {
	u32    bw_ns_est;        /* first bandwidth estimation, lightly smoothed */
	u32    bw_est;           /* bandwidth estimate */
	u32    rtt_win_sx;       /* start of the current RTT evaluation window */
	u32    bk;               /* bytes acked in the current window */
	u32    snd_una;          /* used for evaluating the number of acked bytes */
	u32    cumul_ack;        /* bytes acked by the last ack */
	u32    accounted;        /* bytes already credited by dupacks */
	u32    rtt;              /* last RTT sample, in jiffies */
	u32    rtt_min;          /* minimum observed RTT */
	u8     first_ack;        /* flag indicating that this is the first ack */
	u8     reset_rtt_min;    /* reset RTT min to next RTT sample */
};

/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN   (HZ/20)	/* 50ms */
#define TCP_WESTWOOD_INIT_RTT  (20*HZ)	/* maybe too conservative?! */
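
/*
 * Example (illustrative, assuming HZ=100): TCP_WESTWOOD_RTT_MIN is
 * HZ/20 = 5 jiffies = 50ms and TCP_WESTWOOD_INIT_RTT is 20*HZ = 2000
 * jiffies = 20 seconds. The deliberately huge initial value guarantees
 * that the first real RTT sample replaces it (see tcp_westwood_init()
 * and update_rtt_min() below).
 */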

/*
 * @tcp_westwood_create
 * This function initializes the fields used by TCP Westwood+. It is
 * called after the initial SYN, so the sequence numbers are correct,
 * but for new passive connections we have no information about RTTmin
 * at this time, so we simply set it to TCP_WESTWOOD_INIT_RTT. This
 * value was deliberately chosen to be overly conservative, so that we
 * are sure it will be updated in a consistent way as soon as possible,
 * reasonably within the first RTT of the connection's lifetime.
 */
static void tcp_westwood_init(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);

	w->bk = 0;
	w->bw_ns_est = 0;
	w->bw_est = 0;
	w->accounted = 0;
	w->cumul_ack = 0;
	w->reset_rtt_min = 1;
	w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
	w->rtt_win_sx = tcp_jiffies32;
	w->snd_una = tcp_sk(sk)->snd_una;
	w->first_ack = 1;
}

/*
 * @westwood_do_filter
 * Low-pass filter. Implemented using constant coefficients.
 */
static inline u32 westwood_do_filter(u32 a, u32 b)
{
	return ((7 * a) + b) >> 3;
}
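
/*
 * Worked example (illustrative): this is an exponentially weighted
 * moving average with gain 1/8, i.e. new = old + (sample - old)/8.
 * With old = 800 and sample = 1600:
 * (7*800 + 1600) >> 3 = 7200 >> 3 = 900, so the estimate moves one
 * eighth of the way toward the new sample.
 */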

static void westwood_filter(struct westwood *w, u32 delta)
{
	/* If the filter is empty, fill it with the first bandwidth sample */
	if (w->bw_ns_est == 0 && w->bw_est == 0) {
		w->bw_ns_est = w->bk / delta;
		w->bw_est = w->bw_ns_est;
	} else {
		w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
		w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
	}
}
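
/*
 * Units (illustrative example): w->bk counts bytes and delta is in
 * jiffies, so bw_ns_est and bw_est are in bytes per jiffy. Assuming
 * HZ=100, bk = 60000 bytes over delta = 5 jiffies (50ms) gives
 * 12000 bytes/jiffy = 1.2 MB/s, roughly 9.6 Mbit/s.
 */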

/*
 * @westwood_pkts_acked
 * Called after processing a group of packets, but all Westwood needs
 * is the last sample of srtt.
 */
static void tcp_westwood_pkts_acked(struct sock *sk,
				    const struct ack_sample *sample)
{
	struct westwood *w = inet_csk_ca(sk);

	if (sample->rtt_us > 0)
		w->rtt = usecs_to_jiffies(sample->rtt_us);
}
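
/*
 * Note (illustrative): the sample is converted to jiffies, so RTT
 * resolution is one tick (10ms at HZ=100). Such coarse samples are
 * acceptable here because the bandwidth evaluation window below is at
 * least 50ms anyway.
 */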

/*
 * @westwood_update_window
 * Updates the RTT evaluation window if it is the right moment to do
 * so. If so, it runs the filter to evaluate bandwidth.
 */
static void westwood_update_window(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);
	s32 delta = tcp_jiffies32 - w->rtt_win_sx;

	/* Initialize w->snd_una with the first acked sequence number in order
	 * to fix mismatch between tp->snd_una and w->snd_una for the first
	 * bandwidth sample
	 */
	if (w->first_ack) {
		w->snd_una = tcp_sk(sk)->snd_una;
		w->first_ack = 0;
	}

	/*
	 * See if an RTT window has passed.
	 * Be careful: if RTT is less than 50ms we don't filter but keep
	 * 'building the sample', since estimation over very small time
	 * intervals is better avoided.
	 * Obviously on a LAN we will reasonably always have
	 * right_bound = left_bound + TCP_WESTWOOD_RTT_MIN
	 */
	if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
		westwood_filter(w, delta);

		w->bk = 0;
		w->rtt_win_sx = tcp_jiffies32;
	}
}
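
/*
 * Example (illustrative, assuming HZ=100): with a measured rtt of
 * 20 jiffies (200ms), acked bytes keep accumulating in w->bk until
 * more than max(20, 5) = 20 jiffies have passed since rtt_win_sx;
 * only then is the filter run and a new window started.
 */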

static inline void update_rtt_min(struct westwood *w)
{
	if (w->reset_rtt_min) {
		w->rtt_min = w->rtt;
		w->reset_rtt_min = 0;
	} else {
		w->rtt_min = min(w->rtt, w->rtt_min);
	}
}
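
/*
 * Note that rtt_min normally only ratchets downward; reset_rtt_min is
 * set at init time and after CA_EVENT_LOSS (see tcp_westwood_event()),
 * so the next sample re-seeds the minimum rather than being min()-ed
 * against a possibly stale value.
 */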

/*
 * @westwood_fast_bw
 * Called when we are in the fast path, i.e. when header prediction
 * succeeds. In that case the update is straightforward and needs no
 * particular care.
 */
static inline void westwood_fast_bw(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	westwood_update_window(sk);

	w->bk += tp->snd_una - w->snd_una;
	w->snd_una = tp->snd_una;
	update_rtt_min(w);
}

/*
 * @westwood_acked_count
 * Evaluates cumul_ack, used to update bk, handling delayed and
 * partial acks.
 */
static inline u32 westwood_acked_count(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	w->cumul_ack = tp->snd_una - w->snd_una;

	/* If cumul_ack is 0 this is a dupack since it's not moving
	 * tp->snd_una.
	 */
	if (!w->cumul_ack) {
		w->accounted += tp->mss_cache;
		w->cumul_ack = tp->mss_cache;
	}

	if (w->cumul_ack > tp->mss_cache) {
		/* Partial or delayed ack */
		if (w->accounted >= w->cumul_ack) {
			w->accounted -= w->cumul_ack;
			w->cumul_ack = tp->mss_cache;
		} else {
			w->cumul_ack -= w->accounted;
			w->accounted = 0;
		}
	}

	w->snd_una = tp->snd_una;

	return w->cumul_ack;
}
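
/*
 * Worked example (illustrative): suppose three dupacks arrive, each
 * crediting one MSS (accounted = 3*mss, and bk grows by 3*mss), and a
 * cumulative ack then advances snd_una by 4*mss. Since accounted
 * (3*mss) < cumul_ack (4*mss), the bytes already credited are
 * subtracted and the ack counts as 1*mss, so nothing is counted twice.
 */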

/*
 * TCP Westwood
 * Here the limit is evaluated as bandwidth estimate * RTTmin (divided
 * by mss_cache to express it in packets). The result is clamped to a
 * minimum of 2, so this never returns 0.
 */
static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct westwood *w = inet_csk_ca(sk);

	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}
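
/*
 * Worked example (illustrative): with bw_est = 12000 bytes/jiffy,
 * rtt_min = 10 jiffies and mss_cache = 1500 bytes, the result is
 * 12000 * 10 / 1500 = 80 packets, i.e. the estimated bandwidth-delay
 * product expressed in segments.
 */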

static void tcp_westwood_ack(struct sock *sk, u32 ack_flags)
{
	if (ack_flags & CA_ACK_SLOWPATH) {
		struct westwood *w = inet_csk_ca(sk);

		westwood_update_window(sk);
		w->bk += westwood_acked_count(sk);

		update_rtt_min(w);
		return;
	}

	westwood_fast_bw(sk);
}

static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	switch (event) {
	case CA_EVENT_COMPLETE_CWR:
		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		tcp_snd_cwnd_set(tp, tp->snd_ssthresh);
		break;
	case CA_EVENT_LOSS:
		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		/* Update RTT_min when next ack arrives */
		w->reset_rtt_min = 1;
		break;
	default:
		/* don't care */
		break;
	}
}
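
/*
 * This is the "adaptive decrease" that distinguishes Westwood+ from
 * Reno: instead of blindly halving, ssthresh is set to the estimated
 * bandwidth-delay product, so losses on lossy (e.g. wireless) paths do
 * not collapse the window below what the pipe can actually sustain.
 */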

/* Extract info for TCP socket info provided via netlink. */
static size_t tcp_westwood_info(struct sock *sk, u32 ext, int *attr,
				union tcp_cc_info *info)
{
	const struct westwood *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		info->vegas.tcpv_enabled = 1;
		info->vegas.tcpv_rttcnt	= 0;
		info->vegas.tcpv_rtt	= jiffies_to_usecs(ca->rtt);
		info->vegas.tcpv_minrtt	= jiffies_to_usecs(ca->rtt_min);

		*attr = INET_DIAG_VEGASINFO;
		return sizeof(struct tcpvegas_info);
	}
	return 0;
}

static struct tcp_congestion_ops tcp_westwood __read_mostly = {
	.init		= tcp_westwood_init,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cwnd_event	= tcp_westwood_event,
	.in_ack_event	= tcp_westwood_ack,
	.get_info	= tcp_westwood_info,
	.pkts_acked	= tcp_westwood_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "westwood"
};
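
/*
 * Usage sketch (illustrative, not part of this file): once registered,
 * a socket can opt in with the standard TCP_CONGESTION socket option:
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "westwood",
 *		   strlen("westwood"));
 *
 * or system-wide via sysctl net.ipv4.tcp_congestion_control=westwood.
 */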

static int __init tcp_westwood_register(void)
{
	BUILD_BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_westwood);
}

module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);

MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");