/*
 * Pluggable TCP congestion control support and NewReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <net/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/* Must be called with rcu lock held */
static const struct tcp_congestion_ops *__tcp_ca_find_autoload(const char *name)
{
	const struct tcp_congestion_ops *ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	return ca;
}

/* Simple linear search, not much in here. */
struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (e->key == key)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement these */
	if (!ca->ssthresh || !ca->undo_cwnd ||
	    !(ca->cong_avoid || ca->cong_control)) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
		pr_notice("%s already registered or non-unique key\n",
			  ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_debug("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);
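
/* Example (an illustrative sketch, not part of this file): a minimal
 * out-of-tree module, here called "example", fills in the mandatory ops
 * checked above and registers from its init hook. Reusing the Reno
 * helpers exported later in this file keeps the sketch self-contained:
 *
 *	static struct tcp_congestion_ops tcp_example __read_mostly = {
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.undo_cwnd	= tcp_reno_undo_cwnd,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *	};
 *
 *	static int __init tcp_example_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *	module_init(tcp_example_register);
 */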

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module gets removed entirely.
	 *
	 * A try_module_get() should fail by now, as our module is
	 * in the "going" state: no refs are held any more and the
	 * module_exit() handler is being called.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
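
/* Continuing the hypothetical "example" module sketched above, the
 * matching exit hook would be:
 *
 *	static void __exit tcp_example_unregister(void)
 *	{
 *		tcp_unregister_congestion_control(&tcp_example);
 *	}
 *	module_exit(tcp_example_unregister);
 *
 * Sockets still using the algorithm hold a module reference, so the
 * exit hook cannot run until they are gone, per the comment above.
 */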

u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca)
{
	const struct tcp_congestion_ops *ca;
	u32 key = TCP_CA_UNSPEC;

	might_sleep();

	rcu_read_lock();
	ca = __tcp_ca_find_autoload(name);
	if (ca) {
		key = ca->key;
		*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
	}
	rcu_read_unlock();

	return key;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_key_by_name);
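
/* Usage sketch (an assumed caller, not taken from this file): resolving
 * a name to its key outside of RCU context, e.g. while parsing a
 * configuration attribute:
 *
 *	bool ecn_ca = false;
 *	u32 key = tcp_ca_get_key_by_name("reno", &ecn_ca);
 *
 *	if (key == TCP_CA_UNSPEC)
 *		return -ENOENT;
 */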

char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	const struct tcp_congestion_ops *ca;
	char *ret = NULL;

	rcu_read_lock();
	ca = tcp_ca_find_key(key);
	if (ca)
		ret = strncpy(buffer, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_name_by_key);

/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (likely(try_module_get(ca->owner))) {
			icsk->icsk_ca_ops = ca;
			goto out;
		}
		/* Fall back to the next available algorithm; the
		 * guaranteed last resort on this list is Reno.
		 */
	}
out:
	rcu_read_unlock();
	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));

	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

void tcp_init_congestion_control(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
	if (tcp_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

static void tcp_reinit_congestion_control(struct sock *sk,
					  const struct tcp_congestion_ops *ca)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = ca;
	icsk->icsk_ca_setsockopt = 1;
	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));

	if (sk->sk_state != TCP_CLOSE)
		tcp_init_congestion_control(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		spin_unlock(&tcp_cong_list_lock);

		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		ca->flags |= TCP_CONG_NON_RESTRICTED;	/* default is always allowed */
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
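
/* Admin-facing side (a sketch, assuming the standard sysctl path): the
 * default moved to the head of the list here is what
 * tcp_assign_congestion_control() hands to new sockets, and is normally
 * changed with e.g.:
 *
 *	# sysctl -w net.ipv4.tcp_congestion_control=reno
 */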

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}
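
/* The space-separated list built here is what userspace reads back
 * (assuming the standard proc path) from:
 *
 *	/proc/sys/net/ipv4/tcp_available_congestion_control
 */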

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;
	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion control */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1 check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2 clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3 mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}
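
/* Counterpart of tcp_get_allowed_congestion_control(); with the
 * standard proc paths this backs writes such as (a hedged example):
 *
 *	# echo "reno cubic" > /proc/sys/net/ipv4/tcp_allowed_congestion_control
 *
 * The three passes above make the update all-or-nothing: one bad name
 * rejects the whole list before any flags are touched.
 */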

/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;
	int err = 0;

	if (icsk->icsk_ca_dst_locked)
		return -EPERM;

	rcu_read_lock();
	ca = __tcp_ca_find_autoload(name);
	/* No change when asking for the existing value */
	if (ca == icsk->icsk_ca_ops) {
		icsk->icsk_ca_setsockopt = 1;
		goto out;
	}
	if (!ca)
		err = -ENOENT;
	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
		   ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)))
		err = -EPERM;
	else if (!try_module_get(ca->owner))
		err = -EBUSY;
	else
		tcp_reinit_congestion_control(sk, ca);
out:
	rcu_read_unlock();
	return err;
}
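
/* Userspace counterpart (a minimal sketch using the standard sockets
 * API; error handling omitted):
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
 *		   "reno", strlen("reno"));
 *
 * Unprivileged callers may only select non-restricted algorithms, as
 * enforced above.
 */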

/* Slow start is used when the congestion window is no greater than the slow
 * start threshold. We base it on RFC 2581 and also handle stretch ACKs
 * properly. We do not implement RFC 3465 Appropriate Byte Counting (ABC)
 * per se but something better ;) a packet is only considered (s)acked in
 * its entirety to defend against the ACK attacks described in the RFC.
 * Slow start processes a stretch ACK of degree N as if N acks of degree 1
 * are received back to back, except ABC caps N to 2. Slow start exits when
 * cwnd grows over ssthresh and returns the leftover acks to adjust cwnd
 * in congestion avoidance mode.
 */
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);

	acked -= cwnd - tp->snd_cwnd;
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);

	return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
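
/* Worked example of the arithmetic above: with snd_cwnd = 8,
 * snd_ssthresh = 10 and acked = 5, cwnd is capped at 10, two of the
 * five acks are consumed growing the window, and 3 are returned so
 * the caller can continue in congestion avoidance mode.
 */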

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
 * for every packet that was ACKed.
 */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
	/* If credits accumulated at a higher w, apply them gently now. */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tp->snd_cwnd++;
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		u32 delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tp->snd_cwnd += delta;
	}
	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
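
/* Worked example: with w = snd_cwnd = 10, snd_cwnd_cnt = 7 and
 * acked = 3, the counter reaches 10, so delta = 1: snd_cwnd grows to 11
 * and snd_cwnd_cnt wraps to 0. That is one full window of acks per
 * window increment, i.e. roughly +1 MSS per RTT.
 */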

/*
 * TCP Reno congestion control
 * This is a special case, used as the fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In "safe" area, increase. */
	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* In dangerous area, increase slowly. */
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

u32 tcp_reno_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd, tp->snd_ssthresh << 1);
}
EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);
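
/* Worked example for the two Reno helpers above: on loss with
 * snd_cwnd = 20, tcp_reno_ssthresh() returns 10; if the loss later
 * proves spurious, tcp_reno_undo_cwnd() restores
 * max(snd_cwnd, snd_ssthresh << 1) = 20.
 */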

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
};
458