xref: /openbmc/linux/net/ipv4/tcp_cong.c (revision a12a601e)
/*
 * Pluggable TCP congestion control support and NewReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <net/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement ssthresh and cong_avoid ops */
	if (!ca->ssthresh || !ca->cong_avoid) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	spin_lock(&tcp_cong_list_lock);
	if (tcp_ca_find(ca->name)) {
		pr_notice("%s already registered\n", ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_info("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);
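
/*
 * Editorial illustration (not part of this file): a minimal sketch of
 * how a congestion control module might use the registration API above.
 * The tcp_example_* names are hypothetical; it reuses the exported Reno
 * helpers defined later in this file, since ssthresh and cong_avoid are
 * the only mandatory ops.
 */
#if 0	/* illustrative only, compiled out */
static struct tcp_congestion_ops tcp_example __read_mostly = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,	/* halve cwnd on loss */
	.cong_avoid	= tcp_reno_cong_avoid,	/* slow start + AIMD */
};

static int __init tcp_example_register(void)
{
	/* Fails with -EINVAL or -EEXIST as enforced above */
	return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_unregister(void)
{
	/* Safe only once no socket holds a module reference */
	tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_register);
module_exit(tcp_example_unregister);
MODULE_LICENSE("GPL");
#endif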

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done until all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (likely(try_module_get(ca->owner))) {
			icsk->icsk_ca_ops = ca;
			goto out;
		}
		/* Fall back to the next available one. Reno, always on
		 * this list, is the guaranteed final fallback.
		 */
	}
out:
	rcu_read_unlock();

	/* Clear out private data before diag gets it; the ca has not
	 * been initialized yet.
	 */
	if (ca->get_info)
		memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
}

void tcp_init_congestion_control(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		spin_unlock(&tcp_cong_list_lock);

		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		ca->flags |= TCP_CONG_NON_RESTRICTED;	/* default is always allowed */
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
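
/*
 * Editorial illustration (not part of this file): the handler above is
 * reached from userspace via the net.ipv4.tcp_congestion_control sysctl.
 * A minimal sketch of switching the system default, assuming the caller
 * is privileged enough to write the procfs file (and has CAP_NET_ADMIN
 * if a module must be autoloaded):
 */
#if 0	/* illustrative userspace code */
#include <stdio.h>

static int set_default_cc(const char *name)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_congestion_control", "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", name);
	return fclose(f);	/* 0 on success */
}
#endif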

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;
	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build string with list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion controls */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1: check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2: clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3: mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}

/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;
	int err = 0;

	rcu_read_lock();
	ca = tcp_ca_find(name);

	/* No change: asking for the existing value */
	if (ca == icsk->icsk_ca_ops)
		goto out;

#ifdef CONFIG_MODULES
	/* Not found: attempt to autoload the module */
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	if (!ca)
		err = -ENOENT;

	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
		   ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)))
		err = -EPERM;

	else if (!try_module_get(ca->owner))
		err = -EBUSY;

	else {
		tcp_cleanup_congestion_control(sk);
		icsk->icsk_ca_ops = ca;

		if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
			icsk->icsk_ca_ops->init(sk);
	}
out:
	rcu_read_unlock();
	return err;
}
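
/*
 * Editorial illustration (not part of this file): the per-socket path
 * above is driven by the standard TCP_CONGESTION socket option. A
 * minimal userspace sketch; "reno" needs no privilege because it is
 * flagged TCP_CONG_NON_RESTRICTED:
 */
#if 0	/* illustrative userspace code */
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int use_reno(int fd)
{
	const char name[] = "reno";

	/* fd must refer to a TCP socket */
	return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
			  name, strlen(name));
}
#endif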

/* Slow start is used when the congestion window is no greater than the slow
 * start threshold. We base it on RFC 2581 and also handle stretch ACKs
 * properly. We do not implement RFC 3465 Appropriate Byte Counting (ABC) per
 * se but something better ;) a packet is only considered (s)acked in its
 * entirety to defend against the ACK attacks described in the RFC. Slow start
 * processes a stretch ACK of degree N as if N acks of degree 1 are received
 * back to back, except ABC caps N to 2. Slow start exits when cwnd grows over
 * ssthresh, leaving the leftover acks to adjust cwnd in congestion avoidance
 * mode.
 */
void tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = tp->snd_cwnd + acked;

	if (cwnd > tp->snd_ssthresh)
		cwnd = tp->snd_ssthresh + 1;
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
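
/*
 * Editorial worked example (not part of this file): with snd_cwnd = 10,
 * snd_ssthresh = 16 and a stretch ACK covering acked = 8 packets,
 * cwnd = 10 + 8 = 18 overshoots ssthresh and is cut back to
 * 16 + 1 = 17; snd_cwnd_clamp still bounds the final value.
 */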

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or, more generally,
 * 1 / w per ACK).
 */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
{
	if (tp->snd_cwnd_cnt >= w) {
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
		tp->snd_cwnd_cnt = 0;
	} else {
		tp->snd_cwnd_cnt++;
	}
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
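
/*
 * Editorial worked example (not part of this file): with
 * w = snd_cwnd = 10, snd_cwnd_cnt must count ten ACKs before snd_cwnd
 * is raised to 11 and the counter resets, so cwnd grows by about one
 * segment per round trip, the classic additive increase.
 */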

/*
 * TCP Reno congestion control
 * This is a special case, used as the fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In "safe" area, increase. */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp, acked);
	/* In dangerous area, increase slowly. */
	else
		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
};
356