1 /* 2 * Plugable TCP congestion control support and newReno 3 * congestion control. 4 * Based on ideas from I/O scheduler support and Web100. 5 * 6 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org> 7 */ 8 9 #define pr_fmt(fmt) "TCP: " fmt 10 11 #include <linux/module.h> 12 #include <linux/mm.h> 13 #include <linux/types.h> 14 #include <linux/list.h> 15 #include <linux/gfp.h> 16 #include <net/tcp.h> 17 18 int sysctl_tcp_max_ssthresh = 0; 19 20 static DEFINE_SPINLOCK(tcp_cong_list_lock); 21 static LIST_HEAD(tcp_cong_list); 22 23 /* Simple linear search, don't expect many entries! */ 24 static struct tcp_congestion_ops *tcp_ca_find(const char *name) 25 { 26 struct tcp_congestion_ops *e; 27 28 list_for_each_entry_rcu(e, &tcp_cong_list, list) { 29 if (strcmp(e->name, name) == 0) 30 return e; 31 } 32 33 return NULL; 34 } 35 36 /* 37 * Attach new congestion control algorithm to the list 38 * of available options. 39 */ 40 int tcp_register_congestion_control(struct tcp_congestion_ops *ca) 41 { 42 int ret = 0; 43 44 /* all algorithms must implement ssthresh and cong_avoid ops */ 45 if (!ca->ssthresh || !ca->cong_avoid) { 46 pr_err("%s does not implement required ops\n", ca->name); 47 return -EINVAL; 48 } 49 50 spin_lock(&tcp_cong_list_lock); 51 if (tcp_ca_find(ca->name)) { 52 pr_notice("%s already registered\n", ca->name); 53 ret = -EEXIST; 54 } else { 55 list_add_tail_rcu(&ca->list, &tcp_cong_list); 56 pr_info("%s registered\n", ca->name); 57 } 58 spin_unlock(&tcp_cong_list_lock); 59 60 return ret; 61 } 62 EXPORT_SYMBOL_GPL(tcp_register_congestion_control); 63 64 /* 65 * Remove congestion control algorithm, called from 66 * the module's remove function. Module ref counts are used 67 * to ensure that this can't be done till all sockets using 68 * that method are closed. 69 */ 70 void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca) 71 { 72 spin_lock(&tcp_cong_list_lock); 73 list_del_rcu(&ca->list); 74 spin_unlock(&tcp_cong_list_lock); 75 } 76 EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control); 77 78 /* Assign choice of congestion control. */ 79 void tcp_init_congestion_control(struct sock *sk) 80 { 81 struct inet_connection_sock *icsk = inet_csk(sk); 82 struct tcp_congestion_ops *ca; 83 84 /* if no choice made yet assign the current value set as default */ 85 if (icsk->icsk_ca_ops == &tcp_init_congestion_ops) { 86 rcu_read_lock(); 87 list_for_each_entry_rcu(ca, &tcp_cong_list, list) { 88 if (try_module_get(ca->owner)) { 89 icsk->icsk_ca_ops = ca; 90 break; 91 } 92 93 /* fallback to next available */ 94 } 95 rcu_read_unlock(); 96 } 97 98 if (icsk->icsk_ca_ops->init) 99 icsk->icsk_ca_ops->init(sk); 100 } 101 102 /* Manage refcounts on socket close. */ 103 void tcp_cleanup_congestion_control(struct sock *sk) 104 { 105 struct inet_connection_sock *icsk = inet_csk(sk); 106 107 if (icsk->icsk_ca_ops->release) 108 icsk->icsk_ca_ops->release(sk); 109 module_put(icsk->icsk_ca_ops->owner); 110 } 111 112 /* Used by sysctl to change default congestion control */ 113 int tcp_set_default_congestion_control(const char *name) 114 { 115 struct tcp_congestion_ops *ca; 116 int ret = -ENOENT; 117 118 spin_lock(&tcp_cong_list_lock); 119 ca = tcp_ca_find(name); 120 #ifdef CONFIG_MODULES 121 if (!ca && capable(CAP_NET_ADMIN)) { 122 spin_unlock(&tcp_cong_list_lock); 123 124 request_module("tcp_%s", name); 125 spin_lock(&tcp_cong_list_lock); 126 ca = tcp_ca_find(name); 127 } 128 #endif 129 130 if (ca) { 131 ca->flags |= TCP_CONG_NON_RESTRICTED; /* default is always allowed */ 132 list_move(&ca->list, &tcp_cong_list); 133 ret = 0; 134 } 135 spin_unlock(&tcp_cong_list_lock); 136 137 return ret; 138 } 139 140 /* Set default value from kernel configuration at bootup */ 141 static int __init tcp_congestion_default(void) 142 { 143 return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG); 144 } 145 late_initcall(tcp_congestion_default); 146 147 148 /* Build string with list of available congestion control values */ 149 void tcp_get_available_congestion_control(char *buf, size_t maxlen) 150 { 151 struct tcp_congestion_ops *ca; 152 size_t offs = 0; 153 154 rcu_read_lock(); 155 list_for_each_entry_rcu(ca, &tcp_cong_list, list) { 156 offs += snprintf(buf + offs, maxlen - offs, 157 "%s%s", 158 offs == 0 ? "" : " ", ca->name); 159 160 } 161 rcu_read_unlock(); 162 } 163 164 /* Get current default congestion control */ 165 void tcp_get_default_congestion_control(char *name) 166 { 167 struct tcp_congestion_ops *ca; 168 /* We will always have reno... */ 169 BUG_ON(list_empty(&tcp_cong_list)); 170 171 rcu_read_lock(); 172 ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list); 173 strncpy(name, ca->name, TCP_CA_NAME_MAX); 174 rcu_read_unlock(); 175 } 176 177 /* Built list of non-restricted congestion control values */ 178 void tcp_get_allowed_congestion_control(char *buf, size_t maxlen) 179 { 180 struct tcp_congestion_ops *ca; 181 size_t offs = 0; 182 183 *buf = '\0'; 184 rcu_read_lock(); 185 list_for_each_entry_rcu(ca, &tcp_cong_list, list) { 186 if (!(ca->flags & TCP_CONG_NON_RESTRICTED)) 187 continue; 188 offs += snprintf(buf + offs, maxlen - offs, 189 "%s%s", 190 offs == 0 ? "" : " ", ca->name); 191 192 } 193 rcu_read_unlock(); 194 } 195 196 /* Change list of non-restricted congestion control */ 197 int tcp_set_allowed_congestion_control(char *val) 198 { 199 struct tcp_congestion_ops *ca; 200 char *saved_clone, *clone, *name; 201 int ret = 0; 202 203 saved_clone = clone = kstrdup(val, GFP_USER); 204 if (!clone) 205 return -ENOMEM; 206 207 spin_lock(&tcp_cong_list_lock); 208 /* pass 1 check for bad entries */ 209 while ((name = strsep(&clone, " ")) && *name) { 210 ca = tcp_ca_find(name); 211 if (!ca) { 212 ret = -ENOENT; 213 goto out; 214 } 215 } 216 217 /* pass 2 clear old values */ 218 list_for_each_entry_rcu(ca, &tcp_cong_list, list) 219 ca->flags &= ~TCP_CONG_NON_RESTRICTED; 220 221 /* pass 3 mark as allowed */ 222 while ((name = strsep(&val, " ")) && *name) { 223 ca = tcp_ca_find(name); 224 WARN_ON(!ca); 225 if (ca) 226 ca->flags |= TCP_CONG_NON_RESTRICTED; 227 } 228 out: 229 spin_unlock(&tcp_cong_list_lock); 230 kfree(saved_clone); 231 232 return ret; 233 } 234 235 236 /* Change congestion control for socket */ 237 int tcp_set_congestion_control(struct sock *sk, const char *name) 238 { 239 struct inet_connection_sock *icsk = inet_csk(sk); 240 struct tcp_congestion_ops *ca; 241 int err = 0; 242 243 rcu_read_lock(); 244 ca = tcp_ca_find(name); 245 246 /* no change asking for existing value */ 247 if (ca == icsk->icsk_ca_ops) 248 goto out; 249 250 #ifdef CONFIG_MODULES 251 /* not found attempt to autoload module */ 252 if (!ca && capable(CAP_NET_ADMIN)) { 253 rcu_read_unlock(); 254 request_module("tcp_%s", name); 255 rcu_read_lock(); 256 ca = tcp_ca_find(name); 257 } 258 #endif 259 if (!ca) 260 err = -ENOENT; 261 262 else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || 263 ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))) 264 err = -EPERM; 265 266 else if (!try_module_get(ca->owner)) 267 err = -EBUSY; 268 269 else { 270 tcp_cleanup_congestion_control(sk); 271 icsk->icsk_ca_ops = ca; 272 273 if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init) 274 icsk->icsk_ca_ops->init(sk); 275 } 276 out: 277 rcu_read_unlock(); 278 return err; 279 } 280 281 /* RFC2861 Check whether we are limited by application or congestion window 282 * This is the inverse of cwnd check in tcp_tso_should_defer 283 */ 284 bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) 285 { 286 const struct tcp_sock *tp = tcp_sk(sk); 287 u32 left; 288 289 if (in_flight >= tp->snd_cwnd) 290 return true; 291 292 left = tp->snd_cwnd - in_flight; 293 if (sk_can_gso(sk) && 294 left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd && 295 left * tp->mss_cache < sk->sk_gso_max_size && 296 left < sk->sk_gso_max_segs) 297 return true; 298 return left <= tcp_max_tso_deferred_mss(tp); 299 } 300 EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited); 301 302 /* 303 * Slow start is used when congestion window is less than slow start 304 * threshold. This version implements the basic RFC2581 version 305 * and optionally supports: 306 * RFC3742 Limited Slow Start - growth limited to max_ssthresh 307 * RFC3465 Appropriate Byte Counting - growth limited by bytes acknowledged 308 */ 309 void tcp_slow_start(struct tcp_sock *tp) 310 { 311 int cnt; /* increase in packets */ 312 unsigned int delta = 0; 313 u32 snd_cwnd = tp->snd_cwnd; 314 315 if (unlikely(!snd_cwnd)) { 316 pr_err_once("snd_cwnd is nul, please report this bug.\n"); 317 snd_cwnd = 1U; 318 } 319 320 if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh) 321 cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */ 322 else 323 cnt = snd_cwnd; /* exponential increase */ 324 325 tp->snd_cwnd_cnt += cnt; 326 while (tp->snd_cwnd_cnt >= snd_cwnd) { 327 tp->snd_cwnd_cnt -= snd_cwnd; 328 delta++; 329 } 330 tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp); 331 } 332 EXPORT_SYMBOL_GPL(tcp_slow_start); 333 334 /* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */ 335 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w) 336 { 337 if (tp->snd_cwnd_cnt >= w) { 338 if (tp->snd_cwnd < tp->snd_cwnd_clamp) 339 tp->snd_cwnd++; 340 tp->snd_cwnd_cnt = 0; 341 } else { 342 tp->snd_cwnd_cnt++; 343 } 344 } 345 EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai); 346 347 /* 348 * TCP Reno congestion control 349 * This is special case used for fallback as well. 350 */ 351 /* This is Jacobson's slow start and congestion avoidance. 352 * SIGCOMM '88, p. 328. 353 */ 354 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) 355 { 356 struct tcp_sock *tp = tcp_sk(sk); 357 358 if (!tcp_is_cwnd_limited(sk, in_flight)) 359 return; 360 361 /* In "safe" area, increase. */ 362 if (tp->snd_cwnd <= tp->snd_ssthresh) 363 tcp_slow_start(tp); 364 /* In dangerous area, increase slowly. */ 365 else 366 tcp_cong_avoid_ai(tp, tp->snd_cwnd); 367 } 368 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid); 369 370 /* Slow start threshold is half the congestion window (min 2) */ 371 u32 tcp_reno_ssthresh(struct sock *sk) 372 { 373 const struct tcp_sock *tp = tcp_sk(sk); 374 return max(tp->snd_cwnd >> 1U, 2U); 375 } 376 EXPORT_SYMBOL_GPL(tcp_reno_ssthresh); 377 378 /* Lower bound on congestion window with halving. */ 379 u32 tcp_reno_min_cwnd(const struct sock *sk) 380 { 381 const struct tcp_sock *tp = tcp_sk(sk); 382 return tp->snd_ssthresh/2; 383 } 384 EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd); 385 386 struct tcp_congestion_ops tcp_reno = { 387 .flags = TCP_CONG_NON_RESTRICTED, 388 .name = "reno", 389 .owner = THIS_MODULE, 390 .ssthresh = tcp_reno_ssthresh, 391 .cong_avoid = tcp_reno_cong_avoid, 392 .min_cwnd = tcp_reno_min_cwnd, 393 }; 394 395 /* Initial congestion control used (until SYN) 396 * really reno under another name so we can tell difference 397 * during tcp_set_default_congestion_control 398 */ 399 struct tcp_congestion_ops tcp_init_congestion_ops = { 400 .name = "", 401 .owner = THIS_MODULE, 402 .ssthresh = tcp_reno_ssthresh, 403 .cong_avoid = tcp_reno_cong_avoid, 404 .min_cwnd = tcp_reno_min_cwnd, 405 }; 406 EXPORT_SYMBOL_GPL(tcp_init_congestion_ops); 407