1 /* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Implementation of the Transmission Control Protocol(TCP). 7 * 8 * Authors: Ross Biro 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk> 11 * Corey Minyard <wf-rch!minyard@relay.EU.net> 12 * Florian La Roche, <flla@stud.uni-sb.de> 13 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> 14 * Linus Torvalds, <torvalds@cs.helsinki.fi> 15 * Alan Cox, <gw4pts@gw4pts.ampr.org> 16 * Matthew Dillon, <dillon@apollo.west.oic.com> 17 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 18 * Jorge Cwik, <jorge@laser.satlink.net> 19 * 20 * Fixes: 21 * Alan Cox : Numerous verify_area() calls 22 * Alan Cox : Set the ACK bit on a reset 23 * Alan Cox : Stopped it crashing if it closed while 24 * sk->inuse=1 and was trying to connect 25 * (tcp_err()). 26 * Alan Cox : All icmp error handling was broken 27 * pointers passed where wrong and the 28 * socket was looked up backwards. Nobody 29 * tested any icmp error code obviously. 30 * Alan Cox : tcp_err() now handled properly. It 31 * wakes people on errors. poll 32 * behaves and the icmp error race 33 * has gone by moving it into sock.c 34 * Alan Cox : tcp_send_reset() fixed to work for 35 * everything not just packets for 36 * unknown sockets. 37 * Alan Cox : tcp option processing. 38 * Alan Cox : Reset tweaked (still not 100%) [Had 39 * syn rule wrong] 40 * Herp Rosmanith : More reset fixes 41 * Alan Cox : No longer acks invalid rst frames. 42 * Acking any kind of RST is right out. 43 * Alan Cox : Sets an ignore me flag on an rst 44 * receive otherwise odd bits of prattle 45 * escape still 46 * Alan Cox : Fixed another acking RST frame bug. 47 * Should stop LAN workplace lockups. 48 * Alan Cox : Some tidyups using the new skb list 49 * facilities 50 * Alan Cox : sk->keepopen now seems to work 51 * Alan Cox : Pulls options out correctly on accepts 52 * Alan Cox : Fixed assorted sk->rqueue->next errors 53 * Alan Cox : PSH doesn't end a TCP read. Switched a 54 * bit to skb ops. 55 * Alan Cox : Tidied tcp_data to avoid a potential 56 * nasty. 57 * Alan Cox : Added some better commenting, as the 58 * tcp is hard to follow 59 * Alan Cox : Removed incorrect check for 20 * psh 60 * Michael O'Reilly : ack < copied bug fix. 61 * Johannes Stille : Misc tcp fixes (not all in yet). 62 * Alan Cox : FIN with no memory -> CRASH 63 * Alan Cox : Added socket option proto entries. 64 * Also added awareness of them to accept. 65 * Alan Cox : Added TCP options (SOL_TCP) 66 * Alan Cox : Switched wakeup calls to callbacks, 67 * so the kernel can layer network 68 * sockets. 69 * Alan Cox : Use ip_tos/ip_ttl settings. 70 * Alan Cox : Handle FIN (more) properly (we hope). 71 * Alan Cox : RST frames sent on unsynchronised 72 * state ack error. 73 * Alan Cox : Put in missing check for SYN bit. 74 * Alan Cox : Added tcp_select_window() aka NET2E 75 * window non shrink trick. 
76 * Alan Cox : Added a couple of small NET2E timer 77 * fixes 78 * Charles Hedrick : TCP fixes 79 * Toomas Tamm : TCP window fixes 80 * Alan Cox : Small URG fix to rlogin ^C ack fight 81 * Charles Hedrick : Rewrote most of it to actually work 82 * Linus : Rewrote tcp_read() and URG handling 83 * completely 84 * Gerhard Koerting: Fixed some missing timer handling 85 * Matthew Dillon : Reworked TCP machine states as per RFC 86 * Gerhard Koerting: PC/TCP workarounds 87 * Adam Caldwell : Assorted timer/timing errors 88 * Matthew Dillon : Fixed another RST bug 89 * Alan Cox : Move to kernel side addressing changes. 90 * Alan Cox : Beginning work on TCP fastpathing 91 * (not yet usable) 92 * Arnt Gulbrandsen: Turbocharged tcp_check() routine. 93 * Alan Cox : TCP fast path debugging 94 * Alan Cox : Window clamping 95 * Michael Riepe : Bug in tcp_check() 96 * Matt Dillon : More TCP improvements and RST bug fixes 97 * Matt Dillon : Yet more small nasties remove from the 98 * TCP code (Be very nice to this man if 99 * tcp finally works 100%) 8) 100 * Alan Cox : BSD accept semantics. 101 * Alan Cox : Reset on closedown bug. 102 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto(). 103 * Michael Pall : Handle poll() after URG properly in 104 * all cases. 105 * Michael Pall : Undo the last fix in tcp_read_urg() 106 * (multi URG PUSH broke rlogin). 107 * Michael Pall : Fix the multi URG PUSH problem in 108 * tcp_readable(), poll() after URG 109 * works now. 110 * Michael Pall : recv(...,MSG_OOB) never blocks in the 111 * BSD api. 112 * Alan Cox : Changed the semantics of sk->socket to 113 * fix a race and a signal problem with 114 * accept() and async I/O. 115 * Alan Cox : Relaxed the rules on tcp_sendto(). 116 * Yury Shevchuk : Really fixed accept() blocking problem. 117 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for 118 * clients/servers which listen in on 119 * fixed ports. 120 * Alan Cox : Cleaned the above up and shrank it to 121 * a sensible code size. 122 * Alan Cox : Self connect lockup fix. 123 * Alan Cox : No connect to multicast. 124 * Ross Biro : Close unaccepted children on master 125 * socket close. 126 * Alan Cox : Reset tracing code. 127 * Alan Cox : Spurious resets on shutdown. 128 * Alan Cox : Giant 15 minute/60 second timer error 129 * Alan Cox : Small whoops in polling before an 130 * accept. 131 * Alan Cox : Kept the state trace facility since 132 * it's handy for debugging. 133 * Alan Cox : More reset handler fixes. 134 * Alan Cox : Started rewriting the code based on 135 * the RFC's for other useful protocol 136 * references see: Comer, KA9Q NOS, and 137 * for a reference on the difference 138 * between specifications and how BSD 139 * works see the 4.4lite source. 140 * A.N.Kuznetsov : Don't time wait on completion of tidy 141 * close. 142 * Linus Torvalds : Fin/Shutdown & copied_seq changes. 143 * Linus Torvalds : Fixed BSD port reuse to work first syn 144 * Alan Cox : Reimplemented timers as per the RFC 145 * and using multiple timers for sanity. 146 * Alan Cox : Small bug fixes, and a lot of new 147 * comments. 148 * Alan Cox : Fixed dual reader crash by locking 149 * the buffers (much like datagram.c) 150 * Alan Cox : Fixed stuck sockets in probe. A probe 151 * now gets fed up of retrying without 152 * (even a no space) answer. 153 * Alan Cox : Extracted closing code better 154 * Alan Cox : Fixed the closing state machine to 155 * resemble the RFC. 156 * Alan Cox : More 'per spec' fixes. 157 * Jorge Cwik : Even faster checksumming. 
158 * Alan Cox : tcp_data() doesn't ack illegal PSH 159 * only frames. At least one pc tcp stack 160 * generates them. 161 * Alan Cox : Cache last socket. 162 * Alan Cox : Per route irtt. 163 * Matt Day : poll()->select() match BSD precisely on error 164 * Alan Cox : New buffers 165 * Marc Tamsky : Various sk->prot->retransmits and 166 * sk->retransmits misupdating fixed. 167 * Fixed tcp_write_timeout: stuck close, 168 * and TCP syn retries gets used now. 169 * Mark Yarvis : In tcp_read_wakeup(), don't send an 170 * ack if state is TCP_CLOSED. 171 * Alan Cox : Look up device on a retransmit - routes may 172 * change. Doesn't yet cope with MSS shrink right 173 * but it's a start! 174 * Marc Tamsky : Closing in closing fixes. 175 * Mike Shaver : RFC1122 verifications. 176 * Alan Cox : rcv_saddr errors. 177 * Alan Cox : Block double connect(). 178 * Alan Cox : Small hooks for enSKIP. 179 * Alexey Kuznetsov: Path MTU discovery. 180 * Alan Cox : Support soft errors. 181 * Alan Cox : Fix MTU discovery pathological case 182 * when the remote claims no mtu! 183 * Marc Tamsky : TCP_CLOSE fix. 184 * Colin (G3TNE) : Send a reset on syn ack replies in 185 * window but wrong (fixes NT lpd problems) 186 * Pedro Roque : Better TCP window handling, delayed ack. 187 * Joerg Reuter : No modification of locked buffers in 188 * tcp_do_retransmit() 189 * Eric Schenk : Changed receiver side silly window 190 * avoidance algorithm to BSD style 191 * algorithm. This doubles throughput 192 * against machines running Solaris, 193 * and seems to result in general 194 * improvement. 195 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD 196 * Willy Konynenberg : Transparent proxying support. 197 * Mike McLagan : Routing by source 198 * Keith Owens : Do proper merging with partial SKB's in 199 * tcp_do_sendmsg to avoid burstiness. 200 * Eric Schenk : Fix fast close down bug with 201 * shutdown() followed by close(). 202 * Andi Kleen : Make poll agree with SIGIO 203 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and 204 * lingertime == 0 (RFC 793 ABORT Call) 205 * Hirokazu Takahashi : Use copy_from_user() instead of 206 * csum_and_copy_from_user() if possible. 207 * 208 * This program is free software; you can redistribute it and/or 209 * modify it under the terms of the GNU General Public License 210 * as published by the Free Software Foundation; either version 211 * 2 of the License, or(at your option) any later version. 212 * 213 * Description of States: 214 * 215 * TCP_SYN_SENT sent a connection request, waiting for ack 216 * 217 * TCP_SYN_RECV received a connection request, sent ack, 218 * waiting for final ack in three-way handshake. 219 * 220 * TCP_ESTABLISHED connection established 221 * 222 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete 223 * transmission of remaining buffered data 224 * 225 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote 226 * to shutdown 227 * 228 * TCP_CLOSING both sides have shutdown but we still have 229 * data we have to finish sending 230 * 231 * TCP_TIME_WAIT timeout to catch resent junk before entering 232 * closed, can only be entered from FIN_WAIT2 233 * or CLOSING. 
Required because the other end 234 * may not have gotten our last ACK causing it 235 * to retransmit the data packet (which we ignore) 236 * 237 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for 238 * us to finish writing our data and to shutdown 239 * (we have to close() to move on to LAST_ACK) 240 * 241 * TCP_LAST_ACK out side has shutdown after remote has 242 * shutdown. There may still be data in our 243 * buffer that we have to finish sending 244 * 245 * TCP_CLOSE socket is finished 246 */ 247 248 #define pr_fmt(fmt) "TCP: " fmt 249 250 #include <crypto/hash.h> 251 #include <linux/kernel.h> 252 #include <linux/module.h> 253 #include <linux/types.h> 254 #include <linux/fcntl.h> 255 #include <linux/poll.h> 256 #include <linux/inet_diag.h> 257 #include <linux/init.h> 258 #include <linux/fs.h> 259 #include <linux/skbuff.h> 260 #include <linux/scatterlist.h> 261 #include <linux/splice.h> 262 #include <linux/net.h> 263 #include <linux/socket.h> 264 #include <linux/random.h> 265 #include <linux/bootmem.h> 266 #include <linux/highmem.h> 267 #include <linux/swap.h> 268 #include <linux/cache.h> 269 #include <linux/err.h> 270 #include <linux/time.h> 271 #include <linux/slab.h> 272 273 #include <net/icmp.h> 274 #include <net/inet_common.h> 275 #include <net/tcp.h> 276 #include <net/xfrm.h> 277 #include <net/ip.h> 278 #include <net/sock.h> 279 280 #include <linux/uaccess.h> 281 #include <asm/ioctls.h> 282 #include <net/busy_poll.h> 283 284 int sysctl_tcp_min_tso_segs __read_mostly = 2; 285 286 int sysctl_tcp_autocorking __read_mostly = 1; 287 288 struct percpu_counter tcp_orphan_count; 289 EXPORT_SYMBOL_GPL(tcp_orphan_count); 290 291 long sysctl_tcp_mem[3] __read_mostly; 292 int sysctl_tcp_wmem[3] __read_mostly; 293 int sysctl_tcp_rmem[3] __read_mostly; 294 295 EXPORT_SYMBOL(sysctl_tcp_mem); 296 EXPORT_SYMBOL(sysctl_tcp_rmem); 297 EXPORT_SYMBOL(sysctl_tcp_wmem); 298 299 atomic_long_t tcp_memory_allocated; /* Current allocated memory. */ 300 EXPORT_SYMBOL(tcp_memory_allocated); 301 302 /* 303 * Current number of TCP sockets. 304 */ 305 struct percpu_counter tcp_sockets_allocated; 306 EXPORT_SYMBOL(tcp_sockets_allocated); 307 308 /* 309 * TCP splice context 310 */ 311 struct tcp_splice_state { 312 struct pipe_inode_info *pipe; 313 size_t len; 314 unsigned int flags; 315 }; 316 317 /* 318 * Pressure flag: try to collapse. 319 * Technical note: it is used by multiple contexts non atomically. 320 * All the __sk_mem_schedule() is of this nature: accounting 321 * is strict, actions are advisory and have some latency. 
322 */ 323 int tcp_memory_pressure __read_mostly; 324 EXPORT_SYMBOL(tcp_memory_pressure); 325 326 void tcp_enter_memory_pressure(struct sock *sk) 327 { 328 if (!tcp_memory_pressure) { 329 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES); 330 tcp_memory_pressure = 1; 331 } 332 } 333 EXPORT_SYMBOL(tcp_enter_memory_pressure); 334 335 /* Convert seconds to retransmits based on initial and max timeout */ 336 static u8 secs_to_retrans(int seconds, int timeout, int rto_max) 337 { 338 u8 res = 0; 339 340 if (seconds > 0) { 341 int period = timeout; 342 343 res = 1; 344 while (seconds > period && res < 255) { 345 res++; 346 timeout <<= 1; 347 if (timeout > rto_max) 348 timeout = rto_max; 349 period += timeout; 350 } 351 } 352 return res; 353 } 354 355 /* Convert retransmits to seconds based on initial and max timeout */ 356 static int retrans_to_secs(u8 retrans, int timeout, int rto_max) 357 { 358 int period = 0; 359 360 if (retrans > 0) { 361 period = timeout; 362 while (--retrans) { 363 timeout <<= 1; 364 if (timeout > rto_max) 365 timeout = rto_max; 366 period += timeout; 367 } 368 } 369 return period; 370 } 371 372 /* Address-family independent initialization for a tcp_sock. 373 * 374 * NOTE: A lot of things set to zero explicitly by call to 375 * sk_alloc() so need not be done here. 376 */ 377 void tcp_init_sock(struct sock *sk) 378 { 379 struct inet_connection_sock *icsk = inet_csk(sk); 380 struct tcp_sock *tp = tcp_sk(sk); 381 382 tp->out_of_order_queue = RB_ROOT; 383 tcp_init_xmit_timers(sk); 384 tcp_prequeue_init(tp); 385 INIT_LIST_HEAD(&tp->tsq_node); 386 387 icsk->icsk_rto = TCP_TIMEOUT_INIT; 388 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); 389 minmax_reset(&tp->rtt_min, tcp_time_stamp, ~0U); 390 391 /* So many TCP implementations out there (incorrectly) count the 392 * initial SYN frame in their delayed-ACK and congestion control 393 * algorithms that we must have the following bandaid to talk 394 * efficiently to them. -DaveM 395 */ 396 tp->snd_cwnd = TCP_INIT_CWND; 397 398 /* There's a bubble in the pipe until at least the first ACK. */ 399 tp->app_limited = ~0U; 400 401 /* See draft-stevens-tcpca-spec-01 for discussion of the 402 * initialization of these values. 403 */ 404 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 405 tp->snd_cwnd_clamp = ~0; 406 tp->mss_cache = TCP_MSS_DEFAULT; 407 408 tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering; 409 tcp_assign_congestion_control(sk); 410 411 tp->tsoffset = 0; 412 413 sk->sk_state = TCP_CLOSE; 414 415 sk->sk_write_space = sk_stream_write_space; 416 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); 417 418 icsk->icsk_sync_mss = tcp_sync_mss; 419 420 sk->sk_sndbuf = sysctl_tcp_wmem[1]; 421 sk->sk_rcvbuf = sysctl_tcp_rmem[1]; 422 423 sk_sockets_allocated_inc(sk); 424 } 425 EXPORT_SYMBOL(tcp_init_sock); 426 427 static void tcp_tx_timestamp(struct sock *sk, u16 tsflags, struct sk_buff *skb) 428 { 429 if (tsflags && skb) { 430 struct skb_shared_info *shinfo = skb_shinfo(skb); 431 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 432 433 sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags); 434 if (tsflags & SOF_TIMESTAMPING_TX_ACK) 435 tcb->txstamp_ack = 1; 436 if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) 437 shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1; 438 } 439 } 440 441 /* 442 * Wait for a TCP event. 443 * 444 * Note that we don't need to lock the socket, as the upper poll layers 445 * take care of normal races (between the test and the event) and we don't 446 * go look at any of the socket buffers directly. 
447 */ 448 unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) 449 { 450 unsigned int mask; 451 struct sock *sk = sock->sk; 452 const struct tcp_sock *tp = tcp_sk(sk); 453 int state; 454 455 sock_rps_record_flow(sk); 456 457 sock_poll_wait(file, sk_sleep(sk), wait); 458 459 state = sk_state_load(sk); 460 if (state == TCP_LISTEN) 461 return inet_csk_listen_poll(sk); 462 463 /* Socket is not locked. We are protected from async events 464 * by poll logic and correct handling of state changes 465 * made by other threads is impossible in any case. 466 */ 467 468 mask = 0; 469 470 /* 471 * POLLHUP is certainly not done right. But poll() doesn't 472 * have a notion of HUP in just one direction, and for a 473 * socket the read side is more interesting. 474 * 475 * Some poll() documentation says that POLLHUP is incompatible 476 * with the POLLOUT/POLLWR flags, so somebody should check this 477 * all. But careful, it tends to be safer to return too many 478 * bits than too few, and you can easily break real applications 479 * if you don't tell them that something has hung up! 480 * 481 * Check-me. 482 * 483 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and 484 * our fs/select.c). It means that after we received EOF, 485 * poll always returns immediately, making impossible poll() on write() 486 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP 487 * if and only if shutdown has been made in both directions. 488 * Actually, it is interesting to look how Solaris and DUX 489 * solve this dilemma. I would prefer, if POLLHUP were maskable, 490 * then we could set it on SND_SHUTDOWN. BTW examples given 491 * in Stevens' books assume exactly this behaviour, it explains 492 * why POLLHUP is incompatible with POLLOUT. --ANK 493 * 494 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent 495 * blocking on fresh not-connected or disconnected socket. --ANK 496 */ 497 if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) 498 mask |= POLLHUP; 499 if (sk->sk_shutdown & RCV_SHUTDOWN) 500 mask |= POLLIN | POLLRDNORM | POLLRDHUP; 501 502 /* Connected or passive Fast Open socket? */ 503 if (state != TCP_SYN_SENT && 504 (state != TCP_SYN_RECV || tp->fastopen_rsk)) { 505 int target = sock_rcvlowat(sk, 0, INT_MAX); 506 507 if (tp->urg_seq == tp->copied_seq && 508 !sock_flag(sk, SOCK_URGINLINE) && 509 tp->urg_data) 510 target++; 511 512 if (tp->rcv_nxt - tp->copied_seq >= target) 513 mask |= POLLIN | POLLRDNORM; 514 515 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { 516 if (sk_stream_is_writeable(sk)) { 517 mask |= POLLOUT | POLLWRNORM; 518 } else { /* send SIGIO later */ 519 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 520 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 521 522 /* Race breaker. If space is freed after 523 * wspace test but before the flags are set, 524 * IO signal will be lost. Memory barrier 525 * pairs with the input side. 
526 */ 527 smp_mb__after_atomic(); 528 if (sk_stream_is_writeable(sk)) 529 mask |= POLLOUT | POLLWRNORM; 530 } 531 } else 532 mask |= POLLOUT | POLLWRNORM; 533 534 if (tp->urg_data & TCP_URG_VALID) 535 mask |= POLLPRI; 536 } else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) { 537 /* Active TCP fastopen socket with defer_connect 538 * Return POLLOUT so application can call write() 539 * in order for kernel to generate SYN+data 540 */ 541 mask |= POLLOUT | POLLWRNORM; 542 } 543 /* This barrier is coupled with smp_wmb() in tcp_reset() */ 544 smp_rmb(); 545 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 546 mask |= POLLERR; 547 548 return mask; 549 } 550 EXPORT_SYMBOL(tcp_poll); 551 552 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) 553 { 554 struct tcp_sock *tp = tcp_sk(sk); 555 int answ; 556 bool slow; 557 558 switch (cmd) { 559 case SIOCINQ: 560 if (sk->sk_state == TCP_LISTEN) 561 return -EINVAL; 562 563 slow = lock_sock_fast(sk); 564 answ = tcp_inq(sk); 565 unlock_sock_fast(sk, slow); 566 break; 567 case SIOCATMARK: 568 answ = tp->urg_data && tp->urg_seq == tp->copied_seq; 569 break; 570 case SIOCOUTQ: 571 if (sk->sk_state == TCP_LISTEN) 572 return -EINVAL; 573 574 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) 575 answ = 0; 576 else 577 answ = tp->write_seq - tp->snd_una; 578 break; 579 case SIOCOUTQNSD: 580 if (sk->sk_state == TCP_LISTEN) 581 return -EINVAL; 582 583 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) 584 answ = 0; 585 else 586 answ = tp->write_seq - tp->snd_nxt; 587 break; 588 default: 589 return -ENOIOCTLCMD; 590 } 591 592 return put_user(answ, (int __user *)arg); 593 } 594 EXPORT_SYMBOL(tcp_ioctl); 595 596 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) 597 { 598 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 599 tp->pushed_seq = tp->write_seq; 600 } 601 602 static inline bool forced_push(const struct tcp_sock *tp) 603 { 604 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); 605 } 606 607 static void skb_entail(struct sock *sk, struct sk_buff *skb) 608 { 609 struct tcp_sock *tp = tcp_sk(sk); 610 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 611 612 skb->csum = 0; 613 tcb->seq = tcb->end_seq = tp->write_seq; 614 tcb->tcp_flags = TCPHDR_ACK; 615 tcb->sacked = 0; 616 __skb_header_release(skb); 617 tcp_add_write_queue_tail(sk, skb); 618 sk->sk_wmem_queued += skb->truesize; 619 sk_mem_charge(sk, skb->truesize); 620 if (tp->nonagle & TCP_NAGLE_PUSH) 621 tp->nonagle &= ~TCP_NAGLE_PUSH; 622 623 tcp_slow_start_after_idle_check(sk); 624 } 625 626 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags) 627 { 628 if (flags & MSG_OOB) 629 tp->snd_up = tp->write_seq; 630 } 631 632 /* If a not yet filled skb is pushed, do not send it if 633 * we have data packets in Qdisc or NIC queues : 634 * Because TX completion will happen shortly, it gives a chance 635 * to coalesce future sendmsg() payload into this skb, without 636 * need for a timer, and with no latency trade off. 637 * As packets containing data payload have a bigger truesize 638 * than pure acks (dataless) packets, the last checks prevent 639 * autocorking if we only have an ACK in Qdisc/NIC queues, 640 * or if TX completion was delayed after we processed ACK packet. 
641 */ 642 static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb, 643 int size_goal) 644 { 645 return skb->len < size_goal && 646 sysctl_tcp_autocorking && 647 skb != tcp_write_queue_head(sk) && 648 atomic_read(&sk->sk_wmem_alloc) > skb->truesize; 649 } 650 651 static void tcp_push(struct sock *sk, int flags, int mss_now, 652 int nonagle, int size_goal) 653 { 654 struct tcp_sock *tp = tcp_sk(sk); 655 struct sk_buff *skb; 656 657 if (!tcp_send_head(sk)) 658 return; 659 660 skb = tcp_write_queue_tail(sk); 661 if (!(flags & MSG_MORE) || forced_push(tp)) 662 tcp_mark_push(tp, skb); 663 664 tcp_mark_urg(tp, flags); 665 666 if (tcp_should_autocork(sk, skb, size_goal)) { 667 668 /* avoid atomic op if TSQ_THROTTLED bit is already set */ 669 if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) { 670 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING); 671 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); 672 } 673 /* It is possible TX completion already happened 674 * before we set TSQ_THROTTLED. 675 */ 676 if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize) 677 return; 678 } 679 680 if (flags & MSG_MORE) 681 nonagle = TCP_NAGLE_CORK; 682 683 __tcp_push_pending_frames(sk, mss_now, nonagle); 684 } 685 686 static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, 687 unsigned int offset, size_t len) 688 { 689 struct tcp_splice_state *tss = rd_desc->arg.data; 690 int ret; 691 692 ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe, 693 min(rd_desc->count, len), tss->flags); 694 if (ret > 0) 695 rd_desc->count -= ret; 696 return ret; 697 } 698 699 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss) 700 { 701 /* Store TCP splice context information in read_descriptor_t. */ 702 read_descriptor_t rd_desc = { 703 .arg.data = tss, 704 .count = tss->len, 705 }; 706 707 return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv); 708 } 709 710 /** 711 * tcp_splice_read - splice data from TCP socket to a pipe 712 * @sock: socket to splice from 713 * @ppos: position (not valid) 714 * @pipe: pipe to splice to 715 * @len: number of bytes to splice 716 * @flags: splice modifier flags 717 * 718 * Description: 719 * Will read pages from given socket and fill them into a pipe. 720 * 721 **/ 722 ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, 723 struct pipe_inode_info *pipe, size_t len, 724 unsigned int flags) 725 { 726 struct sock *sk = sock->sk; 727 struct tcp_splice_state tss = { 728 .pipe = pipe, 729 .len = len, 730 .flags = flags, 731 }; 732 long timeo; 733 ssize_t spliced; 734 int ret; 735 736 sock_rps_record_flow(sk); 737 /* 738 * We can't seek on a socket input 739 */ 740 if (unlikely(*ppos)) 741 return -ESPIPE; 742 743 ret = spliced = 0; 744 745 lock_sock(sk); 746 747 timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK); 748 while (tss.len) { 749 ret = __tcp_splice_read(sk, &tss); 750 if (ret < 0) 751 break; 752 else if (!ret) { 753 if (spliced) 754 break; 755 if (sock_flag(sk, SOCK_DONE)) 756 break; 757 if (sk->sk_err) { 758 ret = sock_error(sk); 759 break; 760 } 761 if (sk->sk_shutdown & RCV_SHUTDOWN) 762 break; 763 if (sk->sk_state == TCP_CLOSE) { 764 /* 765 * This occurs when user tries to read 766 * from never connected socket. 767 */ 768 if (!sock_flag(sk, SOCK_DONE)) 769 ret = -ENOTCONN; 770 break; 771 } 772 if (!timeo) { 773 ret = -EAGAIN; 774 break; 775 } 776 /* if __tcp_splice_read() got nothing while we have 777 * an skb in receive queue, we do not want to loop. 778 * This might happen with URG data. 
779 */ 780 if (!skb_queue_empty(&sk->sk_receive_queue)) 781 break; 782 sk_wait_data(sk, &timeo, NULL); 783 if (signal_pending(current)) { 784 ret = sock_intr_errno(timeo); 785 break; 786 } 787 continue; 788 } 789 tss.len -= ret; 790 spliced += ret; 791 792 if (!timeo) 793 break; 794 release_sock(sk); 795 lock_sock(sk); 796 797 if (sk->sk_err || sk->sk_state == TCP_CLOSE || 798 (sk->sk_shutdown & RCV_SHUTDOWN) || 799 signal_pending(current)) 800 break; 801 } 802 803 release_sock(sk); 804 805 if (spliced) 806 return spliced; 807 808 return ret; 809 } 810 EXPORT_SYMBOL(tcp_splice_read); 811 812 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, 813 bool force_schedule) 814 { 815 struct sk_buff *skb; 816 817 /* The TCP header must be at least 32-bit aligned. */ 818 size = ALIGN(size, 4); 819 820 if (unlikely(tcp_under_memory_pressure(sk))) 821 sk_mem_reclaim_partial(sk); 822 823 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); 824 if (likely(skb)) { 825 bool mem_scheduled; 826 827 if (force_schedule) { 828 mem_scheduled = true; 829 sk_forced_mem_schedule(sk, skb->truesize); 830 } else { 831 mem_scheduled = sk_wmem_schedule(sk, skb->truesize); 832 } 833 if (likely(mem_scheduled)) { 834 skb_reserve(skb, sk->sk_prot->max_header); 835 /* 836 * Make sure that we have exactly size bytes 837 * available to the caller, no more, no less. 838 */ 839 skb->reserved_tailroom = skb->end - skb->tail - size; 840 return skb; 841 } 842 __kfree_skb(skb); 843 } else { 844 sk->sk_prot->enter_memory_pressure(sk); 845 sk_stream_moderate_sndbuf(sk); 846 } 847 return NULL; 848 } 849 850 static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, 851 int large_allowed) 852 { 853 struct tcp_sock *tp = tcp_sk(sk); 854 u32 new_size_goal, size_goal; 855 856 if (!large_allowed || !sk_can_gso(sk)) 857 return mss_now; 858 859 /* Note : tcp_tso_autosize() will eventually split this later */ 860 new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER; 861 new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal); 862 863 /* We try hard to avoid divides here */ 864 size_goal = tp->gso_segs * mss_now; 865 if (unlikely(new_size_goal < size_goal || 866 new_size_goal >= size_goal + mss_now)) { 867 tp->gso_segs = min_t(u16, new_size_goal / mss_now, 868 sk->sk_gso_max_segs); 869 size_goal = tp->gso_segs * mss_now; 870 } 871 872 return max(size_goal, mss_now); 873 } 874 875 static int tcp_send_mss(struct sock *sk, int *size_goal, int flags) 876 { 877 int mss_now; 878 879 mss_now = tcp_current_mss(sk); 880 *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB)); 881 882 return mss_now; 883 } 884 885 static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, 886 size_t size, int flags) 887 { 888 struct tcp_sock *tp = tcp_sk(sk); 889 int mss_now, size_goal; 890 int err; 891 ssize_t copied; 892 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 893 894 /* Wait for a connection to finish. One exception is TCP Fast Open 895 * (passive side) where data is allowed to be sent before a connection 896 * is fully established. 
897 */ 898 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && 899 !tcp_passive_fastopen(sk)) { 900 err = sk_stream_wait_connect(sk, &timeo); 901 if (err != 0) 902 goto out_err; 903 } 904 905 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 906 907 mss_now = tcp_send_mss(sk, &size_goal, flags); 908 copied = 0; 909 910 err = -EPIPE; 911 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 912 goto out_err; 913 914 while (size > 0) { 915 struct sk_buff *skb = tcp_write_queue_tail(sk); 916 int copy, i; 917 bool can_coalesce; 918 919 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0 || 920 !tcp_skb_can_collapse_to(skb)) { 921 new_segment: 922 if (!sk_stream_memory_free(sk)) 923 goto wait_for_sndbuf; 924 925 skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, 926 skb_queue_empty(&sk->sk_write_queue)); 927 if (!skb) 928 goto wait_for_memory; 929 930 skb_entail(sk, skb); 931 copy = size_goal; 932 } 933 934 if (copy > size) 935 copy = size; 936 937 i = skb_shinfo(skb)->nr_frags; 938 can_coalesce = skb_can_coalesce(skb, i, page, offset); 939 if (!can_coalesce && i >= sysctl_max_skb_frags) { 940 tcp_mark_push(tp, skb); 941 goto new_segment; 942 } 943 if (!sk_wmem_schedule(sk, copy)) 944 goto wait_for_memory; 945 946 if (can_coalesce) { 947 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 948 } else { 949 get_page(page); 950 skb_fill_page_desc(skb, i, page, offset, copy); 951 } 952 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 953 954 skb->len += copy; 955 skb->data_len += copy; 956 skb->truesize += copy; 957 sk->sk_wmem_queued += copy; 958 sk_mem_charge(sk, copy); 959 skb->ip_summed = CHECKSUM_PARTIAL; 960 tp->write_seq += copy; 961 TCP_SKB_CB(skb)->end_seq += copy; 962 tcp_skb_pcount_set(skb, 0); 963 964 if (!copied) 965 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; 966 967 copied += copy; 968 offset += copy; 969 size -= copy; 970 if (!size) 971 goto out; 972 973 if (skb->len < size_goal || (flags & MSG_OOB)) 974 continue; 975 976 if (forced_push(tp)) { 977 tcp_mark_push(tp, skb); 978 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); 979 } else if (skb == tcp_send_head(sk)) 980 tcp_push_one(sk, mss_now); 981 continue; 982 983 wait_for_sndbuf: 984 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 985 wait_for_memory: 986 tcp_push(sk, flags & ~MSG_MORE, mss_now, 987 TCP_NAGLE_PUSH, size_goal); 988 989 err = sk_stream_wait_memory(sk, &timeo); 990 if (err != 0) 991 goto do_error; 992 993 mss_now = tcp_send_mss(sk, &size_goal, flags); 994 } 995 996 out: 997 if (copied) { 998 tcp_tx_timestamp(sk, sk->sk_tsflags, tcp_write_queue_tail(sk)); 999 if (!(flags & MSG_SENDPAGE_NOTLAST)) 1000 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); 1001 } 1002 return copied; 1003 1004 do_error: 1005 if (copied) 1006 goto out; 1007 out_err: 1008 /* make sure we wake any epoll edge trigger waiter */ 1009 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && 1010 err == -EAGAIN)) { 1011 sk->sk_write_space(sk); 1012 tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); 1013 } 1014 return sk_stream_error(sk, flags, err); 1015 } 1016 1017 int tcp_sendpage(struct sock *sk, struct page *page, int offset, 1018 size_t size, int flags) 1019 { 1020 ssize_t res; 1021 1022 if (!(sk->sk_route_caps & NETIF_F_SG) || 1023 !sk_check_csum_caps(sk)) 1024 return sock_no_sendpage(sk->sk_socket, page, offset, size, 1025 flags); 1026 1027 lock_sock(sk); 1028 1029 tcp_rate_check_app_limited(sk); /* is sending application-limited? 
*/ 1030 1031 res = do_tcp_sendpages(sk, page, offset, size, flags); 1032 release_sock(sk); 1033 return res; 1034 } 1035 EXPORT_SYMBOL(tcp_sendpage); 1036 1037 /* Do not bother using a page frag for very small frames. 1038 * But use this heuristic only for the first skb in write queue. 1039 * 1040 * Having no payload in skb->head allows better SACK shifting 1041 * in tcp_shift_skb_data(), reducing sack/rack overhead, because 1042 * write queue has less skbs. 1043 * Each skb can hold up to MAX_SKB_FRAGS * 32Kbytes, or ~0.5 MB. 1044 * This also speeds up tso_fragment(), since it wont fallback 1045 * to tcp_fragment(). 1046 */ 1047 static int linear_payload_sz(bool first_skb) 1048 { 1049 if (first_skb) 1050 return SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER); 1051 return 0; 1052 } 1053 1054 static int select_size(const struct sock *sk, bool sg, bool first_skb) 1055 { 1056 const struct tcp_sock *tp = tcp_sk(sk); 1057 int tmp = tp->mss_cache; 1058 1059 if (sg) { 1060 if (sk_can_gso(sk)) { 1061 tmp = linear_payload_sz(first_skb); 1062 } else { 1063 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER); 1064 1065 if (tmp >= pgbreak && 1066 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE) 1067 tmp = pgbreak; 1068 } 1069 } 1070 1071 return tmp; 1072 } 1073 1074 void tcp_free_fastopen_req(struct tcp_sock *tp) 1075 { 1076 if (tp->fastopen_req) { 1077 kfree(tp->fastopen_req); 1078 tp->fastopen_req = NULL; 1079 } 1080 } 1081 1082 static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, 1083 int *copied, size_t size) 1084 { 1085 struct tcp_sock *tp = tcp_sk(sk); 1086 struct inet_sock *inet = inet_sk(sk); 1087 int err, flags; 1088 1089 if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE)) 1090 return -EOPNOTSUPP; 1091 if (tp->fastopen_req) 1092 return -EALREADY; /* Another Fast Open is in progress */ 1093 1094 tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request), 1095 sk->sk_allocation); 1096 if (unlikely(!tp->fastopen_req)) 1097 return -ENOBUFS; 1098 tp->fastopen_req->data = msg; 1099 tp->fastopen_req->size = size; 1100 1101 if (inet->defer_connect) { 1102 err = tcp_connect(sk); 1103 /* Same failure procedure as in tcp_v4/6_connect */ 1104 if (err) { 1105 tcp_set_state(sk, TCP_CLOSE); 1106 inet->inet_dport = 0; 1107 sk->sk_route_caps = 0; 1108 } 1109 } 1110 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; 1111 err = __inet_stream_connect(sk->sk_socket, msg->msg_name, 1112 msg->msg_namelen, flags, 1); 1113 /* fastopen_req could already be freed in __inet_stream_connect 1114 * if the connection times out or gets rst 1115 */ 1116 if (tp->fastopen_req) { 1117 *copied = tp->fastopen_req->copied; 1118 tcp_free_fastopen_req(tp); 1119 inet->defer_connect = 0; 1120 } 1121 return err; 1122 } 1123 1124 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 1125 { 1126 struct tcp_sock *tp = tcp_sk(sk); 1127 struct sk_buff *skb; 1128 struct sockcm_cookie sockc; 1129 int flags, err, copied = 0; 1130 int mss_now = 0, size_goal, copied_syn = 0; 1131 bool process_backlog = false; 1132 bool sg; 1133 long timeo; 1134 1135 lock_sock(sk); 1136 1137 flags = msg->msg_flags; 1138 if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) { 1139 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size); 1140 if (err == -EINPROGRESS && copied_syn > 0) 1141 goto out; 1142 else if (err) 1143 goto out_err; 1144 } 1145 1146 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 1147 1148 tcp_rate_check_app_limited(sk); /* is sending application-limited? */ 1149 1150 /* Wait for a connection to finish. 
One exception is TCP Fast Open 1151 * (passive side) where data is allowed to be sent before a connection 1152 * is fully established. 1153 */ 1154 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && 1155 !tcp_passive_fastopen(sk)) { 1156 err = sk_stream_wait_connect(sk, &timeo); 1157 if (err != 0) 1158 goto do_error; 1159 } 1160 1161 if (unlikely(tp->repair)) { 1162 if (tp->repair_queue == TCP_RECV_QUEUE) { 1163 copied = tcp_send_rcvq(sk, msg, size); 1164 goto out_nopush; 1165 } 1166 1167 err = -EINVAL; 1168 if (tp->repair_queue == TCP_NO_QUEUE) 1169 goto out_err; 1170 1171 /* 'common' sending to sendq */ 1172 } 1173 1174 sockc.tsflags = sk->sk_tsflags; 1175 if (msg->msg_controllen) { 1176 err = sock_cmsg_send(sk, msg, &sockc); 1177 if (unlikely(err)) { 1178 err = -EINVAL; 1179 goto out_err; 1180 } 1181 } 1182 1183 /* This should be in poll */ 1184 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 1185 1186 /* Ok commence sending. */ 1187 copied = 0; 1188 1189 restart: 1190 mss_now = tcp_send_mss(sk, &size_goal, flags); 1191 1192 err = -EPIPE; 1193 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 1194 goto do_error; 1195 1196 sg = !!(sk->sk_route_caps & NETIF_F_SG); 1197 1198 while (msg_data_left(msg)) { 1199 int copy = 0; 1200 int max = size_goal; 1201 1202 skb = tcp_write_queue_tail(sk); 1203 if (tcp_send_head(sk)) { 1204 if (skb->ip_summed == CHECKSUM_NONE) 1205 max = mss_now; 1206 copy = max - skb->len; 1207 } 1208 1209 if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) { 1210 bool first_skb; 1211 1212 new_segment: 1213 /* Allocate new segment. If the interface is SG, 1214 * allocate skb fitting to single page. 1215 */ 1216 if (!sk_stream_memory_free(sk)) 1217 goto wait_for_sndbuf; 1218 1219 if (process_backlog && sk_flush_backlog(sk)) { 1220 process_backlog = false; 1221 goto restart; 1222 } 1223 first_skb = skb_queue_empty(&sk->sk_write_queue); 1224 skb = sk_stream_alloc_skb(sk, 1225 select_size(sk, sg, first_skb), 1226 sk->sk_allocation, 1227 first_skb); 1228 if (!skb) 1229 goto wait_for_memory; 1230 1231 process_backlog = true; 1232 /* 1233 * Check whether we can use HW checksum. 1234 */ 1235 if (sk_check_csum_caps(sk)) 1236 skb->ip_summed = CHECKSUM_PARTIAL; 1237 1238 skb_entail(sk, skb); 1239 copy = size_goal; 1240 max = size_goal; 1241 1242 /* All packets are restored as if they have 1243 * already been sent. skb_mstamp isn't set to 1244 * avoid wrong rtt estimation. 1245 */ 1246 if (tp->repair) 1247 TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED; 1248 } 1249 1250 /* Try to append data to the end of skb. */ 1251 if (copy > msg_data_left(msg)) 1252 copy = msg_data_left(msg); 1253 1254 /* Where to copy to? */ 1255 if (skb_availroom(skb) > 0) { 1256 /* We have some space in skb head. Superb! 
*/ 1257 copy = min_t(int, copy, skb_availroom(skb)); 1258 err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy); 1259 if (err) 1260 goto do_fault; 1261 } else { 1262 bool merge = true; 1263 int i = skb_shinfo(skb)->nr_frags; 1264 struct page_frag *pfrag = sk_page_frag(sk); 1265 1266 if (!sk_page_frag_refill(sk, pfrag)) 1267 goto wait_for_memory; 1268 1269 if (!skb_can_coalesce(skb, i, pfrag->page, 1270 pfrag->offset)) { 1271 if (i >= sysctl_max_skb_frags || !sg) { 1272 tcp_mark_push(tp, skb); 1273 goto new_segment; 1274 } 1275 merge = false; 1276 } 1277 1278 copy = min_t(int, copy, pfrag->size - pfrag->offset); 1279 1280 if (!sk_wmem_schedule(sk, copy)) 1281 goto wait_for_memory; 1282 1283 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, 1284 pfrag->page, 1285 pfrag->offset, 1286 copy); 1287 if (err) 1288 goto do_error; 1289 1290 /* Update the skb. */ 1291 if (merge) { 1292 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 1293 } else { 1294 skb_fill_page_desc(skb, i, pfrag->page, 1295 pfrag->offset, copy); 1296 page_ref_inc(pfrag->page); 1297 } 1298 pfrag->offset += copy; 1299 } 1300 1301 if (!copied) 1302 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; 1303 1304 tp->write_seq += copy; 1305 TCP_SKB_CB(skb)->end_seq += copy; 1306 tcp_skb_pcount_set(skb, 0); 1307 1308 copied += copy; 1309 if (!msg_data_left(msg)) { 1310 if (unlikely(flags & MSG_EOR)) 1311 TCP_SKB_CB(skb)->eor = 1; 1312 goto out; 1313 } 1314 1315 if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair)) 1316 continue; 1317 1318 if (forced_push(tp)) { 1319 tcp_mark_push(tp, skb); 1320 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); 1321 } else if (skb == tcp_send_head(sk)) 1322 tcp_push_one(sk, mss_now); 1323 continue; 1324 1325 wait_for_sndbuf: 1326 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1327 wait_for_memory: 1328 if (copied) 1329 tcp_push(sk, flags & ~MSG_MORE, mss_now, 1330 TCP_NAGLE_PUSH, size_goal); 1331 1332 err = sk_stream_wait_memory(sk, &timeo); 1333 if (err != 0) 1334 goto do_error; 1335 1336 mss_now = tcp_send_mss(sk, &size_goal, flags); 1337 } 1338 1339 out: 1340 if (copied) { 1341 tcp_tx_timestamp(sk, sockc.tsflags, tcp_write_queue_tail(sk)); 1342 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); 1343 } 1344 out_nopush: 1345 release_sock(sk); 1346 return copied + copied_syn; 1347 1348 do_fault: 1349 if (!skb->len) { 1350 tcp_unlink_write_queue(skb, sk); 1351 /* It is the one place in all of TCP, except connection 1352 * reset, where we can be unlinking the send_head. 1353 */ 1354 tcp_check_send_head(sk, skb); 1355 sk_wmem_free_skb(sk, skb); 1356 } 1357 1358 do_error: 1359 if (copied + copied_syn) 1360 goto out; 1361 out_err: 1362 err = sk_stream_error(sk, flags, err); 1363 /* make sure we wake any epoll edge trigger waiter */ 1364 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && 1365 err == -EAGAIN)) { 1366 sk->sk_write_space(sk); 1367 tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); 1368 } 1369 release_sock(sk); 1370 return err; 1371 } 1372 EXPORT_SYMBOL(tcp_sendmsg); 1373 1374 /* 1375 * Handle reading urgent data. BSD has very simple semantics for 1376 * this, no blocking and very strange errors 8) 1377 */ 1378 1379 static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) 1380 { 1381 struct tcp_sock *tp = tcp_sk(sk); 1382 1383 /* No URG data to read. */ 1384 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || 1385 tp->urg_data == TCP_URG_READ) 1386 return -EINVAL; /* Yes this is right ! 
*/ 1387 1388 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) 1389 return -ENOTCONN; 1390 1391 if (tp->urg_data & TCP_URG_VALID) { 1392 int err = 0; 1393 char c = tp->urg_data; 1394 1395 if (!(flags & MSG_PEEK)) 1396 tp->urg_data = TCP_URG_READ; 1397 1398 /* Read urgent data. */ 1399 msg->msg_flags |= MSG_OOB; 1400 1401 if (len > 0) { 1402 if (!(flags & MSG_TRUNC)) 1403 err = memcpy_to_msg(msg, &c, 1); 1404 len = 1; 1405 } else 1406 msg->msg_flags |= MSG_TRUNC; 1407 1408 return err ? -EFAULT : len; 1409 } 1410 1411 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) 1412 return 0; 1413 1414 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and 1415 * the available implementations agree in this case: 1416 * this call should never block, independent of the 1417 * blocking state of the socket. 1418 * Mike <pall@rz.uni-karlsruhe.de> 1419 */ 1420 return -EAGAIN; 1421 } 1422 1423 static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) 1424 { 1425 struct sk_buff *skb; 1426 int copied = 0, err = 0; 1427 1428 /* XXX -- need to support SO_PEEK_OFF */ 1429 1430 skb_queue_walk(&sk->sk_write_queue, skb) { 1431 err = skb_copy_datagram_msg(skb, 0, msg, skb->len); 1432 if (err) 1433 break; 1434 1435 copied += skb->len; 1436 } 1437 1438 return err ?: copied; 1439 } 1440 1441 /* Clean up the receive buffer for full frames taken by the user, 1442 * then send an ACK if necessary. COPIED is the number of bytes 1443 * tcp_recvmsg has given to the user so far, it speeds up the 1444 * calculation of whether or not we must ACK for the sake of 1445 * a window update. 1446 */ 1447 static void tcp_cleanup_rbuf(struct sock *sk, int copied) 1448 { 1449 struct tcp_sock *tp = tcp_sk(sk); 1450 bool time_to_ack = false; 1451 1452 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1453 1454 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), 1455 "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", 1456 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); 1457 1458 if (inet_csk_ack_scheduled(sk)) { 1459 const struct inet_connection_sock *icsk = inet_csk(sk); 1460 /* Delayed ACKs frequently hit locked sockets during bulk 1461 * receive. */ 1462 if (icsk->icsk_ack.blocked || 1463 /* Once-per-two-segments ACK was not sent by tcp_input.c */ 1464 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || 1465 /* 1466 * If this read emptied read buffer, we send ACK, if 1467 * connection is not bidirectional, user drained 1468 * receive buffer and there was a small segment 1469 * in queue. 1470 */ 1471 (copied > 0 && 1472 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) || 1473 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && 1474 !icsk->icsk_ack.pingpong)) && 1475 !atomic_read(&sk->sk_rmem_alloc))) 1476 time_to_ack = true; 1477 } 1478 1479 /* We send an ACK if we can now advertise a non-zero window 1480 * which has been raised "significantly". 1481 * 1482 * Even if window raised up to infinity, do not send window open ACK 1483 * in states, where we will not receive more. It is useless. 1484 */ 1485 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { 1486 __u32 rcv_window_now = tcp_receive_window(tp); 1487 1488 /* Optimize, __tcp_select_window() is not cheap. */ 1489 if (2*rcv_window_now <= tp->window_clamp) { 1490 __u32 new_window = __tcp_select_window(sk); 1491 1492 /* Send ACK now, if this read freed lots of space 1493 * in our buffer. Certainly, new_window is new window. 1494 * We can advertise it now, if it is not less than current one. 
1495 * "Lots" means "at least twice" here. 1496 */ 1497 if (new_window && new_window >= 2 * rcv_window_now) 1498 time_to_ack = true; 1499 } 1500 } 1501 if (time_to_ack) 1502 tcp_send_ack(sk); 1503 } 1504 1505 static void tcp_prequeue_process(struct sock *sk) 1506 { 1507 struct sk_buff *skb; 1508 struct tcp_sock *tp = tcp_sk(sk); 1509 1510 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUED); 1511 1512 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) 1513 sk_backlog_rcv(sk, skb); 1514 1515 /* Clear memory counter. */ 1516 tp->ucopy.memory = 0; 1517 } 1518 1519 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 1520 { 1521 struct sk_buff *skb; 1522 u32 offset; 1523 1524 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { 1525 offset = seq - TCP_SKB_CB(skb)->seq; 1526 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 1527 pr_err_once("%s: found a SYN, please report !\n", __func__); 1528 offset--; 1529 } 1530 if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) { 1531 *off = offset; 1532 return skb; 1533 } 1534 /* This looks weird, but this can happen if TCP collapsing 1535 * splitted a fat GRO packet, while we released socket lock 1536 * in skb_splice_bits() 1537 */ 1538 sk_eat_skb(sk, skb); 1539 } 1540 return NULL; 1541 } 1542 1543 /* 1544 * This routine provides an alternative to tcp_recvmsg() for routines 1545 * that would like to handle copying from skbuffs directly in 'sendfile' 1546 * fashion. 1547 * Note: 1548 * - It is assumed that the socket was locked by the caller. 1549 * - The routine does not block. 1550 * - At present, there is no support for reading OOB data 1551 * or for 'peeking' the socket using this routine 1552 * (although both would be easy to implement). 1553 */ 1554 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 1555 sk_read_actor_t recv_actor) 1556 { 1557 struct sk_buff *skb; 1558 struct tcp_sock *tp = tcp_sk(sk); 1559 u32 seq = tp->copied_seq; 1560 u32 offset; 1561 int copied = 0; 1562 1563 if (sk->sk_state == TCP_LISTEN) 1564 return -ENOTCONN; 1565 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 1566 if (offset < skb->len) { 1567 int used; 1568 size_t len; 1569 1570 len = skb->len - offset; 1571 /* Stop reading if we hit a patch of urgent data */ 1572 if (tp->urg_data) { 1573 u32 urg_offset = tp->urg_seq - seq; 1574 if (urg_offset < len) 1575 len = urg_offset; 1576 if (!len) 1577 break; 1578 } 1579 used = recv_actor(desc, skb, offset, len); 1580 if (used <= 0) { 1581 if (!copied) 1582 copied = used; 1583 break; 1584 } else if (used <= len) { 1585 seq += used; 1586 copied += used; 1587 offset += used; 1588 } 1589 /* If recv_actor drops the lock (e.g. TCP splice 1590 * receive) the skb pointer might be invalid when 1591 * getting here: tcp_collapse might have deleted it 1592 * while aggregating skbs from the socket queue. 1593 */ 1594 skb = tcp_recv_skb(sk, seq - 1, &offset); 1595 if (!skb) 1596 break; 1597 /* TCP coalescing might have appended data to the skb. 1598 * Try to splice more frags 1599 */ 1600 if (offset + 1 != skb->len) 1601 continue; 1602 } 1603 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { 1604 sk_eat_skb(sk, skb); 1605 ++seq; 1606 break; 1607 } 1608 sk_eat_skb(sk, skb); 1609 if (!desc->count) 1610 break; 1611 tp->copied_seq = seq; 1612 } 1613 tp->copied_seq = seq; 1614 1615 tcp_rcv_space_adjust(sk); 1616 1617 /* Clean up data we have read: This will do ACK frames. 
*/ 1618 if (copied > 0) { 1619 tcp_recv_skb(sk, seq, &offset); 1620 tcp_cleanup_rbuf(sk, copied); 1621 } 1622 return copied; 1623 } 1624 EXPORT_SYMBOL(tcp_read_sock); 1625 1626 int tcp_peek_len(struct socket *sock) 1627 { 1628 return tcp_inq(sock->sk); 1629 } 1630 EXPORT_SYMBOL(tcp_peek_len); 1631 1632 /* 1633 * This routine copies from a sock struct into the user buffer. 1634 * 1635 * Technical note: in 2.3 we work on _locked_ socket, so that 1636 * tricks with *seq access order and skb->users are not required. 1637 * Probably, code can be easily improved even more. 1638 */ 1639 1640 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, 1641 int flags, int *addr_len) 1642 { 1643 struct tcp_sock *tp = tcp_sk(sk); 1644 int copied = 0; 1645 u32 peek_seq; 1646 u32 *seq; 1647 unsigned long used; 1648 int err; 1649 int target; /* Read at least this many bytes */ 1650 long timeo; 1651 struct task_struct *user_recv = NULL; 1652 struct sk_buff *skb, *last; 1653 u32 urg_hole = 0; 1654 1655 if (unlikely(flags & MSG_ERRQUEUE)) 1656 return inet_recv_error(sk, msg, len, addr_len); 1657 1658 if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && 1659 (sk->sk_state == TCP_ESTABLISHED)) 1660 sk_busy_loop(sk, nonblock); 1661 1662 lock_sock(sk); 1663 1664 err = -ENOTCONN; 1665 if (sk->sk_state == TCP_LISTEN) 1666 goto out; 1667 1668 timeo = sock_rcvtimeo(sk, nonblock); 1669 1670 /* Urgent data needs to be handled specially. */ 1671 if (flags & MSG_OOB) 1672 goto recv_urg; 1673 1674 if (unlikely(tp->repair)) { 1675 err = -EPERM; 1676 if (!(flags & MSG_PEEK)) 1677 goto out; 1678 1679 if (tp->repair_queue == TCP_SEND_QUEUE) 1680 goto recv_sndq; 1681 1682 err = -EINVAL; 1683 if (tp->repair_queue == TCP_NO_QUEUE) 1684 goto out; 1685 1686 /* 'common' recv queue MSG_PEEK-ing */ 1687 } 1688 1689 seq = &tp->copied_seq; 1690 if (flags & MSG_PEEK) { 1691 peek_seq = tp->copied_seq; 1692 seq = &peek_seq; 1693 } 1694 1695 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 1696 1697 do { 1698 u32 offset; 1699 1700 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ 1701 if (tp->urg_data && tp->urg_seq == *seq) { 1702 if (copied) 1703 break; 1704 if (signal_pending(current)) { 1705 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; 1706 break; 1707 } 1708 } 1709 1710 /* Next get a buffer. */ 1711 1712 last = skb_peek_tail(&sk->sk_receive_queue); 1713 skb_queue_walk(&sk->sk_receive_queue, skb) { 1714 last = skb; 1715 /* Now that we have two receive queues this 1716 * shouldn't happen. 1717 */ 1718 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), 1719 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n", 1720 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, 1721 flags)) 1722 break; 1723 1724 offset = *seq - TCP_SKB_CB(skb)->seq; 1725 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 1726 pr_err_once("%s: found a SYN, please report !\n", __func__); 1727 offset--; 1728 } 1729 if (offset < skb->len) 1730 goto found_ok_skb; 1731 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 1732 goto found_fin_ok; 1733 WARN(!(flags & MSG_PEEK), 1734 "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n", 1735 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); 1736 } 1737 1738 /* Well, if we have backlog, try to process it now yet. 
*/ 1739 1740 if (copied >= target && !sk->sk_backlog.tail) 1741 break; 1742 1743 if (copied) { 1744 if (sk->sk_err || 1745 sk->sk_state == TCP_CLOSE || 1746 (sk->sk_shutdown & RCV_SHUTDOWN) || 1747 !timeo || 1748 signal_pending(current)) 1749 break; 1750 } else { 1751 if (sock_flag(sk, SOCK_DONE)) 1752 break; 1753 1754 if (sk->sk_err) { 1755 copied = sock_error(sk); 1756 break; 1757 } 1758 1759 if (sk->sk_shutdown & RCV_SHUTDOWN) 1760 break; 1761 1762 if (sk->sk_state == TCP_CLOSE) { 1763 if (!sock_flag(sk, SOCK_DONE)) { 1764 /* This occurs when user tries to read 1765 * from never connected socket. 1766 */ 1767 copied = -ENOTCONN; 1768 break; 1769 } 1770 break; 1771 } 1772 1773 if (!timeo) { 1774 copied = -EAGAIN; 1775 break; 1776 } 1777 1778 if (signal_pending(current)) { 1779 copied = sock_intr_errno(timeo); 1780 break; 1781 } 1782 } 1783 1784 tcp_cleanup_rbuf(sk, copied); 1785 1786 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) { 1787 /* Install new reader */ 1788 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) { 1789 user_recv = current; 1790 tp->ucopy.task = user_recv; 1791 tp->ucopy.msg = msg; 1792 } 1793 1794 tp->ucopy.len = len; 1795 1796 WARN_ON(tp->copied_seq != tp->rcv_nxt && 1797 !(flags & (MSG_PEEK | MSG_TRUNC))); 1798 1799 /* Ugly... If prequeue is not empty, we have to 1800 * process it before releasing socket, otherwise 1801 * order will be broken at second iteration. 1802 * More elegant solution is required!!! 1803 * 1804 * Look: we have the following (pseudo)queues: 1805 * 1806 * 1. packets in flight 1807 * 2. backlog 1808 * 3. prequeue 1809 * 4. receive_queue 1810 * 1811 * Each queue can be processed only if the next ones 1812 * are empty. At this point we have empty receive_queue. 1813 * But prequeue _can_ be not empty after 2nd iteration, 1814 * when we jumped to start of loop because backlog 1815 * processing added something to receive_queue. 1816 * We cannot release_sock(), because backlog contains 1817 * packets arrived _after_ prequeued ones. 1818 * 1819 * Shortly, algorithm is clear --- to process all 1820 * the queues in order. We could make it more directly, 1821 * requeueing packets from backlog to prequeue, if 1822 * is not empty. It is more elegant, but eats cycles, 1823 * unfortunately. 1824 */ 1825 if (!skb_queue_empty(&tp->ucopy.prequeue)) 1826 goto do_prequeue; 1827 1828 /* __ Set realtime policy in scheduler __ */ 1829 } 1830 1831 if (copied >= target) { 1832 /* Do not sleep, just process backlog. 
*/ 1833 release_sock(sk); 1834 lock_sock(sk); 1835 } else { 1836 sk_wait_data(sk, &timeo, last); 1837 } 1838 1839 if (user_recv) { 1840 int chunk; 1841 1842 /* __ Restore normal policy in scheduler __ */ 1843 1844 chunk = len - tp->ucopy.len; 1845 if (chunk != 0) { 1846 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); 1847 len -= chunk; 1848 copied += chunk; 1849 } 1850 1851 if (tp->rcv_nxt == tp->copied_seq && 1852 !skb_queue_empty(&tp->ucopy.prequeue)) { 1853 do_prequeue: 1854 tcp_prequeue_process(sk); 1855 1856 chunk = len - tp->ucopy.len; 1857 if (chunk != 0) { 1858 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); 1859 len -= chunk; 1860 copied += chunk; 1861 } 1862 } 1863 } 1864 if ((flags & MSG_PEEK) && 1865 (peek_seq - copied - urg_hole != tp->copied_seq)) { 1866 net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", 1867 current->comm, 1868 task_pid_nr(current)); 1869 peek_seq = tp->copied_seq; 1870 } 1871 continue; 1872 1873 found_ok_skb: 1874 /* Ok so how much can we use? */ 1875 used = skb->len - offset; 1876 if (len < used) 1877 used = len; 1878 1879 /* Do we have urgent data here? */ 1880 if (tp->urg_data) { 1881 u32 urg_offset = tp->urg_seq - *seq; 1882 if (urg_offset < used) { 1883 if (!urg_offset) { 1884 if (!sock_flag(sk, SOCK_URGINLINE)) { 1885 ++*seq; 1886 urg_hole++; 1887 offset++; 1888 used--; 1889 if (!used) 1890 goto skip_copy; 1891 } 1892 } else 1893 used = urg_offset; 1894 } 1895 } 1896 1897 if (!(flags & MSG_TRUNC)) { 1898 err = skb_copy_datagram_msg(skb, offset, msg, used); 1899 if (err) { 1900 /* Exception. Bailout! */ 1901 if (!copied) 1902 copied = -EFAULT; 1903 break; 1904 } 1905 } 1906 1907 *seq += used; 1908 copied += used; 1909 len -= used; 1910 1911 tcp_rcv_space_adjust(sk); 1912 1913 skip_copy: 1914 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { 1915 tp->urg_data = 0; 1916 tcp_fast_path_check(sk); 1917 } 1918 if (used + offset < skb->len) 1919 continue; 1920 1921 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 1922 goto found_fin_ok; 1923 if (!(flags & MSG_PEEK)) 1924 sk_eat_skb(sk, skb); 1925 continue; 1926 1927 found_fin_ok: 1928 /* Process the FIN. */ 1929 ++*seq; 1930 if (!(flags & MSG_PEEK)) 1931 sk_eat_skb(sk, skb); 1932 break; 1933 } while (len > 0); 1934 1935 if (user_recv) { 1936 if (!skb_queue_empty(&tp->ucopy.prequeue)) { 1937 int chunk; 1938 1939 tp->ucopy.len = copied > 0 ? len : 0; 1940 1941 tcp_prequeue_process(sk); 1942 1943 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) { 1944 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); 1945 len -= chunk; 1946 copied += chunk; 1947 } 1948 } 1949 1950 tp->ucopy.task = NULL; 1951 tp->ucopy.len = 0; 1952 } 1953 1954 /* According to UNIX98, msg_name/msg_namelen are ignored 1955 * on connected socket. I was just happy when found this 8) --ANK 1956 */ 1957 1958 /* Clean up data we have read: This will do ACK frames. 
*/ 1959 tcp_cleanup_rbuf(sk, copied); 1960 1961 release_sock(sk); 1962 return copied; 1963 1964 out: 1965 release_sock(sk); 1966 return err; 1967 1968 recv_urg: 1969 err = tcp_recv_urg(sk, msg, len, flags); 1970 goto out; 1971 1972 recv_sndq: 1973 err = tcp_peek_sndq(sk, msg, len); 1974 goto out; 1975 } 1976 EXPORT_SYMBOL(tcp_recvmsg); 1977 1978 void tcp_set_state(struct sock *sk, int state) 1979 { 1980 int oldstate = sk->sk_state; 1981 1982 switch (state) { 1983 case TCP_ESTABLISHED: 1984 if (oldstate != TCP_ESTABLISHED) 1985 TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 1986 break; 1987 1988 case TCP_CLOSE: 1989 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) 1990 TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); 1991 1992 sk->sk_prot->unhash(sk); 1993 if (inet_csk(sk)->icsk_bind_hash && 1994 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 1995 inet_put_port(sk); 1996 /* fall through */ 1997 default: 1998 if (oldstate == TCP_ESTABLISHED) 1999 TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2000 } 2001 2002 /* Change state AFTER socket is unhashed to avoid closed 2003 * socket sitting in hash tables. 2004 */ 2005 sk_state_store(sk, state); 2006 2007 #ifdef STATE_TRACE 2008 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); 2009 #endif 2010 } 2011 EXPORT_SYMBOL_GPL(tcp_set_state); 2012 2013 /* 2014 * State processing on a close. This implements the state shift for 2015 * sending our FIN frame. Note that we only send a FIN for some 2016 * states. A shutdown() may have already sent the FIN, or we may be 2017 * closed. 2018 */ 2019 2020 static const unsigned char new_state[16] = { 2021 /* current state: new state: action: */ 2022 [0 /* (Invalid) */] = TCP_CLOSE, 2023 [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 2024 [TCP_SYN_SENT] = TCP_CLOSE, 2025 [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 2026 [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, 2027 [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, 2028 [TCP_TIME_WAIT] = TCP_CLOSE, 2029 [TCP_CLOSE] = TCP_CLOSE, 2030 [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, 2031 [TCP_LAST_ACK] = TCP_LAST_ACK, 2032 [TCP_LISTEN] = TCP_CLOSE, 2033 [TCP_CLOSING] = TCP_CLOSING, 2034 [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */ 2035 }; 2036 2037 static int tcp_close_state(struct sock *sk) 2038 { 2039 int next = (int)new_state[sk->sk_state]; 2040 int ns = next & TCP_STATE_MASK; 2041 2042 tcp_set_state(sk, ns); 2043 2044 return next & TCP_ACTION_FIN; 2045 } 2046 2047 /* 2048 * Shutdown the sending side of a connection. Much like close except 2049 * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD). 2050 */ 2051 2052 void tcp_shutdown(struct sock *sk, int how) 2053 { 2054 /* We need to grab some memory, and put together a FIN, 2055 * and then put it into the queue to be sent. 2056 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. 2057 */ 2058 if (!(how & SEND_SHUTDOWN)) 2059 return; 2060 2061 /* If we've already sent a FIN, or it's a closed state, skip this. */ 2062 if ((1 << sk->sk_state) & 2063 (TCPF_ESTABLISHED | TCPF_SYN_SENT | 2064 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { 2065 /* Clear out any half completed packets. FIN if needed. 
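		 *
		 * (Userspace view, as an illustrative sketch assuming a
		 * connected socket "fd": shutdown(fd, SHUT_WR) reaches this
		 * function with SEND_SHUTDOWN set, queues a FIN here, and the
		 * socket can still read the peer's remaining data afterwards.)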
		 */
		if (tcp_close_state(sk))
			tcp_send_fin(sk);
	}
}
EXPORT_SYMBOL(tcp_shutdown);

bool tcp_check_oom(struct sock *sk, int shift)
{
	bool too_many_orphans, out_of_socket_memory;

	too_many_orphans = tcp_too_many_orphans(sk, shift);
	out_of_socket_memory = tcp_out_of_memory(sk);

	if (too_many_orphans)
		net_info_ratelimited("too many orphaned sockets\n");
	if (out_of_socket_memory)
		net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
	return too_many_orphans || out_of_socket_memory;
}

void tcp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int data_was_unread = 0;
	int state;

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	/* We need to flush the recv. buffs. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;

		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
			len--;
		data_was_unread += len;
		__kfree_skb(skb);
	}

	sk_mem_reclaim(sk);

	/* If the socket has already been reset (e.g. in tcp_reset()) - kill it. */
	if (sk->sk_state == TCP_CLOSE)
		goto adjudge_to_death;

	/* As outlined in RFC 2525, section 2.17, we send a RST here because
	 * data was lost. To witness the awful effects of the old behavior of
	 * always doing a FIN, run an older 2.1.x or 2.0.x kernel, start a bulk
	 * GET in an FTP client, suspend the process, wait for the client to
	 * advertise a zero window, then kill -9 the FTP client.
	 * Note: the timeout is always zero in such a case.
	 */
	if (unlikely(tcp_sk(sk)->repair)) {
		sk->sk_prot->disconnect(sk, 0);
	} else if (data_was_unread) {
		/* Unread data was tossed, zap the connection. */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, sk->sk_allocation);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
	} else if (tcp_close_state(sk)) {
		/* We FIN if the application ate all the data before
		 * zapping the connection.
		 */

		/* RED-PEN. Formally speaking, we have broken the TCP state
		 * machine. The state transitions:
		 *
		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
		 *
		 * are legal only when a FIN has been sent (i.e. is in window),
		 * rather than queued out of window. Purists will blame us.
		 *
		 * E.g. the "RFC state" is ESTABLISHED
		 * if the Linux state is FIN-WAIT-1 but the FIN is still not sent.
		 *
		 * The visible deviations are that we sometimes enter the
		 * time-wait state when it is not really required (harmless),
		 * and do not send active resets when the specs require them
		 * (in TCP_ESTABLISHED and TCP_CLOSE_WAIT, which look like
		 * CLOSING or LAST_ACK to Linux).
		 * Probably I have missed some more of these little holes.
		 *						--ANK
		 * XXX (TFO) - To start off we don't support SYN+ACK+FIN
		 * in a single packet! (May consider it later but it will
		 * probably need API support or TCP_CORK on the SYN-ACK until
		 * the data is written and the socket is closed.)
		 */
		tcp_send_fin(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/* This is the last release_sock() in this socket's life.
	 * It will process the remaining backlog.
	 */
	release_sock(sk);


	/* Now the socket is owned by the kernel and we acquire the BH lock
	 * to finish the close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	percpu_counter_inc(sk->sk_prot->orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		goto out;

	/* This is a (useful) BSD violation of the RFC. There is a
	 * problem with TCP as specified in that the other end could
	 * keep a socket open forever with no application left at this end.
	 * We use a 1 minute timeout (about the same as BSD) and then kill
	 * our end. If they send after that then tough - BUT: the timeout is
	 * long enough that we won't repeat the old "4*rto = almost no time -
	 * whoops, reset" mistake.
	 *
	 * No, it was not a mistake. It is really the desired behaviour,
	 * e.g. on HTTP servers, where such sockets are useless but
	 * consume significant resources. Let's do it with the special
	 * linger2 option.					--ANK
	 */

	if (sk->sk_state == TCP_FIN_WAIT2) {
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->linger2 < 0) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPABORTONLINGER);
		} else {
			const int tmo = tcp_fin_time(sk);

			if (tmo > TCP_TIMEWAIT_LEN) {
				inet_csk_reset_keepalive_timer(sk,
						tmo - TCP_TIMEWAIT_LEN);
			} else {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		sk_mem_reclaim(sk);
		if (tcp_check_oom(sk, 0)) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPABORTONMEMORY);
		}
	}

	if (sk->sk_state == TCP_CLOSE) {
		struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
		/* We could get here with a non-NULL req if the socket is
		 * aborted (e.g., closed with unread data) before the 3WHS
		 * finishes.
		 */
		if (req)
			reqsk_fastopen_remove(sk, req, false);
		inet_csk_destroy_sock(sk);
	}
	/* Otherwise, the socket is reprieved until protocol close.
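	 * (A timer - retransmit, zero-window probe, FIN-WAIT-2 or keepalive -
	 * still holds it, and inet_csk_destroy_sock() runs later, once the
	 * state machine finally reaches TCP_CLOSE.)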
*/ 2252 2253 out: 2254 bh_unlock_sock(sk); 2255 local_bh_enable(); 2256 sock_put(sk); 2257 } 2258 EXPORT_SYMBOL(tcp_close); 2259 2260 /* These states need RST on ABORT according to RFC793 */ 2261 2262 static inline bool tcp_need_reset(int state) 2263 { 2264 return (1 << state) & 2265 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | 2266 TCPF_FIN_WAIT2 | TCPF_SYN_RECV); 2267 } 2268 2269 int tcp_disconnect(struct sock *sk, int flags) 2270 { 2271 struct inet_sock *inet = inet_sk(sk); 2272 struct inet_connection_sock *icsk = inet_csk(sk); 2273 struct tcp_sock *tp = tcp_sk(sk); 2274 int err = 0; 2275 int old_state = sk->sk_state; 2276 2277 if (old_state != TCP_CLOSE) 2278 tcp_set_state(sk, TCP_CLOSE); 2279 2280 /* ABORT function of RFC793 */ 2281 if (old_state == TCP_LISTEN) { 2282 inet_csk_listen_stop(sk); 2283 } else if (unlikely(tp->repair)) { 2284 sk->sk_err = ECONNABORTED; 2285 } else if (tcp_need_reset(old_state) || 2286 (tp->snd_nxt != tp->write_seq && 2287 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { 2288 /* The last check adjusts for discrepancy of Linux wrt. RFC 2289 * states 2290 */ 2291 tcp_send_active_reset(sk, gfp_any()); 2292 sk->sk_err = ECONNRESET; 2293 } else if (old_state == TCP_SYN_SENT) 2294 sk->sk_err = ECONNRESET; 2295 2296 tcp_clear_xmit_timers(sk); 2297 __skb_queue_purge(&sk->sk_receive_queue); 2298 tcp_write_queue_purge(sk); 2299 tcp_fastopen_active_disable_ofo_check(sk); 2300 skb_rbtree_purge(&tp->out_of_order_queue); 2301 2302 inet->inet_dport = 0; 2303 2304 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) 2305 inet_reset_saddr(sk); 2306 2307 sk->sk_shutdown = 0; 2308 sock_reset_flag(sk, SOCK_DONE); 2309 tp->srtt_us = 0; 2310 tp->write_seq += tp->max_window + 2; 2311 if (tp->write_seq == 0) 2312 tp->write_seq = 1; 2313 icsk->icsk_backoff = 0; 2314 tp->snd_cwnd = 2; 2315 icsk->icsk_probes_out = 0; 2316 tp->packets_out = 0; 2317 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 2318 tp->snd_cwnd_cnt = 0; 2319 tp->window_clamp = 0; 2320 tcp_set_ca_state(sk, TCP_CA_Open); 2321 tcp_clear_retrans(tp); 2322 inet_csk_delack_init(sk); 2323 tcp_init_send_head(sk); 2324 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 2325 __sk_dst_reset(sk); 2326 tcp_saved_syn_free(tp); 2327 2328 /* Clean up fastopen related fields */ 2329 tcp_free_fastopen_req(tp); 2330 inet->defer_connect = 0; 2331 2332 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); 2333 2334 sk->sk_error_report(sk); 2335 return err; 2336 } 2337 EXPORT_SYMBOL(tcp_disconnect); 2338 2339 static inline bool tcp_can_repair_sock(const struct sock *sk) 2340 { 2341 return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && 2342 (sk->sk_state != TCP_LISTEN); 2343 } 2344 2345 static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int len) 2346 { 2347 struct tcp_repair_window opt; 2348 2349 if (!tp->repair) 2350 return -EPERM; 2351 2352 if (len != sizeof(opt)) 2353 return -EINVAL; 2354 2355 if (copy_from_user(&opt, optbuf, sizeof(opt))) 2356 return -EFAULT; 2357 2358 if (opt.max_window < opt.snd_wnd) 2359 return -EINVAL; 2360 2361 if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd)) 2362 return -EINVAL; 2363 2364 if (after(opt.rcv_wup, tp->rcv_nxt)) 2365 return -EINVAL; 2366 2367 tp->snd_wl1 = opt.snd_wl1; 2368 tp->snd_wnd = opt.snd_wnd; 2369 tp->max_window = opt.max_window; 2370 2371 tp->rcv_wnd = opt.rcv_wnd; 2372 tp->rcv_wup = opt.rcv_wup; 2373 2374 return 0; 2375 } 2376 2377 static int tcp_repair_options_est(struct tcp_sock *tp, 2378 struct tcp_repair_opt __user *optbuf, unsigned int len) 2379 { 2380 struct 
tcp_repair_opt opt; 2381 2382 while (len >= sizeof(opt)) { 2383 if (copy_from_user(&opt, optbuf, sizeof(opt))) 2384 return -EFAULT; 2385 2386 optbuf++; 2387 len -= sizeof(opt); 2388 2389 switch (opt.opt_code) { 2390 case TCPOPT_MSS: 2391 tp->rx_opt.mss_clamp = opt.opt_val; 2392 break; 2393 case TCPOPT_WINDOW: 2394 { 2395 u16 snd_wscale = opt.opt_val & 0xFFFF; 2396 u16 rcv_wscale = opt.opt_val >> 16; 2397 2398 if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE) 2399 return -EFBIG; 2400 2401 tp->rx_opt.snd_wscale = snd_wscale; 2402 tp->rx_opt.rcv_wscale = rcv_wscale; 2403 tp->rx_opt.wscale_ok = 1; 2404 } 2405 break; 2406 case TCPOPT_SACK_PERM: 2407 if (opt.opt_val != 0) 2408 return -EINVAL; 2409 2410 tp->rx_opt.sack_ok |= TCP_SACK_SEEN; 2411 if (sysctl_tcp_fack) 2412 tcp_enable_fack(tp); 2413 break; 2414 case TCPOPT_TIMESTAMP: 2415 if (opt.opt_val != 0) 2416 return -EINVAL; 2417 2418 tp->rx_opt.tstamp_ok = 1; 2419 break; 2420 } 2421 } 2422 2423 return 0; 2424 } 2425 2426 /* 2427 * Socket option code for TCP. 2428 */ 2429 static int do_tcp_setsockopt(struct sock *sk, int level, 2430 int optname, char __user *optval, unsigned int optlen) 2431 { 2432 struct tcp_sock *tp = tcp_sk(sk); 2433 struct inet_connection_sock *icsk = inet_csk(sk); 2434 struct net *net = sock_net(sk); 2435 int val; 2436 int err = 0; 2437 2438 /* These are data/string values, all the others are ints */ 2439 switch (optname) { 2440 case TCP_CONGESTION: { 2441 char name[TCP_CA_NAME_MAX]; 2442 2443 if (optlen < 1) 2444 return -EINVAL; 2445 2446 val = strncpy_from_user(name, optval, 2447 min_t(long, TCP_CA_NAME_MAX-1, optlen)); 2448 if (val < 0) 2449 return -EFAULT; 2450 name[val] = 0; 2451 2452 lock_sock(sk); 2453 err = tcp_set_congestion_control(sk, name); 2454 release_sock(sk); 2455 return err; 2456 } 2457 default: 2458 /* fallthru */ 2459 break; 2460 } 2461 2462 if (optlen < sizeof(int)) 2463 return -EINVAL; 2464 2465 if (get_user(val, (int __user *)optval)) 2466 return -EFAULT; 2467 2468 lock_sock(sk); 2469 2470 switch (optname) { 2471 case TCP_MAXSEG: 2472 /* Values greater than interface MTU won't take effect. However 2473 * at the point when this call is done we typically don't yet 2474 * know which interface is going to be used */ 2475 if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) { 2476 err = -EINVAL; 2477 break; 2478 } 2479 tp->rx_opt.user_mss = val; 2480 break; 2481 2482 case TCP_NODELAY: 2483 if (val) { 2484 /* TCP_NODELAY is weaker than TCP_CORK, so that 2485 * this option on corked socket is remembered, but 2486 * it is not activated until cork is cleared. 2487 * 2488 * However, when TCP_NODELAY is set we make 2489 * an explicit push, which overrides even TCP_CORK 2490 * for currently queued segments. 
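			 *
			 * Minimal userspace sketch of the semantics described
			 * above (illustrative only; "fd" is assumed to be a
			 * connected TCP socket):
			 *
			 *	int one = 1;
			 *
			 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY,
			 *		   &one, sizeof(one));
			 *	write(fd, buf, len);	<- pushed out at once,
			 *				   Nagle is disabled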
2491 */ 2492 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; 2493 tcp_push_pending_frames(sk); 2494 } else { 2495 tp->nonagle &= ~TCP_NAGLE_OFF; 2496 } 2497 break; 2498 2499 case TCP_THIN_LINEAR_TIMEOUTS: 2500 if (val < 0 || val > 1) 2501 err = -EINVAL; 2502 else 2503 tp->thin_lto = val; 2504 break; 2505 2506 case TCP_THIN_DUPACK: 2507 if (val < 0 || val > 1) 2508 err = -EINVAL; 2509 break; 2510 2511 case TCP_REPAIR: 2512 if (!tcp_can_repair_sock(sk)) 2513 err = -EPERM; 2514 else if (val == 1) { 2515 tp->repair = 1; 2516 sk->sk_reuse = SK_FORCE_REUSE; 2517 tp->repair_queue = TCP_NO_QUEUE; 2518 } else if (val == 0) { 2519 tp->repair = 0; 2520 sk->sk_reuse = SK_NO_REUSE; 2521 tcp_send_window_probe(sk); 2522 } else 2523 err = -EINVAL; 2524 2525 break; 2526 2527 case TCP_REPAIR_QUEUE: 2528 if (!tp->repair) 2529 err = -EPERM; 2530 else if (val < TCP_QUEUES_NR) 2531 tp->repair_queue = val; 2532 else 2533 err = -EINVAL; 2534 break; 2535 2536 case TCP_QUEUE_SEQ: 2537 if (sk->sk_state != TCP_CLOSE) 2538 err = -EPERM; 2539 else if (tp->repair_queue == TCP_SEND_QUEUE) 2540 tp->write_seq = val; 2541 else if (tp->repair_queue == TCP_RECV_QUEUE) 2542 tp->rcv_nxt = val; 2543 else 2544 err = -EINVAL; 2545 break; 2546 2547 case TCP_REPAIR_OPTIONS: 2548 if (!tp->repair) 2549 err = -EINVAL; 2550 else if (sk->sk_state == TCP_ESTABLISHED) 2551 err = tcp_repair_options_est(tp, 2552 (struct tcp_repair_opt __user *)optval, 2553 optlen); 2554 else 2555 err = -EPERM; 2556 break; 2557 2558 case TCP_CORK: 2559 /* When set indicates to always queue non-full frames. 2560 * Later the user clears this option and we transmit 2561 * any pending partial frames in the queue. This is 2562 * meant to be used alongside sendfile() to get properly 2563 * filled frames when the user (for example) must write 2564 * out headers with a write() call first and then use 2565 * sendfile to send out the data parts. 2566 * 2567 * TCP_CORK can be set together with TCP_NODELAY and it is 2568 * stronger than TCP_NODELAY. 
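		 *
		 * Illustrative userspace sketch of the header + sendfile()
		 * pattern described above (hypothetical descriptors and
		 * lengths, not part of this file):
		 *
		 *	int on = 1, off = 0;
		 *
		 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
		 *	write(fd, hdr, hdr_len);		<- queued, not sent
		 *	sendfile(fd, file_fd, NULL, body_len);	<- coalesced
		 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
		 *						<- uncork and flush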
2569 */ 2570 if (val) { 2571 tp->nonagle |= TCP_NAGLE_CORK; 2572 } else { 2573 tp->nonagle &= ~TCP_NAGLE_CORK; 2574 if (tp->nonagle&TCP_NAGLE_OFF) 2575 tp->nonagle |= TCP_NAGLE_PUSH; 2576 tcp_push_pending_frames(sk); 2577 } 2578 break; 2579 2580 case TCP_KEEPIDLE: 2581 if (val < 1 || val > MAX_TCP_KEEPIDLE) 2582 err = -EINVAL; 2583 else { 2584 tp->keepalive_time = val * HZ; 2585 if (sock_flag(sk, SOCK_KEEPOPEN) && 2586 !((1 << sk->sk_state) & 2587 (TCPF_CLOSE | TCPF_LISTEN))) { 2588 u32 elapsed = keepalive_time_elapsed(tp); 2589 if (tp->keepalive_time > elapsed) 2590 elapsed = tp->keepalive_time - elapsed; 2591 else 2592 elapsed = 0; 2593 inet_csk_reset_keepalive_timer(sk, elapsed); 2594 } 2595 } 2596 break; 2597 case TCP_KEEPINTVL: 2598 if (val < 1 || val > MAX_TCP_KEEPINTVL) 2599 err = -EINVAL; 2600 else 2601 tp->keepalive_intvl = val * HZ; 2602 break; 2603 case TCP_KEEPCNT: 2604 if (val < 1 || val > MAX_TCP_KEEPCNT) 2605 err = -EINVAL; 2606 else 2607 tp->keepalive_probes = val; 2608 break; 2609 case TCP_SYNCNT: 2610 if (val < 1 || val > MAX_TCP_SYNCNT) 2611 err = -EINVAL; 2612 else 2613 icsk->icsk_syn_retries = val; 2614 break; 2615 2616 case TCP_SAVE_SYN: 2617 if (val < 0 || val > 1) 2618 err = -EINVAL; 2619 else 2620 tp->save_syn = val; 2621 break; 2622 2623 case TCP_LINGER2: 2624 if (val < 0) 2625 tp->linger2 = -1; 2626 else if (val > net->ipv4.sysctl_tcp_fin_timeout / HZ) 2627 tp->linger2 = 0; 2628 else 2629 tp->linger2 = val * HZ; 2630 break; 2631 2632 case TCP_DEFER_ACCEPT: 2633 /* Translate value in seconds to number of retransmits */ 2634 icsk->icsk_accept_queue.rskq_defer_accept = 2635 secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, 2636 TCP_RTO_MAX / HZ); 2637 break; 2638 2639 case TCP_WINDOW_CLAMP: 2640 if (!val) { 2641 if (sk->sk_state != TCP_CLOSE) { 2642 err = -EINVAL; 2643 break; 2644 } 2645 tp->window_clamp = 0; 2646 } else 2647 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? 2648 SOCK_MIN_RCVBUF / 2 : val; 2649 break; 2650 2651 case TCP_QUICKACK: 2652 if (!val) { 2653 icsk->icsk_ack.pingpong = 1; 2654 } else { 2655 icsk->icsk_ack.pingpong = 0; 2656 if ((1 << sk->sk_state) & 2657 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && 2658 inet_csk_ack_scheduled(sk)) { 2659 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; 2660 tcp_cleanup_rbuf(sk, 1); 2661 if (!(val & 1)) 2662 icsk->icsk_ack.pingpong = 1; 2663 } 2664 } 2665 break; 2666 2667 #ifdef CONFIG_TCP_MD5SIG 2668 case TCP_MD5SIG: 2669 /* Read the IP->Key mappings from userspace */ 2670 err = tp->af_specific->md5_parse(sk, optval, optlen); 2671 break; 2672 #endif 2673 case TCP_USER_TIMEOUT: 2674 /* Cap the max time in ms TCP will retry or probe the window 2675 * before giving up and aborting (ETIMEDOUT) a connection. 
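		 *
		 * For example (userspace, illustrative only), to give up on a
		 * connection after roughly 30 seconds of unacknowledged data:
		 *
		 *	unsigned int tmo_ms = 30000;
		 *
		 *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
		 *		   &tmo_ms, sizeof(tmo_ms));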
2676 */ 2677 if (val < 0) 2678 err = -EINVAL; 2679 else 2680 icsk->icsk_user_timeout = msecs_to_jiffies(val); 2681 break; 2682 2683 case TCP_FASTOPEN: 2684 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | 2685 TCPF_LISTEN))) { 2686 tcp_fastopen_init_key_once(true); 2687 2688 fastopen_queue_tune(sk, val); 2689 } else { 2690 err = -EINVAL; 2691 } 2692 break; 2693 case TCP_FASTOPEN_CONNECT: 2694 if (val > 1 || val < 0) { 2695 err = -EINVAL; 2696 } else if (sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) { 2697 if (sk->sk_state == TCP_CLOSE) 2698 tp->fastopen_connect = val; 2699 else 2700 err = -EINVAL; 2701 } else { 2702 err = -EOPNOTSUPP; 2703 } 2704 break; 2705 case TCP_TIMESTAMP: 2706 if (!tp->repair) 2707 err = -EPERM; 2708 else 2709 tp->tsoffset = val - tcp_time_stamp; 2710 break; 2711 case TCP_REPAIR_WINDOW: 2712 err = tcp_repair_set_window(tp, optval, optlen); 2713 break; 2714 case TCP_NOTSENT_LOWAT: 2715 tp->notsent_lowat = val; 2716 sk->sk_write_space(sk); 2717 break; 2718 default: 2719 err = -ENOPROTOOPT; 2720 break; 2721 } 2722 2723 release_sock(sk); 2724 return err; 2725 } 2726 2727 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, 2728 unsigned int optlen) 2729 { 2730 const struct inet_connection_sock *icsk = inet_csk(sk); 2731 2732 if (level != SOL_TCP) 2733 return icsk->icsk_af_ops->setsockopt(sk, level, optname, 2734 optval, optlen); 2735 return do_tcp_setsockopt(sk, level, optname, optval, optlen); 2736 } 2737 EXPORT_SYMBOL(tcp_setsockopt); 2738 2739 #ifdef CONFIG_COMPAT 2740 int compat_tcp_setsockopt(struct sock *sk, int level, int optname, 2741 char __user *optval, unsigned int optlen) 2742 { 2743 if (level != SOL_TCP) 2744 return inet_csk_compat_setsockopt(sk, level, optname, 2745 optval, optlen); 2746 return do_tcp_setsockopt(sk, level, optname, optval, optlen); 2747 } 2748 EXPORT_SYMBOL(compat_tcp_setsockopt); 2749 #endif 2750 2751 static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, 2752 struct tcp_info *info) 2753 { 2754 u64 stats[__TCP_CHRONO_MAX], total = 0; 2755 enum tcp_chrono i; 2756 2757 for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) { 2758 stats[i] = tp->chrono_stat[i - 1]; 2759 if (i == tp->chrono_type) 2760 stats[i] += tcp_time_stamp - tp->chrono_start; 2761 stats[i] *= USEC_PER_SEC / HZ; 2762 total += stats[i]; 2763 } 2764 2765 info->tcpi_busy_time = total; 2766 info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED]; 2767 info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED]; 2768 } 2769 2770 /* Return information about state of tcp endpoint in API format. */ 2771 void tcp_get_info(struct sock *sk, struct tcp_info *info) 2772 { 2773 const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ 2774 const struct inet_connection_sock *icsk = inet_csk(sk); 2775 u32 now, intv; 2776 u64 rate64; 2777 bool slow; 2778 u32 rate; 2779 2780 memset(info, 0, sizeof(*info)); 2781 if (sk->sk_type != SOCK_STREAM) 2782 return; 2783 2784 info->tcpi_state = sk_state_load(sk); 2785 2786 /* Report meaningful fields for all TCP states, including listeners */ 2787 rate = READ_ONCE(sk->sk_pacing_rate); 2788 rate64 = rate != ~0U ? rate : ~0ULL; 2789 info->tcpi_pacing_rate = rate64; 2790 2791 rate = READ_ONCE(sk->sk_max_pacing_rate); 2792 rate64 = rate != ~0U ? 
rate : ~0ULL; 2793 info->tcpi_max_pacing_rate = rate64; 2794 2795 info->tcpi_reordering = tp->reordering; 2796 info->tcpi_snd_cwnd = tp->snd_cwnd; 2797 2798 if (info->tcpi_state == TCP_LISTEN) { 2799 /* listeners aliased fields : 2800 * tcpi_unacked -> Number of children ready for accept() 2801 * tcpi_sacked -> max backlog 2802 */ 2803 info->tcpi_unacked = sk->sk_ack_backlog; 2804 info->tcpi_sacked = sk->sk_max_ack_backlog; 2805 return; 2806 } 2807 2808 slow = lock_sock_fast(sk); 2809 2810 info->tcpi_ca_state = icsk->icsk_ca_state; 2811 info->tcpi_retransmits = icsk->icsk_retransmits; 2812 info->tcpi_probes = icsk->icsk_probes_out; 2813 info->tcpi_backoff = icsk->icsk_backoff; 2814 2815 if (tp->rx_opt.tstamp_ok) 2816 info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 2817 if (tcp_is_sack(tp)) 2818 info->tcpi_options |= TCPI_OPT_SACK; 2819 if (tp->rx_opt.wscale_ok) { 2820 info->tcpi_options |= TCPI_OPT_WSCALE; 2821 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; 2822 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; 2823 } 2824 2825 if (tp->ecn_flags & TCP_ECN_OK) 2826 info->tcpi_options |= TCPI_OPT_ECN; 2827 if (tp->ecn_flags & TCP_ECN_SEEN) 2828 info->tcpi_options |= TCPI_OPT_ECN_SEEN; 2829 if (tp->syn_data_acked) 2830 info->tcpi_options |= TCPI_OPT_SYN_DATA; 2831 2832 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); 2833 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); 2834 info->tcpi_snd_mss = tp->mss_cache; 2835 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; 2836 2837 info->tcpi_unacked = tp->packets_out; 2838 info->tcpi_sacked = tp->sacked_out; 2839 2840 info->tcpi_lost = tp->lost_out; 2841 info->tcpi_retrans = tp->retrans_out; 2842 info->tcpi_fackets = tp->fackets_out; 2843 2844 now = tcp_time_stamp; 2845 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); 2846 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); 2847 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); 2848 2849 info->tcpi_pmtu = icsk->icsk_pmtu_cookie; 2850 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; 2851 info->tcpi_rtt = tp->srtt_us >> 3; 2852 info->tcpi_rttvar = tp->mdev_us >> 2; 2853 info->tcpi_snd_ssthresh = tp->snd_ssthresh; 2854 info->tcpi_advmss = tp->advmss; 2855 2856 info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3; 2857 info->tcpi_rcv_space = tp->rcvq_space.space; 2858 2859 info->tcpi_total_retrans = tp->total_retrans; 2860 2861 info->tcpi_bytes_acked = tp->bytes_acked; 2862 info->tcpi_bytes_received = tp->bytes_received; 2863 info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt); 2864 tcp_get_info_chrono_stats(tp, info); 2865 2866 info->tcpi_segs_out = tp->segs_out; 2867 info->tcpi_segs_in = tp->segs_in; 2868 2869 info->tcpi_min_rtt = tcp_min_rtt(tp); 2870 info->tcpi_data_segs_in = tp->data_segs_in; 2871 info->tcpi_data_segs_out = tp->data_segs_out; 2872 2873 info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 
1 : 0; 2874 rate = READ_ONCE(tp->rate_delivered); 2875 intv = READ_ONCE(tp->rate_interval_us); 2876 if (rate && intv) { 2877 rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC; 2878 do_div(rate64, intv); 2879 info->tcpi_delivery_rate = rate64; 2880 } 2881 unlock_sock_fast(sk, slow); 2882 } 2883 EXPORT_SYMBOL_GPL(tcp_get_info); 2884 2885 struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk) 2886 { 2887 const struct tcp_sock *tp = tcp_sk(sk); 2888 struct sk_buff *stats; 2889 struct tcp_info info; 2890 2891 stats = alloc_skb(5 * nla_total_size_64bit(sizeof(u64)), GFP_ATOMIC); 2892 if (!stats) 2893 return NULL; 2894 2895 tcp_get_info_chrono_stats(tp, &info); 2896 nla_put_u64_64bit(stats, TCP_NLA_BUSY, 2897 info.tcpi_busy_time, TCP_NLA_PAD); 2898 nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED, 2899 info.tcpi_rwnd_limited, TCP_NLA_PAD); 2900 nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED, 2901 info.tcpi_sndbuf_limited, TCP_NLA_PAD); 2902 nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT, 2903 tp->data_segs_out, TCP_NLA_PAD); 2904 nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS, 2905 tp->total_retrans, TCP_NLA_PAD); 2906 return stats; 2907 } 2908 2909 static int do_tcp_getsockopt(struct sock *sk, int level, 2910 int optname, char __user *optval, int __user *optlen) 2911 { 2912 struct inet_connection_sock *icsk = inet_csk(sk); 2913 struct tcp_sock *tp = tcp_sk(sk); 2914 struct net *net = sock_net(sk); 2915 int val, len; 2916 2917 if (get_user(len, optlen)) 2918 return -EFAULT; 2919 2920 len = min_t(unsigned int, len, sizeof(int)); 2921 2922 if (len < 0) 2923 return -EINVAL; 2924 2925 switch (optname) { 2926 case TCP_MAXSEG: 2927 val = tp->mss_cache; 2928 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 2929 val = tp->rx_opt.user_mss; 2930 if (tp->repair) 2931 val = tp->rx_opt.mss_clamp; 2932 break; 2933 case TCP_NODELAY: 2934 val = !!(tp->nonagle&TCP_NAGLE_OFF); 2935 break; 2936 case TCP_CORK: 2937 val = !!(tp->nonagle&TCP_NAGLE_CORK); 2938 break; 2939 case TCP_KEEPIDLE: 2940 val = keepalive_time_when(tp) / HZ; 2941 break; 2942 case TCP_KEEPINTVL: 2943 val = keepalive_intvl_when(tp) / HZ; 2944 break; 2945 case TCP_KEEPCNT: 2946 val = keepalive_probes(tp); 2947 break; 2948 case TCP_SYNCNT: 2949 val = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries; 2950 break; 2951 case TCP_LINGER2: 2952 val = tp->linger2; 2953 if (val >= 0) 2954 val = (val ? 
: net->ipv4.sysctl_tcp_fin_timeout) / HZ; 2955 break; 2956 case TCP_DEFER_ACCEPT: 2957 val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept, 2958 TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ); 2959 break; 2960 case TCP_WINDOW_CLAMP: 2961 val = tp->window_clamp; 2962 break; 2963 case TCP_INFO: { 2964 struct tcp_info info; 2965 2966 if (get_user(len, optlen)) 2967 return -EFAULT; 2968 2969 tcp_get_info(sk, &info); 2970 2971 len = min_t(unsigned int, len, sizeof(info)); 2972 if (put_user(len, optlen)) 2973 return -EFAULT; 2974 if (copy_to_user(optval, &info, len)) 2975 return -EFAULT; 2976 return 0; 2977 } 2978 case TCP_CC_INFO: { 2979 const struct tcp_congestion_ops *ca_ops; 2980 union tcp_cc_info info; 2981 size_t sz = 0; 2982 int attr; 2983 2984 if (get_user(len, optlen)) 2985 return -EFAULT; 2986 2987 ca_ops = icsk->icsk_ca_ops; 2988 if (ca_ops && ca_ops->get_info) 2989 sz = ca_ops->get_info(sk, ~0U, &attr, &info); 2990 2991 len = min_t(unsigned int, len, sz); 2992 if (put_user(len, optlen)) 2993 return -EFAULT; 2994 if (copy_to_user(optval, &info, len)) 2995 return -EFAULT; 2996 return 0; 2997 } 2998 case TCP_QUICKACK: 2999 val = !icsk->icsk_ack.pingpong; 3000 break; 3001 3002 case TCP_CONGESTION: 3003 if (get_user(len, optlen)) 3004 return -EFAULT; 3005 len = min_t(unsigned int, len, TCP_CA_NAME_MAX); 3006 if (put_user(len, optlen)) 3007 return -EFAULT; 3008 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) 3009 return -EFAULT; 3010 return 0; 3011 3012 case TCP_THIN_LINEAR_TIMEOUTS: 3013 val = tp->thin_lto; 3014 break; 3015 3016 case TCP_THIN_DUPACK: 3017 val = 0; 3018 break; 3019 3020 case TCP_REPAIR: 3021 val = tp->repair; 3022 break; 3023 3024 case TCP_REPAIR_QUEUE: 3025 if (tp->repair) 3026 val = tp->repair_queue; 3027 else 3028 return -EINVAL; 3029 break; 3030 3031 case TCP_REPAIR_WINDOW: { 3032 struct tcp_repair_window opt; 3033 3034 if (get_user(len, optlen)) 3035 return -EFAULT; 3036 3037 if (len != sizeof(opt)) 3038 return -EINVAL; 3039 3040 if (!tp->repair) 3041 return -EPERM; 3042 3043 opt.snd_wl1 = tp->snd_wl1; 3044 opt.snd_wnd = tp->snd_wnd; 3045 opt.max_window = tp->max_window; 3046 opt.rcv_wnd = tp->rcv_wnd; 3047 opt.rcv_wup = tp->rcv_wup; 3048 3049 if (copy_to_user(optval, &opt, len)) 3050 return -EFAULT; 3051 return 0; 3052 } 3053 case TCP_QUEUE_SEQ: 3054 if (tp->repair_queue == TCP_SEND_QUEUE) 3055 val = tp->write_seq; 3056 else if (tp->repair_queue == TCP_RECV_QUEUE) 3057 val = tp->rcv_nxt; 3058 else 3059 return -EINVAL; 3060 break; 3061 3062 case TCP_USER_TIMEOUT: 3063 val = jiffies_to_msecs(icsk->icsk_user_timeout); 3064 break; 3065 3066 case TCP_FASTOPEN: 3067 val = icsk->icsk_accept_queue.fastopenq.max_qlen; 3068 break; 3069 3070 case TCP_FASTOPEN_CONNECT: 3071 val = tp->fastopen_connect; 3072 break; 3073 3074 case TCP_TIMESTAMP: 3075 val = tcp_time_stamp + tp->tsoffset; 3076 break; 3077 case TCP_NOTSENT_LOWAT: 3078 val = tp->notsent_lowat; 3079 break; 3080 case TCP_SAVE_SYN: 3081 val = tp->save_syn; 3082 break; 3083 case TCP_SAVED_SYN: { 3084 if (get_user(len, optlen)) 3085 return -EFAULT; 3086 3087 lock_sock(sk); 3088 if (tp->saved_syn) { 3089 if (len < tp->saved_syn[0]) { 3090 if (put_user(tp->saved_syn[0], optlen)) { 3091 release_sock(sk); 3092 return -EFAULT; 3093 } 3094 release_sock(sk); 3095 return -EINVAL; 3096 } 3097 len = tp->saved_syn[0]; 3098 if (put_user(len, optlen)) { 3099 release_sock(sk); 3100 return -EFAULT; 3101 } 3102 if (copy_to_user(optval, tp->saved_syn + 1, len)) { 3103 release_sock(sk); 3104 return -EFAULT; 3105 } 3106 
tcp_saved_syn_free(tp); 3107 release_sock(sk); 3108 } else { 3109 release_sock(sk); 3110 len = 0; 3111 if (put_user(len, optlen)) 3112 return -EFAULT; 3113 } 3114 return 0; 3115 } 3116 default: 3117 return -ENOPROTOOPT; 3118 } 3119 3120 if (put_user(len, optlen)) 3121 return -EFAULT; 3122 if (copy_to_user(optval, &val, len)) 3123 return -EFAULT; 3124 return 0; 3125 } 3126 3127 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, 3128 int __user *optlen) 3129 { 3130 struct inet_connection_sock *icsk = inet_csk(sk); 3131 3132 if (level != SOL_TCP) 3133 return icsk->icsk_af_ops->getsockopt(sk, level, optname, 3134 optval, optlen); 3135 return do_tcp_getsockopt(sk, level, optname, optval, optlen); 3136 } 3137 EXPORT_SYMBOL(tcp_getsockopt); 3138 3139 #ifdef CONFIG_COMPAT 3140 int compat_tcp_getsockopt(struct sock *sk, int level, int optname, 3141 char __user *optval, int __user *optlen) 3142 { 3143 if (level != SOL_TCP) 3144 return inet_csk_compat_getsockopt(sk, level, optname, 3145 optval, optlen); 3146 return do_tcp_getsockopt(sk, level, optname, optval, optlen); 3147 } 3148 EXPORT_SYMBOL(compat_tcp_getsockopt); 3149 #endif 3150 3151 #ifdef CONFIG_TCP_MD5SIG 3152 static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool); 3153 static DEFINE_MUTEX(tcp_md5sig_mutex); 3154 static bool tcp_md5sig_pool_populated = false; 3155 3156 static void __tcp_alloc_md5sig_pool(void) 3157 { 3158 struct crypto_ahash *hash; 3159 int cpu; 3160 3161 hash = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC); 3162 if (IS_ERR(hash)) 3163 return; 3164 3165 for_each_possible_cpu(cpu) { 3166 void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch; 3167 struct ahash_request *req; 3168 3169 if (!scratch) { 3170 scratch = kmalloc_node(sizeof(union tcp_md5sum_block) + 3171 sizeof(struct tcphdr), 3172 GFP_KERNEL, 3173 cpu_to_node(cpu)); 3174 if (!scratch) 3175 return; 3176 per_cpu(tcp_md5sig_pool, cpu).scratch = scratch; 3177 } 3178 if (per_cpu(tcp_md5sig_pool, cpu).md5_req) 3179 continue; 3180 3181 req = ahash_request_alloc(hash, GFP_KERNEL); 3182 if (!req) 3183 return; 3184 3185 ahash_request_set_callback(req, 0, NULL, NULL); 3186 3187 per_cpu(tcp_md5sig_pool, cpu).md5_req = req; 3188 } 3189 /* before setting tcp_md5sig_pool_populated, we must commit all writes 3190 * to memory. See smp_rmb() in tcp_get_md5sig_pool() 3191 */ 3192 smp_wmb(); 3193 tcp_md5sig_pool_populated = true; 3194 } 3195 3196 bool tcp_alloc_md5sig_pool(void) 3197 { 3198 if (unlikely(!tcp_md5sig_pool_populated)) { 3199 mutex_lock(&tcp_md5sig_mutex); 3200 3201 if (!tcp_md5sig_pool_populated) 3202 __tcp_alloc_md5sig_pool(); 3203 3204 mutex_unlock(&tcp_md5sig_mutex); 3205 } 3206 return tcp_md5sig_pool_populated; 3207 } 3208 EXPORT_SYMBOL(tcp_alloc_md5sig_pool); 3209 3210 3211 /** 3212 * tcp_get_md5sig_pool - get md5sig_pool for this user 3213 * 3214 * We use percpu structure, so if we succeed, we exit with preemption 3215 * and BH disabled, to make sure another thread or softirq handling 3216 * wont try to get same context. 
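 *
 * Typical caller pattern (sketch; tcp_put_md5sig_pool() is the matching
 * helper from <net/tcp.h> that re-enables BH):
 *
 *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *
 *	if (hp) {
 *		... crypto_ahash_init(hp->md5_req), tcp_md5_hash_key(), ...
 *		tcp_put_md5sig_pool();
 *	}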
3217 */ 3218 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) 3219 { 3220 local_bh_disable(); 3221 3222 if (tcp_md5sig_pool_populated) { 3223 /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */ 3224 smp_rmb(); 3225 return this_cpu_ptr(&tcp_md5sig_pool); 3226 } 3227 local_bh_enable(); 3228 return NULL; 3229 } 3230 EXPORT_SYMBOL(tcp_get_md5sig_pool); 3231 3232 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, 3233 const struct sk_buff *skb, unsigned int header_len) 3234 { 3235 struct scatterlist sg; 3236 const struct tcphdr *tp = tcp_hdr(skb); 3237 struct ahash_request *req = hp->md5_req; 3238 unsigned int i; 3239 const unsigned int head_data_len = skb_headlen(skb) > header_len ? 3240 skb_headlen(skb) - header_len : 0; 3241 const struct skb_shared_info *shi = skb_shinfo(skb); 3242 struct sk_buff *frag_iter; 3243 3244 sg_init_table(&sg, 1); 3245 3246 sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len); 3247 ahash_request_set_crypt(req, &sg, NULL, head_data_len); 3248 if (crypto_ahash_update(req)) 3249 return 1; 3250 3251 for (i = 0; i < shi->nr_frags; ++i) { 3252 const struct skb_frag_struct *f = &shi->frags[i]; 3253 unsigned int offset = f->page_offset; 3254 struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT); 3255 3256 sg_set_page(&sg, page, skb_frag_size(f), 3257 offset_in_page(offset)); 3258 ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f)); 3259 if (crypto_ahash_update(req)) 3260 return 1; 3261 } 3262 3263 skb_walk_frags(skb, frag_iter) 3264 if (tcp_md5_hash_skb_data(hp, frag_iter, 0)) 3265 return 1; 3266 3267 return 0; 3268 } 3269 EXPORT_SYMBOL(tcp_md5_hash_skb_data); 3270 3271 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key) 3272 { 3273 struct scatterlist sg; 3274 3275 sg_init_one(&sg, key->key, key->keylen); 3276 ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen); 3277 return crypto_ahash_update(hp->md5_req); 3278 } 3279 EXPORT_SYMBOL(tcp_md5_hash_key); 3280 3281 #endif 3282 3283 void tcp_done(struct sock *sk) 3284 { 3285 struct request_sock *req = tcp_sk(sk)->fastopen_rsk; 3286 3287 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) 3288 TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); 3289 3290 tcp_set_state(sk, TCP_CLOSE); 3291 tcp_clear_xmit_timers(sk); 3292 if (req) 3293 reqsk_fastopen_remove(sk, req, false); 3294 3295 sk->sk_shutdown = SHUTDOWN_MASK; 3296 3297 if (!sock_flag(sk, SOCK_DEAD)) 3298 sk->sk_state_change(sk); 3299 else 3300 inet_csk_destroy_sock(sk); 3301 } 3302 EXPORT_SYMBOL_GPL(tcp_done); 3303 3304 int tcp_abort(struct sock *sk, int err) 3305 { 3306 if (!sk_fullsock(sk)) { 3307 if (sk->sk_state == TCP_NEW_SYN_RECV) { 3308 struct request_sock *req = inet_reqsk(sk); 3309 3310 local_bh_disable(); 3311 inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, 3312 req); 3313 local_bh_enable(); 3314 return 0; 3315 } 3316 return -EOPNOTSUPP; 3317 } 3318 3319 /* Don't race with userspace socket closes such as tcp_close. */ 3320 lock_sock(sk); 3321 3322 if (sk->sk_state == TCP_LISTEN) { 3323 tcp_set_state(sk, TCP_CLOSE); 3324 inet_csk_listen_stop(sk); 3325 } 3326 3327 /* Don't race with BH socket closes such as inet_csk_listen_stop. 
*/ 3328 local_bh_disable(); 3329 bh_lock_sock(sk); 3330 3331 if (!sock_flag(sk, SOCK_DEAD)) { 3332 sk->sk_err = err; 3333 /* This barrier is coupled with smp_rmb() in tcp_poll() */ 3334 smp_wmb(); 3335 sk->sk_error_report(sk); 3336 if (tcp_need_reset(sk->sk_state)) 3337 tcp_send_active_reset(sk, GFP_ATOMIC); 3338 tcp_done(sk); 3339 } 3340 3341 bh_unlock_sock(sk); 3342 local_bh_enable(); 3343 release_sock(sk); 3344 return 0; 3345 } 3346 EXPORT_SYMBOL_GPL(tcp_abort); 3347 3348 extern struct tcp_congestion_ops tcp_reno; 3349 3350 static __initdata unsigned long thash_entries; 3351 static int __init set_thash_entries(char *str) 3352 { 3353 ssize_t ret; 3354 3355 if (!str) 3356 return 0; 3357 3358 ret = kstrtoul(str, 0, &thash_entries); 3359 if (ret) 3360 return 0; 3361 3362 return 1; 3363 } 3364 __setup("thash_entries=", set_thash_entries); 3365 3366 static void __init tcp_init_mem(void) 3367 { 3368 unsigned long limit = nr_free_buffer_pages() / 16; 3369 3370 limit = max(limit, 128UL); 3371 sysctl_tcp_mem[0] = limit / 4 * 3; /* 4.68 % */ 3372 sysctl_tcp_mem[1] = limit; /* 6.25 % */ 3373 sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; /* 9.37 % */ 3374 } 3375 3376 void __init tcp_init(void) 3377 { 3378 int max_rshare, max_wshare, cnt; 3379 unsigned long limit; 3380 unsigned int i; 3381 3382 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > 3383 FIELD_SIZEOF(struct sk_buff, cb)); 3384 3385 percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL); 3386 percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL); 3387 inet_hashinfo_init(&tcp_hashinfo); 3388 tcp_hashinfo.bind_bucket_cachep = 3389 kmem_cache_create("tcp_bind_bucket", 3390 sizeof(struct inet_bind_bucket), 0, 3391 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 3392 3393 /* Size and allocate the main established and bind bucket 3394 * hash tables. 3395 * 3396 * The methodology is similar to that of the buffer cache. 3397 */ 3398 tcp_hashinfo.ehash = 3399 alloc_large_system_hash("TCP established", 3400 sizeof(struct inet_ehash_bucket), 3401 thash_entries, 3402 17, /* one slot per 128 KB of memory */ 3403 0, 3404 NULL, 3405 &tcp_hashinfo.ehash_mask, 3406 0, 3407 thash_entries ? 
0 : 512 * 1024); 3408 for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) 3409 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); 3410 3411 if (inet_ehash_locks_alloc(&tcp_hashinfo)) 3412 panic("TCP: failed to alloc ehash_locks"); 3413 tcp_hashinfo.bhash = 3414 alloc_large_system_hash("TCP bind", 3415 sizeof(struct inet_bind_hashbucket), 3416 tcp_hashinfo.ehash_mask + 1, 3417 17, /* one slot per 128 KB of memory */ 3418 0, 3419 &tcp_hashinfo.bhash_size, 3420 NULL, 3421 0, 3422 64 * 1024); 3423 tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; 3424 for (i = 0; i < tcp_hashinfo.bhash_size; i++) { 3425 spin_lock_init(&tcp_hashinfo.bhash[i].lock); 3426 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); 3427 } 3428 3429 3430 cnt = tcp_hashinfo.ehash_mask + 1; 3431 sysctl_tcp_max_orphans = cnt / 2; 3432 3433 tcp_init_mem(); 3434 /* Set per-socket limits to no more than 1/128 the pressure threshold */ 3435 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); 3436 max_wshare = min(4UL*1024*1024, limit); 3437 max_rshare = min(6UL*1024*1024, limit); 3438 3439 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; 3440 sysctl_tcp_wmem[1] = 16*1024; 3441 sysctl_tcp_wmem[2] = max(64*1024, max_wshare); 3442 3443 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; 3444 sysctl_tcp_rmem[1] = 87380; 3445 sysctl_tcp_rmem[2] = max(87380, max_rshare); 3446 3447 pr_info("Hash tables configured (established %u bind %u)\n", 3448 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); 3449 3450 tcp_v4_init(); 3451 tcp_metrics_init(); 3452 BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0); 3453 tcp_tasklet_init(); 3454 } 3455