// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly	:	ack < copied bug fix.
 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon  :	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
#include <linux/static_key.h>
#include <linux/btf.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>

/* Track pending CMSGs. */
enum {
	TCP_CMSG_INQ = 1,
	TCP_CMSG_TS = 2
};

DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);

long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);

atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc);

#if IS_ENABLED(CONFIG_SMC)
DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
EXPORT_SYMBOL(tcp_have_smc);
#endif

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
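 *
 * (Editorial note, grounded in the helpers below: tcp_enter_memory_pressure()
 * stamps this variable with the jiffies value at which pressure began, and
 * tcp_leave_memory_pressure() clears it again and accounts the elapsed time
 * in LINUX_MIB_TCPMEMORYPRESSURESCHRONO.)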
 */
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (READ_ONCE(tcp_memory_pressure))
		return;
	val = jiffies;

	if (!val)
		val--;
	if (!cmpxchg(&tcp_memory_pressure, 0, val))
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
}
EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);

void tcp_leave_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (!READ_ONCE(tcp_memory_pressure))
		return;
	val = xchg(&tcp_memory_pressure, 0);
	if (val)
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
			      jiffies_to_msecs(jiffies - val));
}
EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}

static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
{
	u32 rate = READ_ONCE(tp->rate_delivered);
	u32 intv = READ_ONCE(tp->rate_interval_us);
	u64 rate64 = 0;

	if (rate && intv) {
		rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
		do_div(rate64, intv);
	}
	return rate64;
}

/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->out_of_order_queue = RB_ROOT;
	sk->tcp_rtx_queue = RB_ROOT;
	tcp_init_xmit_timers(sk);
	INIT_LIST_HEAD(&tp->tsq_node);
	INIT_LIST_HEAD(&tp->tsorted_sent_queue);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	icsk->icsk_rto_min = TCP_RTO_MIN;
	icsk->icsk_delack_max = TCP_DELACK_MAX;
	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);

	/* There's a bubble in the pipe until at least the first ACK. */
	tp->app_limited = ~0U;
	tp->rate_app_limited = 1;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
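	 *
	 * (Editorial note, not part of the referenced draft: snd_ssthresh
	 * starts effectively infinite so slow start governs until the
	 * congestion control module lowers it, snd_cwnd_clamp starts
	 * unclamped, and mss_cache starts at the conservative
	 * TCP_MSS_DEFAULT until the route and peer MSS are known.)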
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
	tcp_assign_congestion_control(sk);

	tp->tsoffset = 0;
	tp->rack.reo_wnd_steps = 1;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_sync_mss = tcp_sync_mss;

	WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]));
	WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]));

	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	sk_sockets_allocated_inc(sk);
}
EXPORT_SYMBOL(tcp_init_sock);

static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (tsflags && skb) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);
		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
			tcb->txstamp_ack = 1;
		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
	}
}

static bool tcp_stream_is_readable(struct sock *sk, int target)
{
	if (tcp_epollin_ready(sk, target))
		return true;
	return sk_is_readable(sk);
}

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask;
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);
	u8 shutdown;
	int state;

	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * EPOLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that EPOLLHUP is incompatible
	 * with the EPOLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if EPOLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why EPOLLHUP is incompatible with EPOLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	shutdown = READ_ONCE(sk->sk_shutdown);
	if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= EPOLLHUP;
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* Connected or passive Fast Open socket? */
	if (state != TCP_SYN_SENT &&
	    (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);
		u16 urg_data = READ_ONCE(tp->urg_data);

		if (unlikely(urg_data) &&
		    READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
		    !sock_flag(sk, SOCK_URGINLINE))
			target++;

		if (tcp_stream_is_readable(sk, target))
			mask |= EPOLLIN | EPOLLRDNORM;

		if (!(shutdown & SEND_SHUTDOWN)) {
			if (__sk_stream_is_writeable(sk, 1)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost. Memory barrier
				 * pairs with the input side.
				 */
				smp_mb__after_atomic();
				if (__sk_stream_is_writeable(sk, 1))
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		} else
			mask |= EPOLLOUT | EPOLLWRNORM;

		if (urg_data & TCP_URG_VALID)
			mask |= EPOLLPRI;
	} else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
		/* Active TCP fastopen socket with defer_connect
		 * Return EPOLLOUT so application can call write()
		 * in order for kernel to generate SYN+data
		 */
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (READ_ONCE(sk->sk_err) ||
	    !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		slow = lock_sock_fast(sk);
		answ = tcp_inq(sk);
		unlock_sock_fast(sk, slow);
		break;
	case SIOCATMARK:
		answ = READ_ONCE(tp->urg_data) &&
		       READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) -
			       READ_ONCE(tp->snd_nxt);
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
EXPORT_SYMBOL(tcp_ioctl);

void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline bool forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	tcb->seq     = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	__skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk_wmem_queued_add(sk, skb->truesize);
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;

	tcp_slow_start_after_idle_check(sk);
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

/* If a not yet filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues :
 * Because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade off.
 * As packets containing data payload have a bigger truesize
 * than pure acks (dataless) packets, the last checks prevent
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
				int size_goal)
{
	return skb->len < size_goal &&
	       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
	       !tcp_rtx_queue_empty(sk) &&
	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
	       tcp_skb_can_collapse_to(skb);
}

void tcp_push(struct sock *sk, int flags, int mss_now,
	      int nonagle, int size_goal)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	skb = tcp_write_queue_tail(sk);
	if (!skb)
		return;
	if (!(flags & MSG_MORE) || forced_push(tp))
		tcp_mark_push(tp, skb);

	tcp_mark_urg(tp, flags);

	if (tcp_should_autocork(sk, skb, size_goal)) {

		/* avoid atomic op if TSQ_THROTTLED bit is already set */
		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
		}
		/* It is possible TX completion already happened
		 * before we set TSQ_THROTTLED.
		 */
		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
			return;
	}

	if (flags & MSG_MORE)
		nonagle = TCP_NAGLE_CORK;

	__tcp_push_pending_frames(sk, mss_now, nonagle);
}

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
			      min(rd_desc->count, len), tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
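 *
 *    A minimal userspace sketch of how this path is typically reached
 *    (editor's illustration, not part of the original kerneldoc;
 *    "tcp_fd" is a hypothetical connected TCP socket descriptor):
 *
 *	int pfd[2];
 *	ssize_t n;
 *
 *	if (pipe(pfd) == 0)
 *		n = splice(tcp_fd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
 *
 *    Return: number of bytes spliced if any data was moved, otherwise
 *    zero or a negative error code.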
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			/* if __tcp_splice_read() got nothing while we have
			 * an skb in receive queue, we do not want to loop.
			 * This might happen with URG data.
			 */
			if (!skb_queue_empty(&sk->sk_receive_queue))
				break;
			sk_wait_data(sk, &timeo, NULL);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);

struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
				     bool force_schedule)
{
	struct sk_buff *skb;

	skb = alloc_skb_fclone(size + MAX_TCP_HEADER, gfp);
	if (likely(skb)) {
		bool mem_scheduled;

		skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
		if (force_schedule) {
			mem_scheduled = true;
			sk_forced_mem_schedule(sk, skb->truesize);
		} else {
			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
		}
		if (likely(mem_scheduled)) {
			skb_reserve(skb, MAX_TCP_HEADER);
			skb->ip_summed = CHECKSUM_PARTIAL;
			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 new_size_goal, size_goal;

	if (!large_allowed)
		return mss_now;

	/* Note : tcp_tso_autosize() will eventually split this later */
	new_size_goal = tcp_bound_to_half_wnd(tp, sk->sk_gso_max_size);

	/* We try hard to avoid divides here */
	size_goal = tp->gso_segs * mss_now;
	if (unlikely(new_size_goal < size_goal ||
		     new_size_goal >= size_goal + mss_now)) {
		tp->gso_segs = min_t(u16, new_size_goal / mss_now,
				     sk->sk_gso_max_segs);
		size_goal = tp->gso_segs * mss_now;
	}

	return max(size_goal, mss_now);
}

int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}

/* In some cases, both sendpage() and sendmsg() could have added
 * an skb to the write queue, but failed adding payload on it.
 * We need to remove it to consume less memory, but more
 * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
 * users.
 */
void tcp_remove_empty_skb(struct sock *sk)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
		tcp_unlink_write_queue(skb, sk);
		if (tcp_write_queue_empty(sk))
			tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
		tcp_wmem_free_skb(sk, skb);
	}
}

/* skb changing from pure zc to mixed, must charge zc */
static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
{
	if (unlikely(skb_zcopy_pure(skb))) {
		u32 extra = skb->truesize -
			    SKB_TRUESIZE(skb_end_offset(skb));

		if (!sk_wmem_schedule(sk, extra))
			return -ENOMEM;

		sk_mem_charge(sk, extra);
		skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
	}
	return 0;
}


static int tcp_wmem_schedule(struct sock *sk, int copy)
{
	int left;

	if (likely(sk_wmem_schedule(sk, copy)))
		return copy;

	/* We could be in trouble if we have nothing queued.
	 * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0]
	 * to guarantee some progress.
	 */
	left = sock_net(sk)->ipv4.sysctl_tcp_wmem[0] - sk->sk_wmem_queued;
	if (left > 0)
		sk_forced_mem_schedule(sk, min(left, copy));
	return min(copy, sk->sk_forward_alloc);
}

static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
				      struct page *page, int offset, size_t *size)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	bool can_coalesce;
	int copy, i;

	if (!skb || (copy = size_goal - skb->len) <= 0 ||
	    !tcp_skb_can_collapse_to(skb)) {
new_segment:
		if (!sk_stream_memory_free(sk))
			return NULL;

		skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation,
					   tcp_rtx_and_write_queues_empty(sk));
		if (!skb)
			return NULL;

#ifdef CONFIG_TLS_DEVICE
		skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
#endif
		tcp_skb_entail(sk, skb);
		copy = size_goal;
	}

	if (copy > *size)
		copy = *size;

	i = skb_shinfo(skb)->nr_frags;
	can_coalesce = skb_can_coalesce(skb, i, page, offset);
	if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) {
		tcp_mark_push(tp, skb);
		goto new_segment;
	}
	if (tcp_downgrade_zcopy_pure(sk, skb))
		return NULL;

	copy = tcp_wmem_schedule(sk, copy);
	if (!copy)
		return NULL;

	if (can_coalesce) {
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
	} else {
		get_page(page);
		skb_fill_page_desc_noacc(skb, i, page, offset, copy);
	}

	if (!(flags & MSG_NO_SHARED_FRAGS))
		skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;

	skb->len += copy;
	skb->data_len += copy;
	skb->truesize += copy;
	sk_wmem_queued_add(sk, copy);
	sk_mem_charge(sk, copy);
	WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
	TCP_SKB_CB(skb)->end_seq += copy;
	tcp_skb_pcount_set(skb, 0);

	*size = copy;
	return skb;
}

ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
			 size_t size, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	if (IS_ENABLED(CONFIG_DEBUG_VM) &&
	    WARN_ONCE(!sendpage_ok(page),
		      "page must not be a Slab one and have page_count > 0"))
		return -EINVAL;

	/* Wait for a connection to finish. One exception is TCP Fast Open
	 * (passive side) where data is allowed to be sent before a connection
	 * is fully established.
	 */
	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
	    !tcp_passive_fastopen(sk)) {
		err = sk_stream_wait_connect(sk, &timeo);
		if (err != 0)
			goto out_err;
	}

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	mss_now = tcp_send_mss(sk, &size_goal, flags);
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	while (size > 0) {
		struct sk_buff *skb;
		size_t copy = size;

		skb = tcp_build_frag(sk, size_goal, flags, page, offset, &copy);
		if (!skb)
			goto wait_for_space;

		if (!copied)
			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

		copied += copy;
		offset += copy;
		size -= copy;
		if (!size)
			goto out;

		if (skb->len < size_goal || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_space:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		tcp_push(sk, flags & ~MSG_MORE, mss_now,
			 TCP_NAGLE_PUSH, size_goal);

		err = sk_stream_wait_memory(sk, &timeo);
		if (err != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
	}

out:
	if (copied) {
		tcp_tx_timestamp(sk, sk->sk_tsflags);
		if (!(flags & MSG_SENDPAGE_NOTLAST))
			tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
	}
	return copied;

do_error:
	tcp_remove_empty_skb(sk);
	if (copied)
		goto out;
out_err:
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) {
		sk->sk_write_space(sk);
		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
	}
	return sk_stream_error(sk, flags, err);
}
EXPORT_SYMBOL_GPL(do_tcp_sendpages);

int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
			size_t size, int flags)
{
	if (!(sk->sk_route_caps & NETIF_F_SG))
		return sock_no_sendpage_locked(sk, page, offset, size, flags);

	tcp_rate_check_app_limited(sk);  /* is sending application-limited?
*/ 1144 1145 return do_tcp_sendpages(sk, page, offset, size, flags); 1146 } 1147 EXPORT_SYMBOL_GPL(tcp_sendpage_locked); 1148 1149 int tcp_sendpage(struct sock *sk, struct page *page, int offset, 1150 size_t size, int flags) 1151 { 1152 int ret; 1153 1154 lock_sock(sk); 1155 ret = tcp_sendpage_locked(sk, page, offset, size, flags); 1156 release_sock(sk); 1157 1158 return ret; 1159 } 1160 EXPORT_SYMBOL(tcp_sendpage); 1161 1162 void tcp_free_fastopen_req(struct tcp_sock *tp) 1163 { 1164 if (tp->fastopen_req) { 1165 kfree(tp->fastopen_req); 1166 tp->fastopen_req = NULL; 1167 } 1168 } 1169 1170 int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied, 1171 size_t size, struct ubuf_info *uarg) 1172 { 1173 struct tcp_sock *tp = tcp_sk(sk); 1174 struct inet_sock *inet = inet_sk(sk); 1175 struct sockaddr *uaddr = msg->msg_name; 1176 int err, flags; 1177 1178 if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & 1179 TFO_CLIENT_ENABLE) || 1180 (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) && 1181 uaddr->sa_family == AF_UNSPEC)) 1182 return -EOPNOTSUPP; 1183 if (tp->fastopen_req) 1184 return -EALREADY; /* Another Fast Open is in progress */ 1185 1186 tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request), 1187 sk->sk_allocation); 1188 if (unlikely(!tp->fastopen_req)) 1189 return -ENOBUFS; 1190 tp->fastopen_req->data = msg; 1191 tp->fastopen_req->size = size; 1192 tp->fastopen_req->uarg = uarg; 1193 1194 if (inet->defer_connect) { 1195 err = tcp_connect(sk); 1196 /* Same failure procedure as in tcp_v4/6_connect */ 1197 if (err) { 1198 tcp_set_state(sk, TCP_CLOSE); 1199 inet->inet_dport = 0; 1200 sk->sk_route_caps = 0; 1201 } 1202 } 1203 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; 1204 err = __inet_stream_connect(sk->sk_socket, uaddr, 1205 msg->msg_namelen, flags, 1); 1206 /* fastopen_req could already be freed in __inet_stream_connect 1207 * if the connection times out or gets rst 1208 */ 1209 if (tp->fastopen_req) { 1210 *copied = tp->fastopen_req->copied; 1211 tcp_free_fastopen_req(tp); 1212 inet->defer_connect = 0; 1213 } 1214 return err; 1215 } 1216 1217 int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) 1218 { 1219 struct tcp_sock *tp = tcp_sk(sk); 1220 struct ubuf_info *uarg = NULL; 1221 struct sk_buff *skb; 1222 struct sockcm_cookie sockc; 1223 int flags, err, copied = 0; 1224 int mss_now = 0, size_goal, copied_syn = 0; 1225 int process_backlog = 0; 1226 bool zc = false; 1227 long timeo; 1228 1229 flags = msg->msg_flags; 1230 1231 if ((flags & MSG_ZEROCOPY) && size) { 1232 skb = tcp_write_queue_tail(sk); 1233 1234 if (msg->msg_ubuf) { 1235 uarg = msg->msg_ubuf; 1236 net_zcopy_get(uarg); 1237 zc = sk->sk_route_caps & NETIF_F_SG; 1238 } else if (sock_flag(sk, SOCK_ZEROCOPY)) { 1239 uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb)); 1240 if (!uarg) { 1241 err = -ENOBUFS; 1242 goto out_err; 1243 } 1244 zc = sk->sk_route_caps & NETIF_F_SG; 1245 if (!zc) 1246 uarg_to_msgzc(uarg)->zerocopy = 0; 1247 } 1248 } 1249 1250 if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) && 1251 !tp->repair) { 1252 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg); 1253 if (err == -EINPROGRESS && copied_syn > 0) 1254 goto out; 1255 else if (err) 1256 goto out_err; 1257 } 1258 1259 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 1260 1261 tcp_rate_check_app_limited(sk); /* is sending application-limited? */ 1262 1263 /* Wait for a connection to finish. 
One exception is TCP Fast Open 1264 * (passive side) where data is allowed to be sent before a connection 1265 * is fully established. 1266 */ 1267 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && 1268 !tcp_passive_fastopen(sk)) { 1269 err = sk_stream_wait_connect(sk, &timeo); 1270 if (err != 0) 1271 goto do_error; 1272 } 1273 1274 if (unlikely(tp->repair)) { 1275 if (tp->repair_queue == TCP_RECV_QUEUE) { 1276 copied = tcp_send_rcvq(sk, msg, size); 1277 goto out_nopush; 1278 } 1279 1280 err = -EINVAL; 1281 if (tp->repair_queue == TCP_NO_QUEUE) 1282 goto out_err; 1283 1284 /* 'common' sending to sendq */ 1285 } 1286 1287 sockcm_init(&sockc, sk); 1288 if (msg->msg_controllen) { 1289 err = sock_cmsg_send(sk, msg, &sockc); 1290 if (unlikely(err)) { 1291 err = -EINVAL; 1292 goto out_err; 1293 } 1294 } 1295 1296 /* This should be in poll */ 1297 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 1298 1299 /* Ok commence sending. */ 1300 copied = 0; 1301 1302 restart: 1303 mss_now = tcp_send_mss(sk, &size_goal, flags); 1304 1305 err = -EPIPE; 1306 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 1307 goto do_error; 1308 1309 while (msg_data_left(msg)) { 1310 int copy = 0; 1311 1312 skb = tcp_write_queue_tail(sk); 1313 if (skb) 1314 copy = size_goal - skb->len; 1315 1316 if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) { 1317 bool first_skb; 1318 1319 new_segment: 1320 if (!sk_stream_memory_free(sk)) 1321 goto wait_for_space; 1322 1323 if (unlikely(process_backlog >= 16)) { 1324 process_backlog = 0; 1325 if (sk_flush_backlog(sk)) 1326 goto restart; 1327 } 1328 first_skb = tcp_rtx_and_write_queues_empty(sk); 1329 skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation, 1330 first_skb); 1331 if (!skb) 1332 goto wait_for_space; 1333 1334 process_backlog++; 1335 1336 tcp_skb_entail(sk, skb); 1337 copy = size_goal; 1338 1339 /* All packets are restored as if they have 1340 * already been sent. skb_mstamp_ns isn't set to 1341 * avoid wrong rtt estimation. 1342 */ 1343 if (tp->repair) 1344 TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED; 1345 } 1346 1347 /* Try to append data to the end of skb. */ 1348 if (copy > msg_data_left(msg)) 1349 copy = msg_data_left(msg); 1350 1351 if (!zc) { 1352 bool merge = true; 1353 int i = skb_shinfo(skb)->nr_frags; 1354 struct page_frag *pfrag = sk_page_frag(sk); 1355 1356 if (!sk_page_frag_refill(sk, pfrag)) 1357 goto wait_for_space; 1358 1359 if (!skb_can_coalesce(skb, i, pfrag->page, 1360 pfrag->offset)) { 1361 if (i >= READ_ONCE(sysctl_max_skb_frags)) { 1362 tcp_mark_push(tp, skb); 1363 goto new_segment; 1364 } 1365 merge = false; 1366 } 1367 1368 copy = min_t(int, copy, pfrag->size - pfrag->offset); 1369 1370 if (unlikely(skb_zcopy_pure(skb) || skb_zcopy_managed(skb))) { 1371 if (tcp_downgrade_zcopy_pure(sk, skb)) 1372 goto wait_for_space; 1373 skb_zcopy_downgrade_managed(skb); 1374 } 1375 1376 copy = tcp_wmem_schedule(sk, copy); 1377 if (!copy) 1378 goto wait_for_space; 1379 1380 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, 1381 pfrag->page, 1382 pfrag->offset, 1383 copy); 1384 if (err) 1385 goto do_error; 1386 1387 /* Update the skb. 
*/ 1388 if (merge) { 1389 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 1390 } else { 1391 skb_fill_page_desc(skb, i, pfrag->page, 1392 pfrag->offset, copy); 1393 page_ref_inc(pfrag->page); 1394 } 1395 pfrag->offset += copy; 1396 } else { 1397 /* First append to a fragless skb builds initial 1398 * pure zerocopy skb 1399 */ 1400 if (!skb->len) 1401 skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY; 1402 1403 if (!skb_zcopy_pure(skb)) { 1404 copy = tcp_wmem_schedule(sk, copy); 1405 if (!copy) 1406 goto wait_for_space; 1407 } 1408 1409 err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg); 1410 if (err == -EMSGSIZE || err == -EEXIST) { 1411 tcp_mark_push(tp, skb); 1412 goto new_segment; 1413 } 1414 if (err < 0) 1415 goto do_error; 1416 copy = err; 1417 } 1418 1419 if (!copied) 1420 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; 1421 1422 WRITE_ONCE(tp->write_seq, tp->write_seq + copy); 1423 TCP_SKB_CB(skb)->end_seq += copy; 1424 tcp_skb_pcount_set(skb, 0); 1425 1426 copied += copy; 1427 if (!msg_data_left(msg)) { 1428 if (unlikely(flags & MSG_EOR)) 1429 TCP_SKB_CB(skb)->eor = 1; 1430 goto out; 1431 } 1432 1433 if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair)) 1434 continue; 1435 1436 if (forced_push(tp)) { 1437 tcp_mark_push(tp, skb); 1438 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); 1439 } else if (skb == tcp_send_head(sk)) 1440 tcp_push_one(sk, mss_now); 1441 continue; 1442 1443 wait_for_space: 1444 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1445 if (copied) 1446 tcp_push(sk, flags & ~MSG_MORE, mss_now, 1447 TCP_NAGLE_PUSH, size_goal); 1448 1449 err = sk_stream_wait_memory(sk, &timeo); 1450 if (err != 0) 1451 goto do_error; 1452 1453 mss_now = tcp_send_mss(sk, &size_goal, flags); 1454 } 1455 1456 out: 1457 if (copied) { 1458 tcp_tx_timestamp(sk, sockc.tsflags); 1459 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); 1460 } 1461 out_nopush: 1462 net_zcopy_put(uarg); 1463 return copied + copied_syn; 1464 1465 do_error: 1466 tcp_remove_empty_skb(sk); 1467 1468 if (copied + copied_syn) 1469 goto out; 1470 out_err: 1471 net_zcopy_put_abort(uarg, true); 1472 err = sk_stream_error(sk, flags, err); 1473 /* make sure we wake any epoll edge trigger waiter */ 1474 if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) { 1475 sk->sk_write_space(sk); 1476 tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); 1477 } 1478 return err; 1479 } 1480 EXPORT_SYMBOL_GPL(tcp_sendmsg_locked); 1481 1482 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 1483 { 1484 int ret; 1485 1486 lock_sock(sk); 1487 ret = tcp_sendmsg_locked(sk, msg, size); 1488 release_sock(sk); 1489 1490 return ret; 1491 } 1492 EXPORT_SYMBOL(tcp_sendmsg); 1493 1494 /* 1495 * Handle reading urgent data. BSD has very simple semantics for 1496 * this, no blocking and very strange errors 8) 1497 */ 1498 1499 static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) 1500 { 1501 struct tcp_sock *tp = tcp_sk(sk); 1502 1503 /* No URG data to read. */ 1504 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || 1505 tp->urg_data == TCP_URG_READ) 1506 return -EINVAL; /* Yes this is right ! */ 1507 1508 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) 1509 return -ENOTCONN; 1510 1511 if (tp->urg_data & TCP_URG_VALID) { 1512 int err = 0; 1513 char c = tp->urg_data; 1514 1515 if (!(flags & MSG_PEEK)) 1516 WRITE_ONCE(tp->urg_data, TCP_URG_READ); 1517 1518 /* Read urgent data. 
*/ 1519 msg->msg_flags |= MSG_OOB; 1520 1521 if (len > 0) { 1522 if (!(flags & MSG_TRUNC)) 1523 err = memcpy_to_msg(msg, &c, 1); 1524 len = 1; 1525 } else 1526 msg->msg_flags |= MSG_TRUNC; 1527 1528 return err ? -EFAULT : len; 1529 } 1530 1531 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) 1532 return 0; 1533 1534 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and 1535 * the available implementations agree in this case: 1536 * this call should never block, independent of the 1537 * blocking state of the socket. 1538 * Mike <pall@rz.uni-karlsruhe.de> 1539 */ 1540 return -EAGAIN; 1541 } 1542 1543 static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) 1544 { 1545 struct sk_buff *skb; 1546 int copied = 0, err = 0; 1547 1548 /* XXX -- need to support SO_PEEK_OFF */ 1549 1550 skb_rbtree_walk(skb, &sk->tcp_rtx_queue) { 1551 err = skb_copy_datagram_msg(skb, 0, msg, skb->len); 1552 if (err) 1553 return err; 1554 copied += skb->len; 1555 } 1556 1557 skb_queue_walk(&sk->sk_write_queue, skb) { 1558 err = skb_copy_datagram_msg(skb, 0, msg, skb->len); 1559 if (err) 1560 break; 1561 1562 copied += skb->len; 1563 } 1564 1565 return err ?: copied; 1566 } 1567 1568 /* Clean up the receive buffer for full frames taken by the user, 1569 * then send an ACK if necessary. COPIED is the number of bytes 1570 * tcp_recvmsg has given to the user so far, it speeds up the 1571 * calculation of whether or not we must ACK for the sake of 1572 * a window update. 1573 */ 1574 static void __tcp_cleanup_rbuf(struct sock *sk, int copied) 1575 { 1576 struct tcp_sock *tp = tcp_sk(sk); 1577 bool time_to_ack = false; 1578 1579 if (inet_csk_ack_scheduled(sk)) { 1580 const struct inet_connection_sock *icsk = inet_csk(sk); 1581 1582 if (/* Once-per-two-segments ACK was not sent by tcp_input.c */ 1583 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || 1584 /* 1585 * If this read emptied read buffer, we send ACK, if 1586 * connection is not bidirectional, user drained 1587 * receive buffer and there was a small segment 1588 * in queue. 1589 */ 1590 (copied > 0 && 1591 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) || 1592 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && 1593 !inet_csk_in_pingpong_mode(sk))) && 1594 !atomic_read(&sk->sk_rmem_alloc))) 1595 time_to_ack = true; 1596 } 1597 1598 /* We send an ACK if we can now advertise a non-zero window 1599 * which has been raised "significantly". 1600 * 1601 * Even if window raised up to infinity, do not send window open ACK 1602 * in states, where we will not receive more. It is useless. 1603 */ 1604 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { 1605 __u32 rcv_window_now = tcp_receive_window(tp); 1606 1607 /* Optimize, __tcp_select_window() is not cheap. */ 1608 if (2*rcv_window_now <= tp->window_clamp) { 1609 __u32 new_window = __tcp_select_window(sk); 1610 1611 /* Send ACK now, if this read freed lots of space 1612 * in our buffer. Certainly, new_window is new window. 1613 * We can advertise it now, if it is not less than current one. 1614 * "Lots" means "at least twice" here. 
1615 */ 1616 if (new_window && new_window >= 2 * rcv_window_now) 1617 time_to_ack = true; 1618 } 1619 } 1620 if (time_to_ack) 1621 tcp_send_ack(sk); 1622 } 1623 1624 void tcp_cleanup_rbuf(struct sock *sk, int copied) 1625 { 1626 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1627 struct tcp_sock *tp = tcp_sk(sk); 1628 1629 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), 1630 "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", 1631 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); 1632 __tcp_cleanup_rbuf(sk, copied); 1633 } 1634 1635 static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb) 1636 { 1637 __skb_unlink(skb, &sk->sk_receive_queue); 1638 if (likely(skb->destructor == sock_rfree)) { 1639 sock_rfree(skb); 1640 skb->destructor = NULL; 1641 skb->sk = NULL; 1642 return skb_attempt_defer_free(skb); 1643 } 1644 __kfree_skb(skb); 1645 } 1646 1647 struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 1648 { 1649 struct sk_buff *skb; 1650 u32 offset; 1651 1652 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { 1653 offset = seq - TCP_SKB_CB(skb)->seq; 1654 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 1655 pr_err_once("%s: found a SYN, please report !\n", __func__); 1656 offset--; 1657 } 1658 if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) { 1659 *off = offset; 1660 return skb; 1661 } 1662 /* This looks weird, but this can happen if TCP collapsing 1663 * splitted a fat GRO packet, while we released socket lock 1664 * in skb_splice_bits() 1665 */ 1666 tcp_eat_recv_skb(sk, skb); 1667 } 1668 return NULL; 1669 } 1670 EXPORT_SYMBOL(tcp_recv_skb); 1671 1672 /* 1673 * This routine provides an alternative to tcp_recvmsg() for routines 1674 * that would like to handle copying from skbuffs directly in 'sendfile' 1675 * fashion. 1676 * Note: 1677 * - It is assumed that the socket was locked by the caller. 1678 * - The routine does not block. 1679 * - At present, there is no support for reading OOB data 1680 * or for 'peeking' the socket using this routine 1681 * (although both would be easy to implement). 1682 */ 1683 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 1684 sk_read_actor_t recv_actor) 1685 { 1686 struct sk_buff *skb; 1687 struct tcp_sock *tp = tcp_sk(sk); 1688 u32 seq = tp->copied_seq; 1689 u32 offset; 1690 int copied = 0; 1691 1692 if (sk->sk_state == TCP_LISTEN) 1693 return -ENOTCONN; 1694 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 1695 if (offset < skb->len) { 1696 int used; 1697 size_t len; 1698 1699 len = skb->len - offset; 1700 /* Stop reading if we hit a patch of urgent data */ 1701 if (unlikely(tp->urg_data)) { 1702 u32 urg_offset = tp->urg_seq - seq; 1703 if (urg_offset < len) 1704 len = urg_offset; 1705 if (!len) 1706 break; 1707 } 1708 used = recv_actor(desc, skb, offset, len); 1709 if (used <= 0) { 1710 if (!copied) 1711 copied = used; 1712 break; 1713 } 1714 if (WARN_ON_ONCE(used > len)) 1715 used = len; 1716 seq += used; 1717 copied += used; 1718 offset += used; 1719 1720 /* If recv_actor drops the lock (e.g. TCP splice 1721 * receive) the skb pointer might be invalid when 1722 * getting here: tcp_collapse might have deleted it 1723 * while aggregating skbs from the socket queue. 1724 */ 1725 skb = tcp_recv_skb(sk, seq - 1, &offset); 1726 if (!skb) 1727 break; 1728 /* TCP coalescing might have appended data to the skb. 
1729 * Try to splice more frags 1730 */ 1731 if (offset + 1 != skb->len) 1732 continue; 1733 } 1734 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { 1735 tcp_eat_recv_skb(sk, skb); 1736 ++seq; 1737 break; 1738 } 1739 tcp_eat_recv_skb(sk, skb); 1740 if (!desc->count) 1741 break; 1742 WRITE_ONCE(tp->copied_seq, seq); 1743 } 1744 WRITE_ONCE(tp->copied_seq, seq); 1745 1746 tcp_rcv_space_adjust(sk); 1747 1748 /* Clean up data we have read: This will do ACK frames. */ 1749 if (copied > 0) { 1750 tcp_recv_skb(sk, seq, &offset); 1751 tcp_cleanup_rbuf(sk, copied); 1752 } 1753 return copied; 1754 } 1755 EXPORT_SYMBOL(tcp_read_sock); 1756 1757 int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) 1758 { 1759 struct tcp_sock *tp = tcp_sk(sk); 1760 u32 seq = tp->copied_seq; 1761 struct sk_buff *skb; 1762 int copied = 0; 1763 u32 offset; 1764 1765 if (sk->sk_state == TCP_LISTEN) 1766 return -ENOTCONN; 1767 1768 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 1769 u8 tcp_flags; 1770 int used; 1771 1772 __skb_unlink(skb, &sk->sk_receive_queue); 1773 WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk)); 1774 tcp_flags = TCP_SKB_CB(skb)->tcp_flags; 1775 used = recv_actor(sk, skb); 1776 consume_skb(skb); 1777 if (used < 0) { 1778 if (!copied) 1779 copied = used; 1780 break; 1781 } 1782 seq += used; 1783 copied += used; 1784 1785 if (tcp_flags & TCPHDR_FIN) { 1786 ++seq; 1787 break; 1788 } 1789 } 1790 WRITE_ONCE(tp->copied_seq, seq); 1791 1792 tcp_rcv_space_adjust(sk); 1793 1794 /* Clean up data we have read: This will do ACK frames. */ 1795 if (copied > 0) 1796 __tcp_cleanup_rbuf(sk, copied); 1797 1798 return copied; 1799 } 1800 EXPORT_SYMBOL(tcp_read_skb); 1801 1802 void tcp_read_done(struct sock *sk, size_t len) 1803 { 1804 struct tcp_sock *tp = tcp_sk(sk); 1805 u32 seq = tp->copied_seq; 1806 struct sk_buff *skb; 1807 size_t left; 1808 u32 offset; 1809 1810 if (sk->sk_state == TCP_LISTEN) 1811 return; 1812 1813 left = len; 1814 while (left && (skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 1815 int used; 1816 1817 used = min_t(size_t, skb->len - offset, left); 1818 seq += used; 1819 left -= used; 1820 1821 if (skb->len > offset + used) 1822 break; 1823 1824 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { 1825 tcp_eat_recv_skb(sk, skb); 1826 ++seq; 1827 break; 1828 } 1829 tcp_eat_recv_skb(sk, skb); 1830 } 1831 WRITE_ONCE(tp->copied_seq, seq); 1832 1833 tcp_rcv_space_adjust(sk); 1834 1835 /* Clean up data we have read: This will do ACK frames. */ 1836 if (left != len) 1837 tcp_cleanup_rbuf(sk, len - left); 1838 } 1839 EXPORT_SYMBOL(tcp_read_done); 1840 1841 int tcp_peek_len(struct socket *sock) 1842 { 1843 return tcp_inq(sock->sk); 1844 } 1845 EXPORT_SYMBOL(tcp_peek_len); 1846 1847 /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */ 1848 int tcp_set_rcvlowat(struct sock *sk, int val) 1849 { 1850 int cap; 1851 1852 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) 1853 cap = sk->sk_rcvbuf >> 1; 1854 else 1855 cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1; 1856 val = min(val, cap); 1857 WRITE_ONCE(sk->sk_rcvlowat, val ? 
: 1); 1858 1859 /* Check if we need to signal EPOLLIN right now */ 1860 tcp_data_ready(sk); 1861 1862 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) 1863 return 0; 1864 1865 val <<= 1; 1866 if (val > sk->sk_rcvbuf) { 1867 WRITE_ONCE(sk->sk_rcvbuf, val); 1868 tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val); 1869 } 1870 return 0; 1871 } 1872 EXPORT_SYMBOL(tcp_set_rcvlowat); 1873 1874 void tcp_update_recv_tstamps(struct sk_buff *skb, 1875 struct scm_timestamping_internal *tss) 1876 { 1877 if (skb->tstamp) 1878 tss->ts[0] = ktime_to_timespec64(skb->tstamp); 1879 else 1880 tss->ts[0] = (struct timespec64) {0}; 1881 1882 if (skb_hwtstamps(skb)->hwtstamp) 1883 tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp); 1884 else 1885 tss->ts[2] = (struct timespec64) {0}; 1886 } 1887 1888 #ifdef CONFIG_MMU 1889 static const struct vm_operations_struct tcp_vm_ops = { 1890 }; 1891 1892 int tcp_mmap(struct file *file, struct socket *sock, 1893 struct vm_area_struct *vma) 1894 { 1895 if (vma->vm_flags & (VM_WRITE | VM_EXEC)) 1896 return -EPERM; 1897 vm_flags_clear(vma, VM_MAYWRITE | VM_MAYEXEC); 1898 1899 /* Instruct vm_insert_page() to not mmap_read_lock(mm) */ 1900 vm_flags_set(vma, VM_MIXEDMAP); 1901 1902 vma->vm_ops = &tcp_vm_ops; 1903 return 0; 1904 } 1905 EXPORT_SYMBOL(tcp_mmap); 1906 1907 static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb, 1908 u32 *offset_frag) 1909 { 1910 skb_frag_t *frag; 1911 1912 if (unlikely(offset_skb >= skb->len)) 1913 return NULL; 1914 1915 offset_skb -= skb_headlen(skb); 1916 if ((int)offset_skb < 0 || skb_has_frag_list(skb)) 1917 return NULL; 1918 1919 frag = skb_shinfo(skb)->frags; 1920 while (offset_skb) { 1921 if (skb_frag_size(frag) > offset_skb) { 1922 *offset_frag = offset_skb; 1923 return frag; 1924 } 1925 offset_skb -= skb_frag_size(frag); 1926 ++frag; 1927 } 1928 *offset_frag = 0; 1929 return frag; 1930 } 1931 1932 static bool can_map_frag(const skb_frag_t *frag) 1933 { 1934 return skb_frag_size(frag) == PAGE_SIZE && !skb_frag_off(frag); 1935 } 1936 1937 static int find_next_mappable_frag(const skb_frag_t *frag, 1938 int remaining_in_skb) 1939 { 1940 int offset = 0; 1941 1942 if (likely(can_map_frag(frag))) 1943 return 0; 1944 1945 while (offset < remaining_in_skb && !can_map_frag(frag)) { 1946 offset += skb_frag_size(frag); 1947 ++frag; 1948 } 1949 return offset; 1950 } 1951 1952 static void tcp_zerocopy_set_hint_for_skb(struct sock *sk, 1953 struct tcp_zerocopy_receive *zc, 1954 struct sk_buff *skb, u32 offset) 1955 { 1956 u32 frag_offset, partial_frag_remainder = 0; 1957 int mappable_offset; 1958 skb_frag_t *frag; 1959 1960 /* worst case: skip to next skb. try to improve on this case below */ 1961 zc->recv_skip_hint = skb->len - offset; 1962 1963 /* Find the frag containing this offset (and how far into that frag) */ 1964 frag = skb_advance_to_frag(skb, offset, &frag_offset); 1965 if (!frag) 1966 return; 1967 1968 if (frag_offset) { 1969 struct skb_shared_info *info = skb_shinfo(skb); 1970 1971 /* We read part of the last frag, must recvmsg() rest of skb. */ 1972 if (frag == &info->frags[info->nr_frags - 1]) 1973 return; 1974 1975 /* Else, we must at least read the remainder in this frag. */ 1976 partial_frag_remainder = skb_frag_size(frag) - frag_offset; 1977 zc->recv_skip_hint -= partial_frag_remainder; 1978 ++frag; 1979 } 1980 1981 /* partial_frag_remainder: If part way through a frag, must read rest. 1982 * mappable_offset: Bytes till next mappable frag, *not* counting bytes 1983 * in partial_frag_remainder. 
1984 */ 1985 mappable_offset = find_next_mappable_frag(frag, zc->recv_skip_hint); 1986 zc->recv_skip_hint = mappable_offset + partial_frag_remainder; 1987 } 1988 1989 static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, 1990 int flags, struct scm_timestamping_internal *tss, 1991 int *cmsg_flags); 1992 static int receive_fallback_to_copy(struct sock *sk, 1993 struct tcp_zerocopy_receive *zc, int inq, 1994 struct scm_timestamping_internal *tss) 1995 { 1996 unsigned long copy_address = (unsigned long)zc->copybuf_address; 1997 struct msghdr msg = {}; 1998 struct iovec iov; 1999 int err; 2000 2001 zc->length = 0; 2002 zc->recv_skip_hint = 0; 2003 2004 if (copy_address != zc->copybuf_address) 2005 return -EINVAL; 2006 2007 err = import_single_range(ITER_DEST, (void __user *)copy_address, 2008 inq, &iov, &msg.msg_iter); 2009 if (err) 2010 return err; 2011 2012 err = tcp_recvmsg_locked(sk, &msg, inq, MSG_DONTWAIT, 2013 tss, &zc->msg_flags); 2014 if (err < 0) 2015 return err; 2016 2017 zc->copybuf_len = err; 2018 if (likely(zc->copybuf_len)) { 2019 struct sk_buff *skb; 2020 u32 offset; 2021 2022 skb = tcp_recv_skb(sk, tcp_sk(sk)->copied_seq, &offset); 2023 if (skb) 2024 tcp_zerocopy_set_hint_for_skb(sk, zc, skb, offset); 2025 } 2026 return 0; 2027 } 2028 2029 static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc, 2030 struct sk_buff *skb, u32 copylen, 2031 u32 *offset, u32 *seq) 2032 { 2033 unsigned long copy_address = (unsigned long)zc->copybuf_address; 2034 struct msghdr msg = {}; 2035 struct iovec iov; 2036 int err; 2037 2038 if (copy_address != zc->copybuf_address) 2039 return -EINVAL; 2040 2041 err = import_single_range(ITER_DEST, (void __user *)copy_address, 2042 copylen, &iov, &msg.msg_iter); 2043 if (err) 2044 return err; 2045 err = skb_copy_datagram_msg(skb, *offset, &msg, copylen); 2046 if (err) 2047 return err; 2048 zc->recv_skip_hint -= copylen; 2049 *offset += copylen; 2050 *seq += copylen; 2051 return (__s32)copylen; 2052 } 2053 2054 static int tcp_zc_handle_leftover(struct tcp_zerocopy_receive *zc, 2055 struct sock *sk, 2056 struct sk_buff *skb, 2057 u32 *seq, 2058 s32 copybuf_len, 2059 struct scm_timestamping_internal *tss) 2060 { 2061 u32 offset, copylen = min_t(u32, copybuf_len, zc->recv_skip_hint); 2062 2063 if (!copylen) 2064 return 0; 2065 /* skb is null if inq < PAGE_SIZE. */ 2066 if (skb) { 2067 offset = *seq - TCP_SKB_CB(skb)->seq; 2068 } else { 2069 skb = tcp_recv_skb(sk, *seq, &offset); 2070 if (TCP_SKB_CB(skb)->has_rxtstamp) { 2071 tcp_update_recv_tstamps(skb, tss); 2072 zc->msg_flags |= TCP_CMSG_TS; 2073 } 2074 } 2075 2076 zc->copybuf_len = tcp_copy_straggler_data(zc, skb, copylen, &offset, 2077 seq); 2078 return zc->copybuf_len < 0 ? 0 : copylen; 2079 } 2080 2081 static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma, 2082 struct page **pending_pages, 2083 unsigned long pages_remaining, 2084 unsigned long *address, 2085 u32 *length, 2086 u32 *seq, 2087 struct tcp_zerocopy_receive *zc, 2088 u32 total_bytes_to_map, 2089 int err) 2090 { 2091 /* At least one page did not map. Try zapping if we skipped earlier. */ 2092 if (err == -EBUSY && 2093 zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT) { 2094 u32 maybe_zap_len; 2095 2096 maybe_zap_len = total_bytes_to_map - /* All bytes to map */ 2097 *length + /* Mapped or pending */ 2098 (pages_remaining * PAGE_SIZE); /* Failed map. 
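 * i.e. zap from the current insert address up to the end of the
 * originally requested region, so that the retried vm_insert_pages()
 * below does not trip over already-populated PTEs again.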
*/ 2099 zap_page_range_single(vma, *address, maybe_zap_len, NULL); 2100 err = 0; 2101 } 2102 2103 if (!err) { 2104 unsigned long leftover_pages = pages_remaining; 2105 int bytes_mapped; 2106 2107 /* We called zap_page_range_single, try to reinsert. */ 2108 err = vm_insert_pages(vma, *address, 2109 pending_pages, 2110 &pages_remaining); 2111 bytes_mapped = PAGE_SIZE * (leftover_pages - pages_remaining); 2112 *seq += bytes_mapped; 2113 *address += bytes_mapped; 2114 } 2115 if (err) { 2116 /* Either we were unable to zap, OR we zapped, retried an 2117 * insert, and still had an issue. Either ways, pages_remaining 2118 * is the number of pages we were unable to map, and we unroll 2119 * some state we speculatively touched before. 2120 */ 2121 const int bytes_not_mapped = PAGE_SIZE * pages_remaining; 2122 2123 *length -= bytes_not_mapped; 2124 zc->recv_skip_hint += bytes_not_mapped; 2125 } 2126 return err; 2127 } 2128 2129 static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma, 2130 struct page **pages, 2131 unsigned int pages_to_map, 2132 unsigned long *address, 2133 u32 *length, 2134 u32 *seq, 2135 struct tcp_zerocopy_receive *zc, 2136 u32 total_bytes_to_map) 2137 { 2138 unsigned long pages_remaining = pages_to_map; 2139 unsigned int pages_mapped; 2140 unsigned int bytes_mapped; 2141 int err; 2142 2143 err = vm_insert_pages(vma, *address, pages, &pages_remaining); 2144 pages_mapped = pages_to_map - (unsigned int)pages_remaining; 2145 bytes_mapped = PAGE_SIZE * pages_mapped; 2146 /* Even if vm_insert_pages fails, it may have partially succeeded in 2147 * mapping (some but not all of the pages). 2148 */ 2149 *seq += bytes_mapped; 2150 *address += bytes_mapped; 2151 2152 if (likely(!err)) 2153 return 0; 2154 2155 /* Error: maybe zap and retry + rollback state for failed inserts. */ 2156 return tcp_zerocopy_vm_insert_batch_error(vma, pages + pages_mapped, 2157 pages_remaining, address, length, seq, zc, total_bytes_to_map, 2158 err); 2159 } 2160 2161 #define TCP_VALID_ZC_MSG_FLAGS (TCP_CMSG_TS) 2162 static void tcp_zc_finalize_rx_tstamp(struct sock *sk, 2163 struct tcp_zerocopy_receive *zc, 2164 struct scm_timestamping_internal *tss) 2165 { 2166 unsigned long msg_control_addr; 2167 struct msghdr cmsg_dummy; 2168 2169 msg_control_addr = (unsigned long)zc->msg_control; 2170 cmsg_dummy.msg_control_user = (void __user *)msg_control_addr; 2171 cmsg_dummy.msg_controllen = 2172 (__kernel_size_t)zc->msg_controllen; 2173 cmsg_dummy.msg_flags = in_compat_syscall() 2174 ? 
MSG_CMSG_COMPAT : 0; 2175 cmsg_dummy.msg_control_is_user = true; 2176 zc->msg_flags = 0; 2177 if (zc->msg_control == msg_control_addr && 2178 zc->msg_controllen == cmsg_dummy.msg_controllen) { 2179 tcp_recv_timestamp(&cmsg_dummy, sk, tss); 2180 zc->msg_control = (__u64) 2181 ((uintptr_t)cmsg_dummy.msg_control_user); 2182 zc->msg_controllen = 2183 (__u64)cmsg_dummy.msg_controllen; 2184 zc->msg_flags = (__u32)cmsg_dummy.msg_flags; 2185 } 2186 } 2187 2188 #define TCP_ZEROCOPY_PAGE_BATCH_SIZE 32 2189 static int tcp_zerocopy_receive(struct sock *sk, 2190 struct tcp_zerocopy_receive *zc, 2191 struct scm_timestamping_internal *tss) 2192 { 2193 u32 length = 0, offset, vma_len, avail_len, copylen = 0; 2194 unsigned long address = (unsigned long)zc->address; 2195 struct page *pages[TCP_ZEROCOPY_PAGE_BATCH_SIZE]; 2196 s32 copybuf_len = zc->copybuf_len; 2197 struct tcp_sock *tp = tcp_sk(sk); 2198 const skb_frag_t *frags = NULL; 2199 unsigned int pages_to_map = 0; 2200 struct vm_area_struct *vma; 2201 struct sk_buff *skb = NULL; 2202 u32 seq = tp->copied_seq; 2203 u32 total_bytes_to_map; 2204 int inq = tcp_inq(sk); 2205 int ret; 2206 2207 zc->copybuf_len = 0; 2208 zc->msg_flags = 0; 2209 2210 if (address & (PAGE_SIZE - 1) || address != zc->address) 2211 return -EINVAL; 2212 2213 if (sk->sk_state == TCP_LISTEN) 2214 return -ENOTCONN; 2215 2216 sock_rps_record_flow(sk); 2217 2218 if (inq && inq <= copybuf_len) 2219 return receive_fallback_to_copy(sk, zc, inq, tss); 2220 2221 if (inq < PAGE_SIZE) { 2222 zc->length = 0; 2223 zc->recv_skip_hint = inq; 2224 if (!inq && sock_flag(sk, SOCK_DONE)) 2225 return -EIO; 2226 return 0; 2227 } 2228 2229 mmap_read_lock(current->mm); 2230 2231 vma = vma_lookup(current->mm, address); 2232 if (!vma || vma->vm_ops != &tcp_vm_ops) { 2233 mmap_read_unlock(current->mm); 2234 return -EINVAL; 2235 } 2236 vma_len = min_t(unsigned long, zc->length, vma->vm_end - address); 2237 avail_len = min_t(u32, vma_len, inq); 2238 total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1); 2239 if (total_bytes_to_map) { 2240 if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT)) 2241 zap_page_range_single(vma, address, total_bytes_to_map, 2242 NULL); 2243 zc->length = total_bytes_to_map; 2244 zc->recv_skip_hint = 0; 2245 } else { 2246 zc->length = avail_len; 2247 zc->recv_skip_hint = avail_len; 2248 } 2249 ret = 0; 2250 while (length + PAGE_SIZE <= zc->length) { 2251 int mappable_offset; 2252 struct page *page; 2253 2254 if (zc->recv_skip_hint < PAGE_SIZE) { 2255 u32 offset_frag; 2256 2257 if (skb) { 2258 if (zc->recv_skip_hint > 0) 2259 break; 2260 skb = skb->next; 2261 offset = seq - TCP_SKB_CB(skb)->seq; 2262 } else { 2263 skb = tcp_recv_skb(sk, seq, &offset); 2264 } 2265 2266 if (TCP_SKB_CB(skb)->has_rxtstamp) { 2267 tcp_update_recv_tstamps(skb, tss); 2268 zc->msg_flags |= TCP_CMSG_TS; 2269 } 2270 zc->recv_skip_hint = skb->len - offset; 2271 frags = skb_advance_to_frag(skb, offset, &offset_frag); 2272 if (!frags || offset_frag) 2273 break; 2274 } 2275 2276 mappable_offset = find_next_mappable_frag(frags, 2277 zc->recv_skip_hint); 2278 if (mappable_offset) { 2279 zc->recv_skip_hint = mappable_offset; 2280 break; 2281 } 2282 page = skb_frag_page(frags); 2283 prefetchw(page); 2284 pages[pages_to_map++] = page; 2285 length += PAGE_SIZE; 2286 zc->recv_skip_hint -= PAGE_SIZE; 2287 frags++; 2288 if (pages_to_map == TCP_ZEROCOPY_PAGE_BATCH_SIZE || 2289 zc->recv_skip_hint < PAGE_SIZE) { 2290 /* Either full batch, or we're about to go to next skb 2291 * (and we cannot unroll failed ops across skbs). 
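 * Flush what has been batched so far; on failure the helper rolls
 * back length and recv_skip_hint for the pages that could not be
 * inserted.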
2292 */ 2293 ret = tcp_zerocopy_vm_insert_batch(vma, pages, 2294 pages_to_map, 2295 &address, &length, 2296 &seq, zc, 2297 total_bytes_to_map); 2298 if (ret) 2299 goto out; 2300 pages_to_map = 0; 2301 } 2302 } 2303 if (pages_to_map) { 2304 ret = tcp_zerocopy_vm_insert_batch(vma, pages, pages_to_map, 2305 &address, &length, &seq, 2306 zc, total_bytes_to_map); 2307 } 2308 out: 2309 mmap_read_unlock(current->mm); 2310 /* Try to copy straggler data. */ 2311 if (!ret) 2312 copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss); 2313 2314 if (length + copylen) { 2315 WRITE_ONCE(tp->copied_seq, seq); 2316 tcp_rcv_space_adjust(sk); 2317 2318 /* Clean up data we have read: This will do ACK frames. */ 2319 tcp_recv_skb(sk, seq, &offset); 2320 tcp_cleanup_rbuf(sk, length + copylen); 2321 ret = 0; 2322 if (length == zc->length) 2323 zc->recv_skip_hint = 0; 2324 } else { 2325 if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE)) 2326 ret = -EIO; 2327 } 2328 zc->length = length; 2329 return ret; 2330 } 2331 #endif 2332 2333 /* Similar to __sock_recv_timestamp, but does not require an skb */ 2334 void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk, 2335 struct scm_timestamping_internal *tss) 2336 { 2337 int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW); 2338 bool has_timestamping = false; 2339 2340 if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) { 2341 if (sock_flag(sk, SOCK_RCVTSTAMP)) { 2342 if (sock_flag(sk, SOCK_RCVTSTAMPNS)) { 2343 if (new_tstamp) { 2344 struct __kernel_timespec kts = { 2345 .tv_sec = tss->ts[0].tv_sec, 2346 .tv_nsec = tss->ts[0].tv_nsec, 2347 }; 2348 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW, 2349 sizeof(kts), &kts); 2350 } else { 2351 struct __kernel_old_timespec ts_old = { 2352 .tv_sec = tss->ts[0].tv_sec, 2353 .tv_nsec = tss->ts[0].tv_nsec, 2354 }; 2355 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD, 2356 sizeof(ts_old), &ts_old); 2357 } 2358 } else { 2359 if (new_tstamp) { 2360 struct __kernel_sock_timeval stv = { 2361 .tv_sec = tss->ts[0].tv_sec, 2362 .tv_usec = tss->ts[0].tv_nsec / 1000, 2363 }; 2364 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW, 2365 sizeof(stv), &stv); 2366 } else { 2367 struct __kernel_old_timeval tv = { 2368 .tv_sec = tss->ts[0].tv_sec, 2369 .tv_usec = tss->ts[0].tv_nsec / 1000, 2370 }; 2371 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD, 2372 sizeof(tv), &tv); 2373 } 2374 } 2375 } 2376 2377 if (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) 2378 has_timestamping = true; 2379 else 2380 tss->ts[0] = (struct timespec64) {0}; 2381 } 2382 2383 if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) { 2384 if (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) 2385 has_timestamping = true; 2386 else 2387 tss->ts[2] = (struct timespec64) {0}; 2388 } 2389 2390 if (has_timestamping) { 2391 tss->ts[1] = (struct timespec64) {0}; 2392 if (sock_flag(sk, SOCK_TSTAMP_NEW)) 2393 put_cmsg_scm_timestamping64(msg, tss); 2394 else 2395 put_cmsg_scm_timestamping(msg, tss); 2396 } 2397 } 2398 2399 static int tcp_inq_hint(struct sock *sk) 2400 { 2401 const struct tcp_sock *tp = tcp_sk(sk); 2402 u32 copied_seq = READ_ONCE(tp->copied_seq); 2403 u32 rcv_nxt = READ_ONCE(tp->rcv_nxt); 2404 int inq; 2405 2406 inq = rcv_nxt - copied_seq; 2407 if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) { 2408 lock_sock(sk); 2409 inq = tp->rcv_nxt - tp->copied_seq; 2410 release_sock(sk); 2411 } 2412 /* After receiving a FIN, tell the user-space to continue reading 2413 * by returning a non-zero inq. 
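 * Otherwise an application that trusts the hint would stop calling
 * recvmsg() and never observe the zero-length read that signals EOF.
 * Userspace opts in to the hint with something like (sketch for a
 * connected socket "fd", error handling omitted):
 *
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_INQ, &one, sizeof(one));
 *
 * and then reads the value from the TCP_CM_INQ control message
 * returned by recvmsg().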
2414 */ 2415 if (inq == 0 && sock_flag(sk, SOCK_DONE)) 2416 inq = 1; 2417 return inq; 2418 } 2419 2420 /* 2421 * This routine copies from a sock struct into the user buffer. 2422 * 2423 * Technical note: in 2.3 we work on _locked_ socket, so that 2424 * tricks with *seq access order and skb->users are not required. 2425 * Probably, code can be easily improved even more. 2426 */ 2427 2428 static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, 2429 int flags, struct scm_timestamping_internal *tss, 2430 int *cmsg_flags) 2431 { 2432 struct tcp_sock *tp = tcp_sk(sk); 2433 int copied = 0; 2434 u32 peek_seq; 2435 u32 *seq; 2436 unsigned long used; 2437 int err; 2438 int target; /* Read at least this many bytes */ 2439 long timeo; 2440 struct sk_buff *skb, *last; 2441 u32 urg_hole = 0; 2442 2443 err = -ENOTCONN; 2444 if (sk->sk_state == TCP_LISTEN) 2445 goto out; 2446 2447 if (tp->recvmsg_inq) { 2448 *cmsg_flags = TCP_CMSG_INQ; 2449 msg->msg_get_inq = 1; 2450 } 2451 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 2452 2453 /* Urgent data needs to be handled specially. */ 2454 if (flags & MSG_OOB) 2455 goto recv_urg; 2456 2457 if (unlikely(tp->repair)) { 2458 err = -EPERM; 2459 if (!(flags & MSG_PEEK)) 2460 goto out; 2461 2462 if (tp->repair_queue == TCP_SEND_QUEUE) 2463 goto recv_sndq; 2464 2465 err = -EINVAL; 2466 if (tp->repair_queue == TCP_NO_QUEUE) 2467 goto out; 2468 2469 /* 'common' recv queue MSG_PEEK-ing */ 2470 } 2471 2472 seq = &tp->copied_seq; 2473 if (flags & MSG_PEEK) { 2474 peek_seq = tp->copied_seq; 2475 seq = &peek_seq; 2476 } 2477 2478 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 2479 2480 do { 2481 u32 offset; 2482 2483 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ 2484 if (unlikely(tp->urg_data) && tp->urg_seq == *seq) { 2485 if (copied) 2486 break; 2487 if (signal_pending(current)) { 2488 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; 2489 break; 2490 } 2491 } 2492 2493 /* Next get a buffer. */ 2494 2495 last = skb_peek_tail(&sk->sk_receive_queue); 2496 skb_queue_walk(&sk->sk_receive_queue, skb) { 2497 last = skb; 2498 /* Now that we have two receive queues this 2499 * shouldn't happen. 2500 */ 2501 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), 2502 "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n", 2503 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, 2504 flags)) 2505 break; 2506 2507 offset = *seq - TCP_SKB_CB(skb)->seq; 2508 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 2509 pr_err_once("%s: found a SYN, please report !\n", __func__); 2510 offset--; 2511 } 2512 if (offset < skb->len) 2513 goto found_ok_skb; 2514 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 2515 goto found_fin_ok; 2516 WARN(!(flags & MSG_PEEK), 2517 "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n", 2518 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); 2519 } 2520 2521 /* Well, if we have backlog, try to process it now yet. */ 2522 2523 if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) 2524 break; 2525 2526 if (copied) { 2527 if (!timeo || 2528 sk->sk_err || 2529 sk->sk_state == TCP_CLOSE || 2530 (sk->sk_shutdown & RCV_SHUTDOWN) || 2531 signal_pending(current)) 2532 break; 2533 } else { 2534 if (sock_flag(sk, SOCK_DONE)) 2535 break; 2536 2537 if (sk->sk_err) { 2538 copied = sock_error(sk); 2539 break; 2540 } 2541 2542 if (sk->sk_shutdown & RCV_SHUTDOWN) 2543 break; 2544 2545 if (sk->sk_state == TCP_CLOSE) { 2546 /* This occurs when user tries to read 2547 * from never connected socket. 
2548 */ 2549 copied = -ENOTCONN; 2550 break; 2551 } 2552 2553 if (!timeo) { 2554 copied = -EAGAIN; 2555 break; 2556 } 2557 2558 if (signal_pending(current)) { 2559 copied = sock_intr_errno(timeo); 2560 break; 2561 } 2562 } 2563 2564 if (copied >= target) { 2565 /* Do not sleep, just process backlog. */ 2566 __sk_flush_backlog(sk); 2567 } else { 2568 tcp_cleanup_rbuf(sk, copied); 2569 sk_wait_data(sk, &timeo, last); 2570 } 2571 2572 if ((flags & MSG_PEEK) && 2573 (peek_seq - copied - urg_hole != tp->copied_seq)) { 2574 net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", 2575 current->comm, 2576 task_pid_nr(current)); 2577 peek_seq = tp->copied_seq; 2578 } 2579 continue; 2580 2581 found_ok_skb: 2582 /* Ok so how much can we use? */ 2583 used = skb->len - offset; 2584 if (len < used) 2585 used = len; 2586 2587 /* Do we have urgent data here? */ 2588 if (unlikely(tp->urg_data)) { 2589 u32 urg_offset = tp->urg_seq - *seq; 2590 if (urg_offset < used) { 2591 if (!urg_offset) { 2592 if (!sock_flag(sk, SOCK_URGINLINE)) { 2593 WRITE_ONCE(*seq, *seq + 1); 2594 urg_hole++; 2595 offset++; 2596 used--; 2597 if (!used) 2598 goto skip_copy; 2599 } 2600 } else 2601 used = urg_offset; 2602 } 2603 } 2604 2605 if (!(flags & MSG_TRUNC)) { 2606 err = skb_copy_datagram_msg(skb, offset, msg, used); 2607 if (err) { 2608 /* Exception. Bailout! */ 2609 if (!copied) 2610 copied = -EFAULT; 2611 break; 2612 } 2613 } 2614 2615 WRITE_ONCE(*seq, *seq + used); 2616 copied += used; 2617 len -= used; 2618 2619 tcp_rcv_space_adjust(sk); 2620 2621 skip_copy: 2622 if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) { 2623 WRITE_ONCE(tp->urg_data, 0); 2624 tcp_fast_path_check(sk); 2625 } 2626 2627 if (TCP_SKB_CB(skb)->has_rxtstamp) { 2628 tcp_update_recv_tstamps(skb, tss); 2629 *cmsg_flags |= TCP_CMSG_TS; 2630 } 2631 2632 if (used + offset < skb->len) 2633 continue; 2634 2635 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 2636 goto found_fin_ok; 2637 if (!(flags & MSG_PEEK)) 2638 tcp_eat_recv_skb(sk, skb); 2639 continue; 2640 2641 found_fin_ok: 2642 /* Process the FIN. */ 2643 WRITE_ONCE(*seq, *seq + 1); 2644 if (!(flags & MSG_PEEK)) 2645 tcp_eat_recv_skb(sk, skb); 2646 break; 2647 } while (len > 0); 2648 2649 /* According to UNIX98, msg_name/msg_namelen are ignored 2650 * on connected socket. I was just happy when found this 8) --ANK 2651 */ 2652 2653 /* Clean up data we have read: This will do ACK frames. 
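 * (tcp_cleanup_rbuf() may send a window update now that the
 * application has freed receive-queue space.)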
*/ 2654 tcp_cleanup_rbuf(sk, copied); 2655 return copied; 2656 2657 out: 2658 return err; 2659 2660 recv_urg: 2661 err = tcp_recv_urg(sk, msg, len, flags); 2662 goto out; 2663 2664 recv_sndq: 2665 err = tcp_peek_sndq(sk, msg, len); 2666 goto out; 2667 } 2668 2669 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, 2670 int *addr_len) 2671 { 2672 int cmsg_flags = 0, ret; 2673 struct scm_timestamping_internal tss; 2674 2675 if (unlikely(flags & MSG_ERRQUEUE)) 2676 return inet_recv_error(sk, msg, len, addr_len); 2677 2678 if (sk_can_busy_loop(sk) && 2679 skb_queue_empty_lockless(&sk->sk_receive_queue) && 2680 sk->sk_state == TCP_ESTABLISHED) 2681 sk_busy_loop(sk, flags & MSG_DONTWAIT); 2682 2683 lock_sock(sk); 2684 ret = tcp_recvmsg_locked(sk, msg, len, flags, &tss, &cmsg_flags); 2685 release_sock(sk); 2686 2687 if ((cmsg_flags || msg->msg_get_inq) && ret >= 0) { 2688 if (cmsg_flags & TCP_CMSG_TS) 2689 tcp_recv_timestamp(msg, sk, &tss); 2690 if (msg->msg_get_inq) { 2691 msg->msg_inq = tcp_inq_hint(sk); 2692 if (cmsg_flags & TCP_CMSG_INQ) 2693 put_cmsg(msg, SOL_TCP, TCP_CM_INQ, 2694 sizeof(msg->msg_inq), &msg->msg_inq); 2695 } 2696 } 2697 return ret; 2698 } 2699 EXPORT_SYMBOL(tcp_recvmsg); 2700 2701 void tcp_set_state(struct sock *sk, int state) 2702 { 2703 int oldstate = sk->sk_state; 2704 2705 /* We defined a new enum for TCP states that are exported in BPF 2706 * so as not force the internal TCP states to be frozen. The 2707 * following checks will detect if an internal state value ever 2708 * differs from the BPF value. If this ever happens, then we will 2709 * need to remap the internal value to the BPF value before calling 2710 * tcp_call_bpf_2arg. 2711 */ 2712 BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED); 2713 BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT); 2714 BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV); 2715 BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1); 2716 BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2); 2717 BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT); 2718 BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE); 2719 BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT); 2720 BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK); 2721 BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN); 2722 BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING); 2723 BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV); 2724 BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES); 2725 2726 /* bpf uapi header bpf.h defines an anonymous enum with values 2727 * BPF_TCP_* used by bpf programs. Currently gcc built vmlinux 2728 * is able to emit this enum in DWARF due to the above BUILD_BUG_ON. 2729 * But clang built vmlinux does not have this enum in DWARF 2730 * since clang removes the above code before generating IR/debuginfo. 2731 * Let us explicitly emit the type debuginfo to ensure the 2732 * above-mentioned anonymous enum in the vmlinux DWARF and hence BTF 2733 * regardless of which compiler is used. 
2734 */ 2735 BTF_TYPE_EMIT_ENUM(BPF_TCP_ESTABLISHED); 2736 2737 if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG)) 2738 tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state); 2739 2740 switch (state) { 2741 case TCP_ESTABLISHED: 2742 if (oldstate != TCP_ESTABLISHED) 2743 TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2744 break; 2745 2746 case TCP_CLOSE: 2747 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) 2748 TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); 2749 2750 sk->sk_prot->unhash(sk); 2751 if (inet_csk(sk)->icsk_bind_hash && 2752 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 2753 inet_put_port(sk); 2754 fallthrough; 2755 default: 2756 if (oldstate == TCP_ESTABLISHED) 2757 TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2758 } 2759 2760 /* Change state AFTER socket is unhashed to avoid closed 2761 * socket sitting in hash tables. 2762 */ 2763 inet_sk_state_store(sk, state); 2764 } 2765 EXPORT_SYMBOL_GPL(tcp_set_state); 2766 2767 /* 2768 * State processing on a close. This implements the state shift for 2769 * sending our FIN frame. Note that we only send a FIN for some 2770 * states. A shutdown() may have already sent the FIN, or we may be 2771 * closed. 2772 */ 2773 2774 static const unsigned char new_state[16] = { 2775 /* current state: new state: action: */ 2776 [0 /* (Invalid) */] = TCP_CLOSE, 2777 [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 2778 [TCP_SYN_SENT] = TCP_CLOSE, 2779 [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 2780 [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, 2781 [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, 2782 [TCP_TIME_WAIT] = TCP_CLOSE, 2783 [TCP_CLOSE] = TCP_CLOSE, 2784 [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, 2785 [TCP_LAST_ACK] = TCP_LAST_ACK, 2786 [TCP_LISTEN] = TCP_CLOSE, 2787 [TCP_CLOSING] = TCP_CLOSING, 2788 [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */ 2789 }; 2790 2791 static int tcp_close_state(struct sock *sk) 2792 { 2793 int next = (int)new_state[sk->sk_state]; 2794 int ns = next & TCP_STATE_MASK; 2795 2796 tcp_set_state(sk, ns); 2797 2798 return next & TCP_ACTION_FIN; 2799 } 2800 2801 /* 2802 * Shutdown the sending side of a connection. Much like close except 2803 * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD). 2804 */ 2805 2806 void tcp_shutdown(struct sock *sk, int how) 2807 { 2808 /* We need to grab some memory, and put together a FIN, 2809 * and then put it into the queue to be sent. 2810 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. 2811 */ 2812 if (!(how & SEND_SHUTDOWN)) 2813 return; 2814 2815 /* If we've already sent a FIN, or it's a closed state, skip this. */ 2816 if ((1 << sk->sk_state) & 2817 (TCPF_ESTABLISHED | TCPF_SYN_SENT | 2818 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { 2819 /* Clear out any half completed packets. FIN if needed. 
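 * tcp_close_state() consults the new_state[] table below and reports,
 * via TCP_ACTION_FIN, whether this state transition requires us to
 * emit a FIN.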
*/ 2820 if (tcp_close_state(sk)) 2821 tcp_send_fin(sk); 2822 } 2823 } 2824 EXPORT_SYMBOL(tcp_shutdown); 2825 2826 int tcp_orphan_count_sum(void) 2827 { 2828 int i, total = 0; 2829 2830 for_each_possible_cpu(i) 2831 total += per_cpu(tcp_orphan_count, i); 2832 2833 return max(total, 0); 2834 } 2835 2836 static int tcp_orphan_cache; 2837 static struct timer_list tcp_orphan_timer; 2838 #define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100) 2839 2840 static void tcp_orphan_update(struct timer_list *unused) 2841 { 2842 WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum()); 2843 mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); 2844 } 2845 2846 static bool tcp_too_many_orphans(int shift) 2847 { 2848 return READ_ONCE(tcp_orphan_cache) << shift > 2849 READ_ONCE(sysctl_tcp_max_orphans); 2850 } 2851 2852 bool tcp_check_oom(struct sock *sk, int shift) 2853 { 2854 bool too_many_orphans, out_of_socket_memory; 2855 2856 too_many_orphans = tcp_too_many_orphans(shift); 2857 out_of_socket_memory = tcp_out_of_memory(sk); 2858 2859 if (too_many_orphans) 2860 net_info_ratelimited("too many orphaned sockets\n"); 2861 if (out_of_socket_memory) 2862 net_info_ratelimited("out of memory -- consider tuning tcp_mem\n"); 2863 return too_many_orphans || out_of_socket_memory; 2864 } 2865 2866 void __tcp_close(struct sock *sk, long timeout) 2867 { 2868 struct sk_buff *skb; 2869 int data_was_unread = 0; 2870 int state; 2871 2872 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); 2873 2874 if (sk->sk_state == TCP_LISTEN) { 2875 tcp_set_state(sk, TCP_CLOSE); 2876 2877 /* Special case. */ 2878 inet_csk_listen_stop(sk); 2879 2880 goto adjudge_to_death; 2881 } 2882 2883 /* We need to flush the recv. buffs. We do this only on the 2884 * descriptor close, not protocol-sourced closes, because the 2885 * reader process may not have drained the data yet! 2886 */ 2887 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { 2888 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq; 2889 2890 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 2891 len--; 2892 data_was_unread += len; 2893 __kfree_skb(skb); 2894 } 2895 2896 /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */ 2897 if (sk->sk_state == TCP_CLOSE) 2898 goto adjudge_to_death; 2899 2900 /* As outlined in RFC 2525, section 2.17, we send a RST here because 2901 * data was lost. To witness the awful effects of the old behavior of 2902 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk 2903 * GET in an FTP client, suspend the process, wait for the client to 2904 * advertise a zero window, then kill -9 the FTP client, wheee... 2905 * Note: timeout is always zero in such a case. 2906 */ 2907 if (unlikely(tcp_sk(sk)->repair)) { 2908 sk->sk_prot->disconnect(sk, 0); 2909 } else if (data_was_unread) { 2910 /* Unread data was tossed, zap the connection. */ 2911 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); 2912 tcp_set_state(sk, TCP_CLOSE); 2913 tcp_send_active_reset(sk, sk->sk_allocation); 2914 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { 2915 /* Check zero linger _after_ checking for unread data. */ 2916 sk->sk_prot->disconnect(sk, 0); 2917 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 2918 } else if (tcp_close_state(sk)) { 2919 /* We FIN if the application ate all the data before 2920 * zapping the connection. 2921 */ 2922 2923 /* RED-PEN. Formally speaking, we have broken TCP state 2924 * machine. 
State transitions: 2925 * 2926 * TCP_ESTABLISHED -> TCP_FIN_WAIT1 2927 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible) 2928 * TCP_CLOSE_WAIT -> TCP_LAST_ACK 2929 * 2930 * are legal only when FIN has been sent (i.e. in window), 2931 * rather than queued out of window. Purists blame. 2932 * 2933 * F.e. "RFC state" is ESTABLISHED, 2934 * if Linux state is FIN-WAIT-1, but FIN is still not sent. 2935 * 2936 * The visible declinations are that sometimes 2937 * we enter time-wait state, when it is not required really 2938 * (harmless), do not send active resets, when they are 2939 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when 2940 * they look as CLOSING or LAST_ACK for Linux) 2941 * Probably, I missed some more holelets. 2942 * --ANK 2943 * XXX (TFO) - To start off we don't support SYN+ACK+FIN 2944 * in a single packet! (May consider it later but will 2945 * probably need API support or TCP_CORK SYN-ACK until 2946 * data is written and socket is closed.) 2947 */ 2948 tcp_send_fin(sk); 2949 } 2950 2951 sk_stream_wait_close(sk, timeout); 2952 2953 adjudge_to_death: 2954 state = sk->sk_state; 2955 sock_hold(sk); 2956 sock_orphan(sk); 2957 2958 local_bh_disable(); 2959 bh_lock_sock(sk); 2960 /* remove backlog if any, without releasing ownership. */ 2961 __release_sock(sk); 2962 2963 this_cpu_inc(tcp_orphan_count); 2964 2965 /* Have we already been destroyed by a softirq or backlog? */ 2966 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) 2967 goto out; 2968 2969 /* This is a (useful) BSD violating of the RFC. There is a 2970 * problem with TCP as specified in that the other end could 2971 * keep a socket open forever with no application left this end. 2972 * We use a 1 minute timeout (about the same as BSD) then kill 2973 * our end. If they send after that then tough - BUT: long enough 2974 * that we won't make the old 4*rto = almost no time - whoops 2975 * reset mistake. 2976 * 2977 * Nope, it was not mistake. It is really desired behaviour 2978 * f.e. on http servers, when such sockets are useless, but 2979 * consume significant resources. Let's do it with special 2980 * linger2 option. --ANK 2981 */ 2982 2983 if (sk->sk_state == TCP_FIN_WAIT2) { 2984 struct tcp_sock *tp = tcp_sk(sk); 2985 if (tp->linger2 < 0) { 2986 tcp_set_state(sk, TCP_CLOSE); 2987 tcp_send_active_reset(sk, GFP_ATOMIC); 2988 __NET_INC_STATS(sock_net(sk), 2989 LINUX_MIB_TCPABORTONLINGER); 2990 } else { 2991 const int tmo = tcp_fin_time(sk); 2992 2993 if (tmo > TCP_TIMEWAIT_LEN) { 2994 inet_csk_reset_keepalive_timer(sk, 2995 tmo - TCP_TIMEWAIT_LEN); 2996 } else { 2997 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 2998 goto out; 2999 } 3000 } 3001 } 3002 if (sk->sk_state != TCP_CLOSE) { 3003 if (tcp_check_oom(sk, 0)) { 3004 tcp_set_state(sk, TCP_CLOSE); 3005 tcp_send_active_reset(sk, GFP_ATOMIC); 3006 __NET_INC_STATS(sock_net(sk), 3007 LINUX_MIB_TCPABORTONMEMORY); 3008 } else if (!check_net(sock_net(sk))) { 3009 /* Not possible to send reset; just close */ 3010 tcp_set_state(sk, TCP_CLOSE); 3011 } 3012 } 3013 3014 if (sk->sk_state == TCP_CLOSE) { 3015 struct request_sock *req; 3016 3017 req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 3018 lockdep_sock_is_held(sk)); 3019 /* We could get here with a non-NULL req if the socket is 3020 * aborted (e.g., closed with unread data) before 3WHS 3021 * finishes. 3022 */ 3023 if (req) 3024 reqsk_fastopen_remove(sk, req, false); 3025 inet_csk_destroy_sock(sk); 3026 } 3027 /* Otherwise, socket is reprieved until protocol close. 
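 * It lingers as an orphan; the remaining protocol timers (FIN-WAIT-2,
 * TIME-WAIT, retransmission) eventually tear it down.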
*/ 3028 3029 out: 3030 bh_unlock_sock(sk); 3031 local_bh_enable(); 3032 } 3033 3034 void tcp_close(struct sock *sk, long timeout) 3035 { 3036 lock_sock(sk); 3037 __tcp_close(sk, timeout); 3038 release_sock(sk); 3039 sock_put(sk); 3040 } 3041 EXPORT_SYMBOL(tcp_close); 3042 3043 /* These states need RST on ABORT according to RFC793 */ 3044 3045 static inline bool tcp_need_reset(int state) 3046 { 3047 return (1 << state) & 3048 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | 3049 TCPF_FIN_WAIT2 | TCPF_SYN_RECV); 3050 } 3051 3052 static void tcp_rtx_queue_purge(struct sock *sk) 3053 { 3054 struct rb_node *p = rb_first(&sk->tcp_rtx_queue); 3055 3056 tcp_sk(sk)->highest_sack = NULL; 3057 while (p) { 3058 struct sk_buff *skb = rb_to_skb(p); 3059 3060 p = rb_next(p); 3061 /* Since we are deleting whole queue, no need to 3062 * list_del(&skb->tcp_tsorted_anchor) 3063 */ 3064 tcp_rtx_queue_unlink(skb, sk); 3065 tcp_wmem_free_skb(sk, skb); 3066 } 3067 } 3068 3069 void tcp_write_queue_purge(struct sock *sk) 3070 { 3071 struct sk_buff *skb; 3072 3073 tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 3074 while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { 3075 tcp_skb_tsorted_anchor_cleanup(skb); 3076 tcp_wmem_free_skb(sk, skb); 3077 } 3078 tcp_rtx_queue_purge(sk); 3079 INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); 3080 tcp_clear_all_retrans_hints(tcp_sk(sk)); 3081 tcp_sk(sk)->packets_out = 0; 3082 inet_csk(sk)->icsk_backoff = 0; 3083 } 3084 3085 int tcp_disconnect(struct sock *sk, int flags) 3086 { 3087 struct inet_sock *inet = inet_sk(sk); 3088 struct inet_connection_sock *icsk = inet_csk(sk); 3089 struct tcp_sock *tp = tcp_sk(sk); 3090 int old_state = sk->sk_state; 3091 u32 seq; 3092 3093 if (old_state != TCP_CLOSE) 3094 tcp_set_state(sk, TCP_CLOSE); 3095 3096 /* ABORT function of RFC793 */ 3097 if (old_state == TCP_LISTEN) { 3098 inet_csk_listen_stop(sk); 3099 } else if (unlikely(tp->repair)) { 3100 WRITE_ONCE(sk->sk_err, ECONNABORTED); 3101 } else if (tcp_need_reset(old_state) || 3102 (tp->snd_nxt != tp->write_seq && 3103 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { 3104 /* The last check adjusts for discrepancy of Linux wrt. 
RFC 3105 * states 3106 */ 3107 tcp_send_active_reset(sk, gfp_any()); 3108 WRITE_ONCE(sk->sk_err, ECONNRESET); 3109 } else if (old_state == TCP_SYN_SENT) 3110 WRITE_ONCE(sk->sk_err, ECONNRESET); 3111 3112 tcp_clear_xmit_timers(sk); 3113 __skb_queue_purge(&sk->sk_receive_queue); 3114 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); 3115 WRITE_ONCE(tp->urg_data, 0); 3116 tcp_write_queue_purge(sk); 3117 tcp_fastopen_active_disable_ofo_check(sk); 3118 skb_rbtree_purge(&tp->out_of_order_queue); 3119 3120 inet->inet_dport = 0; 3121 3122 inet_bhash2_reset_saddr(sk); 3123 3124 WRITE_ONCE(sk->sk_shutdown, 0); 3125 sock_reset_flag(sk, SOCK_DONE); 3126 tp->srtt_us = 0; 3127 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); 3128 tp->rcv_rtt_last_tsecr = 0; 3129 3130 seq = tp->write_seq + tp->max_window + 2; 3131 if (!seq) 3132 seq = 1; 3133 WRITE_ONCE(tp->write_seq, seq); 3134 3135 icsk->icsk_backoff = 0; 3136 icsk->icsk_probes_out = 0; 3137 icsk->icsk_probes_tstamp = 0; 3138 icsk->icsk_rto = TCP_TIMEOUT_INIT; 3139 icsk->icsk_rto_min = TCP_RTO_MIN; 3140 icsk->icsk_delack_max = TCP_DELACK_MAX; 3141 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 3142 tcp_snd_cwnd_set(tp, TCP_INIT_CWND); 3143 tp->snd_cwnd_cnt = 0; 3144 tp->is_cwnd_limited = 0; 3145 tp->max_packets_out = 0; 3146 tp->window_clamp = 0; 3147 tp->delivered = 0; 3148 tp->delivered_ce = 0; 3149 if (icsk->icsk_ca_ops->release) 3150 icsk->icsk_ca_ops->release(sk); 3151 memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); 3152 icsk->icsk_ca_initialized = 0; 3153 tcp_set_ca_state(sk, TCP_CA_Open); 3154 tp->is_sack_reneg = 0; 3155 tcp_clear_retrans(tp); 3156 tp->total_retrans = 0; 3157 inet_csk_delack_init(sk); 3158 /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 3159 * issue in __tcp_select_window() 3160 */ 3161 icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; 3162 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 3163 __sk_dst_reset(sk); 3164 dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL)); 3165 tcp_saved_syn_free(tp); 3166 tp->compressed_ack = 0; 3167 tp->segs_in = 0; 3168 tp->segs_out = 0; 3169 tp->bytes_sent = 0; 3170 tp->bytes_acked = 0; 3171 tp->bytes_received = 0; 3172 tp->bytes_retrans = 0; 3173 tp->data_segs_in = 0; 3174 tp->data_segs_out = 0; 3175 tp->duplicate_sack[0].start_seq = 0; 3176 tp->duplicate_sack[0].end_seq = 0; 3177 tp->dsack_dups = 0; 3178 tp->reord_seen = 0; 3179 tp->retrans_out = 0; 3180 tp->sacked_out = 0; 3181 tp->tlp_high_seq = 0; 3182 tp->last_oow_ack_time = 0; 3183 tp->plb_rehash = 0; 3184 /* There's a bubble in the pipe until at least the first ACK. 
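 * Mark the connection application-limited so that delivery rate
 * samples taken before that first ACK are not mistaken for the
 * path's real capacity.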
*/ 3185 tp->app_limited = ~0U; 3186 tp->rate_app_limited = 1; 3187 tp->rack.mstamp = 0; 3188 tp->rack.advanced = 0; 3189 tp->rack.reo_wnd_steps = 1; 3190 tp->rack.last_delivered = 0; 3191 tp->rack.reo_wnd_persist = 0; 3192 tp->rack.dsack_seen = 0; 3193 tp->syn_data_acked = 0; 3194 tp->rx_opt.saw_tstamp = 0; 3195 tp->rx_opt.dsack = 0; 3196 tp->rx_opt.num_sacks = 0; 3197 tp->rcv_ooopack = 0; 3198 3199 3200 /* Clean up fastopen related fields */ 3201 tcp_free_fastopen_req(tp); 3202 inet->defer_connect = 0; 3203 tp->fastopen_client_fail = 0; 3204 3205 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); 3206 3207 if (sk->sk_frag.page) { 3208 put_page(sk->sk_frag.page); 3209 sk->sk_frag.page = NULL; 3210 sk->sk_frag.offset = 0; 3211 } 3212 sk_error_report(sk); 3213 return 0; 3214 } 3215 EXPORT_SYMBOL(tcp_disconnect); 3216 3217 static inline bool tcp_can_repair_sock(const struct sock *sk) 3218 { 3219 return sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && 3220 (sk->sk_state != TCP_LISTEN); 3221 } 3222 3223 static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len) 3224 { 3225 struct tcp_repair_window opt; 3226 3227 if (!tp->repair) 3228 return -EPERM; 3229 3230 if (len != sizeof(opt)) 3231 return -EINVAL; 3232 3233 if (copy_from_sockptr(&opt, optbuf, sizeof(opt))) 3234 return -EFAULT; 3235 3236 if (opt.max_window < opt.snd_wnd) 3237 return -EINVAL; 3238 3239 if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd)) 3240 return -EINVAL; 3241 3242 if (after(opt.rcv_wup, tp->rcv_nxt)) 3243 return -EINVAL; 3244 3245 tp->snd_wl1 = opt.snd_wl1; 3246 tp->snd_wnd = opt.snd_wnd; 3247 tp->max_window = opt.max_window; 3248 3249 tp->rcv_wnd = opt.rcv_wnd; 3250 tp->rcv_wup = opt.rcv_wup; 3251 3252 return 0; 3253 } 3254 3255 static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf, 3256 unsigned int len) 3257 { 3258 struct tcp_sock *tp = tcp_sk(sk); 3259 struct tcp_repair_opt opt; 3260 size_t offset = 0; 3261 3262 while (len >= sizeof(opt)) { 3263 if (copy_from_sockptr_offset(&opt, optbuf, offset, sizeof(opt))) 3264 return -EFAULT; 3265 3266 offset += sizeof(opt); 3267 len -= sizeof(opt); 3268 3269 switch (opt.opt_code) { 3270 case TCPOPT_MSS: 3271 tp->rx_opt.mss_clamp = opt.opt_val; 3272 tcp_mtup_init(sk); 3273 break; 3274 case TCPOPT_WINDOW: 3275 { 3276 u16 snd_wscale = opt.opt_val & 0xFFFF; 3277 u16 rcv_wscale = opt.opt_val >> 16; 3278 3279 if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE) 3280 return -EFBIG; 3281 3282 tp->rx_opt.snd_wscale = snd_wscale; 3283 tp->rx_opt.rcv_wscale = rcv_wscale; 3284 tp->rx_opt.wscale_ok = 1; 3285 } 3286 break; 3287 case TCPOPT_SACK_PERM: 3288 if (opt.opt_val != 0) 3289 return -EINVAL; 3290 3291 tp->rx_opt.sack_ok |= TCP_SACK_SEEN; 3292 break; 3293 case TCPOPT_TIMESTAMP: 3294 if (opt.opt_val != 0) 3295 return -EINVAL; 3296 3297 tp->rx_opt.tstamp_ok = 1; 3298 break; 3299 } 3300 } 3301 3302 return 0; 3303 } 3304 3305 DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled); 3306 EXPORT_SYMBOL(tcp_tx_delay_enabled); 3307 3308 static void tcp_enable_tx_delay(void) 3309 { 3310 if (!static_branch_unlikely(&tcp_tx_delay_enabled)) { 3311 static int __tcp_tx_delay_enabled = 0; 3312 3313 if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) { 3314 static_branch_enable(&tcp_tx_delay_enabled); 3315 pr_info("TCP_TX_DELAY enabled\n"); 3316 } 3317 } 3318 } 3319 3320 /* When set indicates to always queue non-full frames. Later the user clears 3321 * this option and we transmit any pending partial frames in the queue. 
This is 3322 * meant to be used alongside sendfile() to get properly filled frames when the 3323 * user (for example) must write out headers with a write() call first and then 3324 * use sendfile to send out the data parts. 3325 * 3326 * TCP_CORK can be set together with TCP_NODELAY and it is stronger than 3327 * TCP_NODELAY. 3328 */ 3329 void __tcp_sock_set_cork(struct sock *sk, bool on) 3330 { 3331 struct tcp_sock *tp = tcp_sk(sk); 3332 3333 if (on) { 3334 tp->nonagle |= TCP_NAGLE_CORK; 3335 } else { 3336 tp->nonagle &= ~TCP_NAGLE_CORK; 3337 if (tp->nonagle & TCP_NAGLE_OFF) 3338 tp->nonagle |= TCP_NAGLE_PUSH; 3339 tcp_push_pending_frames(sk); 3340 } 3341 } 3342 3343 void tcp_sock_set_cork(struct sock *sk, bool on) 3344 { 3345 lock_sock(sk); 3346 __tcp_sock_set_cork(sk, on); 3347 release_sock(sk); 3348 } 3349 EXPORT_SYMBOL(tcp_sock_set_cork); 3350 3351 /* TCP_NODELAY is weaker than TCP_CORK, so that this option on corked socket is 3352 * remembered, but it is not activated until cork is cleared. 3353 * 3354 * However, when TCP_NODELAY is set we make an explicit push, which overrides 3355 * even TCP_CORK for currently queued segments. 3356 */ 3357 void __tcp_sock_set_nodelay(struct sock *sk, bool on) 3358 { 3359 if (on) { 3360 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; 3361 tcp_push_pending_frames(sk); 3362 } else { 3363 tcp_sk(sk)->nonagle &= ~TCP_NAGLE_OFF; 3364 } 3365 } 3366 3367 void tcp_sock_set_nodelay(struct sock *sk) 3368 { 3369 lock_sock(sk); 3370 __tcp_sock_set_nodelay(sk, true); 3371 release_sock(sk); 3372 } 3373 EXPORT_SYMBOL(tcp_sock_set_nodelay); 3374 3375 static void __tcp_sock_set_quickack(struct sock *sk, int val) 3376 { 3377 if (!val) { 3378 inet_csk_enter_pingpong_mode(sk); 3379 return; 3380 } 3381 3382 inet_csk_exit_pingpong_mode(sk); 3383 if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && 3384 inet_csk_ack_scheduled(sk)) { 3385 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED; 3386 tcp_cleanup_rbuf(sk, 1); 3387 if (!(val & 1)) 3388 inet_csk_enter_pingpong_mode(sk); 3389 } 3390 } 3391 3392 void tcp_sock_set_quickack(struct sock *sk, int val) 3393 { 3394 lock_sock(sk); 3395 __tcp_sock_set_quickack(sk, val); 3396 release_sock(sk); 3397 } 3398 EXPORT_SYMBOL(tcp_sock_set_quickack); 3399 3400 int tcp_sock_set_syncnt(struct sock *sk, int val) 3401 { 3402 if (val < 1 || val > MAX_TCP_SYNCNT) 3403 return -EINVAL; 3404 3405 lock_sock(sk); 3406 inet_csk(sk)->icsk_syn_retries = val; 3407 release_sock(sk); 3408 return 0; 3409 } 3410 EXPORT_SYMBOL(tcp_sock_set_syncnt); 3411 3412 void tcp_sock_set_user_timeout(struct sock *sk, u32 val) 3413 { 3414 lock_sock(sk); 3415 inet_csk(sk)->icsk_user_timeout = val; 3416 release_sock(sk); 3417 } 3418 EXPORT_SYMBOL(tcp_sock_set_user_timeout); 3419 3420 int tcp_sock_set_keepidle_locked(struct sock *sk, int val) 3421 { 3422 struct tcp_sock *tp = tcp_sk(sk); 3423 3424 if (val < 1 || val > MAX_TCP_KEEPIDLE) 3425 return -EINVAL; 3426 3427 tp->keepalive_time = val * HZ; 3428 if (sock_flag(sk, SOCK_KEEPOPEN) && 3429 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { 3430 u32 elapsed = keepalive_time_elapsed(tp); 3431 3432 if (tp->keepalive_time > elapsed) 3433 elapsed = tp->keepalive_time - elapsed; 3434 else 3435 elapsed = 0; 3436 inet_csk_reset_keepalive_timer(sk, elapsed); 3437 } 3438 3439 return 0; 3440 } 3441 3442 int tcp_sock_set_keepidle(struct sock *sk, int val) 3443 { 3444 int err; 3445 3446 lock_sock(sk); 3447 err = tcp_sock_set_keepidle_locked(sk, val); 3448 release_sock(sk); 3449 return err; 3450 } 3451 
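/* In-kernel socket users typically combine this with the interval and
 * count setters below.  A minimal sketch, assuming a connected kernel
 * socket "sock" (values illustrative, error handling omitted):
 *
 *	sock_set_keepalive(sock->sk);
 *	tcp_sock_set_keepidle(sock->sk, 60);
 *	tcp_sock_set_keepintvl(sock->sk, 10);
 *	tcp_sock_set_keepcnt(sock->sk, 6);
 */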
EXPORT_SYMBOL(tcp_sock_set_keepidle); 3452 3453 int tcp_sock_set_keepintvl(struct sock *sk, int val) 3454 { 3455 if (val < 1 || val > MAX_TCP_KEEPINTVL) 3456 return -EINVAL; 3457 3458 lock_sock(sk); 3459 tcp_sk(sk)->keepalive_intvl = val * HZ; 3460 release_sock(sk); 3461 return 0; 3462 } 3463 EXPORT_SYMBOL(tcp_sock_set_keepintvl); 3464 3465 int tcp_sock_set_keepcnt(struct sock *sk, int val) 3466 { 3467 if (val < 1 || val > MAX_TCP_KEEPCNT) 3468 return -EINVAL; 3469 3470 lock_sock(sk); 3471 tcp_sk(sk)->keepalive_probes = val; 3472 release_sock(sk); 3473 return 0; 3474 } 3475 EXPORT_SYMBOL(tcp_sock_set_keepcnt); 3476 3477 int tcp_set_window_clamp(struct sock *sk, int val) 3478 { 3479 struct tcp_sock *tp = tcp_sk(sk); 3480 3481 if (!val) { 3482 if (sk->sk_state != TCP_CLOSE) 3483 return -EINVAL; 3484 tp->window_clamp = 0; 3485 } else { 3486 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? 3487 SOCK_MIN_RCVBUF / 2 : val; 3488 tp->rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp); 3489 } 3490 return 0; 3491 } 3492 3493 /* 3494 * Socket option code for TCP. 3495 */ 3496 int do_tcp_setsockopt(struct sock *sk, int level, int optname, 3497 sockptr_t optval, unsigned int optlen) 3498 { 3499 struct tcp_sock *tp = tcp_sk(sk); 3500 struct inet_connection_sock *icsk = inet_csk(sk); 3501 struct net *net = sock_net(sk); 3502 int val; 3503 int err = 0; 3504 3505 /* These are data/string values, all the others are ints */ 3506 switch (optname) { 3507 case TCP_CONGESTION: { 3508 char name[TCP_CA_NAME_MAX]; 3509 3510 if (optlen < 1) 3511 return -EINVAL; 3512 3513 val = strncpy_from_sockptr(name, optval, 3514 min_t(long, TCP_CA_NAME_MAX-1, optlen)); 3515 if (val < 0) 3516 return -EFAULT; 3517 name[val] = 0; 3518 3519 sockopt_lock_sock(sk); 3520 err = tcp_set_congestion_control(sk, name, !has_current_bpf_ctx(), 3521 sockopt_ns_capable(sock_net(sk)->user_ns, 3522 CAP_NET_ADMIN)); 3523 sockopt_release_sock(sk); 3524 return err; 3525 } 3526 case TCP_ULP: { 3527 char name[TCP_ULP_NAME_MAX]; 3528 3529 if (optlen < 1) 3530 return -EINVAL; 3531 3532 val = strncpy_from_sockptr(name, optval, 3533 min_t(long, TCP_ULP_NAME_MAX - 1, 3534 optlen)); 3535 if (val < 0) 3536 return -EFAULT; 3537 name[val] = 0; 3538 3539 sockopt_lock_sock(sk); 3540 err = tcp_set_ulp(sk, name); 3541 sockopt_release_sock(sk); 3542 return err; 3543 } 3544 case TCP_FASTOPEN_KEY: { 3545 __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH]; 3546 __u8 *backup_key = NULL; 3547 3548 /* Allow a backup key as well to facilitate key rotation 3549 * First key is the active one. 3550 */ 3551 if (optlen != TCP_FASTOPEN_KEY_LENGTH && 3552 optlen != TCP_FASTOPEN_KEY_BUF_LENGTH) 3553 return -EINVAL; 3554 3555 if (copy_from_sockptr(key, optval, optlen)) 3556 return -EFAULT; 3557 3558 if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH) 3559 backup_key = key + TCP_FASTOPEN_KEY_LENGTH; 3560 3561 return tcp_fastopen_reset_cipher(net, sk, key, backup_key); 3562 } 3563 default: 3564 /* fallthru */ 3565 break; 3566 } 3567 3568 if (optlen < sizeof(int)) 3569 return -EINVAL; 3570 3571 if (copy_from_sockptr(&val, optval, sizeof(val))) 3572 return -EFAULT; 3573 3574 sockopt_lock_sock(sk); 3575 3576 switch (optname) { 3577 case TCP_MAXSEG: 3578 /* Values greater than interface MTU won't take effect. 
However 3579 * at the point when this call is done we typically don't yet 3580 * know which interface is going to be used 3581 */ 3582 if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) { 3583 err = -EINVAL; 3584 break; 3585 } 3586 tp->rx_opt.user_mss = val; 3587 break; 3588 3589 case TCP_NODELAY: 3590 __tcp_sock_set_nodelay(sk, val); 3591 break; 3592 3593 case TCP_THIN_LINEAR_TIMEOUTS: 3594 if (val < 0 || val > 1) 3595 err = -EINVAL; 3596 else 3597 tp->thin_lto = val; 3598 break; 3599 3600 case TCP_THIN_DUPACK: 3601 if (val < 0 || val > 1) 3602 err = -EINVAL; 3603 break; 3604 3605 case TCP_REPAIR: 3606 if (!tcp_can_repair_sock(sk)) 3607 err = -EPERM; 3608 else if (val == TCP_REPAIR_ON) { 3609 tp->repair = 1; 3610 sk->sk_reuse = SK_FORCE_REUSE; 3611 tp->repair_queue = TCP_NO_QUEUE; 3612 } else if (val == TCP_REPAIR_OFF) { 3613 tp->repair = 0; 3614 sk->sk_reuse = SK_NO_REUSE; 3615 tcp_send_window_probe(sk); 3616 } else if (val == TCP_REPAIR_OFF_NO_WP) { 3617 tp->repair = 0; 3618 sk->sk_reuse = SK_NO_REUSE; 3619 } else 3620 err = -EINVAL; 3621 3622 break; 3623 3624 case TCP_REPAIR_QUEUE: 3625 if (!tp->repair) 3626 err = -EPERM; 3627 else if ((unsigned int)val < TCP_QUEUES_NR) 3628 tp->repair_queue = val; 3629 else 3630 err = -EINVAL; 3631 break; 3632 3633 case TCP_QUEUE_SEQ: 3634 if (sk->sk_state != TCP_CLOSE) { 3635 err = -EPERM; 3636 } else if (tp->repair_queue == TCP_SEND_QUEUE) { 3637 if (!tcp_rtx_queue_empty(sk)) 3638 err = -EPERM; 3639 else 3640 WRITE_ONCE(tp->write_seq, val); 3641 } else if (tp->repair_queue == TCP_RECV_QUEUE) { 3642 if (tp->rcv_nxt != tp->copied_seq) { 3643 err = -EPERM; 3644 } else { 3645 WRITE_ONCE(tp->rcv_nxt, val); 3646 WRITE_ONCE(tp->copied_seq, val); 3647 } 3648 } else { 3649 err = -EINVAL; 3650 } 3651 break; 3652 3653 case TCP_REPAIR_OPTIONS: 3654 if (!tp->repair) 3655 err = -EINVAL; 3656 else if (sk->sk_state == TCP_ESTABLISHED && !tp->bytes_sent) 3657 err = tcp_repair_options_est(sk, optval, optlen); 3658 else 3659 err = -EPERM; 3660 break; 3661 3662 case TCP_CORK: 3663 __tcp_sock_set_cork(sk, val); 3664 break; 3665 3666 case TCP_KEEPIDLE: 3667 err = tcp_sock_set_keepidle_locked(sk, val); 3668 break; 3669 case TCP_KEEPINTVL: 3670 if (val < 1 || val > MAX_TCP_KEEPINTVL) 3671 err = -EINVAL; 3672 else 3673 tp->keepalive_intvl = val * HZ; 3674 break; 3675 case TCP_KEEPCNT: 3676 if (val < 1 || val > MAX_TCP_KEEPCNT) 3677 err = -EINVAL; 3678 else 3679 tp->keepalive_probes = val; 3680 break; 3681 case TCP_SYNCNT: 3682 if (val < 1 || val > MAX_TCP_SYNCNT) 3683 err = -EINVAL; 3684 else 3685 icsk->icsk_syn_retries = val; 3686 break; 3687 3688 case TCP_SAVE_SYN: 3689 /* 0: disable, 1: enable, 2: start from ether_header */ 3690 if (val < 0 || val > 2) 3691 err = -EINVAL; 3692 else 3693 tp->save_syn = val; 3694 break; 3695 3696 case TCP_LINGER2: 3697 if (val < 0) 3698 tp->linger2 = -1; 3699 else if (val > TCP_FIN_TIMEOUT_MAX / HZ) 3700 tp->linger2 = TCP_FIN_TIMEOUT_MAX; 3701 else 3702 tp->linger2 = val * HZ; 3703 break; 3704 3705 case TCP_DEFER_ACCEPT: 3706 /* Translate value in seconds to number of retransmits */ 3707 icsk->icsk_accept_queue.rskq_defer_accept = 3708 secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, 3709 TCP_RTO_MAX / HZ); 3710 break; 3711 3712 case TCP_WINDOW_CLAMP: 3713 err = tcp_set_window_clamp(sk, val); 3714 break; 3715 3716 case TCP_QUICKACK: 3717 __tcp_sock_set_quickack(sk, val); 3718 break; 3719 3720 #ifdef CONFIG_TCP_MD5SIG 3721 case TCP_MD5SIG: 3722 case TCP_MD5SIG_EXT: 3723 err = tp->af_specific->md5_parse(sk, optname, optval, optlen); 3724 
break; 3725 #endif 3726 case TCP_USER_TIMEOUT: 3727 /* Cap the max time in ms TCP will retry or probe the window 3728 * before giving up and aborting (ETIMEDOUT) a connection. 3729 */ 3730 if (val < 0) 3731 err = -EINVAL; 3732 else 3733 icsk->icsk_user_timeout = val; 3734 break; 3735 3736 case TCP_FASTOPEN: 3737 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | 3738 TCPF_LISTEN))) { 3739 tcp_fastopen_init_key_once(net); 3740 3741 fastopen_queue_tune(sk, val); 3742 } else { 3743 err = -EINVAL; 3744 } 3745 break; 3746 case TCP_FASTOPEN_CONNECT: 3747 if (val > 1 || val < 0) { 3748 err = -EINVAL; 3749 } else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) & 3750 TFO_CLIENT_ENABLE) { 3751 if (sk->sk_state == TCP_CLOSE) 3752 tp->fastopen_connect = val; 3753 else 3754 err = -EINVAL; 3755 } else { 3756 err = -EOPNOTSUPP; 3757 } 3758 break; 3759 case TCP_FASTOPEN_NO_COOKIE: 3760 if (val > 1 || val < 0) 3761 err = -EINVAL; 3762 else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 3763 err = -EINVAL; 3764 else 3765 tp->fastopen_no_cookie = val; 3766 break; 3767 case TCP_TIMESTAMP: 3768 if (!tp->repair) 3769 err = -EPERM; 3770 else 3771 tp->tsoffset = val - tcp_time_stamp_raw(); 3772 break; 3773 case TCP_REPAIR_WINDOW: 3774 err = tcp_repair_set_window(tp, optval, optlen); 3775 break; 3776 case TCP_NOTSENT_LOWAT: 3777 tp->notsent_lowat = val; 3778 sk->sk_write_space(sk); 3779 break; 3780 case TCP_INQ: 3781 if (val > 1 || val < 0) 3782 err = -EINVAL; 3783 else 3784 tp->recvmsg_inq = val; 3785 break; 3786 case TCP_TX_DELAY: 3787 if (val) 3788 tcp_enable_tx_delay(); 3789 tp->tcp_tx_delay = val; 3790 break; 3791 default: 3792 err = -ENOPROTOOPT; 3793 break; 3794 } 3795 3796 sockopt_release_sock(sk); 3797 return err; 3798 } 3799 3800 int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, 3801 unsigned int optlen) 3802 { 3803 const struct inet_connection_sock *icsk = inet_csk(sk); 3804 3805 if (level != SOL_TCP) 3806 /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */ 3807 return READ_ONCE(icsk->icsk_af_ops)->setsockopt(sk, level, optname, 3808 optval, optlen); 3809 return do_tcp_setsockopt(sk, level, optname, optval, optlen); 3810 } 3811 EXPORT_SYMBOL(tcp_setsockopt); 3812 3813 static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, 3814 struct tcp_info *info) 3815 { 3816 u64 stats[__TCP_CHRONO_MAX], total = 0; 3817 enum tcp_chrono i; 3818 3819 for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) { 3820 stats[i] = tp->chrono_stat[i - 1]; 3821 if (i == tp->chrono_type) 3822 stats[i] += tcp_jiffies32 - tp->chrono_start; 3823 stats[i] *= USEC_PER_SEC / HZ; 3824 total += stats[i]; 3825 } 3826 3827 info->tcpi_busy_time = total; 3828 info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED]; 3829 info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED]; 3830 } 3831 3832 /* Return information about state of tcp endpoint in API format. */ 3833 void tcp_get_info(struct sock *sk, struct tcp_info *info) 3834 { 3835 const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ 3836 const struct inet_connection_sock *icsk = inet_csk(sk); 3837 unsigned long rate; 3838 u32 now; 3839 u64 rate64; 3840 bool slow; 3841 3842 memset(info, 0, sizeof(*info)); 3843 if (sk->sk_type != SOCK_STREAM) 3844 return; 3845 3846 info->tcpi_state = inet_sk_state_load(sk); 3847 3848 /* Report meaningful fields for all TCP states, including listeners */ 3849 rate = READ_ONCE(sk->sk_pacing_rate); 3850 rate64 = (rate != ~0UL) ? 
rate : ~0ULL; 3851 info->tcpi_pacing_rate = rate64; 3852 3853 rate = READ_ONCE(sk->sk_max_pacing_rate); 3854 rate64 = (rate != ~0UL) ? rate : ~0ULL; 3855 info->tcpi_max_pacing_rate = rate64; 3856 3857 info->tcpi_reordering = tp->reordering; 3858 info->tcpi_snd_cwnd = tcp_snd_cwnd(tp); 3859 3860 if (info->tcpi_state == TCP_LISTEN) { 3861 /* listeners aliased fields : 3862 * tcpi_unacked -> Number of children ready for accept() 3863 * tcpi_sacked -> max backlog 3864 */ 3865 info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog); 3866 info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog); 3867 return; 3868 } 3869 3870 slow = lock_sock_fast(sk); 3871 3872 info->tcpi_ca_state = icsk->icsk_ca_state; 3873 info->tcpi_retransmits = icsk->icsk_retransmits; 3874 info->tcpi_probes = icsk->icsk_probes_out; 3875 info->tcpi_backoff = icsk->icsk_backoff; 3876 3877 if (tp->rx_opt.tstamp_ok) 3878 info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 3879 if (tcp_is_sack(tp)) 3880 info->tcpi_options |= TCPI_OPT_SACK; 3881 if (tp->rx_opt.wscale_ok) { 3882 info->tcpi_options |= TCPI_OPT_WSCALE; 3883 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; 3884 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; 3885 } 3886 3887 if (tp->ecn_flags & TCP_ECN_OK) 3888 info->tcpi_options |= TCPI_OPT_ECN; 3889 if (tp->ecn_flags & TCP_ECN_SEEN) 3890 info->tcpi_options |= TCPI_OPT_ECN_SEEN; 3891 if (tp->syn_data_acked) 3892 info->tcpi_options |= TCPI_OPT_SYN_DATA; 3893 3894 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); 3895 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); 3896 info->tcpi_snd_mss = tp->mss_cache; 3897 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; 3898 3899 info->tcpi_unacked = tp->packets_out; 3900 info->tcpi_sacked = tp->sacked_out; 3901 3902 info->tcpi_lost = tp->lost_out; 3903 info->tcpi_retrans = tp->retrans_out; 3904 3905 now = tcp_jiffies32; 3906 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); 3907 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); 3908 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); 3909 3910 info->tcpi_pmtu = icsk->icsk_pmtu_cookie; 3911 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; 3912 info->tcpi_rtt = tp->srtt_us >> 3; 3913 info->tcpi_rttvar = tp->mdev_us >> 2; 3914 info->tcpi_snd_ssthresh = tp->snd_ssthresh; 3915 info->tcpi_advmss = tp->advmss; 3916 3917 info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3; 3918 info->tcpi_rcv_space = tp->rcvq_space.space; 3919 3920 info->tcpi_total_retrans = tp->total_retrans; 3921 3922 info->tcpi_bytes_acked = tp->bytes_acked; 3923 info->tcpi_bytes_received = tp->bytes_received; 3924 info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt); 3925 tcp_get_info_chrono_stats(tp, info); 3926 3927 info->tcpi_segs_out = tp->segs_out; 3928 3929 /* segs_in and data_segs_in can be updated from tcp_segs_in() from BH */ 3930 info->tcpi_segs_in = READ_ONCE(tp->segs_in); 3931 info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in); 3932 3933 info->tcpi_min_rtt = tcp_min_rtt(tp); 3934 info->tcpi_data_segs_out = tp->data_segs_out; 3935 3936 info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 
1 : 0; 3937 rate64 = tcp_compute_delivery_rate(tp); 3938 if (rate64) 3939 info->tcpi_delivery_rate = rate64; 3940 info->tcpi_delivered = tp->delivered; 3941 info->tcpi_delivered_ce = tp->delivered_ce; 3942 info->tcpi_bytes_sent = tp->bytes_sent; 3943 info->tcpi_bytes_retrans = tp->bytes_retrans; 3944 info->tcpi_dsack_dups = tp->dsack_dups; 3945 info->tcpi_reord_seen = tp->reord_seen; 3946 info->tcpi_rcv_ooopack = tp->rcv_ooopack; 3947 info->tcpi_snd_wnd = tp->snd_wnd; 3948 info->tcpi_rcv_wnd = tp->rcv_wnd; 3949 info->tcpi_rehash = tp->plb_rehash + tp->timeout_rehash; 3950 info->tcpi_fastopen_client_fail = tp->fastopen_client_fail; 3951 unlock_sock_fast(sk, slow); 3952 } 3953 EXPORT_SYMBOL_GPL(tcp_get_info); 3954 3955 static size_t tcp_opt_stats_get_size(void) 3956 { 3957 return 3958 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */ 3959 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */ 3960 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */ 3961 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */ 3962 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */ 3963 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */ 3964 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */ 3965 nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */ 3966 nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */ 3967 nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */ 3968 nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */ 3969 nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */ 3970 nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */ 3971 nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */ 3972 nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */ 3973 nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */ 3974 nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */ 3975 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */ 3976 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */ 3977 nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */ 3978 nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */ 3979 nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */ 3980 nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */ 3981 nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */ 3982 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT */ 3983 nla_total_size(sizeof(u8)) + /* TCP_NLA_TTL */ 3984 nla_total_size(sizeof(u32)) + /* TCP_NLA_REHASH */ 3985 0; 3986 } 3987 3988 /* Returns TTL or hop limit of an incoming packet from skb. 
*/ 3989 static u8 tcp_skb_ttl_or_hop_limit(const struct sk_buff *skb) 3990 { 3991 if (skb->protocol == htons(ETH_P_IP)) 3992 return ip_hdr(skb)->ttl; 3993 else if (skb->protocol == htons(ETH_P_IPV6)) 3994 return ipv6_hdr(skb)->hop_limit; 3995 else 3996 return 0; 3997 } 3998 3999 struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, 4000 const struct sk_buff *orig_skb, 4001 const struct sk_buff *ack_skb) 4002 { 4003 const struct tcp_sock *tp = tcp_sk(sk); 4004 struct sk_buff *stats; 4005 struct tcp_info info; 4006 unsigned long rate; 4007 u64 rate64; 4008 4009 stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC); 4010 if (!stats) 4011 return NULL; 4012 4013 tcp_get_info_chrono_stats(tp, &info); 4014 nla_put_u64_64bit(stats, TCP_NLA_BUSY, 4015 info.tcpi_busy_time, TCP_NLA_PAD); 4016 nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED, 4017 info.tcpi_rwnd_limited, TCP_NLA_PAD); 4018 nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED, 4019 info.tcpi_sndbuf_limited, TCP_NLA_PAD); 4020 nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT, 4021 tp->data_segs_out, TCP_NLA_PAD); 4022 nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS, 4023 tp->total_retrans, TCP_NLA_PAD); 4024 4025 rate = READ_ONCE(sk->sk_pacing_rate); 4026 rate64 = (rate != ~0UL) ? rate : ~0ULL; 4027 nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD); 4028 4029 rate64 = tcp_compute_delivery_rate(tp); 4030 nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD); 4031 4032 nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp)); 4033 nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering); 4034 nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp)); 4035 4036 nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits); 4037 nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited); 4038 nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh); 4039 nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered); 4040 nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce); 4041 4042 nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una); 4043 nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state); 4044 4045 nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent, 4046 TCP_NLA_PAD); 4047 nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans, 4048 TCP_NLA_PAD); 4049 nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups); 4050 nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen); 4051 nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3); 4052 nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash); 4053 nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT, 4054 max_t(int, 0, tp->write_seq - tp->snd_nxt)); 4055 nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns, 4056 TCP_NLA_PAD); 4057 if (ack_skb) 4058 nla_put_u8(stats, TCP_NLA_TTL, 4059 tcp_skb_ttl_or_hop_limit(ack_skb)); 4060 4061 nla_put_u32(stats, TCP_NLA_REHASH, tp->plb_rehash + tp->timeout_rehash); 4062 return stats; 4063 } 4064 4065 int do_tcp_getsockopt(struct sock *sk, int level, 4066 int optname, sockptr_t optval, sockptr_t optlen) 4067 { 4068 struct inet_connection_sock *icsk = inet_csk(sk); 4069 struct tcp_sock *tp = tcp_sk(sk); 4070 struct net *net = sock_net(sk); 4071 int val, len; 4072 4073 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4074 return -EFAULT; 4075 4076 len = min_t(unsigned int, len, sizeof(int)); 4077 4078 if (len < 0) 4079 return -EINVAL; 4080 4081 switch (optname) { 4082 case TCP_MAXSEG: 4083 val = tp->mss_cache; 4084 if (!val && ((1 << sk->sk_state) & 
(TCPF_CLOSE | TCPF_LISTEN))) 4085 val = tp->rx_opt.user_mss; 4086 if (tp->repair) 4087 val = tp->rx_opt.mss_clamp; 4088 break; 4089 case TCP_NODELAY: 4090 val = !!(tp->nonagle&TCP_NAGLE_OFF); 4091 break; 4092 case TCP_CORK: 4093 val = !!(tp->nonagle&TCP_NAGLE_CORK); 4094 break; 4095 case TCP_KEEPIDLE: 4096 val = keepalive_time_when(tp) / HZ; 4097 break; 4098 case TCP_KEEPINTVL: 4099 val = keepalive_intvl_when(tp) / HZ; 4100 break; 4101 case TCP_KEEPCNT: 4102 val = keepalive_probes(tp); 4103 break; 4104 case TCP_SYNCNT: 4105 val = icsk->icsk_syn_retries ? : 4106 READ_ONCE(net->ipv4.sysctl_tcp_syn_retries); 4107 break; 4108 case TCP_LINGER2: 4109 val = tp->linger2; 4110 if (val >= 0) 4111 val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ; 4112 break; 4113 case TCP_DEFER_ACCEPT: 4114 val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept, 4115 TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ); 4116 break; 4117 case TCP_WINDOW_CLAMP: 4118 val = tp->window_clamp; 4119 break; 4120 case TCP_INFO: { 4121 struct tcp_info info; 4122 4123 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4124 return -EFAULT; 4125 4126 tcp_get_info(sk, &info); 4127 4128 len = min_t(unsigned int, len, sizeof(info)); 4129 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4130 return -EFAULT; 4131 if (copy_to_sockptr(optval, &info, len)) 4132 return -EFAULT; 4133 return 0; 4134 } 4135 case TCP_CC_INFO: { 4136 const struct tcp_congestion_ops *ca_ops; 4137 union tcp_cc_info info; 4138 size_t sz = 0; 4139 int attr; 4140 4141 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4142 return -EFAULT; 4143 4144 ca_ops = icsk->icsk_ca_ops; 4145 if (ca_ops && ca_ops->get_info) 4146 sz = ca_ops->get_info(sk, ~0U, &attr, &info); 4147 4148 len = min_t(unsigned int, len, sz); 4149 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4150 return -EFAULT; 4151 if (copy_to_sockptr(optval, &info, len)) 4152 return -EFAULT; 4153 return 0; 4154 } 4155 case TCP_QUICKACK: 4156 val = !inet_csk_in_pingpong_mode(sk); 4157 break; 4158 4159 case TCP_CONGESTION: 4160 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4161 return -EFAULT; 4162 len = min_t(unsigned int, len, TCP_CA_NAME_MAX); 4163 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4164 return -EFAULT; 4165 if (copy_to_sockptr(optval, icsk->icsk_ca_ops->name, len)) 4166 return -EFAULT; 4167 return 0; 4168 4169 case TCP_ULP: 4170 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4171 return -EFAULT; 4172 len = min_t(unsigned int, len, TCP_ULP_NAME_MAX); 4173 if (!icsk->icsk_ulp_ops) { 4174 len = 0; 4175 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4176 return -EFAULT; 4177 return 0; 4178 } 4179 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4180 return -EFAULT; 4181 if (copy_to_sockptr(optval, icsk->icsk_ulp_ops->name, len)) 4182 return -EFAULT; 4183 return 0; 4184 4185 case TCP_FASTOPEN_KEY: { 4186 u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)]; 4187 unsigned int key_len; 4188 4189 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4190 return -EFAULT; 4191 4192 key_len = tcp_fastopen_get_cipher(net, icsk, key) * 4193 TCP_FASTOPEN_KEY_LENGTH; 4194 len = min_t(unsigned int, len, key_len); 4195 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4196 return -EFAULT; 4197 if (copy_to_sockptr(optval, key, len)) 4198 return -EFAULT; 4199 return 0; 4200 } 4201 case TCP_THIN_LINEAR_TIMEOUTS: 4202 val = tp->thin_lto; 4203 break; 4204 4205 case TCP_THIN_DUPACK: 4206 val = 0; 4207 break; 4208 4209 case TCP_REPAIR: 4210 val = tp->repair; 4211 break; 4212 4213 case TCP_REPAIR_QUEUE: 4214 if 
(tp->repair) 4215 val = tp->repair_queue; 4216 else 4217 return -EINVAL; 4218 break; 4219 4220 case TCP_REPAIR_WINDOW: { 4221 struct tcp_repair_window opt; 4222 4223 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4224 return -EFAULT; 4225 4226 if (len != sizeof(opt)) 4227 return -EINVAL; 4228 4229 if (!tp->repair) 4230 return -EPERM; 4231 4232 opt.snd_wl1 = tp->snd_wl1; 4233 opt.snd_wnd = tp->snd_wnd; 4234 opt.max_window = tp->max_window; 4235 opt.rcv_wnd = tp->rcv_wnd; 4236 opt.rcv_wup = tp->rcv_wup; 4237 4238 if (copy_to_sockptr(optval, &opt, len)) 4239 return -EFAULT; 4240 return 0; 4241 } 4242 case TCP_QUEUE_SEQ: 4243 if (tp->repair_queue == TCP_SEND_QUEUE) 4244 val = tp->write_seq; 4245 else if (tp->repair_queue == TCP_RECV_QUEUE) 4246 val = tp->rcv_nxt; 4247 else 4248 return -EINVAL; 4249 break; 4250 4251 case TCP_USER_TIMEOUT: 4252 val = icsk->icsk_user_timeout; 4253 break; 4254 4255 case TCP_FASTOPEN: 4256 val = icsk->icsk_accept_queue.fastopenq.max_qlen; 4257 break; 4258 4259 case TCP_FASTOPEN_CONNECT: 4260 val = tp->fastopen_connect; 4261 break; 4262 4263 case TCP_FASTOPEN_NO_COOKIE: 4264 val = tp->fastopen_no_cookie; 4265 break; 4266 4267 case TCP_TX_DELAY: 4268 val = tp->tcp_tx_delay; 4269 break; 4270 4271 case TCP_TIMESTAMP: 4272 val = tcp_time_stamp_raw() + tp->tsoffset; 4273 break; 4274 case TCP_NOTSENT_LOWAT: 4275 val = tp->notsent_lowat; 4276 break; 4277 case TCP_INQ: 4278 val = tp->recvmsg_inq; 4279 break; 4280 case TCP_SAVE_SYN: 4281 val = tp->save_syn; 4282 break; 4283 case TCP_SAVED_SYN: { 4284 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4285 return -EFAULT; 4286 4287 sockopt_lock_sock(sk); 4288 if (tp->saved_syn) { 4289 if (len < tcp_saved_syn_len(tp->saved_syn)) { 4290 len = tcp_saved_syn_len(tp->saved_syn); 4291 if (copy_to_sockptr(optlen, &len, sizeof(int))) { 4292 sockopt_release_sock(sk); 4293 return -EFAULT; 4294 } 4295 sockopt_release_sock(sk); 4296 return -EINVAL; 4297 } 4298 len = tcp_saved_syn_len(tp->saved_syn); 4299 if (copy_to_sockptr(optlen, &len, sizeof(int))) { 4300 sockopt_release_sock(sk); 4301 return -EFAULT; 4302 } 4303 if (copy_to_sockptr(optval, tp->saved_syn->data, len)) { 4304 sockopt_release_sock(sk); 4305 return -EFAULT; 4306 } 4307 tcp_saved_syn_free(tp); 4308 sockopt_release_sock(sk); 4309 } else { 4310 sockopt_release_sock(sk); 4311 len = 0; 4312 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4313 return -EFAULT; 4314 } 4315 return 0; 4316 } 4317 #ifdef CONFIG_MMU 4318 case TCP_ZEROCOPY_RECEIVE: { 4319 struct scm_timestamping_internal tss; 4320 struct tcp_zerocopy_receive zc = {}; 4321 int err; 4322 4323 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4324 return -EFAULT; 4325 if (len < 0 || 4326 len < offsetofend(struct tcp_zerocopy_receive, length)) 4327 return -EINVAL; 4328 if (unlikely(len > sizeof(zc))) { 4329 err = check_zeroed_sockptr(optval, sizeof(zc), 4330 len - sizeof(zc)); 4331 if (err < 1) 4332 return err == 0 ? 
-EINVAL : err; 4333 len = sizeof(zc); 4334 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4335 return -EFAULT; 4336 } 4337 if (copy_from_sockptr(&zc, optval, len)) 4338 return -EFAULT; 4339 if (zc.reserved) 4340 return -EINVAL; 4341 if (zc.msg_flags & ~(TCP_VALID_ZC_MSG_FLAGS)) 4342 return -EINVAL; 4343 sockopt_lock_sock(sk); 4344 err = tcp_zerocopy_receive(sk, &zc, &tss); 4345 err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname, 4346 &zc, &len, err); 4347 sockopt_release_sock(sk); 4348 if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags)) 4349 goto zerocopy_rcv_cmsg; 4350 switch (len) { 4351 case offsetofend(struct tcp_zerocopy_receive, msg_flags): 4352 goto zerocopy_rcv_cmsg; 4353 case offsetofend(struct tcp_zerocopy_receive, msg_controllen): 4354 case offsetofend(struct tcp_zerocopy_receive, msg_control): 4355 case offsetofend(struct tcp_zerocopy_receive, flags): 4356 case offsetofend(struct tcp_zerocopy_receive, copybuf_len): 4357 case offsetofend(struct tcp_zerocopy_receive, copybuf_address): 4358 case offsetofend(struct tcp_zerocopy_receive, err): 4359 goto zerocopy_rcv_sk_err; 4360 case offsetofend(struct tcp_zerocopy_receive, inq): 4361 goto zerocopy_rcv_inq; 4362 case offsetofend(struct tcp_zerocopy_receive, length): 4363 default: 4364 goto zerocopy_rcv_out; 4365 } 4366 zerocopy_rcv_cmsg: 4367 if (zc.msg_flags & TCP_CMSG_TS) 4368 tcp_zc_finalize_rx_tstamp(sk, &zc, &tss); 4369 else 4370 zc.msg_flags = 0; 4371 zerocopy_rcv_sk_err: 4372 if (!err) 4373 zc.err = sock_error(sk); 4374 zerocopy_rcv_inq: 4375 zc.inq = tcp_inq_hint(sk); 4376 zerocopy_rcv_out: 4377 if (!err && copy_to_sockptr(optval, &zc, len)) 4378 err = -EFAULT; 4379 return err; 4380 } 4381 #endif 4382 default: 4383 return -ENOPROTOOPT; 4384 } 4385 4386 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4387 return -EFAULT; 4388 if (copy_to_sockptr(optval, &val, len)) 4389 return -EFAULT; 4390 return 0; 4391 } 4392 4393 bool tcp_bpf_bypass_getsockopt(int level, int optname) 4394 { 4395 /* TCP do_tcp_getsockopt has optimized getsockopt implementation 4396 * to avoid extra socket lock for TCP_ZEROCOPY_RECEIVE. 
4397 */ 4398 if (level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE) 4399 return true; 4400 4401 return false; 4402 } 4403 EXPORT_SYMBOL(tcp_bpf_bypass_getsockopt); 4404 4405 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, 4406 int __user *optlen) 4407 { 4408 struct inet_connection_sock *icsk = inet_csk(sk); 4409 4410 if (level != SOL_TCP) 4411 /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */ 4412 return READ_ONCE(icsk->icsk_af_ops)->getsockopt(sk, level, optname, 4413 optval, optlen); 4414 return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval), 4415 USER_SOCKPTR(optlen)); 4416 } 4417 EXPORT_SYMBOL(tcp_getsockopt); 4418 4419 #ifdef CONFIG_TCP_MD5SIG 4420 static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool); 4421 static DEFINE_MUTEX(tcp_md5sig_mutex); 4422 static bool tcp_md5sig_pool_populated = false; 4423 4424 static void __tcp_alloc_md5sig_pool(void) 4425 { 4426 struct crypto_ahash *hash; 4427 int cpu; 4428 4429 hash = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC); 4430 if (IS_ERR(hash)) 4431 return; 4432 4433 for_each_possible_cpu(cpu) { 4434 void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch; 4435 struct ahash_request *req; 4436 4437 if (!scratch) { 4438 scratch = kmalloc_node(sizeof(union tcp_md5sum_block) + 4439 sizeof(struct tcphdr), 4440 GFP_KERNEL, 4441 cpu_to_node(cpu)); 4442 if (!scratch) 4443 return; 4444 per_cpu(tcp_md5sig_pool, cpu).scratch = scratch; 4445 } 4446 if (per_cpu(tcp_md5sig_pool, cpu).md5_req) 4447 continue; 4448 4449 req = ahash_request_alloc(hash, GFP_KERNEL); 4450 if (!req) 4451 return; 4452 4453 ahash_request_set_callback(req, 0, NULL, NULL); 4454 4455 per_cpu(tcp_md5sig_pool, cpu).md5_req = req; 4456 } 4457 /* before setting tcp_md5sig_pool_populated, we must commit all writes 4458 * to memory. See smp_rmb() in tcp_get_md5sig_pool() 4459 */ 4460 smp_wmb(); 4461 /* Paired with READ_ONCE() from tcp_alloc_md5sig_pool() 4462 * and tcp_get_md5sig_pool(). 4463 */ 4464 WRITE_ONCE(tcp_md5sig_pool_populated, true); 4465 } 4466 4467 bool tcp_alloc_md5sig_pool(void) 4468 { 4469 /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ 4470 if (unlikely(!READ_ONCE(tcp_md5sig_pool_populated))) { 4471 mutex_lock(&tcp_md5sig_mutex); 4472 4473 if (!tcp_md5sig_pool_populated) 4474 __tcp_alloc_md5sig_pool(); 4475 4476 mutex_unlock(&tcp_md5sig_mutex); 4477 } 4478 /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ 4479 return READ_ONCE(tcp_md5sig_pool_populated); 4480 } 4481 EXPORT_SYMBOL(tcp_alloc_md5sig_pool); 4482 4483 4484 /** 4485 * tcp_get_md5sig_pool - get md5sig_pool for this user 4486 * 4487 * We use percpu structure, so if we succeed, we exit with preemption 4488 * and BH disabled, to make sure another thread or softirq handling 4489 * wont try to get same context. 
4490 */ 4491 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) 4492 { 4493 local_bh_disable(); 4494 4495 /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ 4496 if (READ_ONCE(tcp_md5sig_pool_populated)) { 4497 /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */ 4498 smp_rmb(); 4499 return this_cpu_ptr(&tcp_md5sig_pool); 4500 } 4501 local_bh_enable(); 4502 return NULL; 4503 } 4504 EXPORT_SYMBOL(tcp_get_md5sig_pool); 4505 4506 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, 4507 const struct sk_buff *skb, unsigned int header_len) 4508 { 4509 struct scatterlist sg; 4510 const struct tcphdr *tp = tcp_hdr(skb); 4511 struct ahash_request *req = hp->md5_req; 4512 unsigned int i; 4513 const unsigned int head_data_len = skb_headlen(skb) > header_len ? 4514 skb_headlen(skb) - header_len : 0; 4515 const struct skb_shared_info *shi = skb_shinfo(skb); 4516 struct sk_buff *frag_iter; 4517 4518 sg_init_table(&sg, 1); 4519 4520 sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len); 4521 ahash_request_set_crypt(req, &sg, NULL, head_data_len); 4522 if (crypto_ahash_update(req)) 4523 return 1; 4524 4525 for (i = 0; i < shi->nr_frags; ++i) { 4526 const skb_frag_t *f = &shi->frags[i]; 4527 unsigned int offset = skb_frag_off(f); 4528 struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT); 4529 4530 sg_set_page(&sg, page, skb_frag_size(f), 4531 offset_in_page(offset)); 4532 ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f)); 4533 if (crypto_ahash_update(req)) 4534 return 1; 4535 } 4536 4537 skb_walk_frags(skb, frag_iter) 4538 if (tcp_md5_hash_skb_data(hp, frag_iter, 0)) 4539 return 1; 4540 4541 return 0; 4542 } 4543 EXPORT_SYMBOL(tcp_md5_hash_skb_data); 4544 4545 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key) 4546 { 4547 u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */ 4548 struct scatterlist sg; 4549 4550 sg_init_one(&sg, key->key, keylen); 4551 ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen); 4552 4553 /* We use data_race() because tcp_md5_do_add() might change key->key under us */ 4554 return data_race(crypto_ahash_update(hp->md5_req)); 4555 } 4556 EXPORT_SYMBOL(tcp_md5_hash_key); 4557 4558 /* Called with rcu_read_lock() */ 4559 enum skb_drop_reason 4560 tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, 4561 const void *saddr, const void *daddr, 4562 int family, int dif, int sdif) 4563 { 4564 /* 4565 * This gets called for each TCP segment that arrives 4566 * so we want to be efficient. 4567 * We have 3 drop cases: 4568 * o No MD5 hash and one expected. 4569 * o MD5 hash and we're not expecting one. 4570 * o MD5 hash and its wrong. 4571 */ 4572 const __u8 *hash_location = NULL; 4573 struct tcp_md5sig_key *hash_expected; 4574 const struct tcphdr *th = tcp_hdr(skb); 4575 const struct tcp_sock *tp = tcp_sk(sk); 4576 int genhash, l3index; 4577 u8 newhash[16]; 4578 4579 /* sdif set, means packet ingressed via a device 4580 * in an L3 domain and dif is set to the l3mdev 4581 */ 4582 l3index = sdif ? dif : 0; 4583 4584 hash_expected = tcp_md5_do_lookup(sk, l3index, saddr, family); 4585 hash_location = tcp_parse_md5sig_option(th); 4586 4587 /* We've parsed the options - do we have a hash? 
*/ 4588 if (!hash_expected && !hash_location) 4589 return SKB_NOT_DROPPED_YET; 4590 4591 if (hash_expected && !hash_location) { 4592 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); 4593 return SKB_DROP_REASON_TCP_MD5NOTFOUND; 4594 } 4595 4596 if (!hash_expected && hash_location) { 4597 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); 4598 return SKB_DROP_REASON_TCP_MD5UNEXPECTED; 4599 } 4600 4601 /* Check the signature. 4602 * To support dual stack listeners, we need to handle 4603 * IPv4-mapped case. 4604 */ 4605 if (family == AF_INET) 4606 genhash = tcp_v4_md5_hash_skb(newhash, 4607 hash_expected, 4608 NULL, skb); 4609 else 4610 genhash = tp->af_specific->calc_md5_hash(newhash, 4611 hash_expected, 4612 NULL, skb); 4613 4614 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 4615 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE); 4616 if (family == AF_INET) { 4617 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n", 4618 saddr, ntohs(th->source), 4619 daddr, ntohs(th->dest), 4620 genhash ? " tcp_v4_calc_md5_hash failed" 4621 : "", l3index); 4622 } else { 4623 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n", 4624 genhash ? "failed" : "mismatch", 4625 saddr, ntohs(th->source), 4626 daddr, ntohs(th->dest), l3index); 4627 } 4628 return SKB_DROP_REASON_TCP_MD5FAILURE; 4629 } 4630 return SKB_NOT_DROPPED_YET; 4631 } 4632 EXPORT_SYMBOL(tcp_inbound_md5_hash); 4633 4634 #endif 4635 4636 void tcp_done(struct sock *sk) 4637 { 4638 struct request_sock *req; 4639 4640 /* We might be called with a new socket, after 4641 * inet_csk_prepare_forced_close() has been called 4642 * so we can not use lockdep_sock_is_held(sk) 4643 */ 4644 req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1); 4645 4646 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) 4647 TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); 4648 4649 tcp_set_state(sk, TCP_CLOSE); 4650 tcp_clear_xmit_timers(sk); 4651 if (req) 4652 reqsk_fastopen_remove(sk, req, false); 4653 4654 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); 4655 4656 if (!sock_flag(sk, SOCK_DEAD)) 4657 sk->sk_state_change(sk); 4658 else 4659 inet_csk_destroy_sock(sk); 4660 } 4661 EXPORT_SYMBOL_GPL(tcp_done); 4662 4663 int tcp_abort(struct sock *sk, int err) 4664 { 4665 int state = inet_sk_state_load(sk); 4666 4667 if (state == TCP_NEW_SYN_RECV) { 4668 struct request_sock *req = inet_reqsk(sk); 4669 4670 local_bh_disable(); 4671 inet_csk_reqsk_queue_drop(req->rsk_listener, req); 4672 local_bh_enable(); 4673 return 0; 4674 } 4675 if (state == TCP_TIME_WAIT) { 4676 struct inet_timewait_sock *tw = inet_twsk(sk); 4677 4678 refcount_inc(&tw->tw_refcnt); 4679 local_bh_disable(); 4680 inet_twsk_deschedule_put(tw); 4681 local_bh_enable(); 4682 return 0; 4683 } 4684 4685 /* Don't race with userspace socket closes such as tcp_close. */ 4686 lock_sock(sk); 4687 4688 if (sk->sk_state == TCP_LISTEN) { 4689 tcp_set_state(sk, TCP_CLOSE); 4690 inet_csk_listen_stop(sk); 4691 } 4692 4693 /* Don't race with BH socket closes such as inet_csk_listen_stop. 
*/ 4694 local_bh_disable(); 4695 bh_lock_sock(sk); 4696 4697 if (!sock_flag(sk, SOCK_DEAD)) { 4698 WRITE_ONCE(sk->sk_err, err); 4699 /* This barrier is coupled with smp_rmb() in tcp_poll() */ 4700 smp_wmb(); 4701 sk_error_report(sk); 4702 if (tcp_need_reset(sk->sk_state)) 4703 tcp_send_active_reset(sk, GFP_ATOMIC); 4704 tcp_done(sk); 4705 } 4706 4707 bh_unlock_sock(sk); 4708 local_bh_enable(); 4709 tcp_write_queue_purge(sk); 4710 release_sock(sk); 4711 return 0; 4712 } 4713 EXPORT_SYMBOL_GPL(tcp_abort); 4714 4715 extern struct tcp_congestion_ops tcp_reno; 4716 4717 static __initdata unsigned long thash_entries; 4718 static int __init set_thash_entries(char *str) 4719 { 4720 ssize_t ret; 4721 4722 if (!str) 4723 return 0; 4724 4725 ret = kstrtoul(str, 0, &thash_entries); 4726 if (ret) 4727 return 0; 4728 4729 return 1; 4730 } 4731 __setup("thash_entries=", set_thash_entries); 4732 4733 static void __init tcp_init_mem(void) 4734 { 4735 unsigned long limit = nr_free_buffer_pages() / 16; 4736 4737 limit = max(limit, 128UL); 4738 sysctl_tcp_mem[0] = limit / 4 * 3; /* 4.68 % */ 4739 sysctl_tcp_mem[1] = limit; /* 6.25 % */ 4740 sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; /* 9.37 % */ 4741 } 4742 4743 void __init tcp_init(void) 4744 { 4745 int max_rshare, max_wshare, cnt; 4746 unsigned long limit; 4747 unsigned int i; 4748 4749 BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE); 4750 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > 4751 sizeof_field(struct sk_buff, cb)); 4752 4753 percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL); 4754 4755 timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE); 4756 mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); 4757 4758 inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash", 4759 thash_entries, 21, /* one slot per 2 MB*/ 4760 0, 64 * 1024); 4761 tcp_hashinfo.bind_bucket_cachep = 4762 kmem_cache_create("tcp_bind_bucket", 4763 sizeof(struct inet_bind_bucket), 0, 4764 SLAB_HWCACHE_ALIGN | SLAB_PANIC | 4765 SLAB_ACCOUNT, 4766 NULL); 4767 tcp_hashinfo.bind2_bucket_cachep = 4768 kmem_cache_create("tcp_bind2_bucket", 4769 sizeof(struct inet_bind2_bucket), 0, 4770 SLAB_HWCACHE_ALIGN | SLAB_PANIC | 4771 SLAB_ACCOUNT, 4772 NULL); 4773 4774 /* Size and allocate the main established and bind bucket 4775 * hash tables. 4776 * 4777 * The methodology is similar to that of the buffer cache. 4778 */ 4779 tcp_hashinfo.ehash = 4780 alloc_large_system_hash("TCP established", 4781 sizeof(struct inet_ehash_bucket), 4782 thash_entries, 4783 17, /* one slot per 128 KB of memory */ 4784 0, 4785 NULL, 4786 &tcp_hashinfo.ehash_mask, 4787 0, 4788 thash_entries ? 
0 : 512 * 1024); 4789 for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) 4790 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); 4791 4792 if (inet_ehash_locks_alloc(&tcp_hashinfo)) 4793 panic("TCP: failed to alloc ehash_locks"); 4794 tcp_hashinfo.bhash = 4795 alloc_large_system_hash("TCP bind", 4796 2 * sizeof(struct inet_bind_hashbucket), 4797 tcp_hashinfo.ehash_mask + 1, 4798 17, /* one slot per 128 KB of memory */ 4799 0, 4800 &tcp_hashinfo.bhash_size, 4801 NULL, 4802 0, 4803 64 * 1024); 4804 tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; 4805 tcp_hashinfo.bhash2 = tcp_hashinfo.bhash + tcp_hashinfo.bhash_size; 4806 for (i = 0; i < tcp_hashinfo.bhash_size; i++) { 4807 spin_lock_init(&tcp_hashinfo.bhash[i].lock); 4808 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); 4809 spin_lock_init(&tcp_hashinfo.bhash2[i].lock); 4810 INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain); 4811 } 4812 4813 tcp_hashinfo.pernet = false; 4814 4815 cnt = tcp_hashinfo.ehash_mask + 1; 4816 sysctl_tcp_max_orphans = cnt / 2; 4817 4818 tcp_init_mem(); 4819 /* Set per-socket limits to no more than 1/128 the pressure threshold */ 4820 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); 4821 max_wshare = min(4UL*1024*1024, limit); 4822 max_rshare = min(6UL*1024*1024, limit); 4823 4824 init_net.ipv4.sysctl_tcp_wmem[0] = PAGE_SIZE; 4825 init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024; 4826 init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare); 4827 4828 init_net.ipv4.sysctl_tcp_rmem[0] = PAGE_SIZE; 4829 init_net.ipv4.sysctl_tcp_rmem[1] = 131072; 4830 init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare); 4831 4832 pr_info("Hash tables configured (established %u bind %u)\n", 4833 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); 4834 4835 tcp_v4_init(); 4836 tcp_metrics_init(); 4837 BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0); 4838 tcp_tasklet_init(); 4839 mptcp_init(); 4840 } 4841
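/*
 * Editor's illustration (not part of the kernel sources and never compiled):
 * a minimal userspace sketch of how a few of the socket options implemented
 * in do_tcp_setsockopt()/do_tcp_getsockopt() and reported by tcp_get_info()
 * above are typically exercised.  It assumes the option macros and
 * struct tcp_info exported through <netinet/tcp.h>; the tcp_info layout an
 * application sees depends on its installed headers, and the kernel simply
 * clamps the copied length, as the TCP_INFO case above shows.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int tcp_option_example(void)
{
	unsigned int timeout_ms = 30000;	/* give up after 30s of unacked data */
	char ca_name[16] = "";			/* TCP_CA_NAME_MAX is 16 */
	struct tcp_info info;
	socklen_t len;
	int fd;

	fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0)
		return -1;

	/* TCP_USER_TIMEOUT: cap how long TCP retries before aborting (ETIMEDOUT) */
	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
		   &timeout_ms, sizeof(timeout_ms));

	/* TCP_CONGESTION: read back the congestion control module name */
	len = sizeof(ca_name);
	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, ca_name, &len) == 0)
		printf("congestion control: %.*s\n", (int)len, ca_name);

	/* TCP_INFO: snapshot of the state tcp_get_info() fills in */
	len = sizeof(info);
	memset(&info, 0, sizeof(info));
	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
		printf("state %u rto %uus cwnd %u retrans %u\n",
		       info.tcpi_state, info.tcpi_rto,
		       info.tcpi_snd_cwnd, info.tcpi_total_retrans);

	close(fd);
	return 0;
}
#endif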