// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed were wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *		Michael O'Reilly:	ack < copied bug fix.
 *		Johannes Stille	:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
#include <linux/static_key.h>
#include <linux/btf.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>

/* Track pending CMSGs. */
enum {
	TCP_CMSG_INQ = 1,
	TCP_CMSG_TS = 2
};

DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);

long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);

atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);

#if IS_ENABLED(CONFIG_SMC)
DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
EXPORT_SYMBOL(tcp_have_smc);
#endif

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non-atomically.
 * All of __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (READ_ONCE(tcp_memory_pressure))
		return;
	val = jiffies;

	if (!val)
		val--;
	if (!cmpxchg(&tcp_memory_pressure, 0, val))
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
}
EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);

void tcp_leave_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (!READ_ONCE(tcp_memory_pressure))
		return;
	val = xchg(&tcp_memory_pressure, 0);
	if (val)
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
			      jiffies_to_msecs(jiffies - val));
}
EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}
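
/* Worked example (illustrative, in whatever units the caller passes;
 * callers typically convert jiffies to seconds, e.g. TCP_TIMEOUT_INIT / HZ):
 * secs_to_retrans(10, 1, 120) starts with period = 1 and doubles the
 * timeout each round, accumulating periods 1, 3, 7, 15; it returns 4,
 * the first retransmit count whose cumulative period covers 10 seconds.
 * retrans_to_secs(4, 1, 120) walks the same series back and returns 15.
 * The clamp at rto_max makes the series grow linearly once the doubling
 * reaches the maximum RTO.
 */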
static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
{
	u32 rate = READ_ONCE(tp->rate_delivered);
	u32 intv = READ_ONCE(tp->rate_interval_us);
	u64 rate64 = 0;

	if (rate && intv) {
		rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
		do_div(rate64, intv);
	}
	return rate64;
}
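
/* Illustrative note: the computation above is simply
 *
 *	rate = rate_delivered * mss_cache * USEC_PER_SEC / rate_interval_us
 *
 * i.e. packets delivered over the last rate sample, scaled to bytes by
 * the MSS and divided by the sample interval, giving bytes per second.
 * For example, rate_delivered = 100, mss_cache = 1448 and an interval of
 * 10000 usec yield 100 * 1448 * 1000000 / 10000 = 14480000 B/s.
 */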
/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->out_of_order_queue = RB_ROOT;
	sk->tcp_rtx_queue = RB_ROOT;
	tcp_init_xmit_timers(sk);
	INIT_LIST_HEAD(&tp->tsq_node);
	INIT_LIST_HEAD(&tp->tsorted_sent_queue);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	icsk->icsk_rto_min = TCP_RTO_MIN;
	icsk->icsk_delack_max = TCP_DELACK_MAX;
	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);

	/* There's a bubble in the pipe until at least the first ACK. */
	tp->app_limited = ~0U;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
	tcp_assign_congestion_control(sk);

	tp->tsoffset = 0;
	tp->rack.reo_wnd_steps = 1;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_sync_mss = tcp_sync_mss;

	WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
	WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);

	sk_sockets_allocated_inc(sk);
}
EXPORT_SYMBOL(tcp_init_sock);

static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (tsflags && skb) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);
		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
			tcb->txstamp_ack = 1;
		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
	}
}

static bool tcp_stream_is_readable(struct sock *sk, int target)
{
	if (tcp_epollin_ready(sk, target))
		return true;
	return sk_is_readable(sk);
}
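
/* Illustrative userspace sketch (assumed standard SO_TIMESTAMPING usage,
 * not part of this file): the tsflags consumed by tcp_tx_timestamp()
 * usually originate from a setsockopt such as
 *
 *	unsigned int f = SOF_TIMESTAMPING_TX_ACK | SOF_TIMESTAMPING_SOFTWARE;
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &f, sizeof(f));
 *
 * or from a per-call SO_TIMESTAMPING cmsg (sockc.tsflags in sendmsg).
 * The tail skb is then marked so that a timestamp is queued on the
 * socket error queue once the covered byte range (shinfo->tskey) has
 * been fully acked.
 */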
/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask;
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);
	int state;

	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * EPOLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that EPOLLHUP is incompatible
	 * with the EPOLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if EPOLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why EPOLLHUP is incompatible with EPOLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= EPOLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* Connected or passive Fast Open socket? */
	if (state != TCP_SYN_SENT &&
	    (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);
		u16 urg_data = READ_ONCE(tp->urg_data);

		if (unlikely(urg_data) &&
		    READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
		    !sock_flag(sk, SOCK_URGINLINE))
			target++;

		if (tcp_stream_is_readable(sk, target))
			mask |= EPOLLIN | EPOLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (__sk_stream_is_writeable(sk, 1)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost. Memory barrier
				 * pairs with the input side.
				 */
				smp_mb__after_atomic();
				if (__sk_stream_is_writeable(sk, 1))
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		} else
			mask |= EPOLLOUT | EPOLLWRNORM;

		if (urg_data & TCP_URG_VALID)
			mask |= EPOLLPRI;
	} else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
		/* Active TCP fastopen socket with defer_connect
		 * Return EPOLLOUT so application can call write()
		 * in order for kernel to generate SYN+data
		 */
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		slow = lock_sock_fast(sk);
		answ = tcp_inq(sk);
		unlock_sock_fast(sk, slow);
		break;
	case SIOCATMARK:
		answ = READ_ONCE(tp->urg_data) &&
		       READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) -
			       READ_ONCE(tp->snd_nxt);
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
EXPORT_SYMBOL(tcp_ioctl);
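
/* Illustrative userspace usage of the ioctls handled above (sketch, not
 * part of this file):
 *
 *	int n;
 *	ioctl(fd, SIOCINQ, &n);	    // bytes queued for reading (tcp_inq)
 *	ioctl(fd, SIOCOUTQ, &n);    // bytes not yet acked (write_seq - snd_una)
 *	ioctl(fd, SIOCOUTQNSD, &n); // bytes not yet sent (write_seq - snd_nxt)
 *
 * All of them fail with -EINVAL on a listening socket, and the OUTQ
 * variants report 0 while the handshake is still in progress.
 */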
void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline bool forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	__skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk_wmem_queued_add(sk, skb->truesize);
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;

	tcp_slow_start_after_idle_check(sk);
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

/* If a not yet filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues :
 * Because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade off.
 * As packets containing data payload have a bigger truesize
 * than pure ACK (dataless) packets, the last checks prevent
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed the ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
				int size_goal)
{
	return skb->len < size_goal &&
	       sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
	       !tcp_rtx_queue_empty(sk) &&
	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
	       tcp_skb_can_collapse_to(skb);
}

void tcp_push(struct sock *sk, int flags, int mss_now,
	      int nonagle, int size_goal)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	skb = tcp_write_queue_tail(sk);
	if (!skb)
		return;
	if (!(flags & MSG_MORE) || forced_push(tp))
		tcp_mark_push(tp, skb);

	tcp_mark_urg(tp, flags);

	if (tcp_should_autocork(sk, skb, size_goal)) {

		/* avoid atomic op if TSQ_THROTTLED bit is already set */
		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
		}
		/* It is possible TX completion already happened
		 * before we set TSQ_THROTTLED.
		 */
		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
			return;
	}

	if (flags & MSG_MORE)
		nonagle = TCP_NAGLE_CORK;

	__tcp_push_pending_frames(sk, mss_now, nonagle);
}
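
/* Note on the two corking mechanisms above (illustrative): MSG_MORE is
 * explicit corking and maps to TCP_NAGLE_CORK, while autocorking only
 * sets TSQ_THROTTLED and lets TX completion flush the queue. A likely
 * effect, assuming a busy NIC queue: two back-to-back
 * send(fd, buf, 100, 0) calls end up coalesced into one skb, because
 * sk_wmem_alloc still accounts the first skb when the second send runs.
 */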
static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
			      min(rd_desc->count, len), tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			/* if __tcp_splice_read() got nothing while we have
			 * an skb in receive queue, we do not want to loop.
			 * This might happen with URG data.
			 */
			if (!skb_queue_empty(&sk->sk_receive_queue))
				break;
			sk_wait_data(sk, &timeo, NULL);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);
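
/* Sketch of the usual entry path (assumed typical usage, not part of
 * this file): tcp_splice_read() is reached via splice(2) with the TCP
 * socket on the input side:
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	ssize_t n = splice(sock_fd, NULL, pfd[1], NULL, 65536,
 *			   SPLICE_F_MOVE | SPLICE_F_NONBLOCK);
 *
 * Pages are linked into the pipe without copying where possible; the
 * input offset must be NULL/zero since sockets are not seekable (hence
 * the -ESPIPE check above).
 */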
struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
				     bool force_schedule)
{
	struct sk_buff *skb;

	if (unlikely(tcp_under_memory_pressure(sk)))
		sk_mem_reclaim_partial(sk);

	skb = alloc_skb_fclone(size + MAX_TCP_HEADER, gfp);
	if (likely(skb)) {
		bool mem_scheduled;

		skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
		if (force_schedule) {
			mem_scheduled = true;
			sk_forced_mem_schedule(sk, skb->truesize);
		} else {
			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
		}
		if (likely(mem_scheduled)) {
			skb_reserve(skb, MAX_TCP_HEADER);
			skb->ip_summed = CHECKSUM_PARTIAL;
			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 new_size_goal, size_goal;

	if (!large_allowed)
		return mss_now;

	/* Note : tcp_tso_autosize() will eventually split this later */
	new_size_goal = tcp_bound_to_half_wnd(tp, sk->sk_gso_max_size);

	/* We try hard to avoid divides here */
	size_goal = tp->gso_segs * mss_now;
	if (unlikely(new_size_goal < size_goal ||
		     new_size_goal >= size_goal + mss_now)) {
		tp->gso_segs = min_t(u16, new_size_goal / mss_now,
				     sk->sk_gso_max_segs);
		size_goal = tp->gso_segs * mss_now;
	}

	return max(size_goal, mss_now);
}

int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}
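
/* Worked example for the size goal computed above (illustrative): with
 * mss_now = 1448 and sk_gso_max_size near 64 KB, new_size_goal is first
 * clamped to half the peer's largest advertised window, then rounded to
 * whole segments: gso_segs = min(new_size_goal / 1448, sk_gso_max_segs),
 * e.g. 45 * 1448 = 65160 bytes per skb. MSG_OOB passes !large_allowed
 * and falls back to a single MSS.
 */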
/* In some cases, both sendpage() and sendmsg() could have added
 * an skb to the write queue, but failed adding payload on it.
 * We need to remove it to consume less memory, but more
 * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
 * users.
 */
void tcp_remove_empty_skb(struct sock *sk)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
		tcp_unlink_write_queue(skb, sk);
		if (tcp_write_queue_empty(sk))
			tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
		tcp_wmem_free_skb(sk, skb);
	}
}

/* skb changing from pure zc to mixed, must charge zc */
static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
{
	if (unlikely(skb_zcopy_pure(skb))) {
		u32 extra = skb->truesize -
			    SKB_TRUESIZE(skb_end_offset(skb));

		if (!sk_wmem_schedule(sk, extra))
			return -ENOMEM;

		sk_mem_charge(sk, extra);
		skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
	}
	return 0;
}

static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
				      struct page *page, int offset, size_t *size)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	bool can_coalesce;
	int copy, i;

	if (!skb || (copy = size_goal - skb->len) <= 0 ||
	    !tcp_skb_can_collapse_to(skb)) {
new_segment:
		if (!sk_stream_memory_free(sk))
			return NULL;

		skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation,
					   tcp_rtx_and_write_queues_empty(sk));
		if (!skb)
			return NULL;

#ifdef CONFIG_TLS_DEVICE
		skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
#endif
		tcp_skb_entail(sk, skb);
		copy = size_goal;
	}

	if (copy > *size)
		copy = *size;

	i = skb_shinfo(skb)->nr_frags;
	can_coalesce = skb_can_coalesce(skb, i, page, offset);
	if (!can_coalesce && i >= sysctl_max_skb_frags) {
		tcp_mark_push(tp, skb);
		goto new_segment;
	}
	if (tcp_downgrade_zcopy_pure(sk, skb) || !sk_wmem_schedule(sk, copy))
		return NULL;

	if (can_coalesce) {
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
	} else {
		get_page(page);
		skb_fill_page_desc(skb, i, page, offset, copy);
	}

	if (!(flags & MSG_NO_SHARED_FRAGS))
		skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;

	skb->len += copy;
	skb->data_len += copy;
	skb->truesize += copy;
	sk_wmem_queued_add(sk, copy);
	sk_mem_charge(sk, copy);
	WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
	TCP_SKB_CB(skb)->end_seq += copy;
	tcp_skb_pcount_set(skb, 0);

	*size = copy;
	return skb;
}

ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
			 size_t size, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	if (IS_ENABLED(CONFIG_DEBUG_VM) &&
	    WARN_ONCE(!sendpage_ok(page),
		      "page must not be a Slab one and have page_count > 0"))
		return -EINVAL;

	/* Wait for a connection to finish. One exception is TCP Fast Open
	 * (passive side) where data is allowed to be sent before a connection
	 * is fully established.
	 */
	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
	    !tcp_passive_fastopen(sk)) {
		err = sk_stream_wait_connect(sk, &timeo);
		if (err != 0)
			goto out_err;
	}

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	mss_now = tcp_send_mss(sk, &size_goal, flags);
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	while (size > 0) {
		struct sk_buff *skb;
		size_t copy = size;

		skb = tcp_build_frag(sk, size_goal, flags, page, offset, &copy);
		if (!skb)
			goto wait_for_space;

		if (!copied)
			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

		copied += copy;
		offset += copy;
		size -= copy;
		if (!size)
			goto out;

		if (skb->len < size_goal || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_space:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		tcp_push(sk, flags & ~MSG_MORE, mss_now,
			 TCP_NAGLE_PUSH, size_goal);

		err = sk_stream_wait_memory(sk, &timeo);
		if (err != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
	}

out:
	if (copied) {
		tcp_tx_timestamp(sk, sk->sk_tsflags);
		if (!(flags & MSG_SENDPAGE_NOTLAST))
			tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
	}
	return copied;

do_error:
	tcp_remove_empty_skb(sk);
	if (copied)
		goto out;
out_err:
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) {
		sk->sk_write_space(sk);
		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
	}
	return sk_stream_error(sk, flags, err);
}
EXPORT_SYMBOL_GPL(do_tcp_sendpages);
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
			size_t size, int flags)
{
	if (!(sk->sk_route_caps & NETIF_F_SG))
		return sock_no_sendpage_locked(sk, page, offset, size, flags);

	tcp_rate_check_app_limited(sk);  /* is sending application-limited? */

	return do_tcp_sendpages(sk, page, offset, size, flags);
}
EXPORT_SYMBOL_GPL(tcp_sendpage_locked);

int tcp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	int ret;

	lock_sock(sk);
	ret = tcp_sendpage_locked(sk, page, offset, size, flags);
	release_sock(sk);

	return ret;
}
EXPORT_SYMBOL(tcp_sendpage);

void tcp_free_fastopen_req(struct tcp_sock *tp)
{
	if (tp->fastopen_req) {
		kfree(tp->fastopen_req);
		tp->fastopen_req = NULL;
	}
}

static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
				int *copied, size_t size,
				struct ubuf_info *uarg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr *uaddr = msg->msg_name;
	int err, flags;

	if (!(sock_net(sk)->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
	    (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
	     uaddr->sa_family == AF_UNSPEC))
		return -EOPNOTSUPP;
	if (tp->fastopen_req)
		return -EALREADY; /* Another Fast Open is in progress */

	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
				   sk->sk_allocation);
	if (unlikely(!tp->fastopen_req))
		return -ENOBUFS;
	tp->fastopen_req->data = msg;
	tp->fastopen_req->size = size;
	tp->fastopen_req->uarg = uarg;

	if (inet->defer_connect) {
		err = tcp_connect(sk);
		/* Same failure procedure as in tcp_v4/6_connect */
		if (err) {
			tcp_set_state(sk, TCP_CLOSE);
			inet->inet_dport = 0;
			sk->sk_route_caps = 0;
		}
	}
	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
	err = __inet_stream_connect(sk->sk_socket, uaddr,
				    msg->msg_namelen, flags, 1);
	/* fastopen_req could already be freed in __inet_stream_connect
	 * if the connection times out or gets rst
	 */
	if (tp->fastopen_req) {
		*copied = tp->fastopen_req->copied;
		tcp_free_fastopen_req(tp);
		inet->defer_connect = 0;
	}
	return err;
}
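
/* Sketch of the two client-side entry points into the fastopen path
 * above (assumed standard TCP Fast Open usage, not part of this file):
 *
 *	// 1) one-shot: the SYN carries the data
 *	sendto(fd, buf, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&daddr, sizeof(daddr));
 *
 *	// 2) deferred: connect() returns immediately, the first write()
 *	//    generates the SYN + data (the defer_connect case above)
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &on, sizeof(on));
 *	connect(fd, (struct sockaddr *)&daddr, sizeof(daddr));
 *	write(fd, buf, len);
 */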
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct ubuf_info *uarg = NULL;
	struct sk_buff *skb;
	struct sockcm_cookie sockc;
	int flags, err, copied = 0;
	int mss_now = 0, size_goal, copied_syn = 0;
	int process_backlog = 0;
	bool zc = false;
	long timeo;

	flags = msg->msg_flags;

	if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
		skb = tcp_write_queue_tail(sk);
		uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb));
		if (!uarg) {
			err = -ENOBUFS;
			goto out_err;
		}

		zc = sk->sk_route_caps & NETIF_F_SG;
		if (!zc)
			uarg->zerocopy = 0;
	}

	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
	    !tp->repair) {
		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg);
		if (err == -EINPROGRESS && copied_syn > 0)
			goto out;
		else if (err)
			goto out_err;
	}

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	tcp_rate_check_app_limited(sk);  /* is sending application-limited? */

	/* Wait for a connection to finish. One exception is TCP Fast Open
	 * (passive side) where data is allowed to be sent before a connection
	 * is fully established.
	 */
	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
	    !tcp_passive_fastopen(sk)) {
		err = sk_stream_wait_connect(sk, &timeo);
		if (err != 0)
			goto do_error;
	}

	if (unlikely(tp->repair)) {
		if (tp->repair_queue == TCP_RECV_QUEUE) {
			copied = tcp_send_rcvq(sk, msg, size);
			goto out_nopush;
		}

		err = -EINVAL;
		if (tp->repair_queue == TCP_NO_QUEUE)
			goto out_err;

		/* 'common' sending to sendq */
	}

	sockcm_init(&sockc, sk);
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err)) {
			err = -EINVAL;
			goto out_err;
		}
	}

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Ok commence sending. */
	copied = 0;

restart:
	mss_now = tcp_send_mss(sk, &size_goal, flags);

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (msg_data_left(msg)) {
		int copy = 0;

		skb = tcp_write_queue_tail(sk);
		if (skb)
			copy = size_goal - skb->len;

		if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
			bool first_skb;

new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_space;

			if (unlikely(process_backlog >= 16)) {
				process_backlog = 0;
				if (sk_flush_backlog(sk))
					goto restart;
			}
			first_skb = tcp_rtx_and_write_queues_empty(sk);
			skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation,
						   first_skb);
			if (!skb)
				goto wait_for_space;

			process_backlog++;

			tcp_skb_entail(sk, skb);
			copy = size_goal;

			/* All packets are restored as if they have
			 * already been sent. skb_mstamp_ns isn't set to
			 * avoid wrong rtt estimation.
			 */
			if (tp->repair)
				TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
		}

		/* Try to append data to the end of skb. */
		if (copy > msg_data_left(msg))
			copy = msg_data_left(msg);

		if (!zc) {
			bool merge = true;
			int i = skb_shinfo(skb)->nr_frags;
			struct page_frag *pfrag = sk_page_frag(sk);

			if (!sk_page_frag_refill(sk, pfrag))
				goto wait_for_space;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				if (i >= sysctl_max_skb_frags) {
					tcp_mark_push(tp, skb);
					goto new_segment;
				}
				merge = false;
			}

			copy = min_t(int, copy, pfrag->size - pfrag->offset);

			if (tcp_downgrade_zcopy_pure(sk, skb) ||
			    !sk_wmem_schedule(sk, copy))
				goto wait_for_space;

			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
						       pfrag->page,
						       pfrag->offset,
						       copy);
			if (err)
				goto do_error;

			/* Update the skb. */
			if (merge) {
				skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			} else {
				skb_fill_page_desc(skb, i, pfrag->page,
						   pfrag->offset, copy);
				page_ref_inc(pfrag->page);
			}
			pfrag->offset += copy;
		} else {
			/* First append to a fragless skb builds initial
			 * pure zerocopy skb
			 */
			if (!skb->len)
				skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY;

			if (!skb_zcopy_pure(skb)) {
				if (!sk_wmem_schedule(sk, copy))
					goto wait_for_space;
			}

			err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
			if (err == -EMSGSIZE || err == -EEXIST) {
				tcp_mark_push(tp, skb);
				goto new_segment;
			}
			if (err < 0)
				goto do_error;
			copy = err;
		}

		if (!copied)
			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

		WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
		TCP_SKB_CB(skb)->end_seq += copy;
		tcp_skb_pcount_set(skb, 0);

		copied += copy;
		if (!msg_data_left(msg)) {
			if (unlikely(flags & MSG_EOR))
				TCP_SKB_CB(skb)->eor = 1;
			goto out;
		}

		if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_space:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now,
				 TCP_NAGLE_PUSH, size_goal);

		err = sk_stream_wait_memory(sk, &timeo);
		if (err != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
	}

out:
	if (copied) {
		tcp_tx_timestamp(sk, sockc.tsflags);
		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
	}
out_nopush:
	net_zcopy_put(uarg);
	return copied + copied_syn;

do_error:
	tcp_remove_empty_skb(sk);

	if (copied + copied_syn)
		goto out;
out_err:
	net_zcopy_put_abort(uarg, true);
	err = sk_stream_error(sk, flags, err);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) {
		sk->sk_write_space(sk);
		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
	}
	return err;
}
EXPORT_SYMBOL_GPL(tcp_sendmsg_locked);

int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	int ret;

	lock_sock(sk);
	ret = tcp_sendmsg_locked(sk, msg, size);
	release_sock(sk);

	return ret;
}
EXPORT_SYMBOL(tcp_sendmsg);
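
/* Sketch of the MSG_ZEROCOPY flow that drives the zc branch above
 * (assumed standard usage, cf. Documentation/networking/msg_zerocopy.rst):
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
 *	send(fd, buf, len, MSG_ZEROCOPY);
 *	// the pages must stay untouched until the completion notification
 *	// arrives on the error queue:
 *	recvmsg(fd, &msg, MSG_ERRQUEUE);
 *
 * When the route lacks NETIF_F_SG, tcp_sendmsg_locked() clears
 * uarg->zerocopy and silently falls back to copying.
 */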
/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			WRITE_ONCE(tp->urg_data, TCP_URG_READ);

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_to_msg(msg, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}

static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
{
	struct sk_buff *skb;
	int copied = 0, err = 0;

	/* XXX -- need to support SO_PEEK_OFF */

	skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
		err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
		if (err)
			return err;
		copied += skb->len;
	}

	skb_queue_walk(&sk->sk_write_queue, skb) {
		err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
		if (err)
			break;

		copied += skb->len;
	}

	return err ?: copied;
}

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary.  COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool time_to_ack = false;

	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);

		if (/* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !inet_csk_in_pingpong_mode(sk))) &&
		     !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = true;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = true;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}
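
/* Worked example for the window-update test above (illustrative): with
 * window_clamp = 64 KB and a current receive window of 16 KB,
 * 2 * 16 KB <= 64 KB holds, so __tcp_select_window() is consulted; if
 * the user's read freed enough space that the new window is at least
 * 2 * 16 KB = 32 KB, an ACK is sent purely as a window update.
 */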
static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	if (likely(skb->destructor == sock_rfree)) {
		sock_rfree(skb);
		skb->destructor = NULL;
		skb->sk = NULL;
		return skb_attempt_defer_free(skb);
	}
	__kfree_skb(skb);
}

static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
			pr_err_once("%s: found a SYN, please report !\n", __func__);
			offset--;
		}
		if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) {
			*off = offset;
			return skb;
		}
		/* This looks weird, but this can happen if TCP collapsing
		 * split a fat GRO packet, while we released socket lock
		 * in skb_splice_bits()
		 */
		tcp_eat_recv_skb(sk, skb);
	}
	return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			int used;
			size_t len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (unlikely(tp->urg_data)) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used <= 0) {
				if (!copied)
					copied = used;
				break;
			}
			if (WARN_ON_ONCE(used > len))
				used = len;
			seq += used;
			copied += used;
			offset += used;

			/* If recv_actor drops the lock (e.g. TCP splice
			 * receive) the skb pointer might be invalid when
			 * getting here: tcp_collapse might have deleted it
			 * while aggregating skbs from the socket queue.
			 */
			skb = tcp_recv_skb(sk, seq - 1, &offset);
			if (!skb)
				break;
			/* TCP coalescing might have appended data to the skb.
			 * Try to splice more frags
			 */
			if (offset + 1 != skb->len)
				continue;
		}
		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
			tcp_eat_recv_skb(sk, skb);
			++seq;
			break;
		}
		tcp_eat_recv_skb(sk, skb);
		if (!desc->count)
			break;
		WRITE_ONCE(tp->copied_seq, seq);
	}
	WRITE_ONCE(tp->copied_seq, seq);

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0) {
		tcp_recv_skb(sk, seq, &offset);
		tcp_cleanup_rbuf(sk, copied);
	}
	return copied;
}
EXPORT_SYMBOL(tcp_read_sock);
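
/* tcp_read_sock() is the 'copy directly from the receive queue'
 * primitive; tcp_splice_data_recv() above is one real recv_actor.
 * A minimal sketch of another actor (hypothetical, for illustration
 * only) that just counts bytes:
 *
 *	static int count_actor(read_descriptor_t *desc, struct sk_buff *skb,
 *			       unsigned int offset, size_t len)
 *	{
 *		desc->written += len;
 *		return len;	// consume all bytes offered
 *	}
 *
 * An actor may consume fewer than len bytes (or 0 to stop); the loop
 * above then terminates and reports what was consumed so far.
 */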
int tcp_peek_len(struct socket *sock)
{
	return tcp_inq(sock->sk);
}
EXPORT_SYMBOL(tcp_peek_len);

/* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */
int tcp_set_rcvlowat(struct sock *sk, int val)
{
	int cap;

	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
		cap = sk->sk_rcvbuf >> 1;
	else
		cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1;
	val = min(val, cap);
	WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);

	/* Check if we need to signal EPOLLIN right now */
	tcp_data_ready(sk);

	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
		return 0;

	val <<= 1;
	if (val > sk->sk_rcvbuf) {
		WRITE_ONCE(sk->sk_rcvbuf, val);
		tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val);
	}
	return 0;
}
EXPORT_SYMBOL(tcp_set_rcvlowat);

void tcp_update_recv_tstamps(struct sk_buff *skb,
			     struct scm_timestamping_internal *tss)
{
	if (skb->tstamp)
		tss->ts[0] = ktime_to_timespec64(skb->tstamp);
	else
		tss->ts[0] = (struct timespec64) {0};

	if (skb_hwtstamps(skb)->hwtstamp)
		tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp);
	else
		tss->ts[2] = (struct timespec64) {0};
}

#ifdef CONFIG_MMU
static const struct vm_operations_struct tcp_vm_ops = {
};

int tcp_mmap(struct file *file, struct socket *sock,
	     struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;
	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);

	/* Instruct vm_insert_page() to not mmap_read_lock(mm) */
	vma->vm_flags |= VM_MIXEDMAP;

	vma->vm_ops = &tcp_vm_ops;
	return 0;
}
EXPORT_SYMBOL(tcp_mmap);

static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb,
				       u32 *offset_frag)
{
	skb_frag_t *frag;

	if (unlikely(offset_skb >= skb->len))
		return NULL;

	offset_skb -= skb_headlen(skb);
	if ((int)offset_skb < 0 || skb_has_frag_list(skb))
		return NULL;

	frag = skb_shinfo(skb)->frags;
	while (offset_skb) {
		if (skb_frag_size(frag) > offset_skb) {
			*offset_frag = offset_skb;
			return frag;
		}
		offset_skb -= skb_frag_size(frag);
		++frag;
	}
	*offset_frag = 0;
	return frag;
}

static bool can_map_frag(const skb_frag_t *frag)
{
	return skb_frag_size(frag) == PAGE_SIZE && !skb_frag_off(frag);
}

static int find_next_mappable_frag(const skb_frag_t *frag,
				   int remaining_in_skb)
{
	int offset = 0;

	if (likely(can_map_frag(frag)))
		return 0;

	while (offset < remaining_in_skb && !can_map_frag(frag)) {
		offset += skb_frag_size(frag);
		++frag;
	}
	return offset;
}

static void tcp_zerocopy_set_hint_for_skb(struct sock *sk,
					  struct tcp_zerocopy_receive *zc,
					  struct sk_buff *skb, u32 offset)
{
	u32 frag_offset, partial_frag_remainder = 0;
	int mappable_offset;
	skb_frag_t *frag;

	/* worst case: skip to next skb. try to improve on this case below */
	zc->recv_skip_hint = skb->len - offset;

	/* Find the frag containing this offset (and how far into that frag) */
	frag = skb_advance_to_frag(skb, offset, &frag_offset);
	if (!frag)
		return;

	if (frag_offset) {
		struct skb_shared_info *info = skb_shinfo(skb);

		/* We read part of the last frag, must recvmsg() rest of skb. */
		if (frag == &info->frags[info->nr_frags - 1])
			return;

		/* Else, we must at least read the remainder in this frag. */
		partial_frag_remainder = skb_frag_size(frag) - frag_offset;
		zc->recv_skip_hint -= partial_frag_remainder;
		++frag;
	}

	/* partial_frag_remainder: If part way through a frag, must read rest.
	 * mappable_offset: Bytes till next mappable frag, *not* counting bytes
	 * in partial_frag_remainder.
	 */
	mappable_offset = find_next_mappable_frag(frag, zc->recv_skip_hint);
	zc->recv_skip_hint = mappable_offset + partial_frag_remainder;
}
static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
			      int flags, struct scm_timestamping_internal *tss,
			      int *cmsg_flags);
static int receive_fallback_to_copy(struct sock *sk,
				    struct tcp_zerocopy_receive *zc, int inq,
				    struct scm_timestamping_internal *tss)
{
	unsigned long copy_address = (unsigned long)zc->copybuf_address;
	struct msghdr msg = {};
	struct iovec iov;
	int err;

	zc->length = 0;
	zc->recv_skip_hint = 0;

	if (copy_address != zc->copybuf_address)
		return -EINVAL;

	err = import_single_range(READ, (void __user *)copy_address,
				  inq, &iov, &msg.msg_iter);
	if (err)
		return err;

	err = tcp_recvmsg_locked(sk, &msg, inq, MSG_DONTWAIT,
				 tss, &zc->msg_flags);
	if (err < 0)
		return err;

	zc->copybuf_len = err;
	if (likely(zc->copybuf_len)) {
		struct sk_buff *skb;
		u32 offset;

		skb = tcp_recv_skb(sk, tcp_sk(sk)->copied_seq, &offset);
		if (skb)
			tcp_zerocopy_set_hint_for_skb(sk, zc, skb, offset);
	}
	return 0;
}

static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc,
				   struct sk_buff *skb, u32 copylen,
				   u32 *offset, u32 *seq)
{
	unsigned long copy_address = (unsigned long)zc->copybuf_address;
	struct msghdr msg = {};
	struct iovec iov;
	int err;

	if (copy_address != zc->copybuf_address)
		return -EINVAL;

	err = import_single_range(READ, (void __user *)copy_address,
				  copylen, &iov, &msg.msg_iter);
	if (err)
		return err;
	err = skb_copy_datagram_msg(skb, *offset, &msg, copylen);
	if (err)
		return err;
	zc->recv_skip_hint -= copylen;
	*offset += copylen;
	*seq += copylen;
	return (__s32)copylen;
}

static int tcp_zc_handle_leftover(struct tcp_zerocopy_receive *zc,
				  struct sock *sk,
				  struct sk_buff *skb,
				  u32 *seq,
				  s32 copybuf_len,
				  struct scm_timestamping_internal *tss)
{
	u32 offset, copylen = min_t(u32, copybuf_len, zc->recv_skip_hint);

	if (!copylen)
		return 0;
	/* skb is null if inq < PAGE_SIZE. */
	if (skb) {
		offset = *seq - TCP_SKB_CB(skb)->seq;
	} else {
		skb = tcp_recv_skb(sk, *seq, &offset);
		if (TCP_SKB_CB(skb)->has_rxtstamp) {
			tcp_update_recv_tstamps(skb, tss);
			zc->msg_flags |= TCP_CMSG_TS;
		}
	}

	zc->copybuf_len = tcp_copy_straggler_data(zc, skb, copylen, &offset,
						  seq);
	return zc->copybuf_len < 0 ? 0 : copylen;
}
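
/* Sketch of the userspace receive-zerocopy flow these helpers support
 * (assumed usage, cf. the tcp_mmap selftest; not part of this file):
 *
 *	void *addr = mmap(NULL, chunk, PROT_READ, MAP_SHARED, fd, 0);
 *	struct tcp_zerocopy_receive zc = {
 *		.address = (__u64)(unsigned long)addr,
 *		.length	 = chunk,
 *	};
 *	socklen_t zlen = sizeof(zc);
 *	getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zlen);
 *	// zc.length bytes are now mapped at addr; any straggler bytes
 *	// indicated by zc.recv_skip_hint are read with recvmsg() or via
 *	// zc.copybuf_address, which tcp_zc_handle_leftover() fills.
 *
 * The copybuf path above exists precisely because sub-page tails cannot
 * be mapped.
 */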
*/ 1937 if (skb) { 1938 offset = *seq - TCP_SKB_CB(skb)->seq; 1939 } else { 1940 skb = tcp_recv_skb(sk, *seq, &offset); 1941 if (TCP_SKB_CB(skb)->has_rxtstamp) { 1942 tcp_update_recv_tstamps(skb, tss); 1943 zc->msg_flags |= TCP_CMSG_TS; 1944 } 1945 } 1946 1947 zc->copybuf_len = tcp_copy_straggler_data(zc, skb, copylen, &offset, 1948 seq); 1949 return zc->copybuf_len < 0 ? 0 : copylen; 1950 } 1951 1952 static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma, 1953 struct page **pending_pages, 1954 unsigned long pages_remaining, 1955 unsigned long *address, 1956 u32 *length, 1957 u32 *seq, 1958 struct tcp_zerocopy_receive *zc, 1959 u32 total_bytes_to_map, 1960 int err) 1961 { 1962 /* At least one page did not map. Try zapping if we skipped earlier. */ 1963 if (err == -EBUSY && 1964 zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT) { 1965 u32 maybe_zap_len; 1966 1967 maybe_zap_len = total_bytes_to_map - /* All bytes to map */ 1968 *length + /* Mapped or pending */ 1969 (pages_remaining * PAGE_SIZE); /* Failed map. */ 1970 zap_page_range(vma, *address, maybe_zap_len); 1971 err = 0; 1972 } 1973 1974 if (!err) { 1975 unsigned long leftover_pages = pages_remaining; 1976 int bytes_mapped; 1977 1978 /* We called zap_page_range, try to reinsert. */ 1979 err = vm_insert_pages(vma, *address, 1980 pending_pages, 1981 &pages_remaining); 1982 bytes_mapped = PAGE_SIZE * (leftover_pages - pages_remaining); 1983 *seq += bytes_mapped; 1984 *address += bytes_mapped; 1985 } 1986 if (err) { 1987 /* Either we were unable to zap, OR we zapped, retried an 1988 * insert, and still had an issue. Either ways, pages_remaining 1989 * is the number of pages we were unable to map, and we unroll 1990 * some state we speculatively touched before. 1991 */ 1992 const int bytes_not_mapped = PAGE_SIZE * pages_remaining; 1993 1994 *length -= bytes_not_mapped; 1995 zc->recv_skip_hint += bytes_not_mapped; 1996 } 1997 return err; 1998 } 1999 2000 static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma, 2001 struct page **pages, 2002 unsigned int pages_to_map, 2003 unsigned long *address, 2004 u32 *length, 2005 u32 *seq, 2006 struct tcp_zerocopy_receive *zc, 2007 u32 total_bytes_to_map) 2008 { 2009 unsigned long pages_remaining = pages_to_map; 2010 unsigned int pages_mapped; 2011 unsigned int bytes_mapped; 2012 int err; 2013 2014 err = vm_insert_pages(vma, *address, pages, &pages_remaining); 2015 pages_mapped = pages_to_map - (unsigned int)pages_remaining; 2016 bytes_mapped = PAGE_SIZE * pages_mapped; 2017 /* Even if vm_insert_pages fails, it may have partially succeeded in 2018 * mapping (some but not all of the pages). 2019 */ 2020 *seq += bytes_mapped; 2021 *address += bytes_mapped; 2022 2023 if (likely(!err)) 2024 return 0; 2025 2026 /* Error: maybe zap and retry + rollback state for failed inserts. */ 2027 return tcp_zerocopy_vm_insert_batch_error(vma, pages + pages_mapped, 2028 pages_remaining, address, length, seq, zc, total_bytes_to_map, 2029 err); 2030 } 2031 2032 #define TCP_VALID_ZC_MSG_FLAGS (TCP_CMSG_TS) 2033 static void tcp_zc_finalize_rx_tstamp(struct sock *sk, 2034 struct tcp_zerocopy_receive *zc, 2035 struct scm_timestamping_internal *tss) 2036 { 2037 unsigned long msg_control_addr; 2038 struct msghdr cmsg_dummy; 2039 2040 msg_control_addr = (unsigned long)zc->msg_control; 2041 cmsg_dummy.msg_control = (void *)msg_control_addr; 2042 cmsg_dummy.msg_controllen = 2043 (__kernel_size_t)zc->msg_controllen; 2044 cmsg_dummy.msg_flags = in_compat_syscall() 2045 ? 
MSG_CMSG_COMPAT : 0; 2046 cmsg_dummy.msg_control_is_user = true; 2047 zc->msg_flags = 0; 2048 if (zc->msg_control == msg_control_addr && 2049 zc->msg_controllen == cmsg_dummy.msg_controllen) { 2050 tcp_recv_timestamp(&cmsg_dummy, sk, tss); 2051 zc->msg_control = (__u64) 2052 ((uintptr_t)cmsg_dummy.msg_control); 2053 zc->msg_controllen = 2054 (__u64)cmsg_dummy.msg_controllen; 2055 zc->msg_flags = (__u32)cmsg_dummy.msg_flags; 2056 } 2057 } 2058 2059 #define TCP_ZEROCOPY_PAGE_BATCH_SIZE 32 2060 static int tcp_zerocopy_receive(struct sock *sk, 2061 struct tcp_zerocopy_receive *zc, 2062 struct scm_timestamping_internal *tss) 2063 { 2064 u32 length = 0, offset, vma_len, avail_len, copylen = 0; 2065 unsigned long address = (unsigned long)zc->address; 2066 struct page *pages[TCP_ZEROCOPY_PAGE_BATCH_SIZE]; 2067 s32 copybuf_len = zc->copybuf_len; 2068 struct tcp_sock *tp = tcp_sk(sk); 2069 const skb_frag_t *frags = NULL; 2070 unsigned int pages_to_map = 0; 2071 struct vm_area_struct *vma; 2072 struct sk_buff *skb = NULL; 2073 u32 seq = tp->copied_seq; 2074 u32 total_bytes_to_map; 2075 int inq = tcp_inq(sk); 2076 int ret; 2077 2078 zc->copybuf_len = 0; 2079 zc->msg_flags = 0; 2080 2081 if (address & (PAGE_SIZE - 1) || address != zc->address) 2082 return -EINVAL; 2083 2084 if (sk->sk_state == TCP_LISTEN) 2085 return -ENOTCONN; 2086 2087 sock_rps_record_flow(sk); 2088 2089 if (inq && inq <= copybuf_len) 2090 return receive_fallback_to_copy(sk, zc, inq, tss); 2091 2092 if (inq < PAGE_SIZE) { 2093 zc->length = 0; 2094 zc->recv_skip_hint = inq; 2095 if (!inq && sock_flag(sk, SOCK_DONE)) 2096 return -EIO; 2097 return 0; 2098 } 2099 2100 mmap_read_lock(current->mm); 2101 2102 vma = vma_lookup(current->mm, address); 2103 if (!vma || vma->vm_ops != &tcp_vm_ops) { 2104 mmap_read_unlock(current->mm); 2105 return -EINVAL; 2106 } 2107 vma_len = min_t(unsigned long, zc->length, vma->vm_end - address); 2108 avail_len = min_t(u32, vma_len, inq); 2109 total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1); 2110 if (total_bytes_to_map) { 2111 if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT)) 2112 zap_page_range(vma, address, total_bytes_to_map); 2113 zc->length = total_bytes_to_map; 2114 zc->recv_skip_hint = 0; 2115 } else { 2116 zc->length = avail_len; 2117 zc->recv_skip_hint = avail_len; 2118 } 2119 ret = 0; 2120 while (length + PAGE_SIZE <= zc->length) { 2121 int mappable_offset; 2122 struct page *page; 2123 2124 if (zc->recv_skip_hint < PAGE_SIZE) { 2125 u32 offset_frag; 2126 2127 if (skb) { 2128 if (zc->recv_skip_hint > 0) 2129 break; 2130 skb = skb->next; 2131 offset = seq - TCP_SKB_CB(skb)->seq; 2132 } else { 2133 skb = tcp_recv_skb(sk, seq, &offset); 2134 } 2135 2136 if (TCP_SKB_CB(skb)->has_rxtstamp) { 2137 tcp_update_recv_tstamps(skb, tss); 2138 zc->msg_flags |= TCP_CMSG_TS; 2139 } 2140 zc->recv_skip_hint = skb->len - offset; 2141 frags = skb_advance_to_frag(skb, offset, &offset_frag); 2142 if (!frags || offset_frag) 2143 break; 2144 } 2145 2146 mappable_offset = find_next_mappable_frag(frags, 2147 zc->recv_skip_hint); 2148 if (mappable_offset) { 2149 zc->recv_skip_hint = mappable_offset; 2150 break; 2151 } 2152 page = skb_frag_page(frags); 2153 prefetchw(page); 2154 pages[pages_to_map++] = page; 2155 length += PAGE_SIZE; 2156 zc->recv_skip_hint -= PAGE_SIZE; 2157 frags++; 2158 if (pages_to_map == TCP_ZEROCOPY_PAGE_BATCH_SIZE || 2159 zc->recv_skip_hint < PAGE_SIZE) { 2160 /* Either full batch, or we're about to go to next skb 2161 * (and we cannot unroll failed ops across skbs). 
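 */

/* A minimal userspace sketch of the getsockopt(TCP_ZEROCOPY_RECEIVE)
 * flow this loop implements (illustrative only: fd, map and map_len
 * are assumptions, consume() is a hypothetical reader, and error
 * handling is omitted). The mapping must come from mmap() on the TCP
 * socket itself, see tcp_mmap() above:
 *
 *	void *map = mmap(NULL, map_len, PROT_READ, MAP_SHARED, fd, 0);
 *	struct tcp_zerocopy_receive zc = {
 *		.address = (__u64)(unsigned long)map,
 *		.length  = map_len,
 *	};
 *	socklen_t zc_len = sizeof(zc);
 *	char buf[4096];
 *
 *	getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zc_len);
 *	consume(map, zc.length);
 *	if (zc.recv_skip_hint)
 *		recv(fd, buf, zc.recv_skip_hint, 0);
 *
 * zc.length bytes were mapped into the region; recv_skip_hint bytes
 * could not be mapped and must be copied out the normal way.
 */

/*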
2162 */ 2163 ret = tcp_zerocopy_vm_insert_batch(vma, pages, 2164 pages_to_map, 2165 &address, &length, 2166 &seq, zc, 2167 total_bytes_to_map); 2168 if (ret) 2169 goto out; 2170 pages_to_map = 0; 2171 } 2172 } 2173 if (pages_to_map) { 2174 ret = tcp_zerocopy_vm_insert_batch(vma, pages, pages_to_map, 2175 &address, &length, &seq, 2176 zc, total_bytes_to_map); 2177 } 2178 out: 2179 mmap_read_unlock(current->mm); 2180 /* Try to copy straggler data. */ 2181 if (!ret) 2182 copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss); 2183 2184 if (length + copylen) { 2185 WRITE_ONCE(tp->copied_seq, seq); 2186 tcp_rcv_space_adjust(sk); 2187 2188 /* Clean up data we have read: This will do ACK frames. */ 2189 tcp_recv_skb(sk, seq, &offset); 2190 tcp_cleanup_rbuf(sk, length + copylen); 2191 ret = 0; 2192 if (length == zc->length) 2193 zc->recv_skip_hint = 0; 2194 } else { 2195 if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE)) 2196 ret = -EIO; 2197 } 2198 zc->length = length; 2199 return ret; 2200 } 2201 #endif 2202 2203 /* Similar to __sock_recv_timestamp, but does not require an skb */ 2204 void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk, 2205 struct scm_timestamping_internal *tss) 2206 { 2207 int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW); 2208 bool has_timestamping = false; 2209 2210 if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) { 2211 if (sock_flag(sk, SOCK_RCVTSTAMP)) { 2212 if (sock_flag(sk, SOCK_RCVTSTAMPNS)) { 2213 if (new_tstamp) { 2214 struct __kernel_timespec kts = { 2215 .tv_sec = tss->ts[0].tv_sec, 2216 .tv_nsec = tss->ts[0].tv_nsec, 2217 }; 2218 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW, 2219 sizeof(kts), &kts); 2220 } else { 2221 struct __kernel_old_timespec ts_old = { 2222 .tv_sec = tss->ts[0].tv_sec, 2223 .tv_nsec = tss->ts[0].tv_nsec, 2224 }; 2225 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD, 2226 sizeof(ts_old), &ts_old); 2227 } 2228 } else { 2229 if (new_tstamp) { 2230 struct __kernel_sock_timeval stv = { 2231 .tv_sec = tss->ts[0].tv_sec, 2232 .tv_usec = tss->ts[0].tv_nsec / 1000, 2233 }; 2234 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW, 2235 sizeof(stv), &stv); 2236 } else { 2237 struct __kernel_old_timeval tv = { 2238 .tv_sec = tss->ts[0].tv_sec, 2239 .tv_usec = tss->ts[0].tv_nsec / 1000, 2240 }; 2241 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD, 2242 sizeof(tv), &tv); 2243 } 2244 } 2245 } 2246 2247 if (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) 2248 has_timestamping = true; 2249 else 2250 tss->ts[0] = (struct timespec64) {0}; 2251 } 2252 2253 if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) { 2254 if (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) 2255 has_timestamping = true; 2256 else 2257 tss->ts[2] = (struct timespec64) {0}; 2258 } 2259 2260 if (has_timestamping) { 2261 tss->ts[1] = (struct timespec64) {0}; 2262 if (sock_flag(sk, SOCK_TSTAMP_NEW)) 2263 put_cmsg_scm_timestamping64(msg, tss); 2264 else 2265 put_cmsg_scm_timestamping(msg, tss); 2266 } 2267 } 2268 2269 static int tcp_inq_hint(struct sock *sk) 2270 { 2271 const struct tcp_sock *tp = tcp_sk(sk); 2272 u32 copied_seq = READ_ONCE(tp->copied_seq); 2273 u32 rcv_nxt = READ_ONCE(tp->rcv_nxt); 2274 int inq; 2275 2276 inq = rcv_nxt - copied_seq; 2277 if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) { 2278 lock_sock(sk); 2279 inq = tp->rcv_nxt - tp->copied_seq; 2280 release_sock(sk); 2281 } 2282 /* After receiving a FIN, tell the user-space to continue reading 2283 * by returning a non-zero inq. 
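 */

/* A minimal userspace sketch of the TCP_INQ hint this helper feeds
 * (illustrative only: fd is an assumed connected TCP socket and
 * error handling is omitted):
 *
 *	int one = 1, inq = -1;
 *	char buf[4096], ctrl[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = ctrl,
 *			      .msg_controllen = sizeof(ctrl) };
 *	struct cmsghdr *cm;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_INQ, &one, sizeof(one));
 *	recvmsg(fd, &msg, 0);
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *		if (cm->cmsg_level == SOL_TCP && cm->cmsg_type == TCP_CM_INQ)
 *			memcpy(&inq, CMSG_DATA(cm), sizeof(inq));
 */

/*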
2284 */ 2285 if (inq == 0 && sock_flag(sk, SOCK_DONE)) 2286 inq = 1; 2287 return inq; 2288 } 2289 2290 /* 2291 * This routine copies from a sock struct into the user buffer. 2292 * 2293 * Technical note: in 2.3 we work on _locked_ socket, so that 2294 * tricks with *seq access order and skb->users are not required. 2295 * Probably, code can be easily improved even more. 2296 */ 2297 2298 static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, 2299 int flags, struct scm_timestamping_internal *tss, 2300 int *cmsg_flags) 2301 { 2302 struct tcp_sock *tp = tcp_sk(sk); 2303 int copied = 0; 2304 u32 peek_seq; 2305 u32 *seq; 2306 unsigned long used; 2307 int err; 2308 int target; /* Read at least this many bytes */ 2309 long timeo; 2310 struct sk_buff *skb, *last; 2311 u32 urg_hole = 0; 2312 2313 err = -ENOTCONN; 2314 if (sk->sk_state == TCP_LISTEN) 2315 goto out; 2316 2317 if (tp->recvmsg_inq) { 2318 *cmsg_flags = TCP_CMSG_INQ; 2319 msg->msg_get_inq = 1; 2320 } 2321 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 2322 2323 /* Urgent data needs to be handled specially. */ 2324 if (flags & MSG_OOB) 2325 goto recv_urg; 2326 2327 if (unlikely(tp->repair)) { 2328 err = -EPERM; 2329 if (!(flags & MSG_PEEK)) 2330 goto out; 2331 2332 if (tp->repair_queue == TCP_SEND_QUEUE) 2333 goto recv_sndq; 2334 2335 err = -EINVAL; 2336 if (tp->repair_queue == TCP_NO_QUEUE) 2337 goto out; 2338 2339 /* 'common' recv queue MSG_PEEK-ing */ 2340 } 2341 2342 seq = &tp->copied_seq; 2343 if (flags & MSG_PEEK) { 2344 peek_seq = tp->copied_seq; 2345 seq = &peek_seq; 2346 } 2347 2348 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 2349 2350 do { 2351 u32 offset; 2352 2353 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ 2354 if (unlikely(tp->urg_data) && tp->urg_seq == *seq) { 2355 if (copied) 2356 break; 2357 if (signal_pending(current)) { 2358 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; 2359 break; 2360 } 2361 } 2362 2363 /* Next get a buffer. */ 2364 2365 last = skb_peek_tail(&sk->sk_receive_queue); 2366 skb_queue_walk(&sk->sk_receive_queue, skb) { 2367 last = skb; 2368 /* Now that we have two receive queues this 2369 * shouldn't happen. 2370 */ 2371 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), 2372 "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n", 2373 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, 2374 flags)) 2375 break; 2376 2377 offset = *seq - TCP_SKB_CB(skb)->seq; 2378 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 2379 pr_err_once("%s: found a SYN, please report !\n", __func__); 2380 offset--; 2381 } 2382 if (offset < skb->len) 2383 goto found_ok_skb; 2384 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 2385 goto found_fin_ok; 2386 WARN(!(flags & MSG_PEEK), 2387 "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n", 2388 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); 2389 } 2390 2391 /* Well, if we have backlog, try to process it now yet. */ 2392 2393 if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) 2394 break; 2395 2396 if (copied) { 2397 if (!timeo || 2398 sk->sk_err || 2399 sk->sk_state == TCP_CLOSE || 2400 (sk->sk_shutdown & RCV_SHUTDOWN) || 2401 signal_pending(current)) 2402 break; 2403 } else { 2404 if (sock_flag(sk, SOCK_DONE)) 2405 break; 2406 2407 if (sk->sk_err) { 2408 copied = sock_error(sk); 2409 break; 2410 } 2411 2412 if (sk->sk_shutdown & RCV_SHUTDOWN) 2413 break; 2414 2415 if (sk->sk_state == TCP_CLOSE) { 2416 /* This occurs when user tries to read 2417 * from never connected socket. 
2418 */ 2419 copied = -ENOTCONN; 2420 break; 2421 } 2422 2423 if (!timeo) { 2424 copied = -EAGAIN; 2425 break; 2426 } 2427 2428 if (signal_pending(current)) { 2429 copied = sock_intr_errno(timeo); 2430 break; 2431 } 2432 } 2433 2434 if (copied >= target) { 2435 /* Do not sleep, just process backlog. */ 2436 __sk_flush_backlog(sk); 2437 } else { 2438 tcp_cleanup_rbuf(sk, copied); 2439 sk_wait_data(sk, &timeo, last); 2440 } 2441 2442 if ((flags & MSG_PEEK) && 2443 (peek_seq - copied - urg_hole != tp->copied_seq)) { 2444 net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", 2445 current->comm, 2446 task_pid_nr(current)); 2447 peek_seq = tp->copied_seq; 2448 } 2449 continue; 2450 2451 found_ok_skb: 2452 /* Ok so how much can we use? */ 2453 used = skb->len - offset; 2454 if (len < used) 2455 used = len; 2456 2457 /* Do we have urgent data here? */ 2458 if (unlikely(tp->urg_data)) { 2459 u32 urg_offset = tp->urg_seq - *seq; 2460 if (urg_offset < used) { 2461 if (!urg_offset) { 2462 if (!sock_flag(sk, SOCK_URGINLINE)) { 2463 WRITE_ONCE(*seq, *seq + 1); 2464 urg_hole++; 2465 offset++; 2466 used--; 2467 if (!used) 2468 goto skip_copy; 2469 } 2470 } else 2471 used = urg_offset; 2472 } 2473 } 2474 2475 if (!(flags & MSG_TRUNC)) { 2476 err = skb_copy_datagram_msg(skb, offset, msg, used); 2477 if (err) { 2478 /* Exception. Bailout! */ 2479 if (!copied) 2480 copied = -EFAULT; 2481 break; 2482 } 2483 } 2484 2485 WRITE_ONCE(*seq, *seq + used); 2486 copied += used; 2487 len -= used; 2488 2489 tcp_rcv_space_adjust(sk); 2490 2491 skip_copy: 2492 if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) { 2493 WRITE_ONCE(tp->urg_data, 0); 2494 tcp_fast_path_check(sk); 2495 } 2496 2497 if (TCP_SKB_CB(skb)->has_rxtstamp) { 2498 tcp_update_recv_tstamps(skb, tss); 2499 *cmsg_flags |= TCP_CMSG_TS; 2500 } 2501 2502 if (used + offset < skb->len) 2503 continue; 2504 2505 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 2506 goto found_fin_ok; 2507 if (!(flags & MSG_PEEK)) 2508 tcp_eat_recv_skb(sk, skb); 2509 continue; 2510 2511 found_fin_ok: 2512 /* Process the FIN. */ 2513 WRITE_ONCE(*seq, *seq + 1); 2514 if (!(flags & MSG_PEEK)) 2515 tcp_eat_recv_skb(sk, skb); 2516 break; 2517 } while (len > 0); 2518 2519 /* According to UNIX98, msg_name/msg_namelen are ignored 2520 * on connected socket. I was just happy when found this 8) --ANK 2521 */ 2522 2523 /* Clean up data we have read: This will do ACK frames. 
 */
	tcp_cleanup_rbuf(sk, copied);
	return copied;

out:
	return err;

recv_urg:
	err = tcp_recv_urg(sk, msg, len, flags);
	goto out;

recv_sndq:
	err = tcp_peek_sndq(sk, msg, len);
	goto out;
}

int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
		int *addr_len)
{
	int cmsg_flags = 0, ret;
	struct scm_timestamping_internal tss;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	if (sk_can_busy_loop(sk) &&
	    skb_queue_empty_lockless(&sk->sk_receive_queue) &&
	    sk->sk_state == TCP_ESTABLISHED)
		sk_busy_loop(sk, flags & MSG_DONTWAIT);

	lock_sock(sk);
	ret = tcp_recvmsg_locked(sk, msg, len, flags, &tss, &cmsg_flags);
	release_sock(sk);

	if ((cmsg_flags || msg->msg_get_inq) && ret >= 0) {
		if (cmsg_flags & TCP_CMSG_TS)
			tcp_recv_timestamp(msg, sk, &tss);
		if (msg->msg_get_inq) {
			msg->msg_inq = tcp_inq_hint(sk);
			if (cmsg_flags & TCP_CMSG_INQ)
				put_cmsg(msg, SOL_TCP, TCP_CM_INQ,
					 sizeof(msg->msg_inq), &msg->msg_inq);
		}
	}
	return ret;
}
EXPORT_SYMBOL(tcp_recvmsg);

void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_state;

	/* We defined a new enum for TCP states that are exported in BPF
	 * so as not to force the internal TCP states to be frozen. The
	 * following checks will detect if an internal state value ever
	 * differs from the BPF value. If this ever happens, then we will
	 * need to remap the internal value to the BPF value before calling
	 * tcp_call_bpf_2arg.
	 */
	BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED);
	BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT);
	BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV);
	BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1);
	BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2);
	BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT);
	BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE);
	BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT);
	BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK);
	BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN);
	BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING);
	BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV);
	BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES);

	/* bpf uapi header bpf.h defines an anonymous enum with values
	 * BPF_TCP_* used by bpf programs. Currently gcc built vmlinux
	 * is able to emit this enum in DWARF due to the above BUILD_BUG_ON.
	 * But clang built vmlinux does not have this enum in DWARF
	 * since clang removes the above code before generating IR/debuginfo.
	 * Let us explicitly emit the type debuginfo to ensure the
	 * above-mentioned anonymous enum is emitted into the vmlinux DWARF
	 * and hence BTF, regardless of which compiler is used.
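 */

/* A sketch of how a BPF sockops program can observe these state
 * transitions through BPF_SOCK_OPS_STATE_CB; the callback only fires
 * once BPF_SOCK_OPS_STATE_CB_FLAG has been set for the socket. The
 * program below is illustrative, not from this tree:
 *
 *	SEC("sockops")
 *	int observe_state(struct bpf_sock_ops *skops)
 *	{
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			bpf_sock_ops_cb_flags_set(skops,
 *						  BPF_SOCK_OPS_STATE_CB_FLAG);
 *		else if (skops->op == BPF_SOCK_OPS_STATE_CB)
 *			;	(skops->args[0] is the old state,
 *				 skops->args[1] the new one, e.g.
 *				 BPF_TCP_CLOSE)
 *		return 1;
 *	}
 */

/*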
2604 */ 2605 BTF_TYPE_EMIT_ENUM(BPF_TCP_ESTABLISHED); 2606 2607 if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG)) 2608 tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state); 2609 2610 switch (state) { 2611 case TCP_ESTABLISHED: 2612 if (oldstate != TCP_ESTABLISHED) 2613 TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2614 break; 2615 2616 case TCP_CLOSE: 2617 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) 2618 TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); 2619 2620 sk->sk_prot->unhash(sk); 2621 if (inet_csk(sk)->icsk_bind_hash && 2622 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 2623 inet_put_port(sk); 2624 fallthrough; 2625 default: 2626 if (oldstate == TCP_ESTABLISHED) 2627 TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2628 } 2629 2630 /* Change state AFTER socket is unhashed to avoid closed 2631 * socket sitting in hash tables. 2632 */ 2633 inet_sk_state_store(sk, state); 2634 } 2635 EXPORT_SYMBOL_GPL(tcp_set_state); 2636 2637 /* 2638 * State processing on a close. This implements the state shift for 2639 * sending our FIN frame. Note that we only send a FIN for some 2640 * states. A shutdown() may have already sent the FIN, or we may be 2641 * closed. 2642 */ 2643 2644 static const unsigned char new_state[16] = { 2645 /* current state: new state: action: */ 2646 [0 /* (Invalid) */] = TCP_CLOSE, 2647 [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 2648 [TCP_SYN_SENT] = TCP_CLOSE, 2649 [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 2650 [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, 2651 [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, 2652 [TCP_TIME_WAIT] = TCP_CLOSE, 2653 [TCP_CLOSE] = TCP_CLOSE, 2654 [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, 2655 [TCP_LAST_ACK] = TCP_LAST_ACK, 2656 [TCP_LISTEN] = TCP_CLOSE, 2657 [TCP_CLOSING] = TCP_CLOSING, 2658 [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */ 2659 }; 2660 2661 static int tcp_close_state(struct sock *sk) 2662 { 2663 int next = (int)new_state[sk->sk_state]; 2664 int ns = next & TCP_STATE_MASK; 2665 2666 tcp_set_state(sk, ns); 2667 2668 return next & TCP_ACTION_FIN; 2669 } 2670 2671 /* 2672 * Shutdown the sending side of a connection. Much like close except 2673 * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD). 2674 */ 2675 2676 void tcp_shutdown(struct sock *sk, int how) 2677 { 2678 /* We need to grab some memory, and put together a FIN, 2679 * and then put it into the queue to be sent. 2680 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. 2681 */ 2682 if (!(how & SEND_SHUTDOWN)) 2683 return; 2684 2685 /* If we've already sent a FIN, or it's a closed state, skip this. */ 2686 if ((1 << sk->sk_state) & 2687 (TCPF_ESTABLISHED | TCPF_SYN_SENT | 2688 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { 2689 /* Clear out any half completed packets. FIN if needed. 
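 */

/* A minimal userspace sketch of the half-close implemented here
 * (illustrative only: fd is an assumed connected socket, consume()
 * is a hypothetical reader, error handling is omitted):
 *
 *	ssize_t n;
 *
 *	shutdown(fd, SHUT_WR);			send our FIN, keep reading
 *	while ((n = recv(fd, buf, sizeof(buf), 0)) > 0)
 *		consume(buf, n);		until the peer closes too
 *	close(fd);
 */

/*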
*/ 2690 if (tcp_close_state(sk)) 2691 tcp_send_fin(sk); 2692 } 2693 } 2694 EXPORT_SYMBOL(tcp_shutdown); 2695 2696 int tcp_orphan_count_sum(void) 2697 { 2698 int i, total = 0; 2699 2700 for_each_possible_cpu(i) 2701 total += per_cpu(tcp_orphan_count, i); 2702 2703 return max(total, 0); 2704 } 2705 2706 static int tcp_orphan_cache; 2707 static struct timer_list tcp_orphan_timer; 2708 #define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100) 2709 2710 static void tcp_orphan_update(struct timer_list *unused) 2711 { 2712 WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum()); 2713 mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); 2714 } 2715 2716 static bool tcp_too_many_orphans(int shift) 2717 { 2718 return READ_ONCE(tcp_orphan_cache) << shift > 2719 READ_ONCE(sysctl_tcp_max_orphans); 2720 } 2721 2722 bool tcp_check_oom(struct sock *sk, int shift) 2723 { 2724 bool too_many_orphans, out_of_socket_memory; 2725 2726 too_many_orphans = tcp_too_many_orphans(shift); 2727 out_of_socket_memory = tcp_out_of_memory(sk); 2728 2729 if (too_many_orphans) 2730 net_info_ratelimited("too many orphaned sockets\n"); 2731 if (out_of_socket_memory) 2732 net_info_ratelimited("out of memory -- consider tuning tcp_mem\n"); 2733 return too_many_orphans || out_of_socket_memory; 2734 } 2735 2736 void __tcp_close(struct sock *sk, long timeout) 2737 { 2738 struct sk_buff *skb; 2739 int data_was_unread = 0; 2740 int state; 2741 2742 sk->sk_shutdown = SHUTDOWN_MASK; 2743 2744 if (sk->sk_state == TCP_LISTEN) { 2745 tcp_set_state(sk, TCP_CLOSE); 2746 2747 /* Special case. */ 2748 inet_csk_listen_stop(sk); 2749 2750 goto adjudge_to_death; 2751 } 2752 2753 /* We need to flush the recv. buffs. We do this only on the 2754 * descriptor close, not protocol-sourced closes, because the 2755 * reader process may not have drained the data yet! 2756 */ 2757 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { 2758 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq; 2759 2760 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 2761 len--; 2762 data_was_unread += len; 2763 __kfree_skb(skb); 2764 } 2765 2766 sk_mem_reclaim(sk); 2767 2768 /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */ 2769 if (sk->sk_state == TCP_CLOSE) 2770 goto adjudge_to_death; 2771 2772 /* As outlined in RFC 2525, section 2.17, we send a RST here because 2773 * data was lost. To witness the awful effects of the old behavior of 2774 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk 2775 * GET in an FTP client, suspend the process, wait for the client to 2776 * advertise a zero window, then kill -9 the FTP client, wheee... 2777 * Note: timeout is always zero in such a case. 2778 */ 2779 if (unlikely(tcp_sk(sk)->repair)) { 2780 sk->sk_prot->disconnect(sk, 0); 2781 } else if (data_was_unread) { 2782 /* Unread data was tossed, zap the connection. */ 2783 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); 2784 tcp_set_state(sk, TCP_CLOSE); 2785 tcp_send_active_reset(sk, sk->sk_allocation); 2786 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { 2787 /* Check zero linger _after_ checking for unread data. */ 2788 sk->sk_prot->disconnect(sk, 0); 2789 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 2790 } else if (tcp_close_state(sk)) { 2791 /* We FIN if the application ate all the data before 2792 * zapping the connection. 2793 */ 2794 2795 /* RED-PEN. Formally speaking, we have broken TCP state 2796 * machine. 
 * State transitions:
		 *
		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
		 *
		 * are legal only when FIN has been sent (i.e. in window),
		 * rather than queued out of window. Purists blame.
		 *
		 * E.g. the "RFC state" is ESTABLISHED
		 * if the Linux state is FIN-WAIT-1, but FIN is still not sent.
		 *
		 * The visible deviations are that we sometimes enter
		 * TIME-WAIT when it is not really required (harmless), and
		 * sometimes fail to send active resets when the specs require
		 * them (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when they look like
		 * CLOSING or LAST_ACK to Linux).
		 * Probably, I missed some more holelets.
		 * 						--ANK
		 * XXX (TFO) - To start off we don't support SYN+ACK+FIN
		 * in a single packet! (May consider it later but will
		 * probably need API support or TCP_CORK SYN-ACK until
		 * data is written and socket is closed.)
		 */
		tcp_send_fin(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	local_bh_disable();
	bh_lock_sock(sk);
	/* remove backlog if any, without releasing ownership. */
	__release_sock(sk);

	this_cpu_inc(tcp_orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		goto out;

	/* This is a (useful) BSD violation of the RFC. There is a
	 * problem with TCP as specified in that the other end could
	 * keep a socket open forever with no application left at this
	 * end. We use a 1 minute timeout (about the same as BSD) then
	 * kill our end. If they send after that then tough - BUT: long
	 * enough that we won't make the old 4*rto = almost no time -
	 * whoops reset mistake.
	 *
	 * Nope, it was not a mistake. It is really desired behaviour
	 * e.g. on http servers, when such sockets are useless, but
	 * consume significant resources. Let's do it with special
	 * linger2 option.					--ANK
	 */

	if (sk->sk_state == TCP_FIN_WAIT2) {
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->linger2 < 0) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPABORTONLINGER);
		} else {
			const int tmo = tcp_fin_time(sk);

			if (tmo > TCP_TIMEWAIT_LEN) {
				inet_csk_reset_keepalive_timer(sk,
						tmo - TCP_TIMEWAIT_LEN);
			} else {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		sk_mem_reclaim(sk);
		if (tcp_check_oom(sk, 0)) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPABORTONMEMORY);
		} else if (!check_net(sock_net(sk))) {
			/* Not possible to send reset; just close */
			tcp_set_state(sk, TCP_CLOSE);
		}
	}

	if (sk->sk_state == TCP_CLOSE) {
		struct request_sock *req;

		req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
						lockdep_sock_is_held(sk));
		/* We could get here with a non-NULL req if the socket is
		 * aborted (e.g., closed with unread data) before 3WHS
		 * finishes.
		 */
		if (req)
			reqsk_fastopen_remove(sk, req, false);
		inet_csk_destroy_sock(sk);
	}
	/* Otherwise, socket is reprieved until protocol close.
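 */

/* A minimal userspace sketch of the zero-linger abort handled above
 * (the SOCK_LINGER && !sk_lingertime branch): close() then sends a
 * RST rather than a FIN and skips TIME-WAIT (illustrative only,
 * error handling omitted):
 *
 *	struct linger lg = { .l_onoff = 1, .l_linger = 0 };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
 *	close(fd);
 */

/*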
*/ 2901 2902 out: 2903 bh_unlock_sock(sk); 2904 local_bh_enable(); 2905 } 2906 2907 void tcp_close(struct sock *sk, long timeout) 2908 { 2909 lock_sock(sk); 2910 __tcp_close(sk, timeout); 2911 release_sock(sk); 2912 sock_put(sk); 2913 } 2914 EXPORT_SYMBOL(tcp_close); 2915 2916 /* These states need RST on ABORT according to RFC793 */ 2917 2918 static inline bool tcp_need_reset(int state) 2919 { 2920 return (1 << state) & 2921 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | 2922 TCPF_FIN_WAIT2 | TCPF_SYN_RECV); 2923 } 2924 2925 static void tcp_rtx_queue_purge(struct sock *sk) 2926 { 2927 struct rb_node *p = rb_first(&sk->tcp_rtx_queue); 2928 2929 tcp_sk(sk)->highest_sack = NULL; 2930 while (p) { 2931 struct sk_buff *skb = rb_to_skb(p); 2932 2933 p = rb_next(p); 2934 /* Since we are deleting whole queue, no need to 2935 * list_del(&skb->tcp_tsorted_anchor) 2936 */ 2937 tcp_rtx_queue_unlink(skb, sk); 2938 tcp_wmem_free_skb(sk, skb); 2939 } 2940 } 2941 2942 void tcp_write_queue_purge(struct sock *sk) 2943 { 2944 struct sk_buff *skb; 2945 2946 tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 2947 while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { 2948 tcp_skb_tsorted_anchor_cleanup(skb); 2949 tcp_wmem_free_skb(sk, skb); 2950 } 2951 tcp_rtx_queue_purge(sk); 2952 INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); 2953 sk_mem_reclaim(sk); 2954 tcp_clear_all_retrans_hints(tcp_sk(sk)); 2955 tcp_sk(sk)->packets_out = 0; 2956 inet_csk(sk)->icsk_backoff = 0; 2957 } 2958 2959 int tcp_disconnect(struct sock *sk, int flags) 2960 { 2961 struct inet_sock *inet = inet_sk(sk); 2962 struct inet_connection_sock *icsk = inet_csk(sk); 2963 struct tcp_sock *tp = tcp_sk(sk); 2964 int old_state = sk->sk_state; 2965 u32 seq; 2966 2967 if (old_state != TCP_CLOSE) 2968 tcp_set_state(sk, TCP_CLOSE); 2969 2970 /* ABORT function of RFC793 */ 2971 if (old_state == TCP_LISTEN) { 2972 inet_csk_listen_stop(sk); 2973 } else if (unlikely(tp->repair)) { 2974 sk->sk_err = ECONNABORTED; 2975 } else if (tcp_need_reset(old_state) || 2976 (tp->snd_nxt != tp->write_seq && 2977 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { 2978 /* The last check adjusts for discrepancy of Linux wrt. 
RFC 2979 * states 2980 */ 2981 tcp_send_active_reset(sk, gfp_any()); 2982 sk->sk_err = ECONNRESET; 2983 } else if (old_state == TCP_SYN_SENT) 2984 sk->sk_err = ECONNRESET; 2985 2986 tcp_clear_xmit_timers(sk); 2987 __skb_queue_purge(&sk->sk_receive_queue); 2988 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); 2989 WRITE_ONCE(tp->urg_data, 0); 2990 tcp_write_queue_purge(sk); 2991 tcp_fastopen_active_disable_ofo_check(sk); 2992 skb_rbtree_purge(&tp->out_of_order_queue); 2993 2994 inet->inet_dport = 0; 2995 2996 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) 2997 inet_reset_saddr(sk); 2998 2999 sk->sk_shutdown = 0; 3000 sock_reset_flag(sk, SOCK_DONE); 3001 tp->srtt_us = 0; 3002 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); 3003 tp->rcv_rtt_last_tsecr = 0; 3004 3005 seq = tp->write_seq + tp->max_window + 2; 3006 if (!seq) 3007 seq = 1; 3008 WRITE_ONCE(tp->write_seq, seq); 3009 3010 icsk->icsk_backoff = 0; 3011 icsk->icsk_probes_out = 0; 3012 icsk->icsk_probes_tstamp = 0; 3013 icsk->icsk_rto = TCP_TIMEOUT_INIT; 3014 icsk->icsk_rto_min = TCP_RTO_MIN; 3015 icsk->icsk_delack_max = TCP_DELACK_MAX; 3016 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 3017 tcp_snd_cwnd_set(tp, TCP_INIT_CWND); 3018 tp->snd_cwnd_cnt = 0; 3019 tp->window_clamp = 0; 3020 tp->delivered = 0; 3021 tp->delivered_ce = 0; 3022 if (icsk->icsk_ca_ops->release) 3023 icsk->icsk_ca_ops->release(sk); 3024 memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); 3025 icsk->icsk_ca_initialized = 0; 3026 tcp_set_ca_state(sk, TCP_CA_Open); 3027 tp->is_sack_reneg = 0; 3028 tcp_clear_retrans(tp); 3029 tp->total_retrans = 0; 3030 inet_csk_delack_init(sk); 3031 /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 3032 * issue in __tcp_select_window() 3033 */ 3034 icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; 3035 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 3036 __sk_dst_reset(sk); 3037 dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL)); 3038 tcp_saved_syn_free(tp); 3039 tp->compressed_ack = 0; 3040 tp->segs_in = 0; 3041 tp->segs_out = 0; 3042 tp->bytes_sent = 0; 3043 tp->bytes_acked = 0; 3044 tp->bytes_received = 0; 3045 tp->bytes_retrans = 0; 3046 tp->data_segs_in = 0; 3047 tp->data_segs_out = 0; 3048 tp->duplicate_sack[0].start_seq = 0; 3049 tp->duplicate_sack[0].end_seq = 0; 3050 tp->dsack_dups = 0; 3051 tp->reord_seen = 0; 3052 tp->retrans_out = 0; 3053 tp->sacked_out = 0; 3054 tp->tlp_high_seq = 0; 3055 tp->last_oow_ack_time = 0; 3056 /* There's a bubble in the pipe until at least the first ACK. 
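 */

/* A minimal userspace sketch of how tcp_disconnect() is reached:
 * connect() with AF_UNSPEC dissolves the association and returns
 * the socket to a clean, reusable state (illustrative only, error
 * handling omitted):
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &sa, sizeof(sa));
 */

/*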
*/ 3057 tp->app_limited = ~0U; 3058 tp->rack.mstamp = 0; 3059 tp->rack.advanced = 0; 3060 tp->rack.reo_wnd_steps = 1; 3061 tp->rack.last_delivered = 0; 3062 tp->rack.reo_wnd_persist = 0; 3063 tp->rack.dsack_seen = 0; 3064 tp->syn_data_acked = 0; 3065 tp->rx_opt.saw_tstamp = 0; 3066 tp->rx_opt.dsack = 0; 3067 tp->rx_opt.num_sacks = 0; 3068 tp->rcv_ooopack = 0; 3069 3070 3071 /* Clean up fastopen related fields */ 3072 tcp_free_fastopen_req(tp); 3073 inet->defer_connect = 0; 3074 tp->fastopen_client_fail = 0; 3075 3076 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); 3077 3078 if (sk->sk_frag.page) { 3079 put_page(sk->sk_frag.page); 3080 sk->sk_frag.page = NULL; 3081 sk->sk_frag.offset = 0; 3082 } 3083 sk_error_report(sk); 3084 return 0; 3085 } 3086 EXPORT_SYMBOL(tcp_disconnect); 3087 3088 static inline bool tcp_can_repair_sock(const struct sock *sk) 3089 { 3090 return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && 3091 (sk->sk_state != TCP_LISTEN); 3092 } 3093 3094 static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len) 3095 { 3096 struct tcp_repair_window opt; 3097 3098 if (!tp->repair) 3099 return -EPERM; 3100 3101 if (len != sizeof(opt)) 3102 return -EINVAL; 3103 3104 if (copy_from_sockptr(&opt, optbuf, sizeof(opt))) 3105 return -EFAULT; 3106 3107 if (opt.max_window < opt.snd_wnd) 3108 return -EINVAL; 3109 3110 if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd)) 3111 return -EINVAL; 3112 3113 if (after(opt.rcv_wup, tp->rcv_nxt)) 3114 return -EINVAL; 3115 3116 tp->snd_wl1 = opt.snd_wl1; 3117 tp->snd_wnd = opt.snd_wnd; 3118 tp->max_window = opt.max_window; 3119 3120 tp->rcv_wnd = opt.rcv_wnd; 3121 tp->rcv_wup = opt.rcv_wup; 3122 3123 return 0; 3124 } 3125 3126 static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf, 3127 unsigned int len) 3128 { 3129 struct tcp_sock *tp = tcp_sk(sk); 3130 struct tcp_repair_opt opt; 3131 size_t offset = 0; 3132 3133 while (len >= sizeof(opt)) { 3134 if (copy_from_sockptr_offset(&opt, optbuf, offset, sizeof(opt))) 3135 return -EFAULT; 3136 3137 offset += sizeof(opt); 3138 len -= sizeof(opt); 3139 3140 switch (opt.opt_code) { 3141 case TCPOPT_MSS: 3142 tp->rx_opt.mss_clamp = opt.opt_val; 3143 tcp_mtup_init(sk); 3144 break; 3145 case TCPOPT_WINDOW: 3146 { 3147 u16 snd_wscale = opt.opt_val & 0xFFFF; 3148 u16 rcv_wscale = opt.opt_val >> 16; 3149 3150 if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE) 3151 return -EFBIG; 3152 3153 tp->rx_opt.snd_wscale = snd_wscale; 3154 tp->rx_opt.rcv_wscale = rcv_wscale; 3155 tp->rx_opt.wscale_ok = 1; 3156 } 3157 break; 3158 case TCPOPT_SACK_PERM: 3159 if (opt.opt_val != 0) 3160 return -EINVAL; 3161 3162 tp->rx_opt.sack_ok |= TCP_SACK_SEEN; 3163 break; 3164 case TCPOPT_TIMESTAMP: 3165 if (opt.opt_val != 0) 3166 return -EINVAL; 3167 3168 tp->rx_opt.tstamp_ok = 1; 3169 break; 3170 } 3171 } 3172 3173 return 0; 3174 } 3175 3176 DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled); 3177 EXPORT_SYMBOL(tcp_tx_delay_enabled); 3178 3179 static void tcp_enable_tx_delay(void) 3180 { 3181 if (!static_branch_unlikely(&tcp_tx_delay_enabled)) { 3182 static int __tcp_tx_delay_enabled = 0; 3183 3184 if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) { 3185 static_branch_enable(&tcp_tx_delay_enabled); 3186 pr_info("TCP_TX_DELAY enabled\n"); 3187 } 3188 } 3189 } 3190 3191 /* When set indicates to always queue non-full frames. Later the user clears 3192 * this option and we transmit any pending partial frames in the queue. 
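 */

/* A minimal userspace sketch of the cork/uncork pattern described in
 * the surrounding comment (illustrative only: hdr, hdr_len, file_fd
 * and file_len are assumptions, error handling is omitted):
 *
 *	int on = 1, off = 0;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(fd, hdr, hdr_len);	queued, no small segment sent
 *	sendfile(fd, file_fd, NULL, file_len);
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 *
 * Clearing TCP_CORK pushes out any remaining partial frame.
 */

/*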
This is 3193 * meant to be used alongside sendfile() to get properly filled frames when the 3194 * user (for example) must write out headers with a write() call first and then 3195 * use sendfile to send out the data parts. 3196 * 3197 * TCP_CORK can be set together with TCP_NODELAY and it is stronger than 3198 * TCP_NODELAY. 3199 */ 3200 void __tcp_sock_set_cork(struct sock *sk, bool on) 3201 { 3202 struct tcp_sock *tp = tcp_sk(sk); 3203 3204 if (on) { 3205 tp->nonagle |= TCP_NAGLE_CORK; 3206 } else { 3207 tp->nonagle &= ~TCP_NAGLE_CORK; 3208 if (tp->nonagle & TCP_NAGLE_OFF) 3209 tp->nonagle |= TCP_NAGLE_PUSH; 3210 tcp_push_pending_frames(sk); 3211 } 3212 } 3213 3214 void tcp_sock_set_cork(struct sock *sk, bool on) 3215 { 3216 lock_sock(sk); 3217 __tcp_sock_set_cork(sk, on); 3218 release_sock(sk); 3219 } 3220 EXPORT_SYMBOL(tcp_sock_set_cork); 3221 3222 /* TCP_NODELAY is weaker than TCP_CORK, so that this option on corked socket is 3223 * remembered, but it is not activated until cork is cleared. 3224 * 3225 * However, when TCP_NODELAY is set we make an explicit push, which overrides 3226 * even TCP_CORK for currently queued segments. 3227 */ 3228 void __tcp_sock_set_nodelay(struct sock *sk, bool on) 3229 { 3230 if (on) { 3231 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; 3232 tcp_push_pending_frames(sk); 3233 } else { 3234 tcp_sk(sk)->nonagle &= ~TCP_NAGLE_OFF; 3235 } 3236 } 3237 3238 void tcp_sock_set_nodelay(struct sock *sk) 3239 { 3240 lock_sock(sk); 3241 __tcp_sock_set_nodelay(sk, true); 3242 release_sock(sk); 3243 } 3244 EXPORT_SYMBOL(tcp_sock_set_nodelay); 3245 3246 static void __tcp_sock_set_quickack(struct sock *sk, int val) 3247 { 3248 if (!val) { 3249 inet_csk_enter_pingpong_mode(sk); 3250 return; 3251 } 3252 3253 inet_csk_exit_pingpong_mode(sk); 3254 if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && 3255 inet_csk_ack_scheduled(sk)) { 3256 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED; 3257 tcp_cleanup_rbuf(sk, 1); 3258 if (!(val & 1)) 3259 inet_csk_enter_pingpong_mode(sk); 3260 } 3261 } 3262 3263 void tcp_sock_set_quickack(struct sock *sk, int val) 3264 { 3265 lock_sock(sk); 3266 __tcp_sock_set_quickack(sk, val); 3267 release_sock(sk); 3268 } 3269 EXPORT_SYMBOL(tcp_sock_set_quickack); 3270 3271 int tcp_sock_set_syncnt(struct sock *sk, int val) 3272 { 3273 if (val < 1 || val > MAX_TCP_SYNCNT) 3274 return -EINVAL; 3275 3276 lock_sock(sk); 3277 inet_csk(sk)->icsk_syn_retries = val; 3278 release_sock(sk); 3279 return 0; 3280 } 3281 EXPORT_SYMBOL(tcp_sock_set_syncnt); 3282 3283 void tcp_sock_set_user_timeout(struct sock *sk, u32 val) 3284 { 3285 lock_sock(sk); 3286 inet_csk(sk)->icsk_user_timeout = val; 3287 release_sock(sk); 3288 } 3289 EXPORT_SYMBOL(tcp_sock_set_user_timeout); 3290 3291 int tcp_sock_set_keepidle_locked(struct sock *sk, int val) 3292 { 3293 struct tcp_sock *tp = tcp_sk(sk); 3294 3295 if (val < 1 || val > MAX_TCP_KEEPIDLE) 3296 return -EINVAL; 3297 3298 tp->keepalive_time = val * HZ; 3299 if (sock_flag(sk, SOCK_KEEPOPEN) && 3300 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { 3301 u32 elapsed = keepalive_time_elapsed(tp); 3302 3303 if (tp->keepalive_time > elapsed) 3304 elapsed = tp->keepalive_time - elapsed; 3305 else 3306 elapsed = 0; 3307 inet_csk_reset_keepalive_timer(sk, elapsed); 3308 } 3309 3310 return 0; 3311 } 3312 3313 int tcp_sock_set_keepidle(struct sock *sk, int val) 3314 { 3315 int err; 3316 3317 lock_sock(sk); 3318 err = tcp_sock_set_keepidle_locked(sk, val); 3319 release_sock(sk); 3320 return err; 3321 } 3322 
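
/* A minimal userspace sketch of the keepalive knobs implemented by
 * tcp_sock_set_keepidle() above and the keepintvl/keepcnt helpers
 * below (the values are illustrative, error handling is omitted):
 *
 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 *
 * First probe after 60s of idle, then every 10s, abort after five
 * unanswered probes.
 */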
EXPORT_SYMBOL(tcp_sock_set_keepidle); 3323 3324 int tcp_sock_set_keepintvl(struct sock *sk, int val) 3325 { 3326 if (val < 1 || val > MAX_TCP_KEEPINTVL) 3327 return -EINVAL; 3328 3329 lock_sock(sk); 3330 tcp_sk(sk)->keepalive_intvl = val * HZ; 3331 release_sock(sk); 3332 return 0; 3333 } 3334 EXPORT_SYMBOL(tcp_sock_set_keepintvl); 3335 3336 int tcp_sock_set_keepcnt(struct sock *sk, int val) 3337 { 3338 if (val < 1 || val > MAX_TCP_KEEPCNT) 3339 return -EINVAL; 3340 3341 lock_sock(sk); 3342 tcp_sk(sk)->keepalive_probes = val; 3343 release_sock(sk); 3344 return 0; 3345 } 3346 EXPORT_SYMBOL(tcp_sock_set_keepcnt); 3347 3348 int tcp_set_window_clamp(struct sock *sk, int val) 3349 { 3350 struct tcp_sock *tp = tcp_sk(sk); 3351 3352 if (!val) { 3353 if (sk->sk_state != TCP_CLOSE) 3354 return -EINVAL; 3355 tp->window_clamp = 0; 3356 } else { 3357 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? 3358 SOCK_MIN_RCVBUF / 2 : val; 3359 tp->rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp); 3360 } 3361 return 0; 3362 } 3363 3364 /* 3365 * Socket option code for TCP. 3366 */ 3367 static int do_tcp_setsockopt(struct sock *sk, int level, int optname, 3368 sockptr_t optval, unsigned int optlen) 3369 { 3370 struct tcp_sock *tp = tcp_sk(sk); 3371 struct inet_connection_sock *icsk = inet_csk(sk); 3372 struct net *net = sock_net(sk); 3373 int val; 3374 int err = 0; 3375 3376 /* These are data/string values, all the others are ints */ 3377 switch (optname) { 3378 case TCP_CONGESTION: { 3379 char name[TCP_CA_NAME_MAX]; 3380 3381 if (optlen < 1) 3382 return -EINVAL; 3383 3384 val = strncpy_from_sockptr(name, optval, 3385 min_t(long, TCP_CA_NAME_MAX-1, optlen)); 3386 if (val < 0) 3387 return -EFAULT; 3388 name[val] = 0; 3389 3390 lock_sock(sk); 3391 err = tcp_set_congestion_control(sk, name, true, 3392 ns_capable(sock_net(sk)->user_ns, 3393 CAP_NET_ADMIN)); 3394 release_sock(sk); 3395 return err; 3396 } 3397 case TCP_ULP: { 3398 char name[TCP_ULP_NAME_MAX]; 3399 3400 if (optlen < 1) 3401 return -EINVAL; 3402 3403 val = strncpy_from_sockptr(name, optval, 3404 min_t(long, TCP_ULP_NAME_MAX - 1, 3405 optlen)); 3406 if (val < 0) 3407 return -EFAULT; 3408 name[val] = 0; 3409 3410 lock_sock(sk); 3411 err = tcp_set_ulp(sk, name); 3412 release_sock(sk); 3413 return err; 3414 } 3415 case TCP_FASTOPEN_KEY: { 3416 __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH]; 3417 __u8 *backup_key = NULL; 3418 3419 /* Allow a backup key as well to facilitate key rotation 3420 * First key is the active one. 3421 */ 3422 if (optlen != TCP_FASTOPEN_KEY_LENGTH && 3423 optlen != TCP_FASTOPEN_KEY_BUF_LENGTH) 3424 return -EINVAL; 3425 3426 if (copy_from_sockptr(key, optval, optlen)) 3427 return -EFAULT; 3428 3429 if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH) 3430 backup_key = key + TCP_FASTOPEN_KEY_LENGTH; 3431 3432 return tcp_fastopen_reset_cipher(net, sk, key, backup_key); 3433 } 3434 default: 3435 /* fallthru */ 3436 break; 3437 } 3438 3439 if (optlen < sizeof(int)) 3440 return -EINVAL; 3441 3442 if (copy_from_sockptr(&val, optval, sizeof(val))) 3443 return -EFAULT; 3444 3445 lock_sock(sk); 3446 3447 switch (optname) { 3448 case TCP_MAXSEG: 3449 /* Values greater than interface MTU won't take effect. 
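 */

/* A minimal userspace sketch for this option (illustrative only,
 * error handling omitted); TCP_MAXSEG is normally set before the
 * connection is established:
 *
 *	int mss = 1400;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss));
 *	connect(fd, addr, addrlen);
 */

/*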
However 3450 * at the point when this call is done we typically don't yet 3451 * know which interface is going to be used 3452 */ 3453 if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) { 3454 err = -EINVAL; 3455 break; 3456 } 3457 tp->rx_opt.user_mss = val; 3458 break; 3459 3460 case TCP_NODELAY: 3461 __tcp_sock_set_nodelay(sk, val); 3462 break; 3463 3464 case TCP_THIN_LINEAR_TIMEOUTS: 3465 if (val < 0 || val > 1) 3466 err = -EINVAL; 3467 else 3468 tp->thin_lto = val; 3469 break; 3470 3471 case TCP_THIN_DUPACK: 3472 if (val < 0 || val > 1) 3473 err = -EINVAL; 3474 break; 3475 3476 case TCP_REPAIR: 3477 if (!tcp_can_repair_sock(sk)) 3478 err = -EPERM; 3479 else if (val == TCP_REPAIR_ON) { 3480 tp->repair = 1; 3481 sk->sk_reuse = SK_FORCE_REUSE; 3482 tp->repair_queue = TCP_NO_QUEUE; 3483 } else if (val == TCP_REPAIR_OFF) { 3484 tp->repair = 0; 3485 sk->sk_reuse = SK_NO_REUSE; 3486 tcp_send_window_probe(sk); 3487 } else if (val == TCP_REPAIR_OFF_NO_WP) { 3488 tp->repair = 0; 3489 sk->sk_reuse = SK_NO_REUSE; 3490 } else 3491 err = -EINVAL; 3492 3493 break; 3494 3495 case TCP_REPAIR_QUEUE: 3496 if (!tp->repair) 3497 err = -EPERM; 3498 else if ((unsigned int)val < TCP_QUEUES_NR) 3499 tp->repair_queue = val; 3500 else 3501 err = -EINVAL; 3502 break; 3503 3504 case TCP_QUEUE_SEQ: 3505 if (sk->sk_state != TCP_CLOSE) { 3506 err = -EPERM; 3507 } else if (tp->repair_queue == TCP_SEND_QUEUE) { 3508 if (!tcp_rtx_queue_empty(sk)) 3509 err = -EPERM; 3510 else 3511 WRITE_ONCE(tp->write_seq, val); 3512 } else if (tp->repair_queue == TCP_RECV_QUEUE) { 3513 if (tp->rcv_nxt != tp->copied_seq) { 3514 err = -EPERM; 3515 } else { 3516 WRITE_ONCE(tp->rcv_nxt, val); 3517 WRITE_ONCE(tp->copied_seq, val); 3518 } 3519 } else { 3520 err = -EINVAL; 3521 } 3522 break; 3523 3524 case TCP_REPAIR_OPTIONS: 3525 if (!tp->repair) 3526 err = -EINVAL; 3527 else if (sk->sk_state == TCP_ESTABLISHED) 3528 err = tcp_repair_options_est(sk, optval, optlen); 3529 else 3530 err = -EPERM; 3531 break; 3532 3533 case TCP_CORK: 3534 __tcp_sock_set_cork(sk, val); 3535 break; 3536 3537 case TCP_KEEPIDLE: 3538 err = tcp_sock_set_keepidle_locked(sk, val); 3539 break; 3540 case TCP_KEEPINTVL: 3541 if (val < 1 || val > MAX_TCP_KEEPINTVL) 3542 err = -EINVAL; 3543 else 3544 tp->keepalive_intvl = val * HZ; 3545 break; 3546 case TCP_KEEPCNT: 3547 if (val < 1 || val > MAX_TCP_KEEPCNT) 3548 err = -EINVAL; 3549 else 3550 tp->keepalive_probes = val; 3551 break; 3552 case TCP_SYNCNT: 3553 if (val < 1 || val > MAX_TCP_SYNCNT) 3554 err = -EINVAL; 3555 else 3556 icsk->icsk_syn_retries = val; 3557 break; 3558 3559 case TCP_SAVE_SYN: 3560 /* 0: disable, 1: enable, 2: start from ether_header */ 3561 if (val < 0 || val > 2) 3562 err = -EINVAL; 3563 else 3564 tp->save_syn = val; 3565 break; 3566 3567 case TCP_LINGER2: 3568 if (val < 0) 3569 tp->linger2 = -1; 3570 else if (val > TCP_FIN_TIMEOUT_MAX / HZ) 3571 tp->linger2 = TCP_FIN_TIMEOUT_MAX; 3572 else 3573 tp->linger2 = val * HZ; 3574 break; 3575 3576 case TCP_DEFER_ACCEPT: 3577 /* Translate value in seconds to number of retransmits */ 3578 icsk->icsk_accept_queue.rskq_defer_accept = 3579 secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, 3580 TCP_RTO_MAX / HZ); 3581 break; 3582 3583 case TCP_WINDOW_CLAMP: 3584 err = tcp_set_window_clamp(sk, val); 3585 break; 3586 3587 case TCP_QUICKACK: 3588 __tcp_sock_set_quickack(sk, val); 3589 break; 3590 3591 #ifdef CONFIG_TCP_MD5SIG 3592 case TCP_MD5SIG: 3593 case TCP_MD5SIG_EXT: 3594 err = tp->af_specific->md5_parse(sk, optname, optval, optlen); 3595 break; 3596 #endif 
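
	/* A minimal userspace sketch for the option handled below
	 * (illustrative only, error handling omitted): abort with
	 * ETIMEDOUT if transmitted data stays unacknowledged for 30s.
	 *
	 *	unsigned int tmo_ms = 30 * 1000;
	 *
	 *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &tmo_ms,
	 *		   sizeof(tmo_ms));
	 */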
3597 case TCP_USER_TIMEOUT: 3598 /* Cap the max time in ms TCP will retry or probe the window 3599 * before giving up and aborting (ETIMEDOUT) a connection. 3600 */ 3601 if (val < 0) 3602 err = -EINVAL; 3603 else 3604 icsk->icsk_user_timeout = val; 3605 break; 3606 3607 case TCP_FASTOPEN: 3608 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | 3609 TCPF_LISTEN))) { 3610 tcp_fastopen_init_key_once(net); 3611 3612 fastopen_queue_tune(sk, val); 3613 } else { 3614 err = -EINVAL; 3615 } 3616 break; 3617 case TCP_FASTOPEN_CONNECT: 3618 if (val > 1 || val < 0) { 3619 err = -EINVAL; 3620 } else if (net->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) { 3621 if (sk->sk_state == TCP_CLOSE) 3622 tp->fastopen_connect = val; 3623 else 3624 err = -EINVAL; 3625 } else { 3626 err = -EOPNOTSUPP; 3627 } 3628 break; 3629 case TCP_FASTOPEN_NO_COOKIE: 3630 if (val > 1 || val < 0) 3631 err = -EINVAL; 3632 else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 3633 err = -EINVAL; 3634 else 3635 tp->fastopen_no_cookie = val; 3636 break; 3637 case TCP_TIMESTAMP: 3638 if (!tp->repair) 3639 err = -EPERM; 3640 else 3641 tp->tsoffset = val - tcp_time_stamp_raw(); 3642 break; 3643 case TCP_REPAIR_WINDOW: 3644 err = tcp_repair_set_window(tp, optval, optlen); 3645 break; 3646 case TCP_NOTSENT_LOWAT: 3647 tp->notsent_lowat = val; 3648 sk->sk_write_space(sk); 3649 break; 3650 case TCP_INQ: 3651 if (val > 1 || val < 0) 3652 err = -EINVAL; 3653 else 3654 tp->recvmsg_inq = val; 3655 break; 3656 case TCP_TX_DELAY: 3657 if (val) 3658 tcp_enable_tx_delay(); 3659 tp->tcp_tx_delay = val; 3660 break; 3661 default: 3662 err = -ENOPROTOOPT; 3663 break; 3664 } 3665 3666 release_sock(sk); 3667 return err; 3668 } 3669 3670 int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, 3671 unsigned int optlen) 3672 { 3673 const struct inet_connection_sock *icsk = inet_csk(sk); 3674 3675 if (level != SOL_TCP) 3676 return icsk->icsk_af_ops->setsockopt(sk, level, optname, 3677 optval, optlen); 3678 return do_tcp_setsockopt(sk, level, optname, optval, optlen); 3679 } 3680 EXPORT_SYMBOL(tcp_setsockopt); 3681 3682 static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, 3683 struct tcp_info *info) 3684 { 3685 u64 stats[__TCP_CHRONO_MAX], total = 0; 3686 enum tcp_chrono i; 3687 3688 for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) { 3689 stats[i] = tp->chrono_stat[i - 1]; 3690 if (i == tp->chrono_type) 3691 stats[i] += tcp_jiffies32 - tp->chrono_start; 3692 stats[i] *= USEC_PER_SEC / HZ; 3693 total += stats[i]; 3694 } 3695 3696 info->tcpi_busy_time = total; 3697 info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED]; 3698 info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED]; 3699 } 3700 3701 /* Return information about state of tcp endpoint in API format. */ 3702 void tcp_get_info(struct sock *sk, struct tcp_info *info) 3703 { 3704 const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ 3705 const struct inet_connection_sock *icsk = inet_csk(sk); 3706 unsigned long rate; 3707 u32 now; 3708 u64 rate64; 3709 bool slow; 3710 3711 memset(info, 0, sizeof(*info)); 3712 if (sk->sk_type != SOCK_STREAM) 3713 return; 3714 3715 info->tcpi_state = inet_sk_state_load(sk); 3716 3717 /* Report meaningful fields for all TCP states, including listeners */ 3718 rate = READ_ONCE(sk->sk_pacing_rate); 3719 rate64 = (rate != ~0UL) ? rate : ~0ULL; 3720 info->tcpi_pacing_rate = rate64; 3721 3722 rate = READ_ONCE(sk->sk_max_pacing_rate); 3723 rate64 = (rate != ~0UL) ? 
rate : ~0ULL; 3724 info->tcpi_max_pacing_rate = rate64; 3725 3726 info->tcpi_reordering = tp->reordering; 3727 info->tcpi_snd_cwnd = tcp_snd_cwnd(tp); 3728 3729 if (info->tcpi_state == TCP_LISTEN) { 3730 /* listeners aliased fields : 3731 * tcpi_unacked -> Number of children ready for accept() 3732 * tcpi_sacked -> max backlog 3733 */ 3734 info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog); 3735 info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog); 3736 return; 3737 } 3738 3739 slow = lock_sock_fast(sk); 3740 3741 info->tcpi_ca_state = icsk->icsk_ca_state; 3742 info->tcpi_retransmits = icsk->icsk_retransmits; 3743 info->tcpi_probes = icsk->icsk_probes_out; 3744 info->tcpi_backoff = icsk->icsk_backoff; 3745 3746 if (tp->rx_opt.tstamp_ok) 3747 info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 3748 if (tcp_is_sack(tp)) 3749 info->tcpi_options |= TCPI_OPT_SACK; 3750 if (tp->rx_opt.wscale_ok) { 3751 info->tcpi_options |= TCPI_OPT_WSCALE; 3752 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; 3753 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; 3754 } 3755 3756 if (tp->ecn_flags & TCP_ECN_OK) 3757 info->tcpi_options |= TCPI_OPT_ECN; 3758 if (tp->ecn_flags & TCP_ECN_SEEN) 3759 info->tcpi_options |= TCPI_OPT_ECN_SEEN; 3760 if (tp->syn_data_acked) 3761 info->tcpi_options |= TCPI_OPT_SYN_DATA; 3762 3763 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); 3764 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); 3765 info->tcpi_snd_mss = tp->mss_cache; 3766 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; 3767 3768 info->tcpi_unacked = tp->packets_out; 3769 info->tcpi_sacked = tp->sacked_out; 3770 3771 info->tcpi_lost = tp->lost_out; 3772 info->tcpi_retrans = tp->retrans_out; 3773 3774 now = tcp_jiffies32; 3775 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); 3776 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); 3777 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); 3778 3779 info->tcpi_pmtu = icsk->icsk_pmtu_cookie; 3780 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; 3781 info->tcpi_rtt = tp->srtt_us >> 3; 3782 info->tcpi_rttvar = tp->mdev_us >> 2; 3783 info->tcpi_snd_ssthresh = tp->snd_ssthresh; 3784 info->tcpi_advmss = tp->advmss; 3785 3786 info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3; 3787 info->tcpi_rcv_space = tp->rcvq_space.space; 3788 3789 info->tcpi_total_retrans = tp->total_retrans; 3790 3791 info->tcpi_bytes_acked = tp->bytes_acked; 3792 info->tcpi_bytes_received = tp->bytes_received; 3793 info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt); 3794 tcp_get_info_chrono_stats(tp, info); 3795 3796 info->tcpi_segs_out = tp->segs_out; 3797 3798 /* segs_in and data_segs_in can be updated from tcp_segs_in() from BH */ 3799 info->tcpi_segs_in = READ_ONCE(tp->segs_in); 3800 info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in); 3801 3802 info->tcpi_min_rtt = tcp_min_rtt(tp); 3803 info->tcpi_data_segs_out = tp->data_segs_out; 3804 3805 info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 
1 : 0; 3806 rate64 = tcp_compute_delivery_rate(tp); 3807 if (rate64) 3808 info->tcpi_delivery_rate = rate64; 3809 info->tcpi_delivered = tp->delivered; 3810 info->tcpi_delivered_ce = tp->delivered_ce; 3811 info->tcpi_bytes_sent = tp->bytes_sent; 3812 info->tcpi_bytes_retrans = tp->bytes_retrans; 3813 info->tcpi_dsack_dups = tp->dsack_dups; 3814 info->tcpi_reord_seen = tp->reord_seen; 3815 info->tcpi_rcv_ooopack = tp->rcv_ooopack; 3816 info->tcpi_snd_wnd = tp->snd_wnd; 3817 info->tcpi_fastopen_client_fail = tp->fastopen_client_fail; 3818 unlock_sock_fast(sk, slow); 3819 } 3820 EXPORT_SYMBOL_GPL(tcp_get_info); 3821 3822 static size_t tcp_opt_stats_get_size(void) 3823 { 3824 return 3825 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */ 3826 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */ 3827 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */ 3828 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */ 3829 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */ 3830 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */ 3831 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */ 3832 nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */ 3833 nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */ 3834 nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */ 3835 nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */ 3836 nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */ 3837 nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */ 3838 nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */ 3839 nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */ 3840 nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */ 3841 nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */ 3842 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */ 3843 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */ 3844 nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */ 3845 nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */ 3846 nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */ 3847 nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */ 3848 nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */ 3849 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT */ 3850 nla_total_size(sizeof(u8)) + /* TCP_NLA_TTL */ 3851 0; 3852 } 3853 3854 /* Returns TTL or hop limit of an incoming packet from skb. 
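 */

/* A minimal userspace sketch of the TCP_INFO getsockopt() served by
 * do_tcp_getsockopt() below (illustrative only, error handling
 * omitted); the kernel truncates the struct to the caller's length,
 * so old binaries keep working as fields are appended:
 *
 *	struct tcp_info ti;
 *	socklen_t ti_len = sizeof(ti);
 *
 *	getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &ti_len);
 *	printf("rtt %uus cwnd %u retrans %u\n",
 *	       ti.tcpi_rtt, ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
 */

/*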
*/ 3855 static u8 tcp_skb_ttl_or_hop_limit(const struct sk_buff *skb) 3856 { 3857 if (skb->protocol == htons(ETH_P_IP)) 3858 return ip_hdr(skb)->ttl; 3859 else if (skb->protocol == htons(ETH_P_IPV6)) 3860 return ipv6_hdr(skb)->hop_limit; 3861 else 3862 return 0; 3863 } 3864 3865 struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, 3866 const struct sk_buff *orig_skb, 3867 const struct sk_buff *ack_skb) 3868 { 3869 const struct tcp_sock *tp = tcp_sk(sk); 3870 struct sk_buff *stats; 3871 struct tcp_info info; 3872 unsigned long rate; 3873 u64 rate64; 3874 3875 stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC); 3876 if (!stats) 3877 return NULL; 3878 3879 tcp_get_info_chrono_stats(tp, &info); 3880 nla_put_u64_64bit(stats, TCP_NLA_BUSY, 3881 info.tcpi_busy_time, TCP_NLA_PAD); 3882 nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED, 3883 info.tcpi_rwnd_limited, TCP_NLA_PAD); 3884 nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED, 3885 info.tcpi_sndbuf_limited, TCP_NLA_PAD); 3886 nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT, 3887 tp->data_segs_out, TCP_NLA_PAD); 3888 nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS, 3889 tp->total_retrans, TCP_NLA_PAD); 3890 3891 rate = READ_ONCE(sk->sk_pacing_rate); 3892 rate64 = (rate != ~0UL) ? rate : ~0ULL; 3893 nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD); 3894 3895 rate64 = tcp_compute_delivery_rate(tp); 3896 nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD); 3897 3898 nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp)); 3899 nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering); 3900 nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp)); 3901 3902 nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits); 3903 nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited); 3904 nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh); 3905 nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered); 3906 nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce); 3907 3908 nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una); 3909 nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state); 3910 3911 nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent, 3912 TCP_NLA_PAD); 3913 nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans, 3914 TCP_NLA_PAD); 3915 nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups); 3916 nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen); 3917 nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3); 3918 nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash); 3919 nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT, 3920 max_t(int, 0, tp->write_seq - tp->snd_nxt)); 3921 nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns, 3922 TCP_NLA_PAD); 3923 if (ack_skb) 3924 nla_put_u8(stats, TCP_NLA_TTL, 3925 tcp_skb_ttl_or_hop_limit(ack_skb)); 3926 3927 return stats; 3928 } 3929 3930 static int do_tcp_getsockopt(struct sock *sk, int level, 3931 int optname, char __user *optval, int __user *optlen) 3932 { 3933 struct inet_connection_sock *icsk = inet_csk(sk); 3934 struct tcp_sock *tp = tcp_sk(sk); 3935 struct net *net = sock_net(sk); 3936 int val, len; 3937 3938 if (get_user(len, optlen)) 3939 return -EFAULT; 3940 3941 len = min_t(unsigned int, len, sizeof(int)); 3942 3943 if (len < 0) 3944 return -EINVAL; 3945 3946 switch (optname) { 3947 case TCP_MAXSEG: 3948 val = tp->mss_cache; 3949 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 3950 val = tp->rx_opt.user_mss; 3951 if (tp->repair) 3952 val = 

static int do_tcp_getsockopt(struct sock *sk, int level,
		int optname, char __user *optval, int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->rx_opt.user_mss;
		if (tp->repair)
			val = tp->rx_opt.mss_clamp;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle&TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle&TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = keepalive_time_when(tp) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = keepalive_intvl_when(tp) / HZ;
		break;
	case TCP_KEEPCNT:
		val = keepalive_probes(tp);
		break;
	case TCP_SYNCNT:
		val = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : net->ipv4.sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;

		if (get_user(len, optlen))
			return -EFAULT;

		tcp_get_info(sk, &info);

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_CC_INFO: {
		const struct tcp_congestion_ops *ca_ops;
		union tcp_cc_info info;
		size_t sz = 0;
		int attr;

		if (get_user(len, optlen))
			return -EFAULT;

		ca_ops = icsk->icsk_ca_ops;
		if (ca_ops && ca_ops->get_info)
			sz = ca_ops->get_info(sk, ~0U, &attr, &info);

		len = min_t(unsigned int, len, sz);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !inet_csk_in_pingpong_mode(sk);
		break;

	case TCP_CONGESTION:
		if (get_user(len, optlen))
			return -EFAULT;
		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
			return -EFAULT;
		return 0;
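
	/*
	 * Illustrative sketch (not kernel code): string-valued options
	 * such as TCP_CONGESTION above (and TCP_ULP below) follow the
	 * usual getsockopt() length convention: the kernel clamps the
	 * caller's length, writes the clamped value back through optlen,
	 * and copies that many bytes. For example, with "fd" an assumed
	 * TCP socket:
	 *
	 *	char name[16];			// TCP_CA_NAME_MAX is 16
	 *	socklen_t len = sizeof(name);
	 *
	 *	if (!getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len))
	 *		printf("congestion control: %.*s\n", (int)len, name);
	 */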

	case TCP_ULP:
		if (get_user(len, optlen))
			return -EFAULT;
		len = min_t(unsigned int, len, TCP_ULP_NAME_MAX);
		if (!icsk->icsk_ulp_ops) {
			if (put_user(0, optlen))
				return -EFAULT;
			return 0;
		}
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, icsk->icsk_ulp_ops->name, len))
			return -EFAULT;
		return 0;

	case TCP_FASTOPEN_KEY: {
		u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)];
		unsigned int key_len;

		if (get_user(len, optlen))
			return -EFAULT;

		key_len = tcp_fastopen_get_cipher(net, icsk, key) *
				TCP_FASTOPEN_KEY_LENGTH;
		len = min_t(unsigned int, len, key_len);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, key, len))
			return -EFAULT;
		return 0;
	}
	case TCP_THIN_LINEAR_TIMEOUTS:
		val = tp->thin_lto;
		break;

	case TCP_THIN_DUPACK:
		val = 0;
		break;

	case TCP_REPAIR:
		val = tp->repair;
		break;

	case TCP_REPAIR_QUEUE:
		if (tp->repair)
			val = tp->repair_queue;
		else
			return -EINVAL;
		break;

	case TCP_REPAIR_WINDOW: {
		struct tcp_repair_window opt;

		if (get_user(len, optlen))
			return -EFAULT;

		if (len != sizeof(opt))
			return -EINVAL;

		if (!tp->repair)
			return -EPERM;

		opt.snd_wl1	= tp->snd_wl1;
		opt.snd_wnd	= tp->snd_wnd;
		opt.max_window	= tp->max_window;
		opt.rcv_wnd	= tp->rcv_wnd;
		opt.rcv_wup	= tp->rcv_wup;

		if (copy_to_user(optval, &opt, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUEUE_SEQ:
		if (tp->repair_queue == TCP_SEND_QUEUE)
			val = tp->write_seq;
		else if (tp->repair_queue == TCP_RECV_QUEUE)
			val = tp->rcv_nxt;
		else
			return -EINVAL;
		break;

	case TCP_USER_TIMEOUT:
		val = icsk->icsk_user_timeout;
		break;

	case TCP_FASTOPEN:
		val = icsk->icsk_accept_queue.fastopenq.max_qlen;
		break;

	case TCP_FASTOPEN_CONNECT:
		val = tp->fastopen_connect;
		break;

	case TCP_FASTOPEN_NO_COOKIE:
		val = tp->fastopen_no_cookie;
		break;

	case TCP_TX_DELAY:
		val = tp->tcp_tx_delay;
		break;

	case TCP_TIMESTAMP:
		val = tcp_time_stamp_raw() + tp->tsoffset;
		break;
	case TCP_NOTSENT_LOWAT:
		val = tp->notsent_lowat;
		break;
	case TCP_INQ:
		val = tp->recvmsg_inq;
		break;
	case TCP_SAVE_SYN:
		val = tp->save_syn;
		break;
	case TCP_SAVED_SYN: {
		if (get_user(len, optlen))
			return -EFAULT;

		lock_sock(sk);
		if (tp->saved_syn) {
			if (len < tcp_saved_syn_len(tp->saved_syn)) {
				if (put_user(tcp_saved_syn_len(tp->saved_syn),
					     optlen)) {
					release_sock(sk);
					return -EFAULT;
				}
				release_sock(sk);
				return -EINVAL;
			}
			len = tcp_saved_syn_len(tp->saved_syn);
			if (put_user(len, optlen)) {
				release_sock(sk);
				return -EFAULT;
			}
			if (copy_to_user(optval, tp->saved_syn->data, len)) {
				release_sock(sk);
				return -EFAULT;
			}
			tcp_saved_syn_free(tp);
			release_sock(sk);
		} else {
			release_sock(sk);
			len = 0;
			if (put_user(len, optlen))
				return -EFAULT;
		}
		return 0;
	}
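
	/*
	 * Illustrative sketch (not kernel code): the TCP_SAVED_SYN case
	 * above pairs with TCP_SAVE_SYN. Userspace arms saving on the
	 * listener and reads the SYN's headers once, on the accepted
	 * socket; the buffer size here is an assumption, and per the code
	 * above a too-small buffer fails with EINVAL after the required
	 * length is written back through optlen:
	 *
	 *	int one = 1;
	 *	char syn[512];		// assumed large enough
	 *	socklen_t len = sizeof(syn);
	 *
	 *	setsockopt(listen_fd, IPPROTO_TCP, TCP_SAVE_SYN,
	 *		   &one, sizeof(one));
	 *	...
	 *	getsockopt(conn_fd, IPPROTO_TCP, TCP_SAVED_SYN, syn, &len);
	 *	// The saved headers are freed after a successful read,
	 *	// so a second call returns len == 0.
	 */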
#ifdef CONFIG_MMU
	case TCP_ZEROCOPY_RECEIVE: {
		struct scm_timestamping_internal tss;
		struct tcp_zerocopy_receive zc = {};
		int err;

		if (get_user(len, optlen))
			return -EFAULT;
		if (len < 0 ||
		    len < offsetofend(struct tcp_zerocopy_receive, length))
			return -EINVAL;
		if (unlikely(len > sizeof(zc))) {
			err = check_zeroed_user(optval + sizeof(zc),
						len - sizeof(zc));
			if (err < 1)
				return err == 0 ? -EINVAL : err;
			len = sizeof(zc);
			if (put_user(len, optlen))
				return -EFAULT;
		}
		if (copy_from_user(&zc, optval, len))
			return -EFAULT;
		if (zc.reserved)
			return -EINVAL;
		if (zc.msg_flags & ~(TCP_VALID_ZC_MSG_FLAGS))
			return -EINVAL;
		lock_sock(sk);
		err = tcp_zerocopy_receive(sk, &zc, &tss);
		err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname,
							  &zc, &len, err);
		release_sock(sk);
		if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags))
			goto zerocopy_rcv_cmsg;
		switch (len) {
		case offsetofend(struct tcp_zerocopy_receive, msg_flags):
			goto zerocopy_rcv_cmsg;
		case offsetofend(struct tcp_zerocopy_receive, msg_controllen):
		case offsetofend(struct tcp_zerocopy_receive, msg_control):
		case offsetofend(struct tcp_zerocopy_receive, flags):
		case offsetofend(struct tcp_zerocopy_receive, copybuf_len):
		case offsetofend(struct tcp_zerocopy_receive, copybuf_address):
		case offsetofend(struct tcp_zerocopy_receive, err):
			goto zerocopy_rcv_sk_err;
		case offsetofend(struct tcp_zerocopy_receive, inq):
			goto zerocopy_rcv_inq;
		case offsetofend(struct tcp_zerocopy_receive, length):
		default:
			goto zerocopy_rcv_out;
		}
zerocopy_rcv_cmsg:
		if (zc.msg_flags & TCP_CMSG_TS)
			tcp_zc_finalize_rx_tstamp(sk, &zc, &tss);
		else
			zc.msg_flags = 0;
zerocopy_rcv_sk_err:
		if (!err)
			zc.err = sock_error(sk);
zerocopy_rcv_inq:
		zc.inq = tcp_inq_hint(sk);
zerocopy_rcv_out:
		if (!err && copy_to_user(optval, &zc, len))
			err = -EFAULT;
		return err;
	}
#endif
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
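
/*
 * Illustrative sketch (not kernel code, details elided): the
 * TCP_ZEROCOPY_RECEIVE case above is driven from userspace by mmap()ing
 * the socket and describing the mapping in struct tcp_zerocopy_receive;
 * "map_len" is an assumed page-multiple receive window:
 *
 *	struct tcp_zerocopy_receive zc = {};
 *	socklen_t zc_len = sizeof(zc);
 *	void *addr = mmap(NULL, map_len, PROT_READ, MAP_SHARED, fd, 0);
 *
 *	zc.address = (__u64)(unsigned long)addr;
 *	zc.length = map_len;
 *	getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zc_len);
 *	// On return, zc.length holds the bytes now mapped at addr;
 *	// zc.recv_skip_hint bytes, if any, must be read with recv().
 */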

bool tcp_bpf_bypass_getsockopt(int level, int optname)
{
	/* do_tcp_getsockopt() has an optimized getsockopt() path for
	 * TCP_ZEROCOPY_RECEIVE that avoids taking an extra socket lock.
	 */
	if (level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE)
		return true;

	return false;
}
EXPORT_SYMBOL(tcp_bpf_bypass_getsockopt);

int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(tcp_getsockopt);

#ifdef CONFIG_TCP_MD5SIG
static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
static DEFINE_MUTEX(tcp_md5sig_mutex);
static bool tcp_md5sig_pool_populated = false;

static void __tcp_alloc_md5sig_pool(void)
{
	struct crypto_ahash *hash;
	int cpu;

	hash = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash))
		return;

	for_each_possible_cpu(cpu) {
		void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch;
		struct ahash_request *req;

		if (!scratch) {
			scratch = kmalloc_node(sizeof(union tcp_md5sum_block) +
					       sizeof(struct tcphdr),
					       GFP_KERNEL,
					       cpu_to_node(cpu));
			if (!scratch)
				return;
			per_cpu(tcp_md5sig_pool, cpu).scratch = scratch;
		}
		if (per_cpu(tcp_md5sig_pool, cpu).md5_req)
			continue;

		req = ahash_request_alloc(hash, GFP_KERNEL);
		if (!req)
			return;

		ahash_request_set_callback(req, 0, NULL, NULL);

		per_cpu(tcp_md5sig_pool, cpu).md5_req = req;
	}
	/* Before setting tcp_md5sig_pool_populated, we must commit all
	 * writes to memory. See the smp_rmb() in tcp_get_md5sig_pool().
	 */
	smp_wmb();
	tcp_md5sig_pool_populated = true;
}

bool tcp_alloc_md5sig_pool(void)
{
	if (unlikely(!tcp_md5sig_pool_populated)) {
		mutex_lock(&tcp_md5sig_mutex);

		if (!tcp_md5sig_pool_populated) {
			__tcp_alloc_md5sig_pool();
			if (tcp_md5sig_pool_populated)
				static_branch_inc(&tcp_md5_needed);
		}

		mutex_unlock(&tcp_md5sig_mutex);
	}
	return tcp_md5sig_pool_populated;
}
EXPORT_SYMBOL(tcp_alloc_md5sig_pool);

/**
 * tcp_get_md5sig_pool - get md5sig_pool for this user
 *
 * We use a percpu structure, so on success we return with preemption
 * and BH disabled, to make sure no other thread or softirq handler
 * will try to use the same context.
 */
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
	local_bh_disable();

	if (tcp_md5sig_pool_populated) {
		/* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
		smp_rmb();
		return this_cpu_ptr(&tcp_md5sig_pool);
	}
	local_bh_enable();
	return NULL;
}
EXPORT_SYMBOL(tcp_get_md5sig_pool);
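
/*
 * Illustrative sketch of the typical calling pattern (it mirrors callers
 * such as tcp_v4_md5_hash_skb() in tcp_ipv4.c; "key", "skb", "header_len"
 * and "md5_hash" are assumed): the pool is used bracketed by
 * tcp_get_md5sig_pool() and tcp_put_md5sig_pool(), which re-enables BH:
 *
 *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *
 *	if (hp) {
 *		crypto_ahash_init(hp->md5_req);
 *		tcp_md5_hash_skb_data(hp, skb, header_len);	// below
 *		tcp_md5_hash_key(hp, key);			// below
 *		ahash_request_set_crypt(hp->md5_req, NULL, md5_hash, 0);
 *		crypto_ahash_final(hp->md5_req);
 *		tcp_put_md5sig_pool();
 *	}
 */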

int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
			  const struct sk_buff *skb, unsigned int header_len)
{
	struct scatterlist sg;
	const struct tcphdr *tp = tcp_hdr(skb);
	struct ahash_request *req = hp->md5_req;
	unsigned int i;
	const unsigned int head_data_len = skb_headlen(skb) > header_len ?
					   skb_headlen(skb) - header_len : 0;
	const struct skb_shared_info *shi = skb_shinfo(skb);
	struct sk_buff *frag_iter;

	sg_init_table(&sg, 1);

	sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
	ahash_request_set_crypt(req, &sg, NULL, head_data_len);
	if (crypto_ahash_update(req))
		return 1;

	for (i = 0; i < shi->nr_frags; ++i) {
		const skb_frag_t *f = &shi->frags[i];
		unsigned int offset = skb_frag_off(f);
		struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);

		sg_set_page(&sg, page, skb_frag_size(f),
			    offset_in_page(offset));
		ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f));
		if (crypto_ahash_update(req))
			return 1;
	}

	skb_walk_frags(skb, frag_iter)
		if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
			return 1;

	return 0;
}
EXPORT_SYMBOL(tcp_md5_hash_skb_data);

int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
{
	u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */
	struct scatterlist sg;

	sg_init_one(&sg, key->key, keylen);
	ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen);

	/* We use data_race() because tcp_md5_do_add() might change
	 * key->key under us.
	 */
	return data_race(crypto_ahash_update(hp->md5_req));
}
EXPORT_SYMBOL(tcp_md5_hash_key);
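
/*
 * Illustrative sketch (not kernel code): the keys that
 * tcp_inbound_md5_hash() below checks against are installed from
 * userspace with the TCP_MD5SIG socket option; "peer" (a filled-in
 * sockaddr_in or sockaddr_in6) and "secret" are assumptions here:
 *
 *	struct tcp_md5sig md5 = {};
 *
 *	memcpy(&md5.tcpm_addr, &peer, sizeof(peer));
 *	md5.tcpm_keylen = strlen(secret);
 *	memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */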
"failed" : "mismatch", 4474 saddr, ntohs(th->source), 4475 daddr, ntohs(th->dest), l3index); 4476 } 4477 return SKB_DROP_REASON_TCP_MD5FAILURE; 4478 } 4479 return SKB_NOT_DROPPED_YET; 4480 } 4481 EXPORT_SYMBOL(tcp_inbound_md5_hash); 4482 4483 #endif 4484 4485 void tcp_done(struct sock *sk) 4486 { 4487 struct request_sock *req; 4488 4489 /* We might be called with a new socket, after 4490 * inet_csk_prepare_forced_close() has been called 4491 * so we can not use lockdep_sock_is_held(sk) 4492 */ 4493 req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1); 4494 4495 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) 4496 TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); 4497 4498 tcp_set_state(sk, TCP_CLOSE); 4499 tcp_clear_xmit_timers(sk); 4500 if (req) 4501 reqsk_fastopen_remove(sk, req, false); 4502 4503 sk->sk_shutdown = SHUTDOWN_MASK; 4504 4505 if (!sock_flag(sk, SOCK_DEAD)) 4506 sk->sk_state_change(sk); 4507 else 4508 inet_csk_destroy_sock(sk); 4509 } 4510 EXPORT_SYMBOL_GPL(tcp_done); 4511 4512 int tcp_abort(struct sock *sk, int err) 4513 { 4514 if (!sk_fullsock(sk)) { 4515 if (sk->sk_state == TCP_NEW_SYN_RECV) { 4516 struct request_sock *req = inet_reqsk(sk); 4517 4518 local_bh_disable(); 4519 inet_csk_reqsk_queue_drop(req->rsk_listener, req); 4520 local_bh_enable(); 4521 return 0; 4522 } 4523 return -EOPNOTSUPP; 4524 } 4525 4526 /* Don't race with userspace socket closes such as tcp_close. */ 4527 lock_sock(sk); 4528 4529 if (sk->sk_state == TCP_LISTEN) { 4530 tcp_set_state(sk, TCP_CLOSE); 4531 inet_csk_listen_stop(sk); 4532 } 4533 4534 /* Don't race with BH socket closes such as inet_csk_listen_stop. */ 4535 local_bh_disable(); 4536 bh_lock_sock(sk); 4537 4538 if (!sock_flag(sk, SOCK_DEAD)) { 4539 sk->sk_err = err; 4540 /* This barrier is coupled with smp_rmb() in tcp_poll() */ 4541 smp_wmb(); 4542 sk_error_report(sk); 4543 if (tcp_need_reset(sk->sk_state)) 4544 tcp_send_active_reset(sk, GFP_ATOMIC); 4545 tcp_done(sk); 4546 } 4547 4548 bh_unlock_sock(sk); 4549 local_bh_enable(); 4550 tcp_write_queue_purge(sk); 4551 release_sock(sk); 4552 return 0; 4553 } 4554 EXPORT_SYMBOL_GPL(tcp_abort); 4555 4556 extern struct tcp_congestion_ops tcp_reno; 4557 4558 static __initdata unsigned long thash_entries; 4559 static int __init set_thash_entries(char *str) 4560 { 4561 ssize_t ret; 4562 4563 if (!str) 4564 return 0; 4565 4566 ret = kstrtoul(str, 0, &thash_entries); 4567 if (ret) 4568 return 0; 4569 4570 return 1; 4571 } 4572 __setup("thash_entries=", set_thash_entries); 4573 4574 static void __init tcp_init_mem(void) 4575 { 4576 unsigned long limit = nr_free_buffer_pages() / 16; 4577 4578 limit = max(limit, 128UL); 4579 sysctl_tcp_mem[0] = limit / 4 * 3; /* 4.68 % */ 4580 sysctl_tcp_mem[1] = limit; /* 6.25 % */ 4581 sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; /* 9.37 % */ 4582 } 4583 4584 void __init tcp_init(void) 4585 { 4586 int max_rshare, max_wshare, cnt; 4587 unsigned long limit; 4588 unsigned int i; 4589 4590 BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE); 4591 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > 4592 sizeof_field(struct sk_buff, cb)); 4593 4594 percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL); 4595 4596 timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE); 4597 mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); 4598 4599 inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash", 4600 thash_entries, 21, /* one slot per 2 MB*/ 4601 0, 64 * 1024); 4602 tcp_hashinfo.bind_bucket_cachep = 4603 

void __init tcp_init(void)
{
	int max_rshare, max_wshare, cnt;
	unsigned long limit;
	unsigned int i;

	BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
		     sizeof_field(struct sk_buff, cb));

	percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);

	timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE);
	mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);

	inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
			    thash_entries, 21,  /* one slot per 2 MB */
			    0, 64 * 1024);
	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				  SLAB_ACCOUNT,
				  NULL);

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					17, /* one slot per 128 KB of memory */
					0,
					NULL,
					&tcp_hashinfo.ehash_mask,
					0,
					thash_entries ? 0 : 512 * 1024);
	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);

	if (inet_ehash_locks_alloc(&tcp_hashinfo))
		panic("TCP: failed to alloc ehash_locks");
	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_mask + 1,
					17, /* one slot per 128 KB of memory */
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					0,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}

	cnt = tcp_hashinfo.ehash_mask + 1;
	sysctl_tcp_max_orphans = cnt / 2;

	tcp_init_mem();
	/* Set per-socket limits to no more than 1/128 the pressure threshold */
	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
	max_wshare = min(4UL*1024*1024, limit);
	max_rshare = min(6UL*1024*1024, limit);

	init_net.ipv4.sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
	init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024;
	init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare);

	init_net.ipv4.sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
	init_net.ipv4.sysctl_tcp_rmem[1] = 131072;
	init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare);

	pr_info("Hash tables configured (established %u bind %u)\n",
		tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);

	tcp_v4_init();
	tcp_metrics_init();
	BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
	tcp_tasklet_init();
	mptcp_init();
}