// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken;
 *					pointers passed were wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *		Michael O'Reilly:	ack < copied bug fix.
 *		Johannes Stille	:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING. Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown. There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
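
/* For orientation: a typical active close walks ESTABLISHED -> FIN_WAIT1 ->
 * FIN_WAIT2 -> TIME_WAIT -> CLOSE, while the passive-close peer walks
 * ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSE (RFC 793).
 */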

#define pr_fmt(fmt) "TCP: " fmt

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
#include <linux/static_key.h>
#include <linux/btf.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>

/* Track pending CMSGs. */
enum {
	TCP_CMSG_INQ = 1,
	TCP_CMSG_TS = 2
};

DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);

long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);

atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc);

#if IS_ENABLED(CONFIG_SMC)
DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
EXPORT_SYMBOL(tcp_have_smc);
#endif

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (READ_ONCE(tcp_memory_pressure))
		return;
	val = jiffies;
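	/* tcp_memory_pressure uses 0 to mean "no pressure", and jiffies can
	 * legitimately be 0, so substitute any nonzero timestamp in that case.
	 */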
	if (!val)
		val--;
	if (!cmpxchg(&tcp_memory_pressure, 0, val))
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
}
EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);

void tcp_leave_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (!READ_ONCE(tcp_memory_pressure))
		return;
	val = xchg(&tcp_memory_pressure, 0);
	if (val)
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
			      jiffies_to_msecs(jiffies - val));
}
EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}
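
/* Illustrative round trip (units are whatever "timeout" is expressed in):
 * with timeout = 1 and rto_max = 8, the exponential backoff periods are
 * 1, 2, 4, 8, 8, ..., so secs_to_retrans(20, 1, 8) == 5 and
 * retrans_to_secs(5, 1, 8) == 1 + 2 + 4 + 8 + 8 == 23.
 */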

static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
{
	u32 rate = READ_ONCE(tp->rate_delivered);
	u32 intv = READ_ONCE(tp->rate_interval_us);
	u64 rate64 = 0;

	if (rate && intv) {
		rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
		do_div(rate64, intv);
	}
	return rate64;
}
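
/* Example: rate_delivered = 10 packets over rate_interval_us = 10000 with
 * mss_cache = 1448 yields (10 * 1448 * 1000000) / 10000 = 1448000 bytes/sec.
 */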

/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things set to zero explicitly by call to
 * sk_alloc() so need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->out_of_order_queue = RB_ROOT;
	sk->tcp_rtx_queue = RB_ROOT;
	tcp_init_xmit_timers(sk);
	INIT_LIST_HEAD(&tp->tsq_node);
	INIT_LIST_HEAD(&tp->tsorted_sent_queue);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	icsk->icsk_rto_min = TCP_RTO_MIN;
	icsk->icsk_delack_max = TCP_DELACK_MAX;
	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);

	/* There's a bubble in the pipe until at least the first ACK. */
	tp->app_limited = ~0U;
	tp->rate_app_limited = 1;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
	tcp_assign_congestion_control(sk);

	tp->tsoffset = 0;
	tp->rack.reo_wnd_steps = 1;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_sync_mss = tcp_sync_mss;

	WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]));
	WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]));
	tcp_scaling_ratio_init(sk);

	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	sk_sockets_allocated_inc(sk);
}
EXPORT_SYMBOL(tcp_init_sock);

static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (tsflags && skb) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);
		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
			tcb->txstamp_ack = 1;
		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
	}
}

static bool tcp_stream_is_readable(struct sock *sk, int target)
{
	if (tcp_epollin_ready(sk, target))
		return true;
	return sk_is_readable(sk);
}

/*
 * Wait for a TCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask;
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);
	u8 shutdown;
	int state;

	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * EPOLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that EPOLLHUP is incompatible
	 * with the EPOLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if EPOLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why EPOLLHUP is incompatible with EPOLLOUT. --ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
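	/* In short, the checks below yield: RCV_SHUTDOWN alone adds
	 * EPOLLIN | EPOLLRDNORM | EPOLLRDHUP, while shutdown in both
	 * directions or state == TCP_CLOSE additionally sets EPOLLHUP.
	 */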
	shutdown = READ_ONCE(sk->sk_shutdown);
	if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= EPOLLHUP;
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* Connected or passive Fast Open socket? */
	if (state != TCP_SYN_SENT &&
	    (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);
		u16 urg_data = READ_ONCE(tp->urg_data);

		if (unlikely(urg_data) &&
		    READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
		    !sock_flag(sk, SOCK_URGINLINE))
			target++;

		if (tcp_stream_is_readable(sk, target))
			mask |= EPOLLIN | EPOLLRDNORM;

		if (!(shutdown & SEND_SHUTDOWN)) {
			if (__sk_stream_is_writeable(sk, 1)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost. Memory barrier
				 * pairs with the input side.
				 */
				smp_mb__after_atomic();
				if (__sk_stream_is_writeable(sk, 1))
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		} else
			mask |= EPOLLOUT | EPOLLWRNORM;

		if (urg_data & TCP_URG_VALID)
			mask |= EPOLLPRI;
	} else if (state == TCP_SYN_SENT &&
		   inet_test_bit(DEFER_CONNECT, sk)) {
		/* Active TCP fastopen socket with defer_connect
		 * Return EPOLLOUT so application can call write()
		 * in order for kernel to generate SYN+data
		 */
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	/* This barrier is coupled with smp_wmb() in tcp_done_with_error() */
	smp_rmb();
	if (READ_ONCE(sk->sk_err) ||
	    !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);

int tcp_ioctl(struct sock *sk, int cmd, int *karg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		slow = lock_sock_fast(sk);
		answ = tcp_inq(sk);
		unlock_sock_fast(sk, slow);
		break;
	case SIOCATMARK:
		answ = READ_ONCE(tp->urg_data) &&
		       READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) -
			       READ_ONCE(tp->snd_nxt);
		break;
	default:
		return -ENOIOCTLCMD;
	}

	*karg = answ;
	return 0;
}
EXPORT_SYMBOL(tcp_ioctl);
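
/* From user space these commands are reached via ioctl(2), e.g. (assuming
 * an established TCP socket fd):
 *
 *	int unread, unacked;
 *	ioctl(fd, SIOCINQ, &unread);	// bytes ready to read (aka FIONREAD)
 *	ioctl(fd, SIOCOUTQ, &unacked);	// bytes written but not yet ACKed
 */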

void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline bool forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	__skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk_wmem_queued_add(sk, skb->truesize);
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;

	tcp_slow_start_after_idle_check(sk);
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

/* If a not yet filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues :
 * Because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade off.
 * As packets containing data payload have a bigger truesize
 * than pure acks (dataless) packets, the last checks prevent
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
				int size_goal)
{
	return skb->len < size_goal &&
	       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
	       !tcp_rtx_queue_empty(sk) &&
	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
	       tcp_skb_can_collapse_to(skb);
}
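
/* Concretely: an application issuing many small sendmsg() calls while an
 * earlier data packet still sits in the Qdisc/NIC queue will see those
 * writes accumulate into the tail skb until it reaches size_goal or the
 * pending TX completes, instead of each write producing its own packet.
 */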

void tcp_push(struct sock *sk, int flags, int mss_now,
	      int nonagle, int size_goal)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	skb = tcp_write_queue_tail(sk);
	if (!skb)
		return;
	if (!(flags & MSG_MORE) || forced_push(tp))
		tcp_mark_push(tp, skb);

	tcp_mark_urg(tp, flags);

	if (tcp_should_autocork(sk, skb, size_goal)) {

		/* avoid atomic op if TSQ_THROTTLED bit is already set */
		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
			smp_mb__after_atomic();
		}
		/* It is possible TX completion already happened
		 * before we set TSQ_THROTTLED.
		 */
		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
			return;
	}

	if (flags & MSG_MORE)
		nonagle = TCP_NAGLE_CORK;

	__tcp_push_pending_frames(sk, mss_now, nonagle);
}

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
			      min(rd_desc->count, len), tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 * tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			/* if __tcp_splice_read() got nothing while we have
			 * an skb in receive queue, we do not want to loop.
			 * This might happen with URG data.
			 */
			if (!skb_queue_empty(&sk->sk_receive_queue))
				break;
			ret = sk_wait_data(sk, &timeo, NULL);
			if (ret < 0)
				break;
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!tss.len || !timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);
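
/* Typical zero-copy consumer from user space (illustrative):
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	// socket -> pipe, then pipe -> file, without copying through
 *	// user memory
 *	ssize_t n = splice(sockfd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
 *	if (n > 0)
 *		splice(pfd[0], NULL, filefd, NULL, n, SPLICE_F_MOVE);
 */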

struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
				     bool force_schedule)
{
	struct sk_buff *skb;

	skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
	if (likely(skb)) {
		bool mem_scheduled;

		skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
		if (force_schedule) {
			mem_scheduled = true;
			sk_forced_mem_schedule(sk, skb->truesize);
		} else {
			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
		}
		if (likely(mem_scheduled)) {
			skb_reserve(skb, MAX_TCP_HEADER);
			skb->ip_summed = CHECKSUM_PARTIAL;
			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 new_size_goal, size_goal;

	if (!large_allowed)
		return mss_now;

	/* Note : tcp_tso_autosize() will eventually split this later */
	new_size_goal = tcp_bound_to_half_wnd(tp, sk->sk_gso_max_size);

	/* We try hard to avoid divides here */
	size_goal = tp->gso_segs * mss_now;
	if (unlikely(new_size_goal < size_goal ||
		     new_size_goal >= size_goal + mss_now)) {
		tp->gso_segs = min_t(u16, new_size_goal / mss_now,
				     sk->sk_gso_max_segs);
		size_goal = tp->gso_segs * mss_now;
	}

	return max(size_goal, mss_now);
}

int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}
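
/* Illustrative numbers: with mss_now = 1448 and sk_gso_max_size = 65536
 * (and a window large enough not to clamp it), gso_segs becomes
 * 65536 / 1448 = 45, so *size_goal = 45 * 1448 = 65160 bytes; sendmsg()
 * then builds ~64KB skbs that GSO/TSO later splits into MSS-sized segments.
 */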
9300c54b85fSIlpo Järvinen
93172bf4f17SEric Dumazet /* In some cases, sendmsg() could have added an skb to the write queue,
932dc97391eSDavid Howells * but failed adding payload on it. We need to remove it to consume less
933dc97391eSDavid Howells * memory, but more importantly be able to generate EPOLLOUT for Edge Trigger
93472bf4f17SEric Dumazet * epoll() users. Another reason is that tcp_write_xmit() does not like
93572bf4f17SEric Dumazet * finding an empty skb in the write queue.
936fdfc5c85SEric Dumazet */
tcp_remove_empty_skb(struct sock * sk)93727728ba8SEric Dumazet void tcp_remove_empty_skb(struct sock *sk)
938fdfc5c85SEric Dumazet {
93927728ba8SEric Dumazet struct sk_buff *skb = tcp_write_queue_tail(sk);
94027728ba8SEric Dumazet
941cf12e6f9SJon Maxwell if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
942fdfc5c85SEric Dumazet tcp_unlink_write_queue(skb, sk);
943fdfc5c85SEric Dumazet if (tcp_write_queue_empty(sk))
944fdfc5c85SEric Dumazet tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
94503271f3aSTalal Ahmad tcp_wmem_free_skb(sk, skb);
946fdfc5c85SEric Dumazet }
947fdfc5c85SEric Dumazet }
948fdfc5c85SEric Dumazet
949f8d9d938SEric Dumazet /* skb changing from pure zc to mixed, must charge zc */
tcp_downgrade_zcopy_pure(struct sock * sk,struct sk_buff * skb)950f8d9d938SEric Dumazet static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
951f8d9d938SEric Dumazet {
952f8d9d938SEric Dumazet if (unlikely(skb_zcopy_pure(skb))) {
953f8d9d938SEric Dumazet u32 extra = skb->truesize -
954f8d9d938SEric Dumazet SKB_TRUESIZE(skb_end_offset(skb));
955f8d9d938SEric Dumazet
956f8d9d938SEric Dumazet if (!sk_wmem_schedule(sk, extra))
957f8d9d938SEric Dumazet return -ENOMEM;
958f8d9d938SEric Dumazet
959f8d9d938SEric Dumazet sk_mem_charge(sk, extra);
960f8d9d938SEric Dumazet skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
961f8d9d938SEric Dumazet }
962f8d9d938SEric Dumazet return 0;
963f8d9d938SEric Dumazet }
964f8d9d938SEric Dumazet
965849b425cSEric Dumazet
tcp_wmem_schedule(struct sock * sk,int copy)966fbf93406SEric Dumazet int tcp_wmem_schedule(struct sock *sk, int copy)
967f54755f6SEric Dumazet {
968f54755f6SEric Dumazet int left;
969f54755f6SEric Dumazet
970f54755f6SEric Dumazet if (likely(sk_wmem_schedule(sk, copy)))
971f54755f6SEric Dumazet return copy;
972f54755f6SEric Dumazet
973f54755f6SEric Dumazet /* We could be in trouble if we have nothing queued.
974f54755f6SEric Dumazet * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0]
975f54755f6SEric Dumazet * to guarantee some progress.
976f54755f6SEric Dumazet */
977f54755f6SEric Dumazet left = sock_net(sk)->ipv4.sysctl_tcp_wmem[0] - sk->sk_wmem_queued;
978f54755f6SEric Dumazet if (left > 0)
979f54755f6SEric Dumazet sk_forced_mem_schedule(sk, min(left, copy));
980f54755f6SEric Dumazet return min(copy, sk->sk_forward_alloc);
981f54755f6SEric Dumazet }
982f54755f6SEric Dumazet
tcp_free_fastopen_req(struct tcp_sock * tp)983cf60af03SYuchung Cheng void tcp_free_fastopen_req(struct tcp_sock *tp)
984cf60af03SYuchung Cheng {
98500db4124SIan Morris if (tp->fastopen_req) {
986cf60af03SYuchung Cheng kfree(tp->fastopen_req);
987cf60af03SYuchung Cheng tp->fastopen_req = NULL;
988cf60af03SYuchung Cheng }
989cf60af03SYuchung Cheng }
990cf60af03SYuchung Cheng
tcp_sendmsg_fastopen(struct sock * sk,struct msghdr * msg,int * copied,size_t size,struct ubuf_info * uarg)9913242abebSBenjamin Hesmans int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
9923242abebSBenjamin Hesmans size_t size, struct ubuf_info *uarg)
993cf60af03SYuchung Cheng {
994cf60af03SYuchung Cheng struct tcp_sock *tp = tcp_sk(sk);
99519f6d3f3SWei Wang struct inet_sock *inet = inet_sk(sk);
996ba615f67SWei Wang struct sockaddr *uaddr = msg->msg_name;
997cf60af03SYuchung Cheng int err, flags;
998cf60af03SYuchung Cheng
9995a542133SKuniyuki Iwashima if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) &
10005a542133SKuniyuki Iwashima TFO_CLIENT_ENABLE) ||
1001ba615f67SWei Wang (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
1002ba615f67SWei Wang uaddr->sa_family == AF_UNSPEC))
1003cf60af03SYuchung Cheng return -EOPNOTSUPP;
100400db4124SIan Morris if (tp->fastopen_req)
1005cf60af03SYuchung Cheng return -EALREADY; /* Another Fast Open is in progress */
1006cf60af03SYuchung Cheng
1007cf60af03SYuchung Cheng tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
1008cf60af03SYuchung Cheng sk->sk_allocation);
100951456b29SIan Morris if (unlikely(!tp->fastopen_req))
1010cf60af03SYuchung Cheng return -ENOBUFS;
1011cf60af03SYuchung Cheng tp->fastopen_req->data = msg;
1012f5ddcbbbSEric Dumazet tp->fastopen_req->size = size;
1013f859a448SWillem de Bruijn tp->fastopen_req->uarg = uarg;
1014cf60af03SYuchung Cheng
101508e39c0dSEric Dumazet if (inet_test_bit(DEFER_CONNECT, sk)) {
101619f6d3f3SWei Wang err = tcp_connect(sk);
101719f6d3f3SWei Wang /* Same failure procedure as in tcp_v4/6_connect */
101819f6d3f3SWei Wang if (err) {
101919f6d3f3SWei Wang tcp_set_state(sk, TCP_CLOSE);
102019f6d3f3SWei Wang inet->inet_dport = 0;
102119f6d3f3SWei Wang sk->sk_route_caps = 0;
102219f6d3f3SWei Wang }
102319f6d3f3SWei Wang }
1024cf60af03SYuchung Cheng flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
1025ba615f67SWei Wang err = __inet_stream_connect(sk->sk_socket, uaddr,
10263979ad7eSWilly Tarreau msg->msg_namelen, flags, 1);
10277db92362SWei Wang /* fastopen_req could already be freed in __inet_stream_connect
10287db92362SWei Wang * if the connection times out or gets rst
10297db92362SWei Wang */
10307db92362SWei Wang if (tp->fastopen_req) {
1031f5ddcbbbSEric Dumazet *copied = tp->fastopen_req->copied;
1032cf60af03SYuchung Cheng tcp_free_fastopen_req(tp);
103308e39c0dSEric Dumazet inet_clear_bit(DEFER_CONNECT, sk);
10347db92362SWei Wang }
1035cf60af03SYuchung Cheng return err;
1036cf60af03SYuchung Cheng }
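
/* Usage sketch (hypothetical userspace, not part of this file): the two
 * client-side entry points into this path. Assumes a populated
 * sockaddr_in 'addr' and TFO_CLIENT_ENABLE set in net.ipv4.tcp_fastopen.
 *
 *    // 1) Carry data in the SYN directly:
 *    sendto(fd, buf, len, MSG_FASTOPEN,
 *           (struct sockaddr *)&addr, sizeof(addr));
 *
 *    // 2) Defer the connect; the first write triggers the SYN:
 *    int one = 1;
 *    setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
 *    connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *    write(fd, buf, len);
 */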
1037cf60af03SYuchung Cheng
1038306b13ebSTom Herbert int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
10391da177e4SLinus Torvalds {
10401da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
1041f214f915SWillem de Bruijn struct ubuf_info *uarg = NULL;
10421da177e4SLinus Torvalds struct sk_buff *skb;
1043c14ac945SSoheil Hassas Yeganeh struct sockcm_cookie sockc;
104457be5bdaSAl Viro int flags, err, copied = 0;
104557be5bdaSAl Viro int mss_now = 0, size_goal, copied_syn = 0;
10461a991488SEric Dumazet int process_backlog = 0;
1047270a1c3dSDavid Howells int zc = 0;
10481da177e4SLinus Torvalds long timeo;
10491da177e4SLinus Torvalds
10501da177e4SLinus Torvalds flags = msg->msg_flags;
1051f214f915SWillem de Bruijn
1052eb315a7dSPavel Begunkov if ((flags & MSG_ZEROCOPY) && size) {
1053eb315a7dSPavel Begunkov if (msg->msg_ubuf) {
1054eb315a7dSPavel Begunkov uarg = msg->msg_ubuf;
1055270a1c3dSDavid Howells if (sk->sk_route_caps & NETIF_F_SG)
1056270a1c3dSDavid Howells zc = MSG_ZEROCOPY;
1057eb315a7dSPavel Begunkov } else if (sock_flag(sk, SOCK_ZEROCOPY)) {
1058eea96a3eSPavel Begunkov skb = tcp_write_queue_tail(sk);
10598c793822SJonathan Lemon uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb));
1060f214f915SWillem de Bruijn if (!uarg) {
1061f214f915SWillem de Bruijn err = -ENOBUFS;
1062f214f915SWillem de Bruijn goto out_err;
1063f214f915SWillem de Bruijn }
1064270a1c3dSDavid Howells if (sk->sk_route_caps & NETIF_F_SG)
1065270a1c3dSDavid Howells zc = MSG_ZEROCOPY;
1066270a1c3dSDavid Howells else
1067e7d2b510SPavel Begunkov uarg_to_msgzc(uarg)->zerocopy = 0;
1068f214f915SWillem de Bruijn }
1069270a1c3dSDavid Howells } else if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES) && size) {
1070270a1c3dSDavid Howells if (sk->sk_route_caps & NETIF_F_SG)
1071270a1c3dSDavid Howells zc = MSG_SPLICE_PAGES;
1072eb315a7dSPavel Begunkov }
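
/* At this point zc selects one of three transmit strategies for the copy
 * loop below: 0 (copy user data into page frags), MSG_ZEROCOPY (pin the
 * user pages behind 'uarg') or MSG_SPLICE_PAGES (splice caller-provided
 * pages into the skb).
 */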
1073f214f915SWillem de Bruijn
107408e39c0dSEric Dumazet if (unlikely(flags & MSG_FASTOPEN ||
107508e39c0dSEric Dumazet inet_test_bit(DEFER_CONNECT, sk)) &&
107616ae6aa1SYuchung Cheng !tp->repair) {
1077f859a448SWillem de Bruijn err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg);
1078cf60af03SYuchung Cheng if (err == -EINPROGRESS && copied_syn > 0)
1079cf60af03SYuchung Cheng goto out;
1080cf60af03SYuchung Cheng else if (err)
1081cf60af03SYuchung Cheng goto out_err;
1082cf60af03SYuchung Cheng }
1083cf60af03SYuchung Cheng
10841da177e4SLinus Torvalds timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
10851da177e4SLinus Torvalds
1086d7722e85SSoheil Hassas Yeganeh tcp_rate_check_app_limited(sk); /* is sending application-limited? */
1087d7722e85SSoheil Hassas Yeganeh
10888336886fSJerry Chu /* Wait for a connection to finish. One exception is TCP Fast Open
10898336886fSJerry Chu * (passive side) where data is allowed to be sent before a connection
10908336886fSJerry Chu * is fully established.
10918336886fSJerry Chu */
10928336886fSJerry Chu if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
10938336886fSJerry Chu !tcp_passive_fastopen(sk)) {
1094686a5624SYuvaraja Mariappan err = sk_stream_wait_connect(sk, &timeo);
1095686a5624SYuvaraja Mariappan if (err != 0)
1096cf60af03SYuchung Cheng goto do_error;
10978336886fSJerry Chu }
10981da177e4SLinus Torvalds
1099c0e88ff0SPavel Emelyanov if (unlikely(tp->repair)) {
1100c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_RECV_QUEUE) {
1101c0e88ff0SPavel Emelyanov copied = tcp_send_rcvq(sk, msg, size);
11025924f17aSChristoph Paasch goto out_nopush;
1103c0e88ff0SPavel Emelyanov }
1104c0e88ff0SPavel Emelyanov
1105c0e88ff0SPavel Emelyanov err = -EINVAL;
1106c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_NO_QUEUE)
1107c0e88ff0SPavel Emelyanov goto out_err;
1108c0e88ff0SPavel Emelyanov
1109c0e88ff0SPavel Emelyanov /* 'common' sending to sendq */
1110c0e88ff0SPavel Emelyanov }
1111c0e88ff0SPavel Emelyanov
1112657a0667SWillem de Bruijn sockcm_init(&sockc, sk);
1113c14ac945SSoheil Hassas Yeganeh if (msg->msg_controllen) {
1114c14ac945SSoheil Hassas Yeganeh err = sock_cmsg_send(sk, msg, &sockc);
1115c14ac945SSoheil Hassas Yeganeh if (unlikely(err)) {
1116c14ac945SSoheil Hassas Yeganeh err = -EINVAL;
1117c14ac945SSoheil Hassas Yeganeh goto out_err;
1118c14ac945SSoheil Hassas Yeganeh }
1119c14ac945SSoheil Hassas Yeganeh }
1120c14ac945SSoheil Hassas Yeganeh
11211da177e4SLinus Torvalds /* This should be in poll */
11229cd3e072SEric Dumazet sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
11231da177e4SLinus Torvalds
11241da177e4SLinus Torvalds /* OK, commence sending. */
11251da177e4SLinus Torvalds copied = 0;
11261da177e4SLinus Torvalds
1127d41a69f1SEric Dumazet restart:
1128d41a69f1SEric Dumazet mss_now = tcp_send_mss(sk, &size_goal, flags);
1129d41a69f1SEric Dumazet
11301da177e4SLinus Torvalds err = -EPIPE;
11311da177e4SLinus Torvalds if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
113279d8665bSEric Dumazet goto do_error;
11331da177e4SLinus Torvalds
113401e97e65SAl Viro while (msg_data_left(msg)) {
1135270a1c3dSDavid Howells ssize_t copy = 0;
11361da177e4SLinus Torvalds
1137fe067e8aSDavid S. Miller skb = tcp_write_queue_tail(sk);
113865ec6097SEric Dumazet if (skb)
113965ec6097SEric Dumazet copy = size_goal - skb->len;
11401da177e4SLinus Torvalds
1141c134ecb8SMartin KaFai Lau if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
11423613b3dbSEric Dumazet bool first_skb;
11433613b3dbSEric Dumazet
11441da177e4SLinus Torvalds new_segment:
11451da177e4SLinus Torvalds if (!sk_stream_memory_free(sk))
1146afb83012SSoheil Hassas Yeganeh goto wait_for_space;
11471da177e4SLinus Torvalds
11481a991488SEric Dumazet if (unlikely(process_backlog >= 16)) {
11491a991488SEric Dumazet process_backlog = 0;
11501a991488SEric Dumazet if (sk_flush_backlog(sk))
1151d41a69f1SEric Dumazet goto restart;
1152d4011239SEric Dumazet }
115375c119afSEric Dumazet first_skb = tcp_rtx_and_write_queues_empty(sk);
11545882efffSEric Dumazet skb = tcp_stream_alloc_skb(sk, sk->sk_allocation,
11553613b3dbSEric Dumazet first_skb);
11561da177e4SLinus Torvalds if (!skb)
1157afb83012SSoheil Hassas Yeganeh goto wait_for_space;
11581da177e4SLinus Torvalds
11591a991488SEric Dumazet process_backlog++;
11601da177e4SLinus Torvalds
116150569d12SJakub Kicinski #ifdef CONFIG_SKB_DECRYPTED
116250569d12SJakub Kicinski skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
116350569d12SJakub Kicinski #endif
116404d8825cSPaolo Abeni tcp_skb_entail(sk, skb);
1165c1b4a7e6SDavid S. Miller copy = size_goal;
11669d186cacSAndrey Vagin
11679d186cacSAndrey Vagin /* All packets are restored as if they have
1168d3edd06eSEric Dumazet * already been sent. skb_mstamp_ns is left unset
11699d186cacSAndrey Vagin * to avoid a wrong RTT estimation.
11709d186cacSAndrey Vagin */
11719d186cacSAndrey Vagin if (tp->repair)
11729d186cacSAndrey Vagin TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
11731da177e4SLinus Torvalds }
11741da177e4SLinus Torvalds
11751da177e4SLinus Torvalds /* Try to append data to the end of skb. */
117601e97e65SAl Viro if (copy > msg_data_left(msg))
117701e97e65SAl Viro copy = msg_data_left(msg);
11781da177e4SLinus Torvalds
1179270a1c3dSDavid Howells if (zc == 0) {
11805640f768SEric Dumazet bool merge = true;
11811da177e4SLinus Torvalds int i = skb_shinfo(skb)->nr_frags;
11825640f768SEric Dumazet struct page_frag *pfrag = sk_page_frag(sk);
1183761965eaSEric Dumazet
11845640f768SEric Dumazet if (!sk_page_frag_refill(sk, pfrag))
1185afb83012SSoheil Hassas Yeganeh goto wait_for_space;
1186761965eaSEric Dumazet
11875640f768SEric Dumazet if (!skb_can_coalesce(skb, i, pfrag->page,
11885640f768SEric Dumazet pfrag->offset)) {
1189657b991aSKuniyuki Iwashima if (i >= READ_ONCE(sysctl_max_skb_frags)) {
11901da177e4SLinus Torvalds tcp_mark_push(tp, skb);
11911da177e4SLinus Torvalds goto new_segment;
11921da177e4SLinus Torvalds }
11935640f768SEric Dumazet merge = false;
11945640f768SEric Dumazet }
1195ef015786SHerbert Xu
11965640f768SEric Dumazet copy = min_t(int, copy, pfrag->size - pfrag->offset);
1197ef015786SHerbert Xu
1198eb315a7dSPavel Begunkov if (unlikely(skb_zcopy_pure(skb) || skb_zcopy_managed(skb))) {
1199849b425cSEric Dumazet if (tcp_downgrade_zcopy_pure(sk, skb))
1200849b425cSEric Dumazet goto wait_for_space;
1201eb315a7dSPavel Begunkov skb_zcopy_downgrade_managed(skb);
1202eb315a7dSPavel Begunkov }
1203849b425cSEric Dumazet
1204849b425cSEric Dumazet copy = tcp_wmem_schedule(sk, copy);
1205849b425cSEric Dumazet if (!copy)
1206afb83012SSoheil Hassas Yeganeh goto wait_for_space;
12071da177e4SLinus Torvalds
120857be5bdaSAl Viro err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
12095640f768SEric Dumazet pfrag->page,
12105640f768SEric Dumazet pfrag->offset,
12115640f768SEric Dumazet copy);
12125640f768SEric Dumazet if (err)
12131da177e4SLinus Torvalds goto do_error;
12141da177e4SLinus Torvalds
12151da177e4SLinus Torvalds /* Update the skb. */
12161da177e4SLinus Torvalds if (merge) {
12179e903e08SEric Dumazet skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
12181da177e4SLinus Torvalds } else {
12195640f768SEric Dumazet skb_fill_page_desc(skb, i, pfrag->page,
12205640f768SEric Dumazet pfrag->offset, copy);
12214e33e346SEric Dumazet page_ref_inc(pfrag->page);
12221da177e4SLinus Torvalds }
12235640f768SEric Dumazet pfrag->offset += copy;
1224270a1c3dSDavid Howells } else if (zc == MSG_ZEROCOPY) {
12259b65b17dSTalal Ahmad /* First append to a fragless skb builds initial
12269b65b17dSTalal Ahmad * pure zerocopy skb
12279b65b17dSTalal Ahmad */
12289b65b17dSTalal Ahmad if (!skb->len)
12299b65b17dSTalal Ahmad skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY;
12309b65b17dSTalal Ahmad
12319b65b17dSTalal Ahmad if (!skb_zcopy_pure(skb)) {
1232849b425cSEric Dumazet copy = tcp_wmem_schedule(sk, copy);
1233849b425cSEric Dumazet if (!copy)
1234358ed624STalal Ahmad goto wait_for_space;
12359b65b17dSTalal Ahmad }
1236358ed624STalal Ahmad
1237f214f915SWillem de Bruijn err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
1238111856c7SWillem de Bruijn if (err == -EMSGSIZE || err == -EEXIST) {
1239111856c7SWillem de Bruijn tcp_mark_push(tp, skb);
1240f214f915SWillem de Bruijn goto new_segment;
1241111856c7SWillem de Bruijn }
1242f214f915SWillem de Bruijn if (err < 0)
1243f214f915SWillem de Bruijn goto do_error;
1244f214f915SWillem de Bruijn copy = err;
1245270a1c3dSDavid Howells } else if (zc == MSG_SPLICE_PAGES) {
1246270a1c3dSDavid Howells /* Splice in data if we can; copy if we can't. */
1247270a1c3dSDavid Howells if (tcp_downgrade_zcopy_pure(sk, skb))
1248270a1c3dSDavid Howells goto wait_for_space;
1249270a1c3dSDavid Howells copy = tcp_wmem_schedule(sk, copy);
1250270a1c3dSDavid Howells if (!copy)
1251270a1c3dSDavid Howells goto wait_for_space;
1252270a1c3dSDavid Howells
1253270a1c3dSDavid Howells err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
1254270a1c3dSDavid Howells sk->sk_allocation);
1255270a1c3dSDavid Howells if (err < 0) {
1256270a1c3dSDavid Howells if (err == -EMSGSIZE) {
1257270a1c3dSDavid Howells tcp_mark_push(tp, skb);
1258270a1c3dSDavid Howells goto new_segment;
1259270a1c3dSDavid Howells }
1260270a1c3dSDavid Howells goto do_error;
1261270a1c3dSDavid Howells }
1262270a1c3dSDavid Howells copy = err;
1263270a1c3dSDavid Howells
1264270a1c3dSDavid Howells if (!(flags & MSG_NO_SHARED_FRAGS))
1265270a1c3dSDavid Howells skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
1266270a1c3dSDavid Howells
1267270a1c3dSDavid Howells sk_wmem_queued_add(sk, copy);
1268270a1c3dSDavid Howells sk_mem_charge(sk, copy);
12691da177e4SLinus Torvalds }
12701da177e4SLinus Torvalds
12711da177e4SLinus Torvalds if (!copied)
12724de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
12731da177e4SLinus Torvalds
12740f317464SEric Dumazet WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
12751da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq += copy;
1276cd7d8498SEric Dumazet tcp_skb_pcount_set(skb, 0);
12771da177e4SLinus Torvalds
12781da177e4SLinus Torvalds copied += copy;
127901e97e65SAl Viro if (!msg_data_left(msg)) {
1280c134ecb8SMartin KaFai Lau if (unlikely(flags & MSG_EOR))
1281c134ecb8SMartin KaFai Lau TCP_SKB_CB(skb)->eor = 1;
12821da177e4SLinus Torvalds goto out;
12834ed2d765SWillem de Bruijn }
12841da177e4SLinus Torvalds
128565ec6097SEric Dumazet if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair))
12861da177e4SLinus Torvalds continue;
12871da177e4SLinus Torvalds
12881da177e4SLinus Torvalds if (forced_push(tp)) {
12891da177e4SLinus Torvalds tcp_mark_push(tp, skb);
12909e412ba7SIlpo Järvinen __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1291fe067e8aSDavid S. Miller } else if (skb == tcp_send_head(sk))
12921da177e4SLinus Torvalds tcp_push_one(sk, mss_now);
12931da177e4SLinus Torvalds continue;
12941da177e4SLinus Torvalds
1295afb83012SSoheil Hassas Yeganeh wait_for_space:
12961da177e4SLinus Torvalds set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
129772bf4f17SEric Dumazet tcp_remove_empty_skb(sk);
1298ec342325SAndrew Vagin if (copied)
1299f54b3111SEric Dumazet tcp_push(sk, flags & ~MSG_MORE, mss_now,
1300f54b3111SEric Dumazet TCP_NAGLE_PUSH, size_goal);
13011da177e4SLinus Torvalds
1302686a5624SYuvaraja Mariappan err = sk_stream_wait_memory(sk, &timeo);
1303686a5624SYuvaraja Mariappan if (err != 0)
13041da177e4SLinus Torvalds goto do_error;
13051da177e4SLinus Torvalds
13060c54b85fSIlpo Järvinen mss_now = tcp_send_mss(sk, &size_goal, flags);
13071da177e4SLinus Torvalds }
13081da177e4SLinus Torvalds
13091da177e4SLinus Torvalds out:
1310ad02c4f5SSoheil Hassas Yeganeh if (copied) {
13114e8cc228SEric Dumazet tcp_tx_timestamp(sk, sockc.tsflags);
1312f54b3111SEric Dumazet tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1313ad02c4f5SSoheil Hassas Yeganeh }
13145924f17aSChristoph Paasch out_nopush:
1315a7533584SPavel Begunkov /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */
1316a7533584SPavel Begunkov if (uarg && !msg->msg_ubuf)
13178e044917SJonathan Lemon net_zcopy_put(uarg);
1318cf60af03SYuchung Cheng return copied + copied_syn;
13191da177e4SLinus Torvalds
13201da177e4SLinus Torvalds do_error:
132127728ba8SEric Dumazet tcp_remove_empty_skb(sk);
1322fdfc5c85SEric Dumazet
1323cf60af03SYuchung Cheng if (copied + copied_syn)
13241da177e4SLinus Torvalds goto out;
13251da177e4SLinus Torvalds out_err:
1326a7533584SPavel Begunkov /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */
1327a7533584SPavel Begunkov if (uarg && !msg->msg_ubuf)
13288e044917SJonathan Lemon net_zcopy_put_abort(uarg, true);
13291da177e4SLinus Torvalds err = sk_stream_error(sk, flags, err);
1330ce5ec440SJason Baron /* make sure we wake any epoll edge trigger waiter */
1331216808c6SEric Dumazet if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) {
1332ce5ec440SJason Baron sk->sk_write_space(sk);
1333b0f71bd3SFrancis Yan tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
1334b0f71bd3SFrancis Yan }
13351da177e4SLinus Torvalds return err;
13361da177e4SLinus Torvalds }
1337774c4673SDavid S. Miller EXPORT_SYMBOL_GPL(tcp_sendmsg_locked);
1338306b13ebSTom Herbert
1339306b13ebSTom Herbert int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1340306b13ebSTom Herbert {
1341306b13ebSTom Herbert int ret;
1342306b13ebSTom Herbert
1343306b13ebSTom Herbert lock_sock(sk);
1344306b13ebSTom Herbert ret = tcp_sendmsg_locked(sk, msg, size);
1345306b13ebSTom Herbert release_sock(sk);
1346306b13ebSTom Herbert
1347306b13ebSTom Herbert return ret;
1348306b13ebSTom Herbert }
13494bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sendmsg);
13501da177e4SLinus Torvalds
13511d7e4538SDavid Howells void tcp_splice_eof(struct socket *sock)
13521d7e4538SDavid Howells {
13531d7e4538SDavid Howells struct sock *sk = sock->sk;
13541d7e4538SDavid Howells struct tcp_sock *tp = tcp_sk(sk);
13551d7e4538SDavid Howells int mss_now, size_goal;
13561d7e4538SDavid Howells
13571d7e4538SDavid Howells if (!tcp_write_queue_tail(sk))
13581d7e4538SDavid Howells return;
13591d7e4538SDavid Howells
13601d7e4538SDavid Howells lock_sock(sk);
13611d7e4538SDavid Howells mss_now = tcp_send_mss(sk, &size_goal, 0);
13621d7e4538SDavid Howells tcp_push(sk, 0, mss_now, tp->nonagle, size_goal);
13631d7e4538SDavid Howells release_sock(sk);
13641d7e4538SDavid Howells }
13651d7e4538SDavid Howells EXPORT_SYMBOL_GPL(tcp_splice_eof);
13661d7e4538SDavid Howells
13671da177e4SLinus Torvalds /*
13681da177e4SLinus Torvalds * Handle reading urgent data. BSD has very simple semantics for
13691da177e4SLinus Torvalds * this, no blocking and very strange errors 8)
13701da177e4SLinus Torvalds */
13711da177e4SLinus Torvalds
1372377f0a08SRami Rosen static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
13731da177e4SLinus Torvalds {
13741da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
13751da177e4SLinus Torvalds
13761da177e4SLinus Torvalds /* No URG data to read. */
13771da177e4SLinus Torvalds if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
13781da177e4SLinus Torvalds tp->urg_data == TCP_URG_READ)
13791da177e4SLinus Torvalds return -EINVAL; /* Yes this is right ! */
13801da177e4SLinus Torvalds
13811da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
13821da177e4SLinus Torvalds return -ENOTCONN;
13831da177e4SLinus Torvalds
13841da177e4SLinus Torvalds if (tp->urg_data & TCP_URG_VALID) {
13851da177e4SLinus Torvalds int err = 0;
13861da177e4SLinus Torvalds char c = tp->urg_data;
13871da177e4SLinus Torvalds
13881da177e4SLinus Torvalds if (!(flags & MSG_PEEK))
13897b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, TCP_URG_READ);
13901da177e4SLinus Torvalds
13911da177e4SLinus Torvalds /* Read urgent data. */
13921da177e4SLinus Torvalds msg->msg_flags |= MSG_OOB;
13931da177e4SLinus Torvalds
13941da177e4SLinus Torvalds if (len > 0) {
13951da177e4SLinus Torvalds if (!(flags & MSG_TRUNC))
13967eab8d9eSAl Viro err = memcpy_to_msg(msg, &c, 1);
13971da177e4SLinus Torvalds len = 1;
13981da177e4SLinus Torvalds } else
13991da177e4SLinus Torvalds msg->msg_flags |= MSG_TRUNC;
14001da177e4SLinus Torvalds
14011da177e4SLinus Torvalds return err ? -EFAULT : len;
14021da177e4SLinus Torvalds }
14031da177e4SLinus Torvalds
14041da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
14051da177e4SLinus Torvalds return 0;
14061da177e4SLinus Torvalds
14071da177e4SLinus Torvalds /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
14081da177e4SLinus Torvalds * the available implementations agree in this case:
14091da177e4SLinus Torvalds * this call should never block, independent of the
14101da177e4SLinus Torvalds * blocking state of the socket.
14111da177e4SLinus Torvalds * Mike <pall@rz.uni-karlsruhe.de>
14121da177e4SLinus Torvalds */
14131da177e4SLinus Torvalds return -EAGAIN;
14141da177e4SLinus Torvalds }
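
/* Usage sketch (hypothetical userspace): fetching the single byte of
 * urgent data out of band, matching the semantics above:
 *
 *    char c;
 *    ssize_t n = recv(fd, &c, 1, MSG_OOB);
 *    // Fails with EINVAL if the byte was already read or SO_OOBINLINE
 *    // is set, and with EAGAIN if it has not arrived; it never blocks.
 */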
14151da177e4SLinus Torvalds
1416c0e88ff0SPavel Emelyanov static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
1417c0e88ff0SPavel Emelyanov {
1418c0e88ff0SPavel Emelyanov struct sk_buff *skb;
1419c0e88ff0SPavel Emelyanov int copied = 0, err = 0;
1420c0e88ff0SPavel Emelyanov
1421c0e88ff0SPavel Emelyanov /* XXX -- need to support SO_PEEK_OFF */
1422c0e88ff0SPavel Emelyanov
142375c119afSEric Dumazet skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
142475c119afSEric Dumazet err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
142575c119afSEric Dumazet if (err)
142675c119afSEric Dumazet return err;
142775c119afSEric Dumazet copied += skb->len;
142875c119afSEric Dumazet }
142975c119afSEric Dumazet
1430c0e88ff0SPavel Emelyanov skb_queue_walk(&sk->sk_write_queue, skb) {
143151f3d02bSDavid S. Miller err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
1432c0e88ff0SPavel Emelyanov if (err)
1433c0e88ff0SPavel Emelyanov break;
1434c0e88ff0SPavel Emelyanov
1435c0e88ff0SPavel Emelyanov copied += skb->len;
1436c0e88ff0SPavel Emelyanov }
1437c0e88ff0SPavel Emelyanov
1438c0e88ff0SPavel Emelyanov return err ?: copied;
1439c0e88ff0SPavel Emelyanov }
1440c0e88ff0SPavel Emelyanov
14411da177e4SLinus Torvalds /* Clean up the receive buffer for full frames taken by the user,
14421da177e4SLinus Torvalds * then send an ACK if necessary. COPIED is the number of bytes
14431da177e4SLinus Torvalds * tcp_recvmsg has given to the user so far, it speeds up the
14441da177e4SLinus Torvalds * calculation of whether or not we must ACK for the sake of
14451da177e4SLinus Torvalds * a window update.
14461da177e4SLinus Torvalds */
1447e5c6de5fSJohn Fastabend void __tcp_cleanup_rbuf(struct sock *sk, int copied)
14481da177e4SLinus Torvalds {
14491da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
1450a2a385d6SEric Dumazet bool time_to_ack = false;
14511da177e4SLinus Torvalds
1452463c84b9SArnaldo Carvalho de Melo if (inet_csk_ack_scheduled(sk)) {
1453463c84b9SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk);
1454b6b6d653SEric Dumazet
1455b6b6d653SEric Dumazet if (/* Once-per-two-segments ACK was not sent by tcp_input.c */
1456463c84b9SArnaldo Carvalho de Melo tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
14571da177e4SLinus Torvalds /*
14581da177e4SLinus Torvalds * If this read emptied the read buffer, we send an ACK if the
14591da177e4SLinus Torvalds * connection is not bidirectional, the user drained the
14601da177e4SLinus Torvalds * receive buffer, and there was a small segment
14611da177e4SLinus Torvalds * in the queue.
14621da177e4SLinus Torvalds */
14631ef9696cSAlexey Kuznetsov (copied > 0 &&
14641ef9696cSAlexey Kuznetsov ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
14651ef9696cSAlexey Kuznetsov ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
146631954cd8SWei Wang !inet_csk_in_pingpong_mode(sk))) &&
14671ef9696cSAlexey Kuznetsov !atomic_read(&sk->sk_rmem_alloc)))
1468a2a385d6SEric Dumazet time_to_ack = true;
14691da177e4SLinus Torvalds }
14701da177e4SLinus Torvalds
14711da177e4SLinus Torvalds /* We send an ACK if we can now advertise a non-zero window
14721da177e4SLinus Torvalds * which has been raised "significantly".
14731da177e4SLinus Torvalds *
14741da177e4SLinus Torvalds * Even if window raised up to infinity, do not send window open ACK
14751da177e4SLinus Torvalds * in states, where we will not receive more. It is useless.
14761da177e4SLinus Torvalds */
14771da177e4SLinus Torvalds if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
14781da177e4SLinus Torvalds __u32 rcv_window_now = tcp_receive_window(tp);
14791da177e4SLinus Torvalds
14801da177e4SLinus Torvalds /* Optimize, __tcp_select_window() is not cheap. */
14811da177e4SLinus Torvalds if (2*rcv_window_now <= tp->window_clamp) {
14821da177e4SLinus Torvalds __u32 new_window = __tcp_select_window(sk);
14831da177e4SLinus Torvalds
14841da177e4SLinus Torvalds /* Send ACK now, if this read freed lots of space
14851da177e4SLinus Torvalds * in our buffer. Certainly, new_window is the new window;
14861da177e4SLinus Torvalds * we can advertise it now if it is not less than the current one.
14871da177e4SLinus Torvalds * "Lots" means "at least twice" here.
14881da177e4SLinus Torvalds */
14891da177e4SLinus Torvalds if (new_window && new_window >= 2 * rcv_window_now)
1490a2a385d6SEric Dumazet time_to_ack = true;
14911da177e4SLinus Torvalds }
14921da177e4SLinus Torvalds }
14931da177e4SLinus Torvalds if (time_to_ack)
14941da177e4SLinus Torvalds tcp_send_ack(sk);
14951da177e4SLinus Torvalds }
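
/* A worked example of the "significantly raised" test above (illustrative
 * numbers only): with window_clamp = 64K and rcv_window_now = 16K,
 * 2 * 16K <= 64K holds, so __tcp_select_window() is consulted; if the
 * read freed enough space that it returns new_window = 48K >= 2 * 16K,
 * time_to_ack becomes true and a window update ACK is sent immediately.
 */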
14961da177e4SLinus Torvalds
1497c457985aSCong Wang void tcp_cleanup_rbuf(struct sock *sk, int copied)
1498c457985aSCong Wang {
1499c457985aSCong Wang struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1500c457985aSCong Wang struct tcp_sock *tp = tcp_sk(sk);
1501c457985aSCong Wang
1502c457985aSCong Wang WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1503c457985aSCong Wang "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1504c457985aSCong Wang tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1505c457985aSCong Wang __tcp_cleanup_rbuf(sk, copied);
1506c457985aSCong Wang }
1507c457985aSCong Wang
15083df684c1SEric Dumazet static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
15093df684c1SEric Dumazet {
1510f35f8219SEric Dumazet __skb_unlink(skb, &sk->sk_receive_queue);
15113df684c1SEric Dumazet if (likely(skb->destructor == sock_rfree)) {
15123df684c1SEric Dumazet sock_rfree(skb);
15133df684c1SEric Dumazet skb->destructor = NULL;
15143df684c1SEric Dumazet skb->sk = NULL;
151568822bdfSEric Dumazet return skb_attempt_defer_free(skb);
1516f35f8219SEric Dumazet }
1517f35f8219SEric Dumazet __kfree_skb(skb);
15183df684c1SEric Dumazet }
15193df684c1SEric Dumazet
15203f92a64eSJakub Kicinski struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
15211da177e4SLinus Torvalds {
15221da177e4SLinus Torvalds struct sk_buff *skb;
15231da177e4SLinus Torvalds u32 offset;
15241da177e4SLinus Torvalds
1525f26845b4SEric Dumazet while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
15261da177e4SLinus Torvalds offset = seq - TCP_SKB_CB(skb)->seq;
15279d691539SEric Dumazet if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
15289d691539SEric Dumazet pr_err_once("%s: found a SYN, please report !\n", __func__);
15291da177e4SLinus Torvalds offset--;
15309d691539SEric Dumazet }
1531e11ecddfSEric Dumazet if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) {
15321da177e4SLinus Torvalds *off = offset;
15331da177e4SLinus Torvalds return skb;
15341da177e4SLinus Torvalds }
1535f26845b4SEric Dumazet /* This looks weird, but this can happen if TCP collapsing
1536f26845b4SEric Dumazet * split a fat GRO packet, while we released the socket lock
1537f26845b4SEric Dumazet * in skb_splice_bits()
1538f26845b4SEric Dumazet */
15393df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb);
15401da177e4SLinus Torvalds }
15411da177e4SLinus Torvalds return NULL;
15421da177e4SLinus Torvalds }
15433f92a64eSJakub Kicinski EXPORT_SYMBOL(tcp_recv_skb);
15441da177e4SLinus Torvalds
15451da177e4SLinus Torvalds /*
15461da177e4SLinus Torvalds * This routine provides an alternative to tcp_recvmsg() for routines
15471da177e4SLinus Torvalds * that would like to handle copying from skbuffs directly in 'sendfile'
15481da177e4SLinus Torvalds * fashion.
15491da177e4SLinus Torvalds * Note:
15501da177e4SLinus Torvalds * - It is assumed that the socket was locked by the caller.
15511da177e4SLinus Torvalds * - The routine does not block.
15521da177e4SLinus Torvalds * - At present, there is no support for reading OOB data
15531da177e4SLinus Torvalds * or for 'peeking' the socket using this routine
15541da177e4SLinus Torvalds * (although both would be easy to implement).
15551da177e4SLinus Torvalds */
1556*05a571eeSJiayuan Chen static int __tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1557*05a571eeSJiayuan Chen sk_read_actor_t recv_actor, bool noack,
1558*05a571eeSJiayuan Chen u32 *copied_seq)
15591da177e4SLinus Torvalds {
15601da177e4SLinus Torvalds struct sk_buff *skb;
15611da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
1562*05a571eeSJiayuan Chen u32 seq = *copied_seq;
15631da177e4SLinus Torvalds u32 offset;
15641da177e4SLinus Torvalds int copied = 0;
15651da177e4SLinus Torvalds
15661da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN)
15671da177e4SLinus Torvalds return -ENOTCONN;
15681da177e4SLinus Torvalds while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
15691da177e4SLinus Torvalds if (offset < skb->len) {
1570374e7b59SOctavian Purdila int used;
1571374e7b59SOctavian Purdila size_t len;
15721da177e4SLinus Torvalds
15731da177e4SLinus Torvalds len = skb->len - offset;
15741da177e4SLinus Torvalds /* Stop reading if we hit a patch of urgent data */
1575b96c51bdSEric Dumazet if (unlikely(tp->urg_data)) {
15761da177e4SLinus Torvalds u32 urg_offset = tp->urg_seq - seq;
15771da177e4SLinus Torvalds if (urg_offset < len)
15781da177e4SLinus Torvalds len = urg_offset;
15791da177e4SLinus Torvalds if (!len)
15801da177e4SLinus Torvalds break;
15811da177e4SLinus Torvalds }
15821da177e4SLinus Torvalds used = recv_actor(desc, skb, offset, len);
1583ff905b1eSEric Dumazet if (used <= 0) {
1584ddb61a57SJens Axboe if (!copied)
1585ddb61a57SJens Axboe copied = used;
1586ddb61a57SJens Axboe break;
1587e3d5ea2cSEric Dumazet }
1588e3d5ea2cSEric Dumazet if (WARN_ON_ONCE(used > len))
1589e3d5ea2cSEric Dumazet used = len;
15901da177e4SLinus Torvalds seq += used;
15911da177e4SLinus Torvalds copied += used;
15921da177e4SLinus Torvalds offset += used;
1593e3d5ea2cSEric Dumazet
159402275a2eSWilly Tarreau /* If recv_actor drops the lock (e.g. TCP splice
1595293ad604SOctavian Purdila * receive) the skb pointer might be invalid when
1596293ad604SOctavian Purdila * getting here: tcp_collapse might have deleted it
1597293ad604SOctavian Purdila * while aggregating skbs from the socket queue.
1598293ad604SOctavian Purdila */
1599293ad604SOctavian Purdila skb = tcp_recv_skb(sk, seq - 1, &offset);
160002275a2eSWilly Tarreau if (!skb)
16011da177e4SLinus Torvalds break;
160202275a2eSWilly Tarreau /* TCP coalescing might have appended data to the skb.
160302275a2eSWilly Tarreau * Try to splice more frags
160402275a2eSWilly Tarreau */
160502275a2eSWilly Tarreau if (offset + 1 != skb->len)
160602275a2eSWilly Tarreau continue;
16071da177e4SLinus Torvalds }
1608e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
16093df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb);
16101da177e4SLinus Torvalds ++seq;
16111da177e4SLinus Torvalds break;
16121da177e4SLinus Torvalds }
16133df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb);
16141da177e4SLinus Torvalds if (!desc->count)
16151da177e4SLinus Torvalds break;
1616*05a571eeSJiayuan Chen WRITE_ONCE(*copied_seq, seq);
16171da177e4SLinus Torvalds }
1618*05a571eeSJiayuan Chen WRITE_ONCE(*copied_seq, seq);
1619*05a571eeSJiayuan Chen
1620*05a571eeSJiayuan Chen if (noack)
1621*05a571eeSJiayuan Chen goto out;
16221da177e4SLinus Torvalds
16231da177e4SLinus Torvalds tcp_rcv_space_adjust(sk);
16241da177e4SLinus Torvalds
16251da177e4SLinus Torvalds /* Clean up data we have read: This will do ACK frames. */
1626f26845b4SEric Dumazet if (copied > 0) {
1627f26845b4SEric Dumazet tcp_recv_skb(sk, seq, &offset);
16280e4b4992SChris Leech tcp_cleanup_rbuf(sk, copied);
1629f26845b4SEric Dumazet }
1630*05a571eeSJiayuan Chen out:
16311da177e4SLinus Torvalds return copied;
16321da177e4SLinus Torvalds }
1633*05a571eeSJiayuan Chen
1634*05a571eeSJiayuan Chen int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1635*05a571eeSJiayuan Chen sk_read_actor_t recv_actor)
1636*05a571eeSJiayuan Chen {
1637*05a571eeSJiayuan Chen return __tcp_read_sock(sk, desc, recv_actor, false,
1638*05a571eeSJiayuan Chen &tcp_sk(sk)->copied_seq);
1639*05a571eeSJiayuan Chen }
16404bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_read_sock);
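
/* A minimal recv_actor sketch (hypothetical, for illustration only):
 * copy each chunk into a flat kernel buffer carried in desc->arg.data,
 * the way splice/sendfile-style consumers drive tcp_read_sock():
 *
 *    static int demo_actor(read_descriptor_t *desc, struct sk_buff *skb,
 *                          unsigned int offset, size_t len)
 *    {
 *        size_t n = min_t(size_t, len, desc->count);
 *
 *        if (skb_copy_bits(skb, offset, desc->arg.data, n))
 *            return -EFAULT;
 *        desc->arg.data += n;
 *        desc->count -= n;    // the loop above stops at zero
 *        return n;
 *    }
 */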
16411da177e4SLinus Torvalds
1642*05a571eeSJiayuan Chen int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc,
1643*05a571eeSJiayuan Chen sk_read_actor_t recv_actor, bool noack,
1644*05a571eeSJiayuan Chen u32 *copied_seq)
1645*05a571eeSJiayuan Chen {
1646*05a571eeSJiayuan Chen return __tcp_read_sock(sk, desc, recv_actor, noack, copied_seq);
1647*05a571eeSJiayuan Chen }
1648*05a571eeSJiayuan Chen
1649965b57b4SCong Wang int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
165004919bedSCong Wang {
165104919bedSCong Wang struct sk_buff *skb;
165204919bedSCong Wang int copied = 0;
165304919bedSCong Wang
165404919bedSCong Wang if (sk->sk_state == TCP_LISTEN)
165504919bedSCong Wang return -ENOTCONN;
165604919bedSCong Wang
16579b7177b1SJohn Fastabend while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1658db4192a7SCong Wang u8 tcp_flags;
1659db4192a7SCong Wang int used;
166004919bedSCong Wang
166104919bedSCong Wang __skb_unlink(skb, &sk->sk_receive_queue);
166296628951SPeilin Ye WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
1663db4192a7SCong Wang tcp_flags = TCP_SKB_CB(skb)->tcp_flags;
1664db4192a7SCong Wang used = recv_actor(sk, skb);
1665db4192a7SCong Wang if (used < 0) {
1666db4192a7SCong Wang if (!copied)
1667db4192a7SCong Wang copied = used;
1668db4192a7SCong Wang break;
1669db4192a7SCong Wang }
1670db4192a7SCong Wang copied += used;
1671db4192a7SCong Wang
16729b7177b1SJohn Fastabend if (tcp_flags & TCPHDR_FIN)
1673db4192a7SCong Wang break;
1674db4192a7SCong Wang }
167504919bedSCong Wang return copied;
167604919bedSCong Wang }
167704919bedSCong Wang EXPORT_SYMBOL(tcp_read_skb);
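
/* A minimal skb_read_actor_t sketch (hypothetical): unlike the
 * recv_actor used by tcp_read_sock(), this callback receives the
 * already-unlinked skb and is expected to consume it:
 *
 *    static int demo_skb_actor(struct sock *sk, struct sk_buff *skb)
 *    {
 *        int len = skb->len;
 *
 *        // ... process the payload here ...
 *        kfree_skb(skb);
 *        return len;
 *    }
 */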
167804919bedSCong Wang
16793f92a64eSJakub Kicinski void tcp_read_done(struct sock *sk, size_t len)
16803f92a64eSJakub Kicinski {
16813f92a64eSJakub Kicinski struct tcp_sock *tp = tcp_sk(sk);
16823f92a64eSJakub Kicinski u32 seq = tp->copied_seq;
16833f92a64eSJakub Kicinski struct sk_buff *skb;
16843f92a64eSJakub Kicinski size_t left;
16853f92a64eSJakub Kicinski u32 offset;
16863f92a64eSJakub Kicinski
16873f92a64eSJakub Kicinski if (sk->sk_state == TCP_LISTEN)
16883f92a64eSJakub Kicinski return;
16893f92a64eSJakub Kicinski
16903f92a64eSJakub Kicinski left = len;
16913f92a64eSJakub Kicinski while (left && (skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
16923f92a64eSJakub Kicinski int used;
16933f92a64eSJakub Kicinski
16943f92a64eSJakub Kicinski used = min_t(size_t, skb->len - offset, left);
16953f92a64eSJakub Kicinski seq += used;
16963f92a64eSJakub Kicinski left -= used;
16973f92a64eSJakub Kicinski
16983f92a64eSJakub Kicinski if (skb->len > offset + used)
16993f92a64eSJakub Kicinski break;
17003f92a64eSJakub Kicinski
17013f92a64eSJakub Kicinski if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
17023f92a64eSJakub Kicinski tcp_eat_recv_skb(sk, skb);
17033f92a64eSJakub Kicinski ++seq;
17043f92a64eSJakub Kicinski break;
17053f92a64eSJakub Kicinski }
17063f92a64eSJakub Kicinski tcp_eat_recv_skb(sk, skb);
17073f92a64eSJakub Kicinski }
17083f92a64eSJakub Kicinski WRITE_ONCE(tp->copied_seq, seq);
17093f92a64eSJakub Kicinski
17103f92a64eSJakub Kicinski tcp_rcv_space_adjust(sk);
17113f92a64eSJakub Kicinski
17123f92a64eSJakub Kicinski /* Clean up data we have read: This will do ACK frames. */
17133f92a64eSJakub Kicinski if (left != len)
17143f92a64eSJakub Kicinski tcp_cleanup_rbuf(sk, len - left);
17153f92a64eSJakub Kicinski }
17163f92a64eSJakub Kicinski EXPORT_SYMBOL(tcp_read_done);
17173f92a64eSJakub Kicinski
171832035585STom Herbert int tcp_peek_len(struct socket *sock)
171932035585STom Herbert {
172032035585STom Herbert return tcp_inq(sock->sk);
172132035585STom Herbert }
172232035585STom Herbert EXPORT_SYMBOL(tcp_peek_len);
172332035585STom Herbert
1724d1361840SEric Dumazet /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */
1725d1361840SEric Dumazet int tcp_set_rcvlowat(struct sock *sk, int val)
1726d1361840SEric Dumazet {
1727dfa2f048SEric Dumazet int space, cap;
1728867f816bSSoheil Hassas Yeganeh
1729867f816bSSoheil Hassas Yeganeh if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
1730867f816bSSoheil Hassas Yeganeh cap = sk->sk_rcvbuf >> 1;
1731867f816bSSoheil Hassas Yeganeh else
173202739545SKuniyuki Iwashima cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;
1733867f816bSSoheil Hassas Yeganeh val = min(val, cap);
1734eac66402SEric Dumazet WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
173503f45c88SEric Dumazet
173603f45c88SEric Dumazet /* Check if we need to signal EPOLLIN right now */
173703f45c88SEric Dumazet tcp_data_ready(sk);
173803f45c88SEric Dumazet
1739d1361840SEric Dumazet if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
1740d1361840SEric Dumazet return 0;
1741d1361840SEric Dumazet
1742dfa2f048SEric Dumazet space = tcp_space_from_win(sk, val);
1743dfa2f048SEric Dumazet if (space > sk->sk_rcvbuf) {
1744dfa2f048SEric Dumazet WRITE_ONCE(sk->sk_rcvbuf, space);
1745f9fef23aSEric Dumazet WRITE_ONCE(tcp_sk(sk)->window_clamp, val);
1746d1361840SEric Dumazet }
1747d1361840SEric Dumazet return 0;
1748d1361840SEric Dumazet }
1749d1361840SEric Dumazet EXPORT_SYMBOL(tcp_set_rcvlowat);
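
/* Usage sketch (hypothetical userspace): raise the low-water mark so
 * poll()/recv() only wake up once 64KB is queued; the function above
 * grows sk_rcvbuf to match unless the user locked it with SO_RCVBUF:
 *
 *    int lowat = 64 * 1024;
 *    setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
 */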
1750d1361840SEric Dumazet
1751892bfd3dSFlorian Westphal void tcp_update_recv_tstamps(struct sk_buff *skb,
17527eeba170SArjun Roy struct scm_timestamping_internal *tss)
17537eeba170SArjun Roy {
17547eeba170SArjun Roy if (skb->tstamp)
17557eeba170SArjun Roy tss->ts[0] = ktime_to_timespec64(skb->tstamp);
17567eeba170SArjun Roy else
17577eeba170SArjun Roy tss->ts[0] = (struct timespec64) {0};
17587eeba170SArjun Roy
17597eeba170SArjun Roy if (skb_hwtstamps(skb)->hwtstamp)
17607eeba170SArjun Roy tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp);
17617eeba170SArjun Roy else
17627eeba170SArjun Roy tss->ts[2] = (struct timespec64) {0};
17637eeba170SArjun Roy }
17647eeba170SArjun Roy
176505255b82SEric Dumazet #ifdef CONFIG_MMU
1766350f6bbcSMatthew Wilcox (Oracle) static const struct vm_operations_struct tcp_vm_ops = {
176705255b82SEric Dumazet };
176805255b82SEric Dumazet
176993ab6cc6SEric Dumazet int tcp_mmap(struct file *file, struct socket *sock,
177093ab6cc6SEric Dumazet struct vm_area_struct *vma)
177193ab6cc6SEric Dumazet {
177205255b82SEric Dumazet if (vma->vm_flags & (VM_WRITE | VM_EXEC))
177305255b82SEric Dumazet return -EPERM;
17741c71222eSSuren Baghdasaryan vm_flags_clear(vma, VM_MAYWRITE | VM_MAYEXEC);
177505255b82SEric Dumazet
17763e4e28c5SMichel Lespinasse /* Instruct vm_insert_page() to not mmap_read_lock(mm) */
17771c71222eSSuren Baghdasaryan vm_flags_set(vma, VM_MIXEDMAP);
177805255b82SEric Dumazet
177905255b82SEric Dumazet vma->vm_ops = &tcp_vm_ops;
178005255b82SEric Dumazet return 0;
178105255b82SEric Dumazet }
178205255b82SEric Dumazet EXPORT_SYMBOL(tcp_mmap);
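
/* Usage sketch (hypothetical userspace) of the receive zerocopy flow this
 * mapping enables; pages are installed later by the TCP_ZEROCOPY_RECEIVE
 * getsockopt() (tcp_zerocopy_receive() below):
 *
 *    void *addr = mmap(NULL, 1 << 20, PROT_READ, MAP_SHARED, fd, 0);
 *    struct tcp_zerocopy_receive zc = {
 *        .address = (__u64)(unsigned long)addr,
 *        .length  = 1 << 20,
 *    };
 *    socklen_t zc_len = sizeof(zc);
 *    getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zc_len);
 *    // zc.length bytes are now mapped at addr; the next
 *    // zc.recv_skip_hint bytes must be read with recv() instead.
 */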
178305255b82SEric Dumazet
17847fba5309SArjun Roy static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb,
17857fba5309SArjun Roy u32 *offset_frag)
17867fba5309SArjun Roy {
17877fba5309SArjun Roy skb_frag_t *frag;
17887fba5309SArjun Roy
178970701b83SArjun Roy if (unlikely(offset_skb >= skb->len))
179070701b83SArjun Roy return NULL;
179170701b83SArjun Roy
17927fba5309SArjun Roy offset_skb -= skb_headlen(skb);
17937fba5309SArjun Roy if ((int)offset_skb < 0 || skb_has_frag_list(skb))
17947fba5309SArjun Roy return NULL;
17957fba5309SArjun Roy
17967fba5309SArjun Roy frag = skb_shinfo(skb)->frags;
17977fba5309SArjun Roy while (offset_skb) {
17987fba5309SArjun Roy if (skb_frag_size(frag) > offset_skb) {
17997fba5309SArjun Roy *offset_frag = offset_skb;
18007fba5309SArjun Roy return frag;
18017fba5309SArjun Roy }
18027fba5309SArjun Roy offset_skb -= skb_frag_size(frag);
18037fba5309SArjun Roy ++frag;
18047fba5309SArjun Roy }
18057fba5309SArjun Roy *offset_frag = 0;
18067fba5309SArjun Roy return frag;
18077fba5309SArjun Roy }
18087fba5309SArjun Roy
180998917cf0SArjun Roy static bool can_map_frag(const skb_frag_t *frag)
181098917cf0SArjun Roy {
1811d15cc0f6SEric Dumazet struct page *page;
1812d15cc0f6SEric Dumazet
1813d15cc0f6SEric Dumazet if (skb_frag_size(frag) != PAGE_SIZE || skb_frag_off(frag))
1814d15cc0f6SEric Dumazet return false;
1815d15cc0f6SEric Dumazet
1816d15cc0f6SEric Dumazet page = skb_frag_page(frag);
1817d15cc0f6SEric Dumazet
1818d15cc0f6SEric Dumazet if (PageCompound(page) || page->mapping)
1819d15cc0f6SEric Dumazet return false;
1820d15cc0f6SEric Dumazet
1821d15cc0f6SEric Dumazet return true;
182298917cf0SArjun Roy }
182398917cf0SArjun Roy
182498917cf0SArjun Roy static int find_next_mappable_frag(const skb_frag_t *frag,
182598917cf0SArjun Roy int remaining_in_skb)
182698917cf0SArjun Roy {
182798917cf0SArjun Roy int offset = 0;
182898917cf0SArjun Roy
182998917cf0SArjun Roy if (likely(can_map_frag(frag)))
183098917cf0SArjun Roy return 0;
183198917cf0SArjun Roy
183298917cf0SArjun Roy while (offset < remaining_in_skb && !can_map_frag(frag)) {
183398917cf0SArjun Roy offset += skb_frag_size(frag);
183498917cf0SArjun Roy ++frag;
183598917cf0SArjun Roy }
183698917cf0SArjun Roy return offset;
183798917cf0SArjun Roy }
183898917cf0SArjun Roy
18390c3936d3SArjun Roy static void tcp_zerocopy_set_hint_for_skb(struct sock *sk,
18400c3936d3SArjun Roy struct tcp_zerocopy_receive *zc,
18410c3936d3SArjun Roy struct sk_buff *skb, u32 offset)
18420c3936d3SArjun Roy {
18430c3936d3SArjun Roy u32 frag_offset, partial_frag_remainder = 0;
18440c3936d3SArjun Roy int mappable_offset;
18450c3936d3SArjun Roy skb_frag_t *frag;
18460c3936d3SArjun Roy
18470c3936d3SArjun Roy /* worst case: skip to next skb. try to improve on this case below */
18480c3936d3SArjun Roy zc->recv_skip_hint = skb->len - offset;
18490c3936d3SArjun Roy
18500c3936d3SArjun Roy /* Find the frag containing this offset (and how far into that frag) */
18510c3936d3SArjun Roy frag = skb_advance_to_frag(skb, offset, &frag_offset);
18520c3936d3SArjun Roy if (!frag)
18530c3936d3SArjun Roy return;
18540c3936d3SArjun Roy
18550c3936d3SArjun Roy if (frag_offset) {
18560c3936d3SArjun Roy struct skb_shared_info *info = skb_shinfo(skb);
18570c3936d3SArjun Roy
18580c3936d3SArjun Roy /* We read part of the last frag, must recvmsg() rest of skb. */
18590c3936d3SArjun Roy if (frag == &info->frags[info->nr_frags - 1])
18600c3936d3SArjun Roy return;
18610c3936d3SArjun Roy
18620c3936d3SArjun Roy /* Else, we must at least read the remainder in this frag. */
18630c3936d3SArjun Roy partial_frag_remainder = skb_frag_size(frag) - frag_offset;
18640c3936d3SArjun Roy zc->recv_skip_hint -= partial_frag_remainder;
18650c3936d3SArjun Roy ++frag;
18660c3936d3SArjun Roy }
18670c3936d3SArjun Roy
18680c3936d3SArjun Roy /* partial_frag_remainder: If part way through a frag, must read rest.
18690c3936d3SArjun Roy * mappable_offset: Bytes till next mappable frag, *not* counting bytes
18700c3936d3SArjun Roy * in partial_frag_remainder.
18710c3936d3SArjun Roy */
18720c3936d3SArjun Roy mappable_offset = find_next_mappable_frag(frag, zc->recv_skip_hint);
18730c3936d3SArjun Roy zc->recv_skip_hint = mappable_offset + partial_frag_remainder;
18740c3936d3SArjun Roy }
18750c3936d3SArjun Roy
1876f21a3c48SArjun Roy static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
1877ec095263SOliver Hartkopp int flags, struct scm_timestamping_internal *tss,
1878f21a3c48SArjun Roy int *cmsg_flags);
1879f21a3c48SArjun Roy static int receive_fallback_to_copy(struct sock *sk,
18807eeba170SArjun Roy struct tcp_zerocopy_receive *zc, int inq,
18817eeba170SArjun Roy struct scm_timestamping_internal *tss)
1882f21a3c48SArjun Roy {
1883f21a3c48SArjun Roy unsigned long copy_address = (unsigned long)zc->copybuf_address;
1884f21a3c48SArjun Roy struct msghdr msg = {};
1885f21a3c48SArjun Roy struct iovec iov;
18867eeba170SArjun Roy int err;
1887f21a3c48SArjun Roy
1888f21a3c48SArjun Roy zc->length = 0;
1889f21a3c48SArjun Roy zc->recv_skip_hint = 0;
1890f21a3c48SArjun Roy
1891f21a3c48SArjun Roy if (copy_address != zc->copybuf_address)
1892f21a3c48SArjun Roy return -EINVAL;
1893f21a3c48SArjun Roy
1894de4eda9dSAl Viro err = import_single_range(ITER_DEST, (void __user *)copy_address,
1895f21a3c48SArjun Roy inq, &iov, &msg.msg_iter);
1896f21a3c48SArjun Roy if (err)
1897f21a3c48SArjun Roy return err;
1898f21a3c48SArjun Roy
1899ec095263SOliver Hartkopp err = tcp_recvmsg_locked(sk, &msg, inq, MSG_DONTWAIT,
19007eeba170SArjun Roy tss, &zc->msg_flags);
1901f21a3c48SArjun Roy if (err < 0)
1902f21a3c48SArjun Roy return err;
1903f21a3c48SArjun Roy
1904f21a3c48SArjun Roy zc->copybuf_len = err;
19050c3936d3SArjun Roy if (likely(zc->copybuf_len)) {
19060c3936d3SArjun Roy struct sk_buff *skb;
19070c3936d3SArjun Roy u32 offset;
19080c3936d3SArjun Roy
19090c3936d3SArjun Roy skb = tcp_recv_skb(sk, tcp_sk(sk)->copied_seq, &offset);
19100c3936d3SArjun Roy if (skb)
19110c3936d3SArjun Roy tcp_zerocopy_set_hint_for_skb(sk, zc, skb, offset);
19120c3936d3SArjun Roy }
1913f21a3c48SArjun Roy return 0;
1914f21a3c48SArjun Roy }
1915f21a3c48SArjun Roy
191618fb76edSArjun Roy static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc,
191718fb76edSArjun Roy struct sk_buff *skb, u32 copylen,
191818fb76edSArjun Roy u32 *offset, u32 *seq)
191918fb76edSArjun Roy {
192018fb76edSArjun Roy unsigned long copy_address = (unsigned long)zc->copybuf_address;
192118fb76edSArjun Roy struct msghdr msg = {};
192218fb76edSArjun Roy struct iovec iov;
192318fb76edSArjun Roy int err;
192418fb76edSArjun Roy
192518fb76edSArjun Roy if (copy_address != zc->copybuf_address)
192618fb76edSArjun Roy return -EINVAL;
192718fb76edSArjun Roy
1928de4eda9dSAl Viro err = import_single_range(ITER_DEST, (void __user *)copy_address,
192918fb76edSArjun Roy copylen, &iov, &msg.msg_iter);
193018fb76edSArjun Roy if (err)
193118fb76edSArjun Roy return err;
193218fb76edSArjun Roy err = skb_copy_datagram_msg(skb, *offset, &msg, copylen);
193318fb76edSArjun Roy if (err)
193418fb76edSArjun Roy return err;
193518fb76edSArjun Roy zc->recv_skip_hint -= copylen;
193618fb76edSArjun Roy *offset += copylen;
193718fb76edSArjun Roy *seq += copylen;
193818fb76edSArjun Roy return (__s32)copylen;
193918fb76edSArjun Roy }
194018fb76edSArjun Roy
19417eeba170SArjun Roy static int tcp_zc_handle_leftover(struct tcp_zerocopy_receive *zc,
194218fb76edSArjun Roy struct sock *sk,
194318fb76edSArjun Roy struct sk_buff *skb,
194418fb76edSArjun Roy u32 *seq,
19457eeba170SArjun Roy s32 copybuf_len,
19467eeba170SArjun Roy struct scm_timestamping_internal *tss)
194718fb76edSArjun Roy {
194818fb76edSArjun Roy u32 offset, copylen = min_t(u32, copybuf_len, zc->recv_skip_hint);
194918fb76edSArjun Roy
195018fb76edSArjun Roy if (!copylen)
195118fb76edSArjun Roy return 0;
195218fb76edSArjun Roy /* skb is null if inq < PAGE_SIZE. */
19537eeba170SArjun Roy if (skb) {
195418fb76edSArjun Roy offset = *seq - TCP_SKB_CB(skb)->seq;
19557eeba170SArjun Roy } else {
195618fb76edSArjun Roy skb = tcp_recv_skb(sk, *seq, &offset);
19577eeba170SArjun Roy if (TCP_SKB_CB(skb)->has_rxtstamp) {
19587eeba170SArjun Roy tcp_update_recv_tstamps(skb, tss);
19597eeba170SArjun Roy zc->msg_flags |= TCP_CMSG_TS;
19607eeba170SArjun Roy }
19617eeba170SArjun Roy }
196218fb76edSArjun Roy
196318fb76edSArjun Roy zc->copybuf_len = tcp_copy_straggler_data(zc, skb, copylen, &offset,
196418fb76edSArjun Roy seq);
196518fb76edSArjun Roy return zc->copybuf_len < 0 ? 0 : copylen;
196618fb76edSArjun Roy }
196718fb76edSArjun Roy
196894ab9eb9SArjun Roy static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma,
196994ab9eb9SArjun Roy struct page **pending_pages,
197094ab9eb9SArjun Roy unsigned long pages_remaining,
197194ab9eb9SArjun Roy unsigned long *address,
197294ab9eb9SArjun Roy u32 *length,
197394ab9eb9SArjun Roy u32 *seq,
197494ab9eb9SArjun Roy struct tcp_zerocopy_receive *zc,
197594ab9eb9SArjun Roy u32 total_bytes_to_map,
197694ab9eb9SArjun Roy int err)
197794ab9eb9SArjun Roy {
197894ab9eb9SArjun Roy /* At least one page did not map. Try zapping if we skipped earlier. */
197994ab9eb9SArjun Roy if (err == -EBUSY &&
198094ab9eb9SArjun Roy zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT) {
198194ab9eb9SArjun Roy u32 maybe_zap_len;
198294ab9eb9SArjun Roy
198394ab9eb9SArjun Roy maybe_zap_len = total_bytes_to_map - /* All bytes to map */
198494ab9eb9SArjun Roy *length + /* Mapped or pending */
198594ab9eb9SArjun Roy (pages_remaining * PAGE_SIZE); /* Failed map. */
1986e9adcfecSMike Kravetz zap_page_range_single(vma, *address, maybe_zap_len, NULL);
198794ab9eb9SArjun Roy err = 0;
198894ab9eb9SArjun Roy }
198994ab9eb9SArjun Roy
199094ab9eb9SArjun Roy if (!err) {
199194ab9eb9SArjun Roy unsigned long leftover_pages = pages_remaining;
199294ab9eb9SArjun Roy int bytes_mapped;
199394ab9eb9SArjun Roy
1994e9adcfecSMike Kravetz /* We called zap_page_range_single, try to reinsert. */
199594ab9eb9SArjun Roy err = vm_insert_pages(vma, *address,
199694ab9eb9SArjun Roy pending_pages,
199794ab9eb9SArjun Roy &pages_remaining);
199894ab9eb9SArjun Roy bytes_mapped = PAGE_SIZE * (leftover_pages - pages_remaining);
199994ab9eb9SArjun Roy *seq += bytes_mapped;
200094ab9eb9SArjun Roy *address += bytes_mapped;
200194ab9eb9SArjun Roy }
200294ab9eb9SArjun Roy if (err) {
200394ab9eb9SArjun Roy /* Either we were unable to zap, OR we zapped, retried an
200494ab9eb9SArjun Roy * insert, and still had an issue. Either way, pages_remaining
200594ab9eb9SArjun Roy * is the number of pages we were unable to map, and we unroll
200694ab9eb9SArjun Roy * some state we speculatively touched before.
200794ab9eb9SArjun Roy */
200894ab9eb9SArjun Roy const int bytes_not_mapped = PAGE_SIZE * pages_remaining;
200994ab9eb9SArjun Roy
201094ab9eb9SArjun Roy *length -= bytes_not_mapped;
201194ab9eb9SArjun Roy zc->recv_skip_hint += bytes_not_mapped;
201294ab9eb9SArjun Roy }
201394ab9eb9SArjun Roy return err;
201494ab9eb9SArjun Roy }
201594ab9eb9SArjun Roy
20163763a24cSArjun Roy static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma,
20173763a24cSArjun Roy struct page **pages,
201894ab9eb9SArjun Roy unsigned int pages_to_map,
201994ab9eb9SArjun Roy unsigned long *address,
202094ab9eb9SArjun Roy u32 *length,
20213763a24cSArjun Roy u32 *seq,
202294ab9eb9SArjun Roy struct tcp_zerocopy_receive *zc,
202394ab9eb9SArjun Roy u32 total_bytes_to_map)
20243763a24cSArjun Roy {
20253763a24cSArjun Roy unsigned long pages_remaining = pages_to_map;
202694ab9eb9SArjun Roy unsigned int pages_mapped;
202794ab9eb9SArjun Roy unsigned int bytes_mapped;
202894ab9eb9SArjun Roy int err;
20293763a24cSArjun Roy
203094ab9eb9SArjun Roy err = vm_insert_pages(vma, *address, pages, &pages_remaining);
203194ab9eb9SArjun Roy pages_mapped = pages_to_map - (unsigned int)pages_remaining;
203294ab9eb9SArjun Roy bytes_mapped = PAGE_SIZE * pages_mapped;
20333763a24cSArjun Roy /* Even if vm_insert_pages fails, it may have partially succeeded in
20343763a24cSArjun Roy * mapping (some but not all of the pages).
20353763a24cSArjun Roy */
20363763a24cSArjun Roy *seq += bytes_mapped;
203794ab9eb9SArjun Roy *address += bytes_mapped;
203894ab9eb9SArjun Roy
203994ab9eb9SArjun Roy if (likely(!err))
204094ab9eb9SArjun Roy return 0;
204194ab9eb9SArjun Roy
204294ab9eb9SArjun Roy /* Error: maybe zap and retry + rollback state for failed inserts. */
204394ab9eb9SArjun Roy return tcp_zerocopy_vm_insert_batch_error(vma, pages + pages_mapped,
204494ab9eb9SArjun Roy pages_remaining, address, length, seq, zc, total_bytes_to_map,
204594ab9eb9SArjun Roy err);
20463763a24cSArjun Roy }
20473763a24cSArjun Roy
20483c5a2fd0SArjun Roy #define TCP_VALID_ZC_MSG_FLAGS (TCP_CMSG_TS)
20497eeba170SArjun Roy static void tcp_zc_finalize_rx_tstamp(struct sock *sk,
20507eeba170SArjun Roy struct tcp_zerocopy_receive *zc,
20517eeba170SArjun Roy struct scm_timestamping_internal *tss)
20527eeba170SArjun Roy {
20537eeba170SArjun Roy unsigned long msg_control_addr;
20547eeba170SArjun Roy struct msghdr cmsg_dummy;
20557eeba170SArjun Roy
20567eeba170SArjun Roy msg_control_addr = (unsigned long)zc->msg_control;
2057c39ef213SKevin Brodsky cmsg_dummy.msg_control_user = (void __user *)msg_control_addr;
20587eeba170SArjun Roy cmsg_dummy.msg_controllen =
20597eeba170SArjun Roy (__kernel_size_t)zc->msg_controllen;
20607eeba170SArjun Roy cmsg_dummy.msg_flags = in_compat_syscall()
20617eeba170SArjun Roy ? MSG_CMSG_COMPAT : 0;
2062a6f8ee58SArjun Roy cmsg_dummy.msg_control_is_user = true;
20637eeba170SArjun Roy zc->msg_flags = 0;
20647eeba170SArjun Roy if (zc->msg_control == msg_control_addr &&
20657eeba170SArjun Roy zc->msg_controllen == cmsg_dummy.msg_controllen) {
20667eeba170SArjun Roy tcp_recv_timestamp(&cmsg_dummy, sk, tss);
20677eeba170SArjun Roy zc->msg_control = (__u64)
2068c39ef213SKevin Brodsky ((uintptr_t)cmsg_dummy.msg_control_user);
20697eeba170SArjun Roy zc->msg_controllen =
20707eeba170SArjun Roy (__u64)cmsg_dummy.msg_controllen;
20717eeba170SArjun Roy zc->msg_flags = (__u32)cmsg_dummy.msg_flags;
20727eeba170SArjun Roy }
20737eeba170SArjun Roy }
20747eeba170SArjun Roy
20757a7f0946SArjun Roy static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm,
20767a7f0946SArjun Roy unsigned long address,
20777a7f0946SArjun Roy bool *mmap_locked)
20787a7f0946SArjun Roy {
2079350f6bbcSMatthew Wilcox (Oracle) struct vm_area_struct *vma = lock_vma_under_rcu(mm, address);
20807a7f0946SArjun Roy
20817a7f0946SArjun Roy if (vma) {
2082350f6bbcSMatthew Wilcox (Oracle) if (vma->vm_ops != &tcp_vm_ops) {
20837a7f0946SArjun Roy vma_end_read(vma);
20847a7f0946SArjun Roy return NULL;
20857a7f0946SArjun Roy }
20867a7f0946SArjun Roy *mmap_locked = false;
20877a7f0946SArjun Roy return vma;
20887a7f0946SArjun Roy }
20897a7f0946SArjun Roy
20907a7f0946SArjun Roy mmap_read_lock(mm);
20917a7f0946SArjun Roy vma = vma_lookup(mm, address);
2092350f6bbcSMatthew Wilcox (Oracle) if (!vma || vma->vm_ops != &tcp_vm_ops) {
20937a7f0946SArjun Roy mmap_read_unlock(mm);
20947a7f0946SArjun Roy return NULL;
20957a7f0946SArjun Roy }
20967a7f0946SArjun Roy *mmap_locked = true;
20977a7f0946SArjun Roy return vma;
20987a7f0946SArjun Roy }
20997a7f0946SArjun Roy
210094ab9eb9SArjun Roy #define TCP_ZEROCOPY_PAGE_BATCH_SIZE 32
210105255b82SEric Dumazet static int tcp_zerocopy_receive(struct sock *sk,
21027eeba170SArjun Roy struct tcp_zerocopy_receive *zc,
21037eeba170SArjun Roy struct scm_timestamping_internal *tss)
210405255b82SEric Dumazet {
210594ab9eb9SArjun Roy u32 length = 0, offset, vma_len, avail_len, copylen = 0;
210605255b82SEric Dumazet unsigned long address = (unsigned long)zc->address;
210794ab9eb9SArjun Roy struct page *pages[TCP_ZEROCOPY_PAGE_BATCH_SIZE];
210818fb76edSArjun Roy s32 copybuf_len = zc->copybuf_len;
210918fb76edSArjun Roy struct tcp_sock *tp = tcp_sk(sk);
211005255b82SEric Dumazet const skb_frag_t *frags = NULL;
211194ab9eb9SArjun Roy unsigned int pages_to_map = 0;
211205255b82SEric Dumazet struct vm_area_struct *vma;
211305255b82SEric Dumazet struct sk_buff *skb = NULL;
211418fb76edSArjun Roy u32 seq = tp->copied_seq;
211594ab9eb9SArjun Roy u32 total_bytes_to_map;
211618fb76edSArjun Roy int inq = tcp_inq(sk);
21177a7f0946SArjun Roy bool mmap_locked;
211893ab6cc6SEric Dumazet int ret;
211993ab6cc6SEric Dumazet
212018fb76edSArjun Roy zc->copybuf_len = 0;
21217eeba170SArjun Roy zc->msg_flags = 0;
212218fb76edSArjun Roy
212305255b82SEric Dumazet if (address & (PAGE_SIZE - 1) || address != zc->address)
212493ab6cc6SEric Dumazet return -EINVAL;
212593ab6cc6SEric Dumazet
212693ab6cc6SEric Dumazet if (sk->sk_state == TCP_LISTEN)
212705255b82SEric Dumazet return -ENOTCONN;
212893ab6cc6SEric Dumazet
212993ab6cc6SEric Dumazet sock_rps_record_flow(sk);
213093ab6cc6SEric Dumazet
2131f21a3c48SArjun Roy if (inq && inq <= copybuf_len)
21327eeba170SArjun Roy return receive_fallback_to_copy(sk, zc, inq, tss);
2133f21a3c48SArjun Roy
2134936ced41SArjun Roy if (inq < PAGE_SIZE) {
2135936ced41SArjun Roy zc->length = 0;
2136936ced41SArjun Roy zc->recv_skip_hint = inq;
2137936ced41SArjun Roy if (!inq && sock_flag(sk, SOCK_DONE))
2138936ced41SArjun Roy return -EIO;
2139936ced41SArjun Roy return 0;
2140936ced41SArjun Roy }
2141936ced41SArjun Roy
21427a7f0946SArjun Roy vma = find_tcp_vma(current->mm, address, &mmap_locked);
21437a7f0946SArjun Roy if (!vma)
2144e776af60SEric Dumazet return -EINVAL;
21457a7f0946SArjun Roy
214618fb76edSArjun Roy vma_len = min_t(unsigned long, zc->length, vma->vm_end - address);
214718fb76edSArjun Roy avail_len = min_t(u32, vma_len, inq);
214894ab9eb9SArjun Roy total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1);
214994ab9eb9SArjun Roy if (total_bytes_to_map) {
215094ab9eb9SArjun Roy if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT))
2151e9adcfecSMike Kravetz zap_page_range_single(vma, address, total_bytes_to_map,
2152e9adcfecSMike Kravetz NULL);
215394ab9eb9SArjun Roy zc->length = total_bytes_to_map;
215405255b82SEric Dumazet zc->recv_skip_hint = 0;
21558f2b0293SSoheil Hassas Yeganeh } else {
215618fb76edSArjun Roy zc->length = avail_len;
215718fb76edSArjun Roy zc->recv_skip_hint = avail_len;
21588f2b0293SSoheil Hassas Yeganeh }
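	/* Walk the receive queue frag by frag, batching fully page-aligned
	 * frags for insertion into the VMA. The first frag that cannot be
	 * mapped ends the loop; its length is reported back through
	 * zc->recv_skip_hint so the caller can fetch it with a normal copy.
	 */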
215905255b82SEric Dumazet ret = 0;
216005255b82SEric Dumazet while (length + PAGE_SIZE <= zc->length) {
216198917cf0SArjun Roy int mappable_offset;
216294ab9eb9SArjun Roy struct page *page;
216398917cf0SArjun Roy
216405255b82SEric Dumazet if (zc->recv_skip_hint < PAGE_SIZE) {
21657fba5309SArjun Roy u32 offset_frag;
21667fba5309SArjun Roy
216705255b82SEric Dumazet if (skb) {
21680e627190SArjun Roy if (zc->recv_skip_hint > 0)
21690e627190SArjun Roy break;
217005255b82SEric Dumazet skb = skb->next;
217105255b82SEric Dumazet offset = seq - TCP_SKB_CB(skb)->seq;
217205255b82SEric Dumazet } else {
217393ab6cc6SEric Dumazet skb = tcp_recv_skb(sk, seq, &offset);
217405255b82SEric Dumazet }
21757eeba170SArjun Roy
21767eeba170SArjun Roy if (TCP_SKB_CB(skb)->has_rxtstamp) {
21777eeba170SArjun Roy tcp_update_recv_tstamps(skb, tss);
21787eeba170SArjun Roy zc->msg_flags |= TCP_CMSG_TS;
21797eeba170SArjun Roy }
218005255b82SEric Dumazet zc->recv_skip_hint = skb->len - offset;
21817fba5309SArjun Roy frags = skb_advance_to_frag(skb, offset, &offset_frag);
21827fba5309SArjun Roy if (!frags || offset_frag)
218305255b82SEric Dumazet break;
218405255b82SEric Dumazet }
2185789762ceSSoheil Hassas Yeganeh
218698917cf0SArjun Roy mappable_offset = find_next_mappable_frag(frags,
218798917cf0SArjun Roy zc->recv_skip_hint);
218898917cf0SArjun Roy if (mappable_offset) {
218998917cf0SArjun Roy zc->recv_skip_hint = mappable_offset;
219005255b82SEric Dumazet break;
2191789762ceSSoheil Hassas Yeganeh }
219294ab9eb9SArjun Roy page = skb_frag_page(frags);
219394ab9eb9SArjun Roy prefetchw(page);
219494ab9eb9SArjun Roy pages[pages_to_map++] = page;
219505255b82SEric Dumazet length += PAGE_SIZE;
219605255b82SEric Dumazet zc->recv_skip_hint -= PAGE_SIZE;
219705255b82SEric Dumazet frags++;
219894ab9eb9SArjun Roy if (pages_to_map == TCP_ZEROCOPY_PAGE_BATCH_SIZE ||
219994ab9eb9SArjun Roy zc->recv_skip_hint < PAGE_SIZE) {
220094ab9eb9SArjun Roy /* Either full batch, or we're about to go to next skb
220194ab9eb9SArjun Roy * (and we cannot unroll failed ops across skbs).
220294ab9eb9SArjun Roy */
220394ab9eb9SArjun Roy ret = tcp_zerocopy_vm_insert_batch(vma, pages,
220494ab9eb9SArjun Roy pages_to_map,
220594ab9eb9SArjun Roy &address, &length,
220694ab9eb9SArjun Roy &seq, zc,
220794ab9eb9SArjun Roy total_bytes_to_map);
22083763a24cSArjun Roy if (ret)
22093763a24cSArjun Roy goto out;
221094ab9eb9SArjun Roy pages_to_map = 0;
22113763a24cSArjun Roy }
22123763a24cSArjun Roy }
221394ab9eb9SArjun Roy if (pages_to_map) {
221494ab9eb9SArjun Roy ret = tcp_zerocopy_vm_insert_batch(vma, pages, pages_to_map,
221594ab9eb9SArjun Roy &address, &length, &seq,
221694ab9eb9SArjun Roy zc, total_bytes_to_map);
221793ab6cc6SEric Dumazet }
221805255b82SEric Dumazet out:
22197a7f0946SArjun Roy if (mmap_locked)
2220d8ed45c5SMichel Lespinasse mmap_read_unlock(current->mm);
22217a7f0946SArjun Roy else
22227a7f0946SArjun Roy vma_end_read(vma);
222318fb76edSArjun Roy /* Try to copy straggler data. */
222418fb76edSArjun Roy if (!ret)
22257eeba170SArjun Roy copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss);
222618fb76edSArjun Roy
222718fb76edSArjun Roy if (length + copylen) {
22287db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq);
222993ab6cc6SEric Dumazet tcp_rcv_space_adjust(sk);
223093ab6cc6SEric Dumazet
223193ab6cc6SEric Dumazet /* Clean up data we have read: This will do ACK frames. */
223293ab6cc6SEric Dumazet tcp_recv_skb(sk, seq, &offset);
223318fb76edSArjun Roy tcp_cleanup_rbuf(sk, length + copylen);
223493ab6cc6SEric Dumazet ret = 0;
223505255b82SEric Dumazet if (length == zc->length)
223605255b82SEric Dumazet zc->recv_skip_hint = 0;
223705255b82SEric Dumazet } else {
223805255b82SEric Dumazet if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE))
223905255b82SEric Dumazet ret = -EIO;
224005255b82SEric Dumazet }
224105255b82SEric Dumazet zc->length = length;
224293ab6cc6SEric Dumazet return ret;
224393ab6cc6SEric Dumazet }
224405255b82SEric Dumazet #endif
224593ab6cc6SEric Dumazet
224698aaa913SMike Maloney /* Similar to __sock_recv_timestamp, but does not require an skb */
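/* Slot convention: tss->ts[0] is the software timestamp and tss->ts[2]
 * the raw hardware timestamp; ts[1] is a legacy slot and is zeroed
 * before the cmsg is emitted.
 */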
2247892bfd3dSFlorian Westphal void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
22489718475eSDeepa Dinamani struct scm_timestamping_internal *tss)
224998aaa913SMike Maloney {
2250887feae3SDeepa Dinamani int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
225198aaa913SMike Maloney bool has_timestamping = false;
225298aaa913SMike Maloney
225398aaa913SMike Maloney if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) {
225498aaa913SMike Maloney if (sock_flag(sk, SOCK_RCVTSTAMP)) {
225598aaa913SMike Maloney if (sock_flag(sk, SOCK_RCVTSTAMPNS)) {
2256887feae3SDeepa Dinamani if (new_tstamp) {
2257df1b4ba9SArnd Bergmann struct __kernel_timespec kts = {
2258df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec,
2259df1b4ba9SArnd Bergmann .tv_nsec = tss->ts[0].tv_nsec,
2260df1b4ba9SArnd Bergmann };
2261887feae3SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
2262887feae3SDeepa Dinamani sizeof(kts), &kts);
2263887feae3SDeepa Dinamani } else {
2264df1b4ba9SArnd Bergmann struct __kernel_old_timespec ts_old = {
2265df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec,
2266df1b4ba9SArnd Bergmann .tv_nsec = tss->ts[0].tv_nsec,
2267df1b4ba9SArnd Bergmann };
22687f1bc6e9SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD,
22699718475eSDeepa Dinamani sizeof(ts_old), &ts_old);
2270887feae3SDeepa Dinamani }
227198aaa913SMike Maloney } else {
2272887feae3SDeepa Dinamani if (new_tstamp) {
2273df1b4ba9SArnd Bergmann struct __kernel_sock_timeval stv = {
2274df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec,
2275df1b4ba9SArnd Bergmann .tv_usec = tss->ts[0].tv_nsec / 1000,
2276df1b4ba9SArnd Bergmann };
2277887feae3SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,
2278887feae3SDeepa Dinamani sizeof(stv), &stv);
2279887feae3SDeepa Dinamani } else {
2280df1b4ba9SArnd Bergmann struct __kernel_old_timeval tv = {
2281df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec,
2282df1b4ba9SArnd Bergmann .tv_usec = tss->ts[0].tv_nsec / 1000,
2283df1b4ba9SArnd Bergmann };
22847f1bc6e9SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,
228598aaa913SMike Maloney sizeof(tv), &tv);
228698aaa913SMike Maloney }
228798aaa913SMike Maloney }
2288887feae3SDeepa Dinamani }
228998aaa913SMike Maloney
2290e3390b30SEric Dumazet if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_SOFTWARE)
229198aaa913SMike Maloney has_timestamping = true;
229298aaa913SMike Maloney else
22939718475eSDeepa Dinamani tss->ts[0] = (struct timespec64) {0};
229498aaa913SMike Maloney }
229598aaa913SMike Maloney
229698aaa913SMike Maloney if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) {
2297e3390b30SEric Dumazet if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_RAW_HARDWARE)
229898aaa913SMike Maloney has_timestamping = true;
229998aaa913SMike Maloney else
23009718475eSDeepa Dinamani tss->ts[2] = (struct timespec64) {0};
230198aaa913SMike Maloney }
230298aaa913SMike Maloney
230398aaa913SMike Maloney if (has_timestamping) {
23049718475eSDeepa Dinamani tss->ts[1] = (struct timespec64) {0};
23059718475eSDeepa Dinamani if (sock_flag(sk, SOCK_TSTAMP_NEW))
23069718475eSDeepa Dinamani put_cmsg_scm_timestamping64(msg, tss);
23079718475eSDeepa Dinamani else
23089718475eSDeepa Dinamani put_cmsg_scm_timestamping(msg, tss);
230998aaa913SMike Maloney }
231098aaa913SMike Maloney }
231198aaa913SMike Maloney
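/* Estimate how many bytes are ready to read without taking the socket
 * lock; only when the lockless snapshot looks inconsistent (negative
 * result, or copied_seq moved underneath us) do we retry under
 * lock_sock().
 */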
2312b75eba76SSoheil Hassas Yeganeh static int tcp_inq_hint(struct sock *sk)
2313b75eba76SSoheil Hassas Yeganeh {
2314b75eba76SSoheil Hassas Yeganeh const struct tcp_sock *tp = tcp_sk(sk);
2315b75eba76SSoheil Hassas Yeganeh u32 copied_seq = READ_ONCE(tp->copied_seq);
2316b75eba76SSoheil Hassas Yeganeh u32 rcv_nxt = READ_ONCE(tp->rcv_nxt);
2317b75eba76SSoheil Hassas Yeganeh int inq;
2318b75eba76SSoheil Hassas Yeganeh
2319b75eba76SSoheil Hassas Yeganeh inq = rcv_nxt - copied_seq;
2320b75eba76SSoheil Hassas Yeganeh if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) {
2321b75eba76SSoheil Hassas Yeganeh lock_sock(sk);
2322b75eba76SSoheil Hassas Yeganeh inq = tp->rcv_nxt - tp->copied_seq;
2323b75eba76SSoheil Hassas Yeganeh release_sock(sk);
2324b75eba76SSoheil Hassas Yeganeh }
23256466e715SSoheil Hassas Yeganeh /* After receiving a FIN, tell the user-space to continue reading
23266466e715SSoheil Hassas Yeganeh * by returning a non-zero inq.
23276466e715SSoheil Hassas Yeganeh */
23286466e715SSoheil Hassas Yeganeh if (inq == 0 && sock_flag(sk, SOCK_DONE))
23296466e715SSoheil Hassas Yeganeh inq = 1;
2330b75eba76SSoheil Hassas Yeganeh return inq;
2331b75eba76SSoheil Hassas Yeganeh }
2332b75eba76SSoheil Hassas Yeganeh
23331da177e4SLinus Torvalds /*
23341da177e4SLinus Torvalds * This routine copies from a sock struct into the user buffer.
23351da177e4SLinus Torvalds *
23361da177e4SLinus Torvalds * Technical note: in 2.3 we work on _locked_ socket, so that
23371da177e4SLinus Torvalds * tricks with *seq access order and skb->users are not required.
23381da177e4SLinus Torvalds * Probably, code can be easily improved even more.
23391da177e4SLinus Torvalds */
23401da177e4SLinus Torvalds
23412cd81161SArjun Roy static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
2342ec095263SOliver Hartkopp int flags, struct scm_timestamping_internal *tss,
23432cd81161SArjun Roy int *cmsg_flags)
23441da177e4SLinus Torvalds {
23451da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
23461da177e4SLinus Torvalds int copied = 0;
23471da177e4SLinus Torvalds u32 peek_seq;
23481da177e4SLinus Torvalds u32 *seq;
23491da177e4SLinus Torvalds unsigned long used;
23502cd81161SArjun Roy int err;
23511da177e4SLinus Torvalds int target; /* Read at least this many bytes */
23521da177e4SLinus Torvalds long timeo;
2353dfbafc99SSabrina Dubroca struct sk_buff *skb, *last;
235477527313SIlpo Järvinen u32 urg_hole = 0;
23551da177e4SLinus Torvalds
23561da177e4SLinus Torvalds err = -ENOTCONN;
23571da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN)
23581da177e4SLinus Torvalds goto out;
23591da177e4SLinus Torvalds
2360f94fd25cSJens Axboe if (tp->recvmsg_inq) {
2361925bba24SArjun Roy *cmsg_flags = TCP_CMSG_INQ;
2362f94fd25cSJens Axboe msg->msg_get_inq = 1;
2363f94fd25cSJens Axboe }
2364ec095263SOliver Hartkopp timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
23651da177e4SLinus Torvalds
23661da177e4SLinus Torvalds /* Urgent data needs to be handled specially. */
23671da177e4SLinus Torvalds if (flags & MSG_OOB)
23681da177e4SLinus Torvalds goto recv_urg;
23691da177e4SLinus Torvalds
2370c0e88ff0SPavel Emelyanov if (unlikely(tp->repair)) {
2371c0e88ff0SPavel Emelyanov err = -EPERM;
2372c0e88ff0SPavel Emelyanov if (!(flags & MSG_PEEK))
2373c0e88ff0SPavel Emelyanov goto out;
2374c0e88ff0SPavel Emelyanov
2375c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_SEND_QUEUE)
2376c0e88ff0SPavel Emelyanov goto recv_sndq;
2377c0e88ff0SPavel Emelyanov
2378c0e88ff0SPavel Emelyanov err = -EINVAL;
2379c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_NO_QUEUE)
2380c0e88ff0SPavel Emelyanov goto out;
2381c0e88ff0SPavel Emelyanov
2382c0e88ff0SPavel Emelyanov /* 'common' recv queue MSG_PEEK-ing */
2383c0e88ff0SPavel Emelyanov }
2384c0e88ff0SPavel Emelyanov
23851da177e4SLinus Torvalds seq = &tp->copied_seq;
23861da177e4SLinus Torvalds if (flags & MSG_PEEK) {
23871da177e4SLinus Torvalds peek_seq = tp->copied_seq;
23881da177e4SLinus Torvalds seq = &peek_seq;
23891da177e4SLinus Torvalds }
23901da177e4SLinus Torvalds
23911da177e4SLinus Torvalds target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
23921da177e4SLinus Torvalds
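	/* Main receive loop: walk sk_receive_queue copying data to the
	 * user, stepping around urgent data, and sleep in sk_wait_data()
	 * whenever fewer than "target" bytes have been copied and no
	 * backlog is pending.
	 */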
23931da177e4SLinus Torvalds do {
23941da177e4SLinus Torvalds u32 offset;
23951da177e4SLinus Torvalds
23961da177e4SLinus Torvalds /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
2397b96c51bdSEric Dumazet if (unlikely(tp->urg_data) && tp->urg_seq == *seq) {
23981da177e4SLinus Torvalds if (copied)
23991da177e4SLinus Torvalds break;
24001da177e4SLinus Torvalds if (signal_pending(current)) {
24011da177e4SLinus Torvalds copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
24021da177e4SLinus Torvalds break;
24031da177e4SLinus Torvalds }
24041da177e4SLinus Torvalds }
24051da177e4SLinus Torvalds
24061da177e4SLinus Torvalds /* Next get a buffer. */
24071da177e4SLinus Torvalds
2408dfbafc99SSabrina Dubroca last = skb_peek_tail(&sk->sk_receive_queue);
240991521944SDavid S. Miller skb_queue_walk(&sk->sk_receive_queue, skb) {
2410dfbafc99SSabrina Dubroca last = skb;
24111da177e4SLinus Torvalds /* Now that we have two receive queues this
24121da177e4SLinus Torvalds * shouldn't happen.
24131da177e4SLinus Torvalds */
2414d792c100SIlpo Järvinen if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
2415e56b8ce3SRandy Dunlap "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n",
24162af6fd8bSJoe Perches *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
2417d792c100SIlpo Järvinen flags))
24181da177e4SLinus Torvalds break;
2419d792c100SIlpo Järvinen
24201da177e4SLinus Torvalds offset = *seq - TCP_SKB_CB(skb)->seq;
24219d691539SEric Dumazet if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
24229d691539SEric Dumazet pr_err_once("%s: found a SYN, please report !\n", __func__);
24231da177e4SLinus Torvalds offset--;
24249d691539SEric Dumazet }
24251da177e4SLinus Torvalds if (offset < skb->len)
24261da177e4SLinus Torvalds goto found_ok_skb;
2427e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
24281da177e4SLinus Torvalds goto found_fin_ok;
24292af6fd8bSJoe Perches WARN(!(flags & MSG_PEEK),
2430e56b8ce3SRandy Dunlap "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n",
24312af6fd8bSJoe Perches *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
243291521944SDavid S. Miller }
24331da177e4SLinus Torvalds
24341da177e4SLinus Torvalds /* Well, if we have backlog, try to process it now. */
24351da177e4SLinus Torvalds
24369ed498c6SEric Dumazet if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
24371da177e4SLinus Torvalds break;
24381da177e4SLinus Torvalds
24391da177e4SLinus Torvalds if (copied) {
24408bd172b7SEric Dumazet if (!timeo ||
24418bd172b7SEric Dumazet sk->sk_err ||
24421da177e4SLinus Torvalds sk->sk_state == TCP_CLOSE ||
24431da177e4SLinus Torvalds (sk->sk_shutdown & RCV_SHUTDOWN) ||
2444518a09efSDavid S. Miller signal_pending(current))
24451da177e4SLinus Torvalds break;
24461da177e4SLinus Torvalds } else {
24471da177e4SLinus Torvalds if (sock_flag(sk, SOCK_DONE))
24481da177e4SLinus Torvalds break;
24491da177e4SLinus Torvalds
24501da177e4SLinus Torvalds if (sk->sk_err) {
24511da177e4SLinus Torvalds copied = sock_error(sk);
24521da177e4SLinus Torvalds break;
24531da177e4SLinus Torvalds }
24541da177e4SLinus Torvalds
24551da177e4SLinus Torvalds if (sk->sk_shutdown & RCV_SHUTDOWN)
24561da177e4SLinus Torvalds break;
24571da177e4SLinus Torvalds
24581da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE) {
24591da177e4SLinus Torvalds /* This occurs when the user tries to read
24601da177e4SLinus Torvalds * from a never-connected socket.
24611da177e4SLinus Torvalds */
24621da177e4SLinus Torvalds copied = -ENOTCONN;
24631da177e4SLinus Torvalds break;
24641da177e4SLinus Torvalds }
24651da177e4SLinus Torvalds
24661da177e4SLinus Torvalds if (!timeo) {
24671da177e4SLinus Torvalds copied = -EAGAIN;
24681da177e4SLinus Torvalds break;
24691da177e4SLinus Torvalds }
24701da177e4SLinus Torvalds
24711da177e4SLinus Torvalds if (signal_pending(current)) {
24721da177e4SLinus Torvalds copied = sock_intr_errno(timeo);
24731da177e4SLinus Torvalds break;
24741da177e4SLinus Torvalds }
24751da177e4SLinus Torvalds }
24761da177e4SLinus Torvalds
24771da177e4SLinus Torvalds if (copied >= target) {
24781da177e4SLinus Torvalds /* Do not sleep, just process backlog. */
247993afcfd1SEric Dumazet __sk_flush_backlog(sk);
2480dfbafc99SSabrina Dubroca } else {
248129fbc26eSEric Dumazet tcp_cleanup_rbuf(sk, copied);
2482419ce133SPaolo Abeni err = sk_wait_data(sk, &timeo, last);
2483419ce133SPaolo Abeni if (err < 0) {
2484419ce133SPaolo Abeni err = copied ? : err;
2485419ce133SPaolo Abeni goto out;
2486419ce133SPaolo Abeni }
2487dfbafc99SSabrina Dubroca }
24881da177e4SLinus Torvalds
248977527313SIlpo Järvinen if ((flags & MSG_PEEK) &&
249077527313SIlpo Järvinen (peek_seq - copied - urg_hole != tp->copied_seq)) {
2491e87cc472SJoe Perches net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
2492e87cc472SJoe Perches current->comm,
2493e87cc472SJoe Perches task_pid_nr(current));
24941da177e4SLinus Torvalds peek_seq = tp->copied_seq;
24951da177e4SLinus Torvalds }
24961da177e4SLinus Torvalds continue;
24971da177e4SLinus Torvalds
24981da177e4SLinus Torvalds found_ok_skb:
24991da177e4SLinus Torvalds /* Ok so how much can we use? */
25001da177e4SLinus Torvalds used = skb->len - offset;
25011da177e4SLinus Torvalds if (len < used)
25021da177e4SLinus Torvalds used = len;
25031da177e4SLinus Torvalds
25041da177e4SLinus Torvalds /* Do we have urgent data here? */
2505b96c51bdSEric Dumazet if (unlikely(tp->urg_data)) {
25061da177e4SLinus Torvalds u32 urg_offset = tp->urg_seq - *seq;
25071da177e4SLinus Torvalds if (urg_offset < used) {
25081da177e4SLinus Torvalds if (!urg_offset) {
25091da177e4SLinus Torvalds if (!sock_flag(sk, SOCK_URGINLINE)) {
25107db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + 1);
251177527313SIlpo Järvinen urg_hole++;
25121da177e4SLinus Torvalds offset++;
25131da177e4SLinus Torvalds used--;
25141da177e4SLinus Torvalds if (!used)
25151da177e4SLinus Torvalds goto skip_copy;
25161da177e4SLinus Torvalds }
25171da177e4SLinus Torvalds } else
25181da177e4SLinus Torvalds used = urg_offset;
25191da177e4SLinus Torvalds }
25201da177e4SLinus Torvalds }
25211da177e4SLinus Torvalds
25221da177e4SLinus Torvalds if (!(flags & MSG_TRUNC)) {
252351f3d02bSDavid S. Miller err = skb_copy_datagram_msg(skb, offset, msg, used);
25241da177e4SLinus Torvalds if (err) {
25251da177e4SLinus Torvalds /* Exception. Bailout! */
25261da177e4SLinus Torvalds if (!copied)
25271da177e4SLinus Torvalds copied = -EFAULT;
25281da177e4SLinus Torvalds break;
25291da177e4SLinus Torvalds }
25301da177e4SLinus Torvalds }
25311da177e4SLinus Torvalds
25327db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + used);
25331da177e4SLinus Torvalds copied += used;
25341da177e4SLinus Torvalds len -= used;
25351da177e4SLinus Torvalds
25361da177e4SLinus Torvalds tcp_rcv_space_adjust(sk);
25371da177e4SLinus Torvalds
25381da177e4SLinus Torvalds skip_copy:
2539b96c51bdSEric Dumazet if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) {
25407b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, 0);
254131770e34SFlorian Westphal tcp_fast_path_check(sk);
254231770e34SFlorian Westphal }
25431da177e4SLinus Torvalds
254498aaa913SMike Maloney if (TCP_SKB_CB(skb)->has_rxtstamp) {
25452cd81161SArjun Roy tcp_update_recv_tstamps(skb, tss);
2546925bba24SArjun Roy *cmsg_flags |= TCP_CMSG_TS;
254798aaa913SMike Maloney }
2548cc4de047SKelly Littlepage
2549cc4de047SKelly Littlepage if (used + offset < skb->len)
2550cc4de047SKelly Littlepage continue;
2551cc4de047SKelly Littlepage
2552e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
25531da177e4SLinus Torvalds goto found_fin_ok;
25547bced397SDan Williams if (!(flags & MSG_PEEK))
25553df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb);
25561da177e4SLinus Torvalds continue;
25571da177e4SLinus Torvalds
25581da177e4SLinus Torvalds found_fin_ok:
25591da177e4SLinus Torvalds /* Process the FIN. */
25607db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + 1);
25617bced397SDan Williams if (!(flags & MSG_PEEK))
25623df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb);
25631da177e4SLinus Torvalds break;
25641da177e4SLinus Torvalds } while (len > 0);
25651da177e4SLinus Torvalds
25661da177e4SLinus Torvalds /* According to UNIX98, msg_name/msg_namelen are ignored
25671da177e4SLinus Torvalds * on a connected socket. I was just happy when I found this 8) --ANK
25681da177e4SLinus Torvalds */
25691da177e4SLinus Torvalds
25701da177e4SLinus Torvalds /* Clean up data we have read: This will do ACK frames. */
25710e4b4992SChris Leech tcp_cleanup_rbuf(sk, copied);
25721da177e4SLinus Torvalds return copied;
25731da177e4SLinus Torvalds
25741da177e4SLinus Torvalds out:
25751da177e4SLinus Torvalds return err;
25761da177e4SLinus Torvalds
25771da177e4SLinus Torvalds recv_urg:
2578377f0a08SRami Rosen err = tcp_recv_urg(sk, msg, len, flags);
25791da177e4SLinus Torvalds goto out;
2580c0e88ff0SPavel Emelyanov
2581c0e88ff0SPavel Emelyanov recv_sndq:
2582c0e88ff0SPavel Emelyanov err = tcp_peek_sndq(sk, msg, len);
2583c0e88ff0SPavel Emelyanov goto out;
25841da177e4SLinus Torvalds }
25852cd81161SArjun Roy
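/* The TCP_CM_INQ ancillary message emitted below is opt-in. A rough
 * userspace sketch ("fd" and a prepared struct msghdr "msg" with cmsg
 * buffer space are assumed):
 *
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_INQ, &one, sizeof(one));
 *	recvmsg(fd, &msg, 0);
 *	// then walk CMSG_FIRSTHDR()/CMSG_NXTHDR() looking for a
 *	// cmsg_level == SOL_TCP, cmsg_type == TCP_CM_INQ entry whose
 *	// int payload is the number of bytes still queued for reading.
 */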
2586ec095263SOliver Hartkopp int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
2587ec095263SOliver Hartkopp int *addr_len)
25882cd81161SArjun Roy {
2589f94fd25cSJens Axboe int cmsg_flags = 0, ret;
25902cd81161SArjun Roy struct scm_timestamping_internal tss;
25912cd81161SArjun Roy
25922cd81161SArjun Roy if (unlikely(flags & MSG_ERRQUEUE))
25932cd81161SArjun Roy return inet_recv_error(sk, msg, len, addr_len);
25942cd81161SArjun Roy
25952cd81161SArjun Roy if (sk_can_busy_loop(sk) &&
25962cd81161SArjun Roy skb_queue_empty_lockless(&sk->sk_receive_queue) &&
25972cd81161SArjun Roy sk->sk_state == TCP_ESTABLISHED)
2598ec095263SOliver Hartkopp sk_busy_loop(sk, flags & MSG_DONTWAIT);
25992cd81161SArjun Roy
26002cd81161SArjun Roy lock_sock(sk);
2601ec095263SOliver Hartkopp ret = tcp_recvmsg_locked(sk, msg, len, flags, &tss, &cmsg_flags);
26022cd81161SArjun Roy release_sock(sk);
26032cd81161SArjun Roy
2604f94fd25cSJens Axboe if ((cmsg_flags || msg->msg_get_inq) && ret >= 0) {
2605925bba24SArjun Roy if (cmsg_flags & TCP_CMSG_TS)
26062cd81161SArjun Roy tcp_recv_timestamp(msg, sk, &tss);
2607f94fd25cSJens Axboe if (msg->msg_get_inq) {
2608f94fd25cSJens Axboe msg->msg_inq = tcp_inq_hint(sk);
2609f94fd25cSJens Axboe if (cmsg_flags & TCP_CMSG_INQ)
2610f94fd25cSJens Axboe put_cmsg(msg, SOL_TCP, TCP_CM_INQ,
2611f94fd25cSJens Axboe sizeof(msg->msg_inq), &msg->msg_inq);
26122cd81161SArjun Roy }
26132cd81161SArjun Roy }
26142cd81161SArjun Roy return ret;
26152cd81161SArjun Roy }
26164bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_recvmsg);
26171da177e4SLinus Torvalds
2618490d5046SIlpo Järvinen void tcp_set_state(struct sock *sk, int state)
2619490d5046SIlpo Järvinen {
2620490d5046SIlpo Järvinen int oldstate = sk->sk_state;
2621490d5046SIlpo Järvinen
2622d4487491SLawrence Brakmo /* We defined a new enum for TCP states that are exported in BPF
2623d4487491SLawrence Brakmo * so as not to force the internal TCP states to be frozen. The
2624d4487491SLawrence Brakmo * following checks will detect if an internal state value ever
2625d4487491SLawrence Brakmo * differs from the BPF value. If this ever happens, then we will
2626d4487491SLawrence Brakmo * need to remap the internal value to the BPF value before calling
2627d4487491SLawrence Brakmo * tcp_call_bpf_2arg.
2628d4487491SLawrence Brakmo */
2629d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED);
2630d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT);
2631d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV);
2632d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1);
2633d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2);
2634d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT);
2635d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE);
2636d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT);
2637d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK);
2638d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN);
2639d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING);
2640d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV);
2641d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES);
2642d4487491SLawrence Brakmo
264397a19cafSYonghong Song /* bpf uapi header bpf.h defines an anonymous enum with values
264497a19cafSYonghong Song * BPF_TCP_* used by bpf programs. Currently gcc built vmlinux
264597a19cafSYonghong Song * is able to emit this enum in DWARF due to the above BUILD_BUG_ON.
264697a19cafSYonghong Song * But clang built vmlinux does not have this enum in DWARF
264797a19cafSYonghong Song * since clang removes the above code before generating IR/debuginfo.
264897a19cafSYonghong Song * Let us explicitly emit the type debuginfo to ensure the
264997a19cafSYonghong Song * above-mentioned anonymous enum in the vmlinux DWARF and hence BTF
265097a19cafSYonghong Song * regardless of which compiler is used.
265197a19cafSYonghong Song */
265297a19cafSYonghong Song BTF_TYPE_EMIT_ENUM(BPF_TCP_ESTABLISHED);
265397a19cafSYonghong Song
2654d4487491SLawrence Brakmo if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG))
2655d4487491SLawrence Brakmo tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state);
2656e8fce239SSong Liu
2657490d5046SIlpo Järvinen switch (state) {
2658490d5046SIlpo Järvinen case TCP_ESTABLISHED:
2659490d5046SIlpo Järvinen if (oldstate != TCP_ESTABLISHED)
266081cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2661490d5046SIlpo Järvinen break;
2662acdf1754SJason Xing case TCP_CLOSE_WAIT:
2663acdf1754SJason Xing if (oldstate == TCP_SYN_RECV)
2664acdf1754SJason Xing TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2665acdf1754SJason Xing break;
2666490d5046SIlpo Järvinen
2667490d5046SIlpo Järvinen case TCP_CLOSE:
2668490d5046SIlpo Järvinen if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
266981cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
2670490d5046SIlpo Järvinen
2671490d5046SIlpo Järvinen sk->sk_prot->unhash(sk);
2672490d5046SIlpo Järvinen if (inet_csk(sk)->icsk_bind_hash &&
2673490d5046SIlpo Järvinen !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
2674ab1e0a13SArnaldo Carvalho de Melo inet_put_port(sk);
2675a8eceea8SJoe Perches fallthrough;
2676490d5046SIlpo Järvinen default:
2677acdf1754SJason Xing if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
267874688e48SPavel Emelyanov TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2679490d5046SIlpo Järvinen }
2680490d5046SIlpo Järvinen
2681490d5046SIlpo Järvinen /* Change state AFTER socket is unhashed to avoid closed
2682490d5046SIlpo Järvinen * socket sitting in hash tables.
2683490d5046SIlpo Järvinen */
2684563e0bb0SYafang Shao inet_sk_state_store(sk, state);
2685490d5046SIlpo Järvinen }
2686490d5046SIlpo Järvinen EXPORT_SYMBOL_GPL(tcp_set_state);
2687490d5046SIlpo Järvinen
26881da177e4SLinus Torvalds /*
26891da177e4SLinus Torvalds * State processing on a close. This implements the state shift for
26901da177e4SLinus Torvalds * sending our FIN frame. Note that we only send a FIN for some
26911da177e4SLinus Torvalds * states. A shutdown() may have already sent the FIN, or we may be
26921da177e4SLinus Torvalds * closed.
26931da177e4SLinus Torvalds */
26941da177e4SLinus Torvalds
26959b5b5cffSArjan van de Ven static const unsigned char new_state[16] = {
26961da177e4SLinus Torvalds /* current state: new state: action: */
26970980c1e3SEric Dumazet [0 /* (Invalid) */] = TCP_CLOSE,
26980980c1e3SEric Dumazet [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
26990980c1e3SEric Dumazet [TCP_SYN_SENT] = TCP_CLOSE,
27000980c1e3SEric Dumazet [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
27010980c1e3SEric Dumazet [TCP_FIN_WAIT1] = TCP_FIN_WAIT1,
27020980c1e3SEric Dumazet [TCP_FIN_WAIT2] = TCP_FIN_WAIT2,
27030980c1e3SEric Dumazet [TCP_TIME_WAIT] = TCP_CLOSE,
27040980c1e3SEric Dumazet [TCP_CLOSE] = TCP_CLOSE,
27050980c1e3SEric Dumazet [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN,
27060980c1e3SEric Dumazet [TCP_LAST_ACK] = TCP_LAST_ACK,
27070980c1e3SEric Dumazet [TCP_LISTEN] = TCP_CLOSE,
27080980c1e3SEric Dumazet [TCP_CLOSING] = TCP_CLOSING,
27090980c1e3SEric Dumazet [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */
27101da177e4SLinus Torvalds };
27111da177e4SLinus Torvalds
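/* Each new_state[] entry encodes the successor state in the low
 * TCP_STATE_MASK bits, optionally OR-ed with TCP_ACTION_FIN when the
 * transition must emit a FIN, e.g. ESTABLISHED -> FIN_WAIT1 | FIN.
 */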
27121da177e4SLinus Torvalds static int tcp_close_state(struct sock *sk)
27131da177e4SLinus Torvalds {
27141da177e4SLinus Torvalds int next = (int)new_state[sk->sk_state];
27151da177e4SLinus Torvalds int ns = next & TCP_STATE_MASK;
27161da177e4SLinus Torvalds
27171da177e4SLinus Torvalds tcp_set_state(sk, ns);
27181da177e4SLinus Torvalds
27191da177e4SLinus Torvalds return next & TCP_ACTION_FIN;
27201da177e4SLinus Torvalds }
27211da177e4SLinus Torvalds
27221da177e4SLinus Torvalds /*
27231da177e4SLinus Torvalds * Shutdown the sending side of a connection. Much like close except
27241f29b058SSatoru SATOH * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
27251da177e4SLinus Torvalds */
27261da177e4SLinus Torvalds
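/* Typically reached from userspace via shutdown(fd, SHUT_WR) or
 * shutdown(fd, SHUT_RDWR), both of which include SEND_SHUTDOWN in
 * "how".
 */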
27271da177e4SLinus Torvalds void tcp_shutdown(struct sock *sk, int how)
27281da177e4SLinus Torvalds {
27291da177e4SLinus Torvalds /* We need to grab some memory, and put together a FIN,
27301da177e4SLinus Torvalds * and then put it into the queue to be sent.
27311da177e4SLinus Torvalds * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
27321da177e4SLinus Torvalds */
27331da177e4SLinus Torvalds if (!(how & SEND_SHUTDOWN))
27341da177e4SLinus Torvalds return;
27351da177e4SLinus Torvalds
27361da177e4SLinus Torvalds /* If we've already sent a FIN, or it's a closed state, skip this. */
27371da177e4SLinus Torvalds if ((1 << sk->sk_state) &
27381da177e4SLinus Torvalds (TCPF_ESTABLISHED | TCPF_SYN_SENT |
2739f47d0d32SEric Dumazet TCPF_CLOSE_WAIT)) {
27401da177e4SLinus Torvalds /* Clear out any half completed packets. FIN if needed. */
27411da177e4SLinus Torvalds if (tcp_close_state(sk))
27421da177e4SLinus Torvalds tcp_send_fin(sk);
27431da177e4SLinus Torvalds }
27441da177e4SLinus Torvalds }
27454bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_shutdown);
27461da177e4SLinus Torvalds
274719757cebSEric Dumazet int tcp_orphan_count_sum(void)
274819757cebSEric Dumazet {
274919757cebSEric Dumazet int i, total = 0;
275019757cebSEric Dumazet
275119757cebSEric Dumazet for_each_possible_cpu(i)
275219757cebSEric Dumazet total += per_cpu(tcp_orphan_count, i);
275319757cebSEric Dumazet
275419757cebSEric Dumazet return max(total, 0);
275519757cebSEric Dumazet }
275619757cebSEric Dumazet
275719757cebSEric Dumazet static int tcp_orphan_cache;
275819757cebSEric Dumazet static struct timer_list tcp_orphan_timer;
275919757cebSEric Dumazet #define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100)
276019757cebSEric Dumazet
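/* Summing the per-cpu tcp_orphan_count on every limit check would be
 * needlessly expensive, so a timer refreshes a cached estimate every
 * 100ms and tcp_too_many_orphans() reads the cache instead.
 */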
276119757cebSEric Dumazet static void tcp_orphan_update(struct timer_list *unused)
276219757cebSEric Dumazet {
276319757cebSEric Dumazet WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum());
276419757cebSEric Dumazet mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
276519757cebSEric Dumazet }
276619757cebSEric Dumazet
276719757cebSEric Dumazet static bool tcp_too_many_orphans(int shift)
276819757cebSEric Dumazet {
276947e6ab24SKuniyuki Iwashima return READ_ONCE(tcp_orphan_cache) << shift >
277047e6ab24SKuniyuki Iwashima READ_ONCE(sysctl_tcp_max_orphans);
277119757cebSEric Dumazet }
277219757cebSEric Dumazet
2773efcdbf24SArun Sharma bool tcp_check_oom(struct sock *sk, int shift)
2774efcdbf24SArun Sharma {
2775efcdbf24SArun Sharma bool too_many_orphans, out_of_socket_memory;
2776efcdbf24SArun Sharma
277719757cebSEric Dumazet too_many_orphans = tcp_too_many_orphans(shift);
2778efcdbf24SArun Sharma out_of_socket_memory = tcp_out_of_memory(sk);
2779efcdbf24SArun Sharma
2780e87cc472SJoe Perches if (too_many_orphans)
2781e87cc472SJoe Perches net_info_ratelimited("too many orphaned sockets\n");
2782e87cc472SJoe Perches if (out_of_socket_memory)
2783e87cc472SJoe Perches net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
2784efcdbf24SArun Sharma return too_many_orphans || out_of_socket_memory;
2785efcdbf24SArun Sharma }
2786efcdbf24SArun Sharma
278777c3c956SPaolo Abeni void __tcp_close(struct sock *sk, long timeout)
27881da177e4SLinus Torvalds {
27891da177e4SLinus Torvalds struct sk_buff *skb;
27901da177e4SLinus Torvalds int data_was_unread = 0;
279175c2d907SHerbert Xu int state;
27921da177e4SLinus Torvalds
2793e14cadfdSEric Dumazet WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
27941da177e4SLinus Torvalds
27951da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN) {
27961da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE);
27971da177e4SLinus Torvalds
27981da177e4SLinus Torvalds /* Special case. */
27990a5578cfSArnaldo Carvalho de Melo inet_csk_listen_stop(sk);
28001da177e4SLinus Torvalds
28011da177e4SLinus Torvalds goto adjudge_to_death;
28021da177e4SLinus Torvalds }
28031da177e4SLinus Torvalds
28041da177e4SLinus Torvalds /* We need to flush the recv. buffs. We do this only on the
28051da177e4SLinus Torvalds * descriptor close, not protocol-sourced closes, because the
28061da177e4SLinus Torvalds * reader process may not have drained the data yet!
28071da177e4SLinus Torvalds */
28081da177e4SLinus Torvalds while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
2809e11ecddfSEric Dumazet u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
2810e11ecddfSEric Dumazet
2811e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
2812e11ecddfSEric Dumazet len--;
28131da177e4SLinus Torvalds data_was_unread += len;
28141da177e4SLinus Torvalds __kfree_skb(skb);
28151da177e4SLinus Torvalds }
28161da177e4SLinus Torvalds
2817565b7b2dSKonstantin Khorenko /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
2818565b7b2dSKonstantin Khorenko if (sk->sk_state == TCP_CLOSE)
2819565b7b2dSKonstantin Khorenko goto adjudge_to_death;
2820565b7b2dSKonstantin Khorenko
282165bb723cSGerrit Renker /* As outlined in RFC 2525, section 2.17, we send a RST here because
282265bb723cSGerrit Renker * data was lost. To witness the awful effects of the old behavior of
282365bb723cSGerrit Renker * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
282465bb723cSGerrit Renker * GET in an FTP client, suspend the process, wait for the client to
282565bb723cSGerrit Renker * advertise a zero window, then kill -9 the FTP client, wheee...
282665bb723cSGerrit Renker * Note: timeout is always zero in such a case.
28271da177e4SLinus Torvalds */
2828ee995283SPavel Emelyanov if (unlikely(tcp_sk(sk)->repair)) {
2829ee995283SPavel Emelyanov sk->sk_prot->disconnect(sk, 0);
2830ee995283SPavel Emelyanov } else if (data_was_unread) {
28311da177e4SLinus Torvalds /* Unread data was tossed, zap the connection. */
28326aef70a8SEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
28331da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE);
2834aa133076SWu Fengguang tcp_send_active_reset(sk, sk->sk_allocation);
28351da177e4SLinus Torvalds } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
28361da177e4SLinus Torvalds /* Check zero linger _after_ checking for unread data. */
28371da177e4SLinus Torvalds sk->sk_prot->disconnect(sk, 0);
28386aef70a8SEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
28391da177e4SLinus Torvalds } else if (tcp_close_state(sk)) {
28401da177e4SLinus Torvalds /* We FIN if the application ate all the data before
28411da177e4SLinus Torvalds * zapping the connection.
28421da177e4SLinus Torvalds */
28431da177e4SLinus Torvalds
28441da177e4SLinus Torvalds /* RED-PEN. Formally speaking, we have broken TCP state
28451da177e4SLinus Torvalds * machine. State transitions:
28461da177e4SLinus Torvalds *
28471da177e4SLinus Torvalds * TCP_ESTABLISHED -> TCP_FIN_WAIT1
2848f47d0d32SEric Dumazet * TCP_SYN_RECV -> TCP_FIN_WAIT1 (it is difficult)
28491da177e4SLinus Torvalds * TCP_CLOSE_WAIT -> TCP_LAST_ACK
28501da177e4SLinus Torvalds *
28511da177e4SLinus Torvalds * are legal only when FIN has been sent (i.e. in window),
28521da177e4SLinus Torvalds * rather than queued out of window. Purists blame.
28531da177e4SLinus Torvalds *
28541da177e4SLinus Torvalds * F.e. "RFC state" is ESTABLISHED,
28551da177e4SLinus Torvalds * if Linux state is FIN-WAIT-1, but FIN is still not sent.
28561da177e4SLinus Torvalds *
28571da177e4SLinus Torvalds * The visible deviations are that sometimes
28581da177e4SLinus Torvalds * we enter time-wait state, when it is not required really
28591da177e4SLinus Torvalds * (harmless), do not send active resets, when they are
28601da177e4SLinus Torvalds * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
28611da177e4SLinus Torvalds * they look as CLOSING or LAST_ACK for Linux)
28621da177e4SLinus Torvalds * Probably, I missed some more holelets.
28631da177e4SLinus Torvalds * --ANK
28648336886fSJerry Chu * XXX (TFO) - To start off we don't support SYN+ACK+FIN
28658336886fSJerry Chu * in a single packet! (May consider it later but will
28668336886fSJerry Chu * probably need API support or TCP_CORK SYN-ACK until
28678336886fSJerry Chu * data is written and socket is closed.)
28681da177e4SLinus Torvalds */
28691da177e4SLinus Torvalds tcp_send_fin(sk);
28701da177e4SLinus Torvalds }
28711da177e4SLinus Torvalds
28721da177e4SLinus Torvalds sk_stream_wait_close(sk, timeout);
28731da177e4SLinus Torvalds
28741da177e4SLinus Torvalds adjudge_to_death:
287575c2d907SHerbert Xu state = sk->sk_state;
287675c2d907SHerbert Xu sock_hold(sk);
287775c2d907SHerbert Xu sock_orphan(sk);
287875c2d907SHerbert Xu
28791da177e4SLinus Torvalds local_bh_disable();
28801da177e4SLinus Torvalds bh_lock_sock(sk);
28818873c064SEric Dumazet /* remove backlog if any, without releasing ownership. */
28828873c064SEric Dumazet __release_sock(sk);
28831da177e4SLinus Torvalds
288419757cebSEric Dumazet this_cpu_inc(tcp_orphan_count);
2885eb4dea58SHerbert Xu
288675c2d907SHerbert Xu /* Have we already been destroyed by a softirq or backlog? */
288775c2d907SHerbert Xu if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
288875c2d907SHerbert Xu goto out;
28891da177e4SLinus Torvalds
28901da177e4SLinus Torvalds /* This is a (useful) BSD violation of the RFC. There is a
28911da177e4SLinus Torvalds * problem with TCP as specified in that the other end could
28921da177e4SLinus Torvalds * keep a socket open forever with no application left this end.
2893b10bd54cSJesper Juhl * We use a 1 minute timeout (about the same as BSD) then kill
28941da177e4SLinus Torvalds * our end. If they send after that then tough - BUT: long enough
28951da177e4SLinus Torvalds * that we won't make the old 4*rto = almost no time - whoops
28961da177e4SLinus Torvalds * reset mistake.
28971da177e4SLinus Torvalds *
28981da177e4SLinus Torvalds * Nope, it was not mistake. It is really desired behaviour
28991da177e4SLinus Torvalds * f.e. on http servers, when such sockets are useless, but
29001da177e4SLinus Torvalds * consume significant resources. Let's do it with special
29011da177e4SLinus Torvalds * linger2 option. --ANK
29021da177e4SLinus Torvalds */
29031da177e4SLinus Torvalds
29041da177e4SLinus Torvalds if (sk->sk_state == TCP_FIN_WAIT2) {
29051da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
2906a81722ddSEric Dumazet if (READ_ONCE(tp->linger2) < 0) {
29071da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE);
29081da177e4SLinus Torvalds tcp_send_active_reset(sk, GFP_ATOMIC);
290902a1d6e7SEric Dumazet __NET_INC_STATS(sock_net(sk),
2910de0744afSPavel Emelyanov LINUX_MIB_TCPABORTONLINGER);
29111da177e4SLinus Torvalds } else {
2912463c84b9SArnaldo Carvalho de Melo const int tmo = tcp_fin_time(sk);
29131da177e4SLinus Torvalds
29141da177e4SLinus Torvalds if (tmo > TCP_TIMEWAIT_LEN) {
291552499afeSDavid S. Miller inet_csk_reset_keepalive_timer(sk,
291652499afeSDavid S. Miller tmo - TCP_TIMEWAIT_LEN);
29171da177e4SLinus Torvalds } else {
29181da177e4SLinus Torvalds tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
29191da177e4SLinus Torvalds goto out;
29201da177e4SLinus Torvalds }
29211da177e4SLinus Torvalds }
29221da177e4SLinus Torvalds }
29231da177e4SLinus Torvalds if (sk->sk_state != TCP_CLOSE) {
2924efcdbf24SArun Sharma if (tcp_check_oom(sk, 0)) {
29251da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE);
29261da177e4SLinus Torvalds tcp_send_active_reset(sk, GFP_ATOMIC);
292702a1d6e7SEric Dumazet __NET_INC_STATS(sock_net(sk),
2928de0744afSPavel Emelyanov LINUX_MIB_TCPABORTONMEMORY);
29294ee806d5SDan Streetman } else if (!check_net(sock_net(sk))) {
29304ee806d5SDan Streetman /* Not possible to send reset; just close */
29314ee806d5SDan Streetman tcp_set_state(sk, TCP_CLOSE);
29321da177e4SLinus Torvalds }
29331da177e4SLinus Torvalds }
29341da177e4SLinus Torvalds
29358336886fSJerry Chu if (sk->sk_state == TCP_CLOSE) {
2936d983ea6fSEric Dumazet struct request_sock *req;
2937d983ea6fSEric Dumazet
2938d983ea6fSEric Dumazet req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
2939d983ea6fSEric Dumazet lockdep_sock_is_held(sk));
29408336886fSJerry Chu /* We could get here with a non-NULL req if the socket is
29418336886fSJerry Chu * aborted (e.g., closed with unread data) before 3WHS
29428336886fSJerry Chu * finishes.
29438336886fSJerry Chu */
294400db4124SIan Morris if (req)
29458336886fSJerry Chu reqsk_fastopen_remove(sk, req, false);
29460a5578cfSArnaldo Carvalho de Melo inet_csk_destroy_sock(sk);
29478336886fSJerry Chu }
29481da177e4SLinus Torvalds /* Otherwise, socket is reprieved until protocol close. */
29491da177e4SLinus Torvalds
29501da177e4SLinus Torvalds out:
29511da177e4SLinus Torvalds bh_unlock_sock(sk);
29521da177e4SLinus Torvalds local_bh_enable();
295377c3c956SPaolo Abeni }
295477c3c956SPaolo Abeni
295577c3c956SPaolo Abeni void tcp_close(struct sock *sk, long timeout)
295677c3c956SPaolo Abeni {
295777c3c956SPaolo Abeni lock_sock(sk);
295877c3c956SPaolo Abeni __tcp_close(sk, timeout);
29598873c064SEric Dumazet release_sock(sk);
2960c1ae4d1eSEric Dumazet if (!sk->sk_net_refcnt)
2961c1ae4d1eSEric Dumazet inet_csk_clear_xmit_timers_sync(sk);
29621da177e4SLinus Torvalds sock_put(sk);
29631da177e4SLinus Torvalds }
29644bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_close);
29651da177e4SLinus Torvalds
29661da177e4SLinus Torvalds /* These states need RST on ABORT according to RFC793 */
29671da177e4SLinus Torvalds
2968a2a385d6SEric Dumazet static inline bool tcp_need_reset(int state)
29691da177e4SLinus Torvalds {
29701da177e4SLinus Torvalds return (1 << state) &
29711da177e4SLinus Torvalds (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
2972a7150e38SEric Dumazet TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
29731da177e4SLinus Torvalds }
29741da177e4SLinus Torvalds
297575c119afSEric Dumazet static void tcp_rtx_queue_purge(struct sock *sk)
297675c119afSEric Dumazet {
297775c119afSEric Dumazet struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
297875c119afSEric Dumazet
29792bec445fSEric Dumazet tcp_sk(sk)->highest_sack = NULL;
298075c119afSEric Dumazet while (p) {
298175c119afSEric Dumazet struct sk_buff *skb = rb_to_skb(p);
298275c119afSEric Dumazet
298375c119afSEric Dumazet p = rb_next(p);
298475c119afSEric Dumazet /* Since we are deleting whole queue, no need to
298575c119afSEric Dumazet * list_del(&skb->tcp_tsorted_anchor)
298675c119afSEric Dumazet */
298775c119afSEric Dumazet tcp_rtx_queue_unlink(skb, sk);
298803271f3aSTalal Ahmad tcp_wmem_free_skb(sk, skb);
298975c119afSEric Dumazet }
299075c119afSEric Dumazet }
299175c119afSEric Dumazet
2992ac3f09baSEric Dumazet void tcp_write_queue_purge(struct sock *sk)
2993ac3f09baSEric Dumazet {
2994ac3f09baSEric Dumazet struct sk_buff *skb;
2995ac3f09baSEric Dumazet
2996ac3f09baSEric Dumazet tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
2997ac3f09baSEric Dumazet while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
2998ac3f09baSEric Dumazet tcp_skb_tsorted_anchor_cleanup(skb);
299903271f3aSTalal Ahmad tcp_wmem_free_skb(sk, skb);
3000ac3f09baSEric Dumazet }
300175c119afSEric Dumazet tcp_rtx_queue_purge(sk);
3002ac3f09baSEric Dumazet INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
3003ac3f09baSEric Dumazet tcp_clear_all_retrans_hints(tcp_sk(sk));
3004bffd168cSSoheil Hassas Yeganeh tcp_sk(sk)->packets_out = 0;
300504c03114SEric Dumazet inet_csk(sk)->icsk_backoff = 0;
3006ac3f09baSEric Dumazet }
3007ac3f09baSEric Dumazet
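/* Forcibly return @sk to TCP_CLOSE and scrub almost all protocol
 * state, as happens e.g. when userspace calls connect() with
 * AF_UNSPEC on an open TCP socket to reuse it.
 */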
30081da177e4SLinus Torvalds int tcp_disconnect(struct sock *sk, int flags)
30091da177e4SLinus Torvalds {
30101da177e4SLinus Torvalds struct inet_sock *inet = inet_sk(sk);
3011463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk);
30121da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
30131da177e4SLinus Torvalds int old_state = sk->sk_state;
30140f317464SEric Dumazet u32 seq;
30151da177e4SLinus Torvalds
30161da177e4SLinus Torvalds if (old_state != TCP_CLOSE)
30171da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE);
30181da177e4SLinus Torvalds
30191da177e4SLinus Torvalds /* ABORT function of RFC793 */
30201da177e4SLinus Torvalds if (old_state == TCP_LISTEN) {
30210a5578cfSArnaldo Carvalho de Melo inet_csk_listen_stop(sk);
3022ee995283SPavel Emelyanov } else if (unlikely(tp->repair)) {
3023e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, ECONNABORTED);
30241da177e4SLinus Torvalds } else if (tcp_need_reset(old_state) ||
30251da177e4SLinus Torvalds (tp->snd_nxt != tp->write_seq &&
30261da177e4SLinus Torvalds (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
3027caa20d9aSStephen Hemminger /* The last check adjusts for discrepancy of Linux wrt. RFC
30281da177e4SLinus Torvalds * states
30291da177e4SLinus Torvalds */
30301da177e4SLinus Torvalds tcp_send_active_reset(sk, gfp_any());
3031e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, ECONNRESET);
3032a7150e38SEric Dumazet } else if (old_state == TCP_SYN_SENT)
3033e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, ECONNRESET);
30341da177e4SLinus Torvalds
30351da177e4SLinus Torvalds tcp_clear_xmit_timers(sk);
30361da177e4SLinus Torvalds __skb_queue_purge(&sk->sk_receive_queue);
30377db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
30387b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, 0);
3039fe067e8aSDavid S. Miller tcp_write_queue_purge(sk);
3040cf1ef3f0SWei Wang tcp_fastopen_active_disable_ofo_check(sk);
30419f5afeaeSYaogong Wang skb_rbtree_purge(&tp->out_of_order_queue);
30421da177e4SLinus Torvalds
3043c720c7e8SEric Dumazet inet->inet_dport = 0;
30441da177e4SLinus Torvalds
3045e0833d1fSKuniyuki Iwashima inet_bhash2_reset_saddr(sk);
30461da177e4SLinus Torvalds
3047e14cadfdSEric Dumazet WRITE_ONCE(sk->sk_shutdown, 0);
30481da177e4SLinus Torvalds sock_reset_flag(sk, SOCK_DONE);
3049740b0f18SEric Dumazet tp->srtt_us = 0;
3050b9e2e689SEric Dumazet tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
30513f6c65d6SWei Wang tp->rcv_rtt_last_tsecr = 0;
30520f317464SEric Dumazet
30530f317464SEric Dumazet seq = tp->write_seq + tp->max_window + 2;
30540f317464SEric Dumazet if (!seq)
30550f317464SEric Dumazet seq = 1;
30560f317464SEric Dumazet WRITE_ONCE(tp->write_seq, seq);
30570f317464SEric Dumazet
3058463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff = 0;
30596687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out = 0;
30609d9b1ee0SEnke Chen icsk->icsk_probes_tstamp = 0;
30616a408147SEric Dumazet icsk->icsk_rto = TCP_TIMEOUT_INIT;
3062ca584ba0SMartin KaFai Lau icsk->icsk_rto_min = TCP_RTO_MIN;
30632b8ee4f0SMartin KaFai Lau icsk->icsk_delack_max = TCP_DELACK_MAX;
30640b6a05c1SIlpo Järvinen tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
306540570375SEric Dumazet tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
30661da177e4SLinus Torvalds tp->snd_cwnd_cnt = 0;
3067f4ce91ceSNeal Cardwell tp->is_cwnd_limited = 0;
3068f4ce91ceSNeal Cardwell tp->max_packets_out = 0;
30691fdf475aSEric Dumazet tp->window_clamp = 0;
30702fbdd562SEric Dumazet tp->delivered = 0;
3071e21db6f6SYuchung Cheng tp->delivered_ce = 0;
3072ce69e563SChristoph Paasch if (icsk->icsk_ca_ops->release)
3073ce69e563SChristoph Paasch icsk->icsk_ca_ops->release(sk);
3074ce69e563SChristoph Paasch memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
30758919a9b3SNeal Cardwell icsk->icsk_ca_initialized = 0;
30766687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Open);
3077d4761754SYousuk Seung tp->is_sack_reneg = 0;
30781da177e4SLinus Torvalds tcp_clear_retrans(tp);
3079c13c48c0SEric Dumazet tp->total_retrans = 0;
3080463c84b9SArnaldo Carvalho de Melo inet_csk_delack_init(sk);
3081499350a5SWei Wang /* Initialize rcv_mss to TCP_MIN_MSS to avoid a division by 0
3082499350a5SWei Wang * issue in __tcp_select_window()
3083499350a5SWei Wang */
3084499350a5SWei Wang icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
3085b40b4f79SSrinivas Aji memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
30861da177e4SLinus Torvalds __sk_dst_reset(sk);
308770530a2fSEric Dumazet dst_release(unrcu_pointer(xchg(&sk->sk_rx_dst, NULL)));
308817c3060bSEric Dumazet tcp_saved_syn_free(tp);
30895d9f4262SEric Dumazet tp->compressed_ack = 0;
3090784f8344SEric Dumazet tp->segs_in = 0;
3091784f8344SEric Dumazet tp->segs_out = 0;
3092ba113c3aSWei Wang tp->bytes_sent = 0;
3093e858faf5SChristoph Paasch tp->bytes_acked = 0;
3094e858faf5SChristoph Paasch tp->bytes_received = 0;
3095fb31c9b9SWei Wang tp->bytes_retrans = 0;
3096db7ffee6SEric Dumazet tp->data_segs_in = 0;
3097db7ffee6SEric Dumazet tp->data_segs_out = 0;
30987788174eSYuchung Cheng tp->duplicate_sack[0].start_seq = 0;
30997788174eSYuchung Cheng tp->duplicate_sack[0].end_seq = 0;
31007e10b655SWei Wang tp->dsack_dups = 0;
31017ec65372SWei Wang tp->reord_seen = 0;
31025c701549SEric Dumazet tp->retrans_out = 0;
31035c701549SEric Dumazet tp->sacked_out = 0;
31045c701549SEric Dumazet tp->tlp_high_seq = 0;
31055c701549SEric Dumazet tp->last_oow_ack_time = 0;
310629c1c446SMubashir Adnan Qureshi tp->plb_rehash = 0;
31076cda8b74SEric Dumazet /* There's a bubble in the pipe until at least the first ACK. */
31086cda8b74SEric Dumazet tp->app_limited = ~0U;
3109300b655dSDavid Morley tp->rate_app_limited = 1;
3110792c4354SEric Dumazet tp->rack.mstamp = 0;
3111792c4354SEric Dumazet tp->rack.advanced = 0;
3112792c4354SEric Dumazet tp->rack.reo_wnd_steps = 1;
3113792c4354SEric Dumazet tp->rack.last_delivered = 0;
3114792c4354SEric Dumazet tp->rack.reo_wnd_persist = 0;
3115792c4354SEric Dumazet tp->rack.dsack_seen = 0;
31166bcdc40dSEric Dumazet tp->syn_data_acked = 0;
31176bcdc40dSEric Dumazet tp->rx_opt.saw_tstamp = 0;
31186bcdc40dSEric Dumazet tp->rx_opt.dsack = 0;
31196bcdc40dSEric Dumazet tp->rx_opt.num_sacks = 0;
3120f9af2dbbSThomas Higdon tp->rcv_ooopack = 0;
31216cda8b74SEric Dumazet
31221da177e4SLinus Torvalds
31237db92362SWei Wang /* Clean up fastopen related fields */
31247db92362SWei Wang tcp_free_fastopen_req(tp);
312508e39c0dSEric Dumazet inet_clear_bit(DEFER_CONNECT, sk);
312648027478SJason Baron tp->fastopen_client_fail = 0;
31277db92362SWei Wang
3128c720c7e8SEric Dumazet WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
31291da177e4SLinus Torvalds
31309b42d55aSLi RongQing if (sk->sk_frag.page) {
31319b42d55aSLi RongQing put_page(sk->sk_frag.page);
31329b42d55aSLi RongQing sk->sk_frag.page = NULL;
31339b42d55aSLi RongQing sk->sk_frag.offset = 0;
31349b42d55aSLi RongQing }
3135e3ae2365SAlexander Aring sk_error_report(sk);
3136a01512b1SYueHaibing return 0;
31371da177e4SLinus Torvalds }
31384bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_disconnect);
31391da177e4SLinus Torvalds
3140a2a385d6SEric Dumazet static inline bool tcp_can_repair_sock(const struct sock *sk)
3141ee995283SPavel Emelyanov {
3142cb388e7eSMartin KaFai Lau return sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
3143319b0534SAndrey Vagin (sk->sk_state != TCP_LISTEN);
3144ee995283SPavel Emelyanov }
3145ee995283SPavel Emelyanov
3146d38d2b00SChristoph Hellwig static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len)
3147b1ed4c4fSAndrey Vagin {
3148b1ed4c4fSAndrey Vagin struct tcp_repair_window opt;
3149b1ed4c4fSAndrey Vagin
3150b1ed4c4fSAndrey Vagin if (!tp->repair)
3151b1ed4c4fSAndrey Vagin return -EPERM;
3152b1ed4c4fSAndrey Vagin
3153b1ed4c4fSAndrey Vagin if (len != sizeof(opt))
3154b1ed4c4fSAndrey Vagin return -EINVAL;
3155b1ed4c4fSAndrey Vagin
3156d38d2b00SChristoph Hellwig if (copy_from_sockptr(&opt, optbuf, sizeof(opt)))
3157b1ed4c4fSAndrey Vagin return -EFAULT;
3158b1ed4c4fSAndrey Vagin
3159b1ed4c4fSAndrey Vagin if (opt.max_window < opt.snd_wnd)
3160b1ed4c4fSAndrey Vagin return -EINVAL;
3161b1ed4c4fSAndrey Vagin
3162b1ed4c4fSAndrey Vagin if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd))
3163b1ed4c4fSAndrey Vagin return -EINVAL;
3164b1ed4c4fSAndrey Vagin
3165b1ed4c4fSAndrey Vagin if (after(opt.rcv_wup, tp->rcv_nxt))
3166b1ed4c4fSAndrey Vagin return -EINVAL;
3167b1ed4c4fSAndrey Vagin
3168b1ed4c4fSAndrey Vagin tp->snd_wl1 = opt.snd_wl1;
3169b1ed4c4fSAndrey Vagin tp->snd_wnd = opt.snd_wnd;
3170b1ed4c4fSAndrey Vagin tp->max_window = opt.max_window;
3171b1ed4c4fSAndrey Vagin
3172b1ed4c4fSAndrey Vagin tp->rcv_wnd = opt.rcv_wnd;
3173b1ed4c4fSAndrey Vagin tp->rcv_wup = opt.rcv_wup;
3174b1ed4c4fSAndrey Vagin
3175b1ed4c4fSAndrey Vagin return 0;
3176b1ed4c4fSAndrey Vagin }
3177b1ed4c4fSAndrey Vagin
3178d38d2b00SChristoph Hellwig static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf,
3179d38d2b00SChristoph Hellwig unsigned int len)
3180b139ba4eSPavel Emelyanov {
318115e56515SDouglas Caetano dos Santos struct tcp_sock *tp = tcp_sk(sk);
3182de248a75SPavel Emelyanov struct tcp_repair_opt opt;
3183d3c48151SChristoph Hellwig size_t offset = 0;
3184b139ba4eSPavel Emelyanov
3185de248a75SPavel Emelyanov while (len >= sizeof(opt)) {
3186d3c48151SChristoph Hellwig if (copy_from_sockptr_offset(&opt, optbuf, offset, sizeof(opt)))
3187b139ba4eSPavel Emelyanov return -EFAULT;
3188b139ba4eSPavel Emelyanov
3189d3c48151SChristoph Hellwig offset += sizeof(opt);
3190de248a75SPavel Emelyanov len -= sizeof(opt);
3191b139ba4eSPavel Emelyanov
3192de248a75SPavel Emelyanov switch (opt.opt_code) {
3193de248a75SPavel Emelyanov case TCPOPT_MSS:
3194de248a75SPavel Emelyanov tp->rx_opt.mss_clamp = opt.opt_val;
319515e56515SDouglas Caetano dos Santos tcp_mtup_init(sk);
3196b139ba4eSPavel Emelyanov break;
3197de248a75SPavel Emelyanov case TCPOPT_WINDOW:
3198bc26ccd8SAndrey Vagin {
3199bc26ccd8SAndrey Vagin u16 snd_wscale = opt.opt_val & 0xFFFF;
3200bc26ccd8SAndrey Vagin u16 rcv_wscale = opt.opt_val >> 16;
3201bc26ccd8SAndrey Vagin
3202589c49cbSGao Feng if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE)
3203b139ba4eSPavel Emelyanov return -EFBIG;
3204b139ba4eSPavel Emelyanov
3205bc26ccd8SAndrey Vagin tp->rx_opt.snd_wscale = snd_wscale;
3206bc26ccd8SAndrey Vagin tp->rx_opt.rcv_wscale = rcv_wscale;
3207bc26ccd8SAndrey Vagin tp->rx_opt.wscale_ok = 1;
3208bc26ccd8SAndrey Vagin }
3209b139ba4eSPavel Emelyanov break;
3210b139ba4eSPavel Emelyanov case TCPOPT_SACK_PERM:
3211de248a75SPavel Emelyanov if (opt.opt_val != 0)
3212de248a75SPavel Emelyanov return -EINVAL;
3213de248a75SPavel Emelyanov
3214b139ba4eSPavel Emelyanov tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
3215b139ba4eSPavel Emelyanov break;
3216b139ba4eSPavel Emelyanov case TCPOPT_TIMESTAMP:
3217de248a75SPavel Emelyanov if (opt.opt_val != 0)
3218de248a75SPavel Emelyanov return -EINVAL;
3219de248a75SPavel Emelyanov
3220b139ba4eSPavel Emelyanov tp->rx_opt.tstamp_ok = 1;
3221b139ba4eSPavel Emelyanov break;
3222b139ba4eSPavel Emelyanov }
3223b139ba4eSPavel Emelyanov }
3224b139ba4eSPavel Emelyanov
3225b139ba4eSPavel Emelyanov return 0;
3226b139ba4eSPavel Emelyanov }
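/* Editor's illustration (hedged sketch, not part of the kernel tree):
 * restoring negotiated options on an established repair-mode socket. The
 * array layout matches what the loop above consumes; the TCPOPT_* option
 * kinds (MSS = 2, WINDOW = 3, SACK_PERM = 4, TIMESTAMP = 8) are not
 * exported through uapi, so userspace supplies the raw values itself.
 *
 *	struct tcp_repair_opt opts[] = {
 *		{ 2, 1460 },				(TCPOPT_MSS)
 *		{ 3, snd_wscale | (rcv_wscale << 16) },	(TCPOPT_WINDOW)
 *		{ 4, 0 },				(TCPOPT_SACK_PERM)
 *		{ 8, 0 },				(TCPOPT_TIMESTAMP)
 *	};
 *	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_OPTIONS, opts, sizeof(opts));
 */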
3227b139ba4eSPavel Emelyanov
3228a842fe14SEric Dumazet DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
3229a842fe14SEric Dumazet EXPORT_SYMBOL(tcp_tx_delay_enabled);
3230a842fe14SEric Dumazet
3231a842fe14SEric Dumazet static void tcp_enable_tx_delay(void)
3232a842fe14SEric Dumazet {
3233a842fe14SEric Dumazet if (!static_branch_unlikely(&tcp_tx_delay_enabled)) {
3234a842fe14SEric Dumazet static int __tcp_tx_delay_enabled = 0;
3235a842fe14SEric Dumazet
3236a842fe14SEric Dumazet if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) {
3237a842fe14SEric Dumazet static_branch_enable(&tcp_tx_delay_enabled);
3238a842fe14SEric Dumazet pr_info("TCP_TX_DELAY enabled\n");
3239a842fe14SEric Dumazet }
3240a842fe14SEric Dumazet }
3241a842fe14SEric Dumazet }
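/* Editor's illustration (hedged sketch, not part of the kernel tree):
 * TCP_TX_DELAY takes a delay in microseconds and is mainly useful for
 * emulating WAN latency on a single host; "fd" is an assumed connected
 * TCP socket.
 *
 *	int usec = 10000;	(adds roughly 10 ms to each transmit)
 *	setsockopt(fd, IPPROTO_TCP, TCP_TX_DELAY, &usec, sizeof(usec));
 */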
3242a842fe14SEric Dumazet
3243db10538aSChristoph Hellwig /* When set, this tells TCP to always queue non-full frames. Later the user
3244db10538aSChristoph Hellwig * clears this option and we transmit any pending partial frames in the queue.
3245db10538aSChristoph Hellwig * This is meant to be used alongside sendfile() to get properly filled frames
3246db10538aSChristoph Hellwig * when the user (for example) must write out headers with a write() call first
3247db10538aSChristoph Hellwig * and then use sendfile() to send out the data parts.
3248db10538aSChristoph Hellwig *
3249db10538aSChristoph Hellwig * TCP_CORK can be set together with TCP_NODELAY and it is stronger than
3250db10538aSChristoph Hellwig * TCP_NODELAY.
3251db10538aSChristoph Hellwig */
32526fadaa56SMaxim Galaganov void __tcp_sock_set_cork(struct sock *sk, bool on)
3253db10538aSChristoph Hellwig {
3254db10538aSChristoph Hellwig struct tcp_sock *tp = tcp_sk(sk);
3255db10538aSChristoph Hellwig
3256db10538aSChristoph Hellwig if (on) {
3257db10538aSChristoph Hellwig tp->nonagle |= TCP_NAGLE_CORK;
3258db10538aSChristoph Hellwig } else {
3259db10538aSChristoph Hellwig tp->nonagle &= ~TCP_NAGLE_CORK;
3260db10538aSChristoph Hellwig if (tp->nonagle & TCP_NAGLE_OFF)
3261db10538aSChristoph Hellwig tp->nonagle |= TCP_NAGLE_PUSH;
3262db10538aSChristoph Hellwig tcp_push_pending_frames(sk);
3263db10538aSChristoph Hellwig }
3264db10538aSChristoph Hellwig }
3265db10538aSChristoph Hellwig
3266db10538aSChristoph Hellwig void tcp_sock_set_cork(struct sock *sk, bool on)
3267db10538aSChristoph Hellwig {
3268db10538aSChristoph Hellwig lock_sock(sk);
3269db10538aSChristoph Hellwig __tcp_sock_set_cork(sk, on);
3270db10538aSChristoph Hellwig release_sock(sk);
3271db10538aSChristoph Hellwig }
3272db10538aSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_cork);
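/* Editor's illustration (hedged sketch, not part of the kernel tree): the
 * header-plus-sendfile() pattern described in the comment above. "fd",
 * "filefd", "hdr", "hdr_len" and "len" are assumptions for the sketch.
 *
 *	int on = 1, off = 0;
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(fd, hdr, hdr_len);		(queued as a partial frame)
 *	sendfile(fd, filefd, NULL, len);
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));  (flush)
 */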
3273db10538aSChristoph Hellwig
327412abc5eeSChristoph Hellwig /* TCP_NODELAY is weaker than TCP_CORK, so setting this option on a corked
327512abc5eeSChristoph Hellwig * socket is remembered, but it is not activated until the cork is cleared.
327612abc5eeSChristoph Hellwig *
327712abc5eeSChristoph Hellwig * However, when TCP_NODELAY is set we make an explicit push, which overrides
327812abc5eeSChristoph Hellwig * even TCP_CORK for currently queued segments.
327912abc5eeSChristoph Hellwig */
32806fadaa56SMaxim Galaganov void __tcp_sock_set_nodelay(struct sock *sk, bool on)
328112abc5eeSChristoph Hellwig {
328212abc5eeSChristoph Hellwig if (on) {
328312abc5eeSChristoph Hellwig tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
328412abc5eeSChristoph Hellwig tcp_push_pending_frames(sk);
328512abc5eeSChristoph Hellwig } else {
328612abc5eeSChristoph Hellwig tcp_sk(sk)->nonagle &= ~TCP_NAGLE_OFF;
328712abc5eeSChristoph Hellwig }
328812abc5eeSChristoph Hellwig }
328912abc5eeSChristoph Hellwig
329012abc5eeSChristoph Hellwig void tcp_sock_set_nodelay(struct sock *sk)
329112abc5eeSChristoph Hellwig {
329212abc5eeSChristoph Hellwig lock_sock(sk);
329312abc5eeSChristoph Hellwig __tcp_sock_set_nodelay(sk, true);
329412abc5eeSChristoph Hellwig release_sock(sk);
329512abc5eeSChristoph Hellwig }
329612abc5eeSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_nodelay);
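/* Editor's illustration (hedged sketch, not part of the kernel tree):
 * disabling Nagle from userspace for a latency-sensitive request/response
 * socket; "fd" is an assumed connected TCP socket.
 *
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
 */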
329712abc5eeSChristoph Hellwig
3298ddd061b8SChristoph Hellwig static void __tcp_sock_set_quickack(struct sock *sk, int val)
3299ddd061b8SChristoph Hellwig {
3300ddd061b8SChristoph Hellwig if (!val) {
3301ddd061b8SChristoph Hellwig inet_csk_enter_pingpong_mode(sk);
3302ddd061b8SChristoph Hellwig return;
3303ddd061b8SChristoph Hellwig }
3304ddd061b8SChristoph Hellwig
3305ddd061b8SChristoph Hellwig inet_csk_exit_pingpong_mode(sk);
3306ddd061b8SChristoph Hellwig if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
3307ddd061b8SChristoph Hellwig inet_csk_ack_scheduled(sk)) {
3308ddd061b8SChristoph Hellwig inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED;
3309ddd061b8SChristoph Hellwig tcp_cleanup_rbuf(sk, 1);
3310ddd061b8SChristoph Hellwig if (!(val & 1))
3311ddd061b8SChristoph Hellwig inet_csk_enter_pingpong_mode(sk);
3312ddd061b8SChristoph Hellwig }
3313ddd061b8SChristoph Hellwig }
3314ddd061b8SChristoph Hellwig
3315ddd061b8SChristoph Hellwig void tcp_sock_set_quickack(struct sock *sk, int val)
3316ddd061b8SChristoph Hellwig {
3317ddd061b8SChristoph Hellwig lock_sock(sk);
3318ddd061b8SChristoph Hellwig __tcp_sock_set_quickack(sk, val);
3319ddd061b8SChristoph Hellwig release_sock(sk);
3320ddd061b8SChristoph Hellwig }
3321ddd061b8SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_quickack);
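/* Editor's illustration (hedged sketch, not part of the kernel tree): as
 * the logic above shows, TCP_QUICKACK is not sticky, so applications that
 * want prompt ACKs tend to re-arm it around each receive; "fd" and "buf"
 * are assumptions for the sketch.
 *
 *	int one = 1;
 *	recv(fd, buf, sizeof(buf), 0);
 *	setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
 */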
3322ddd061b8SChristoph Hellwig
3323557eadfcSChristoph Hellwig int tcp_sock_set_syncnt(struct sock *sk, int val)
3324557eadfcSChristoph Hellwig {
3325557eadfcSChristoph Hellwig if (val < 1 || val > MAX_TCP_SYNCNT)
3326557eadfcSChristoph Hellwig return -EINVAL;
3327557eadfcSChristoph Hellwig
33283a037f0fSEric Dumazet WRITE_ONCE(inet_csk(sk)->icsk_syn_retries, val);
3329557eadfcSChristoph Hellwig return 0;
3330557eadfcSChristoph Hellwig }
3331557eadfcSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_syncnt);
3332557eadfcSChristoph Hellwig
3333d58f2e15SEric Dumazet int tcp_sock_set_user_timeout(struct sock *sk, int val)
3334c488aeadSChristoph Hellwig {
3335d58f2e15SEric Dumazet /* Cap the max time in ms TCP will retry or probe the window
3336d58f2e15SEric Dumazet * before giving up and aborting (ETIMEDOUT) a connection.
3337d58f2e15SEric Dumazet */
3338d58f2e15SEric Dumazet if (val < 0)
3339d58f2e15SEric Dumazet return -EINVAL;
3340d58f2e15SEric Dumazet
334126023e91SEric Dumazet WRITE_ONCE(inet_csk(sk)->icsk_user_timeout, val);
3342d58f2e15SEric Dumazet return 0;
3343c488aeadSChristoph Hellwig }
3344c488aeadSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_user_timeout);
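/* Editor's illustration (hedged sketch, not part of the kernel tree):
 * abort a stalled connection after ~30 seconds instead of the default
 * retry schedule; the value is in milliseconds and 0 restores the default.
 *
 *	unsigned int ms = 30000;
 *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &ms, sizeof(ms));
 */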
3345c488aeadSChristoph Hellwig
3346aad4a0a9SDmitry Yakunin int tcp_sock_set_keepidle_locked(struct sock *sk, int val)
334771c48eb8SChristoph Hellwig {
334871c48eb8SChristoph Hellwig struct tcp_sock *tp = tcp_sk(sk);
334971c48eb8SChristoph Hellwig
335071c48eb8SChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPIDLE)
335171c48eb8SChristoph Hellwig return -EINVAL;
335271c48eb8SChristoph Hellwig
33534164245cSEric Dumazet /* Paired with READ_ONCE() in keepalive_time_when() */
33544164245cSEric Dumazet WRITE_ONCE(tp->keepalive_time, val * HZ);
335571c48eb8SChristoph Hellwig if (sock_flag(sk, SOCK_KEEPOPEN) &&
335671c48eb8SChristoph Hellwig !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
335771c48eb8SChristoph Hellwig u32 elapsed = keepalive_time_elapsed(tp);
335871c48eb8SChristoph Hellwig
335971c48eb8SChristoph Hellwig if (tp->keepalive_time > elapsed)
336071c48eb8SChristoph Hellwig elapsed = tp->keepalive_time - elapsed;
336171c48eb8SChristoph Hellwig else
336271c48eb8SChristoph Hellwig elapsed = 0;
336371c48eb8SChristoph Hellwig inet_csk_reset_keepalive_timer(sk, elapsed);
336471c48eb8SChristoph Hellwig }
336571c48eb8SChristoph Hellwig
336671c48eb8SChristoph Hellwig return 0;
336771c48eb8SChristoph Hellwig }
336871c48eb8SChristoph Hellwig
336971c48eb8SChristoph Hellwig int tcp_sock_set_keepidle(struct sock *sk, int val)
337071c48eb8SChristoph Hellwig {
337171c48eb8SChristoph Hellwig int err;
337271c48eb8SChristoph Hellwig
337371c48eb8SChristoph Hellwig lock_sock(sk);
3374aad4a0a9SDmitry Yakunin err = tcp_sock_set_keepidle_locked(sk, val);
337571c48eb8SChristoph Hellwig release_sock(sk);
337671c48eb8SChristoph Hellwig return err;
337771c48eb8SChristoph Hellwig }
337871c48eb8SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepidle);
337971c48eb8SChristoph Hellwig
3380d41ecaacSChristoph Hellwig int tcp_sock_set_keepintvl(struct sock *sk, int val)
3381d41ecaacSChristoph Hellwig {
3382d41ecaacSChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPINTVL)
3383d41ecaacSChristoph Hellwig return -EINVAL;
3384d41ecaacSChristoph Hellwig
33855ecf9d4fSEric Dumazet WRITE_ONCE(tcp_sk(sk)->keepalive_intvl, val * HZ);
3386d41ecaacSChristoph Hellwig return 0;
3387d41ecaacSChristoph Hellwig }
3388d41ecaacSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepintvl);
3389d41ecaacSChristoph Hellwig
3390480aeb96SChristoph Hellwig int tcp_sock_set_keepcnt(struct sock *sk, int val)
3391480aeb96SChristoph Hellwig {
3392480aeb96SChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPCNT)
3393480aeb96SChristoph Hellwig return -EINVAL;
3394480aeb96SChristoph Hellwig
33956e5e1de6SEric Dumazet /* Paired with READ_ONCE() in keepalive_probes() */
33966e5e1de6SEric Dumazet WRITE_ONCE(tcp_sk(sk)->keepalive_probes, val);
3397480aeb96SChristoph Hellwig return 0;
3398480aeb96SChristoph Hellwig }
3399480aeb96SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepcnt);
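/* Editor's illustration (hedged sketch, not part of the kernel tree): the
 * three keepalive knobs above combined. SO_KEEPALIVE must also be enabled
 * or the keepalive timer never runs: probe after 60 s of idle, every 10 s,
 * and give up after 5 unanswered probes.
 *
 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 */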
3400480aeb96SChristoph Hellwig
3401cb811109SPrankur gupta int tcp_set_window_clamp(struct sock *sk, int val)
3402cb811109SPrankur gupta {
3403cb811109SPrankur gupta struct tcp_sock *tp = tcp_sk(sk);
3404cb811109SPrankur gupta
3405cb811109SPrankur gupta if (!val) {
3406cb811109SPrankur gupta if (sk->sk_state != TCP_CLOSE)
3407cb811109SPrankur gupta return -EINVAL;
3408f9fef23aSEric Dumazet WRITE_ONCE(tp->window_clamp, 0);
3409cb811109SPrankur gupta } else {
3410e4a2a432SPaolo Abeni u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp;
3411e4a2a432SPaolo Abeni u32 new_window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
3412cb811109SPrankur gupta SOCK_MIN_RCVBUF / 2 : val;
3413e4a2a432SPaolo Abeni
3414e4a2a432SPaolo Abeni if (new_window_clamp == old_window_clamp)
3415e4a2a432SPaolo Abeni return 0;
3416e4a2a432SPaolo Abeni
3417f9fef23aSEric Dumazet WRITE_ONCE(tp->window_clamp, new_window_clamp);
3418e4a2a432SPaolo Abeni if (new_window_clamp < old_window_clamp) {
3419e4a2a432SPaolo Abeni /* need to apply the reserved mem provisioning only
3420e4a2a432SPaolo Abeni * when shrinking the window clamp
3421e4a2a432SPaolo Abeni */
3422e4a2a432SPaolo Abeni __tcp_adjust_rcv_ssthresh(sk, tp->window_clamp);
3423e4a2a432SPaolo Abeni
3424e4a2a432SPaolo Abeni } else {
3425e4a2a432SPaolo Abeni new_rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp);
3426e4a2a432SPaolo Abeni tp->rcv_ssthresh = max(new_rcv_ssthresh,
3427e4a2a432SPaolo Abeni tp->rcv_ssthresh);
3428e4a2a432SPaolo Abeni }
3429cb811109SPrankur gupta }
3430cb811109SPrankur gupta return 0;
3431cb811109SPrankur gupta }
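/* Editor's illustration (hedged sketch, not part of the kernel tree):
 * clamp the advertised receive window to 64 KB. As enforced above, nonzero
 * values below SOCK_MIN_RCVBUF / 2 are raised to that floor, and 0 is
 * accepted only on a closed socket.
 *
 *	int clamp = 65536;
 *	setsockopt(fd, IPPROTO_TCP, TCP_WINDOW_CLAMP, &clamp, sizeof(clamp));
 */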
3432cb811109SPrankur gupta
34331da177e4SLinus Torvalds /*
34341da177e4SLinus Torvalds * Socket option code for TCP.
34351da177e4SLinus Torvalds */
34360c751f70SMartin KaFai Lau int do_tcp_setsockopt(struct sock *sk, int level, int optname,
3437d38d2b00SChristoph Hellwig sockptr_t optval, unsigned int optlen)
34381da177e4SLinus Torvalds {
34391da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
3440463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk);
34411e579caaSNikolay Borisov struct net *net = sock_net(sk);
34421da177e4SLinus Torvalds int val;
34431da177e4SLinus Torvalds int err = 0;
34441da177e4SLinus Torvalds
3445e56fb50fSWilliam Allen Simpson /* These are data/string values, all the others are ints */
3446e56fb50fSWilliam Allen Simpson switch (optname) {
3447e56fb50fSWilliam Allen Simpson case TCP_CONGESTION: {
34485f8ef48dSStephen Hemminger char name[TCP_CA_NAME_MAX];
34495f8ef48dSStephen Hemminger
34505f8ef48dSStephen Hemminger if (optlen < 1)
34515f8ef48dSStephen Hemminger return -EINVAL;
34525f8ef48dSStephen Hemminger
3453d38d2b00SChristoph Hellwig val = strncpy_from_sockptr(name, optval,
34544fdb78d3SAndrew Morton min_t(long, TCP_CA_NAME_MAX-1, optlen));
34555f8ef48dSStephen Hemminger if (val < 0)
34565f8ef48dSStephen Hemminger return -EFAULT;
34575f8ef48dSStephen Hemminger name[val] = 0;
34585f8ef48dSStephen Hemminger
3459cb388e7eSMartin KaFai Lau sockopt_lock_sock(sk);
346084e5a0f2SMartin KaFai Lau err = tcp_set_congestion_control(sk, name, !has_current_bpf_ctx(),
3461cb388e7eSMartin KaFai Lau sockopt_ns_capable(sock_net(sk)->user_ns,
34628d650cdeSEric Dumazet CAP_NET_ADMIN));
3463cb388e7eSMartin KaFai Lau sockopt_release_sock(sk);
34645f8ef48dSStephen Hemminger return err;
34655f8ef48dSStephen Hemminger }
3466734942ccSDave Watson case TCP_ULP: {
3467734942ccSDave Watson char name[TCP_ULP_NAME_MAX];
3468734942ccSDave Watson
3469734942ccSDave Watson if (optlen < 1)
3470734942ccSDave Watson return -EINVAL;
3471734942ccSDave Watson
3472d38d2b00SChristoph Hellwig val = strncpy_from_sockptr(name, optval,
3473734942ccSDave Watson min_t(long, TCP_ULP_NAME_MAX - 1,
3474734942ccSDave Watson optlen));
3475734942ccSDave Watson if (val < 0)
3476734942ccSDave Watson return -EFAULT;
3477734942ccSDave Watson name[val] = 0;
3478734942ccSDave Watson
3479cb388e7eSMartin KaFai Lau sockopt_lock_sock(sk);
3480734942ccSDave Watson err = tcp_set_ulp(sk, name);
3481cb388e7eSMartin KaFai Lau sockopt_release_sock(sk);
3482734942ccSDave Watson return err;
3483734942ccSDave Watson }
34841fba70e5SYuchung Cheng case TCP_FASTOPEN_KEY: {
34850f1ce023SJason Baron __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH];
34860f1ce023SJason Baron __u8 *backup_key = NULL;
34871fba70e5SYuchung Cheng
34880f1ce023SJason Baron /* Allow a backup key as well to facilitate key rotation;
34890f1ce023SJason Baron * the first key is the active one.
34900f1ce023SJason Baron */
34910f1ce023SJason Baron if (optlen != TCP_FASTOPEN_KEY_LENGTH &&
34920f1ce023SJason Baron optlen != TCP_FASTOPEN_KEY_BUF_LENGTH)
34931fba70e5SYuchung Cheng return -EINVAL;
34941fba70e5SYuchung Cheng
3495d38d2b00SChristoph Hellwig if (copy_from_sockptr(key, optval, optlen))
34961fba70e5SYuchung Cheng return -EFAULT;
34971fba70e5SYuchung Cheng
34980f1ce023SJason Baron if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH)
34990f1ce023SJason Baron backup_key = key + TCP_FASTOPEN_KEY_LENGTH;
35000f1ce023SJason Baron
3501438ac880SArd Biesheuvel return tcp_fastopen_reset_cipher(net, sk, key, backup_key);
35021fba70e5SYuchung Cheng }
3503e56fb50fSWilliam Allen Simpson default:
3504e56fb50fSWilliam Allen Simpson /* fallthru */
3505e56fb50fSWilliam Allen Simpson break;
3506ccbd6a5aSJoe Perches }
35075f8ef48dSStephen Hemminger
35081da177e4SLinus Torvalds if (optlen < sizeof(int))
35091da177e4SLinus Torvalds return -EINVAL;
35101da177e4SLinus Torvalds
3511d38d2b00SChristoph Hellwig if (copy_from_sockptr(&val, optval, sizeof(val)))
35121da177e4SLinus Torvalds return -EFAULT;
35131da177e4SLinus Torvalds
3514d44fd4a7SEric Dumazet /* Handle options that can be set without locking the socket. */
3515d44fd4a7SEric Dumazet switch (optname) {
3516d44fd4a7SEric Dumazet case TCP_SYNCNT:
3517d44fd4a7SEric Dumazet return tcp_sock_set_syncnt(sk, val);
3518d58f2e15SEric Dumazet case TCP_USER_TIMEOUT:
3519d58f2e15SEric Dumazet return tcp_sock_set_user_timeout(sk, val);
35206fd70a6bSEric Dumazet case TCP_KEEPINTVL:
35216fd70a6bSEric Dumazet return tcp_sock_set_keepintvl(sk, val);
352284485080SEric Dumazet case TCP_KEEPCNT:
352384485080SEric Dumazet return tcp_sock_set_keepcnt(sk, val);
3524a81722ddSEric Dumazet case TCP_LINGER2:
3525a81722ddSEric Dumazet if (val < 0)
3526a81722ddSEric Dumazet WRITE_ONCE(tp->linger2, -1);
3527a81722ddSEric Dumazet else if (val > TCP_FIN_TIMEOUT_MAX / HZ)
3528a81722ddSEric Dumazet WRITE_ONCE(tp->linger2, TCP_FIN_TIMEOUT_MAX);
3529a81722ddSEric Dumazet else
3530a81722ddSEric Dumazet WRITE_ONCE(tp->linger2, val * HZ);
3531a81722ddSEric Dumazet return 0;
35326e97ba55SEric Dumazet case TCP_DEFER_ACCEPT:
35336e97ba55SEric Dumazet /* Translate value in seconds to number of retransmits */
35346e97ba55SEric Dumazet WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept,
35356e97ba55SEric Dumazet secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
35366e97ba55SEric Dumazet TCP_RTO_MAX / HZ));
35376e97ba55SEric Dumazet return 0;
3538d44fd4a7SEric Dumazet }
3539d44fd4a7SEric Dumazet
3540cb388e7eSMartin KaFai Lau sockopt_lock_sock(sk);
35411da177e4SLinus Torvalds
35421da177e4SLinus Torvalds switch (optname) {
35431da177e4SLinus Torvalds case TCP_MAXSEG:
35441da177e4SLinus Torvalds /* Values greater than interface MTU won't take effect. However
35451da177e4SLinus Torvalds * at the point when this call is done we typically don't yet
3546a777f715SRohit Chavan * know which interface is going to be used.
3547a777f715SRohit Chavan */
3548cfc62d87SGao Feng if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) {
35491da177e4SLinus Torvalds err = -EINVAL;
35501da177e4SLinus Torvalds break;
35511da177e4SLinus Torvalds }
35521da177e4SLinus Torvalds tp->rx_opt.user_mss = val;
35531da177e4SLinus Torvalds break;
35541da177e4SLinus Torvalds
35551da177e4SLinus Torvalds case TCP_NODELAY:
355612abc5eeSChristoph Hellwig __tcp_sock_set_nodelay(sk, val);
35571da177e4SLinus Torvalds break;
35581da177e4SLinus Torvalds
355936e31b0aSAndreas Petlund case TCP_THIN_LINEAR_TIMEOUTS:
356036e31b0aSAndreas Petlund if (val < 0 || val > 1)
356136e31b0aSAndreas Petlund err = -EINVAL;
356236e31b0aSAndreas Petlund else
356336e31b0aSAndreas Petlund tp->thin_lto = val;
356436e31b0aSAndreas Petlund break;
356536e31b0aSAndreas Petlund
35667e380175SAndreas Petlund case TCP_THIN_DUPACK:
35677e380175SAndreas Petlund if (val < 0 || val > 1)
35687e380175SAndreas Petlund err = -EINVAL;
35697e380175SAndreas Petlund break;
35707e380175SAndreas Petlund
3571ee995283SPavel Emelyanov case TCP_REPAIR:
3572ee995283SPavel Emelyanov if (!tcp_can_repair_sock(sk))
3573ee995283SPavel Emelyanov err = -EPERM;
357431048d7aSStefan Baranoff else if (val == TCP_REPAIR_ON) {
3575ee995283SPavel Emelyanov tp->repair = 1;
3576ee995283SPavel Emelyanov sk->sk_reuse = SK_FORCE_REUSE;
3577ee995283SPavel Emelyanov tp->repair_queue = TCP_NO_QUEUE;
357831048d7aSStefan Baranoff } else if (val == TCP_REPAIR_OFF) {
3579ee995283SPavel Emelyanov tp->repair = 0;
3580ee995283SPavel Emelyanov sk->sk_reuse = SK_NO_REUSE;
3581ee995283SPavel Emelyanov tcp_send_window_probe(sk);
358231048d7aSStefan Baranoff } else if (val == TCP_REPAIR_OFF_NO_WP) {
358331048d7aSStefan Baranoff tp->repair = 0;
358431048d7aSStefan Baranoff sk->sk_reuse = SK_NO_REUSE;
3585ee995283SPavel Emelyanov } else
3586ee995283SPavel Emelyanov err = -EINVAL;
3587ee995283SPavel Emelyanov
3588ee995283SPavel Emelyanov break;
3589ee995283SPavel Emelyanov
3590ee995283SPavel Emelyanov case TCP_REPAIR_QUEUE:
3591ee995283SPavel Emelyanov if (!tp->repair)
3592ee995283SPavel Emelyanov err = -EPERM;
3593bf2acc94SEric Dumazet else if ((unsigned int)val < TCP_QUEUES_NR)
3594ee995283SPavel Emelyanov tp->repair_queue = val;
3595ee995283SPavel Emelyanov else
3596ee995283SPavel Emelyanov err = -EINVAL;
3597ee995283SPavel Emelyanov break;
3598ee995283SPavel Emelyanov
3599ee995283SPavel Emelyanov case TCP_QUEUE_SEQ:
36008811f4a9SEric Dumazet if (sk->sk_state != TCP_CLOSE) {
3601ee995283SPavel Emelyanov err = -EPERM;
36028811f4a9SEric Dumazet } else if (tp->repair_queue == TCP_SEND_QUEUE) {
36038811f4a9SEric Dumazet if (!tcp_rtx_queue_empty(sk))
36048811f4a9SEric Dumazet err = -EPERM;
36058811f4a9SEric Dumazet else
36060f317464SEric Dumazet WRITE_ONCE(tp->write_seq, val);
36078811f4a9SEric Dumazet } else if (tp->repair_queue == TCP_RECV_QUEUE) {
36088811f4a9SEric Dumazet if (tp->rcv_nxt != tp->copied_seq) {
36098811f4a9SEric Dumazet err = -EPERM;
36108811f4a9SEric Dumazet } else {
3611dba7d9b8SEric Dumazet WRITE_ONCE(tp->rcv_nxt, val);
36126cd6cbf5SEric Dumazet WRITE_ONCE(tp->copied_seq, val);
36136cd6cbf5SEric Dumazet }
36148811f4a9SEric Dumazet } else {
3615ee995283SPavel Emelyanov err = -EINVAL;
36168811f4a9SEric Dumazet }
3617ee995283SPavel Emelyanov break;
3618ee995283SPavel Emelyanov
3619b139ba4eSPavel Emelyanov case TCP_REPAIR_OPTIONS:
3620b139ba4eSPavel Emelyanov if (!tp->repair)
3621b139ba4eSPavel Emelyanov err = -EINVAL;
36220c175da7SLu Wei else if (sk->sk_state == TCP_ESTABLISHED && !tp->bytes_sent)
3623d38d2b00SChristoph Hellwig err = tcp_repair_options_est(sk, optval, optlen);
3624b139ba4eSPavel Emelyanov else
3625b139ba4eSPavel Emelyanov err = -EPERM;
3626b139ba4eSPavel Emelyanov break;
3627b139ba4eSPavel Emelyanov
36281da177e4SLinus Torvalds case TCP_CORK:
3629db10538aSChristoph Hellwig __tcp_sock_set_cork(sk, val);
36301da177e4SLinus Torvalds break;
36311da177e4SLinus Torvalds
36321da177e4SLinus Torvalds case TCP_KEEPIDLE:
3633aad4a0a9SDmitry Yakunin err = tcp_sock_set_keepidle_locked(sk, val);
36341da177e4SLinus Torvalds break;
3635cd8ae852SEric Dumazet case TCP_SAVE_SYN:
3636267cf9faSMartin KaFai Lau /* 0: disable, 1: enable, 2: start from ether_header */
3637267cf9faSMartin KaFai Lau if (val < 0 || val > 2)
3638cd8ae852SEric Dumazet err = -EINVAL;
3639cd8ae852SEric Dumazet else
3640cd8ae852SEric Dumazet tp->save_syn = val;
3641cd8ae852SEric Dumazet break;
3642cd8ae852SEric Dumazet
36431da177e4SLinus Torvalds case TCP_WINDOW_CLAMP:
3644cb811109SPrankur gupta err = tcp_set_window_clamp(sk, val);
36451da177e4SLinus Torvalds break;
36461da177e4SLinus Torvalds
36471da177e4SLinus Torvalds case TCP_QUICKACK:
3648ddd061b8SChristoph Hellwig __tcp_sock_set_quickack(sk, val);
36491da177e4SLinus Torvalds break;
36501da177e4SLinus Torvalds
3651cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
3652cfb6eeb4SYOSHIFUJI Hideaki case TCP_MD5SIG:
36538917a777SIvan Delalande case TCP_MD5SIG_EXT:
3654d38d2b00SChristoph Hellwig err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
3655cfb6eeb4SYOSHIFUJI Hideaki break;
3656cfb6eeb4SYOSHIFUJI Hideaki #endif
36578336886fSJerry Chu case TCP_FASTOPEN:
36588336886fSJerry Chu if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
3659dfea2aa6SChristoph Paasch TCPF_LISTEN))) {
366043713848SHaishuang Yan tcp_fastopen_init_key_once(net);
3661dfea2aa6SChristoph Paasch
36620536fcc0SEric Dumazet fastopen_queue_tune(sk, val);
3663dfea2aa6SChristoph Paasch } else {
36648336886fSJerry Chu err = -EINVAL;
3665dfea2aa6SChristoph Paasch }
36668336886fSJerry Chu break;
366719f6d3f3SWei Wang case TCP_FASTOPEN_CONNECT:
366819f6d3f3SWei Wang if (val > 1 || val < 0) {
366919f6d3f3SWei Wang err = -EINVAL;
36705a542133SKuniyuki Iwashima } else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) &
36715a542133SKuniyuki Iwashima TFO_CLIENT_ENABLE) {
367219f6d3f3SWei Wang if (sk->sk_state == TCP_CLOSE)
367319f6d3f3SWei Wang tp->fastopen_connect = val;
367419f6d3f3SWei Wang else
367519f6d3f3SWei Wang err = -EINVAL;
367619f6d3f3SWei Wang } else {
367719f6d3f3SWei Wang err = -EOPNOTSUPP;
367819f6d3f3SWei Wang }
367919f6d3f3SWei Wang break;
368071c02379SChristoph Paasch case TCP_FASTOPEN_NO_COOKIE:
368171c02379SChristoph Paasch if (val > 1 || val < 0)
368271c02379SChristoph Paasch err = -EINVAL;
368371c02379SChristoph Paasch else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
368471c02379SChristoph Paasch err = -EINVAL;
368571c02379SChristoph Paasch else
368671c02379SChristoph Paasch tp->fastopen_no_cookie = val;
368771c02379SChristoph Paasch break;
368893be6ce0SAndrey Vagin case TCP_TIMESTAMP:
368993be6ce0SAndrey Vagin if (!tp->repair)
369093be6ce0SAndrey Vagin err = -EPERM;
369193be6ce0SAndrey Vagin else
3692dd23c9f1SEric Dumazet WRITE_ONCE(tp->tsoffset, val - tcp_time_stamp_raw());
369393be6ce0SAndrey Vagin break;
3694b1ed4c4fSAndrey Vagin case TCP_REPAIR_WINDOW:
3695b1ed4c4fSAndrey Vagin err = tcp_repair_set_window(tp, optval, optlen);
3696b1ed4c4fSAndrey Vagin break;
3697c9bee3b7SEric Dumazet case TCP_NOTSENT_LOWAT:
36981aeb87bcSEric Dumazet WRITE_ONCE(tp->notsent_lowat, val);
3699c9bee3b7SEric Dumazet sk->sk_write_space(sk);
3700c9bee3b7SEric Dumazet break;
3701b75eba76SSoheil Hassas Yeganeh case TCP_INQ:
3702b75eba76SSoheil Hassas Yeganeh if (val > 1 || val < 0)
3703b75eba76SSoheil Hassas Yeganeh err = -EINVAL;
3704b75eba76SSoheil Hassas Yeganeh else
3705b75eba76SSoheil Hassas Yeganeh tp->recvmsg_inq = val;
3706b75eba76SSoheil Hassas Yeganeh break;
3707a842fe14SEric Dumazet case TCP_TX_DELAY:
3708a842fe14SEric Dumazet if (val)
3709a842fe14SEric Dumazet tcp_enable_tx_delay();
3710348b81b6SEric Dumazet WRITE_ONCE(tp->tcp_tx_delay, val);
3711a842fe14SEric Dumazet break;
37121da177e4SLinus Torvalds default:
37131da177e4SLinus Torvalds err = -ENOPROTOOPT;
37141da177e4SLinus Torvalds break;
37153ff50b79SStephen Hemminger }
37163ff50b79SStephen Hemminger
3717cb388e7eSMartin KaFai Lau sockopt_release_sock(sk);
37181da177e4SLinus Torvalds return err;
37191da177e4SLinus Torvalds }
37201da177e4SLinus Torvalds
3721a7b75c5aSChristoph Hellwig int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
3722b7058842SDavid S. Miller unsigned int optlen)
37233fdadf7dSDmitry Mishin {
3724cf533ea5SEric Dumazet const struct inet_connection_sock *icsk = inet_csk(sk);
37253fdadf7dSDmitry Mishin
37263fdadf7dSDmitry Mishin if (level != SOL_TCP)
3727f49cd2f4SKuniyuki Iwashima /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */
3728f49cd2f4SKuniyuki Iwashima return READ_ONCE(icsk->icsk_af_ops)->setsockopt(sk, level, optname,
37293fdadf7dSDmitry Mishin optval, optlen);
3730a7b75c5aSChristoph Hellwig return do_tcp_setsockopt(sk, level, optname, optval, optlen);
37313fdadf7dSDmitry Mishin }
37324bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_setsockopt);
37333fdadf7dSDmitry Mishin
3734efd90174SFrancis Yan static void tcp_get_info_chrono_stats(const struct tcp_sock *tp,
3735efd90174SFrancis Yan struct tcp_info *info)
3736efd90174SFrancis Yan {
3737efd90174SFrancis Yan u64 stats[__TCP_CHRONO_MAX], total = 0;
3738efd90174SFrancis Yan enum tcp_chrono i;
3739efd90174SFrancis Yan
3740efd90174SFrancis Yan for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) {
3741efd90174SFrancis Yan stats[i] = tp->chrono_stat[i - 1];
3742efd90174SFrancis Yan if (i == tp->chrono_type)
3743628174ccSEric Dumazet stats[i] += tcp_jiffies32 - tp->chrono_start;
3744efd90174SFrancis Yan stats[i] *= USEC_PER_SEC / HZ;
3745efd90174SFrancis Yan total += stats[i];
3746efd90174SFrancis Yan }
3747efd90174SFrancis Yan
3748efd90174SFrancis Yan info->tcpi_busy_time = total;
3749efd90174SFrancis Yan info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED];
3750efd90174SFrancis Yan info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED];
3751efd90174SFrancis Yan }
3752efd90174SFrancis Yan
37531da177e4SLinus Torvalds /* Return information about state of tcp endpoint in API format. */
37540df48c26SEric Dumazet void tcp_get_info(struct sock *sk, struct tcp_info *info)
37551da177e4SLinus Torvalds {
375635ac838aSCraig Gallek const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
3757463c84b9SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk);
375876a9ebe8SEric Dumazet unsigned long rate;
37590263598cSWei Wang u32 now;
3760ff5d7497SEric Dumazet u64 rate64;
376167db3e4bSEric Dumazet bool slow;
37621da177e4SLinus Torvalds
37631da177e4SLinus Torvalds memset(info, 0, sizeof(*info));
376435ac838aSCraig Gallek if (sk->sk_type != SOCK_STREAM)
376535ac838aSCraig Gallek return;
37661da177e4SLinus Torvalds
3767986ffdfdSYafang Shao info->tcpi_state = inet_sk_state_load(sk);
376800fd38d9SEric Dumazet
3769ccbf3bfaSEric Dumazet /* Report meaningful fields for all TCP states, including listeners */
3770ccbf3bfaSEric Dumazet rate = READ_ONCE(sk->sk_pacing_rate);
377176a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? rate : ~0ULL;
3772f522a5fcSEric Dumazet info->tcpi_pacing_rate = rate64;
3773ccbf3bfaSEric Dumazet
3774ccbf3bfaSEric Dumazet rate = READ_ONCE(sk->sk_max_pacing_rate);
377576a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? rate : ~0ULL;
3776f522a5fcSEric Dumazet info->tcpi_max_pacing_rate = rate64;
3777ccbf3bfaSEric Dumazet
3778ccbf3bfaSEric Dumazet info->tcpi_reordering = tp->reordering;
377940570375SEric Dumazet info->tcpi_snd_cwnd = tcp_snd_cwnd(tp);
3780ccbf3bfaSEric Dumazet
3781ccbf3bfaSEric Dumazet if (info->tcpi_state == TCP_LISTEN) {
3782ccbf3bfaSEric Dumazet /* listeners' aliased fields:
3783ccbf3bfaSEric Dumazet * tcpi_unacked -> Number of children ready for accept()
3784ccbf3bfaSEric Dumazet * tcpi_sacked -> max backlog
3785ccbf3bfaSEric Dumazet */
3786288efe86SEric Dumazet info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog);
3787099ecf59SEric Dumazet info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog);
3788ccbf3bfaSEric Dumazet return;
3789ccbf3bfaSEric Dumazet }
3790b369e7fdSEric Dumazet
3791b369e7fdSEric Dumazet slow = lock_sock_fast(sk);
3792b369e7fdSEric Dumazet
37936687e988SArnaldo Carvalho de Melo info->tcpi_ca_state = icsk->icsk_ca_state;
3794463c84b9SArnaldo Carvalho de Melo info->tcpi_retransmits = icsk->icsk_retransmits;
37956687e988SArnaldo Carvalho de Melo info->tcpi_probes = icsk->icsk_probes_out;
3796463c84b9SArnaldo Carvalho de Melo info->tcpi_backoff = icsk->icsk_backoff;
37971da177e4SLinus Torvalds
37981da177e4SLinus Torvalds if (tp->rx_opt.tstamp_ok)
37991da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
3800e60402d0SIlpo Järvinen if (tcp_is_sack(tp))
38011da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_SACK;
38021da177e4SLinus Torvalds if (tp->rx_opt.wscale_ok) {
38031da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_WSCALE;
38041da177e4SLinus Torvalds info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
38051da177e4SLinus Torvalds info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
38061da177e4SLinus Torvalds }
38071da177e4SLinus Torvalds
38081da177e4SLinus Torvalds if (tp->ecn_flags & TCP_ECN_OK)
38091da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_ECN;
3810b5c5693bSEric Dumazet if (tp->ecn_flags & TCP_ECN_SEEN)
3811b5c5693bSEric Dumazet info->tcpi_options |= TCPI_OPT_ECN_SEEN;
38126f73601eSYuchung Cheng if (tp->syn_data_acked)
38136f73601eSYuchung Cheng info->tcpi_options |= TCPI_OPT_SYN_DATA;
38141da177e4SLinus Torvalds
3815463c84b9SArnaldo Carvalho de Melo info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
3816030346dfSEric Dumazet info->tcpi_ato = jiffies_to_usecs(min(icsk->icsk_ack.ato,
3817030346dfSEric Dumazet tcp_delack_max(sk)));
3818c1b4a7e6SDavid S. Miller info->tcpi_snd_mss = tp->mss_cache;
3819463c84b9SArnaldo Carvalho de Melo info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
38201da177e4SLinus Torvalds
38211da177e4SLinus Torvalds info->tcpi_unacked = tp->packets_out;
38221da177e4SLinus Torvalds info->tcpi_sacked = tp->sacked_out;
3823ccbf3bfaSEric Dumazet
38241da177e4SLinus Torvalds info->tcpi_lost = tp->lost_out;
38251da177e4SLinus Torvalds info->tcpi_retrans = tp->retrans_out;
38261da177e4SLinus Torvalds
3827d635fbe2SEric Dumazet now = tcp_jiffies32;
38281da177e4SLinus Torvalds info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
3829463c84b9SArnaldo Carvalho de Melo info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
38301da177e4SLinus Torvalds info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
38311da177e4SLinus Torvalds
3832d83d8461SArnaldo Carvalho de Melo info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
38331da177e4SLinus Torvalds info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
3834740b0f18SEric Dumazet info->tcpi_rtt = tp->srtt_us >> 3;
3835740b0f18SEric Dumazet info->tcpi_rttvar = tp->mdev_us >> 2;
38361da177e4SLinus Torvalds info->tcpi_snd_ssthresh = tp->snd_ssthresh;
38371da177e4SLinus Torvalds info->tcpi_advmss = tp->advmss;
38381da177e4SLinus Torvalds
3839645f4c6fSEric Dumazet info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3;
38401da177e4SLinus Torvalds info->tcpi_rcv_space = tp->rcvq_space.space;
38411da177e4SLinus Torvalds
38421da177e4SLinus Torvalds info->tcpi_total_retrans = tp->total_retrans;
3843977cb0ecSEric Dumazet
3844f522a5fcSEric Dumazet info->tcpi_bytes_acked = tp->bytes_acked;
3845f522a5fcSEric Dumazet info->tcpi_bytes_received = tp->bytes_received;
384667db3e4bSEric Dumazet info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt);
3847efd90174SFrancis Yan tcp_get_info_chrono_stats(tp, info);
384867db3e4bSEric Dumazet
38492efd055cSMarcelo Ricardo Leitner info->tcpi_segs_out = tp->segs_out;
38500307a0b7SEric Dumazet
38510307a0b7SEric Dumazet /* segs_in and data_segs_in can be updated from tcp_segs_in() in BH context */
38520307a0b7SEric Dumazet info->tcpi_segs_in = READ_ONCE(tp->segs_in);
38530307a0b7SEric Dumazet info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in);
3854cd9b2660SEric Dumazet
3855cd9b2660SEric Dumazet info->tcpi_min_rtt = tcp_min_rtt(tp);
3856a44d6eacSMartin KaFai Lau info->tcpi_data_segs_out = tp->data_segs_out;
3857eb8329e0SYuchung Cheng
3858eb8329e0SYuchung Cheng info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0;
38590263598cSWei Wang rate64 = tcp_compute_delivery_rate(tp);
38600263598cSWei Wang if (rate64)
3861f522a5fcSEric Dumazet info->tcpi_delivery_rate = rate64;
3862feb5f2ecSYuchung Cheng info->tcpi_delivered = tp->delivered;
3863feb5f2ecSYuchung Cheng info->tcpi_delivered_ce = tp->delivered_ce;
3864ba113c3aSWei Wang info->tcpi_bytes_sent = tp->bytes_sent;
3865fb31c9b9SWei Wang info->tcpi_bytes_retrans = tp->bytes_retrans;
38667e10b655SWei Wang info->tcpi_dsack_dups = tp->dsack_dups;
38677ec65372SWei Wang info->tcpi_reord_seen = tp->reord_seen;
3868f9af2dbbSThomas Higdon info->tcpi_rcv_ooopack = tp->rcv_ooopack;
38698f7baad7SThomas Higdon info->tcpi_snd_wnd = tp->snd_wnd;
387071fc7047SMubashir Adnan Qureshi info->tcpi_rcv_wnd = tp->rcv_wnd;
387171fc7047SMubashir Adnan Qureshi info->tcpi_rehash = tp->plb_rehash + tp->timeout_rehash;
387248027478SJason Baron info->tcpi_fastopen_client_fail = tp->fastopen_client_fail;
3873718c49f8SAananth V
3874718c49f8SAananth V info->tcpi_total_rto = tp->total_rto;
3875718c49f8SAananth V info->tcpi_total_rto_recoveries = tp->total_rto_recoveries;
3876718c49f8SAananth V info->tcpi_total_rto_time = tp->total_rto_time;
3877718c49f8SAananth V if (tp->rto_stamp) {
3878718c49f8SAananth V info->tcpi_total_rto_time += tcp_time_stamp_raw() -
3879718c49f8SAananth V tp->rto_stamp;
3880718c49f8SAananth V }
3881718c49f8SAananth V
3882b369e7fdSEric Dumazet unlock_sock_fast(sk, slow);
38831da177e4SLinus Torvalds }
38841da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(tcp_get_info);
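/* Editor's illustration (hedged sketch, not part of the kernel tree):
 * userspace reads this structure with getsockopt(); since only
 * min(len, sizeof(info)) bytes are copied out, binaries built against an
 * older struct tcp_info keep working as new fields are appended.
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt=%u us, retrans=%u\n",
 *		       ti.tcpi_rtt, ti.tcpi_total_retrans);
 */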
38851da177e4SLinus Torvalds
3886984988aaSWei Wang static size_t tcp_opt_stats_get_size(void)
3887984988aaSWei Wang {
3888984988aaSWei Wang return
3889984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */
3890984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */
3891984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */
3892984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */
3893984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */
3894984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */
3895984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */
3896984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */
3897984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */
3898984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */
3899984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */
3900984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */
3901984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */
3902984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */
3903984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */
3904984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */
3905984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */
3906ba113c3aSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */
3907fb31c9b9SWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */
39087e10b655SWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */
39097ec65372SWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */
3910e8bd8fcaSYousuk Seung nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */
391132efcc06SAbdul Kabbani nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */
3912e08ab0b3SYousuk Seung nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */
391348040793SYousuk Seung nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT */
3914e7ed11eeSYousuk Seung nla_total_size(sizeof(u8)) + /* TCP_NLA_TTL */
391529c1c446SMubashir Adnan Qureshi nla_total_size(sizeof(u32)) + /* TCP_NLA_REHASH */
3916984988aaSWei Wang 0;
3917984988aaSWei Wang }
3918984988aaSWei Wang
3919e7ed11eeSYousuk Seung /* Returns TTL or hop limit of an incoming packet from skb. */
3920e7ed11eeSYousuk Seung static u8 tcp_skb_ttl_or_hop_limit(const struct sk_buff *skb)
3921e7ed11eeSYousuk Seung {
3922e7ed11eeSYousuk Seung if (skb->protocol == htons(ETH_P_IP))
3923e7ed11eeSYousuk Seung return ip_hdr(skb)->ttl;
3924e7ed11eeSYousuk Seung else if (skb->protocol == htons(ETH_P_IPV6))
3925e7ed11eeSYousuk Seung return ipv6_hdr(skb)->hop_limit;
3926e7ed11eeSYousuk Seung else
3927e7ed11eeSYousuk Seung return 0;
3928e7ed11eeSYousuk Seung }
3929e7ed11eeSYousuk Seung
393048040793SYousuk Seung struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
3931e7ed11eeSYousuk Seung const struct sk_buff *orig_skb,
3932e7ed11eeSYousuk Seung const struct sk_buff *ack_skb)
39331c885808SFrancis Yan {
39341c885808SFrancis Yan const struct tcp_sock *tp = tcp_sk(sk);
39351c885808SFrancis Yan struct sk_buff *stats;
39361c885808SFrancis Yan struct tcp_info info;
393776a9ebe8SEric Dumazet unsigned long rate;
3938bb7c19f9SWei Wang u64 rate64;
39391c885808SFrancis Yan
3940984988aaSWei Wang stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC);
39411c885808SFrancis Yan if (!stats)
39421c885808SFrancis Yan return NULL;
39431c885808SFrancis Yan
39441c885808SFrancis Yan tcp_get_info_chrono_stats(tp, &info);
39451c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_BUSY,
39461c885808SFrancis Yan info.tcpi_busy_time, TCP_NLA_PAD);
39471c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED,
39481c885808SFrancis Yan info.tcpi_rwnd_limited, TCP_NLA_PAD);
39491c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED,
39501c885808SFrancis Yan info.tcpi_sndbuf_limited, TCP_NLA_PAD);
39517e98102fSYuchung Cheng nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT,
39527e98102fSYuchung Cheng tp->data_segs_out, TCP_NLA_PAD);
39537e98102fSYuchung Cheng nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS,
39547e98102fSYuchung Cheng tp->total_retrans, TCP_NLA_PAD);
3955bb7c19f9SWei Wang
3956bb7c19f9SWei Wang rate = READ_ONCE(sk->sk_pacing_rate);
395776a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? rate : ~0ULL;
3958bb7c19f9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD);
3959bb7c19f9SWei Wang
3960bb7c19f9SWei Wang rate64 = tcp_compute_delivery_rate(tp);
3961bb7c19f9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD);
3962bb7c19f9SWei Wang
396340570375SEric Dumazet nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp));
3964bb7c19f9SWei Wang nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
3965bb7c19f9SWei Wang nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));
3966bb7c19f9SWei Wang
3967bb7c19f9SWei Wang nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits);
3968bb7c19f9SWei Wang nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited);
39697156d194SYousuk Seung nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh);
3970feb5f2ecSYuchung Cheng nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered);
3971feb5f2ecSYuchung Cheng nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce);
397287ecc95dSPriyaranjan Jha
397387ecc95dSPriyaranjan Jha nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una);
3974be631892SPriyaranjan Jha nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state);
3975feb5f2ecSYuchung Cheng
3976ba113c3aSWei Wang nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent,
3977ba113c3aSWei Wang TCP_NLA_PAD);
3978fb31c9b9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans,
3979fb31c9b9SWei Wang TCP_NLA_PAD);
39807e10b655SWei Wang nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups);
39817ec65372SWei Wang nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen);
3982e8bd8fcaSYousuk Seung nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3);
398332efcc06SAbdul Kabbani nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash);
3984e08ab0b3SYousuk Seung nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT,
3985e08ab0b3SYousuk Seung max_t(int, 0, tp->write_seq - tp->snd_nxt));
398648040793SYousuk Seung nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns,
398748040793SYousuk Seung TCP_NLA_PAD);
3988e7ed11eeSYousuk Seung if (ack_skb)
3989e7ed11eeSYousuk Seung nla_put_u8(stats, TCP_NLA_TTL,
3990e7ed11eeSYousuk Seung tcp_skb_ttl_or_hop_limit(ack_skb));
3991ba113c3aSWei Wang
399229c1c446SMubashir Adnan Qureshi nla_put_u32(stats, TCP_NLA_REHASH, tp->plb_rehash + tp->timeout_rehash);
39931c885808SFrancis Yan return stats;
39941c885808SFrancis Yan }
39951c885808SFrancis Yan
3996273b7f0fSMartin KaFai Lau int do_tcp_getsockopt(struct sock *sk, int level,
399734704ef0SMartin KaFai Lau int optname, sockptr_t optval, sockptr_t optlen)
39981da177e4SLinus Torvalds {
3999295f7324SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk);
40001da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
40016fa25166SNikolay Borisov struct net *net = sock_net(sk);
40021da177e4SLinus Torvalds int val, len;
40031da177e4SLinus Torvalds
400434704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int)))
40051da177e4SLinus Torvalds return -EFAULT;
40061da177e4SLinus Torvalds
40071da177e4SLinus Torvalds if (len < 0)
40081da177e4SLinus Torvalds return -EINVAL;
40091da177e4SLinus Torvalds
40100709f6faSGavrilov Ilia len = min_t(unsigned int, len, sizeof(int));
40110709f6faSGavrilov Ilia
40121da177e4SLinus Torvalds switch (optname) {
40131da177e4SLinus Torvalds case TCP_MAXSEG:
4014c1b4a7e6SDavid S. Miller val = tp->mss_cache;
401534dfde4aSCambda Zhu if (tp->rx_opt.user_mss &&
401634dfde4aSCambda Zhu ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
40171da177e4SLinus Torvalds val = tp->rx_opt.user_mss;
40185e6a3ce6SPavel Emelyanov if (tp->repair)
40195e6a3ce6SPavel Emelyanov val = tp->rx_opt.mss_clamp;
40201da177e4SLinus Torvalds break;
40211da177e4SLinus Torvalds case TCP_NODELAY:
40221da177e4SLinus Torvalds val = !!(tp->nonagle&TCP_NAGLE_OFF);
40231da177e4SLinus Torvalds break;
40241da177e4SLinus Torvalds case TCP_CORK:
40251da177e4SLinus Torvalds val = !!(tp->nonagle&TCP_NAGLE_CORK);
40261da177e4SLinus Torvalds break;
40271da177e4SLinus Torvalds case TCP_KEEPIDLE:
4028df19a626SEric Dumazet val = keepalive_time_when(tp) / HZ;
40291da177e4SLinus Torvalds break;
40301da177e4SLinus Torvalds case TCP_KEEPINTVL:
4031df19a626SEric Dumazet val = keepalive_intvl_when(tp) / HZ;
40321da177e4SLinus Torvalds break;
40331da177e4SLinus Torvalds case TCP_KEEPCNT:
4034df19a626SEric Dumazet val = keepalive_probes(tp);
40351da177e4SLinus Torvalds break;
40361da177e4SLinus Torvalds case TCP_SYNCNT:
40373a037f0fSEric Dumazet val = READ_ONCE(icsk->icsk_syn_retries) ? :
403820a3b1c0SKuniyuki Iwashima READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);
40391da177e4SLinus Torvalds break;
40401da177e4SLinus Torvalds case TCP_LINGER2:
40419df5335cSEric Dumazet val = READ_ONCE(tp->linger2);
40421da177e4SLinus Torvalds if (val >= 0)
404339e24435SKuniyuki Iwashima val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ;
40441da177e4SLinus Torvalds break;
40451da177e4SLinus Torvalds case TCP_DEFER_ACCEPT:
4046ae488c74SEric Dumazet val = READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept);
4047ae488c74SEric Dumazet val = retrans_to_secs(val, TCP_TIMEOUT_INIT / HZ,
4048ae488c74SEric Dumazet TCP_RTO_MAX / HZ);
40491da177e4SLinus Torvalds break;
40501da177e4SLinus Torvalds case TCP_WINDOW_CLAMP:
4051f9fef23aSEric Dumazet val = READ_ONCE(tp->window_clamp);
40521da177e4SLinus Torvalds break;
40531da177e4SLinus Torvalds case TCP_INFO: {
40541da177e4SLinus Torvalds struct tcp_info info;
40551da177e4SLinus Torvalds
405634704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int)))
40571da177e4SLinus Torvalds return -EFAULT;
40581da177e4SLinus Torvalds
40591da177e4SLinus Torvalds tcp_get_info(sk, &info);
40601da177e4SLinus Torvalds
40611da177e4SLinus Torvalds len = min_t(unsigned int, len, sizeof(info));
406234704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int)))
40631da177e4SLinus Torvalds return -EFAULT;
406434704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &info, len))
40651da177e4SLinus Torvalds return -EFAULT;
40661da177e4SLinus Torvalds return 0;
40671da177e4SLinus Torvalds }
40686e9250f5SEric Dumazet case TCP_CC_INFO: {
40696e9250f5SEric Dumazet const struct tcp_congestion_ops *ca_ops;
40706e9250f5SEric Dumazet union tcp_cc_info info;
40716e9250f5SEric Dumazet size_t sz = 0;
40726e9250f5SEric Dumazet int attr;
40736e9250f5SEric Dumazet
407434704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int)))
40756e9250f5SEric Dumazet return -EFAULT;
40766e9250f5SEric Dumazet
40776e9250f5SEric Dumazet ca_ops = icsk->icsk_ca_ops;
40786e9250f5SEric Dumazet if (ca_ops && ca_ops->get_info)
40796e9250f5SEric Dumazet sz = ca_ops->get_info(sk, ~0U, &attr, &info);
40806e9250f5SEric Dumazet
40816e9250f5SEric Dumazet len = min_t(unsigned int, len, sz);
408234704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int)))
40836e9250f5SEric Dumazet return -EFAULT;
408434704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &info, len))
40856e9250f5SEric Dumazet return -EFAULT;
40866e9250f5SEric Dumazet return 0;
40876e9250f5SEric Dumazet }
40881da177e4SLinus Torvalds case TCP_QUICKACK:
408931954cd8SWei Wang val = !inet_csk_in_pingpong_mode(sk);
40901da177e4SLinus Torvalds break;
40915f8ef48dSStephen Hemminger
40925f8ef48dSStephen Hemminger case TCP_CONGESTION:
409334704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int)))
40945f8ef48dSStephen Hemminger return -EFAULT;
40955f8ef48dSStephen Hemminger len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
409634704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int)))
40975f8ef48dSStephen Hemminger return -EFAULT;
409834704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, icsk->icsk_ca_ops->name, len))
40995f8ef48dSStephen Hemminger return -EFAULT;
41005f8ef48dSStephen Hemminger return 0;
4101e56fb50fSWilliam Allen Simpson
4102734942ccSDave Watson case TCP_ULP:
410334704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int)))
4104734942ccSDave Watson return -EFAULT;
4105734942ccSDave Watson len = min_t(unsigned int, len, TCP_ULP_NAME_MAX);
4106d97af30fSDave Watson if (!icsk->icsk_ulp_ops) {
410734704ef0SMartin KaFai Lau len = 0;
410834704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int)))
4109d97af30fSDave Watson return -EFAULT;
4110d97af30fSDave Watson return 0;
4111d97af30fSDave Watson }
411234704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int)))
4113734942ccSDave Watson return -EFAULT;
411434704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, icsk->icsk_ulp_ops->name, len))
4115734942ccSDave Watson return -EFAULT;
4116734942ccSDave Watson return 0;
4117734942ccSDave Watson
41181fba70e5SYuchung Cheng case TCP_FASTOPEN_KEY: {
4119f19008e6SJason Baron u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)];
4120f19008e6SJason Baron unsigned int key_len;
41211fba70e5SYuchung Cheng
412234704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int)))
41231fba70e5SYuchung Cheng return -EFAULT;
41241fba70e5SYuchung Cheng
4125f19008e6SJason Baron key_len = tcp_fastopen_get_cipher(net, icsk, key) *
41260f1ce023SJason Baron TCP_FASTOPEN_KEY_LENGTH;
41270f1ce023SJason Baron len = min_t(unsigned int, len, key_len);
412834704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int)))
41291fba70e5SYuchung Cheng return -EFAULT;
413034704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, key, len))
41311fba70e5SYuchung Cheng return -EFAULT;
41321fba70e5SYuchung Cheng return 0;
41331fba70e5SYuchung Cheng }
41343c0fef0bSJosh Hunt case TCP_THIN_LINEAR_TIMEOUTS:
41353c0fef0bSJosh Hunt val = tp->thin_lto;
41363c0fef0bSJosh Hunt break;
41374a7f6009SYuchung Cheng
41383c0fef0bSJosh Hunt case TCP_THIN_DUPACK:
41394a7f6009SYuchung Cheng val = 0;
41403c0fef0bSJosh Hunt break;
4141dca43c75SJerry Chu
4142ee995283SPavel Emelyanov case TCP_REPAIR:
4143ee995283SPavel Emelyanov val = tp->repair;
4144ee995283SPavel Emelyanov break;
4145ee995283SPavel Emelyanov
4146ee995283SPavel Emelyanov case TCP_REPAIR_QUEUE:
4147ee995283SPavel Emelyanov if (tp->repair)
4148ee995283SPavel Emelyanov val = tp->repair_queue;
4149ee995283SPavel Emelyanov else
4150ee995283SPavel Emelyanov return -EINVAL;
4151ee995283SPavel Emelyanov break;
4152ee995283SPavel Emelyanov
4153b1ed4c4fSAndrey Vagin case TCP_REPAIR_WINDOW: {
4154b1ed4c4fSAndrey Vagin struct tcp_repair_window opt;
4155b1ed4c4fSAndrey Vagin
415634704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int)))
4157b1ed4c4fSAndrey Vagin return -EFAULT;
4158b1ed4c4fSAndrey Vagin
4159b1ed4c4fSAndrey Vagin if (len != sizeof(opt))
4160b1ed4c4fSAndrey Vagin return -EINVAL;
4161b1ed4c4fSAndrey Vagin
4162b1ed4c4fSAndrey Vagin if (!tp->repair)
4163b1ed4c4fSAndrey Vagin return -EPERM;
4164b1ed4c4fSAndrey Vagin
4165b1ed4c4fSAndrey Vagin opt.snd_wl1 = tp->snd_wl1;
4166b1ed4c4fSAndrey Vagin opt.snd_wnd = tp->snd_wnd;
4167b1ed4c4fSAndrey Vagin opt.max_window = tp->max_window;
4168b1ed4c4fSAndrey Vagin opt.rcv_wnd = tp->rcv_wnd;
4169b1ed4c4fSAndrey Vagin opt.rcv_wup = tp->rcv_wup;
4170b1ed4c4fSAndrey Vagin
417134704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &opt, len))
4172b1ed4c4fSAndrey Vagin return -EFAULT;
4173b1ed4c4fSAndrey Vagin return 0;
4174b1ed4c4fSAndrey Vagin }
4175ee995283SPavel Emelyanov case TCP_QUEUE_SEQ:
4176ee995283SPavel Emelyanov if (tp->repair_queue == TCP_SEND_QUEUE)
4177ee995283SPavel Emelyanov val = tp->write_seq;
4178ee995283SPavel Emelyanov else if (tp->repair_queue == TCP_RECV_QUEUE)
4179ee995283SPavel Emelyanov val = tp->rcv_nxt;
4180ee995283SPavel Emelyanov else
4181ee995283SPavel Emelyanov return -EINVAL;
4182ee995283SPavel Emelyanov break;
4183ee995283SPavel Emelyanov
4184dca43c75SJerry Chu case TCP_USER_TIMEOUT:
418526023e91SEric Dumazet val = READ_ONCE(icsk->icsk_user_timeout);
4186dca43c75SJerry Chu break;
41871536e285SKenjiro Nakayama
41881536e285SKenjiro Nakayama case TCP_FASTOPEN:
418970f360ddSEric Dumazet val = READ_ONCE(icsk->icsk_accept_queue.fastopenq.max_qlen);
41901536e285SKenjiro Nakayama break;
41911536e285SKenjiro Nakayama
419219f6d3f3SWei Wang case TCP_FASTOPEN_CONNECT:
419319f6d3f3SWei Wang val = tp->fastopen_connect;
419419f6d3f3SWei Wang break;
419519f6d3f3SWei Wang
419671c02379SChristoph Paasch case TCP_FASTOPEN_NO_COOKIE:
419771c02379SChristoph Paasch val = tp->fastopen_no_cookie;
419871c02379SChristoph Paasch break;
419971c02379SChristoph Paasch
4200a842fe14SEric Dumazet case TCP_TX_DELAY:
4201348b81b6SEric Dumazet val = READ_ONCE(tp->tcp_tx_delay);
4202a842fe14SEric Dumazet break;
4203a842fe14SEric Dumazet
420493be6ce0SAndrey Vagin case TCP_TIMESTAMP:
4205dd23c9f1SEric Dumazet val = tcp_time_stamp_raw() + READ_ONCE(tp->tsoffset);
420693be6ce0SAndrey Vagin break;
4207c9bee3b7SEric Dumazet case TCP_NOTSENT_LOWAT:
42081aeb87bcSEric Dumazet val = READ_ONCE(tp->notsent_lowat);
4209c9bee3b7SEric Dumazet break;
4210b75eba76SSoheil Hassas Yeganeh case TCP_INQ:
4211b75eba76SSoheil Hassas Yeganeh val = tp->recvmsg_inq;
4212b75eba76SSoheil Hassas Yeganeh break;
4213cd8ae852SEric Dumazet case TCP_SAVE_SYN:
4214cd8ae852SEric Dumazet val = tp->save_syn;
4215cd8ae852SEric Dumazet break;
4216cd8ae852SEric Dumazet case TCP_SAVED_SYN: {
421734704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int)))
4218cd8ae852SEric Dumazet return -EFAULT;
4219cd8ae852SEric Dumazet
4220d51bbff2SMartin KaFai Lau sockopt_lock_sock(sk);
4221cd8ae852SEric Dumazet if (tp->saved_syn) {
422270a217f1SMartin KaFai Lau if (len < tcp_saved_syn_len(tp->saved_syn)) {
422334704ef0SMartin KaFai Lau len = tcp_saved_syn_len(tp->saved_syn);
422434704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) {
4225d51bbff2SMartin KaFai Lau sockopt_release_sock(sk);
4226aea0929eSEric B Munson return -EFAULT;
4227aea0929eSEric B Munson }
4228d51bbff2SMartin KaFai Lau sockopt_release_sock(sk);
4229aea0929eSEric B Munson return -EINVAL;
4230aea0929eSEric B Munson }
423170a217f1SMartin KaFai Lau len = tcp_saved_syn_len(tp->saved_syn);
423234704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) {
4233d51bbff2SMartin KaFai Lau sockopt_release_sock(sk);
4234cd8ae852SEric Dumazet return -EFAULT;
4235cd8ae852SEric Dumazet }
423634704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, tp->saved_syn->data, len)) {
4237d51bbff2SMartin KaFai Lau sockopt_release_sock(sk);
4238cd8ae852SEric Dumazet return -EFAULT;
4239cd8ae852SEric Dumazet }
4240cd8ae852SEric Dumazet tcp_saved_syn_free(tp);
4241d51bbff2SMartin KaFai Lau sockopt_release_sock(sk);
4242cd8ae852SEric Dumazet } else {
4243d51bbff2SMartin KaFai Lau sockopt_release_sock(sk);
4244cd8ae852SEric Dumazet len = 0;
424534704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int)))
4246cd8ae852SEric Dumazet return -EFAULT;
4247cd8ae852SEric Dumazet }
4248cd8ae852SEric Dumazet return 0;
4249cd8ae852SEric Dumazet }
425005255b82SEric Dumazet #ifdef CONFIG_MMU
425105255b82SEric Dumazet case TCP_ZEROCOPY_RECEIVE: {
42527eeba170SArjun Roy struct scm_timestamping_internal tss;
4253e0fecb28SArjun Roy struct tcp_zerocopy_receive zc = {};
425405255b82SEric Dumazet int err;
425505255b82SEric Dumazet
425634704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int)))
425705255b82SEric Dumazet return -EFAULT;
42582107d45fSArjun Roy if (len < 0 ||
42592107d45fSArjun Roy len < offsetofend(struct tcp_zerocopy_receive, length))
426005255b82SEric Dumazet return -EINVAL;
42613c5a2fd0SArjun Roy if (unlikely(len > sizeof(zc))) {
426234704ef0SMartin KaFai Lau err = check_zeroed_sockptr(optval, sizeof(zc),
42633c5a2fd0SArjun Roy len - sizeof(zc));
42643c5a2fd0SArjun Roy if (err < 1)
42653c5a2fd0SArjun Roy return err == 0 ? -EINVAL : err;
4266c8856c05SArjun Roy len = sizeof(zc);
426734704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int)))
42680b7f41f6SArjun Roy return -EFAULT;
42690b7f41f6SArjun Roy }
427034704ef0SMartin KaFai Lau if (copy_from_sockptr(&zc, optval, len))
427105255b82SEric Dumazet return -EFAULT;
42723c5a2fd0SArjun Roy if (zc.reserved)
42733c5a2fd0SArjun Roy return -EINVAL;
42743c5a2fd0SArjun Roy if (zc.msg_flags & ~(TCP_VALID_ZC_MSG_FLAGS))
42753c5a2fd0SArjun Roy return -EINVAL;
4276d51bbff2SMartin KaFai Lau sockopt_lock_sock(sk);
42777eeba170SArjun Roy err = tcp_zerocopy_receive(sk, &zc, &tss);
42789cacf81fSStanislav Fomichev err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname,
42799cacf81fSStanislav Fomichev &zc, &len, err);
4280d51bbff2SMartin KaFai Lau sockopt_release_sock(sk);
42817eeba170SArjun Roy if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags))
42827eeba170SArjun Roy goto zerocopy_rcv_cmsg;
4283c8856c05SArjun Roy switch (len) {
42847eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_flags):
42857eeba170SArjun Roy goto zerocopy_rcv_cmsg;
42867eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_controllen):
42877eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_control):
42887eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, flags):
42897eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, copybuf_len):
42907eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, copybuf_address):
429133946518SArjun Roy case offsetofend(struct tcp_zerocopy_receive, err):
429233946518SArjun Roy goto zerocopy_rcv_sk_err;
4293c8856c05SArjun Roy case offsetofend(struct tcp_zerocopy_receive, inq):
4294c8856c05SArjun Roy goto zerocopy_rcv_inq;
4295c8856c05SArjun Roy case offsetofend(struct tcp_zerocopy_receive, length):
4296c8856c05SArjun Roy default:
4297c8856c05SArjun Roy goto zerocopy_rcv_out;
4298c8856c05SArjun Roy }
42997eeba170SArjun Roy zerocopy_rcv_cmsg:
43007eeba170SArjun Roy if (zc.msg_flags & TCP_CMSG_TS)
43017eeba170SArjun Roy tcp_zc_finalize_rx_tstamp(sk, &zc, &tss);
43027eeba170SArjun Roy else
43037eeba170SArjun Roy zc.msg_flags = 0;
430433946518SArjun Roy zerocopy_rcv_sk_err:
430533946518SArjun Roy if (!err)
430633946518SArjun Roy zc.err = sock_error(sk);
4307c8856c05SArjun Roy zerocopy_rcv_inq:
4308c8856c05SArjun Roy zc.inq = tcp_inq_hint(sk);
4309c8856c05SArjun Roy zerocopy_rcv_out:
431034704ef0SMartin KaFai Lau if (!err && copy_to_sockptr(optval, &zc, len))
431105255b82SEric Dumazet err = -EFAULT;
431205255b82SEric Dumazet return err;
431305255b82SEric Dumazet }
431405255b82SEric Dumazet #endif
43151da177e4SLinus Torvalds default:
43161da177e4SLinus Torvalds return -ENOPROTOOPT;
43173ff50b79SStephen Hemminger }
43181da177e4SLinus Torvalds
431934704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int)))
43201da177e4SLinus Torvalds return -EFAULT;
432134704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &val, len))
43221da177e4SLinus Torvalds return -EFAULT;
43231da177e4SLinus Torvalds return 0;
43241da177e4SLinus Torvalds }
43251da177e4SLinus Torvalds
43269cacf81fSStanislav Fomichev bool tcp_bpf_bypass_getsockopt(int level, int optname)
43279cacf81fSStanislav Fomichev {
43289cacf81fSStanislav Fomichev /* TCP's do_tcp_getsockopt() has an optimized getsockopt implementation
43299cacf81fSStanislav Fomichev * that avoids taking an extra socket lock for TCP_ZEROCOPY_RECEIVE.
43309cacf81fSStanislav Fomichev */
43319cacf81fSStanislav Fomichev if (level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE)
43329cacf81fSStanislav Fomichev return true;
43339cacf81fSStanislav Fomichev
43349cacf81fSStanislav Fomichev return false;
43359cacf81fSStanislav Fomichev }
43369cacf81fSStanislav Fomichev EXPORT_SYMBOL(tcp_bpf_bypass_getsockopt);
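/* A rough sketch (an assumption about the caller, not code from this
 * file) of how the generic getsockopt path consults this hook: the
 * cgroup BPF pass is skipped when the protocol already ran it itself,
 * as do_tcp_getsockopt() does for TCP_ZEROCOPY_RECEIVE under the
 * socket lock above ("run_cgroup_bpf_getsockopt" is illustrative):
 *
 *	if (!sk->sk_prot->bpf_bypass_getsockopt ||
 *	    !sk->sk_prot->bpf_bypass_getsockopt(level, optname))
 *		err = run_cgroup_bpf_getsockopt(sk, level, optname,
 *						optval, optlen, err);
 */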
43379cacf81fSStanislav Fomichev
43383fdadf7dSDmitry Mishin int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
43393fdadf7dSDmitry Mishin int __user *optlen)
43403fdadf7dSDmitry Mishin {
43413fdadf7dSDmitry Mishin struct inet_connection_sock *icsk = inet_csk(sk);
43423fdadf7dSDmitry Mishin
43433fdadf7dSDmitry Mishin if (level != SOL_TCP)
4344f49cd2f4SKuniyuki Iwashima /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */
4345f49cd2f4SKuniyuki Iwashima return READ_ONCE(icsk->icsk_af_ops)->getsockopt(sk, level, optname,
43463fdadf7dSDmitry Mishin optval, optlen);
434734704ef0SMartin KaFai Lau return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval),
434834704ef0SMartin KaFai Lau USER_SOCKPTR(optlen));
43493fdadf7dSDmitry Mishin }
43504bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_getsockopt);
43513fdadf7dSDmitry Mishin
4352cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
4353349ce993SEric Dumazet static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
435471cea17eSEric Dumazet static DEFINE_MUTEX(tcp_md5sig_mutex);
4355349ce993SEric Dumazet static bool tcp_md5sig_pool_populated = false;
4356cfb6eeb4SYOSHIFUJI Hideaki
435771cea17eSEric Dumazet static void __tcp_alloc_md5sig_pool(void)
4358cfb6eeb4SYOSHIFUJI Hideaki {
4359cf80e0e4SHerbert Xu struct crypto_ahash *hash;
4360cfb6eeb4SYOSHIFUJI Hideaki int cpu;
4361cfb6eeb4SYOSHIFUJI Hideaki
4362cf80e0e4SHerbert Xu hash = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
43631eea84b7SInsu Yun if (IS_ERR(hash))
4364349ce993SEric Dumazet return;
4365cf80e0e4SHerbert Xu
4366cf80e0e4SHerbert Xu for_each_possible_cpu(cpu) {
436719689e38SEric Dumazet void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch;
4368cf80e0e4SHerbert Xu struct ahash_request *req;
4369cf80e0e4SHerbert Xu
437019689e38SEric Dumazet if (!scratch) {
437119689e38SEric Dumazet scratch = kmalloc_node(sizeof(union tcp_md5sum_block) +
437219689e38SEric Dumazet sizeof(struct tcphdr),
437319689e38SEric Dumazet GFP_KERNEL,
437419689e38SEric Dumazet cpu_to_node(cpu));
437519689e38SEric Dumazet if (!scratch)
437619689e38SEric Dumazet return;
437719689e38SEric Dumazet per_cpu(tcp_md5sig_pool, cpu).scratch = scratch;
437819689e38SEric Dumazet }
4379cf80e0e4SHerbert Xu if (per_cpu(tcp_md5sig_pool, cpu).md5_req)
4380cf80e0e4SHerbert Xu continue;
4381cf80e0e4SHerbert Xu
4382cf80e0e4SHerbert Xu req = ahash_request_alloc(hash, GFP_KERNEL);
4383cf80e0e4SHerbert Xu if (!req)
4384cf80e0e4SHerbert Xu return;
4385cf80e0e4SHerbert Xu
4386cf80e0e4SHerbert Xu ahash_request_set_callback(req, 0, NULL, NULL);
4387cf80e0e4SHerbert Xu
4388cf80e0e4SHerbert Xu per_cpu(tcp_md5sig_pool, cpu).md5_req = req;
4389349ce993SEric Dumazet }
4390349ce993SEric Dumazet /* before setting tcp_md5sig_pool_populated, we must commit all writes
4391349ce993SEric Dumazet * to memory. See smp_rmb() in tcp_get_md5sig_pool()
439271cea17eSEric Dumazet */
439371cea17eSEric Dumazet smp_wmb();
4394aacd467cSEric Dumazet /* Paired with READ_ONCE() from tcp_alloc_md5sig_pool()
4395aacd467cSEric Dumazet * and tcp_get_md5sig_pool().
4396aacd467cSEric Dumazet */
4397aacd467cSEric Dumazet WRITE_ONCE(tcp_md5sig_pool_populated, true);
4398cfb6eeb4SYOSHIFUJI Hideaki }
4399cfb6eeb4SYOSHIFUJI Hideaki
440071cea17eSEric Dumazet bool tcp_alloc_md5sig_pool(void)
4401cfb6eeb4SYOSHIFUJI Hideaki {
4402aacd467cSEric Dumazet /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */
4403aacd467cSEric Dumazet if (unlikely(!READ_ONCE(tcp_md5sig_pool_populated))) {
440471cea17eSEric Dumazet mutex_lock(&tcp_md5sig_mutex);
4405cfb6eeb4SYOSHIFUJI Hideaki
4406459837b5SDmitry Safonov if (!tcp_md5sig_pool_populated)
440771cea17eSEric Dumazet __tcp_alloc_md5sig_pool();
4408cfb6eeb4SYOSHIFUJI Hideaki
440971cea17eSEric Dumazet mutex_unlock(&tcp_md5sig_mutex);
4410cfb6eeb4SYOSHIFUJI Hideaki }
4411aacd467cSEric Dumazet /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */
4412aacd467cSEric Dumazet return READ_ONCE(tcp_md5sig_pool_populated);
4413cfb6eeb4SYOSHIFUJI Hideaki }
4414cfb6eeb4SYOSHIFUJI Hideaki EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
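/* The barrier pairing above is a one-shot publish/consume pattern.
 * A schematic sketch (names are illustrative, not kernel API, though
 * smp_wmb()/smp_rmb()/READ_ONCE()/WRITE_ONCE() are the real primitives):
 *
 *	// publisher side, as in __tcp_alloc_md5sig_pool():
 *	setup_percpu_payload();
 *	smp_wmb();
 *	WRITE_ONCE(populated, true);
 *
 *	// consumer side, as in tcp_get_md5sig_pool():
 *	if (READ_ONCE(populated)) {
 *		smp_rmb();
 *		use_percpu_payload();
 *	}
 */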
4415cfb6eeb4SYOSHIFUJI Hideaki
441635790c04SEric Dumazet
441735790c04SEric Dumazet /**
441835790c04SEric Dumazet * tcp_get_md5sig_pool - get md5sig_pool for this user
441935790c04SEric Dumazet *
442035790c04SEric Dumazet * We use a percpu structure, so if we succeed, we return with preemption
442135790c04SEric Dumazet * and BH disabled, to make sure another thread or softirq handler
442235790c04SEric Dumazet * won't try to get the same context.
442335790c04SEric Dumazet */
442435790c04SEric Dumazet struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
4425cfb6eeb4SYOSHIFUJI Hideaki {
442635790c04SEric Dumazet local_bh_disable();
442735790c04SEric Dumazet
4428aacd467cSEric Dumazet /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */
4429aacd467cSEric Dumazet if (READ_ONCE(tcp_md5sig_pool_populated)) {
4430349ce993SEric Dumazet /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
4431349ce993SEric Dumazet smp_rmb();
4432349ce993SEric Dumazet return this_cpu_ptr(&tcp_md5sig_pool);
4433349ce993SEric Dumazet }
443435790c04SEric Dumazet local_bh_enable();
443535790c04SEric Dumazet return NULL;
4436cfb6eeb4SYOSHIFUJI Hideaki }
443735790c04SEric Dumazet EXPORT_SYMBOL(tcp_get_md5sig_pool);
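/* Caller pattern (a hedged sketch): the returned pool is only valid
 * while BH stays disabled, so a successful get must be paired with
 * tcp_put_md5sig_pool(), which re-enables BH:
 *
 *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *
 *	if (hp) {
 *		crypto_ahash_init(hp->md5_req);
 *		// ... tcp_md5_hash_skb_data(), tcp_md5_hash_key() ...
 *		tcp_put_md5sig_pool();
 *	}
 */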
4438cfb6eeb4SYOSHIFUJI Hideaki
443949a72dfbSAdam Langley int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
4440cf533ea5SEric Dumazet const struct sk_buff *skb, unsigned int header_len)
444149a72dfbSAdam Langley {
444249a72dfbSAdam Langley struct scatterlist sg;
444349a72dfbSAdam Langley const struct tcphdr *tp = tcp_hdr(skb);
4444cf80e0e4SHerbert Xu struct ahash_request *req = hp->md5_req;
444595c96174SEric Dumazet unsigned int i;
444695c96174SEric Dumazet const unsigned int head_data_len = skb_headlen(skb) > header_len ?
444749a72dfbSAdam Langley skb_headlen(skb) - header_len : 0;
444849a72dfbSAdam Langley const struct skb_shared_info *shi = skb_shinfo(skb);
4449d7fd1b57SEric Dumazet struct sk_buff *frag_iter;
445049a72dfbSAdam Langley
445149a72dfbSAdam Langley sg_init_table(&sg, 1);
445249a72dfbSAdam Langley
445349a72dfbSAdam Langley sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
4454cf80e0e4SHerbert Xu ahash_request_set_crypt(req, &sg, NULL, head_data_len);
4455cf80e0e4SHerbert Xu if (crypto_ahash_update(req))
445649a72dfbSAdam Langley return 1;
445749a72dfbSAdam Langley
445849a72dfbSAdam Langley for (i = 0; i < shi->nr_frags; ++i) {
4459d8e18a51SMatthew Wilcox (Oracle) const skb_frag_t *f = &shi->frags[i];
4460b54c9d5bSJonathan Lemon unsigned int offset = skb_frag_off(f);
446154d27fcbSEric Dumazet struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
446254d27fcbSEric Dumazet
446354d27fcbSEric Dumazet sg_set_page(&sg, page, skb_frag_size(f),
446454d27fcbSEric Dumazet offset_in_page(offset));
4465cf80e0e4SHerbert Xu ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f));
4466cf80e0e4SHerbert Xu if (crypto_ahash_update(req))
446749a72dfbSAdam Langley return 1;
446849a72dfbSAdam Langley }
446949a72dfbSAdam Langley
4470d7fd1b57SEric Dumazet skb_walk_frags(skb, frag_iter)
4471d7fd1b57SEric Dumazet if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
4472d7fd1b57SEric Dumazet return 1;
4473d7fd1b57SEric Dumazet
447449a72dfbSAdam Langley return 0;
447549a72dfbSAdam Langley }
447649a72dfbSAdam Langley EXPORT_SYMBOL(tcp_md5_hash_skb_data);
447749a72dfbSAdam Langley
4478cf533ea5SEric Dumazet int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
447949a72dfbSAdam Langley {
4480e6ced831SEric Dumazet u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */
448149a72dfbSAdam Langley struct scatterlist sg;
448249a72dfbSAdam Langley
44836a2febecSEric Dumazet sg_init_one(&sg, key->key, keylen);
44846a2febecSEric Dumazet ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen);
4485e6ced831SEric Dumazet
4486e6ced831SEric Dumazet /* We use data_race() because tcp_md5_do_add() might change key->key under us */
4487e6ced831SEric Dumazet return data_race(crypto_ahash_update(hp->md5_req));
448849a72dfbSAdam Langley }
448949a72dfbSAdam Langley EXPORT_SYMBOL(tcp_md5_hash_key);
449049a72dfbSAdam Langley
44917bbb765bSDmitry Safonov /* Called with rcu_read_lock() */
44921330b6efSJakub Kicinski enum skb_drop_reason
44931330b6efSJakub Kicinski tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
44947bbb765bSDmitry Safonov const void *saddr, const void *daddr,
44957bbb765bSDmitry Safonov int family, int dif, int sdif)
44967bbb765bSDmitry Safonov {
44977bbb765bSDmitry Safonov /*
44987bbb765bSDmitry Safonov * This gets called for each TCP segment that arrives,
44997bbb765bSDmitry Safonov * so we want to be efficient.
45007bbb765bSDmitry Safonov * We have 3 drop cases:
45017bbb765bSDmitry Safonov * o No MD5 hash and one expected.
45027bbb765bSDmitry Safonov * o MD5 hash and we're not expecting one.
45037bbb765bSDmitry Safonov * o MD5 hash and it's wrong.
45047bbb765bSDmitry Safonov */
45057bbb765bSDmitry Safonov const __u8 *hash_location = NULL;
45067bbb765bSDmitry Safonov struct tcp_md5sig_key *hash_expected;
45077bbb765bSDmitry Safonov const struct tcphdr *th = tcp_hdr(skb);
4508e9d9da91SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk);
45097bbb765bSDmitry Safonov int genhash, l3index;
45107bbb765bSDmitry Safonov u8 newhash[16];
45117bbb765bSDmitry Safonov
45127bbb765bSDmitry Safonov /* sdif set means the packet ingressed via a device
45137bbb765bSDmitry Safonov * in an L3 domain and dif is set to the l3mdev
45147bbb765bSDmitry Safonov */
45157bbb765bSDmitry Safonov l3index = sdif ? dif : 0;
45167bbb765bSDmitry Safonov
45177bbb765bSDmitry Safonov hash_expected = tcp_md5_do_lookup(sk, l3index, saddr, family);
45187bbb765bSDmitry Safonov hash_location = tcp_parse_md5sig_option(th);
45197bbb765bSDmitry Safonov
45207bbb765bSDmitry Safonov /* We've parsed the options - do we have a hash? */
45217bbb765bSDmitry Safonov if (!hash_expected && !hash_location)
45221330b6efSJakub Kicinski return SKB_NOT_DROPPED_YET;
45237bbb765bSDmitry Safonov
45247bbb765bSDmitry Safonov if (hash_expected && !hash_location) {
45257bbb765bSDmitry Safonov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
45261330b6efSJakub Kicinski return SKB_DROP_REASON_TCP_MD5NOTFOUND;
45277bbb765bSDmitry Safonov }
45287bbb765bSDmitry Safonov
45297bbb765bSDmitry Safonov if (!hash_expected && hash_location) {
45307bbb765bSDmitry Safonov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
45311330b6efSJakub Kicinski return SKB_DROP_REASON_TCP_MD5UNEXPECTED;
45327bbb765bSDmitry Safonov }
45337bbb765bSDmitry Safonov
4534e62d2e11SEric Dumazet /* Check the signature.
4535e62d2e11SEric Dumazet * To support dual-stack listeners, we need to handle
4536e62d2e11SEric Dumazet * the IPv4-mapped case.
4537e62d2e11SEric Dumazet */
4538e62d2e11SEric Dumazet if (family == AF_INET)
4539e62d2e11SEric Dumazet genhash = tcp_v4_md5_hash_skb(newhash,
4540e62d2e11SEric Dumazet hash_expected,
4541e62d2e11SEric Dumazet NULL, skb);
4542e62d2e11SEric Dumazet else
4543e62d2e11SEric Dumazet genhash = tp->af_specific->calc_md5_hash(newhash,
4544e62d2e11SEric Dumazet hash_expected,
45457bbb765bSDmitry Safonov NULL, skb);
45467bbb765bSDmitry Safonov
45477bbb765bSDmitry Safonov if (genhash || memcmp(hash_location, newhash, 16) != 0) {
45487bbb765bSDmitry Safonov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
45497bbb765bSDmitry Safonov if (family == AF_INET) {
45507bbb765bSDmitry Safonov net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
45517bbb765bSDmitry Safonov saddr, ntohs(th->source),
45527bbb765bSDmitry Safonov daddr, ntohs(th->dest),
45537bbb765bSDmitry Safonov genhash ? " tcp_v4_calc_md5_hash failed"
45547bbb765bSDmitry Safonov : "", l3index);
45557bbb765bSDmitry Safonov } else {
45567bbb765bSDmitry Safonov net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n",
45577bbb765bSDmitry Safonov genhash ? "failed" : "mismatch",
45587bbb765bSDmitry Safonov saddr, ntohs(th->source),
45597bbb765bSDmitry Safonov daddr, ntohs(th->dest), l3index);
45607bbb765bSDmitry Safonov }
45611330b6efSJakub Kicinski return SKB_DROP_REASON_TCP_MD5FAILURE;
45627bbb765bSDmitry Safonov }
45631330b6efSJakub Kicinski return SKB_NOT_DROPPED_YET;
45647bbb765bSDmitry Safonov }
45657bbb765bSDmitry Safonov EXPORT_SYMBOL(tcp_inbound_md5_hash);
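/* For reference, the sender side of this check is armed from userspace
 * with the TCP_MD5SIG socket option (a hedged sketch; "fd" and "peer",
 * an already-filled sockaddr for the remote host, are assumptions):
 *
 *	struct tcp_md5sig md5 = {};
 *
 *	memcpy(&md5.tcpm_addr, &peer, sizeof(peer));
 *	md5.tcpm_keylen = strlen("secret");
 *	memcpy(md5.tcpm_key, "secret", md5.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */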
45667bbb765bSDmitry Safonov
4567cfb6eeb4SYOSHIFUJI Hideaki #endif
4568cfb6eeb4SYOSHIFUJI Hideaki
45694ac02babSAndi Kleen void tcp_done(struct sock *sk)
45704ac02babSAndi Kleen {
4571d983ea6fSEric Dumazet struct request_sock *req;
45728336886fSJerry Chu
4573cab209e5SEric Dumazet /* We might be called with a new socket, after
4574cab209e5SEric Dumazet * inet_csk_prepare_forced_close() has been called,
4575cab209e5SEric Dumazet * so we cannot use lockdep_sock_is_held(sk).
4576cab209e5SEric Dumazet */
4577cab209e5SEric Dumazet req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1);
45784ac02babSAndi Kleen
45794ac02babSAndi Kleen if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
4580c10d9310SEric Dumazet TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
45814ac02babSAndi Kleen
45824ac02babSAndi Kleen tcp_set_state(sk, TCP_CLOSE);
45834ac02babSAndi Kleen tcp_clear_xmit_timers(sk);
458400db4124SIan Morris if (req)
45858336886fSJerry Chu reqsk_fastopen_remove(sk, req, false);
45864ac02babSAndi Kleen
4587e14cadfdSEric Dumazet WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
45884ac02babSAndi Kleen
45894ac02babSAndi Kleen if (!sock_flag(sk, SOCK_DEAD))
45904ac02babSAndi Kleen sk->sk_state_change(sk);
45914ac02babSAndi Kleen else
45924ac02babSAndi Kleen inet_csk_destroy_sock(sk);
45934ac02babSAndi Kleen }
45944ac02babSAndi Kleen EXPORT_SYMBOL_GPL(tcp_done);
45954ac02babSAndi Kleen
4596c1e64e29SLorenzo Colitti int tcp_abort(struct sock *sk, int err)
4597c1e64e29SLorenzo Colitti {
4598af9784d0SEric Dumazet int state = inet_sk_state_load(sk);
4599af9784d0SEric Dumazet
4600af9784d0SEric Dumazet if (state == TCP_NEW_SYN_RECV) {
460107f6f4a3SEric Dumazet struct request_sock *req = inet_reqsk(sk);
460207f6f4a3SEric Dumazet
460307f6f4a3SEric Dumazet local_bh_disable();
4604acc2cf4eSLorenzo Colitti inet_csk_reqsk_queue_drop(req->rsk_listener, req);
460507f6f4a3SEric Dumazet local_bh_enable();
460607f6f4a3SEric Dumazet return 0;
460707f6f4a3SEric Dumazet }
4608af9784d0SEric Dumazet if (state == TCP_TIME_WAIT) {
4609af9784d0SEric Dumazet struct inet_timewait_sock *tw = inet_twsk(sk);
4610af9784d0SEric Dumazet
4611af9784d0SEric Dumazet refcount_inc(&tw->tw_refcnt);
4612af9784d0SEric Dumazet local_bh_disable();
4613af9784d0SEric Dumazet inet_twsk_deschedule_put(tw);
4614af9784d0SEric Dumazet local_bh_enable();
4615af9784d0SEric Dumazet return 0;
4616c1e64e29SLorenzo Colitti }
4617c1e64e29SLorenzo Colitti
46184ddbcb88SAditi Ghag /* BPF context ensures sock locking. */
46194ddbcb88SAditi Ghag if (!has_current_bpf_ctx())
4620c1e64e29SLorenzo Colitti /* Don't race with userspace socket closes such as tcp_close. */
4621c1e64e29SLorenzo Colitti lock_sock(sk);
4622c1e64e29SLorenzo Colitti
46232010b93eSLorenzo Colitti if (sk->sk_state == TCP_LISTEN) {
46242010b93eSLorenzo Colitti tcp_set_state(sk, TCP_CLOSE);
46252010b93eSLorenzo Colitti inet_csk_listen_stop(sk);
46262010b93eSLorenzo Colitti }
46272010b93eSLorenzo Colitti
4628c1e64e29SLorenzo Colitti /* Don't race with BH socket closes such as inet_csk_listen_stop. */
4629c1e64e29SLorenzo Colitti local_bh_disable();
4630c1e64e29SLorenzo Colitti bh_lock_sock(sk);
4631c1e64e29SLorenzo Colitti
4632c1e64e29SLorenzo Colitti if (!sock_flag(sk, SOCK_DEAD)) {
4633e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, err);
4634c1e64e29SLorenzo Colitti /* This barrier is coupled with smp_rmb() in tcp_poll() */
4635c1e64e29SLorenzo Colitti smp_wmb();
4636e3ae2365SAlexander Aring sk_error_report(sk);
4637c1e64e29SLorenzo Colitti if (tcp_need_reset(sk->sk_state))
4638c1e64e29SLorenzo Colitti tcp_send_active_reset(sk, GFP_ATOMIC);
4639c1e64e29SLorenzo Colitti tcp_done(sk);
4640c1e64e29SLorenzo Colitti }
4641c1e64e29SLorenzo Colitti
4642c1e64e29SLorenzo Colitti bh_unlock_sock(sk);
4643c1e64e29SLorenzo Colitti local_bh_enable();
4644e05836acSSoheil Hassas Yeganeh tcp_write_queue_purge(sk);
46454ddbcb88SAditi Ghag if (!has_current_bpf_ctx())
4646c1e64e29SLorenzo Colitti release_sock(sk);
4647c1e64e29SLorenzo Colitti return 0;
4648c1e64e29SLorenzo Colitti }
4649c1e64e29SLorenzo Colitti EXPORT_SYMBOL_GPL(tcp_abort);
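/* tcp_abort() backs out-of-band socket destruction, e.g. the inet_diag
 * SOCK_DESTROY command (CONFIG_INET_DIAG_DESTROY) and BPF's
 * bpf_sock_destroy(). From a shell this is typically reached via
 * iproute2, for example (a hedged illustration):
 *
 *	ss -K dst 192.0.2.1
 */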
4650c1e64e29SLorenzo Colitti
46515f8ef48dSStephen Hemminger extern struct tcp_congestion_ops tcp_reno;
46521da177e4SLinus Torvalds
46531da177e4SLinus Torvalds static __initdata unsigned long thash_entries;
46541da177e4SLinus Torvalds static int __init set_thash_entries(char *str)
46551da177e4SLinus Torvalds {
4656413c27d8SEldad Zack ssize_t ret;
4657413c27d8SEldad Zack
46581da177e4SLinus Torvalds if (!str)
46591da177e4SLinus Torvalds return 0;
4660413c27d8SEldad Zack
4661413c27d8SEldad Zack ret = kstrtoul(str, 0, &thash_entries);
4662413c27d8SEldad Zack if (ret)
4663413c27d8SEldad Zack return 0;
4664413c27d8SEldad Zack
46651da177e4SLinus Torvalds return 1;
46661da177e4SLinus Torvalds }
46671da177e4SLinus Torvalds __setup("thash_entries=", set_thash_entries);
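/* Boot-time usage (a hedged example): pin the established-hash size on
 * the kernel command line instead of auto-sizing from memory:
 *
 *	thash_entries=131072
 *
 * Values that fail kstrtoul() above are silently ignored.
 */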
46681da177e4SLinus Torvalds
466947d7a88cSFabian Frederick static void __init tcp_init_mem(void)
46704acb4190SGlauber Costa {
4671b66e91ccSEric Dumazet unsigned long limit = nr_free_buffer_pages() / 16;
4672b66e91ccSEric Dumazet
46734acb4190SGlauber Costa limit = max(limit, 128UL);
4674b66e91ccSEric Dumazet sysctl_tcp_mem[0] = limit / 4 * 3; /* 4.68 % */
4675b66e91ccSEric Dumazet sysctl_tcp_mem[1] = limit; /* 6.25 % */
4676b66e91ccSEric Dumazet sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; /* 9.37 % */
46774acb4190SGlauber Costa }
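/* Worked numbers behind the percentage comments above, assuming the
 * baseline is total buffer pages: limit = pages/16 = 6.25%, so
 * tcp_mem[0] = limit * 3/4 ~= 4.68%, tcp_mem[1] = limit = 6.25%, and
 * tcp_mem[2] = tcp_mem[0] * 2 = limit * 3/2 ~= 9.37%.
 */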
46784acb4190SGlauber Costa
46791da177e4SLinus Torvalds void __init tcp_init(void)
46801da177e4SLinus Torvalds {
4681b49960a0SEric Dumazet int max_rshare, max_wshare, cnt;
4682b2d3ea4aSEric Dumazet unsigned long limit;
4683074b8517SDimitri Sivanich unsigned int i;
46841da177e4SLinus Torvalds
46853b4929f6SEric Dumazet BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
4686b2d3ea4aSEric Dumazet BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
4687c593642cSPankaj Bharadiya sizeof_field(struct sk_buff, cb));
46881da177e4SLinus Torvalds
4689908c7f19STejun Heo percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
469019757cebSEric Dumazet
469119757cebSEric Dumazet timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE);
469219757cebSEric Dumazet mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
469319757cebSEric Dumazet
469427da6d37SMartin KaFai Lau inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
469527da6d37SMartin KaFai Lau thash_entries, 21, /* one slot per 2 MB */
469627da6d37SMartin KaFai Lau 0, 64 * 1024);
46976e04e021SArnaldo Carvalho de Melo tcp_hashinfo.bind_bucket_cachep =
46986e04e021SArnaldo Carvalho de Melo kmem_cache_create("tcp_bind_bucket",
46996e04e021SArnaldo Carvalho de Melo sizeof(struct inet_bind_bucket), 0,
4700990c74e3SVasily Averin SLAB_HWCACHE_ALIGN | SLAB_PANIC |
4701990c74e3SVasily Averin SLAB_ACCOUNT,
4702990c74e3SVasily Averin NULL);
470328044fc1SJoanne Koong tcp_hashinfo.bind2_bucket_cachep =
470428044fc1SJoanne Koong kmem_cache_create("tcp_bind2_bucket",
470528044fc1SJoanne Koong sizeof(struct inet_bind2_bucket), 0,
470628044fc1SJoanne Koong SLAB_HWCACHE_ALIGN | SLAB_PANIC |
470728044fc1SJoanne Koong SLAB_ACCOUNT,
470828044fc1SJoanne Koong NULL);
47091da177e4SLinus Torvalds
47101da177e4SLinus Torvalds /* Size and allocate the main established and bind bucket
47111da177e4SLinus Torvalds * hash tables.
47121da177e4SLinus Torvalds *
47131da177e4SLinus Torvalds * The methodology is similar to that of the buffer cache.
47141da177e4SLinus Torvalds */
47156e04e021SArnaldo Carvalho de Melo tcp_hashinfo.ehash =
47161da177e4SLinus Torvalds alloc_large_system_hash("TCP established",
47170f7ff927SArnaldo Carvalho de Melo sizeof(struct inet_ehash_bucket),
47181da177e4SLinus Torvalds thash_entries,
4719fd90b29dSEric Dumazet 17, /* one slot per 128 KB of memory */
47209e950efaSJohn Heffner 0,
47211da177e4SLinus Torvalds NULL,
4722f373b53bSEric Dumazet &tcp_hashinfo.ehash_mask,
472331fe62b9STim Bird 0,
47240ccfe618SJean Delvare thash_entries ? 0 : 512 * 1024);
472505dbc7b5SEric Dumazet for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
47263ab5aee7SEric Dumazet INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
472705dbc7b5SEric Dumazet
4728230140cfSEric Dumazet if (inet_ehash_locks_alloc(&tcp_hashinfo))
4729230140cfSEric Dumazet panic("TCP: failed to alloc ehash_locks");
47306e04e021SArnaldo Carvalho de Melo tcp_hashinfo.bhash =
4731593d1ebeSJoanne Koong alloc_large_system_hash("TCP bind",
473228044fc1SJoanne Koong 2 * sizeof(struct inet_bind_hashbucket),
4733f373b53bSEric Dumazet tcp_hashinfo.ehash_mask + 1,
4734fd90b29dSEric Dumazet 17, /* one slot per 128 KB of memory */
47359e950efaSJohn Heffner 0,
47366e04e021SArnaldo Carvalho de Melo &tcp_hashinfo.bhash_size,
47371da177e4SLinus Torvalds NULL,
473831fe62b9STim Bird 0,
47391da177e4SLinus Torvalds 64 * 1024);
4740074b8517SDimitri Sivanich tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
474128044fc1SJoanne Koong tcp_hashinfo.bhash2 = tcp_hashinfo.bhash + tcp_hashinfo.bhash_size;
47426e04e021SArnaldo Carvalho de Melo for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
47436e04e021SArnaldo Carvalho de Melo spin_lock_init(&tcp_hashinfo.bhash[i].lock);
47446e04e021SArnaldo Carvalho de Melo INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
474528044fc1SJoanne Koong spin_lock_init(&tcp_hashinfo.bhash2[i].lock);
474628044fc1SJoanne Koong INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain);
47471da177e4SLinus Torvalds }
47481da177e4SLinus Torvalds
4749d1e5e640SKuniyuki Iwashima tcp_hashinfo.pernet = false;
4750c5ed63d6SEric Dumazet
4751c5ed63d6SEric Dumazet cnt = tcp_hashinfo.ehash_mask + 1;
4752c5ed63d6SEric Dumazet sysctl_tcp_max_orphans = cnt / 2;
47531da177e4SLinus Torvalds
4754a4fe34bfSEric W. Biederman tcp_init_mem();
4755c43b874dSJason Wang /* Set per-socket limits to no more than 1/128 the pressure threshold */
47565fb84b14SEric Dumazet limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
4757b49960a0SEric Dumazet max_wshare = min(4UL*1024*1024, limit);
4758b49960a0SEric Dumazet max_rshare = min(6UL*1024*1024, limit);
47597b4f4b5eSJohn Heffner
4760100fdd1fSEric Dumazet init_net.ipv4.sysctl_tcp_wmem[0] = PAGE_SIZE;
4761356d1833SEric Dumazet init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024;
4762356d1833SEric Dumazet init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
47637b4f4b5eSJohn Heffner
4764100fdd1fSEric Dumazet init_net.ipv4.sysctl_tcp_rmem[0] = PAGE_SIZE;
4765a337531bSYuchung Cheng init_net.ipv4.sysctl_tcp_rmem[1] = 131072;
4766a337531bSYuchung Cheng init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare);
47671da177e4SLinus Torvalds
4768afd46503SJoe Perches pr_info("Hash tables configured (established %u bind %u)\n",
4769f373b53bSEric Dumazet tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
4770317a76f9SStephen Hemminger
47711946e672SHaishuang Yan tcp_v4_init();
477251c5d0c4SDavid S. Miller tcp_metrics_init();
477355d8694fSFlorian Westphal BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
477446d3ceabSEric Dumazet tcp_tasklet_init();
4775f870fa0bSMat Martineau mptcp_init();
47761da177e4SLinus Torvalds }