xref: /openbmc/linux/net/ipv4/tcp.c (revision 7bcae826)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Implementation of the Transmission Control Protocol(TCP).
7  *
8  * Authors:	Ross Biro
9  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
11  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
12  *		Florian La Roche, <flla@stud.uni-sb.de>
13  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
14  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
15  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
16  *		Matthew Dillon, <dillon@apollo.west.oic.com>
17  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
18  *		Jorge Cwik, <jorge@laser.satlink.net>
19  *
20  * Fixes:
21  *		Alan Cox	:	Numerous verify_area() calls
22  *		Alan Cox	:	Set the ACK bit on a reset
23  *		Alan Cox	:	Stopped it crashing if it closed while
24  *					sk->inuse=1 and was trying to connect
25  *					(tcp_err()).
26  *		Alan Cox	:	All icmp error handling was broken
27  *					pointers passed where wrong and the
28  *					socket was looked up backwards. Nobody
29  *					tested any icmp error code obviously.
30  *		Alan Cox	:	tcp_err() now handled properly. It
31  *					wakes people on errors. poll
32  *					behaves and the icmp error race
33  *					has gone by moving it into sock.c
34  *		Alan Cox	:	tcp_send_reset() fixed to work for
35  *					everything not just packets for
36  *					unknown sockets.
37  *		Alan Cox	:	tcp option processing.
38  *		Alan Cox	:	Reset tweaked (still not 100%) [Had
39  *					syn rule wrong]
40  *		Herp Rosmanith  :	More reset fixes
41  *		Alan Cox	:	No longer acks invalid rst frames.
42  *					Acking any kind of RST is right out.
43  *		Alan Cox	:	Sets an ignore me flag on an rst
44  *					receive otherwise odd bits of prattle
45  *					escape still
46  *		Alan Cox	:	Fixed another acking RST frame bug.
47  *					Should stop LAN workplace lockups.
48  *		Alan Cox	: 	Some tidyups using the new skb list
49  *					facilities
50  *		Alan Cox	:	sk->keepopen now seems to work
51  *		Alan Cox	:	Pulls options out correctly on accepts
52  *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
53  *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
54  *					bit to skb ops.
55  *		Alan Cox	:	Tidied tcp_data to avoid a potential
56  *					nasty.
57  *		Alan Cox	:	Added some better commenting, as the
58  *					tcp is hard to follow
59  *		Alan Cox	:	Removed incorrect check for 20 * psh
60  *	Michael O'Reilly	:	ack < copied bug fix.
61  *	Johannes Stille		:	Misc tcp fixes (not all in yet).
62  *		Alan Cox	:	FIN with no memory -> CRASH
63  *		Alan Cox	:	Added socket option proto entries.
64  *					Also added awareness of them to accept.
65  *		Alan Cox	:	Added TCP options (SOL_TCP)
66  *		Alan Cox	:	Switched wakeup calls to callbacks,
67  *					so the kernel can layer network
68  *					sockets.
69  *		Alan Cox	:	Use ip_tos/ip_ttl settings.
70  *		Alan Cox	:	Handle FIN (more) properly (we hope).
71  *		Alan Cox	:	RST frames sent on unsynchronised
72  *					state ack error.
73  *		Alan Cox	:	Put in missing check for SYN bit.
74  *		Alan Cox	:	Added tcp_select_window() aka NET2E
75  *					window non shrink trick.
76  *		Alan Cox	:	Added a couple of small NET2E timer
77  *					fixes
78  *		Charles Hedrick :	TCP fixes
79  *		Toomas Tamm	:	TCP window fixes
80  *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
81  *		Charles Hedrick	:	Rewrote most of it to actually work
82  *		Linus		:	Rewrote tcp_read() and URG handling
83  *					completely
84  *		Gerhard Koerting:	Fixed some missing timer handling
85  *		Matthew Dillon  :	Reworked TCP machine states as per RFC
86  *		Gerhard Koerting:	PC/TCP workarounds
87  *		Adam Caldwell	:	Assorted timer/timing errors
88  *		Matthew Dillon	:	Fixed another RST bug
89  *		Alan Cox	:	Move to kernel side addressing changes.
90  *		Alan Cox	:	Beginning work on TCP fastpathing
91  *					(not yet usable)
92  *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
93  *		Alan Cox	:	TCP fast path debugging
94  *		Alan Cox	:	Window clamping
95  *		Michael Riepe	:	Bug in tcp_check()
96  *		Matt Dillon	:	More TCP improvements and RST bug fixes
97  *		Matt Dillon	:	Yet more small nasties remove from the
98  *					TCP code (Be very nice to this man if
99  *					tcp finally works 100%) 8)
100  *		Alan Cox	:	BSD accept semantics.
101  *		Alan Cox	:	Reset on closedown bug.
102  *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
103  *		Michael Pall	:	Handle poll() after URG properly in
104  *					all cases.
105  *		Michael Pall	:	Undo the last fix in tcp_read_urg()
106  *					(multi URG PUSH broke rlogin).
107  *		Michael Pall	:	Fix the multi URG PUSH problem in
108  *					tcp_readable(), poll() after URG
109  *					works now.
110  *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
111  *					BSD api.
112  *		Alan Cox	:	Changed the semantics of sk->socket to
113  *					fix a race and a signal problem with
114  *					accept() and async I/O.
115  *		Alan Cox	:	Relaxed the rules on tcp_sendto().
116  *		Yury Shevchuk	:	Really fixed accept() blocking problem.
117  *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
118  *					clients/servers which listen in on
119  *					fixed ports.
120  *		Alan Cox	:	Cleaned the above up and shrank it to
121  *					a sensible code size.
122  *		Alan Cox	:	Self connect lockup fix.
123  *		Alan Cox	:	No connect to multicast.
124  *		Ross Biro	:	Close unaccepted children on master
125  *					socket close.
126  *		Alan Cox	:	Reset tracing code.
127  *		Alan Cox	:	Spurious resets on shutdown.
128  *		Alan Cox	:	Giant 15 minute/60 second timer error
129  *		Alan Cox	:	Small whoops in polling before an
130  *					accept.
131  *		Alan Cox	:	Kept the state trace facility since
132  *					it's handy for debugging.
133  *		Alan Cox	:	More reset handler fixes.
134  *		Alan Cox	:	Started rewriting the code based on
135  *					the RFC's for other useful protocol
136  *					references see: Comer, KA9Q NOS, and
137  *					for a reference on the difference
138  *					between specifications and how BSD
139  *					works see the 4.4lite source.
140  *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
141  *					close.
142  *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
143  *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
144  *		Alan Cox	:	Reimplemented timers as per the RFC
145  *					and using multiple timers for sanity.
146  *		Alan Cox	:	Small bug fixes, and a lot of new
147  *					comments.
148  *		Alan Cox	:	Fixed dual reader crash by locking
149  *					the buffers (much like datagram.c)
150  *		Alan Cox	:	Fixed stuck sockets in probe. A probe
151  *					now gets fed up of retrying without
152  *					(even a no space) answer.
153  *		Alan Cox	:	Extracted closing code better
154  *		Alan Cox	:	Fixed the closing state machine to
155  *					resemble the RFC.
156  *		Alan Cox	:	More 'per spec' fixes.
157  *		Jorge Cwik	:	Even faster checksumming.
158  *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
159  *					only frames. At least one pc tcp stack
160  *					generates them.
161  *		Alan Cox	:	Cache last socket.
162  *		Alan Cox	:	Per route irtt.
163  *		Matt Day	:	poll()->select() match BSD precisely on error
164  *		Alan Cox	:	New buffers
165  *		Marc Tamsky	:	Various sk->prot->retransmits and
166  *					sk->retransmits misupdating fixed.
167  *					Fixed tcp_write_timeout: stuck close,
168  *					and TCP syn retries gets used now.
169  *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
170  *					ack if state is TCP_CLOSED.
171  *		Alan Cox	:	Look up device on a retransmit - routes may
172  *					change. Doesn't yet cope with MSS shrink right
173  *					but it's a start!
174  *		Marc Tamsky	:	Closing in closing fixes.
175  *		Mike Shaver	:	RFC1122 verifications.
176  *		Alan Cox	:	rcv_saddr errors.
177  *		Alan Cox	:	Block double connect().
178  *		Alan Cox	:	Small hooks for enSKIP.
179  *		Alexey Kuznetsov:	Path MTU discovery.
180  *		Alan Cox	:	Support soft errors.
181  *		Alan Cox	:	Fix MTU discovery pathological case
182  *					when the remote claims no mtu!
183  *		Marc Tamsky	:	TCP_CLOSE fix.
184  *		Colin (G3TNE)	:	Send a reset on syn ack replies in
185  *					window but wrong (fixes NT lpd problems)
186  *		Pedro Roque	:	Better TCP window handling, delayed ack.
187  *		Joerg Reuter	:	No modification of locked buffers in
188  *					tcp_do_retransmit()
189  *		Eric Schenk	:	Changed receiver side silly window
190  *					avoidance algorithm to BSD style
191  *					algorithm. This doubles throughput
192  *					against machines running Solaris,
193  *					and seems to result in general
194  *					improvement.
195  *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
196  *	Willy Konynenberg	:	Transparent proxying support.
197  *	Mike McLagan		:	Routing by source
198  *		Keith Owens	:	Do proper merging with partial SKB's in
199  *					tcp_do_sendmsg to avoid burstiness.
200  *		Eric Schenk	:	Fix fast close down bug with
201  *					shutdown() followed by close().
202  *		Andi Kleen 	:	Make poll agree with SIGIO
203  *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
204  *					lingertime == 0 (RFC 793 ABORT Call)
205  *	Hirokazu Takahashi	:	Use copy_from_user() instead of
206  *					csum_and_copy_from_user() if possible.
207  *
208  *		This program is free software; you can redistribute it and/or
209  *		modify it under the terms of the GNU General Public License
210  *		as published by the Free Software Foundation; either version
211  *		2 of the License, or(at your option) any later version.
212  *
213  * Description of States:
214  *
215  *	TCP_SYN_SENT		sent a connection request, waiting for ack
216  *
217  *	TCP_SYN_RECV		received a connection request, sent ack,
218  *				waiting for final ack in three-way handshake.
219  *
220  *	TCP_ESTABLISHED		connection established
221  *
222  *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
223  *				transmission of remaining buffered data
224  *
225  *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
226  *				to shutdown
227  *
228  *	TCP_CLOSING		both sides have shutdown but we still have
229  *				data we have to finish sending
230  *
231  *	TCP_TIME_WAIT		timeout to catch resent junk before entering
232  *				closed, can only be entered from FIN_WAIT2
233  *				or CLOSING.  Required because the other end
234  *				may not have gotten our last ACK causing it
235  *				to retransmit the data packet (which we ignore)
236  *
237  *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
238  *				us to finish writing our data and to shutdown
239  *				(we have to close() to move on to LAST_ACK)
240  *
241  *	TCP_LAST_ACK		out side has shutdown after remote has
242  *				shutdown.  There may still be data in our
243  *				buffer that we have to finish sending
244  *
245  *	TCP_CLOSE		socket is finished
246  */
247 
248 #define pr_fmt(fmt) "TCP: " fmt
249 
250 #include <crypto/hash.h>
251 #include <linux/kernel.h>
252 #include <linux/module.h>
253 #include <linux/types.h>
254 #include <linux/fcntl.h>
255 #include <linux/poll.h>
256 #include <linux/inet_diag.h>
257 #include <linux/init.h>
258 #include <linux/fs.h>
259 #include <linux/skbuff.h>
260 #include <linux/scatterlist.h>
261 #include <linux/splice.h>
262 #include <linux/net.h>
263 #include <linux/socket.h>
264 #include <linux/random.h>
265 #include <linux/bootmem.h>
266 #include <linux/highmem.h>
267 #include <linux/swap.h>
268 #include <linux/cache.h>
269 #include <linux/err.h>
270 #include <linux/time.h>
271 #include <linux/slab.h>
272 
273 #include <net/icmp.h>
274 #include <net/inet_common.h>
275 #include <net/tcp.h>
276 #include <net/xfrm.h>
277 #include <net/ip.h>
278 #include <net/sock.h>
279 
280 #include <linux/uaccess.h>
281 #include <asm/ioctls.h>
282 #include <net/busy_poll.h>
283 
284 int sysctl_tcp_min_tso_segs __read_mostly = 2;
285 
286 int sysctl_tcp_autocorking __read_mostly = 1;
287 
288 struct percpu_counter tcp_orphan_count;
289 EXPORT_SYMBOL_GPL(tcp_orphan_count);
290 
291 long sysctl_tcp_mem[3] __read_mostly;
292 int sysctl_tcp_wmem[3] __read_mostly;
293 int sysctl_tcp_rmem[3] __read_mostly;
294 
295 EXPORT_SYMBOL(sysctl_tcp_mem);
296 EXPORT_SYMBOL(sysctl_tcp_rmem);
297 EXPORT_SYMBOL(sysctl_tcp_wmem);
298 
299 atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
300 EXPORT_SYMBOL(tcp_memory_allocated);
301 
302 /*
303  * Current number of TCP sockets.
304  */
305 struct percpu_counter tcp_sockets_allocated;
306 EXPORT_SYMBOL(tcp_sockets_allocated);
307 
308 /*
309  * TCP splice context
310  */
311 struct tcp_splice_state {
312 	struct pipe_inode_info *pipe;
313 	size_t len;
314 	unsigned int flags;
315 };
316 
317 /*
318  * Pressure flag: try to collapse.
319  * Technical note: it is used by multiple contexts non atomically.
320  * All the __sk_mem_schedule() is of this nature: accounting
321  * is strict, actions are advisory and have some latency.
322  */
323 int tcp_memory_pressure __read_mostly;
324 EXPORT_SYMBOL(tcp_memory_pressure);
325 
326 void tcp_enter_memory_pressure(struct sock *sk)
327 {
328 	if (!tcp_memory_pressure) {
329 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
330 		tcp_memory_pressure = 1;
331 	}
332 }
333 EXPORT_SYMBOL(tcp_enter_memory_pressure);
334 
335 /* Convert seconds to retransmits based on initial and max timeout */
336 static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
337 {
338 	u8 res = 0;
339 
340 	if (seconds > 0) {
341 		int period = timeout;
342 
343 		res = 1;
344 		while (seconds > period && res < 255) {
345 			res++;
346 			timeout <<= 1;
347 			if (timeout > rto_max)
348 				timeout = rto_max;
349 			period += timeout;
350 		}
351 	}
352 	return res;
353 }
354 
355 /* Convert retransmits to seconds based on initial and max timeout */
356 static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
357 {
358 	int period = 0;
359 
360 	if (retrans > 0) {
361 		period = timeout;
362 		while (--retrans) {
363 			timeout <<= 1;
364 			if (timeout > rto_max)
365 				timeout = rto_max;
366 			period += timeout;
367 		}
368 	}
369 	return period;
370 }
371 
372 /* Address-family independent initialization for a tcp_sock.
373  *
374  * NOTE: A lot of things set to zero explicitly by call to
375  *       sk_alloc() so need not be done here.
376  */
377 void tcp_init_sock(struct sock *sk)
378 {
379 	struct inet_connection_sock *icsk = inet_csk(sk);
380 	struct tcp_sock *tp = tcp_sk(sk);
381 
382 	tp->out_of_order_queue = RB_ROOT;
383 	tcp_init_xmit_timers(sk);
384 	tcp_prequeue_init(tp);
385 	INIT_LIST_HEAD(&tp->tsq_node);
386 
387 	icsk->icsk_rto = TCP_TIMEOUT_INIT;
388 	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
389 	minmax_reset(&tp->rtt_min, tcp_time_stamp, ~0U);
390 
391 	/* So many TCP implementations out there (incorrectly) count the
392 	 * initial SYN frame in their delayed-ACK and congestion control
393 	 * algorithms that we must have the following bandaid to talk
394 	 * efficiently to them.  -DaveM
395 	 */
396 	tp->snd_cwnd = TCP_INIT_CWND;
397 
398 	/* There's a bubble in the pipe until at least the first ACK. */
399 	tp->app_limited = ~0U;
400 
401 	/* See draft-stevens-tcpca-spec-01 for discussion of the
402 	 * initialization of these values.
403 	 */
404 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
405 	tp->snd_cwnd_clamp = ~0;
406 	tp->mss_cache = TCP_MSS_DEFAULT;
407 
408 	tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
409 	tcp_assign_congestion_control(sk);
410 
411 	tp->tsoffset = 0;
412 
413 	sk->sk_state = TCP_CLOSE;
414 
415 	sk->sk_write_space = sk_stream_write_space;
416 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
417 
418 	icsk->icsk_sync_mss = tcp_sync_mss;
419 
420 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
421 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
422 
423 	sk_sockets_allocated_inc(sk);
424 }
425 EXPORT_SYMBOL(tcp_init_sock);
426 
427 static void tcp_tx_timestamp(struct sock *sk, u16 tsflags, struct sk_buff *skb)
428 {
429 	if (tsflags && skb) {
430 		struct skb_shared_info *shinfo = skb_shinfo(skb);
431 		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
432 
433 		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
434 		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
435 			tcb->txstamp_ack = 1;
436 		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
437 			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
438 	}
439 }
440 
441 /*
442  *	Wait for a TCP event.
443  *
444  *	Note that we don't need to lock the socket, as the upper poll layers
445  *	take care of normal races (between the test and the event) and we don't
446  *	go look at any of the socket buffers directly.
447  */
448 unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
449 {
450 	unsigned int mask;
451 	struct sock *sk = sock->sk;
452 	const struct tcp_sock *tp = tcp_sk(sk);
453 	int state;
454 
455 	sock_rps_record_flow(sk);
456 
457 	sock_poll_wait(file, sk_sleep(sk), wait);
458 
459 	state = sk_state_load(sk);
460 	if (state == TCP_LISTEN)
461 		return inet_csk_listen_poll(sk);
462 
463 	/* Socket is not locked. We are protected from async events
464 	 * by poll logic and correct handling of state changes
465 	 * made by other threads is impossible in any case.
466 	 */
467 
468 	mask = 0;
469 
470 	/*
471 	 * POLLHUP is certainly not done right. But poll() doesn't
472 	 * have a notion of HUP in just one direction, and for a
473 	 * socket the read side is more interesting.
474 	 *
475 	 * Some poll() documentation says that POLLHUP is incompatible
476 	 * with the POLLOUT/POLLWR flags, so somebody should check this
477 	 * all. But careful, it tends to be safer to return too many
478 	 * bits than too few, and you can easily break real applications
479 	 * if you don't tell them that something has hung up!
480 	 *
481 	 * Check-me.
482 	 *
483 	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
484 	 * our fs/select.c). It means that after we received EOF,
485 	 * poll always returns immediately, making impossible poll() on write()
486 	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
487 	 * if and only if shutdown has been made in both directions.
488 	 * Actually, it is interesting to look how Solaris and DUX
489 	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
490 	 * then we could set it on SND_SHUTDOWN. BTW examples given
491 	 * in Stevens' books assume exactly this behaviour, it explains
492 	 * why POLLHUP is incompatible with POLLOUT.	--ANK
493 	 *
494 	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
495 	 * blocking on fresh not-connected or disconnected socket. --ANK
496 	 */
497 	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
498 		mask |= POLLHUP;
499 	if (sk->sk_shutdown & RCV_SHUTDOWN)
500 		mask |= POLLIN | POLLRDNORM | POLLRDHUP;
501 
502 	/* Connected or passive Fast Open socket? */
503 	if (state != TCP_SYN_SENT &&
504 	    (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
505 		int target = sock_rcvlowat(sk, 0, INT_MAX);
506 
507 		if (tp->urg_seq == tp->copied_seq &&
508 		    !sock_flag(sk, SOCK_URGINLINE) &&
509 		    tp->urg_data)
510 			target++;
511 
512 		if (tp->rcv_nxt - tp->copied_seq >= target)
513 			mask |= POLLIN | POLLRDNORM;
514 
515 		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
516 			if (sk_stream_is_writeable(sk)) {
517 				mask |= POLLOUT | POLLWRNORM;
518 			} else {  /* send SIGIO later */
519 				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
520 				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
521 
522 				/* Race breaker. If space is freed after
523 				 * wspace test but before the flags are set,
524 				 * IO signal will be lost. Memory barrier
525 				 * pairs with the input side.
526 				 */
527 				smp_mb__after_atomic();
528 				if (sk_stream_is_writeable(sk))
529 					mask |= POLLOUT | POLLWRNORM;
530 			}
531 		} else
532 			mask |= POLLOUT | POLLWRNORM;
533 
534 		if (tp->urg_data & TCP_URG_VALID)
535 			mask |= POLLPRI;
536 	} else if (sk->sk_state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
537 		/* Active TCP fastopen socket with defer_connect
538 		 * Return POLLOUT so application can call write()
539 		 * in order for kernel to generate SYN+data
540 		 */
541 		mask |= POLLOUT | POLLWRNORM;
542 	}
543 	/* This barrier is coupled with smp_wmb() in tcp_reset() */
544 	smp_rmb();
545 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
546 		mask |= POLLERR;
547 
548 	return mask;
549 }
550 EXPORT_SYMBOL(tcp_poll);
551 
552 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
553 {
554 	struct tcp_sock *tp = tcp_sk(sk);
555 	int answ;
556 	bool slow;
557 
558 	switch (cmd) {
559 	case SIOCINQ:
560 		if (sk->sk_state == TCP_LISTEN)
561 			return -EINVAL;
562 
563 		slow = lock_sock_fast(sk);
564 		answ = tcp_inq(sk);
565 		unlock_sock_fast(sk, slow);
566 		break;
567 	case SIOCATMARK:
568 		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
569 		break;
570 	case SIOCOUTQ:
571 		if (sk->sk_state == TCP_LISTEN)
572 			return -EINVAL;
573 
574 		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
575 			answ = 0;
576 		else
577 			answ = tp->write_seq - tp->snd_una;
578 		break;
579 	case SIOCOUTQNSD:
580 		if (sk->sk_state == TCP_LISTEN)
581 			return -EINVAL;
582 
583 		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
584 			answ = 0;
585 		else
586 			answ = tp->write_seq - tp->snd_nxt;
587 		break;
588 	default:
589 		return -ENOIOCTLCMD;
590 	}
591 
592 	return put_user(answ, (int __user *)arg);
593 }
594 EXPORT_SYMBOL(tcp_ioctl);
595 
596 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
597 {
598 	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
599 	tp->pushed_seq = tp->write_seq;
600 }
601 
602 static inline bool forced_push(const struct tcp_sock *tp)
603 {
604 	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
605 }
606 
607 static void skb_entail(struct sock *sk, struct sk_buff *skb)
608 {
609 	struct tcp_sock *tp = tcp_sk(sk);
610 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
611 
612 	skb->csum    = 0;
613 	tcb->seq     = tcb->end_seq = tp->write_seq;
614 	tcb->tcp_flags = TCPHDR_ACK;
615 	tcb->sacked  = 0;
616 	__skb_header_release(skb);
617 	tcp_add_write_queue_tail(sk, skb);
618 	sk->sk_wmem_queued += skb->truesize;
619 	sk_mem_charge(sk, skb->truesize);
620 	if (tp->nonagle & TCP_NAGLE_PUSH)
621 		tp->nonagle &= ~TCP_NAGLE_PUSH;
622 
623 	tcp_slow_start_after_idle_check(sk);
624 }
625 
626 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
627 {
628 	if (flags & MSG_OOB)
629 		tp->snd_up = tp->write_seq;
630 }
631 
632 /* If a not yet filled skb is pushed, do not send it if
633  * we have data packets in Qdisc or NIC queues :
634  * Because TX completion will happen shortly, it gives a chance
635  * to coalesce future sendmsg() payload into this skb, without
636  * need for a timer, and with no latency trade off.
637  * As packets containing data payload have a bigger truesize
638  * than pure acks (dataless) packets, the last checks prevent
639  * autocorking if we only have an ACK in Qdisc/NIC queues,
640  * or if TX completion was delayed after we processed ACK packet.
641  */
642 static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
643 				int size_goal)
644 {
645 	return skb->len < size_goal &&
646 	       sysctl_tcp_autocorking &&
647 	       skb != tcp_write_queue_head(sk) &&
648 	       atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
649 }
650 
651 static void tcp_push(struct sock *sk, int flags, int mss_now,
652 		     int nonagle, int size_goal)
653 {
654 	struct tcp_sock *tp = tcp_sk(sk);
655 	struct sk_buff *skb;
656 
657 	if (!tcp_send_head(sk))
658 		return;
659 
660 	skb = tcp_write_queue_tail(sk);
661 	if (!(flags & MSG_MORE) || forced_push(tp))
662 		tcp_mark_push(tp, skb);
663 
664 	tcp_mark_urg(tp, flags);
665 
666 	if (tcp_should_autocork(sk, skb, size_goal)) {
667 
668 		/* avoid atomic op if TSQ_THROTTLED bit is already set */
669 		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
670 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
671 			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
672 		}
673 		/* It is possible TX completion already happened
674 		 * before we set TSQ_THROTTLED.
675 		 */
676 		if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize)
677 			return;
678 	}
679 
680 	if (flags & MSG_MORE)
681 		nonagle = TCP_NAGLE_CORK;
682 
683 	__tcp_push_pending_frames(sk, mss_now, nonagle);
684 }
685 
686 static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
687 				unsigned int offset, size_t len)
688 {
689 	struct tcp_splice_state *tss = rd_desc->arg.data;
690 	int ret;
691 
692 	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
693 			      min(rd_desc->count, len), tss->flags);
694 	if (ret > 0)
695 		rd_desc->count -= ret;
696 	return ret;
697 }
698 
699 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
700 {
701 	/* Store TCP splice context information in read_descriptor_t. */
702 	read_descriptor_t rd_desc = {
703 		.arg.data = tss,
704 		.count	  = tss->len,
705 	};
706 
707 	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
708 }
709 
710 /**
711  *  tcp_splice_read - splice data from TCP socket to a pipe
712  * @sock:	socket to splice from
713  * @ppos:	position (not valid)
714  * @pipe:	pipe to splice to
715  * @len:	number of bytes to splice
716  * @flags:	splice modifier flags
717  *
718  * Description:
719  *    Will read pages from given socket and fill them into a pipe.
720  *
721  **/
722 ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
723 			struct pipe_inode_info *pipe, size_t len,
724 			unsigned int flags)
725 {
726 	struct sock *sk = sock->sk;
727 	struct tcp_splice_state tss = {
728 		.pipe = pipe,
729 		.len = len,
730 		.flags = flags,
731 	};
732 	long timeo;
733 	ssize_t spliced;
734 	int ret;
735 
736 	sock_rps_record_flow(sk);
737 	/*
738 	 * We can't seek on a socket input
739 	 */
740 	if (unlikely(*ppos))
741 		return -ESPIPE;
742 
743 	ret = spliced = 0;
744 
745 	lock_sock(sk);
746 
747 	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
748 	while (tss.len) {
749 		ret = __tcp_splice_read(sk, &tss);
750 		if (ret < 0)
751 			break;
752 		else if (!ret) {
753 			if (spliced)
754 				break;
755 			if (sock_flag(sk, SOCK_DONE))
756 				break;
757 			if (sk->sk_err) {
758 				ret = sock_error(sk);
759 				break;
760 			}
761 			if (sk->sk_shutdown & RCV_SHUTDOWN)
762 				break;
763 			if (sk->sk_state == TCP_CLOSE) {
764 				/*
765 				 * This occurs when user tries to read
766 				 * from never connected socket.
767 				 */
768 				if (!sock_flag(sk, SOCK_DONE))
769 					ret = -ENOTCONN;
770 				break;
771 			}
772 			if (!timeo) {
773 				ret = -EAGAIN;
774 				break;
775 			}
776 			/* if __tcp_splice_read() got nothing while we have
777 			 * an skb in receive queue, we do not want to loop.
778 			 * This might happen with URG data.
779 			 */
780 			if (!skb_queue_empty(&sk->sk_receive_queue))
781 				break;
782 			sk_wait_data(sk, &timeo, NULL);
783 			if (signal_pending(current)) {
784 				ret = sock_intr_errno(timeo);
785 				break;
786 			}
787 			continue;
788 		}
789 		tss.len -= ret;
790 		spliced += ret;
791 
792 		if (!timeo)
793 			break;
794 		release_sock(sk);
795 		lock_sock(sk);
796 
797 		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
798 		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
799 		    signal_pending(current))
800 			break;
801 	}
802 
803 	release_sock(sk);
804 
805 	if (spliced)
806 		return spliced;
807 
808 	return ret;
809 }
810 EXPORT_SYMBOL(tcp_splice_read);
811 
812 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
813 				    bool force_schedule)
814 {
815 	struct sk_buff *skb;
816 
817 	/* The TCP header must be at least 32-bit aligned.  */
818 	size = ALIGN(size, 4);
819 
820 	if (unlikely(tcp_under_memory_pressure(sk)))
821 		sk_mem_reclaim_partial(sk);
822 
823 	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
824 	if (likely(skb)) {
825 		bool mem_scheduled;
826 
827 		if (force_schedule) {
828 			mem_scheduled = true;
829 			sk_forced_mem_schedule(sk, skb->truesize);
830 		} else {
831 			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
832 		}
833 		if (likely(mem_scheduled)) {
834 			skb_reserve(skb, sk->sk_prot->max_header);
835 			/*
836 			 * Make sure that we have exactly size bytes
837 			 * available to the caller, no more, no less.
838 			 */
839 			skb->reserved_tailroom = skb->end - skb->tail - size;
840 			return skb;
841 		}
842 		__kfree_skb(skb);
843 	} else {
844 		sk->sk_prot->enter_memory_pressure(sk);
845 		sk_stream_moderate_sndbuf(sk);
846 	}
847 	return NULL;
848 }
849 
850 static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
851 				       int large_allowed)
852 {
853 	struct tcp_sock *tp = tcp_sk(sk);
854 	u32 new_size_goal, size_goal;
855 
856 	if (!large_allowed || !sk_can_gso(sk))
857 		return mss_now;
858 
859 	/* Note : tcp_tso_autosize() will eventually split this later */
860 	new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
861 	new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);
862 
863 	/* We try hard to avoid divides here */
864 	size_goal = tp->gso_segs * mss_now;
865 	if (unlikely(new_size_goal < size_goal ||
866 		     new_size_goal >= size_goal + mss_now)) {
867 		tp->gso_segs = min_t(u16, new_size_goal / mss_now,
868 				     sk->sk_gso_max_segs);
869 		size_goal = tp->gso_segs * mss_now;
870 	}
871 
872 	return max(size_goal, mss_now);
873 }
874 
875 static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
876 {
877 	int mss_now;
878 
879 	mss_now = tcp_current_mss(sk);
880 	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
881 
882 	return mss_now;
883 }
884 
885 static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
886 				size_t size, int flags)
887 {
888 	struct tcp_sock *tp = tcp_sk(sk);
889 	int mss_now, size_goal;
890 	int err;
891 	ssize_t copied;
892 	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
893 
894 	/* Wait for a connection to finish. One exception is TCP Fast Open
895 	 * (passive side) where data is allowed to be sent before a connection
896 	 * is fully established.
897 	 */
898 	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
899 	    !tcp_passive_fastopen(sk)) {
900 		err = sk_stream_wait_connect(sk, &timeo);
901 		if (err != 0)
902 			goto out_err;
903 	}
904 
905 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
906 
907 	mss_now = tcp_send_mss(sk, &size_goal, flags);
908 	copied = 0;
909 
910 	err = -EPIPE;
911 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
912 		goto out_err;
913 
914 	while (size > 0) {
915 		struct sk_buff *skb = tcp_write_queue_tail(sk);
916 		int copy, i;
917 		bool can_coalesce;
918 
919 		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0 ||
920 		    !tcp_skb_can_collapse_to(skb)) {
921 new_segment:
922 			if (!sk_stream_memory_free(sk))
923 				goto wait_for_sndbuf;
924 
925 			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
926 						  skb_queue_empty(&sk->sk_write_queue));
927 			if (!skb)
928 				goto wait_for_memory;
929 
930 			skb_entail(sk, skb);
931 			copy = size_goal;
932 		}
933 
934 		if (copy > size)
935 			copy = size;
936 
937 		i = skb_shinfo(skb)->nr_frags;
938 		can_coalesce = skb_can_coalesce(skb, i, page, offset);
939 		if (!can_coalesce && i >= sysctl_max_skb_frags) {
940 			tcp_mark_push(tp, skb);
941 			goto new_segment;
942 		}
943 		if (!sk_wmem_schedule(sk, copy))
944 			goto wait_for_memory;
945 
946 		if (can_coalesce) {
947 			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
948 		} else {
949 			get_page(page);
950 			skb_fill_page_desc(skb, i, page, offset, copy);
951 		}
952 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
953 
954 		skb->len += copy;
955 		skb->data_len += copy;
956 		skb->truesize += copy;
957 		sk->sk_wmem_queued += copy;
958 		sk_mem_charge(sk, copy);
959 		skb->ip_summed = CHECKSUM_PARTIAL;
960 		tp->write_seq += copy;
961 		TCP_SKB_CB(skb)->end_seq += copy;
962 		tcp_skb_pcount_set(skb, 0);
963 
964 		if (!copied)
965 			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
966 
967 		copied += copy;
968 		offset += copy;
969 		size -= copy;
970 		if (!size)
971 			goto out;
972 
973 		if (skb->len < size_goal || (flags & MSG_OOB))
974 			continue;
975 
976 		if (forced_push(tp)) {
977 			tcp_mark_push(tp, skb);
978 			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
979 		} else if (skb == tcp_send_head(sk))
980 			tcp_push_one(sk, mss_now);
981 		continue;
982 
983 wait_for_sndbuf:
984 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
985 wait_for_memory:
986 		tcp_push(sk, flags & ~MSG_MORE, mss_now,
987 			 TCP_NAGLE_PUSH, size_goal);
988 
989 		err = sk_stream_wait_memory(sk, &timeo);
990 		if (err != 0)
991 			goto do_error;
992 
993 		mss_now = tcp_send_mss(sk, &size_goal, flags);
994 	}
995 
996 out:
997 	if (copied) {
998 		tcp_tx_timestamp(sk, sk->sk_tsflags, tcp_write_queue_tail(sk));
999 		if (!(flags & MSG_SENDPAGE_NOTLAST))
1000 			tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1001 	}
1002 	return copied;
1003 
1004 do_error:
1005 	if (copied)
1006 		goto out;
1007 out_err:
1008 	/* make sure we wake any epoll edge trigger waiter */
1009 	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 &&
1010 		     err == -EAGAIN)) {
1011 		sk->sk_write_space(sk);
1012 		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
1013 	}
1014 	return sk_stream_error(sk, flags, err);
1015 }
1016 
1017 int tcp_sendpage(struct sock *sk, struct page *page, int offset,
1018 		 size_t size, int flags)
1019 {
1020 	ssize_t res;
1021 
1022 	if (!(sk->sk_route_caps & NETIF_F_SG) ||
1023 	    !sk_check_csum_caps(sk))
1024 		return sock_no_sendpage(sk->sk_socket, page, offset, size,
1025 					flags);
1026 
1027 	lock_sock(sk);
1028 
1029 	tcp_rate_check_app_limited(sk);  /* is sending application-limited? */
1030 
1031 	res = do_tcp_sendpages(sk, page, offset, size, flags);
1032 	release_sock(sk);
1033 	return res;
1034 }
1035 EXPORT_SYMBOL(tcp_sendpage);
1036 
1037 /* Do not bother using a page frag for very small frames.
1038  * But use this heuristic only for the first skb in write queue.
1039  *
1040  * Having no payload in skb->head allows better SACK shifting
1041  * in tcp_shift_skb_data(), reducing sack/rack overhead, because
1042  * write queue has less skbs.
1043  * Each skb can hold up to MAX_SKB_FRAGS * 32Kbytes, or ~0.5 MB.
1044  * This also speeds up tso_fragment(), since it wont fallback
1045  * to tcp_fragment().
1046  */
1047 static int linear_payload_sz(bool first_skb)
1048 {
1049 	if (first_skb)
1050 		return SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
1051 	return 0;
1052 }
1053 
1054 static int select_size(const struct sock *sk, bool sg, bool first_skb)
1055 {
1056 	const struct tcp_sock *tp = tcp_sk(sk);
1057 	int tmp = tp->mss_cache;
1058 
1059 	if (sg) {
1060 		if (sk_can_gso(sk)) {
1061 			tmp = linear_payload_sz(first_skb);
1062 		} else {
1063 			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
1064 
1065 			if (tmp >= pgbreak &&
1066 			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
1067 				tmp = pgbreak;
1068 		}
1069 	}
1070 
1071 	return tmp;
1072 }
1073 
1074 void tcp_free_fastopen_req(struct tcp_sock *tp)
1075 {
1076 	if (tp->fastopen_req) {
1077 		kfree(tp->fastopen_req);
1078 		tp->fastopen_req = NULL;
1079 	}
1080 }
1081 
1082 static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
1083 				int *copied, size_t size)
1084 {
1085 	struct tcp_sock *tp = tcp_sk(sk);
1086 	struct inet_sock *inet = inet_sk(sk);
1087 	int err, flags;
1088 
1089 	if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
1090 		return -EOPNOTSUPP;
1091 	if (tp->fastopen_req)
1092 		return -EALREADY; /* Another Fast Open is in progress */
1093 
1094 	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
1095 				   sk->sk_allocation);
1096 	if (unlikely(!tp->fastopen_req))
1097 		return -ENOBUFS;
1098 	tp->fastopen_req->data = msg;
1099 	tp->fastopen_req->size = size;
1100 
1101 	if (inet->defer_connect) {
1102 		err = tcp_connect(sk);
1103 		/* Same failure procedure as in tcp_v4/6_connect */
1104 		if (err) {
1105 			tcp_set_state(sk, TCP_CLOSE);
1106 			inet->inet_dport = 0;
1107 			sk->sk_route_caps = 0;
1108 		}
1109 	}
1110 	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
1111 	err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
1112 				    msg->msg_namelen, flags, 1);
1113 	inet->defer_connect = 0;
1114 	*copied = tp->fastopen_req->copied;
1115 	tcp_free_fastopen_req(tp);
1116 	return err;
1117 }
1118 
1119 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1120 {
1121 	struct tcp_sock *tp = tcp_sk(sk);
1122 	struct sk_buff *skb;
1123 	struct sockcm_cookie sockc;
1124 	int flags, err, copied = 0;
1125 	int mss_now = 0, size_goal, copied_syn = 0;
1126 	bool process_backlog = false;
1127 	bool sg;
1128 	long timeo;
1129 
1130 	lock_sock(sk);
1131 
1132 	flags = msg->msg_flags;
1133 	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) {
1134 		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
1135 		if (err == -EINPROGRESS && copied_syn > 0)
1136 			goto out;
1137 		else if (err)
1138 			goto out_err;
1139 	}
1140 
1141 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1142 
1143 	tcp_rate_check_app_limited(sk);  /* is sending application-limited? */
1144 
1145 	/* Wait for a connection to finish. One exception is TCP Fast Open
1146 	 * (passive side) where data is allowed to be sent before a connection
1147 	 * is fully established.
1148 	 */
1149 	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
1150 	    !tcp_passive_fastopen(sk)) {
1151 		err = sk_stream_wait_connect(sk, &timeo);
1152 		if (err != 0)
1153 			goto do_error;
1154 	}
1155 
1156 	if (unlikely(tp->repair)) {
1157 		if (tp->repair_queue == TCP_RECV_QUEUE) {
1158 			copied = tcp_send_rcvq(sk, msg, size);
1159 			goto out_nopush;
1160 		}
1161 
1162 		err = -EINVAL;
1163 		if (tp->repair_queue == TCP_NO_QUEUE)
1164 			goto out_err;
1165 
1166 		/* 'common' sending to sendq */
1167 	}
1168 
1169 	sockc.tsflags = sk->sk_tsflags;
1170 	if (msg->msg_controllen) {
1171 		err = sock_cmsg_send(sk, msg, &sockc);
1172 		if (unlikely(err)) {
1173 			err = -EINVAL;
1174 			goto out_err;
1175 		}
1176 	}
1177 
1178 	/* This should be in poll */
1179 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1180 
1181 	/* Ok commence sending. */
1182 	copied = 0;
1183 
1184 restart:
1185 	mss_now = tcp_send_mss(sk, &size_goal, flags);
1186 
1187 	err = -EPIPE;
1188 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
1189 		goto do_error;
1190 
1191 	sg = !!(sk->sk_route_caps & NETIF_F_SG);
1192 
1193 	while (msg_data_left(msg)) {
1194 		int copy = 0;
1195 		int max = size_goal;
1196 
1197 		skb = tcp_write_queue_tail(sk);
1198 		if (tcp_send_head(sk)) {
1199 			if (skb->ip_summed == CHECKSUM_NONE)
1200 				max = mss_now;
1201 			copy = max - skb->len;
1202 		}
1203 
1204 		if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
1205 			bool first_skb;
1206 
1207 new_segment:
1208 			/* Allocate new segment. If the interface is SG,
1209 			 * allocate skb fitting to single page.
1210 			 */
1211 			if (!sk_stream_memory_free(sk))
1212 				goto wait_for_sndbuf;
1213 
1214 			if (process_backlog && sk_flush_backlog(sk)) {
1215 				process_backlog = false;
1216 				goto restart;
1217 			}
1218 			first_skb = skb_queue_empty(&sk->sk_write_queue);
1219 			skb = sk_stream_alloc_skb(sk,
1220 						  select_size(sk, sg, first_skb),
1221 						  sk->sk_allocation,
1222 						  first_skb);
1223 			if (!skb)
1224 				goto wait_for_memory;
1225 
1226 			process_backlog = true;
1227 			/*
1228 			 * Check whether we can use HW checksum.
1229 			 */
1230 			if (sk_check_csum_caps(sk))
1231 				skb->ip_summed = CHECKSUM_PARTIAL;
1232 
1233 			skb_entail(sk, skb);
1234 			copy = size_goal;
1235 			max = size_goal;
1236 
1237 			/* All packets are restored as if they have
1238 			 * already been sent. skb_mstamp isn't set to
1239 			 * avoid wrong rtt estimation.
1240 			 */
1241 			if (tp->repair)
1242 				TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
1243 		}
1244 
1245 		/* Try to append data to the end of skb. */
1246 		if (copy > msg_data_left(msg))
1247 			copy = msg_data_left(msg);
1248 
1249 		/* Where to copy to? */
1250 		if (skb_availroom(skb) > 0) {
1251 			/* We have some space in skb head. Superb! */
1252 			copy = min_t(int, copy, skb_availroom(skb));
1253 			err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
1254 			if (err)
1255 				goto do_fault;
1256 		} else {
1257 			bool merge = true;
1258 			int i = skb_shinfo(skb)->nr_frags;
1259 			struct page_frag *pfrag = sk_page_frag(sk);
1260 
1261 			if (!sk_page_frag_refill(sk, pfrag))
1262 				goto wait_for_memory;
1263 
1264 			if (!skb_can_coalesce(skb, i, pfrag->page,
1265 					      pfrag->offset)) {
1266 				if (i >= sysctl_max_skb_frags || !sg) {
1267 					tcp_mark_push(tp, skb);
1268 					goto new_segment;
1269 				}
1270 				merge = false;
1271 			}
1272 
1273 			copy = min_t(int, copy, pfrag->size - pfrag->offset);
1274 
1275 			if (!sk_wmem_schedule(sk, copy))
1276 				goto wait_for_memory;
1277 
1278 			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
1279 						       pfrag->page,
1280 						       pfrag->offset,
1281 						       copy);
1282 			if (err)
1283 				goto do_error;
1284 
1285 			/* Update the skb. */
1286 			if (merge) {
1287 				skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1288 			} else {
1289 				skb_fill_page_desc(skb, i, pfrag->page,
1290 						   pfrag->offset, copy);
1291 				page_ref_inc(pfrag->page);
1292 			}
1293 			pfrag->offset += copy;
1294 		}
1295 
1296 		if (!copied)
1297 			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
1298 
1299 		tp->write_seq += copy;
1300 		TCP_SKB_CB(skb)->end_seq += copy;
1301 		tcp_skb_pcount_set(skb, 0);
1302 
1303 		copied += copy;
1304 		if (!msg_data_left(msg)) {
1305 			if (unlikely(flags & MSG_EOR))
1306 				TCP_SKB_CB(skb)->eor = 1;
1307 			goto out;
1308 		}
1309 
1310 		if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
1311 			continue;
1312 
1313 		if (forced_push(tp)) {
1314 			tcp_mark_push(tp, skb);
1315 			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1316 		} else if (skb == tcp_send_head(sk))
1317 			tcp_push_one(sk, mss_now);
1318 		continue;
1319 
1320 wait_for_sndbuf:
1321 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1322 wait_for_memory:
1323 		if (copied)
1324 			tcp_push(sk, flags & ~MSG_MORE, mss_now,
1325 				 TCP_NAGLE_PUSH, size_goal);
1326 
1327 		err = sk_stream_wait_memory(sk, &timeo);
1328 		if (err != 0)
1329 			goto do_error;
1330 
1331 		mss_now = tcp_send_mss(sk, &size_goal, flags);
1332 	}
1333 
1334 out:
1335 	if (copied) {
1336 		tcp_tx_timestamp(sk, sockc.tsflags, tcp_write_queue_tail(sk));
1337 		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1338 	}
1339 out_nopush:
1340 	release_sock(sk);
1341 	return copied + copied_syn;
1342 
1343 do_fault:
1344 	if (!skb->len) {
1345 		tcp_unlink_write_queue(skb, sk);
1346 		/* It is the one place in all of TCP, except connection
1347 		 * reset, where we can be unlinking the send_head.
1348 		 */
1349 		tcp_check_send_head(sk, skb);
1350 		sk_wmem_free_skb(sk, skb);
1351 	}
1352 
1353 do_error:
1354 	if (copied + copied_syn)
1355 		goto out;
1356 out_err:
1357 	err = sk_stream_error(sk, flags, err);
1358 	/* make sure we wake any epoll edge trigger waiter */
1359 	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 &&
1360 		     err == -EAGAIN)) {
1361 		sk->sk_write_space(sk);
1362 		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
1363 	}
1364 	release_sock(sk);
1365 	return err;
1366 }
1367 EXPORT_SYMBOL(tcp_sendmsg);
1368 
1369 /*
1370  *	Handle reading urgent data. BSD has very simple semantics for
1371  *	this, no blocking and very strange errors 8)
1372  */
1373 
1374 static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
1375 {
1376 	struct tcp_sock *tp = tcp_sk(sk);
1377 
1378 	/* No URG data to read. */
1379 	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1380 	    tp->urg_data == TCP_URG_READ)
1381 		return -EINVAL;	/* Yes this is right ! */
1382 
1383 	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1384 		return -ENOTCONN;
1385 
1386 	if (tp->urg_data & TCP_URG_VALID) {
1387 		int err = 0;
1388 		char c = tp->urg_data;
1389 
1390 		if (!(flags & MSG_PEEK))
1391 			tp->urg_data = TCP_URG_READ;
1392 
1393 		/* Read urgent data. */
1394 		msg->msg_flags |= MSG_OOB;
1395 
1396 		if (len > 0) {
1397 			if (!(flags & MSG_TRUNC))
1398 				err = memcpy_to_msg(msg, &c, 1);
1399 			len = 1;
1400 		} else
1401 			msg->msg_flags |= MSG_TRUNC;
1402 
1403 		return err ? -EFAULT : len;
1404 	}
1405 
1406 	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1407 		return 0;
1408 
1409 	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
1410 	 * the available implementations agree in this case:
1411 	 * this call should never block, independent of the
1412 	 * blocking state of the socket.
1413 	 * Mike <pall@rz.uni-karlsruhe.de>
1414 	 */
1415 	return -EAGAIN;
1416 }
1417 
1418 static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
1419 {
1420 	struct sk_buff *skb;
1421 	int copied = 0, err = 0;
1422 
1423 	/* XXX -- need to support SO_PEEK_OFF */
1424 
1425 	skb_queue_walk(&sk->sk_write_queue, skb) {
1426 		err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
1427 		if (err)
1428 			break;
1429 
1430 		copied += skb->len;
1431 	}
1432 
1433 	return err ?: copied;
1434 }
1435 
1436 /* Clean up the receive buffer for full frames taken by the user,
1437  * then send an ACK if necessary.  COPIED is the number of bytes
1438  * tcp_recvmsg has given to the user so far, it speeds up the
1439  * calculation of whether or not we must ACK for the sake of
1440  * a window update.
1441  */
1442 static void tcp_cleanup_rbuf(struct sock *sk, int copied)
1443 {
1444 	struct tcp_sock *tp = tcp_sk(sk);
1445 	bool time_to_ack = false;
1446 
1447 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1448 
1449 	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1450 	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1451 	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1452 
1453 	if (inet_csk_ack_scheduled(sk)) {
1454 		const struct inet_connection_sock *icsk = inet_csk(sk);
1455 		   /* Delayed ACKs frequently hit locked sockets during bulk
1456 		    * receive. */
1457 		if (icsk->icsk_ack.blocked ||
1458 		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
1459 		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1460 		    /*
1461 		     * If this read emptied read buffer, we send ACK, if
1462 		     * connection is not bidirectional, user drained
1463 		     * receive buffer and there was a small segment
1464 		     * in queue.
1465 		     */
1466 		    (copied > 0 &&
1467 		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
1468 		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1469 		       !icsk->icsk_ack.pingpong)) &&
1470 		      !atomic_read(&sk->sk_rmem_alloc)))
1471 			time_to_ack = true;
1472 	}
1473 
1474 	/* We send an ACK if we can now advertise a non-zero window
1475 	 * which has been raised "significantly".
1476 	 *
1477 	 * Even if window raised up to infinity, do not send window open ACK
1478 	 * in states, where we will not receive more. It is useless.
1479 	 */
1480 	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1481 		__u32 rcv_window_now = tcp_receive_window(tp);
1482 
1483 		/* Optimize, __tcp_select_window() is not cheap. */
1484 		if (2*rcv_window_now <= tp->window_clamp) {
1485 			__u32 new_window = __tcp_select_window(sk);
1486 
1487 			/* Send ACK now, if this read freed lots of space
1488 			 * in our buffer. Certainly, new_window is new window.
1489 			 * We can advertise it now, if it is not less than current one.
1490 			 * "Lots" means "at least twice" here.
1491 			 */
1492 			if (new_window && new_window >= 2 * rcv_window_now)
1493 				time_to_ack = true;
1494 		}
1495 	}
1496 	if (time_to_ack)
1497 		tcp_send_ack(sk);
1498 }
1499 
1500 static void tcp_prequeue_process(struct sock *sk)
1501 {
1502 	struct sk_buff *skb;
1503 	struct tcp_sock *tp = tcp_sk(sk);
1504 
1505 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
1506 
1507 	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1508 		sk_backlog_rcv(sk, skb);
1509 
1510 	/* Clear memory counter. */
1511 	tp->ucopy.memory = 0;
1512 }
1513 
1514 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1515 {
1516 	struct sk_buff *skb;
1517 	u32 offset;
1518 
1519 	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1520 		offset = seq - TCP_SKB_CB(skb)->seq;
1521 		if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
1522 			pr_err_once("%s: found a SYN, please report !\n", __func__);
1523 			offset--;
1524 		}
1525 		if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) {
1526 			*off = offset;
1527 			return skb;
1528 		}
1529 		/* This looks weird, but this can happen if TCP collapsing
1530 		 * splitted a fat GRO packet, while we released socket lock
1531 		 * in skb_splice_bits()
1532 		 */
1533 		sk_eat_skb(sk, skb);
1534 	}
1535 	return NULL;
1536 }
1537 
1538 /*
1539  * This routine provides an alternative to tcp_recvmsg() for routines
1540  * that would like to handle copying from skbuffs directly in 'sendfile'
1541  * fashion.
1542  * Note:
1543  *	- It is assumed that the socket was locked by the caller.
1544  *	- The routine does not block.
1545  *	- At present, there is no support for reading OOB data
1546  *	  or for 'peeking' the socket using this routine
1547  *	  (although both would be easy to implement).
1548  */
1549 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1550 		  sk_read_actor_t recv_actor)
1551 {
1552 	struct sk_buff *skb;
1553 	struct tcp_sock *tp = tcp_sk(sk);
1554 	u32 seq = tp->copied_seq;
1555 	u32 offset;
1556 	int copied = 0;
1557 
1558 	if (sk->sk_state == TCP_LISTEN)
1559 		return -ENOTCONN;
1560 	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1561 		if (offset < skb->len) {
1562 			int used;
1563 			size_t len;
1564 
1565 			len = skb->len - offset;
1566 			/* Stop reading if we hit a patch of urgent data */
1567 			if (tp->urg_data) {
1568 				u32 urg_offset = tp->urg_seq - seq;
1569 				if (urg_offset < len)
1570 					len = urg_offset;
1571 				if (!len)
1572 					break;
1573 			}
1574 			used = recv_actor(desc, skb, offset, len);
1575 			if (used <= 0) {
1576 				if (!copied)
1577 					copied = used;
1578 				break;
1579 			} else if (used <= len) {
1580 				seq += used;
1581 				copied += used;
1582 				offset += used;
1583 			}
1584 			/* If recv_actor drops the lock (e.g. TCP splice
1585 			 * receive) the skb pointer might be invalid when
1586 			 * getting here: tcp_collapse might have deleted it
1587 			 * while aggregating skbs from the socket queue.
1588 			 */
1589 			skb = tcp_recv_skb(sk, seq - 1, &offset);
1590 			if (!skb)
1591 				break;
1592 			/* TCP coalescing might have appended data to the skb.
1593 			 * Try to splice more frags
1594 			 */
1595 			if (offset + 1 != skb->len)
1596 				continue;
1597 		}
1598 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
1599 			sk_eat_skb(sk, skb);
1600 			++seq;
1601 			break;
1602 		}
1603 		sk_eat_skb(sk, skb);
1604 		if (!desc->count)
1605 			break;
1606 		tp->copied_seq = seq;
1607 	}
1608 	tp->copied_seq = seq;
1609 
1610 	tcp_rcv_space_adjust(sk);
1611 
1612 	/* Clean up data we have read: This will do ACK frames. */
1613 	if (copied > 0) {
1614 		tcp_recv_skb(sk, seq, &offset);
1615 		tcp_cleanup_rbuf(sk, copied);
1616 	}
1617 	return copied;
1618 }
1619 EXPORT_SYMBOL(tcp_read_sock);
1620 
1621 int tcp_peek_len(struct socket *sock)
1622 {
1623 	return tcp_inq(sock->sk);
1624 }
1625 EXPORT_SYMBOL(tcp_peek_len);
1626 
1627 /*
1628  *	This routine copies from a sock struct into the user buffer.
1629  *
1630  *	Technical note: in 2.3 we work on _locked_ socket, so that
1631  *	tricks with *seq access order and skb->users are not required.
1632  *	Probably, code can be easily improved even more.
1633  */
1634 
1635 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1636 		int flags, int *addr_len)
1637 {
1638 	struct tcp_sock *tp = tcp_sk(sk);
1639 	int copied = 0;
1640 	u32 peek_seq;
1641 	u32 *seq;
1642 	unsigned long used;
1643 	int err;
1644 	int target;		/* Read at least this many bytes */
1645 	long timeo;
1646 	struct task_struct *user_recv = NULL;
1647 	struct sk_buff *skb, *last;
1648 	u32 urg_hole = 0;
1649 
1650 	if (unlikely(flags & MSG_ERRQUEUE))
1651 		return inet_recv_error(sk, msg, len, addr_len);
1652 
1653 	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
1654 	    (sk->sk_state == TCP_ESTABLISHED))
1655 		sk_busy_loop(sk, nonblock);
1656 
1657 	lock_sock(sk);
1658 
1659 	err = -ENOTCONN;
1660 	if (sk->sk_state == TCP_LISTEN)
1661 		goto out;
1662 
1663 	timeo = sock_rcvtimeo(sk, nonblock);
1664 
1665 	/* Urgent data needs to be handled specially. */
1666 	if (flags & MSG_OOB)
1667 		goto recv_urg;
1668 
1669 	if (unlikely(tp->repair)) {
1670 		err = -EPERM;
1671 		if (!(flags & MSG_PEEK))
1672 			goto out;
1673 
1674 		if (tp->repair_queue == TCP_SEND_QUEUE)
1675 			goto recv_sndq;
1676 
1677 		err = -EINVAL;
1678 		if (tp->repair_queue == TCP_NO_QUEUE)
1679 			goto out;
1680 
1681 		/* 'common' recv queue MSG_PEEK-ing */
1682 	}
1683 
1684 	seq = &tp->copied_seq;
1685 	if (flags & MSG_PEEK) {
1686 		peek_seq = tp->copied_seq;
1687 		seq = &peek_seq;
1688 	}
1689 
1690 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1691 
1692 	do {
1693 		u32 offset;
1694 
1695 		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1696 		if (tp->urg_data && tp->urg_seq == *seq) {
1697 			if (copied)
1698 				break;
1699 			if (signal_pending(current)) {
1700 				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1701 				break;
1702 			}
1703 		}
1704 
1705 		/* Next get a buffer. */
1706 
1707 		last = skb_peek_tail(&sk->sk_receive_queue);
1708 		skb_queue_walk(&sk->sk_receive_queue, skb) {
1709 			last = skb;
1710 			/* Now that we have two receive queues this
1711 			 * shouldn't happen.
1712 			 */
1713 			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
1714 				 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
1715 				 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
1716 				 flags))
1717 				break;
1718 
1719 			offset = *seq - TCP_SKB_CB(skb)->seq;
1720 			if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
1721 				pr_err_once("%s: found a SYN, please report !\n", __func__);
1722 				offset--;
1723 			}
1724 			if (offset < skb->len)
1725 				goto found_ok_skb;
1726 			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1727 				goto found_fin_ok;
1728 			WARN(!(flags & MSG_PEEK),
1729 			     "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
1730 			     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
1731 		}
1732 
1733 		/* Well, if we have backlog, try to process it now yet. */
1734 
1735 		if (copied >= target && !sk->sk_backlog.tail)
1736 			break;
1737 
1738 		if (copied) {
1739 			if (sk->sk_err ||
1740 			    sk->sk_state == TCP_CLOSE ||
1741 			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
1742 			    !timeo ||
1743 			    signal_pending(current))
1744 				break;
1745 		} else {
1746 			if (sock_flag(sk, SOCK_DONE))
1747 				break;
1748 
1749 			if (sk->sk_err) {
1750 				copied = sock_error(sk);
1751 				break;
1752 			}
1753 
1754 			if (sk->sk_shutdown & RCV_SHUTDOWN)
1755 				break;
1756 
1757 			if (sk->sk_state == TCP_CLOSE) {
1758 				if (!sock_flag(sk, SOCK_DONE)) {
1759 					/* This occurs when user tries to read
1760 					 * from never connected socket.
1761 					 */
1762 					copied = -ENOTCONN;
1763 					break;
1764 				}
1765 				break;
1766 			}
1767 
1768 			if (!timeo) {
1769 				copied = -EAGAIN;
1770 				break;
1771 			}
1772 
1773 			if (signal_pending(current)) {
1774 				copied = sock_intr_errno(timeo);
1775 				break;
1776 			}
1777 		}
1778 
1779 		tcp_cleanup_rbuf(sk, copied);
1780 
1781 		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1782 			/* Install new reader */
1783 			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1784 				user_recv = current;
1785 				tp->ucopy.task = user_recv;
1786 				tp->ucopy.msg = msg;
1787 			}
1788 
1789 			tp->ucopy.len = len;
1790 
1791 			WARN_ON(tp->copied_seq != tp->rcv_nxt &&
1792 				!(flags & (MSG_PEEK | MSG_TRUNC)));
1793 
1794 			/* Ugly... If prequeue is not empty, we have to
1795 			 * process it before releasing socket, otherwise
1796 			 * order will be broken at second iteration.
1797 			 * More elegant solution is required!!!
1798 			 *
1799 			 * Look: we have the following (pseudo)queues:
1800 			 *
1801 			 * 1. packets in flight
1802 			 * 2. backlog
1803 			 * 3. prequeue
1804 			 * 4. receive_queue
1805 			 *
1806 			 * Each queue can be processed only if the next ones
1807 			 * are empty. At this point we have empty receive_queue.
1808 			 * But prequeue _can_ be not empty after 2nd iteration,
1809 			 * when we jumped to start of loop because backlog
1810 			 * processing added something to receive_queue.
1811 			 * We cannot release_sock(), because backlog contains
1812 			 * packets arrived _after_ prequeued ones.
1813 			 *
1814 			 * Shortly, algorithm is clear --- to process all
1815 			 * the queues in order. We could make it more directly,
1816 			 * requeueing packets from backlog to prequeue, if
1817 			 * is not empty. It is more elegant, but eats cycles,
1818 			 * unfortunately.
1819 			 */
1820 			if (!skb_queue_empty(&tp->ucopy.prequeue))
1821 				goto do_prequeue;
1822 
1823 			/* __ Set realtime policy in scheduler __ */
1824 		}
1825 
1826 		if (copied >= target) {
1827 			/* Do not sleep, just process backlog. */
1828 			release_sock(sk);
1829 			lock_sock(sk);
1830 		} else {
1831 			sk_wait_data(sk, &timeo, last);
1832 		}
1833 
1834 		if (user_recv) {
1835 			int chunk;
1836 
1837 			/* __ Restore normal policy in scheduler __ */
1838 
1839 			chunk = len - tp->ucopy.len;
1840 			if (chunk != 0) {
1841 				NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1842 				len -= chunk;
1843 				copied += chunk;
1844 			}
1845 
1846 			if (tp->rcv_nxt == tp->copied_seq &&
1847 			    !skb_queue_empty(&tp->ucopy.prequeue)) {
1848 do_prequeue:
1849 				tcp_prequeue_process(sk);
1850 
1851 				chunk = len - tp->ucopy.len;
1852 				if (chunk != 0) {
1853 					NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1854 					len -= chunk;
1855 					copied += chunk;
1856 				}
1857 			}
1858 		}
1859 		if ((flags & MSG_PEEK) &&
1860 		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
1861 			net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
1862 					    current->comm,
1863 					    task_pid_nr(current));
1864 			peek_seq = tp->copied_seq;
1865 		}
1866 		continue;
1867 
1868 	found_ok_skb:
1869 		/* Ok so how much can we use? */
1870 		used = skb->len - offset;
1871 		if (len < used)
1872 			used = len;
1873 
1874 		/* Do we have urgent data here? */
1875 		if (tp->urg_data) {
1876 			u32 urg_offset = tp->urg_seq - *seq;
1877 			if (urg_offset < used) {
1878 				if (!urg_offset) {
1879 					if (!sock_flag(sk, SOCK_URGINLINE)) {
1880 						++*seq;
1881 						urg_hole++;
1882 						offset++;
1883 						used--;
1884 						if (!used)
1885 							goto skip_copy;
1886 					}
1887 				} else
1888 					used = urg_offset;
1889 			}
1890 		}
1891 
1892 		if (!(flags & MSG_TRUNC)) {
1893 			err = skb_copy_datagram_msg(skb, offset, msg, used);
1894 			if (err) {
1895 				/* Exception. Bailout! */
1896 				if (!copied)
1897 					copied = -EFAULT;
1898 				break;
1899 			}
1900 		}
1901 
1902 		*seq += used;
1903 		copied += used;
1904 		len -= used;
1905 
1906 		tcp_rcv_space_adjust(sk);
1907 
1908 skip_copy:
1909 		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1910 			tp->urg_data = 0;
1911 			tcp_fast_path_check(sk);
1912 		}
1913 		if (used + offset < skb->len)
1914 			continue;
1915 
1916 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1917 			goto found_fin_ok;
1918 		if (!(flags & MSG_PEEK))
1919 			sk_eat_skb(sk, skb);
1920 		continue;
1921 
1922 	found_fin_ok:
1923 		/* Process the FIN. */
1924 		++*seq;
1925 		if (!(flags & MSG_PEEK))
1926 			sk_eat_skb(sk, skb);
1927 		break;
1928 	} while (len > 0);
1929 
1930 	if (user_recv) {
1931 		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1932 			int chunk;
1933 
1934 			tp->ucopy.len = copied > 0 ? len : 0;
1935 
1936 			tcp_prequeue_process(sk);
1937 
1938 			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1939 				NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1940 				len -= chunk;
1941 				copied += chunk;
1942 			}
1943 		}
1944 
1945 		tp->ucopy.task = NULL;
1946 		tp->ucopy.len = 0;
1947 	}
1948 
1949 	/* According to UNIX98, msg_name/msg_namelen are ignored
1950 	 * on connected socket. I was just happy when found this 8) --ANK
1951 	 */
1952 
1953 	/* Clean up data we have read: This will do ACK frames. */
1954 	tcp_cleanup_rbuf(sk, copied);
1955 
1956 	release_sock(sk);
1957 	return copied;
1958 
1959 out:
1960 	release_sock(sk);
1961 	return err;
1962 
1963 recv_urg:
1964 	err = tcp_recv_urg(sk, msg, len, flags);
1965 	goto out;
1966 
1967 recv_sndq:
1968 	err = tcp_peek_sndq(sk, msg, len);
1969 	goto out;
1970 }
1971 EXPORT_SYMBOL(tcp_recvmsg);
1972 
1973 void tcp_set_state(struct sock *sk, int state)
1974 {
1975 	int oldstate = sk->sk_state;
1976 
1977 	switch (state) {
1978 	case TCP_ESTABLISHED:
1979 		if (oldstate != TCP_ESTABLISHED)
1980 			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
1981 		break;
1982 
1983 	case TCP_CLOSE:
1984 		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
1985 			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
1986 
1987 		sk->sk_prot->unhash(sk);
1988 		if (inet_csk(sk)->icsk_bind_hash &&
1989 		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
1990 			inet_put_port(sk);
1991 		/* fall through */
1992 	default:
1993 		if (oldstate == TCP_ESTABLISHED)
1994 			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
1995 	}
1996 
1997 	/* Change state AFTER socket is unhashed to avoid closed
1998 	 * socket sitting in hash tables.
1999 	 */
2000 	sk_state_store(sk, state);
2001 
2002 #ifdef STATE_TRACE
2003 	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
2004 #endif
2005 }
2006 EXPORT_SYMBOL_GPL(tcp_set_state);
2007 
2008 /*
2009  *	State processing on a close. This implements the state shift for
2010  *	sending our FIN frame. Note that we only send a FIN for some
2011  *	states. A shutdown() may have already sent the FIN, or we may be
2012  *	closed.
2013  */
2014 
2015 static const unsigned char new_state[16] = {
2016   /* current state:        new state:      action:	*/
2017   [0 /* (Invalid) */]	= TCP_CLOSE,
2018   [TCP_ESTABLISHED]	= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2019   [TCP_SYN_SENT]	= TCP_CLOSE,
2020   [TCP_SYN_RECV]	= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2021   [TCP_FIN_WAIT1]	= TCP_FIN_WAIT1,
2022   [TCP_FIN_WAIT2]	= TCP_FIN_WAIT2,
2023   [TCP_TIME_WAIT]	= TCP_CLOSE,
2024   [TCP_CLOSE]		= TCP_CLOSE,
2025   [TCP_CLOSE_WAIT]	= TCP_LAST_ACK  | TCP_ACTION_FIN,
2026   [TCP_LAST_ACK]	= TCP_LAST_ACK,
2027   [TCP_LISTEN]		= TCP_CLOSE,
2028   [TCP_CLOSING]		= TCP_CLOSING,
2029   [TCP_NEW_SYN_RECV]	= TCP_CLOSE,	/* should not happen ! */
2030 };
2031 
2032 static int tcp_close_state(struct sock *sk)
2033 {
2034 	int next = (int)new_state[sk->sk_state];
2035 	int ns = next & TCP_STATE_MASK;
2036 
2037 	tcp_set_state(sk, ns);
2038 
2039 	return next & TCP_ACTION_FIN;
2040 }
2041 
2042 /*
2043  *	Shutdown the sending side of a connection. Much like close except
2044  *	that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
2045  */
2046 
2047 void tcp_shutdown(struct sock *sk, int how)
2048 {
2049 	/*	We need to grab some memory, and put together a FIN,
2050 	 *	and then put it into the queue to be sent.
2051 	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
2052 	 */
2053 	if (!(how & SEND_SHUTDOWN))
2054 		return;
2055 
2056 	/* If we've already sent a FIN, or it's a closed state, skip this. */
2057 	if ((1 << sk->sk_state) &
2058 	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
2059 	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
2060 		/* Clear out any half completed packets.  FIN if needed. */
2061 		if (tcp_close_state(sk))
2062 			tcp_send_fin(sk);
2063 	}
2064 }
2065 EXPORT_SYMBOL(tcp_shutdown);
2066 
2067 bool tcp_check_oom(struct sock *sk, int shift)
2068 {
2069 	bool too_many_orphans, out_of_socket_memory;
2070 
2071 	too_many_orphans = tcp_too_many_orphans(sk, shift);
2072 	out_of_socket_memory = tcp_out_of_memory(sk);
2073 
2074 	if (too_many_orphans)
2075 		net_info_ratelimited("too many orphaned sockets\n");
2076 	if (out_of_socket_memory)
2077 		net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
2078 	return too_many_orphans || out_of_socket_memory;
2079 }
2080 
2081 void tcp_close(struct sock *sk, long timeout)
2082 {
2083 	struct sk_buff *skb;
2084 	int data_was_unread = 0;
2085 	int state;
2086 
2087 	lock_sock(sk);
2088 	sk->sk_shutdown = SHUTDOWN_MASK;
2089 
2090 	if (sk->sk_state == TCP_LISTEN) {
2091 		tcp_set_state(sk, TCP_CLOSE);
2092 
2093 		/* Special case. */
2094 		inet_csk_listen_stop(sk);
2095 
2096 		goto adjudge_to_death;
2097 	}
2098 
2099 	/*  We need to flush the recv. buffs.  We do this only on the
2100 	 *  descriptor close, not protocol-sourced closes, because the
2101 	 *  reader process may not have drained the data yet!
2102 	 */
2103 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
2104 		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
2105 
2106 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
2107 			len--;
2108 		data_was_unread += len;
2109 		__kfree_skb(skb);
2110 	}
2111 
2112 	sk_mem_reclaim(sk);
2113 
2114 	/* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
2115 	if (sk->sk_state == TCP_CLOSE)
2116 		goto adjudge_to_death;
2117 
2118 	/* As outlined in RFC 2525, section 2.17, we send a RST here because
2119 	 * data was lost. To witness the awful effects of the old behavior of
2120 	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
2121 	 * GET in an FTP client, suspend the process, wait for the client to
2122 	 * advertise a zero window, then kill -9 the FTP client, wheee...
2123 	 * Note: timeout is always zero in such a case.
2124 	 */
2125 	if (unlikely(tcp_sk(sk)->repair)) {
2126 		sk->sk_prot->disconnect(sk, 0);
2127 	} else if (data_was_unread) {
2128 		/* Unread data was tossed, zap the connection. */
2129 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
2130 		tcp_set_state(sk, TCP_CLOSE);
2131 		tcp_send_active_reset(sk, sk->sk_allocation);
2132 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
2133 		/* Check zero linger _after_ checking for unread data. */
2134 		sk->sk_prot->disconnect(sk, 0);
2135 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
2136 	} else if (tcp_close_state(sk)) {
2137 		/* We FIN if the application ate all the data before
2138 		 * zapping the connection.
2139 		 */
2140 
2141 		/* RED-PEN. Formally speaking, we have broken TCP state
2142 		 * machine. State transitions:
2143 		 *
2144 		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
2145 		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
2146 		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
2147 		 *
2148 		 * are legal only when FIN has been sent (i.e. in window),
2149 		 * rather than queued out of window. Purists blame.
2150 		 *
2151 		 * F.e. "RFC state" is ESTABLISHED,
2152 		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
2153 		 *
2154 		 * The visible declinations are that sometimes
2155 		 * we enter time-wait state, when it is not required really
2156 		 * (harmless), do not send active resets, when they are
2157 		 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
2158 		 * they look as CLOSING or LAST_ACK for Linux)
2159 		 * Probably, I missed some more holelets.
2160 		 * 						--ANK
2161 		 * XXX (TFO) - To start off we don't support SYN+ACK+FIN
2162 		 * in a single packet! (May consider it later but will
2163 		 * probably need API support or TCP_CORK SYN-ACK until
2164 		 * data is written and socket is closed.)
2165 		 */
2166 		tcp_send_fin(sk);
2167 	}
2168 
2169 	sk_stream_wait_close(sk, timeout);
2170 
2171 adjudge_to_death:
2172 	state = sk->sk_state;
2173 	sock_hold(sk);
2174 	sock_orphan(sk);
2175 
2176 	/* It is the last release_sock in its life. It will remove backlog. */
2177 	release_sock(sk);
2178 
2179 
2180 	/* Now socket is owned by kernel and we acquire BH lock
2181 	   to finish close. No need to check for user refs.
2182 	 */
2183 	local_bh_disable();
2184 	bh_lock_sock(sk);
2185 	WARN_ON(sock_owned_by_user(sk));
2186 
2187 	percpu_counter_inc(sk->sk_prot->orphan_count);
2188 
2189 	/* Have we already been destroyed by a softirq or backlog? */
2190 	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
2191 		goto out;
2192 
2193 	/*	This is a (useful) BSD violating of the RFC. There is a
2194 	 *	problem with TCP as specified in that the other end could
2195 	 *	keep a socket open forever with no application left this end.
2196 	 *	We use a 1 minute timeout (about the same as BSD) then kill
2197 	 *	our end. If they send after that then tough - BUT: long enough
2198 	 *	that we won't make the old 4*rto = almost no time - whoops
2199 	 *	reset mistake.
2200 	 *
2201 	 *	Nope, it was not mistake. It is really desired behaviour
2202 	 *	f.e. on http servers, when such sockets are useless, but
2203 	 *	consume significant resources. Let's do it with special
2204 	 *	linger2	option.					--ANK
2205 	 */
2206 
2207 	if (sk->sk_state == TCP_FIN_WAIT2) {
2208 		struct tcp_sock *tp = tcp_sk(sk);
2209 		if (tp->linger2 < 0) {
2210 			tcp_set_state(sk, TCP_CLOSE);
2211 			tcp_send_active_reset(sk, GFP_ATOMIC);
2212 			__NET_INC_STATS(sock_net(sk),
2213 					LINUX_MIB_TCPABORTONLINGER);
2214 		} else {
2215 			const int tmo = tcp_fin_time(sk);
2216 
2217 			if (tmo > TCP_TIMEWAIT_LEN) {
2218 				inet_csk_reset_keepalive_timer(sk,
2219 						tmo - TCP_TIMEWAIT_LEN);
2220 			} else {
2221 				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
2222 				goto out;
2223 			}
2224 		}
2225 	}
2226 	if (sk->sk_state != TCP_CLOSE) {
2227 		sk_mem_reclaim(sk);
2228 		if (tcp_check_oom(sk, 0)) {
2229 			tcp_set_state(sk, TCP_CLOSE);
2230 			tcp_send_active_reset(sk, GFP_ATOMIC);
2231 			__NET_INC_STATS(sock_net(sk),
2232 					LINUX_MIB_TCPABORTONMEMORY);
2233 		}
2234 	}
2235 
2236 	if (sk->sk_state == TCP_CLOSE) {
2237 		struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
2238 		/* We could get here with a non-NULL req if the socket is
2239 		 * aborted (e.g., closed with unread data) before 3WHS
2240 		 * finishes.
2241 		 */
2242 		if (req)
2243 			reqsk_fastopen_remove(sk, req, false);
2244 		inet_csk_destroy_sock(sk);
2245 	}
2246 	/* Otherwise, socket is reprieved until protocol close. */
2247 
2248 out:
2249 	bh_unlock_sock(sk);
2250 	local_bh_enable();
2251 	sock_put(sk);
2252 }
2253 EXPORT_SYMBOL(tcp_close);
2254 
2255 /* These states need RST on ABORT according to RFC793 */
2256 
2257 static inline bool tcp_need_reset(int state)
2258 {
2259 	return (1 << state) &
2260 	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
2261 		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
2262 }
2263 
2264 int tcp_disconnect(struct sock *sk, int flags)
2265 {
2266 	struct inet_sock *inet = inet_sk(sk);
2267 	struct inet_connection_sock *icsk = inet_csk(sk);
2268 	struct tcp_sock *tp = tcp_sk(sk);
2269 	int err = 0;
2270 	int old_state = sk->sk_state;
2271 
2272 	if (old_state != TCP_CLOSE)
2273 		tcp_set_state(sk, TCP_CLOSE);
2274 
2275 	/* ABORT function of RFC793 */
2276 	if (old_state == TCP_LISTEN) {
2277 		inet_csk_listen_stop(sk);
2278 	} else if (unlikely(tp->repair)) {
2279 		sk->sk_err = ECONNABORTED;
2280 	} else if (tcp_need_reset(old_state) ||
2281 		   (tp->snd_nxt != tp->write_seq &&
2282 		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
2283 		/* The last check adjusts for discrepancy of Linux wrt. RFC
2284 		 * states
2285 		 */
2286 		tcp_send_active_reset(sk, gfp_any());
2287 		sk->sk_err = ECONNRESET;
2288 	} else if (old_state == TCP_SYN_SENT)
2289 		sk->sk_err = ECONNRESET;
2290 
2291 	tcp_clear_xmit_timers(sk);
2292 	__skb_queue_purge(&sk->sk_receive_queue);
2293 	tcp_write_queue_purge(sk);
2294 	skb_rbtree_purge(&tp->out_of_order_queue);
2295 
2296 	inet->inet_dport = 0;
2297 
2298 	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
2299 		inet_reset_saddr(sk);
2300 
2301 	sk->sk_shutdown = 0;
2302 	sock_reset_flag(sk, SOCK_DONE);
2303 	tp->srtt_us = 0;
2304 	tp->write_seq += tp->max_window + 2;
2305 	if (tp->write_seq == 0)
2306 		tp->write_seq = 1;
2307 	icsk->icsk_backoff = 0;
2308 	tp->snd_cwnd = 2;
2309 	icsk->icsk_probes_out = 0;
2310 	tp->packets_out = 0;
2311 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
2312 	tp->snd_cwnd_cnt = 0;
2313 	tp->window_clamp = 0;
2314 	tcp_set_ca_state(sk, TCP_CA_Open);
2315 	tcp_clear_retrans(tp);
2316 	inet_csk_delack_init(sk);
2317 	tcp_init_send_head(sk);
2318 	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2319 	__sk_dst_reset(sk);
2320 
2321 	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
2322 
2323 	sk->sk_error_report(sk);
2324 	return err;
2325 }
2326 EXPORT_SYMBOL(tcp_disconnect);
2327 
2328 static inline bool tcp_can_repair_sock(const struct sock *sk)
2329 {
2330 	return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
2331 		(sk->sk_state != TCP_LISTEN);
2332 }
2333 
2334 static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int len)
2335 {
2336 	struct tcp_repair_window opt;
2337 
2338 	if (!tp->repair)
2339 		return -EPERM;
2340 
2341 	if (len != sizeof(opt))
2342 		return -EINVAL;
2343 
2344 	if (copy_from_user(&opt, optbuf, sizeof(opt)))
2345 		return -EFAULT;
2346 
2347 	if (opt.max_window < opt.snd_wnd)
2348 		return -EINVAL;
2349 
2350 	if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd))
2351 		return -EINVAL;
2352 
2353 	if (after(opt.rcv_wup, tp->rcv_nxt))
2354 		return -EINVAL;
2355 
2356 	tp->snd_wl1	= opt.snd_wl1;
2357 	tp->snd_wnd	= opt.snd_wnd;
2358 	tp->max_window	= opt.max_window;
2359 
2360 	tp->rcv_wnd	= opt.rcv_wnd;
2361 	tp->rcv_wup	= opt.rcv_wup;
2362 
2363 	return 0;
2364 }
2365 
2366 static int tcp_repair_options_est(struct tcp_sock *tp,
2367 		struct tcp_repair_opt __user *optbuf, unsigned int len)
2368 {
2369 	struct tcp_repair_opt opt;
2370 
2371 	while (len >= sizeof(opt)) {
2372 		if (copy_from_user(&opt, optbuf, sizeof(opt)))
2373 			return -EFAULT;
2374 
2375 		optbuf++;
2376 		len -= sizeof(opt);
2377 
2378 		switch (opt.opt_code) {
2379 		case TCPOPT_MSS:
2380 			tp->rx_opt.mss_clamp = opt.opt_val;
2381 			break;
2382 		case TCPOPT_WINDOW:
2383 			{
2384 				u16 snd_wscale = opt.opt_val & 0xFFFF;
2385 				u16 rcv_wscale = opt.opt_val >> 16;
2386 
2387 				if (snd_wscale > 14 || rcv_wscale > 14)
2388 					return -EFBIG;
2389 
2390 				tp->rx_opt.snd_wscale = snd_wscale;
2391 				tp->rx_opt.rcv_wscale = rcv_wscale;
2392 				tp->rx_opt.wscale_ok = 1;
2393 			}
2394 			break;
2395 		case TCPOPT_SACK_PERM:
2396 			if (opt.opt_val != 0)
2397 				return -EINVAL;
2398 
2399 			tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
2400 			if (sysctl_tcp_fack)
2401 				tcp_enable_fack(tp);
2402 			break;
2403 		case TCPOPT_TIMESTAMP:
2404 			if (opt.opt_val != 0)
2405 				return -EINVAL;
2406 
2407 			tp->rx_opt.tstamp_ok = 1;
2408 			break;
2409 		}
2410 	}
2411 
2412 	return 0;
2413 }
2414 
2415 /*
2416  *	Socket option code for TCP.
2417  */
2418 static int do_tcp_setsockopt(struct sock *sk, int level,
2419 		int optname, char __user *optval, unsigned int optlen)
2420 {
2421 	struct tcp_sock *tp = tcp_sk(sk);
2422 	struct inet_connection_sock *icsk = inet_csk(sk);
2423 	struct net *net = sock_net(sk);
2424 	int val;
2425 	int err = 0;
2426 
2427 	/* These are data/string values, all the others are ints */
2428 	switch (optname) {
2429 	case TCP_CONGESTION: {
2430 		char name[TCP_CA_NAME_MAX];
2431 
2432 		if (optlen < 1)
2433 			return -EINVAL;
2434 
2435 		val = strncpy_from_user(name, optval,
2436 					min_t(long, TCP_CA_NAME_MAX-1, optlen));
2437 		if (val < 0)
2438 			return -EFAULT;
2439 		name[val] = 0;
2440 
2441 		lock_sock(sk);
2442 		err = tcp_set_congestion_control(sk, name);
2443 		release_sock(sk);
2444 		return err;
2445 	}
2446 	default:
2447 		/* fallthru */
2448 		break;
2449 	}
2450 
2451 	if (optlen < sizeof(int))
2452 		return -EINVAL;
2453 
2454 	if (get_user(val, (int __user *)optval))
2455 		return -EFAULT;
2456 
2457 	lock_sock(sk);
2458 
2459 	switch (optname) {
2460 	case TCP_MAXSEG:
2461 		/* Values greater than interface MTU won't take effect. However
2462 		 * at the point when this call is done we typically don't yet
2463 		 * know which interface is going to be used */
2464 		if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
2465 			err = -EINVAL;
2466 			break;
2467 		}
2468 		tp->rx_opt.user_mss = val;
2469 		break;
2470 
2471 	case TCP_NODELAY:
2472 		if (val) {
2473 			/* TCP_NODELAY is weaker than TCP_CORK, so that
2474 			 * this option on corked socket is remembered, but
2475 			 * it is not activated until cork is cleared.
2476 			 *
2477 			 * However, when TCP_NODELAY is set we make
2478 			 * an explicit push, which overrides even TCP_CORK
2479 			 * for currently queued segments.
2480 			 */
2481 			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
2482 			tcp_push_pending_frames(sk);
2483 		} else {
2484 			tp->nonagle &= ~TCP_NAGLE_OFF;
2485 		}
2486 		break;
2487 
2488 	case TCP_THIN_LINEAR_TIMEOUTS:
2489 		if (val < 0 || val > 1)
2490 			err = -EINVAL;
2491 		else
2492 			tp->thin_lto = val;
2493 		break;
2494 
2495 	case TCP_THIN_DUPACK:
2496 		if (val < 0 || val > 1)
2497 			err = -EINVAL;
2498 		break;
2499 
2500 	case TCP_REPAIR:
2501 		if (!tcp_can_repair_sock(sk))
2502 			err = -EPERM;
2503 		else if (val == 1) {
2504 			tp->repair = 1;
2505 			sk->sk_reuse = SK_FORCE_REUSE;
2506 			tp->repair_queue = TCP_NO_QUEUE;
2507 		} else if (val == 0) {
2508 			tp->repair = 0;
2509 			sk->sk_reuse = SK_NO_REUSE;
2510 			tcp_send_window_probe(sk);
2511 		} else
2512 			err = -EINVAL;
2513 
2514 		break;
2515 
2516 	case TCP_REPAIR_QUEUE:
2517 		if (!tp->repair)
2518 			err = -EPERM;
2519 		else if (val < TCP_QUEUES_NR)
2520 			tp->repair_queue = val;
2521 		else
2522 			err = -EINVAL;
2523 		break;
2524 
2525 	case TCP_QUEUE_SEQ:
2526 		if (sk->sk_state != TCP_CLOSE)
2527 			err = -EPERM;
2528 		else if (tp->repair_queue == TCP_SEND_QUEUE)
2529 			tp->write_seq = val;
2530 		else if (tp->repair_queue == TCP_RECV_QUEUE)
2531 			tp->rcv_nxt = val;
2532 		else
2533 			err = -EINVAL;
2534 		break;
2535 
2536 	case TCP_REPAIR_OPTIONS:
2537 		if (!tp->repair)
2538 			err = -EINVAL;
2539 		else if (sk->sk_state == TCP_ESTABLISHED)
2540 			err = tcp_repair_options_est(tp,
2541 					(struct tcp_repair_opt __user *)optval,
2542 					optlen);
2543 		else
2544 			err = -EPERM;
2545 		break;
2546 
2547 	case TCP_CORK:
2548 		/* When set indicates to always queue non-full frames.
2549 		 * Later the user clears this option and we transmit
2550 		 * any pending partial frames in the queue.  This is
2551 		 * meant to be used alongside sendfile() to get properly
2552 		 * filled frames when the user (for example) must write
2553 		 * out headers with a write() call first and then use
2554 		 * sendfile to send out the data parts.
2555 		 *
2556 		 * TCP_CORK can be set together with TCP_NODELAY and it is
2557 		 * stronger than TCP_NODELAY.
2558 		 */
2559 		if (val) {
2560 			tp->nonagle |= TCP_NAGLE_CORK;
2561 		} else {
2562 			tp->nonagle &= ~TCP_NAGLE_CORK;
2563 			if (tp->nonagle&TCP_NAGLE_OFF)
2564 				tp->nonagle |= TCP_NAGLE_PUSH;
2565 			tcp_push_pending_frames(sk);
2566 		}
2567 		break;
2568 
2569 	case TCP_KEEPIDLE:
2570 		if (val < 1 || val > MAX_TCP_KEEPIDLE)
2571 			err = -EINVAL;
2572 		else {
2573 			tp->keepalive_time = val * HZ;
2574 			if (sock_flag(sk, SOCK_KEEPOPEN) &&
2575 			    !((1 << sk->sk_state) &
2576 			      (TCPF_CLOSE | TCPF_LISTEN))) {
2577 				u32 elapsed = keepalive_time_elapsed(tp);
2578 				if (tp->keepalive_time > elapsed)
2579 					elapsed = tp->keepalive_time - elapsed;
2580 				else
2581 					elapsed = 0;
2582 				inet_csk_reset_keepalive_timer(sk, elapsed);
2583 			}
2584 		}
2585 		break;
2586 	case TCP_KEEPINTVL:
2587 		if (val < 1 || val > MAX_TCP_KEEPINTVL)
2588 			err = -EINVAL;
2589 		else
2590 			tp->keepalive_intvl = val * HZ;
2591 		break;
2592 	case TCP_KEEPCNT:
2593 		if (val < 1 || val > MAX_TCP_KEEPCNT)
2594 			err = -EINVAL;
2595 		else
2596 			tp->keepalive_probes = val;
2597 		break;
2598 	case TCP_SYNCNT:
2599 		if (val < 1 || val > MAX_TCP_SYNCNT)
2600 			err = -EINVAL;
2601 		else
2602 			icsk->icsk_syn_retries = val;
2603 		break;
2604 
2605 	case TCP_SAVE_SYN:
2606 		if (val < 0 || val > 1)
2607 			err = -EINVAL;
2608 		else
2609 			tp->save_syn = val;
2610 		break;
2611 
2612 	case TCP_LINGER2:
2613 		if (val < 0)
2614 			tp->linger2 = -1;
2615 		else if (val > net->ipv4.sysctl_tcp_fin_timeout / HZ)
2616 			tp->linger2 = 0;
2617 		else
2618 			tp->linger2 = val * HZ;
2619 		break;
2620 
2621 	case TCP_DEFER_ACCEPT:
2622 		/* Translate value in seconds to number of retransmits */
2623 		icsk->icsk_accept_queue.rskq_defer_accept =
2624 			secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
2625 					TCP_RTO_MAX / HZ);
2626 		break;
2627 
2628 	case TCP_WINDOW_CLAMP:
2629 		if (!val) {
2630 			if (sk->sk_state != TCP_CLOSE) {
2631 				err = -EINVAL;
2632 				break;
2633 			}
2634 			tp->window_clamp = 0;
2635 		} else
2636 			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2637 						SOCK_MIN_RCVBUF / 2 : val;
2638 		break;
2639 
2640 	case TCP_QUICKACK:
2641 		if (!val) {
2642 			icsk->icsk_ack.pingpong = 1;
2643 		} else {
2644 			icsk->icsk_ack.pingpong = 0;
2645 			if ((1 << sk->sk_state) &
2646 			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2647 			    inet_csk_ack_scheduled(sk)) {
2648 				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
2649 				tcp_cleanup_rbuf(sk, 1);
2650 				if (!(val & 1))
2651 					icsk->icsk_ack.pingpong = 1;
2652 			}
2653 		}
2654 		break;
2655 
2656 #ifdef CONFIG_TCP_MD5SIG
2657 	case TCP_MD5SIG:
2658 		/* Read the IP->Key mappings from userspace */
2659 		err = tp->af_specific->md5_parse(sk, optval, optlen);
2660 		break;
2661 #endif
2662 	case TCP_USER_TIMEOUT:
2663 		/* Cap the max time in ms TCP will retry or probe the window
2664 		 * before giving up and aborting (ETIMEDOUT) a connection.
2665 		 */
2666 		if (val < 0)
2667 			err = -EINVAL;
2668 		else
2669 			icsk->icsk_user_timeout = msecs_to_jiffies(val);
2670 		break;
2671 
2672 	case TCP_FASTOPEN:
2673 		if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
2674 		    TCPF_LISTEN))) {
2675 			tcp_fastopen_init_key_once(true);
2676 
2677 			fastopen_queue_tune(sk, val);
2678 		} else {
2679 			err = -EINVAL;
2680 		}
2681 		break;
2682 	case TCP_FASTOPEN_CONNECT:
2683 		if (val > 1 || val < 0) {
2684 			err = -EINVAL;
2685 		} else if (sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) {
2686 			if (sk->sk_state == TCP_CLOSE)
2687 				tp->fastopen_connect = val;
2688 			else
2689 				err = -EINVAL;
2690 		} else {
2691 			err = -EOPNOTSUPP;
2692 		}
2693 		break;
2694 	case TCP_TIMESTAMP:
2695 		if (!tp->repair)
2696 			err = -EPERM;
2697 		else
2698 			tp->tsoffset = val - tcp_time_stamp;
2699 		break;
2700 	case TCP_REPAIR_WINDOW:
2701 		err = tcp_repair_set_window(tp, optval, optlen);
2702 		break;
2703 	case TCP_NOTSENT_LOWAT:
2704 		tp->notsent_lowat = val;
2705 		sk->sk_write_space(sk);
2706 		break;
2707 	default:
2708 		err = -ENOPROTOOPT;
2709 		break;
2710 	}
2711 
2712 	release_sock(sk);
2713 	return err;
2714 }
2715 
2716 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2717 		   unsigned int optlen)
2718 {
2719 	const struct inet_connection_sock *icsk = inet_csk(sk);
2720 
2721 	if (level != SOL_TCP)
2722 		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
2723 						     optval, optlen);
2724 	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2725 }
2726 EXPORT_SYMBOL(tcp_setsockopt);
2727 
2728 #ifdef CONFIG_COMPAT
2729 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2730 			  char __user *optval, unsigned int optlen)
2731 {
2732 	if (level != SOL_TCP)
2733 		return inet_csk_compat_setsockopt(sk, level, optname,
2734 						  optval, optlen);
2735 	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2736 }
2737 EXPORT_SYMBOL(compat_tcp_setsockopt);
2738 #endif
2739 
2740 static void tcp_get_info_chrono_stats(const struct tcp_sock *tp,
2741 				      struct tcp_info *info)
2742 {
2743 	u64 stats[__TCP_CHRONO_MAX], total = 0;
2744 	enum tcp_chrono i;
2745 
2746 	for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) {
2747 		stats[i] = tp->chrono_stat[i - 1];
2748 		if (i == tp->chrono_type)
2749 			stats[i] += tcp_time_stamp - tp->chrono_start;
2750 		stats[i] *= USEC_PER_SEC / HZ;
2751 		total += stats[i];
2752 	}
2753 
2754 	info->tcpi_busy_time = total;
2755 	info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED];
2756 	info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED];
2757 }
2758 
2759 /* Return information about state of tcp endpoint in API format. */
2760 void tcp_get_info(struct sock *sk, struct tcp_info *info)
2761 {
2762 	const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
2763 	const struct inet_connection_sock *icsk = inet_csk(sk);
2764 	u32 now = tcp_time_stamp, intv;
2765 	u64 rate64;
2766 	bool slow;
2767 	u32 rate;
2768 
2769 	memset(info, 0, sizeof(*info));
2770 	if (sk->sk_type != SOCK_STREAM)
2771 		return;
2772 
2773 	info->tcpi_state = sk_state_load(sk);
2774 
2775 	/* Report meaningful fields for all TCP states, including listeners */
2776 	rate = READ_ONCE(sk->sk_pacing_rate);
2777 	rate64 = rate != ~0U ? rate : ~0ULL;
2778 	info->tcpi_pacing_rate = rate64;
2779 
2780 	rate = READ_ONCE(sk->sk_max_pacing_rate);
2781 	rate64 = rate != ~0U ? rate : ~0ULL;
2782 	info->tcpi_max_pacing_rate = rate64;
2783 
2784 	info->tcpi_reordering = tp->reordering;
2785 	info->tcpi_snd_cwnd = tp->snd_cwnd;
2786 
2787 	if (info->tcpi_state == TCP_LISTEN) {
2788 		/* listeners aliased fields :
2789 		 * tcpi_unacked -> Number of children ready for accept()
2790 		 * tcpi_sacked  -> max backlog
2791 		 */
2792 		info->tcpi_unacked = sk->sk_ack_backlog;
2793 		info->tcpi_sacked = sk->sk_max_ack_backlog;
2794 		return;
2795 	}
2796 
2797 	slow = lock_sock_fast(sk);
2798 
2799 	info->tcpi_ca_state = icsk->icsk_ca_state;
2800 	info->tcpi_retransmits = icsk->icsk_retransmits;
2801 	info->tcpi_probes = icsk->icsk_probes_out;
2802 	info->tcpi_backoff = icsk->icsk_backoff;
2803 
2804 	if (tp->rx_opt.tstamp_ok)
2805 		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2806 	if (tcp_is_sack(tp))
2807 		info->tcpi_options |= TCPI_OPT_SACK;
2808 	if (tp->rx_opt.wscale_ok) {
2809 		info->tcpi_options |= TCPI_OPT_WSCALE;
2810 		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2811 		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2812 	}
2813 
2814 	if (tp->ecn_flags & TCP_ECN_OK)
2815 		info->tcpi_options |= TCPI_OPT_ECN;
2816 	if (tp->ecn_flags & TCP_ECN_SEEN)
2817 		info->tcpi_options |= TCPI_OPT_ECN_SEEN;
2818 	if (tp->syn_data_acked)
2819 		info->tcpi_options |= TCPI_OPT_SYN_DATA;
2820 
2821 	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2822 	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2823 	info->tcpi_snd_mss = tp->mss_cache;
2824 	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2825 
2826 	info->tcpi_unacked = tp->packets_out;
2827 	info->tcpi_sacked = tp->sacked_out;
2828 
2829 	info->tcpi_lost = tp->lost_out;
2830 	info->tcpi_retrans = tp->retrans_out;
2831 	info->tcpi_fackets = tp->fackets_out;
2832 
2833 	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2834 	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2835 	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2836 
2837 	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
2838 	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2839 	info->tcpi_rtt = tp->srtt_us >> 3;
2840 	info->tcpi_rttvar = tp->mdev_us >> 2;
2841 	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2842 	info->tcpi_advmss = tp->advmss;
2843 
2844 	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2845 	info->tcpi_rcv_space = tp->rcvq_space.space;
2846 
2847 	info->tcpi_total_retrans = tp->total_retrans;
2848 
2849 	info->tcpi_bytes_acked = tp->bytes_acked;
2850 	info->tcpi_bytes_received = tp->bytes_received;
2851 	info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt);
2852 	tcp_get_info_chrono_stats(tp, info);
2853 
2854 	info->tcpi_segs_out = tp->segs_out;
2855 	info->tcpi_segs_in = tp->segs_in;
2856 
2857 	info->tcpi_min_rtt = tcp_min_rtt(tp);
2858 	info->tcpi_data_segs_in = tp->data_segs_in;
2859 	info->tcpi_data_segs_out = tp->data_segs_out;
2860 
2861 	info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0;
2862 	rate = READ_ONCE(tp->rate_delivered);
2863 	intv = READ_ONCE(tp->rate_interval_us);
2864 	if (rate && intv) {
2865 		rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
2866 		do_div(rate64, intv);
2867 		info->tcpi_delivery_rate = rate64;
2868 	}
2869 	unlock_sock_fast(sk, slow);
2870 }
2871 EXPORT_SYMBOL_GPL(tcp_get_info);
2872 
2873 struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
2874 {
2875 	const struct tcp_sock *tp = tcp_sk(sk);
2876 	struct sk_buff *stats;
2877 	struct tcp_info info;
2878 
2879 	stats = alloc_skb(5 * nla_total_size_64bit(sizeof(u64)), GFP_ATOMIC);
2880 	if (!stats)
2881 		return NULL;
2882 
2883 	tcp_get_info_chrono_stats(tp, &info);
2884 	nla_put_u64_64bit(stats, TCP_NLA_BUSY,
2885 			  info.tcpi_busy_time, TCP_NLA_PAD);
2886 	nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED,
2887 			  info.tcpi_rwnd_limited, TCP_NLA_PAD);
2888 	nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED,
2889 			  info.tcpi_sndbuf_limited, TCP_NLA_PAD);
2890 	nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT,
2891 			  tp->data_segs_out, TCP_NLA_PAD);
2892 	nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS,
2893 			  tp->total_retrans, TCP_NLA_PAD);
2894 	return stats;
2895 }
2896 
2897 static int do_tcp_getsockopt(struct sock *sk, int level,
2898 		int optname, char __user *optval, int __user *optlen)
2899 {
2900 	struct inet_connection_sock *icsk = inet_csk(sk);
2901 	struct tcp_sock *tp = tcp_sk(sk);
2902 	struct net *net = sock_net(sk);
2903 	int val, len;
2904 
2905 	if (get_user(len, optlen))
2906 		return -EFAULT;
2907 
2908 	len = min_t(unsigned int, len, sizeof(int));
2909 
2910 	if (len < 0)
2911 		return -EINVAL;
2912 
2913 	switch (optname) {
2914 	case TCP_MAXSEG:
2915 		val = tp->mss_cache;
2916 		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2917 			val = tp->rx_opt.user_mss;
2918 		if (tp->repair)
2919 			val = tp->rx_opt.mss_clamp;
2920 		break;
2921 	case TCP_NODELAY:
2922 		val = !!(tp->nonagle&TCP_NAGLE_OFF);
2923 		break;
2924 	case TCP_CORK:
2925 		val = !!(tp->nonagle&TCP_NAGLE_CORK);
2926 		break;
2927 	case TCP_KEEPIDLE:
2928 		val = keepalive_time_when(tp) / HZ;
2929 		break;
2930 	case TCP_KEEPINTVL:
2931 		val = keepalive_intvl_when(tp) / HZ;
2932 		break;
2933 	case TCP_KEEPCNT:
2934 		val = keepalive_probes(tp);
2935 		break;
2936 	case TCP_SYNCNT:
2937 		val = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
2938 		break;
2939 	case TCP_LINGER2:
2940 		val = tp->linger2;
2941 		if (val >= 0)
2942 			val = (val ? : net->ipv4.sysctl_tcp_fin_timeout) / HZ;
2943 		break;
2944 	case TCP_DEFER_ACCEPT:
2945 		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
2946 				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
2947 		break;
2948 	case TCP_WINDOW_CLAMP:
2949 		val = tp->window_clamp;
2950 		break;
2951 	case TCP_INFO: {
2952 		struct tcp_info info;
2953 
2954 		if (get_user(len, optlen))
2955 			return -EFAULT;
2956 
2957 		tcp_get_info(sk, &info);
2958 
2959 		len = min_t(unsigned int, len, sizeof(info));
2960 		if (put_user(len, optlen))
2961 			return -EFAULT;
2962 		if (copy_to_user(optval, &info, len))
2963 			return -EFAULT;
2964 		return 0;
2965 	}
2966 	case TCP_CC_INFO: {
2967 		const struct tcp_congestion_ops *ca_ops;
2968 		union tcp_cc_info info;
2969 		size_t sz = 0;
2970 		int attr;
2971 
2972 		if (get_user(len, optlen))
2973 			return -EFAULT;
2974 
2975 		ca_ops = icsk->icsk_ca_ops;
2976 		if (ca_ops && ca_ops->get_info)
2977 			sz = ca_ops->get_info(sk, ~0U, &attr, &info);
2978 
2979 		len = min_t(unsigned int, len, sz);
2980 		if (put_user(len, optlen))
2981 			return -EFAULT;
2982 		if (copy_to_user(optval, &info, len))
2983 			return -EFAULT;
2984 		return 0;
2985 	}
2986 	case TCP_QUICKACK:
2987 		val = !icsk->icsk_ack.pingpong;
2988 		break;
2989 
2990 	case TCP_CONGESTION:
2991 		if (get_user(len, optlen))
2992 			return -EFAULT;
2993 		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2994 		if (put_user(len, optlen))
2995 			return -EFAULT;
2996 		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2997 			return -EFAULT;
2998 		return 0;
2999 
3000 	case TCP_THIN_LINEAR_TIMEOUTS:
3001 		val = tp->thin_lto;
3002 		break;
3003 
3004 	case TCP_THIN_DUPACK:
3005 		val = 0;
3006 		break;
3007 
3008 	case TCP_REPAIR:
3009 		val = tp->repair;
3010 		break;
3011 
3012 	case TCP_REPAIR_QUEUE:
3013 		if (tp->repair)
3014 			val = tp->repair_queue;
3015 		else
3016 			return -EINVAL;
3017 		break;
3018 
3019 	case TCP_REPAIR_WINDOW: {
3020 		struct tcp_repair_window opt;
3021 
3022 		if (get_user(len, optlen))
3023 			return -EFAULT;
3024 
3025 		if (len != sizeof(opt))
3026 			return -EINVAL;
3027 
3028 		if (!tp->repair)
3029 			return -EPERM;
3030 
3031 		opt.snd_wl1	= tp->snd_wl1;
3032 		opt.snd_wnd	= tp->snd_wnd;
3033 		opt.max_window	= tp->max_window;
3034 		opt.rcv_wnd	= tp->rcv_wnd;
3035 		opt.rcv_wup	= tp->rcv_wup;
3036 
3037 		if (copy_to_user(optval, &opt, len))
3038 			return -EFAULT;
3039 		return 0;
3040 	}
3041 	case TCP_QUEUE_SEQ:
3042 		if (tp->repair_queue == TCP_SEND_QUEUE)
3043 			val = tp->write_seq;
3044 		else if (tp->repair_queue == TCP_RECV_QUEUE)
3045 			val = tp->rcv_nxt;
3046 		else
3047 			return -EINVAL;
3048 		break;
3049 
3050 	case TCP_USER_TIMEOUT:
3051 		val = jiffies_to_msecs(icsk->icsk_user_timeout);
3052 		break;
3053 
3054 	case TCP_FASTOPEN:
3055 		val = icsk->icsk_accept_queue.fastopenq.max_qlen;
3056 		break;
3057 
3058 	case TCP_FASTOPEN_CONNECT:
3059 		val = tp->fastopen_connect;
3060 		break;
3061 
3062 	case TCP_TIMESTAMP:
3063 		val = tcp_time_stamp + tp->tsoffset;
3064 		break;
3065 	case TCP_NOTSENT_LOWAT:
3066 		val = tp->notsent_lowat;
3067 		break;
3068 	case TCP_SAVE_SYN:
3069 		val = tp->save_syn;
3070 		break;
3071 	case TCP_SAVED_SYN: {
3072 		if (get_user(len, optlen))
3073 			return -EFAULT;
3074 
3075 		lock_sock(sk);
3076 		if (tp->saved_syn) {
3077 			if (len < tp->saved_syn[0]) {
3078 				if (put_user(tp->saved_syn[0], optlen)) {
3079 					release_sock(sk);
3080 					return -EFAULT;
3081 				}
3082 				release_sock(sk);
3083 				return -EINVAL;
3084 			}
3085 			len = tp->saved_syn[0];
3086 			if (put_user(len, optlen)) {
3087 				release_sock(sk);
3088 				return -EFAULT;
3089 			}
3090 			if (copy_to_user(optval, tp->saved_syn + 1, len)) {
3091 				release_sock(sk);
3092 				return -EFAULT;
3093 			}
3094 			tcp_saved_syn_free(tp);
3095 			release_sock(sk);
3096 		} else {
3097 			release_sock(sk);
3098 			len = 0;
3099 			if (put_user(len, optlen))
3100 				return -EFAULT;
3101 		}
3102 		return 0;
3103 	}
3104 	default:
3105 		return -ENOPROTOOPT;
3106 	}
3107 
3108 	if (put_user(len, optlen))
3109 		return -EFAULT;
3110 	if (copy_to_user(optval, &val, len))
3111 		return -EFAULT;
3112 	return 0;
3113 }
3114 
3115 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
3116 		   int __user *optlen)
3117 {
3118 	struct inet_connection_sock *icsk = inet_csk(sk);
3119 
3120 	if (level != SOL_TCP)
3121 		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
3122 						     optval, optlen);
3123 	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
3124 }
3125 EXPORT_SYMBOL(tcp_getsockopt);
3126 
3127 #ifdef CONFIG_COMPAT
3128 int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
3129 			  char __user *optval, int __user *optlen)
3130 {
3131 	if (level != SOL_TCP)
3132 		return inet_csk_compat_getsockopt(sk, level, optname,
3133 						  optval, optlen);
3134 	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
3135 }
3136 EXPORT_SYMBOL(compat_tcp_getsockopt);
3137 #endif
3138 
3139 #ifdef CONFIG_TCP_MD5SIG
3140 static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
3141 static DEFINE_MUTEX(tcp_md5sig_mutex);
3142 static bool tcp_md5sig_pool_populated = false;
3143 
3144 static void __tcp_alloc_md5sig_pool(void)
3145 {
3146 	struct crypto_ahash *hash;
3147 	int cpu;
3148 
3149 	hash = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
3150 	if (IS_ERR(hash))
3151 		return;
3152 
3153 	for_each_possible_cpu(cpu) {
3154 		void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch;
3155 		struct ahash_request *req;
3156 
3157 		if (!scratch) {
3158 			scratch = kmalloc_node(sizeof(union tcp_md5sum_block) +
3159 					       sizeof(struct tcphdr),
3160 					       GFP_KERNEL,
3161 					       cpu_to_node(cpu));
3162 			if (!scratch)
3163 				return;
3164 			per_cpu(tcp_md5sig_pool, cpu).scratch = scratch;
3165 		}
3166 		if (per_cpu(tcp_md5sig_pool, cpu).md5_req)
3167 			continue;
3168 
3169 		req = ahash_request_alloc(hash, GFP_KERNEL);
3170 		if (!req)
3171 			return;
3172 
3173 		ahash_request_set_callback(req, 0, NULL, NULL);
3174 
3175 		per_cpu(tcp_md5sig_pool, cpu).md5_req = req;
3176 	}
3177 	/* before setting tcp_md5sig_pool_populated, we must commit all writes
3178 	 * to memory. See smp_rmb() in tcp_get_md5sig_pool()
3179 	 */
3180 	smp_wmb();
3181 	tcp_md5sig_pool_populated = true;
3182 }
3183 
3184 bool tcp_alloc_md5sig_pool(void)
3185 {
3186 	if (unlikely(!tcp_md5sig_pool_populated)) {
3187 		mutex_lock(&tcp_md5sig_mutex);
3188 
3189 		if (!tcp_md5sig_pool_populated)
3190 			__tcp_alloc_md5sig_pool();
3191 
3192 		mutex_unlock(&tcp_md5sig_mutex);
3193 	}
3194 	return tcp_md5sig_pool_populated;
3195 }
3196 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
3197 
3198 
3199 /**
3200  *	tcp_get_md5sig_pool - get md5sig_pool for this user
3201  *
3202  *	We use percpu structure, so if we succeed, we exit with preemption
3203  *	and BH disabled, to make sure another thread or softirq handling
3204  *	wont try to get same context.
3205  */
3206 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
3207 {
3208 	local_bh_disable();
3209 
3210 	if (tcp_md5sig_pool_populated) {
3211 		/* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
3212 		smp_rmb();
3213 		return this_cpu_ptr(&tcp_md5sig_pool);
3214 	}
3215 	local_bh_enable();
3216 	return NULL;
3217 }
3218 EXPORT_SYMBOL(tcp_get_md5sig_pool);
3219 
3220 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3221 			  const struct sk_buff *skb, unsigned int header_len)
3222 {
3223 	struct scatterlist sg;
3224 	const struct tcphdr *tp = tcp_hdr(skb);
3225 	struct ahash_request *req = hp->md5_req;
3226 	unsigned int i;
3227 	const unsigned int head_data_len = skb_headlen(skb) > header_len ?
3228 					   skb_headlen(skb) - header_len : 0;
3229 	const struct skb_shared_info *shi = skb_shinfo(skb);
3230 	struct sk_buff *frag_iter;
3231 
3232 	sg_init_table(&sg, 1);
3233 
3234 	sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
3235 	ahash_request_set_crypt(req, &sg, NULL, head_data_len);
3236 	if (crypto_ahash_update(req))
3237 		return 1;
3238 
3239 	for (i = 0; i < shi->nr_frags; ++i) {
3240 		const struct skb_frag_struct *f = &shi->frags[i];
3241 		unsigned int offset = f->page_offset;
3242 		struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
3243 
3244 		sg_set_page(&sg, page, skb_frag_size(f),
3245 			    offset_in_page(offset));
3246 		ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f));
3247 		if (crypto_ahash_update(req))
3248 			return 1;
3249 	}
3250 
3251 	skb_walk_frags(skb, frag_iter)
3252 		if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
3253 			return 1;
3254 
3255 	return 0;
3256 }
3257 EXPORT_SYMBOL(tcp_md5_hash_skb_data);
3258 
3259 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
3260 {
3261 	struct scatterlist sg;
3262 
3263 	sg_init_one(&sg, key->key, key->keylen);
3264 	ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen);
3265 	return crypto_ahash_update(hp->md5_req);
3266 }
3267 EXPORT_SYMBOL(tcp_md5_hash_key);
3268 
3269 #endif
3270 
3271 void tcp_done(struct sock *sk)
3272 {
3273 	struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
3274 
3275 	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
3276 		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
3277 
3278 	tcp_set_state(sk, TCP_CLOSE);
3279 	tcp_clear_xmit_timers(sk);
3280 	if (req)
3281 		reqsk_fastopen_remove(sk, req, false);
3282 
3283 	sk->sk_shutdown = SHUTDOWN_MASK;
3284 
3285 	if (!sock_flag(sk, SOCK_DEAD))
3286 		sk->sk_state_change(sk);
3287 	else
3288 		inet_csk_destroy_sock(sk);
3289 }
3290 EXPORT_SYMBOL_GPL(tcp_done);
3291 
3292 int tcp_abort(struct sock *sk, int err)
3293 {
3294 	if (!sk_fullsock(sk)) {
3295 		if (sk->sk_state == TCP_NEW_SYN_RECV) {
3296 			struct request_sock *req = inet_reqsk(sk);
3297 
3298 			local_bh_disable();
3299 			inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
3300 							  req);
3301 			local_bh_enable();
3302 			return 0;
3303 		}
3304 		return -EOPNOTSUPP;
3305 	}
3306 
3307 	/* Don't race with userspace socket closes such as tcp_close. */
3308 	lock_sock(sk);
3309 
3310 	if (sk->sk_state == TCP_LISTEN) {
3311 		tcp_set_state(sk, TCP_CLOSE);
3312 		inet_csk_listen_stop(sk);
3313 	}
3314 
3315 	/* Don't race with BH socket closes such as inet_csk_listen_stop. */
3316 	local_bh_disable();
3317 	bh_lock_sock(sk);
3318 
3319 	if (!sock_flag(sk, SOCK_DEAD)) {
3320 		sk->sk_err = err;
3321 		/* This barrier is coupled with smp_rmb() in tcp_poll() */
3322 		smp_wmb();
3323 		sk->sk_error_report(sk);
3324 		if (tcp_need_reset(sk->sk_state))
3325 			tcp_send_active_reset(sk, GFP_ATOMIC);
3326 		tcp_done(sk);
3327 	}
3328 
3329 	bh_unlock_sock(sk);
3330 	local_bh_enable();
3331 	release_sock(sk);
3332 	return 0;
3333 }
3334 EXPORT_SYMBOL_GPL(tcp_abort);
3335 
3336 extern struct tcp_congestion_ops tcp_reno;
3337 
3338 static __initdata unsigned long thash_entries;
3339 static int __init set_thash_entries(char *str)
3340 {
3341 	ssize_t ret;
3342 
3343 	if (!str)
3344 		return 0;
3345 
3346 	ret = kstrtoul(str, 0, &thash_entries);
3347 	if (ret)
3348 		return 0;
3349 
3350 	return 1;
3351 }
3352 __setup("thash_entries=", set_thash_entries);
3353 
3354 static void __init tcp_init_mem(void)
3355 {
3356 	unsigned long limit = nr_free_buffer_pages() / 16;
3357 
3358 	limit = max(limit, 128UL);
3359 	sysctl_tcp_mem[0] = limit / 4 * 3;		/* 4.68 % */
3360 	sysctl_tcp_mem[1] = limit;			/* 6.25 % */
3361 	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;	/* 9.37 % */
3362 }
3363 
3364 void __init tcp_init(void)
3365 {
3366 	int max_rshare, max_wshare, cnt;
3367 	unsigned long limit;
3368 	unsigned int i;
3369 
3370 	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
3371 		     FIELD_SIZEOF(struct sk_buff, cb));
3372 
3373 	percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
3374 	percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
3375 	inet_hashinfo_init(&tcp_hashinfo);
3376 	tcp_hashinfo.bind_bucket_cachep =
3377 		kmem_cache_create("tcp_bind_bucket",
3378 				  sizeof(struct inet_bind_bucket), 0,
3379 				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3380 
3381 	/* Size and allocate the main established and bind bucket
3382 	 * hash tables.
3383 	 *
3384 	 * The methodology is similar to that of the buffer cache.
3385 	 */
3386 	tcp_hashinfo.ehash =
3387 		alloc_large_system_hash("TCP established",
3388 					sizeof(struct inet_ehash_bucket),
3389 					thash_entries,
3390 					17, /* one slot per 128 KB of memory */
3391 					0,
3392 					NULL,
3393 					&tcp_hashinfo.ehash_mask,
3394 					0,
3395 					thash_entries ? 0 : 512 * 1024);
3396 	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
3397 		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
3398 
3399 	if (inet_ehash_locks_alloc(&tcp_hashinfo))
3400 		panic("TCP: failed to alloc ehash_locks");
3401 	tcp_hashinfo.bhash =
3402 		alloc_large_system_hash("TCP bind",
3403 					sizeof(struct inet_bind_hashbucket),
3404 					tcp_hashinfo.ehash_mask + 1,
3405 					17, /* one slot per 128 KB of memory */
3406 					0,
3407 					&tcp_hashinfo.bhash_size,
3408 					NULL,
3409 					0,
3410 					64 * 1024);
3411 	tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
3412 	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
3413 		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
3414 		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
3415 	}
3416 
3417 
3418 	cnt = tcp_hashinfo.ehash_mask + 1;
3419 	sysctl_tcp_max_orphans = cnt / 2;
3420 
3421 	tcp_init_mem();
3422 	/* Set per-socket limits to no more than 1/128 the pressure threshold */
3423 	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
3424 	max_wshare = min(4UL*1024*1024, limit);
3425 	max_rshare = min(6UL*1024*1024, limit);
3426 
3427 	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
3428 	sysctl_tcp_wmem[1] = 16*1024;
3429 	sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
3430 
3431 	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
3432 	sysctl_tcp_rmem[1] = 87380;
3433 	sysctl_tcp_rmem[2] = max(87380, max_rshare);
3434 
3435 	pr_info("Hash tables configured (established %u bind %u)\n",
3436 		tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
3437 
3438 	tcp_v4_init();
3439 	tcp_metrics_init();
3440 	BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
3441 	tcp_tasklet_init();
3442 }
3443