xref: /openbmc/linux/net/ipv4/tcp.c (revision 483eb062)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Implementation of the Transmission Control Protocol(TCP).
7  *
8  * Authors:	Ross Biro
9  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
11  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
12  *		Florian La Roche, <flla@stud.uni-sb.de>
13  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
14  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
15  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
16  *		Matthew Dillon, <dillon@apollo.west.oic.com>
17  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
18  *		Jorge Cwik, <jorge@laser.satlink.net>
19  *
20  * Fixes:
21  *		Alan Cox	:	Numerous verify_area() calls
22  *		Alan Cox	:	Set the ACK bit on a reset
23  *		Alan Cox	:	Stopped it crashing if it closed while
24  *					sk->inuse=1 and was trying to connect
25  *					(tcp_err()).
26  *		Alan Cox	:	All icmp error handling was broken
27  *					pointers passed where wrong and the
28  *					socket was looked up backwards. Nobody
29  *					tested any icmp error code obviously.
30  *		Alan Cox	:	tcp_err() now handled properly. It
31  *					wakes people on errors. poll
32  *					behaves and the icmp error race
33  *					has gone by moving it into sock.c
34  *		Alan Cox	:	tcp_send_reset() fixed to work for
35  *					everything not just packets for
36  *					unknown sockets.
37  *		Alan Cox	:	tcp option processing.
38  *		Alan Cox	:	Reset tweaked (still not 100%) [Had
39  *					syn rule wrong]
40  *		Herp Rosmanith  :	More reset fixes
41  *		Alan Cox	:	No longer acks invalid rst frames.
42  *					Acking any kind of RST is right out.
43  *		Alan Cox	:	Sets an ignore me flag on an rst
44  *					receive otherwise odd bits of prattle
45  *					escape still
46  *		Alan Cox	:	Fixed another acking RST frame bug.
47  *					Should stop LAN workplace lockups.
48  *		Alan Cox	: 	Some tidyups using the new skb list
49  *					facilities
50  *		Alan Cox	:	sk->keepopen now seems to work
51  *		Alan Cox	:	Pulls options out correctly on accepts
52  *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
53  *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
54  *					bit to skb ops.
55  *		Alan Cox	:	Tidied tcp_data to avoid a potential
56  *					nasty.
57  *		Alan Cox	:	Added some better commenting, as the
58  *					tcp is hard to follow
59  *		Alan Cox	:	Removed incorrect check for 20 * psh
60  *	Michael O'Reilly	:	ack < copied bug fix.
61  *	Johannes Stille		:	Misc tcp fixes (not all in yet).
62  *		Alan Cox	:	FIN with no memory -> CRASH
63  *		Alan Cox	:	Added socket option proto entries.
64  *					Also added awareness of them to accept.
65  *		Alan Cox	:	Added TCP options (SOL_TCP)
66  *		Alan Cox	:	Switched wakeup calls to callbacks,
67  *					so the kernel can layer network
68  *					sockets.
69  *		Alan Cox	:	Use ip_tos/ip_ttl settings.
70  *		Alan Cox	:	Handle FIN (more) properly (we hope).
71  *		Alan Cox	:	RST frames sent on unsynchronised
72  *					state ack error.
73  *		Alan Cox	:	Put in missing check for SYN bit.
74  *		Alan Cox	:	Added tcp_select_window() aka NET2E
75  *					window non shrink trick.
76  *		Alan Cox	:	Added a couple of small NET2E timer
77  *					fixes
78  *		Charles Hedrick :	TCP fixes
79  *		Toomas Tamm	:	TCP window fixes
80  *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
81  *		Charles Hedrick	:	Rewrote most of it to actually work
82  *		Linus		:	Rewrote tcp_read() and URG handling
83  *					completely
84  *		Gerhard Koerting:	Fixed some missing timer handling
85  *		Matthew Dillon  :	Reworked TCP machine states as per RFC
86  *		Gerhard Koerting:	PC/TCP workarounds
87  *		Adam Caldwell	:	Assorted timer/timing errors
88  *		Matthew Dillon	:	Fixed another RST bug
89  *		Alan Cox	:	Move to kernel side addressing changes.
90  *		Alan Cox	:	Beginning work on TCP fastpathing
91  *					(not yet usable)
92  *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
93  *		Alan Cox	:	TCP fast path debugging
94  *		Alan Cox	:	Window clamping
95  *		Michael Riepe	:	Bug in tcp_check()
96  *		Matt Dillon	:	More TCP improvements and RST bug fixes
97  *		Matt Dillon	:	Yet more small nasties removed from the
98  *					TCP code (Be very nice to this man if
99  *					tcp finally works 100%) 8)
100  *		Alan Cox	:	BSD accept semantics.
101  *		Alan Cox	:	Reset on closedown bug.
102  *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
103  *		Michael Pall	:	Handle poll() after URG properly in
104  *					all cases.
105  *		Michael Pall	:	Undo the last fix in tcp_read_urg()
106  *					(multi URG PUSH broke rlogin).
107  *		Michael Pall	:	Fix the multi URG PUSH problem in
108  *					tcp_readable(), poll() after URG
109  *					works now.
110  *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
111  *					BSD api.
112  *		Alan Cox	:	Changed the semantics of sk->socket to
113  *					fix a race and a signal problem with
114  *					accept() and async I/O.
115  *		Alan Cox	:	Relaxed the rules on tcp_sendto().
116  *		Yury Shevchuk	:	Really fixed accept() blocking problem.
117  *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
118  *					clients/servers which listen in on
119  *					fixed ports.
120  *		Alan Cox	:	Cleaned the above up and shrank it to
121  *					a sensible code size.
122  *		Alan Cox	:	Self connect lockup fix.
123  *		Alan Cox	:	No connect to multicast.
124  *		Ross Biro	:	Close unaccepted children on master
125  *					socket close.
126  *		Alan Cox	:	Reset tracing code.
127  *		Alan Cox	:	Spurious resets on shutdown.
128  *		Alan Cox	:	Giant 15 minute/60 second timer error
129  *		Alan Cox	:	Small whoops in polling before an
130  *					accept.
131  *		Alan Cox	:	Kept the state trace facility since
132  *					it's handy for debugging.
133  *		Alan Cox	:	More reset handler fixes.
134  *		Alan Cox	:	Started rewriting the code based on
135  *					the RFC's for other useful protocol
136  *					references see: Comer, KA9Q NOS, and
137  *					for a reference on the difference
138  *					between specifications and how BSD
139  *					works see the 4.4lite source.
140  *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
141  *					close.
142  *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
143  *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
144  *		Alan Cox	:	Reimplemented timers as per the RFC
145  *					and using multiple timers for sanity.
146  *		Alan Cox	:	Small bug fixes, and a lot of new
147  *					comments.
148  *		Alan Cox	:	Fixed dual reader crash by locking
149  *					the buffers (much like datagram.c)
150  *		Alan Cox	:	Fixed stuck sockets in probe. A probe
151  *					now gets fed up of retrying without
152  *					(even a no space) answer.
153  *		Alan Cox	:	Extracted closing code better
154  *		Alan Cox	:	Fixed the closing state machine to
155  *					resemble the RFC.
156  *		Alan Cox	:	More 'per spec' fixes.
157  *		Jorge Cwik	:	Even faster checksumming.
158  *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
159  *					only frames. At least one pc tcp stack
160  *					generates them.
161  *		Alan Cox	:	Cache last socket.
162  *		Alan Cox	:	Per route irtt.
163  *		Matt Day	:	poll()->select() match BSD precisely on error
164  *		Alan Cox	:	New buffers
165  *		Marc Tamsky	:	Various sk->prot->retransmits and
166  *					sk->retransmits misupdating fixed.
167  *					Fixed tcp_write_timeout: stuck close,
168  *					and TCP syn retries gets used now.
169  *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
170  *					ack if state is TCP_CLOSED.
171  *		Alan Cox	:	Look up device on a retransmit - routes may
172  *					change. Doesn't yet cope with MSS shrink right
173  *					but it's a start!
174  *		Marc Tamsky	:	Closing in closing fixes.
175  *		Mike Shaver	:	RFC1122 verifications.
176  *		Alan Cox	:	rcv_saddr errors.
177  *		Alan Cox	:	Block double connect().
178  *		Alan Cox	:	Small hooks for enSKIP.
179  *		Alexey Kuznetsov:	Path MTU discovery.
180  *		Alan Cox	:	Support soft errors.
181  *		Alan Cox	:	Fix MTU discovery pathological case
182  *					when the remote claims no mtu!
183  *		Marc Tamsky	:	TCP_CLOSE fix.
184  *		Colin (G3TNE)	:	Send a reset on syn ack replies in
185  *					window but wrong (fixes NT lpd problems)
186  *		Pedro Roque	:	Better TCP window handling, delayed ack.
187  *		Joerg Reuter	:	No modification of locked buffers in
188  *					tcp_do_retransmit()
189  *		Eric Schenk	:	Changed receiver side silly window
190  *					avoidance algorithm to BSD style
191  *					algorithm. This doubles throughput
192  *					against machines running Solaris,
193  *					and seems to result in general
194  *					improvement.
195  *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
196  *	Willy Konynenberg	:	Transparent proxying support.
197  *	Mike McLagan		:	Routing by source
198  *		Keith Owens	:	Do proper merging with partial SKB's in
199  *					tcp_do_sendmsg to avoid burstiness.
200  *		Eric Schenk	:	Fix fast close down bug with
201  *					shutdown() followed by close().
202  *		Andi Kleen 	:	Make poll agree with SIGIO
203  *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
204  *					lingertime == 0 (RFC 793 ABORT Call)
205  *	Hirokazu Takahashi	:	Use copy_from_user() instead of
206  *					csum_and_copy_from_user() if possible.
207  *
208  *		This program is free software; you can redistribute it and/or
209  *		modify it under the terms of the GNU General Public License
210  *		as published by the Free Software Foundation; either version
211  *		2 of the License, or(at your option) any later version.
212  *
213  * Description of States:
214  *
215  *	TCP_SYN_SENT		sent a connection request, waiting for ack
216  *
217  *	TCP_SYN_RECV		received a connection request, sent ack,
218  *				waiting for final ack in three-way handshake.
219  *
220  *	TCP_ESTABLISHED		connection established
221  *
222  *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
223  *				transmission of remaining buffered data
224  *
225  *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
226  *				to shutdown
227  *
228  *	TCP_CLOSING		both sides have shutdown but we still have
229  *				data we have to finish sending
230  *
231  *	TCP_TIME_WAIT		timeout to catch resent junk before entering
232  *				closed, can only be entered from FIN_WAIT2
233  *				or CLOSING.  Required because the other end
234  *				may not have gotten our last ACK causing it
235  *				to retransmit the data packet (which we ignore)
236  *
237  *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
238  *				us to finish writing our data and to shutdown
239  *				(we have to close() to move on to LAST_ACK)
240  *
241  *	TCP_LAST_ACK		our side has shutdown after remote has
242  *				shutdown.  There may still be data in our
243  *				buffer that we have to finish sending
244  *
245  *	TCP_CLOSE		socket is finished
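 *
 *	For illustration only (an editorial summary of the descriptions
 *	above, not an exhaustive list of transitions), a cleanly closed
 *	connection typically walks through:
 *
 *	  active open:  CLOSE -> SYN_SENT -> ESTABLISHED -> FIN_WAIT1 ->
 *			FIN_WAIT2 -> TIME_WAIT -> CLOSE
 *
 *	  passive open: LISTEN -> SYN_RECV -> ESTABLISHED -> CLOSE_WAIT ->
 *			LAST_ACK -> CLOSE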
246  */
247 
248 #define pr_fmt(fmt) "TCP: " fmt
249 
250 #include <linux/kernel.h>
251 #include <linux/module.h>
252 #include <linux/types.h>
253 #include <linux/fcntl.h>
254 #include <linux/poll.h>
255 #include <linux/init.h>
256 #include <linux/fs.h>
257 #include <linux/skbuff.h>
258 #include <linux/scatterlist.h>
259 #include <linux/splice.h>
260 #include <linux/net.h>
261 #include <linux/socket.h>
262 #include <linux/random.h>
263 #include <linux/bootmem.h>
264 #include <linux/highmem.h>
265 #include <linux/swap.h>
266 #include <linux/cache.h>
267 #include <linux/err.h>
268 #include <linux/crypto.h>
269 #include <linux/time.h>
270 #include <linux/slab.h>
271 
272 #include <net/icmp.h>
273 #include <net/inet_common.h>
274 #include <net/tcp.h>
275 #include <net/xfrm.h>
276 #include <net/ip.h>
277 #include <net/netdma.h>
278 #include <net/sock.h>
279 
280 #include <asm/uaccess.h>
281 #include <asm/ioctls.h>
282 #include <net/busy_poll.h>
283 
284 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
285 
286 int sysctl_tcp_min_tso_segs __read_mostly = 2;
287 
288 int sysctl_tcp_autocorking __read_mostly = 1;
289 
290 struct percpu_counter tcp_orphan_count;
291 EXPORT_SYMBOL_GPL(tcp_orphan_count);
292 
293 long sysctl_tcp_mem[3] __read_mostly;
294 int sysctl_tcp_wmem[3] __read_mostly;
295 int sysctl_tcp_rmem[3] __read_mostly;
296 
297 EXPORT_SYMBOL(sysctl_tcp_mem);
298 EXPORT_SYMBOL(sysctl_tcp_rmem);
299 EXPORT_SYMBOL(sysctl_tcp_wmem);
300 
301 atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
302 EXPORT_SYMBOL(tcp_memory_allocated);
303 
304 /*
305  * Current number of TCP sockets.
306  */
307 struct percpu_counter tcp_sockets_allocated;
308 EXPORT_SYMBOL(tcp_sockets_allocated);
309 
310 /*
311  * TCP splice context
312  */
313 struct tcp_splice_state {
314 	struct pipe_inode_info *pipe;
315 	size_t len;
316 	unsigned int flags;
317 };
318 
319 /*
320  * Pressure flag: try to collapse.
321  * Technical note: it is used by multiple contexts non atomically.
322  * All the __sk_mem_schedule() is of this nature: accounting
323  * is strict, actions are advisory and have some latency.
324  */
325 int tcp_memory_pressure __read_mostly;
326 EXPORT_SYMBOL(tcp_memory_pressure);
327 
328 void tcp_enter_memory_pressure(struct sock *sk)
329 {
330 	if (!tcp_memory_pressure) {
331 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
332 		tcp_memory_pressure = 1;
333 	}
334 }
335 EXPORT_SYMBOL(tcp_enter_memory_pressure);
336 
337 /* Convert seconds to retransmits based on initial and max timeout */
338 static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
339 {
340 	u8 res = 0;
341 
342 	if (seconds > 0) {
343 		int period = timeout;
344 
345 		res = 1;
346 		while (seconds > period && res < 255) {
347 			res++;
348 			timeout <<= 1;
349 			if (timeout > rto_max)
350 				timeout = rto_max;
351 			period += timeout;
352 		}
353 	}
354 	return res;
355 }
356 
357 /* Convert retransmits to seconds based on initial and max timeout */
358 static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
359 {
360 	int period = 0;
361 
362 	if (retrans > 0) {
363 		period = timeout;
364 		while (--retrans) {
365 			timeout <<= 1;
366 			if (timeout > rto_max)
367 				timeout = rto_max;
368 			period += timeout;
369 		}
370 	}
371 	return period;
372 }
373 
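/* A quick worked example (editorial note; timeout and rto_max are shown
 * in seconds purely for readability): with timeout = 1 and rto_max = 60,
 * retrans_to_secs(5, 1, 60) accumulates the exponentially backed-off
 * periods 1 + 2 + 4 + 8 + 16 = 31, and secs_to_retrans(31, 1, 60) maps
 * that back to 5 retransmits.
 */
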
374 /* Address-family independent initialization for a tcp_sock.
375  *
376  * NOTE: A lot of things are set to zero explicitly by the call to
377  *       sk_alloc(), so they need not be done here.
378  */
379 void tcp_init_sock(struct sock *sk)
380 {
381 	struct inet_connection_sock *icsk = inet_csk(sk);
382 	struct tcp_sock *tp = tcp_sk(sk);
383 
384 	__skb_queue_head_init(&tp->out_of_order_queue);
385 	tcp_init_xmit_timers(sk);
386 	tcp_prequeue_init(tp);
387 	INIT_LIST_HEAD(&tp->tsq_node);
388 
389 	icsk->icsk_rto = TCP_TIMEOUT_INIT;
390 	tp->mdev = TCP_TIMEOUT_INIT;
391 
392 	/* So many TCP implementations out there (incorrectly) count the
393 	 * initial SYN frame in their delayed-ACK and congestion control
394 	 * algorithms that we must have the following bandaid to talk
395 	 * efficiently to them.  -DaveM
396 	 */
397 	tp->snd_cwnd = TCP_INIT_CWND;
398 
399 	/* See draft-stevens-tcpca-spec-01 for discussion of the
400 	 * initialization of these values.
401 	 */
402 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
403 	tp->snd_cwnd_clamp = ~0;
404 	tp->mss_cache = TCP_MSS_DEFAULT;
405 
406 	tp->reordering = sysctl_tcp_reordering;
407 	tcp_enable_early_retrans(tp);
408 	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
409 
410 	tp->tsoffset = 0;
411 
412 	sk->sk_state = TCP_CLOSE;
413 
414 	sk->sk_write_space = sk_stream_write_space;
415 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
416 
417 	icsk->icsk_sync_mss = tcp_sync_mss;
418 
419 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
420 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
421 
422 	local_bh_disable();
423 	sock_update_memcg(sk);
424 	sk_sockets_allocated_inc(sk);
425 	local_bh_enable();
426 }
427 EXPORT_SYMBOL(tcp_init_sock);
428 
429 /*
430  *	Wait for a TCP event.
431  *
432  *	Note that we don't need to lock the socket, as the upper poll layers
433  *	take care of normal races (between the test and the event) and we don't
434  *	go look at any of the socket buffers directly.
435  */
436 unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
437 {
438 	unsigned int mask;
439 	struct sock *sk = sock->sk;
440 	const struct tcp_sock *tp = tcp_sk(sk);
441 
442 	sock_rps_record_flow(sk);
443 
444 	sock_poll_wait(file, sk_sleep(sk), wait);
445 	if (sk->sk_state == TCP_LISTEN)
446 		return inet_csk_listen_poll(sk);
447 
448 	/* Socket is not locked. We are protected from async events
449 	 * by poll logic and correct handling of state changes
450 	 * made by other threads is impossible in any case.
451 	 */
452 
453 	mask = 0;
454 
455 	/*
456 	 * POLLHUP is certainly not done right. But poll() doesn't
457 	 * have a notion of HUP in just one direction, and for a
458 	 * socket the read side is more interesting.
459 	 *
460 	 * Some poll() documentation says that POLLHUP is incompatible
461 	 * with the POLLOUT/POLLWR flags, so somebody should check this
462 	 * all. But careful, it tends to be safer to return too many
463 	 * bits than too few, and you can easily break real applications
464 	 * if you don't tell them that something has hung up!
465 	 *
466 	 * Check-me.
467 	 *
468 	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
469 	 * our fs/select.c). It means that after we received EOF,
470 	 * poll always returns immediately, making poll() on write() in state
471 	 * CLOSE_WAIT impossible. One solution is evident --- to set POLLHUP
472 	 * if and only if shutdown has been made in both directions.
473 	 * Actually, it is interesting to look how Solaris and DUX
474 	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
475 	 * then we could set it on SND_SHUTDOWN. BTW examples given
476 	 * in Stevens' books assume exactly this behaviour, it explains
477 	 * why POLLHUP is incompatible with POLLOUT.	--ANK
478 	 *
479 	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
480 	 * blocking on fresh not-connected or disconnected socket. --ANK
481 	 */
482 	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
483 		mask |= POLLHUP;
484 	if (sk->sk_shutdown & RCV_SHUTDOWN)
485 		mask |= POLLIN | POLLRDNORM | POLLRDHUP;
486 
487 	/* Connected or passive Fast Open socket? */
488 	if (sk->sk_state != TCP_SYN_SENT &&
489 	    (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
490 		int target = sock_rcvlowat(sk, 0, INT_MAX);
491 
492 		if (tp->urg_seq == tp->copied_seq &&
493 		    !sock_flag(sk, SOCK_URGINLINE) &&
494 		    tp->urg_data)
495 			target++;
496 
497 		/* Potential race condition. If the read of tp below
498 		 * escapes above the read of sk->sk_state, we can be illegally
499 		 * awakened in SYN_* states. */
500 		if (tp->rcv_nxt - tp->copied_seq >= target)
501 			mask |= POLLIN | POLLRDNORM;
502 
503 		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
504 			if (sk_stream_is_writeable(sk)) {
505 				mask |= POLLOUT | POLLWRNORM;
506 			} else {  /* send SIGIO later */
507 				set_bit(SOCK_ASYNC_NOSPACE,
508 					&sk->sk_socket->flags);
509 				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
510 
511 				/* Race breaker. If space is freed after
512 				 * wspace test but before the flags are set,
513 				 * IO signal will be lost.
514 				 */
515 				if (sk_stream_is_writeable(sk))
516 					mask |= POLLOUT | POLLWRNORM;
517 			}
518 		} else
519 			mask |= POLLOUT | POLLWRNORM;
520 
521 		if (tp->urg_data & TCP_URG_VALID)
522 			mask |= POLLPRI;
523 	}
524 	/* This barrier is coupled with smp_wmb() in tcp_reset() */
525 	smp_rmb();
526 	if (sk->sk_err)
527 		mask |= POLLERR;
528 
529 	return mask;
530 }
531 EXPORT_SYMBOL(tcp_poll);
532 
533 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
534 {
535 	struct tcp_sock *tp = tcp_sk(sk);
536 	int answ;
537 	bool slow;
538 
539 	switch (cmd) {
540 	case SIOCINQ:
541 		if (sk->sk_state == TCP_LISTEN)
542 			return -EINVAL;
543 
544 		slow = lock_sock_fast(sk);
545 		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
546 			answ = 0;
547 		else if (sock_flag(sk, SOCK_URGINLINE) ||
548 			 !tp->urg_data ||
549 			 before(tp->urg_seq, tp->copied_seq) ||
550 			 !before(tp->urg_seq, tp->rcv_nxt)) {
551 
552 			answ = tp->rcv_nxt - tp->copied_seq;
553 
554 			/* Subtract 1, if FIN was received */
555 			if (answ && sock_flag(sk, SOCK_DONE))
556 				answ--;
557 		} else
558 			answ = tp->urg_seq - tp->copied_seq;
559 		unlock_sock_fast(sk, slow);
560 		break;
561 	case SIOCATMARK:
562 		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
563 		break;
564 	case SIOCOUTQ:
565 		if (sk->sk_state == TCP_LISTEN)
566 			return -EINVAL;
567 
568 		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
569 			answ = 0;
570 		else
571 			answ = tp->write_seq - tp->snd_una;
572 		break;
573 	case SIOCOUTQNSD:
574 		if (sk->sk_state == TCP_LISTEN)
575 			return -EINVAL;
576 
577 		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
578 			answ = 0;
579 		else
580 			answ = tp->write_seq - tp->snd_nxt;
581 		break;
582 	default:
583 		return -ENOIOCTLCMD;
584 	}
585 
586 	return put_user(answ, (int __user *)arg);
587 }
588 EXPORT_SYMBOL(tcp_ioctl);
589 
590 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
591 {
592 	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
593 	tp->pushed_seq = tp->write_seq;
594 }
595 
596 static inline bool forced_push(const struct tcp_sock *tp)
597 {
598 	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
599 }
600 
601 static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
602 {
603 	struct tcp_sock *tp = tcp_sk(sk);
604 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
605 
606 	skb->csum    = 0;
607 	tcb->seq     = tcb->end_seq = tp->write_seq;
608 	tcb->tcp_flags = TCPHDR_ACK;
609 	tcb->sacked  = 0;
610 	skb_header_release(skb);
611 	tcp_add_write_queue_tail(sk, skb);
612 	sk->sk_wmem_queued += skb->truesize;
613 	sk_mem_charge(sk, skb->truesize);
614 	if (tp->nonagle & TCP_NAGLE_PUSH)
615 		tp->nonagle &= ~TCP_NAGLE_PUSH;
616 }
617 
618 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
619 {
620 	if (flags & MSG_OOB)
621 		tp->snd_up = tp->write_seq;
622 }
623 
624 /* If a not yet filled skb is pushed, do not send it if
625  * we have data packets in Qdisc or NIC queues :
626  * Because TX completion will happen shortly, it gives a chance
627  * to coalesce future sendmsg() payload into this skb, without
628  * need for a timer, and with no latency trade off.
629  * the need for a timer, and with no latency trade-off.
630  * As packets containing a data payload have a bigger truesize
631  * than pure ACK (dataless) packets, the last checks prevent
632  * or if TX completion was delayed after we processed ACK packet.
633  */
634 static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
635 				int size_goal)
636 {
637 	return skb->len < size_goal &&
638 	       sysctl_tcp_autocorking &&
639 	       skb != tcp_write_queue_head(sk) &&
640 	       atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
641 }
642 
643 static void tcp_push(struct sock *sk, int flags, int mss_now,
644 		     int nonagle, int size_goal)
645 {
646 	struct tcp_sock *tp = tcp_sk(sk);
647 	struct sk_buff *skb;
648 
649 	if (!tcp_send_head(sk))
650 		return;
651 
652 	skb = tcp_write_queue_tail(sk);
653 	if (!(flags & MSG_MORE) || forced_push(tp))
654 		tcp_mark_push(tp, skb);
655 
656 	tcp_mark_urg(tp, flags);
657 
658 	if (tcp_should_autocork(sk, skb, size_goal)) {
659 
660 		/* avoid atomic op if TSQ_THROTTLED bit is already set */
661 		if (!test_bit(TSQ_THROTTLED, &tp->tsq_flags)) {
662 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
663 			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
664 		}
665 		/* It is possible TX completion already happened
666 		 * before we set TSQ_THROTTLED.
667 		 */
668 		if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize)
669 			return;
670 	}
671 
672 	if (flags & MSG_MORE)
673 		nonagle = TCP_NAGLE_CORK;
674 
675 	__tcp_push_pending_frames(sk, mss_now, nonagle);
676 }
677 
678 static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
679 				unsigned int offset, size_t len)
680 {
681 	struct tcp_splice_state *tss = rd_desc->arg.data;
682 	int ret;
683 
684 	ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
685 			      tss->flags);
686 	if (ret > 0)
687 		rd_desc->count -= ret;
688 	return ret;
689 }
690 
691 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
692 {
693 	/* Store TCP splice context information in read_descriptor_t. */
694 	read_descriptor_t rd_desc = {
695 		.arg.data = tss,
696 		.count	  = tss->len,
697 	};
698 
699 	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
700 }
701 
702 /**
703  *  tcp_splice_read - splice data from TCP socket to a pipe
704  * @sock:	socket to splice from
705  * @ppos:	position (not valid)
706  * @pipe:	pipe to splice to
707  * @len:	number of bytes to splice
708  * @flags:	splice modifier flags
709  *
710  * Description:
711  *    Will read pages from given socket and fill them into a pipe.
712  *
713  **/
714 ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
715 			struct pipe_inode_info *pipe, size_t len,
716 			unsigned int flags)
717 {
718 	struct sock *sk = sock->sk;
719 	struct tcp_splice_state tss = {
720 		.pipe = pipe,
721 		.len = len,
722 		.flags = flags,
723 	};
724 	long timeo;
725 	ssize_t spliced;
726 	int ret;
727 
728 	sock_rps_record_flow(sk);
729 	/*
730 	 * We can't seek on a socket input
731 	 */
732 	if (unlikely(*ppos))
733 		return -ESPIPE;
734 
735 	ret = spliced = 0;
736 
737 	lock_sock(sk);
738 
739 	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
740 	while (tss.len) {
741 		ret = __tcp_splice_read(sk, &tss);
742 		if (ret < 0)
743 			break;
744 		else if (!ret) {
745 			if (spliced)
746 				break;
747 			if (sock_flag(sk, SOCK_DONE))
748 				break;
749 			if (sk->sk_err) {
750 				ret = sock_error(sk);
751 				break;
752 			}
753 			if (sk->sk_shutdown & RCV_SHUTDOWN)
754 				break;
755 			if (sk->sk_state == TCP_CLOSE) {
756 				/*
757 				 * This occurs when the user tries to read
758 				 * from a never-connected socket.
759 				 */
760 				if (!sock_flag(sk, SOCK_DONE))
761 					ret = -ENOTCONN;
762 				break;
763 			}
764 			if (!timeo) {
765 				ret = -EAGAIN;
766 				break;
767 			}
768 			sk_wait_data(sk, &timeo);
769 			if (signal_pending(current)) {
770 				ret = sock_intr_errno(timeo);
771 				break;
772 			}
773 			continue;
774 		}
775 		tss.len -= ret;
776 		spliced += ret;
777 
778 		if (!timeo)
779 			break;
780 		release_sock(sk);
781 		lock_sock(sk);
782 
783 		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
784 		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
785 		    signal_pending(current))
786 			break;
787 	}
788 
789 	release_sock(sk);
790 
791 	if (spliced)
792 		return spliced;
793 
794 	return ret;
795 }
796 EXPORT_SYMBOL(tcp_splice_read);
797 
798 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
799 {
800 	struct sk_buff *skb;
801 
802 	/* The TCP header must be at least 32-bit aligned.  */
803 	size = ALIGN(size, 4);
804 
805 	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
806 	if (skb) {
807 		if (sk_wmem_schedule(sk, skb->truesize)) {
808 			skb_reserve(skb, sk->sk_prot->max_header);
809 			/*
810 			 * Make sure that we have exactly size bytes
811 			 * available to the caller, no more, no less.
812 			 */
813 			skb->reserved_tailroom = skb->end - skb->tail - size;
814 			return skb;
815 		}
816 		__kfree_skb(skb);
817 	} else {
818 		sk->sk_prot->enter_memory_pressure(sk);
819 		sk_stream_moderate_sndbuf(sk);
820 	}
821 	return NULL;
822 }
823 
824 static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
825 				       int large_allowed)
826 {
827 	struct tcp_sock *tp = tcp_sk(sk);
828 	u32 xmit_size_goal, old_size_goal;
829 
830 	xmit_size_goal = mss_now;
831 
832 	if (large_allowed && sk_can_gso(sk)) {
833 		u32 gso_size, hlen;
834 
835 		/* Maybe we should/could use sk->sk_prot->max_header here? */
836 		hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
837 		       inet_csk(sk)->icsk_ext_hdr_len +
838 		       tp->tcp_header_len;
839 
840 		/* Goal is to send at least one packet per ms,
841 		 * not one big TSO packet every 100 ms.
842 		 * This preserves ACK clocking and is consistent
843 		 * with tcp_tso_should_defer() heuristic.
844 		 */
845 		gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC);
846 		gso_size = max_t(u32, gso_size,
847 				 sysctl_tcp_min_tso_segs * mss_now);
848 
849 		xmit_size_goal = min_t(u32, gso_size,
850 				       sk->sk_gso_max_size - 1 - hlen);
851 
852 		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
853 
854 		/* We try hard to avoid divides here */
855 		old_size_goal = tp->xmit_size_goal_segs * mss_now;
856 
857 		if (likely(old_size_goal <= xmit_size_goal &&
858 			   old_size_goal + mss_now > xmit_size_goal)) {
859 			xmit_size_goal = old_size_goal;
860 		} else {
861 			tp->xmit_size_goal_segs =
862 				min_t(u16, xmit_size_goal / mss_now,
863 				      sk->sk_gso_max_segs);
864 			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
865 		}
866 	}
867 
868 	return max(xmit_size_goal, mss_now);
869 }
870 
871 static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
872 {
873 	int mss_now;
874 
875 	mss_now = tcp_current_mss(sk);
876 	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
877 
878 	return mss_now;
879 }
880 
881 static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
882 				size_t size, int flags)
883 {
884 	struct tcp_sock *tp = tcp_sk(sk);
885 	int mss_now, size_goal;
886 	int err;
887 	ssize_t copied;
888 	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
889 
890 	/* Wait for a connection to finish. One exception is TCP Fast Open
891 	 * (passive side) where data is allowed to be sent before a connection
892 	 * is fully established.
893 	 */
894 	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
895 	    !tcp_passive_fastopen(sk)) {
896 		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
897 			goto out_err;
898 	}
899 
900 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
901 
902 	mss_now = tcp_send_mss(sk, &size_goal, flags);
903 	copied = 0;
904 
905 	err = -EPIPE;
906 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
907 		goto out_err;
908 
909 	while (size > 0) {
910 		struct sk_buff *skb = tcp_write_queue_tail(sk);
911 		int copy, i;
912 		bool can_coalesce;
913 
914 		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
915 new_segment:
916 			if (!sk_stream_memory_free(sk))
917 				goto wait_for_sndbuf;
918 
919 			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
920 			if (!skb)
921 				goto wait_for_memory;
922 
923 			skb_entail(sk, skb);
924 			copy = size_goal;
925 		}
926 
927 		if (copy > size)
928 			copy = size;
929 
930 		i = skb_shinfo(skb)->nr_frags;
931 		can_coalesce = skb_can_coalesce(skb, i, page, offset);
932 		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
933 			tcp_mark_push(tp, skb);
934 			goto new_segment;
935 		}
936 		if (!sk_wmem_schedule(sk, copy))
937 			goto wait_for_memory;
938 
939 		if (can_coalesce) {
940 			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
941 		} else {
942 			get_page(page);
943 			skb_fill_page_desc(skb, i, page, offset, copy);
944 		}
945 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
946 
947 		skb->len += copy;
948 		skb->data_len += copy;
949 		skb->truesize += copy;
950 		sk->sk_wmem_queued += copy;
951 		sk_mem_charge(sk, copy);
952 		skb->ip_summed = CHECKSUM_PARTIAL;
953 		tp->write_seq += copy;
954 		TCP_SKB_CB(skb)->end_seq += copy;
955 		skb_shinfo(skb)->gso_segs = 0;
956 
957 		if (!copied)
958 			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
959 
960 		copied += copy;
961 		offset += copy;
962 		if (!(size -= copy))
963 			goto out;
964 
965 		if (skb->len < size_goal || (flags & MSG_OOB))
966 			continue;
967 
968 		if (forced_push(tp)) {
969 			tcp_mark_push(tp, skb);
970 			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
971 		} else if (skb == tcp_send_head(sk))
972 			tcp_push_one(sk, mss_now);
973 		continue;
974 
975 wait_for_sndbuf:
976 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
977 wait_for_memory:
978 		tcp_push(sk, flags & ~MSG_MORE, mss_now,
979 			 TCP_NAGLE_PUSH, size_goal);
980 
981 		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
982 			goto do_error;
983 
984 		mss_now = tcp_send_mss(sk, &size_goal, flags);
985 	}
986 
987 out:
988 	if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
989 		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
990 	return copied;
991 
992 do_error:
993 	if (copied)
994 		goto out;
995 out_err:
996 	return sk_stream_error(sk, flags, err);
997 }
998 
999 int tcp_sendpage(struct sock *sk, struct page *page, int offset,
1000 		 size_t size, int flags)
1001 {
1002 	ssize_t res;
1003 
1004 	if (!(sk->sk_route_caps & NETIF_F_SG) ||
1005 	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
1006 		return sock_no_sendpage(sk->sk_socket, page, offset, size,
1007 					flags);
1008 
1009 	lock_sock(sk);
1010 	res = do_tcp_sendpages(sk, page, offset, size, flags);
1011 	release_sock(sk);
1012 	return res;
1013 }
1014 EXPORT_SYMBOL(tcp_sendpage);
1015 
1016 static inline int select_size(const struct sock *sk, bool sg)
1017 {
1018 	const struct tcp_sock *tp = tcp_sk(sk);
1019 	int tmp = tp->mss_cache;
1020 
1021 	if (sg) {
1022 		if (sk_can_gso(sk)) {
1023 			/* Small frames won't use a full page:
1024 			 * Payload will immediately follow tcp header.
1025 			 */
1026 			tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
1027 		} else {
1028 			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
1029 
1030 			if (tmp >= pgbreak &&
1031 			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
1032 				tmp = pgbreak;
1033 		}
1034 	}
1035 
1036 	return tmp;
1037 }
1038 
1039 void tcp_free_fastopen_req(struct tcp_sock *tp)
1040 {
1041 	if (tp->fastopen_req != NULL) {
1042 		kfree(tp->fastopen_req);
1043 		tp->fastopen_req = NULL;
1044 	}
1045 }
1046 
1047 static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
1048 {
1049 	struct tcp_sock *tp = tcp_sk(sk);
1050 	int err, flags;
1051 
1052 	if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
1053 		return -EOPNOTSUPP;
1054 	if (tp->fastopen_req != NULL)
1055 		return -EALREADY; /* Another Fast Open is in progress */
1056 
1057 	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
1058 				   sk->sk_allocation);
1059 	if (unlikely(tp->fastopen_req == NULL))
1060 		return -ENOBUFS;
1061 	tp->fastopen_req->data = msg;
1062 
1063 	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
1064 	err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
1065 				    msg->msg_namelen, flags);
1066 	*size = tp->fastopen_req->copied;
1067 	tcp_free_fastopen_req(tp);
1068 	return err;
1069 }
1070 
1071 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1072 		size_t size)
1073 {
1074 	struct iovec *iov;
1075 	struct tcp_sock *tp = tcp_sk(sk);
1076 	struct sk_buff *skb;
1077 	int iovlen, flags, err, copied = 0;
1078 	int mss_now = 0, size_goal, copied_syn = 0, offset = 0;
1079 	bool sg;
1080 	long timeo;
1081 
1082 	lock_sock(sk);
1083 
1084 	flags = msg->msg_flags;
1085 	if (flags & MSG_FASTOPEN) {
1086 		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn);
1087 		if (err == -EINPROGRESS && copied_syn > 0)
1088 			goto out;
1089 		else if (err)
1090 			goto out_err;
1091 		offset = copied_syn;
1092 	}
1093 
1094 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1095 
1096 	/* Wait for a connection to finish. One exception is TCP Fast Open
1097 	 * (passive side) where data is allowed to be sent before a connection
1098 	 * is fully established.
1099 	 */
1100 	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
1101 	    !tcp_passive_fastopen(sk)) {
1102 		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
1103 			goto do_error;
1104 	}
1105 
1106 	if (unlikely(tp->repair)) {
1107 		if (tp->repair_queue == TCP_RECV_QUEUE) {
1108 			copied = tcp_send_rcvq(sk, msg, size);
1109 			goto out;
1110 		}
1111 
1112 		err = -EINVAL;
1113 		if (tp->repair_queue == TCP_NO_QUEUE)
1114 			goto out_err;
1115 
1116 		/* 'common' sending to sendq */
1117 	}
1118 
1119 	/* This should be in poll */
1120 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1121 
1122 	mss_now = tcp_send_mss(sk, &size_goal, flags);
1123 
1124 	/* Ok commence sending. */
1125 	iovlen = msg->msg_iovlen;
1126 	iov = msg->msg_iov;
1127 	copied = 0;
1128 
1129 	err = -EPIPE;
1130 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
1131 		goto out_err;
1132 
1133 	sg = !!(sk->sk_route_caps & NETIF_F_SG);
1134 
1135 	while (--iovlen >= 0) {
1136 		size_t seglen = iov->iov_len;
1137 		unsigned char __user *from = iov->iov_base;
1138 
1139 		iov++;
1140 		if (unlikely(offset > 0)) {  /* Skip bytes copied in SYN */
1141 			if (offset >= seglen) {
1142 				offset -= seglen;
1143 				continue;
1144 			}
1145 			seglen -= offset;
1146 			from += offset;
1147 			offset = 0;
1148 		}
1149 
1150 		while (seglen > 0) {
1151 			int copy = 0;
1152 			int max = size_goal;
1153 
1154 			skb = tcp_write_queue_tail(sk);
1155 			if (tcp_send_head(sk)) {
1156 				if (skb->ip_summed == CHECKSUM_NONE)
1157 					max = mss_now;
1158 				copy = max - skb->len;
1159 			}
1160 
1161 			if (copy <= 0) {
1162 new_segment:
1163 				/* Allocate a new segment. If the interface is SG,
1164 				 * allocate an skb that fits in a single page.
1165 				 */
1166 				if (!sk_stream_memory_free(sk))
1167 					goto wait_for_sndbuf;
1168 
1169 				skb = sk_stream_alloc_skb(sk,
1170 							  select_size(sk, sg),
1171 							  sk->sk_allocation);
1172 				if (!skb)
1173 					goto wait_for_memory;
1174 
1175 				/*
1176 				 * All packets are restored as if they have
1177 				 * already been sent.
1178 				 */
1179 				if (tp->repair)
1180 					TCP_SKB_CB(skb)->when = tcp_time_stamp;
1181 
1182 				/*
1183 				 * Check whether we can use HW checksum.
1184 				 */
1185 				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
1186 					skb->ip_summed = CHECKSUM_PARTIAL;
1187 
1188 				skb_entail(sk, skb);
1189 				copy = size_goal;
1190 				max = size_goal;
1191 			}
1192 
1193 			/* Try to append data to the end of skb. */
1194 			if (copy > seglen)
1195 				copy = seglen;
1196 
1197 			/* Where to copy to? */
1198 			if (skb_availroom(skb) > 0) {
1199 				/* We have some space in skb head. Superb! */
1200 				copy = min_t(int, copy, skb_availroom(skb));
1201 				err = skb_add_data_nocache(sk, skb, from, copy);
1202 				if (err)
1203 					goto do_fault;
1204 			} else {
1205 				bool merge = true;
1206 				int i = skb_shinfo(skb)->nr_frags;
1207 				struct page_frag *pfrag = sk_page_frag(sk);
1208 
1209 				if (!sk_page_frag_refill(sk, pfrag))
1210 					goto wait_for_memory;
1211 
1212 				if (!skb_can_coalesce(skb, i, pfrag->page,
1213 						      pfrag->offset)) {
1214 					if (i == MAX_SKB_FRAGS || !sg) {
1215 						tcp_mark_push(tp, skb);
1216 						goto new_segment;
1217 					}
1218 					merge = false;
1219 				}
1220 
1221 				copy = min_t(int, copy, pfrag->size - pfrag->offset);
1222 
1223 				if (!sk_wmem_schedule(sk, copy))
1224 					goto wait_for_memory;
1225 
1226 				err = skb_copy_to_page_nocache(sk, from, skb,
1227 							       pfrag->page,
1228 							       pfrag->offset,
1229 							       copy);
1230 				if (err)
1231 					goto do_error;
1232 
1233 				/* Update the skb. */
1234 				if (merge) {
1235 					skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1236 				} else {
1237 					skb_fill_page_desc(skb, i, pfrag->page,
1238 							   pfrag->offset, copy);
1239 					get_page(pfrag->page);
1240 				}
1241 				pfrag->offset += copy;
1242 			}
1243 
1244 			if (!copied)
1245 				TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
1246 
1247 			tp->write_seq += copy;
1248 			TCP_SKB_CB(skb)->end_seq += copy;
1249 			skb_shinfo(skb)->gso_segs = 0;
1250 
1251 			from += copy;
1252 			copied += copy;
1253 			if ((seglen -= copy) == 0 && iovlen == 0)
1254 				goto out;
1255 
1256 			if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
1257 				continue;
1258 
1259 			if (forced_push(tp)) {
1260 				tcp_mark_push(tp, skb);
1261 				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1262 			} else if (skb == tcp_send_head(sk))
1263 				tcp_push_one(sk, mss_now);
1264 			continue;
1265 
1266 wait_for_sndbuf:
1267 			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1268 wait_for_memory:
1269 			if (copied)
1270 				tcp_push(sk, flags & ~MSG_MORE, mss_now,
1271 					 TCP_NAGLE_PUSH, size_goal);
1272 
1273 			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
1274 				goto do_error;
1275 
1276 			mss_now = tcp_send_mss(sk, &size_goal, flags);
1277 		}
1278 	}
1279 
1280 out:
1281 	if (copied)
1282 		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1283 	release_sock(sk);
1284 	return copied + copied_syn;
1285 
1286 do_fault:
1287 	if (!skb->len) {
1288 		tcp_unlink_write_queue(skb, sk);
1289 		/* It is the one place in all of TCP, except connection
1290 		 * reset, where we can be unlinking the send_head.
1291 		 */
1292 		tcp_check_send_head(sk, skb);
1293 		sk_wmem_free_skb(sk, skb);
1294 	}
1295 
1296 do_error:
1297 	if (copied + copied_syn)
1298 		goto out;
1299 out_err:
1300 	err = sk_stream_error(sk, flags, err);
1301 	release_sock(sk);
1302 	return err;
1303 }
1304 EXPORT_SYMBOL(tcp_sendmsg);
1305 
1306 /*
1307  *	Handle reading urgent data. BSD has very simple semantics for
1308  *	this, no blocking and very strange errors 8)
1309  */
1310 
1311 static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
1312 {
1313 	struct tcp_sock *tp = tcp_sk(sk);
1314 
1315 	/* No URG data to read. */
1316 	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1317 	    tp->urg_data == TCP_URG_READ)
1318 		return -EINVAL;	/* Yes this is right ! */
1319 
1320 	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1321 		return -ENOTCONN;
1322 
1323 	if (tp->urg_data & TCP_URG_VALID) {
1324 		int err = 0;
1325 		char c = tp->urg_data;
1326 
1327 		if (!(flags & MSG_PEEK))
1328 			tp->urg_data = TCP_URG_READ;
1329 
1330 		/* Read urgent data. */
1331 		msg->msg_flags |= MSG_OOB;
1332 
1333 		if (len > 0) {
1334 			if (!(flags & MSG_TRUNC))
1335 				err = memcpy_toiovec(msg->msg_iov, &c, 1);
1336 			len = 1;
1337 		} else
1338 			msg->msg_flags |= MSG_TRUNC;
1339 
1340 		return err ? -EFAULT : len;
1341 	}
1342 
1343 	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1344 		return 0;
1345 
1346 	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
1347 	 * the available implementations agree in this case:
1348 	 * this call should never block, independent of the
1349 	 * blocking state of the socket.
1350 	 * Mike <pall@rz.uni-karlsruhe.de>
1351 	 */
1352 	return -EAGAIN;
1353 }
1354 
1355 static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
1356 {
1357 	struct sk_buff *skb;
1358 	int copied = 0, err = 0;
1359 
1360 	/* XXX -- need to support SO_PEEK_OFF */
1361 
1362 	skb_queue_walk(&sk->sk_write_queue, skb) {
1363 		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len);
1364 		if (err)
1365 			break;
1366 
1367 		copied += skb->len;
1368 	}
1369 
1370 	return err ?: copied;
1371 }
1372 
1373 /* Clean up the receive buffer for full frames taken by the user,
1374  * then send an ACK if necessary.  COPIED is the number of bytes
1375  * tcp_recvmsg has given to the user so far, it speeds up the
1376  * calculation of whether or not we must ACK for the sake of
1377  * a window update.
1378  */
1379 void tcp_cleanup_rbuf(struct sock *sk, int copied)
1380 {
1381 	struct tcp_sock *tp = tcp_sk(sk);
1382 	bool time_to_ack = false;
1383 
1384 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1385 
1386 	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1387 	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1388 	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1389 
1390 	if (inet_csk_ack_scheduled(sk)) {
1391 		const struct inet_connection_sock *icsk = inet_csk(sk);
1392 		   /* Delayed ACKs frequently hit locked sockets during bulk
1393 		    * receive. */
1394 		if (icsk->icsk_ack.blocked ||
1395 		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
1396 		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1397 		    /*
1398 		     * If this read emptied the read buffer, we send an ACK when
1399 		     * the connection is not bidirectional, the user has drained
1400 		     * the receive buffer, and there was a small segment
1401 		     * in the queue.
1402 		     */
1403 		    (copied > 0 &&
1404 		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
1405 		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1406 		       !icsk->icsk_ack.pingpong)) &&
1407 		      !atomic_read(&sk->sk_rmem_alloc)))
1408 			time_to_ack = true;
1409 	}
1410 
1411 	/* We send an ACK if we can now advertise a non-zero window
1412 	 * which has been raised "significantly".
1413 	 *
1414 	 * Even if window raised up to infinity, do not send window open ACK
1415 	 * in states, where we will not receive more. It is useless.
1416 	 */
1417 	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1418 		__u32 rcv_window_now = tcp_receive_window(tp);
1419 
1420 		/* Optimize, __tcp_select_window() is not cheap. */
1421 		if (2*rcv_window_now <= tp->window_clamp) {
1422 			__u32 new_window = __tcp_select_window(sk);
1423 
1424 			/* Send ACK now, if this read freed lots of space
1425 			 * in our buffer. Certainly, new_window is the new window.
1426 			 * We can advertise it now if it is not less than the current one.
1427 			 * "Lots" means "at least twice" here.
1428 			 */
1429 			if (new_window && new_window >= 2 * rcv_window_now)
1430 				time_to_ack = true;
1431 		}
1432 	}
1433 	if (time_to_ack)
1434 		tcp_send_ack(sk);
1435 }
1436 
1437 static void tcp_prequeue_process(struct sock *sk)
1438 {
1439 	struct sk_buff *skb;
1440 	struct tcp_sock *tp = tcp_sk(sk);
1441 
1442 	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
1443 
1444 	/* RX process wants to run with disabled BHs, though it is not
1445 	 * necessary */
1446 	local_bh_disable();
1447 	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1448 		sk_backlog_rcv(sk, skb);
1449 	local_bh_enable();
1450 
1451 	/* Clear memory counter. */
1452 	tp->ucopy.memory = 0;
1453 }
1454 
1455 #ifdef CONFIG_NET_DMA
1456 static void tcp_service_net_dma(struct sock *sk, bool wait)
1457 {
1458 	dma_cookie_t done, used;
1459 	dma_cookie_t last_issued;
1460 	struct tcp_sock *tp = tcp_sk(sk);
1461 
1462 	if (!tp->ucopy.dma_chan)
1463 		return;
1464 
1465 	last_issued = tp->ucopy.dma_cookie;
1466 	dma_async_issue_pending(tp->ucopy.dma_chan);
1467 
1468 	do {
1469 		if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
1470 					      last_issued, &done,
1471 					      &used) == DMA_COMPLETE) {
1472 			/* Safe to free early-copied skbs now */
1473 			__skb_queue_purge(&sk->sk_async_wait_queue);
1474 			break;
1475 		} else {
1476 			struct sk_buff *skb;
1477 			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1478 			       (dma_async_is_complete(skb->dma_cookie, done,
1479 						      used) == DMA_COMPLETE)) {
1480 				__skb_dequeue(&sk->sk_async_wait_queue);
1481 				kfree_skb(skb);
1482 			}
1483 		}
1484 	} while (wait);
1485 }
1486 #endif
1487 
1488 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1489 {
1490 	struct sk_buff *skb;
1491 	u32 offset;
1492 
1493 	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1494 		offset = seq - TCP_SKB_CB(skb)->seq;
1495 		if (tcp_hdr(skb)->syn)
1496 			offset--;
1497 		if (offset < skb->len || tcp_hdr(skb)->fin) {
1498 			*off = offset;
1499 			return skb;
1500 		}
1501 		/* This looks weird, but this can happen if TCP collapsing
1502 		 * split a fat GRO packet while we released the socket lock
1503 		 * in skb_splice_bits()
1504 		 */
1505 		sk_eat_skb(sk, skb, false);
1506 	}
1507 	return NULL;
1508 }
1509 
1510 /*
1511  * This routine provides an alternative to tcp_recvmsg() for routines
1512  * that would like to handle copying from skbuffs directly in 'sendfile'
1513  * fashion.
1514  * Note:
1515  *	- It is assumed that the socket was locked by the caller.
1516  *	- The routine does not block.
1517  *	- At present, there is no support for reading OOB data
1518  *	  or for 'peeking' the socket using this routine
1519  *	  (although both would be easy to implement).
1520  */
1521 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1522 		  sk_read_actor_t recv_actor)
1523 {
1524 	struct sk_buff *skb;
1525 	struct tcp_sock *tp = tcp_sk(sk);
1526 	u32 seq = tp->copied_seq;
1527 	u32 offset;
1528 	int copied = 0;
1529 
1530 	if (sk->sk_state == TCP_LISTEN)
1531 		return -ENOTCONN;
1532 	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1533 		if (offset < skb->len) {
1534 			int used;
1535 			size_t len;
1536 
1537 			len = skb->len - offset;
1538 			/* Stop reading if we hit a patch of urgent data */
1539 			if (tp->urg_data) {
1540 				u32 urg_offset = tp->urg_seq - seq;
1541 				if (urg_offset < len)
1542 					len = urg_offset;
1543 				if (!len)
1544 					break;
1545 			}
1546 			used = recv_actor(desc, skb, offset, len);
1547 			if (used <= 0) {
1548 				if (!copied)
1549 					copied = used;
1550 				break;
1551 			} else if (used <= len) {
1552 				seq += used;
1553 				copied += used;
1554 				offset += used;
1555 			}
1556 			/* If recv_actor drops the lock (e.g. TCP splice
1557 			 * receive) the skb pointer might be invalid when
1558 			 * getting here: tcp_collapse might have deleted it
1559 			 * while aggregating skbs from the socket queue.
1560 			 */
1561 			skb = tcp_recv_skb(sk, seq - 1, &offset);
1562 			if (!skb)
1563 				break;
1564 			/* TCP coalescing might have appended data to the skb.
1565 			 * Try to splice more frags
1566 			 */
1567 			if (offset + 1 != skb->len)
1568 				continue;
1569 		}
1570 		if (tcp_hdr(skb)->fin) {
1571 			sk_eat_skb(sk, skb, false);
1572 			++seq;
1573 			break;
1574 		}
1575 		sk_eat_skb(sk, skb, false);
1576 		if (!desc->count)
1577 			break;
1578 		tp->copied_seq = seq;
1579 	}
1580 	tp->copied_seq = seq;
1581 
1582 	tcp_rcv_space_adjust(sk);
1583 
1584 	/* Clean up data we have read: This will do ACK frames. */
1585 	if (copied > 0) {
1586 		tcp_recv_skb(sk, seq, &offset);
1587 		tcp_cleanup_rbuf(sk, copied);
1588 	}
1589 	return copied;
1590 }
1591 EXPORT_SYMBOL(tcp_read_sock);
1592 
1593 /*
1594  *	This routine copies from a sock struct into the user buffer.
1595  *
1596  *	Technical note: in 2.3 we work on _locked_ socket, so that
1597  *	tricks with *seq access order and skb->users are not required.
1598  *	Probably, code can be easily improved even more.
1599  */
1600 
1601 int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1602 		size_t len, int nonblock, int flags, int *addr_len)
1603 {
1604 	struct tcp_sock *tp = tcp_sk(sk);
1605 	int copied = 0;
1606 	u32 peek_seq;
1607 	u32 *seq;
1608 	unsigned long used;
1609 	int err;
1610 	int target;		/* Read at least this many bytes */
1611 	long timeo;
1612 	struct task_struct *user_recv = NULL;
1613 	bool copied_early = false;
1614 	struct sk_buff *skb;
1615 	u32 urg_hole = 0;
1616 
1617 	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
1618 	    (sk->sk_state == TCP_ESTABLISHED))
1619 		sk_busy_loop(sk, nonblock);
1620 
1621 	lock_sock(sk);
1622 
1623 	err = -ENOTCONN;
1624 	if (sk->sk_state == TCP_LISTEN)
1625 		goto out;
1626 
1627 	timeo = sock_rcvtimeo(sk, nonblock);
1628 
1629 	/* Urgent data needs to be handled specially. */
1630 	if (flags & MSG_OOB)
1631 		goto recv_urg;
1632 
1633 	if (unlikely(tp->repair)) {
1634 		err = -EPERM;
1635 		if (!(flags & MSG_PEEK))
1636 			goto out;
1637 
1638 		if (tp->repair_queue == TCP_SEND_QUEUE)
1639 			goto recv_sndq;
1640 
1641 		err = -EINVAL;
1642 		if (tp->repair_queue == TCP_NO_QUEUE)
1643 			goto out;
1644 
1645 		/* 'common' recv queue MSG_PEEK-ing */
1646 	}
1647 
1648 	seq = &tp->copied_seq;
1649 	if (flags & MSG_PEEK) {
1650 		peek_seq = tp->copied_seq;
1651 		seq = &peek_seq;
1652 	}
1653 
1654 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1655 
1656 #ifdef CONFIG_NET_DMA
1657 	tp->ucopy.dma_chan = NULL;
1658 	preempt_disable();
1659 	skb = skb_peek_tail(&sk->sk_receive_queue);
1660 	{
1661 		int available = 0;
1662 
1663 		if (skb)
1664 			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
1665 		if ((available < target) &&
1666 		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1667 		    !sysctl_tcp_low_latency &&
1668 		    net_dma_find_channel()) {
1669 			preempt_enable();
1670 			tp->ucopy.pinned_list =
1671 					dma_pin_iovec_pages(msg->msg_iov, len);
1672 		} else {
1673 			preempt_enable();
1674 		}
1675 	}
1676 #endif
1677 
1678 	do {
1679 		u32 offset;
1680 
1681 		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1682 		if (tp->urg_data && tp->urg_seq == *seq) {
1683 			if (copied)
1684 				break;
1685 			if (signal_pending(current)) {
1686 				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1687 				break;
1688 			}
1689 		}
1690 
1691 		/* Next get a buffer. */
1692 
1693 		skb_queue_walk(&sk->sk_receive_queue, skb) {
1694 			/* Now that we have two receive queues this
1695 			 * shouldn't happen.
1696 			 */
1697 			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
1698 				 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
1699 				 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
1700 				 flags))
1701 				break;
1702 
1703 			offset = *seq - TCP_SKB_CB(skb)->seq;
1704 			if (tcp_hdr(skb)->syn)
1705 				offset--;
1706 			if (offset < skb->len)
1707 				goto found_ok_skb;
1708 			if (tcp_hdr(skb)->fin)
1709 				goto found_fin_ok;
1710 			WARN(!(flags & MSG_PEEK),
1711 			     "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
1712 			     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
1713 		}
1714 
1715 		/* Well, if we have backlog, try to process it now. */
1716 
1717 		if (copied >= target && !sk->sk_backlog.tail)
1718 			break;
1719 
1720 		if (copied) {
1721 			if (sk->sk_err ||
1722 			    sk->sk_state == TCP_CLOSE ||
1723 			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
1724 			    !timeo ||
1725 			    signal_pending(current))
1726 				break;
1727 		} else {
1728 			if (sock_flag(sk, SOCK_DONE))
1729 				break;
1730 
1731 			if (sk->sk_err) {
1732 				copied = sock_error(sk);
1733 				break;
1734 			}
1735 
1736 			if (sk->sk_shutdown & RCV_SHUTDOWN)
1737 				break;
1738 
1739 			if (sk->sk_state == TCP_CLOSE) {
1740 				if (!sock_flag(sk, SOCK_DONE)) {
1741 					/* This occurs when the user tries to read
1742 					 * from a never-connected socket.
1743 					 */
1744 					copied = -ENOTCONN;
1745 					break;
1746 				}
1747 				break;
1748 			}
1749 
1750 			if (!timeo) {
1751 				copied = -EAGAIN;
1752 				break;
1753 			}
1754 
1755 			if (signal_pending(current)) {
1756 				copied = sock_intr_errno(timeo);
1757 				break;
1758 			}
1759 		}
1760 
1761 		tcp_cleanup_rbuf(sk, copied);
1762 
1763 		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1764 			/* Install new reader */
1765 			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1766 				user_recv = current;
1767 				tp->ucopy.task = user_recv;
1768 				tp->ucopy.iov = msg->msg_iov;
1769 			}
1770 
1771 			tp->ucopy.len = len;
1772 
1773 			WARN_ON(tp->copied_seq != tp->rcv_nxt &&
1774 				!(flags & (MSG_PEEK | MSG_TRUNC)));
1775 
1776 			/* Ugly... If prequeue is not empty, we have to
1777 			 * process it before releasing socket, otherwise
1778 			 * order will be broken at second iteration.
1779 			 * More elegant solution is required!!!
1780 			 *
1781 			 * Look: we have the following (pseudo)queues:
1782 			 *
1783 			 * 1. packets in flight
1784 			 * 2. backlog
1785 			 * 3. prequeue
1786 			 * 4. receive_queue
1787 			 *
1788 			 * Each queue can be processed only if the next ones
1789 			 * are empty. At this point we have empty receive_queue.
1790 			 * But prequeue _can_ be not empty after 2nd iteration,
1791 			 * when we jumped to start of loop because backlog
1792 			 * processing added something to receive_queue.
1793 			 * We cannot release_sock(), because backlog contains
1794 			 * packets arrived _after_ prequeued ones.
1795 			 *
1796 			 * In short, the algorithm is clear --- process all
1797 			 * the queues in order. We could do it more directly,
1798 			 * requeueing packets from backlog to prequeue, if it
1799 			 * is not empty. It is more elegant, but eats cycles,
1800 			 * unfortunately.
1801 			 */
1802 			if (!skb_queue_empty(&tp->ucopy.prequeue))
1803 				goto do_prequeue;
1804 
1805 			/* __ Set realtime policy in scheduler __ */
1806 		}
1807 
1808 #ifdef CONFIG_NET_DMA
1809 		if (tp->ucopy.dma_chan) {
1810 			if (tp->rcv_wnd == 0 &&
1811 			    !skb_queue_empty(&sk->sk_async_wait_queue)) {
1812 				tcp_service_net_dma(sk, true);
1813 				tcp_cleanup_rbuf(sk, copied);
1814 			} else
1815 				dma_async_issue_pending(tp->ucopy.dma_chan);
1816 		}
1817 #endif
1818 		if (copied >= target) {
1819 			/* Do not sleep, just process backlog. */
1820 			release_sock(sk);
1821 			lock_sock(sk);
1822 		} else
1823 			sk_wait_data(sk, &timeo);
1824 
1825 #ifdef CONFIG_NET_DMA
1826 		tcp_service_net_dma(sk, false);  /* Don't block */
1827 		tp->ucopy.wakeup = 0;
1828 #endif
1829 
1830 		if (user_recv) {
1831 			int chunk;
1832 
1833 			/* __ Restore normal policy in scheduler __ */
1834 
1835 			if ((chunk = len - tp->ucopy.len) != 0) {
1836 				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1837 				len -= chunk;
1838 				copied += chunk;
1839 			}
1840 
1841 			if (tp->rcv_nxt == tp->copied_seq &&
1842 			    !skb_queue_empty(&tp->ucopy.prequeue)) {
1843 do_prequeue:
1844 				tcp_prequeue_process(sk);
1845 
1846 				if ((chunk = len - tp->ucopy.len) != 0) {
1847 					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1848 					len -= chunk;
1849 					copied += chunk;
1850 				}
1851 			}
1852 		}
1853 		if ((flags & MSG_PEEK) &&
1854 		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
1855 			net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
1856 					    current->comm,
1857 					    task_pid_nr(current));
1858 			peek_seq = tp->copied_seq;
1859 		}
1860 		continue;
1861 
1862 	found_ok_skb:
1863 		/* Ok so how much can we use? */
1864 		used = skb->len - offset;
1865 		if (len < used)
1866 			used = len;
1867 
1868 		/* Do we have urgent data here? */
1869 		if (tp->urg_data) {
1870 			u32 urg_offset = tp->urg_seq - *seq;
1871 			if (urg_offset < used) {
1872 				if (!urg_offset) {
1873 					if (!sock_flag(sk, SOCK_URGINLINE)) {
1874 						++*seq;
1875 						urg_hole++;
1876 						offset++;
1877 						used--;
1878 						if (!used)
1879 							goto skip_copy;
1880 					}
1881 				} else
1882 					used = urg_offset;
1883 			}
1884 		}
1885 
1886 		if (!(flags & MSG_TRUNC)) {
1887 #ifdef CONFIG_NET_DMA
1888 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1889 				tp->ucopy.dma_chan = net_dma_find_channel();
1890 
1891 			if (tp->ucopy.dma_chan) {
1892 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1893 					tp->ucopy.dma_chan, skb, offset,
1894 					msg->msg_iov, used,
1895 					tp->ucopy.pinned_list);
1896 
1897 				if (tp->ucopy.dma_cookie < 0) {
1898 
1899 					pr_alert("%s: dma_cookie < 0\n",
1900 						 __func__);
1901 
1902 					/* Exception. Bailout! */
1903 					if (!copied)
1904 						copied = -EFAULT;
1905 					break;
1906 				}
1907 
1908 				dma_async_issue_pending(tp->ucopy.dma_chan);
1909 
1910 				if ((offset + used) == skb->len)
1911 					copied_early = true;
1912 
1913 			} else
1914 #endif
1915 			{
1916 				err = skb_copy_datagram_iovec(skb, offset,
1917 						msg->msg_iov, used);
1918 				if (err) {
1919 					/* Exception. Bailout! */
1920 					if (!copied)
1921 						copied = -EFAULT;
1922 					break;
1923 				}
1924 			}
1925 		}
1926 
1927 		*seq += used;
1928 		copied += used;
1929 		len -= used;
1930 
1931 		tcp_rcv_space_adjust(sk);
1932 
1933 skip_copy:
1934 		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1935 			tp->urg_data = 0;
1936 			tcp_fast_path_check(sk);
1937 		}
1938 		if (used + offset < skb->len)
1939 			continue;
1940 
1941 		if (tcp_hdr(skb)->fin)
1942 			goto found_fin_ok;
1943 		if (!(flags & MSG_PEEK)) {
1944 			sk_eat_skb(sk, skb, copied_early);
1945 			copied_early = false;
1946 		}
1947 		continue;
1948 
1949 	found_fin_ok:
1950 		/* Process the FIN. */
1951 		++*seq;
1952 		if (!(flags & MSG_PEEK)) {
1953 			sk_eat_skb(sk, skb, copied_early);
1954 			copied_early = false;
1955 		}
1956 		break;
1957 	} while (len > 0);
1958 
1959 	if (user_recv) {
1960 		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1961 			int chunk;
1962 
1963 			tp->ucopy.len = copied > 0 ? len : 0;
1964 
1965 			tcp_prequeue_process(sk);
1966 
1967 			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1968 				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1969 				len -= chunk;
1970 				copied += chunk;
1971 			}
1972 		}
1973 
1974 		tp->ucopy.task = NULL;
1975 		tp->ucopy.len = 0;
1976 	}
1977 
1978 #ifdef CONFIG_NET_DMA
1979 	tcp_service_net_dma(sk, true);  /* Wait for queue to drain */
1980 	tp->ucopy.dma_chan = NULL;
1981 
1982 	if (tp->ucopy.pinned_list) {
1983 		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1984 		tp->ucopy.pinned_list = NULL;
1985 	}
1986 #endif
1987 
1988 	/* According to UNIX98, msg_name/msg_namelen are ignored
1989 	 * on a connected socket. I was just happy when I found this 8) --ANK
1990 	 */
1991 
1992 	/* Clean up data we have read: This will do ACK frames. */
1993 	tcp_cleanup_rbuf(sk, copied);
1994 
1995 	release_sock(sk);
1996 	return copied;
1997 
1998 out:
1999 	release_sock(sk);
2000 	return err;
2001 
2002 recv_urg:
2003 	err = tcp_recv_urg(sk, msg, len, flags);
2004 	goto out;
2005 
2006 recv_sndq:
2007 	err = tcp_peek_sndq(sk, msg, len);
2008 	goto out;
2009 }
2010 EXPORT_SYMBOL(tcp_recvmsg);
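
/* Illustrative userspace sketch (not part of the kernel build): the urgent
 * data handling above is what backs MSG_OOB and SO_OOBINLINE.  Assuming
 * "fd" is a connected TCP socket:
 *
 *	int on = 1;
 *	char buf[4096], oob;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_OOBINLINE, &on, sizeof(on));
 *	recv(fd, buf, sizeof(buf), 0);		// urgent byte arrives in-line
 *						// (SOCK_URGINLINE above)
 *
 *	recv(fd, &oob, 1, MSG_OOB);		// without SO_OOBINLINE, fetch
 *						// it out-of-band instead,
 *						// served by tcp_recv_urg()
 */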
2011 
2012 void tcp_set_state(struct sock *sk, int state)
2013 {
2014 	int oldstate = sk->sk_state;
2015 
2016 	switch (state) {
2017 	case TCP_ESTABLISHED:
2018 		if (oldstate != TCP_ESTABLISHED)
2019 			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2020 		break;
2021 
2022 	case TCP_CLOSE:
2023 		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
2024 			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
2025 
2026 		sk->sk_prot->unhash(sk);
2027 		if (inet_csk(sk)->icsk_bind_hash &&
2028 		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
2029 			inet_put_port(sk);
2030 		/* fall through */
2031 	default:
2032 		if (oldstate == TCP_ESTABLISHED)
2033 			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2034 	}
2035 
2036 	/* Change state AFTER socket is unhashed to avoid closed
2037 	 * socket sitting in hash tables.
2038 	 */
2039 	sk->sk_state = state;
2040 
2041 #ifdef STATE_TRACE
2042 	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
2043 #endif
2044 }
2045 EXPORT_SYMBOL_GPL(tcp_set_state);
2046 
2047 /*
2048  *	State processing on a close. This implements the state shift for
2049  *	sending our FIN frame. Note that we only send a FIN for some
2050  *	states. A shutdown() may have already sent the FIN, or we may be
2051  *	closed.
2052  */
2053 
2054 static const unsigned char new_state[16] = {
2055   /* current state:        new state:      action:	*/
2056   /* (Invalid)		*/ TCP_CLOSE,
2057   /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2058   /* TCP_SYN_SENT	*/ TCP_CLOSE,
2059   /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2060   /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
2061   /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
2062   /* TCP_TIME_WAIT	*/ TCP_CLOSE,
2063   /* TCP_CLOSE		*/ TCP_CLOSE,
2064   /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK  | TCP_ACTION_FIN,
2065   /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
2066   /* TCP_LISTEN		*/ TCP_CLOSE,
2067   /* TCP_CLOSING	*/ TCP_CLOSING,
2068 };
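
/* For example, new_state[TCP_CLOSE_WAIT] is (TCP_LAST_ACK | TCP_ACTION_FIN):
 * a close() in CLOSE-WAIT moves the socket to LAST-ACK and tells the caller
 * (tcp_close_state() below) that a FIN still has to be sent.
 */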
2069 
2070 static int tcp_close_state(struct sock *sk)
2071 {
2072 	int next = (int)new_state[sk->sk_state];
2073 	int ns = next & TCP_STATE_MASK;
2074 
2075 	tcp_set_state(sk, ns);
2076 
2077 	return next & TCP_ACTION_FIN;
2078 }
2079 
2080 /*
2081  *	Shutdown the sending side of a connection. Much like close except
2082  *	that we don't shut down the receive side or sock_set_flag(sk, SOCK_DEAD).
2083  */
2084 
2085 void tcp_shutdown(struct sock *sk, int how)
2086 {
2087 	/*	We need to grab some memory, and put together a FIN,
2088 	 *	and then put it into the queue to be sent.
2089 	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
2090 	 */
2091 	if (!(how & SEND_SHUTDOWN))
2092 		return;
2093 
2094 	/* If we've already sent a FIN, or it's a closed state, skip this. */
2095 	if ((1 << sk->sk_state) &
2096 	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
2097 	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
2098 		/* Clear out any half completed packets.  FIN if needed. */
2099 		if (tcp_close_state(sk))
2100 			tcp_send_fin(sk);
2101 	}
2102 }
2103 EXPORT_SYMBOL(tcp_shutdown);
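
/* Illustrative userspace sketch (not part of the kernel build): the
 * SEND_SHUTDOWN path above is the half-close issued by an application.
 * Assuming "fd" is a connected TCP socket:
 *
 *	shutdown(fd, SHUT_WR);		// queues our FIN, keeps receiving
 *
 * Reads on fd keep working until the peer closes its side; only close()
 * (or SHUT_RDWR) gives up the receive direction as well.
 */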
2104 
2105 bool tcp_check_oom(struct sock *sk, int shift)
2106 {
2107 	bool too_many_orphans, out_of_socket_memory;
2108 
2109 	too_many_orphans = tcp_too_many_orphans(sk, shift);
2110 	out_of_socket_memory = tcp_out_of_memory(sk);
2111 
2112 	if (too_many_orphans)
2113 		net_info_ratelimited("too many orphaned sockets\n");
2114 	if (out_of_socket_memory)
2115 		net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
2116 	return too_many_orphans || out_of_socket_memory;
2117 }
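
/* The two limits checked above are administrator-tunable: the orphan limit
 * via net.ipv4.tcp_max_orphans and the memory limit via net.ipv4.tcp_mem
 * (sysctl_tcp_mem[], initialized in tcp_init_mem() below).
 */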
2118 
2119 void tcp_close(struct sock *sk, long timeout)
2120 {
2121 	struct sk_buff *skb;
2122 	int data_was_unread = 0;
2123 	int state;
2124 
2125 	lock_sock(sk);
2126 	sk->sk_shutdown = SHUTDOWN_MASK;
2127 
2128 	if (sk->sk_state == TCP_LISTEN) {
2129 		tcp_set_state(sk, TCP_CLOSE);
2130 
2131 		/* Special case. */
2132 		inet_csk_listen_stop(sk);
2133 
2134 		goto adjudge_to_death;
2135 	}
2136 
2137 	/*  We need to flush the recv. buffs.  We do this only on the
2138 	 *  descriptor close, not protocol-sourced closes, because the
2139 	 *  reader process may not have drained the data yet!
2140 	 */
2141 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
2142 		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
2143 			  tcp_hdr(skb)->fin;
2144 		data_was_unread += len;
2145 		__kfree_skb(skb);
2146 	}
2147 
2148 	sk_mem_reclaim(sk);
2149 
2150 	/* If the socket has already been reset (e.g. in tcp_reset()) - kill it. */
2151 	if (sk->sk_state == TCP_CLOSE)
2152 		goto adjudge_to_death;
2153 
2154 	/* As outlined in RFC 2525, section 2.17, we send a RST here because
2155 	 * data was lost. To witness the awful effects of the old behavior of
2156 	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
2157 	 * GET in an FTP client, suspend the process, wait for the client to
2158 	 * advertise a zero window, then kill -9 the FTP client, wheee...
2159 	 * Note: timeout is always zero in such a case.
2160 	 */
2161 	if (unlikely(tcp_sk(sk)->repair)) {
2162 		sk->sk_prot->disconnect(sk, 0);
2163 	} else if (data_was_unread) {
2164 		/* Unread data was tossed, zap the connection. */
2165 		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
2166 		tcp_set_state(sk, TCP_CLOSE);
2167 		tcp_send_active_reset(sk, sk->sk_allocation);
2168 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
2169 		/* Check zero linger _after_ checking for unread data. */
2170 		sk->sk_prot->disconnect(sk, 0);
2171 		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
2172 	} else if (tcp_close_state(sk)) {
2173 		/* We FIN if the application ate all the data before
2174 		 * zapping the connection.
2175 		 */
2176 
2177 		/* RED-PEN. Formally speaking, we have broken the TCP state
2178 		 * machine. State transitions:
2179 		 *
2180 		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
2181 		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
2182 		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
2183 		 *
2184 		 * are legal only when FIN has been sent (i.e. in window),
2185 		 * rather than queued out of window. Purists blame.
2186 		 *
2187 		 * F.e. "RFC state" is ESTABLISHED,
2188 		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
2189 		 *
2190 		 * The visible deviations are that sometimes
2191 		 * we enter the time-wait state when it is not really required
2192 		 * (harmless), and do not send active resets when they are
2193 		 * required by the specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
2194 		 * they look like CLOSING or LAST_ACK to Linux).
2195 		 * Probably, I missed some more holelets.
2196 		 * 						--ANK
2197 		 * XXX (TFO) - To start off we don't support SYN+ACK+FIN
2198 		 * in a single packet! (May consider it later but will
2199 		 * probably need API support or TCP_CORK SYN-ACK until
2200 		 * data is written and socket is closed.)
2201 		 */
2202 		tcp_send_fin(sk);
2203 	}
2204 
2205 	sk_stream_wait_close(sk, timeout);
2206 
2207 adjudge_to_death:
2208 	state = sk->sk_state;
2209 	sock_hold(sk);
2210 	sock_orphan(sk);
2211 
2212 	/* It is the last release_sock in its life. It will remove backlog. */
2213 	release_sock(sk);
2214 
2215 
2216 	/* Now the socket is owned by the kernel and we acquire the BH lock
2217 	 * to finish the close. No need to check for user refs.
2218 	 */
2219 	local_bh_disable();
2220 	bh_lock_sock(sk);
2221 	WARN_ON(sock_owned_by_user(sk));
2222 
2223 	percpu_counter_inc(sk->sk_prot->orphan_count);
2224 
2225 	/* Have we already been destroyed by a softirq or backlog? */
2226 	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
2227 		goto out;
2228 
2229 	/*	This is a (useful) BSD violation of the RFC. There is a
2230 	 *	problem with TCP as specified, in that the other end could
2231 	 *	keep a socket open forever with no application left at this end.
2232 	 *	We use a 3 minute timeout (about the same as BSD) and then kill
2233 	 *	our end. If they send after that then tough - BUT it is long
2234 	 *	enough that we won't repeat the old "4*rto = almost no time -
2235 	 *	whoops reset" mistake.
2236 	 *
2237 	 *	Nope, it was not a mistake. It is really the desired behaviour,
2238 	 *	e.g. on HTTP servers, where such sockets are useless but
2239 	 *	consume significant resources. Let's do it with a special
2240 	 *	linger2 option.					--ANK
2241 	 */
2242 
2243 	if (sk->sk_state == TCP_FIN_WAIT2) {
2244 		struct tcp_sock *tp = tcp_sk(sk);
2245 		if (tp->linger2 < 0) {
2246 			tcp_set_state(sk, TCP_CLOSE);
2247 			tcp_send_active_reset(sk, GFP_ATOMIC);
2248 			NET_INC_STATS_BH(sock_net(sk),
2249 					LINUX_MIB_TCPABORTONLINGER);
2250 		} else {
2251 			const int tmo = tcp_fin_time(sk);
2252 
2253 			if (tmo > TCP_TIMEWAIT_LEN) {
2254 				inet_csk_reset_keepalive_timer(sk,
2255 						tmo - TCP_TIMEWAIT_LEN);
2256 			} else {
2257 				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
2258 				goto out;
2259 			}
2260 		}
2261 	}
2262 	if (sk->sk_state != TCP_CLOSE) {
2263 		sk_mem_reclaim(sk);
2264 		if (tcp_check_oom(sk, 0)) {
2265 			tcp_set_state(sk, TCP_CLOSE);
2266 			tcp_send_active_reset(sk, GFP_ATOMIC);
2267 			NET_INC_STATS_BH(sock_net(sk),
2268 					LINUX_MIB_TCPABORTONMEMORY);
2269 		}
2270 	}
2271 
2272 	if (sk->sk_state == TCP_CLOSE) {
2273 		struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
2274 		/* We could get here with a non-NULL req if the socket is
2275 		 * aborted (e.g., closed with unread data) before 3WHS
2276 		 * finishes.
2277 		 */
2278 		if (req != NULL)
2279 			reqsk_fastopen_remove(sk, req, false);
2280 		inet_csk_destroy_sock(sk);
2281 	}
2282 	/* Otherwise, socket is reprieved until protocol close. */
2283 
2284 out:
2285 	bh_unlock_sock(sk);
2286 	local_bh_enable();
2287 	sock_put(sk);
2288 }
2289 EXPORT_SYMBOL(tcp_close);
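
/* Illustrative userspace sketch (not part of the kernel build): the
 * SOCK_LINGER/zero-linger branch above is how an application forces an
 * abortive close, i.e. an immediate RST instead of the normal FIN
 * handshake.  Assuming "fd" is a connected TCP socket:
 *
 *	struct linger lg = { .l_onoff = 1, .l_linger = 0 };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
 *	close(fd);			// peer typically sees ECONNRESET
 */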
2290 
2291 /* These states need RST on ABORT according to RFC793 */
2292 
2293 static inline bool tcp_need_reset(int state)
2294 {
2295 	return (1 << state) &
2296 	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
2297 		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
2298 }
2299 
2300 int tcp_disconnect(struct sock *sk, int flags)
2301 {
2302 	struct inet_sock *inet = inet_sk(sk);
2303 	struct inet_connection_sock *icsk = inet_csk(sk);
2304 	struct tcp_sock *tp = tcp_sk(sk);
2305 	int err = 0;
2306 	int old_state = sk->sk_state;
2307 
2308 	if (old_state != TCP_CLOSE)
2309 		tcp_set_state(sk, TCP_CLOSE);
2310 
2311 	/* ABORT function of RFC793 */
2312 	if (old_state == TCP_LISTEN) {
2313 		inet_csk_listen_stop(sk);
2314 	} else if (unlikely(tp->repair)) {
2315 		sk->sk_err = ECONNABORTED;
2316 	} else if (tcp_need_reset(old_state) ||
2317 		   (tp->snd_nxt != tp->write_seq &&
2318 		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
2319 		/* The last check adjusts for the discrepancy between Linux and
2320 		 * RFC states.
2321 		 */
2322 		tcp_send_active_reset(sk, gfp_any());
2323 		sk->sk_err = ECONNRESET;
2324 	} else if (old_state == TCP_SYN_SENT)
2325 		sk->sk_err = ECONNRESET;
2326 
2327 	tcp_clear_xmit_timers(sk);
2328 	__skb_queue_purge(&sk->sk_receive_queue);
2329 	tcp_write_queue_purge(sk);
2330 	__skb_queue_purge(&tp->out_of_order_queue);
2331 #ifdef CONFIG_NET_DMA
2332 	__skb_queue_purge(&sk->sk_async_wait_queue);
2333 #endif
2334 
2335 	inet->inet_dport = 0;
2336 
2337 	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
2338 		inet_reset_saddr(sk);
2339 
2340 	sk->sk_shutdown = 0;
2341 	sock_reset_flag(sk, SOCK_DONE);
2342 	tp->srtt = 0;
2343 	if ((tp->write_seq += tp->max_window + 2) == 0)
2344 		tp->write_seq = 1;
2345 	icsk->icsk_backoff = 0;
2346 	tp->snd_cwnd = 2;
2347 	icsk->icsk_probes_out = 0;
2348 	tp->packets_out = 0;
2349 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
2350 	tp->snd_cwnd_cnt = 0;
2351 	tp->window_clamp = 0;
2352 	tcp_set_ca_state(sk, TCP_CA_Open);
2353 	tcp_clear_retrans(tp);
2354 	inet_csk_delack_init(sk);
2355 	tcp_init_send_head(sk);
2356 	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2357 	__sk_dst_reset(sk);
2358 
2359 	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
2360 
2361 	sk->sk_error_report(sk);
2362 	return err;
2363 }
2364 EXPORT_SYMBOL(tcp_disconnect);
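
/* Illustrative userspace sketch (not part of the kernel build):
 * tcp_disconnect() is reached via sk->sk_prot->disconnect(), e.g. when an
 * application dissolves the association by connecting to AF_UNSPEC.  The
 * socket returns to TCP_CLOSE, sending a RST first if the old state
 * required one (see tcp_need_reset() above).  Assuming "fd" is a TCP
 * socket:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &sa, sizeof(sa));
 */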
2365 
2366 void tcp_sock_destruct(struct sock *sk)
2367 {
2368 	inet_sock_destruct(sk);
2369 
2370 	kfree(inet_csk(sk)->icsk_accept_queue.fastopenq);
2371 }
2372 
2373 static inline bool tcp_can_repair_sock(const struct sock *sk)
2374 {
2375 	return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
2376 		((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
2377 }
2378 
2379 static int tcp_repair_options_est(struct tcp_sock *tp,
2380 		struct tcp_repair_opt __user *optbuf, unsigned int len)
2381 {
2382 	struct tcp_repair_opt opt;
2383 
2384 	while (len >= sizeof(opt)) {
2385 		if (copy_from_user(&opt, optbuf, sizeof(opt)))
2386 			return -EFAULT;
2387 
2388 		optbuf++;
2389 		len -= sizeof(opt);
2390 
2391 		switch (opt.opt_code) {
2392 		case TCPOPT_MSS:
2393 			tp->rx_opt.mss_clamp = opt.opt_val;
2394 			break;
2395 		case TCPOPT_WINDOW:
2396 			{
2397 				u16 snd_wscale = opt.opt_val & 0xFFFF;
2398 				u16 rcv_wscale = opt.opt_val >> 16;
2399 
2400 				if (snd_wscale > 14 || rcv_wscale > 14)
2401 					return -EFBIG;
2402 
2403 				tp->rx_opt.snd_wscale = snd_wscale;
2404 				tp->rx_opt.rcv_wscale = rcv_wscale;
2405 				tp->rx_opt.wscale_ok = 1;
2406 			}
2407 			break;
2408 		case TCPOPT_SACK_PERM:
2409 			if (opt.opt_val != 0)
2410 				return -EINVAL;
2411 
2412 			tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
2413 			if (sysctl_tcp_fack)
2414 				tcp_enable_fack(tp);
2415 			break;
2416 		case TCPOPT_TIMESTAMP:
2417 			if (opt.opt_val != 0)
2418 				return -EINVAL;
2419 
2420 			tp->rx_opt.tstamp_ok = 1;
2421 			break;
2422 		}
2423 	}
2424 
2425 	return 0;
2426 }
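
/* Illustrative userspace sketch (not part of the kernel build): a
 * checkpoint/restore tool replays the negotiated options through
 * TCP_REPAIR_OPTIONS while the socket is in repair mode (TCP_REPAIR below)
 * and already in TCP_ESTABLISHED:
 *
 *	struct tcp_repair_opt opts[] = {
 *		{ TCPOPT_MSS,       1460 },
 *		{ TCPOPT_WINDOW,    7 | (7 << 16) },	// snd | (rcv << 16)
 *		{ TCPOPT_SACK_PERM, 0 },
 *	};
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_OPTIONS, opts, sizeof(opts));
 *
 * (The TCPOPT_* values here are the kernel constants from net/tcp.h;
 * userspace tools such as CRIU carry their own copies of them.)
 */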
2427 
2428 /*
2429  *	Socket option code for TCP.
2430  */
2431 static int do_tcp_setsockopt(struct sock *sk, int level,
2432 		int optname, char __user *optval, unsigned int optlen)
2433 {
2434 	struct tcp_sock *tp = tcp_sk(sk);
2435 	struct inet_connection_sock *icsk = inet_csk(sk);
2436 	int val;
2437 	int err = 0;
2438 
2439 	/* These are data/string values, all the others are ints */
2440 	switch (optname) {
2441 	case TCP_CONGESTION: {
2442 		char name[TCP_CA_NAME_MAX];
2443 
2444 		if (optlen < 1)
2445 			return -EINVAL;
2446 
2447 		val = strncpy_from_user(name, optval,
2448 					min_t(long, TCP_CA_NAME_MAX-1, optlen));
2449 		if (val < 0)
2450 			return -EFAULT;
2451 		name[val] = 0;
2452 
2453 		lock_sock(sk);
2454 		err = tcp_set_congestion_control(sk, name);
2455 		release_sock(sk);
2456 		return err;
2457 	}
2458 	default:
2459 		/* fallthru */
2460 		break;
2461 	}
2462 
2463 	if (optlen < sizeof(int))
2464 		return -EINVAL;
2465 
2466 	if (get_user(val, (int __user *)optval))
2467 		return -EFAULT;
2468 
2469 	lock_sock(sk);
2470 
2471 	switch (optname) {
2472 	case TCP_MAXSEG:
2473 		/* Values greater than the interface MTU won't take effect. However,
2474 		 * at the point when this call is done we typically don't yet
2475 		 * know which interface is going to be used. */
2476 		if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
2477 			err = -EINVAL;
2478 			break;
2479 		}
2480 		tp->rx_opt.user_mss = val;
2481 		break;
2482 
2483 	case TCP_NODELAY:
2484 		if (val) {
2485 			/* TCP_NODELAY is weaker than TCP_CORK, so that
2486 			 * this option on a corked socket is remembered, but
2487 			 * it is not activated until cork is cleared.
2488 			 *
2489 			 * However, when TCP_NODELAY is set we make
2490 			 * an explicit push, which overrides even TCP_CORK
2491 			 * for currently queued segments.
2492 			 */
2493 			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
2494 			tcp_push_pending_frames(sk);
2495 		} else {
2496 			tp->nonagle &= ~TCP_NAGLE_OFF;
2497 		}
2498 		break;
2499 
2500 	case TCP_THIN_LINEAR_TIMEOUTS:
2501 		if (val < 0 || val > 1)
2502 			err = -EINVAL;
2503 		else
2504 			tp->thin_lto = val;
2505 		break;
2506 
2507 	case TCP_THIN_DUPACK:
2508 		if (val < 0 || val > 1)
2509 			err = -EINVAL;
2510 		else {
2511 			tp->thin_dupack = val;
2512 			if (tp->thin_dupack)
2513 				tcp_disable_early_retrans(tp);
2514 		}
2515 		break;
2516 
2517 	case TCP_REPAIR:
2518 		if (!tcp_can_repair_sock(sk))
2519 			err = -EPERM;
2520 		else if (val == 1) {
2521 			tp->repair = 1;
2522 			sk->sk_reuse = SK_FORCE_REUSE;
2523 			tp->repair_queue = TCP_NO_QUEUE;
2524 		} else if (val == 0) {
2525 			tp->repair = 0;
2526 			sk->sk_reuse = SK_NO_REUSE;
2527 			tcp_send_window_probe(sk);
2528 		} else
2529 			err = -EINVAL;
2530 
2531 		break;
2532 
2533 	case TCP_REPAIR_QUEUE:
2534 		if (!tp->repair)
2535 			err = -EPERM;
2536 		else if (val < TCP_QUEUES_NR)
2537 			tp->repair_queue = val;
2538 		else
2539 			err = -EINVAL;
2540 		break;
2541 
2542 	case TCP_QUEUE_SEQ:
2543 		if (sk->sk_state != TCP_CLOSE)
2544 			err = -EPERM;
2545 		else if (tp->repair_queue == TCP_SEND_QUEUE)
2546 			tp->write_seq = val;
2547 		else if (tp->repair_queue == TCP_RECV_QUEUE)
2548 			tp->rcv_nxt = val;
2549 		else
2550 			err = -EINVAL;
2551 		break;
2552 
2553 	case TCP_REPAIR_OPTIONS:
2554 		if (!tp->repair)
2555 			err = -EINVAL;
2556 		else if (sk->sk_state == TCP_ESTABLISHED)
2557 			err = tcp_repair_options_est(tp,
2558 					(struct tcp_repair_opt __user *)optval,
2559 					optlen);
2560 		else
2561 			err = -EPERM;
2562 		break;
2563 
2564 	case TCP_CORK:
2565 		/* When set, indicates that non-full frames should always be queued.
2566 		 * Later the user clears this option and we transmit
2567 		 * any pending partial frames in the queue.  This is
2568 		 * meant to be used alongside sendfile() to get properly
2569 		 * filled frames when the user (for example) must write
2570 		 * out headers with a write() call first and then use
2571 		 * sendfile to send out the data parts.
2572 		 *
2573 		 * TCP_CORK can be set together with TCP_NODELAY and it is
2574 		 * stronger than TCP_NODELAY.
2575 		 */
2576 		if (val) {
2577 			tp->nonagle |= TCP_NAGLE_CORK;
2578 		} else {
2579 			tp->nonagle &= ~TCP_NAGLE_CORK;
2580 			if (tp->nonagle&TCP_NAGLE_OFF)
2581 				tp->nonagle |= TCP_NAGLE_PUSH;
2582 			tcp_push_pending_frames(sk);
2583 		}
2584 		break;
2585 
2586 	case TCP_KEEPIDLE:
2587 		if (val < 1 || val > MAX_TCP_KEEPIDLE)
2588 			err = -EINVAL;
2589 		else {
2590 			tp->keepalive_time = val * HZ;
2591 			if (sock_flag(sk, SOCK_KEEPOPEN) &&
2592 			    !((1 << sk->sk_state) &
2593 			      (TCPF_CLOSE | TCPF_LISTEN))) {
2594 				u32 elapsed = keepalive_time_elapsed(tp);
2595 				if (tp->keepalive_time > elapsed)
2596 					elapsed = tp->keepalive_time - elapsed;
2597 				else
2598 					elapsed = 0;
2599 				inet_csk_reset_keepalive_timer(sk, elapsed);
2600 			}
2601 		}
2602 		break;
2603 	case TCP_KEEPINTVL:
2604 		if (val < 1 || val > MAX_TCP_KEEPINTVL)
2605 			err = -EINVAL;
2606 		else
2607 			tp->keepalive_intvl = val * HZ;
2608 		break;
2609 	case TCP_KEEPCNT:
2610 		if (val < 1 || val > MAX_TCP_KEEPCNT)
2611 			err = -EINVAL;
2612 		else
2613 			tp->keepalive_probes = val;
2614 		break;
2615 	case TCP_SYNCNT:
2616 		if (val < 1 || val > MAX_TCP_SYNCNT)
2617 			err = -EINVAL;
2618 		else
2619 			icsk->icsk_syn_retries = val;
2620 		break;
2621 
2622 	case TCP_LINGER2:
2623 		if (val < 0)
2624 			tp->linger2 = -1;
2625 		else if (val > sysctl_tcp_fin_timeout / HZ)
2626 			tp->linger2 = 0;
2627 		else
2628 			tp->linger2 = val * HZ;
2629 		break;
2630 
2631 	case TCP_DEFER_ACCEPT:
2632 		/* Translate value in seconds to number of retransmits */
2633 		icsk->icsk_accept_queue.rskq_defer_accept =
2634 			secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
2635 					TCP_RTO_MAX / HZ);
2636 		break;
2637 
2638 	case TCP_WINDOW_CLAMP:
2639 		if (!val) {
2640 			if (sk->sk_state != TCP_CLOSE) {
2641 				err = -EINVAL;
2642 				break;
2643 			}
2644 			tp->window_clamp = 0;
2645 		} else
2646 			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2647 						SOCK_MIN_RCVBUF / 2 : val;
2648 		break;
2649 
2650 	case TCP_QUICKACK:
2651 		if (!val) {
2652 			icsk->icsk_ack.pingpong = 1;
2653 		} else {
2654 			icsk->icsk_ack.pingpong = 0;
2655 			if ((1 << sk->sk_state) &
2656 			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2657 			    inet_csk_ack_scheduled(sk)) {
2658 				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
2659 				tcp_cleanup_rbuf(sk, 1);
2660 				if (!(val & 1))
2661 					icsk->icsk_ack.pingpong = 1;
2662 			}
2663 		}
2664 		break;
2665 
2666 #ifdef CONFIG_TCP_MD5SIG
2667 	case TCP_MD5SIG:
2668 		/* Read the IP->Key mappings from userspace */
2669 		err = tp->af_specific->md5_parse(sk, optval, optlen);
2670 		break;
2671 #endif
2672 	case TCP_USER_TIMEOUT:
2673 		/* Cap the maximum time in ms that TCP will retry/retransmit
2674 		 * before giving up and aborting (ETIMEDOUT) a connection.
2675 		 */
2676 		if (val < 0)
2677 			err = -EINVAL;
2678 		else
2679 			icsk->icsk_user_timeout = msecs_to_jiffies(val);
2680 		break;
2681 
2682 	case TCP_FASTOPEN:
2683 		if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
2684 		    TCPF_LISTEN)))
2685 			err = fastopen_init_queue(sk, val);
2686 		else
2687 			err = -EINVAL;
2688 		break;
2689 	case TCP_TIMESTAMP:
2690 		if (!tp->repair)
2691 			err = -EPERM;
2692 		else
2693 			tp->tsoffset = val - tcp_time_stamp;
2694 		break;
2695 	case TCP_NOTSENT_LOWAT:
2696 		tp->notsent_lowat = val;
2697 		sk->sk_write_space(sk);
2698 		break;
2699 	default:
2700 		err = -ENOPROTOOPT;
2701 		break;
2702 	}
2703 
2704 	release_sock(sk);
2705 	return err;
2706 }
2707 
2708 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2709 		   unsigned int optlen)
2710 {
2711 	const struct inet_connection_sock *icsk = inet_csk(sk);
2712 
2713 	if (level != SOL_TCP)
2714 		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
2715 						     optval, optlen);
2716 	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2717 }
2718 EXPORT_SYMBOL(tcp_setsockopt);
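
/* Illustrative userspace sketch (not part of the kernel build), assuming
 * "fd" is a TCP socket and <netinet/tcp.h> is included:
 *
 *	int one = 1;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "cubic", strlen("cubic"));
 *
 * Both land in do_tcp_setsockopt() above; TCP_CONGESTION takes the string
 * path at the top of that function, the other options take the integer path.
 */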
2719 
2720 #ifdef CONFIG_COMPAT
2721 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2722 			  char __user *optval, unsigned int optlen)
2723 {
2724 	if (level != SOL_TCP)
2725 		return inet_csk_compat_setsockopt(sk, level, optname,
2726 						  optval, optlen);
2727 	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2728 }
2729 EXPORT_SYMBOL(compat_tcp_setsockopt);
2730 #endif
2731 
2732 /* Return information about state of tcp endpoint in API format. */
2733 void tcp_get_info(const struct sock *sk, struct tcp_info *info)
2734 {
2735 	const struct tcp_sock *tp = tcp_sk(sk);
2736 	const struct inet_connection_sock *icsk = inet_csk(sk);
2737 	u32 now = tcp_time_stamp;
2738 
2739 	memset(info, 0, sizeof(*info));
2740 
2741 	info->tcpi_state = sk->sk_state;
2742 	info->tcpi_ca_state = icsk->icsk_ca_state;
2743 	info->tcpi_retransmits = icsk->icsk_retransmits;
2744 	info->tcpi_probes = icsk->icsk_probes_out;
2745 	info->tcpi_backoff = icsk->icsk_backoff;
2746 
2747 	if (tp->rx_opt.tstamp_ok)
2748 		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2749 	if (tcp_is_sack(tp))
2750 		info->tcpi_options |= TCPI_OPT_SACK;
2751 	if (tp->rx_opt.wscale_ok) {
2752 		info->tcpi_options |= TCPI_OPT_WSCALE;
2753 		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2754 		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2755 	}
2756 
2757 	if (tp->ecn_flags & TCP_ECN_OK)
2758 		info->tcpi_options |= TCPI_OPT_ECN;
2759 	if (tp->ecn_flags & TCP_ECN_SEEN)
2760 		info->tcpi_options |= TCPI_OPT_ECN_SEEN;
2761 	if (tp->syn_data_acked)
2762 		info->tcpi_options |= TCPI_OPT_SYN_DATA;
2763 
2764 	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2765 	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2766 	info->tcpi_snd_mss = tp->mss_cache;
2767 	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2768 
2769 	if (sk->sk_state == TCP_LISTEN) {
2770 		info->tcpi_unacked = sk->sk_ack_backlog;
2771 		info->tcpi_sacked = sk->sk_max_ack_backlog;
2772 	} else {
2773 		info->tcpi_unacked = tp->packets_out;
2774 		info->tcpi_sacked = tp->sacked_out;
2775 	}
2776 	info->tcpi_lost = tp->lost_out;
2777 	info->tcpi_retrans = tp->retrans_out;
2778 	info->tcpi_fackets = tp->fackets_out;
2779 
2780 	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2781 	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2782 	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2783 
2784 	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
2785 	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2786 	info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2787 	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2788 	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2789 	info->tcpi_snd_cwnd = tp->snd_cwnd;
2790 	info->tcpi_advmss = tp->advmss;
2791 	info->tcpi_reordering = tp->reordering;
2792 
2793 	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2794 	info->tcpi_rcv_space = tp->rcvq_space.space;
2795 
2796 	info->tcpi_total_retrans = tp->total_retrans;
2797 }
2798 EXPORT_SYMBOL_GPL(tcp_get_info);
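
/* Illustrative userspace sketch (not part of the kernel build): the same
 * structure is exported through getsockopt(TCP_INFO), handled in
 * do_tcp_getsockopt() below.  Assuming "fd" is a connected TCP socket:
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt %u us, cwnd %u\n", ti.tcpi_rtt, ti.tcpi_snd_cwnd);
 */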
2799 
2800 static int do_tcp_getsockopt(struct sock *sk, int level,
2801 		int optname, char __user *optval, int __user *optlen)
2802 {
2803 	struct inet_connection_sock *icsk = inet_csk(sk);
2804 	struct tcp_sock *tp = tcp_sk(sk);
2805 	int val, len;
2806 
2807 	if (get_user(len, optlen))
2808 		return -EFAULT;
2809 
2810 	len = min_t(unsigned int, len, sizeof(int));
2811 
2812 	if (len < 0)
2813 		return -EINVAL;
2814 
2815 	switch (optname) {
2816 	case TCP_MAXSEG:
2817 		val = tp->mss_cache;
2818 		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2819 			val = tp->rx_opt.user_mss;
2820 		if (tp->repair)
2821 			val = tp->rx_opt.mss_clamp;
2822 		break;
2823 	case TCP_NODELAY:
2824 		val = !!(tp->nonagle&TCP_NAGLE_OFF);
2825 		break;
2826 	case TCP_CORK:
2827 		val = !!(tp->nonagle&TCP_NAGLE_CORK);
2828 		break;
2829 	case TCP_KEEPIDLE:
2830 		val = keepalive_time_when(tp) / HZ;
2831 		break;
2832 	case TCP_KEEPINTVL:
2833 		val = keepalive_intvl_when(tp) / HZ;
2834 		break;
2835 	case TCP_KEEPCNT:
2836 		val = keepalive_probes(tp);
2837 		break;
2838 	case TCP_SYNCNT:
2839 		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
2840 		break;
2841 	case TCP_LINGER2:
2842 		val = tp->linger2;
2843 		if (val >= 0)
2844 			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2845 		break;
2846 	case TCP_DEFER_ACCEPT:
2847 		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
2848 				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
2849 		break;
2850 	case TCP_WINDOW_CLAMP:
2851 		val = tp->window_clamp;
2852 		break;
2853 	case TCP_INFO: {
2854 		struct tcp_info info;
2855 
2856 		if (get_user(len, optlen))
2857 			return -EFAULT;
2858 
2859 		tcp_get_info(sk, &info);
2860 
2861 		len = min_t(unsigned int, len, sizeof(info));
2862 		if (put_user(len, optlen))
2863 			return -EFAULT;
2864 		if (copy_to_user(optval, &info, len))
2865 			return -EFAULT;
2866 		return 0;
2867 	}
2868 	case TCP_QUICKACK:
2869 		val = !icsk->icsk_ack.pingpong;
2870 		break;
2871 
2872 	case TCP_CONGESTION:
2873 		if (get_user(len, optlen))
2874 			return -EFAULT;
2875 		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2876 		if (put_user(len, optlen))
2877 			return -EFAULT;
2878 		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2879 			return -EFAULT;
2880 		return 0;
2881 
2882 	case TCP_THIN_LINEAR_TIMEOUTS:
2883 		val = tp->thin_lto;
2884 		break;
2885 	case TCP_THIN_DUPACK:
2886 		val = tp->thin_dupack;
2887 		break;
2888 
2889 	case TCP_REPAIR:
2890 		val = tp->repair;
2891 		break;
2892 
2893 	case TCP_REPAIR_QUEUE:
2894 		if (tp->repair)
2895 			val = tp->repair_queue;
2896 		else
2897 			return -EINVAL;
2898 		break;
2899 
2900 	case TCP_QUEUE_SEQ:
2901 		if (tp->repair_queue == TCP_SEND_QUEUE)
2902 			val = tp->write_seq;
2903 		else if (tp->repair_queue == TCP_RECV_QUEUE)
2904 			val = tp->rcv_nxt;
2905 		else
2906 			return -EINVAL;
2907 		break;
2908 
2909 	case TCP_USER_TIMEOUT:
2910 		val = jiffies_to_msecs(icsk->icsk_user_timeout);
2911 		break;
2912 	case TCP_TIMESTAMP:
2913 		val = tcp_time_stamp + tp->tsoffset;
2914 		break;
2915 	case TCP_NOTSENT_LOWAT:
2916 		val = tp->notsent_lowat;
2917 		break;
2918 	default:
2919 		return -ENOPROTOOPT;
2920 	}
2921 
2922 	if (put_user(len, optlen))
2923 		return -EFAULT;
2924 	if (copy_to_user(optval, &val, len))
2925 		return -EFAULT;
2926 	return 0;
2927 }
2928 
2929 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2930 		   int __user *optlen)
2931 {
2932 	struct inet_connection_sock *icsk = inet_csk(sk);
2933 
2934 	if (level != SOL_TCP)
2935 		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2936 						     optval, optlen);
2937 	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2938 }
2939 EXPORT_SYMBOL(tcp_getsockopt);
2940 
2941 #ifdef CONFIG_COMPAT
2942 int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2943 			  char __user *optval, int __user *optlen)
2944 {
2945 	if (level != SOL_TCP)
2946 		return inet_csk_compat_getsockopt(sk, level, optname,
2947 						  optval, optlen);
2948 	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2949 }
2950 EXPORT_SYMBOL(compat_tcp_getsockopt);
2951 #endif
2952 
2953 #ifdef CONFIG_TCP_MD5SIG
2954 static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
2955 static DEFINE_MUTEX(tcp_md5sig_mutex);
2956 
2957 static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
2958 {
2959 	int cpu;
2960 
2961 	for_each_possible_cpu(cpu) {
2962 		struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
2963 
2964 		if (p->md5_desc.tfm)
2965 			crypto_free_hash(p->md5_desc.tfm);
2966 	}
2967 	free_percpu(pool);
2968 }
2969 
2970 static void __tcp_alloc_md5sig_pool(void)
2971 {
2972 	int cpu;
2973 	struct tcp_md5sig_pool __percpu *pool;
2974 
2975 	pool = alloc_percpu(struct tcp_md5sig_pool);
2976 	if (!pool)
2977 		return;
2978 
2979 	for_each_possible_cpu(cpu) {
2980 		struct crypto_hash *hash;
2981 
2982 		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2983 		if (IS_ERR_OR_NULL(hash))
2984 			goto out_free;
2985 
2986 		per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
2987 	}
2988 	/* before setting tcp_md5sig_pool, we must commit all writes
2989 	 * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
2990 	 */
2991 	smp_wmb();
2992 	tcp_md5sig_pool = pool;
2993 	return;
2994 out_free:
2995 	__tcp_free_md5sig_pool(pool);
2996 }
2997 
2998 bool tcp_alloc_md5sig_pool(void)
2999 {
3000 	if (unlikely(!tcp_md5sig_pool)) {
3001 		mutex_lock(&tcp_md5sig_mutex);
3002 
3003 		if (!tcp_md5sig_pool)
3004 			__tcp_alloc_md5sig_pool();
3005 
3006 		mutex_unlock(&tcp_md5sig_mutex);
3007 	}
3008 	return tcp_md5sig_pool != NULL;
3009 }
3010 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
3011 
3012 
3013 /**
3014  *	tcp_get_md5sig_pool - get md5sig_pool for this user
3015  *
3016  *	We use a percpu structure, so if we succeed, we exit with preemption
3017  *	and BH disabled, to make sure that another thread or softirq handler
3018  *	won't try to get the same context.
3019  */
3020 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
3021 {
3022 	struct tcp_md5sig_pool __percpu *p;
3023 
3024 	local_bh_disable();
3025 	p = ACCESS_ONCE(tcp_md5sig_pool);
3026 	if (p)
3027 		return __this_cpu_ptr(p);
3028 
3029 	local_bh_enable();
3030 	return NULL;
3031 }
3032 EXPORT_SYMBOL(tcp_get_md5sig_pool);
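
/* A minimal caller sketch: every successful tcp_get_md5sig_pool() must be
 * paired with tcp_put_md5sig_pool() (a local_bh_enable() wrapper in
 * include/net/tcp.h), since we return with BH disabled:
 *
 *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *
 *	if (!hp)
 *		return 1;
 *	... crypto_hash_init(&hp->md5_desc), tcp_md5_hash_header(), ...
 *	tcp_put_md5sig_pool();
 */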
3033 
3034 int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
3035 			const struct tcphdr *th)
3036 {
3037 	struct scatterlist sg;
3038 	struct tcphdr hdr;
3039 	int err;
3040 
3041 	/* We are not allowed to change tcphdr, make a local copy */
3042 	memcpy(&hdr, th, sizeof(hdr));
3043 	hdr.check = 0;
3044 
3045 	/* options aren't included in the hash */
3046 	sg_init_one(&sg, &hdr, sizeof(hdr));
3047 	err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
3048 	return err;
3049 }
3050 EXPORT_SYMBOL(tcp_md5_hash_header);
3051 
3052 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3053 			  const struct sk_buff *skb, unsigned int header_len)
3054 {
3055 	struct scatterlist sg;
3056 	const struct tcphdr *tp = tcp_hdr(skb);
3057 	struct hash_desc *desc = &hp->md5_desc;
3058 	unsigned int i;
3059 	const unsigned int head_data_len = skb_headlen(skb) > header_len ?
3060 					   skb_headlen(skb) - header_len : 0;
3061 	const struct skb_shared_info *shi = skb_shinfo(skb);
3062 	struct sk_buff *frag_iter;
3063 
3064 	sg_init_table(&sg, 1);
3065 
3066 	sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
3067 	if (crypto_hash_update(desc, &sg, head_data_len))
3068 		return 1;
3069 
3070 	for (i = 0; i < shi->nr_frags; ++i) {
3071 		const struct skb_frag_struct *f = &shi->frags[i];
3072 		unsigned int offset = f->page_offset;
3073 		struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
3074 
3075 		sg_set_page(&sg, page, skb_frag_size(f),
3076 			    offset_in_page(offset));
3077 		if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
3078 			return 1;
3079 	}
3080 
3081 	skb_walk_frags(skb, frag_iter)
3082 		if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
3083 			return 1;
3084 
3085 	return 0;
3086 }
3087 EXPORT_SYMBOL(tcp_md5_hash_skb_data);
3088 
3089 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
3090 {
3091 	struct scatterlist sg;
3092 
3093 	sg_init_one(&sg, key->key, key->keylen);
3094 	return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
3095 }
3096 EXPORT_SYMBOL(tcp_md5_hash_key);
3097 
3098 #endif
3099 
3100 void tcp_done(struct sock *sk)
3101 {
3102 	struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
3103 
3104 	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
3105 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
3106 
3107 	tcp_set_state(sk, TCP_CLOSE);
3108 	tcp_clear_xmit_timers(sk);
3109 	if (req != NULL)
3110 		reqsk_fastopen_remove(sk, req, false);
3111 
3112 	sk->sk_shutdown = SHUTDOWN_MASK;
3113 
3114 	if (!sock_flag(sk, SOCK_DEAD))
3115 		sk->sk_state_change(sk);
3116 	else
3117 		inet_csk_destroy_sock(sk);
3118 }
3119 EXPORT_SYMBOL_GPL(tcp_done);
3120 
3121 extern struct tcp_congestion_ops tcp_reno;
3122 
3123 static __initdata unsigned long thash_entries;
3124 static int __init set_thash_entries(char *str)
3125 {
3126 	ssize_t ret;
3127 
3128 	if (!str)
3129 		return 0;
3130 
3131 	ret = kstrtoul(str, 0, &thash_entries);
3132 	if (ret)
3133 		return 0;
3134 
3135 	return 1;
3136 }
3137 __setup("thash_entries=", set_thash_entries);
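
/* "thash_entries" is a boot parameter: e.g. booting with
 * "thash_entries=131072" fixes the size of the established-connection hash
 * table allocated in tcp_init() below, instead of scaling it with the
 * amount of memory.
 */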
3138 
3139 static void tcp_init_mem(void)
3140 {
3141 	unsigned long limit = nr_free_buffer_pages() / 8;
3142 	limit = max(limit, 128UL);
3143 	sysctl_tcp_mem[0] = limit / 4 * 3;
3144 	sysctl_tcp_mem[1] = limit;
3145 	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
3146 }
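
/* For illustration (not normative): with 4 KB pages and roughly 4 GB of
 * free buffer memory (~1,000,000 pages), limit = 125,000 pages, so
 * tcp_mem becomes about { 93,750, 125,000, 187,500 } pages, i.e. the
 * memory-pressure threshold sits near 0.5 GB of TCP buffer memory.
 */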
3147 
3148 void __init tcp_init(void)
3149 {
3150 	struct sk_buff *skb = NULL;
3151 	unsigned long limit;
3152 	int max_rshare, max_wshare, cnt;
3153 	unsigned int i;
3154 
3155 	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
3156 
3157 	percpu_counter_init(&tcp_sockets_allocated, 0);
3158 	percpu_counter_init(&tcp_orphan_count, 0);
3159 	tcp_hashinfo.bind_bucket_cachep =
3160 		kmem_cache_create("tcp_bind_bucket",
3161 				  sizeof(struct inet_bind_bucket), 0,
3162 				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3163 
3164 	/* Size and allocate the main established and bind bucket
3165 	 * hash tables.
3166 	 *
3167 	 * The methodology is similar to that of the buffer cache.
3168 	 */
3169 	tcp_hashinfo.ehash =
3170 		alloc_large_system_hash("TCP established",
3171 					sizeof(struct inet_ehash_bucket),
3172 					thash_entries,
3173 					17, /* one slot per 128 KB of memory */
3174 					0,
3175 					NULL,
3176 					&tcp_hashinfo.ehash_mask,
3177 					0,
3178 					thash_entries ? 0 : 512 * 1024);
3179 	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
3180 		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
3181 
3182 	if (inet_ehash_locks_alloc(&tcp_hashinfo))
3183 		panic("TCP: failed to alloc ehash_locks");
3184 	tcp_hashinfo.bhash =
3185 		alloc_large_system_hash("TCP bind",
3186 					sizeof(struct inet_bind_hashbucket),
3187 					tcp_hashinfo.ehash_mask + 1,
3188 					17, /* one slot per 128 KB of memory */
3189 					0,
3190 					&tcp_hashinfo.bhash_size,
3191 					NULL,
3192 					0,
3193 					64 * 1024);
3194 	tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
3195 	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
3196 		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
3197 		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
3198 	}
3199 
3200 
3201 	cnt = tcp_hashinfo.ehash_mask + 1;
3202 
3203 	tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
3204 	sysctl_tcp_max_orphans = cnt / 2;
3205 	sysctl_max_syn_backlog = max(128, cnt / 256);
3206 
3207 	tcp_init_mem();
3208 	/* Set per-socket limits to no more than 1/128 the pressure threshold */
3209 	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
3210 	max_wshare = min(4UL*1024*1024, limit);
3211 	max_rshare = min(6UL*1024*1024, limit);
3212 
3213 	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
3214 	sysctl_tcp_wmem[1] = 16*1024;
3215 	sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
3216 
3217 	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
3218 	sysctl_tcp_rmem[1] = 87380;
3219 	sysctl_tcp_rmem[2] = max(87380, max_rshare);
3220 
3221 	pr_info("Hash tables configured (established %u bind %u)\n",
3222 		tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
3223 
3224 	tcp_metrics_init();
3225 
3226 	tcp_register_congestion_control(&tcp_reno);
3227 
3228 	tcp_tasklet_init();
3229 }
3230