xref: /openbmc/linux/net/ipv4/tcp_ipv4.c (revision d31346494bd2b1185949dc64ab6467186b80fb05)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Implementation of the Transmission Control Protocol(TCP).
7  *
8  *		IPv4 specific functions
9  *
10  *
11  *		code split from:
12  *		linux/ipv4/tcp.c
13  *		linux/ipv4/tcp_input.c
14  *		linux/ipv4/tcp_output.c
15  *
16  *		See tcp.c for author information
17  *
18  *	This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23 
24 /*
25  * Changes:
26  *		David S. Miller	:	New socket lookup architecture.
27  *					This code is dedicated to John Dyson.
28  *		David S. Miller :	Change semantics of established hash,
29  *					half is devoted to TIME_WAIT sockets
30  *					and the rest go in the other half.
31  *		Andi Kleen :		Add support for syncookies and fixed
32  *					some bugs: ip options weren't passed to
33  *					the TCP layer, missed a check for an
34  *					ACK bit.
35  *		Andi Kleen :		Implemented fast path mtu discovery.
36  *	     				Fixed many serious bugs in the
37  *					request_sock handling and moved
38  *					most of it into the af independent code.
39  *					Added tail drop and some other bugfixes.
40  *					Added new listen semantics.
41  *		Mike McLagan	:	Routing by source
42  *	Juan Jose Ciarlante:		ip_dynaddr bits
43  *		Andi Kleen:		various fixes.
44  *	Vitaly E. Lavrov	:	Transparent proxy revived after a
45  *					year-long coma.
46  *	Andi Kleen		:	Fix new listen.
47  *	Andi Kleen		:	Fix accept error reporting.
48  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
49  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
50  *					a single port at the same time.
51  */
52 
53 #define pr_fmt(fmt) "TCP: " fmt
54 
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65 
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/tcp_memcontrol.h>
77 #include <net/busy_poll.h>
78 
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84 
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87 
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91 
92 #ifdef CONFIG_TCP_MD5SIG
93 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94 			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
95 #endif
96 
97 struct inet_hashinfo tcp_hashinfo;
98 EXPORT_SYMBOL(tcp_hashinfo);
99 
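/* Pick the initial sequence number for a connection from a keyed hash of
 * the addresses and ports in the incoming SYN (secure_tcp_sequence_number()).
 */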
100 static  __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
101 {
102 	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
103 					  ip_hdr(skb)->saddr,
104 					  tcp_hdr(skb)->dest,
105 					  tcp_hdr(skb)->source);
106 }
107 
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
109 {
110 	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
111 	struct tcp_sock *tp = tcp_sk(sk);
112 
113 	/* With PAWS, it is safe from the viewpoint
114 	   of data integrity. Even without PAWS it is safe provided sequence
115 	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.
116 
117 	   Actually, the idea is close to VJ's: the timestamp cache is held
118 	   not per host but per port pair, and the TW bucket is used as the
119 	   state holder.
120 
121 	   If the TW bucket has already been destroyed we fall back to VJ's
122 	   scheme and use the initial timestamp retrieved from the peer table.
123 	 */
124 	if (tcptw->tw_ts_recent_stamp &&
125 	    (twp == NULL || (sysctl_tcp_tw_reuse &&
126 			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
127 		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
128 		if (tp->write_seq == 0)
129 			tp->write_seq = 1;
130 		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
131 		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
132 		sock_hold(sktw);
133 		return 1;
134 	}
135 
136 	return 0;
137 }
138 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
139 
140 /* This will initiate an outgoing connection. */
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
142 {
143 	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
144 	struct inet_sock *inet = inet_sk(sk);
145 	struct tcp_sock *tp = tcp_sk(sk);
146 	__be16 orig_sport, orig_dport;
147 	__be32 daddr, nexthop;
148 	struct flowi4 *fl4;
149 	struct rtable *rt;
150 	int err;
151 	struct ip_options_rcu *inet_opt;
152 
153 	if (addr_len < sizeof(struct sockaddr_in))
154 		return -EINVAL;
155 
156 	if (usin->sin_family != AF_INET)
157 		return -EAFNOSUPPORT;
158 
159 	nexthop = daddr = usin->sin_addr.s_addr;
160 	inet_opt = rcu_dereference_protected(inet->inet_opt,
161 					     sock_owned_by_user(sk));
162 	if (inet_opt && inet_opt->opt.srr) {
163 		if (!daddr)
164 			return -EINVAL;
165 		nexthop = inet_opt->opt.faddr;
166 	}
167 
168 	orig_sport = inet->inet_sport;
169 	orig_dport = usin->sin_port;
170 	fl4 = &inet->cork.fl.u.ip4;
171 	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
172 			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
173 			      IPPROTO_TCP,
174 			      orig_sport, orig_dport, sk);
175 	if (IS_ERR(rt)) {
176 		err = PTR_ERR(rt);
177 		if (err == -ENETUNREACH)
178 			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
179 		return err;
180 	}
181 
182 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
183 		ip_rt_put(rt);
184 		return -ENETUNREACH;
185 	}
186 
187 	if (!inet_opt || !inet_opt->opt.srr)
188 		daddr = fl4->daddr;
189 
190 	if (!inet->inet_saddr)
191 		inet->inet_saddr = fl4->saddr;
192 	sk_rcv_saddr_set(sk, inet->inet_saddr);
193 
194 	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
195 		/* Reset inherited state */
196 		tp->rx_opt.ts_recent	   = 0;
197 		tp->rx_opt.ts_recent_stamp = 0;
198 		if (likely(!tp->repair))
199 			tp->write_seq	   = 0;
200 	}
201 
202 	if (tcp_death_row.sysctl_tw_recycle &&
203 	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
204 		tcp_fetch_timewait_stamp(sk, &rt->dst);
205 
206 	inet->inet_dport = usin->sin_port;
207 	sk_daddr_set(sk, daddr);
208 
209 	inet_csk(sk)->icsk_ext_hdr_len = 0;
210 	if (inet_opt)
211 		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
212 
213 	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
214 
215 	/* Socket identity is still unknown (sport may be zero).
216 	 * However, we set the state to SYN-SENT and, without releasing the
217 	 * socket lock, select a source port, enter ourselves into the hash
218 	 * tables and complete initialization after this.
219 	 */
220 	tcp_set_state(sk, TCP_SYN_SENT);
221 	err = inet_hash_connect(&tcp_death_row, sk);
222 	if (err)
223 		goto failure;
224 
225 	inet_set_txhash(sk);
226 
227 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228 			       inet->inet_sport, inet->inet_dport, sk);
229 	if (IS_ERR(rt)) {
230 		err = PTR_ERR(rt);
231 		rt = NULL;
232 		goto failure;
233 	}
234 	/* OK, now commit destination to socket.  */
235 	sk->sk_gso_type = SKB_GSO_TCPV4;
236 	sk_setup_caps(sk, &rt->dst);
237 
238 	if (!tp->write_seq && likely(!tp->repair))
239 		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
240 							   inet->inet_daddr,
241 							   inet->inet_sport,
242 							   usin->sin_port);
243 
244 	inet->inet_id = tp->write_seq ^ jiffies;
245 
246 	err = tcp_connect(sk);
247 
248 	rt = NULL;
249 	if (err)
250 		goto failure;
251 
252 	return 0;
253 
254 failure:
255 	/*
256 	 * This unhashes the socket and releases the local port,
257 	 * if necessary.
258 	 */
259 	tcp_set_state(sk, TCP_CLOSE);
260 	ip_rt_put(rt);
261 	sk->sk_route_caps = 0;
262 	inet->inet_dport = 0;
263 	return err;
264 }
265 EXPORT_SYMBOL(tcp_v4_connect);
266 
267 /*
268  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
269  * It can be called through tcp_release_cb() if socket was owned by user
270  * at the time tcp_v4_err() was called to handle ICMP message.
271  */
272 void tcp_v4_mtu_reduced(struct sock *sk)
273 {
274 	struct dst_entry *dst;
275 	struct inet_sock *inet = inet_sk(sk);
276 	u32 mtu = tcp_sk(sk)->mtu_info;
277 
278 	dst = inet_csk_update_pmtu(sk, mtu);
279 	if (!dst)
280 		return;
281 
282 	/* Something is about to go wrong... Remember the soft error
283 	 * in case this connection is not able to recover.
284 	 */
285 	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
286 		sk->sk_err_soft = EMSGSIZE;
287 
288 	mtu = dst_mtu(dst);
289 
290 	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
291 	    ip_sk_accept_pmtu(sk) &&
292 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
293 		tcp_sync_mss(sk, mtu);
294 
295 		/* Resend the TCP packet because it's
296 		 * clear that the old packet has been
297 		 * dropped. This is the new "fast" path mtu
298 		 * discovery.
299 		 */
300 		tcp_simple_retransmit(sk);
301 	} /* else let the usual retransmit timer handle it */
302 }
303 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
304 
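/* Let the socket's cached route, if it still has one, process an ICMP
 * redirect.
 */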
305 static void do_redirect(struct sk_buff *skb, struct sock *sk)
306 {
307 	struct dst_entry *dst = __sk_dst_check(sk, 0);
308 
309 	if (dst)
310 		dst->ops->redirect(dst, sk, skb);
311 }
312 
313 
314 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
315 void tcp_req_err(struct sock *sk, u32 seq)
316 {
317 	struct request_sock *req = inet_reqsk(sk);
318 	struct net *net = sock_net(sk);
319 
320 	/* ICMPs are not backlogged, hence we cannot get
321 	 * an established socket here.
322 	 */
323 	WARN_ON(req->sk);
324 
325 	if (seq != tcp_rsk(req)->snt_isn) {
326 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
327 		reqsk_put(req);
328 	} else {
329 		/*
330 		 * Still in SYN_RECV, just remove it silently.
331 		 * There is no good way to pass the error to the newly
332 		 * created socket, and POSIX does not want network
333 		 * errors returned from accept().
334 		 */
335 		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
336 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
337 	}
338 }
339 EXPORT_SYMBOL(tcp_req_err);
340 
341 /*
342  * This routine is called by the ICMP module when it gets some
343  * sort of error condition.  If err < 0 then the socket should
344  * be closed and the error returned to the user.  If err > 0
345  * it's just the icmp type << 8 | icmp code.  After adjustment
346  * header points to the first 8 bytes of the tcp header.  We need
347  * to find the appropriate port.
348  *
349  * The locking strategy used here is very "optimistic". When
350  * someone else accesses the socket the ICMP is just dropped
351  * and for some paths there is no check at all.
352  * A more general error queue to queue errors for later handling
353  * is probably better.
354  *
355  */
356 
357 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
358 {
359 	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
360 	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
361 	struct inet_connection_sock *icsk;
362 	struct tcp_sock *tp;
363 	struct inet_sock *inet;
364 	const int type = icmp_hdr(icmp_skb)->type;
365 	const int code = icmp_hdr(icmp_skb)->code;
366 	struct sock *sk;
367 	struct sk_buff *skb;
368 	struct request_sock *fastopen;
369 	__u32 seq, snd_una;
370 	__u32 remaining;
371 	int err;
372 	struct net *net = dev_net(icmp_skb->dev);
373 
374 	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
375 				       th->dest, iph->saddr, ntohs(th->source),
376 				       inet_iif(icmp_skb));
377 	if (!sk) {
378 		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
379 		return;
380 	}
381 	if (sk->sk_state == TCP_TIME_WAIT) {
382 		inet_twsk_put(inet_twsk(sk));
383 		return;
384 	}
385 	seq = ntohl(th->seq);
386 	if (sk->sk_state == TCP_NEW_SYN_RECV)
387 		return tcp_req_err(sk, seq);
388 
389 	bh_lock_sock(sk);
390 	/* If too many ICMPs get dropped on busy servers, this needs to be
391 	 * solved differently.
392 	 * We do take care of the PMTU discovery (RFC1191) special case:
393 	 * we can receive locally generated ICMP messages while the socket is held.
394 	 */
395 	if (sock_owned_by_user(sk)) {
396 		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
397 			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
398 	}
399 	if (sk->sk_state == TCP_CLOSE)
400 		goto out;
401 
402 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
403 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
404 		goto out;
405 	}
406 
407 	icsk = inet_csk(sk);
408 	tp = tcp_sk(sk);
409 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
410 	fastopen = tp->fastopen_rsk;
411 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
412 	if (sk->sk_state != TCP_LISTEN &&
413 	    !between(seq, snd_una, tp->snd_nxt)) {
414 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
415 		goto out;
416 	}
417 
418 	switch (type) {
419 	case ICMP_REDIRECT:
420 		do_redirect(icmp_skb, sk);
421 		goto out;
422 	case ICMP_SOURCE_QUENCH:
423 		/* Just silently ignore these. */
424 		goto out;
425 	case ICMP_PARAMETERPROB:
426 		err = EPROTO;
427 		break;
428 	case ICMP_DEST_UNREACH:
429 		if (code > NR_ICMP_UNREACH)
430 			goto out;
431 
432 		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
433 			/* We are not interested in TCP_LISTEN and open_requests
434 			 * (SYN-ACKs sent out by Linux are always < 576 bytes so
435 			 * they should go through unfragmented).
436 			 */
437 			if (sk->sk_state == TCP_LISTEN)
438 				goto out;
439 
440 			tp->mtu_info = info;
441 			if (!sock_owned_by_user(sk)) {
442 				tcp_v4_mtu_reduced(sk);
443 			} else {
444 				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
445 					sock_hold(sk);
446 			}
447 			goto out;
448 		}
449 
450 		err = icmp_err_convert[code].errno;
451 		/* Check if icmp_skb allows reverting the backoff
452 		 * (see draft-zimmermann-tcp-lcd) */
453 		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
454 			break;
455 		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
456 		    !icsk->icsk_backoff || fastopen)
457 			break;
458 
459 		if (sock_owned_by_user(sk))
460 			break;
461 
462 		icsk->icsk_backoff--;
463 		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
464 					       TCP_TIMEOUT_INIT;
465 		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
466 
467 		skb = tcp_write_queue_head(sk);
468 		BUG_ON(!skb);
469 
470 		remaining = icsk->icsk_rto -
471 			    min(icsk->icsk_rto,
472 				tcp_time_stamp - tcp_skb_timestamp(skb));
473 
474 		if (remaining) {
475 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
476 						  remaining, TCP_RTO_MAX);
477 		} else {
478 			/* The RTO revert clocked out the retransmission.
479 			 * Retransmit now. */
480 			tcp_retransmit_timer(sk);
481 		}
482 
483 		break;
484 	case ICMP_TIME_EXCEEDED:
485 		err = EHOSTUNREACH;
486 		break;
487 	default:
488 		goto out;
489 	}
490 
491 	switch (sk->sk_state) {
492 	case TCP_SYN_SENT:
493 	case TCP_SYN_RECV:
494 		/* Only in fast or simultaneous open. If a fast open socket
495 		 * is already accepted it is treated as a connected one below.
496 		 */
497 		if (fastopen && fastopen->sk == NULL)
498 			break;
499 
500 		if (!sock_owned_by_user(sk)) {
501 			sk->sk_err = err;
502 
503 			sk->sk_error_report(sk);
504 
505 			tcp_done(sk);
506 		} else {
507 			sk->sk_err_soft = err;
508 		}
509 		goto out;
510 	}
511 
512 	/* If we've already connected we will keep trying
513 	 * until we time out, or the user gives up.
514 	 *
515 	 * rfc1122 4.2.3.9 allows us to treat only PROTO_UNREACH and
516 	 * PORT_UNREACH as hard errors (well, FRAG_FAILED too, but it is
517 	 * obsoleted by pmtu discovery).
518 	 *
519 	 * Note that in the modern internet, where routing is unreliable
520 	 * and broken firewalls sit in every dark corner sending random
521 	 * errors ordered by their masters, even these two messages have
522 	 * lost their original sense (even Linux sends invalid PORT_UNREACHs).
523 	 *
524 	 * Now we are in compliance with RFCs.
525 	 *							--ANK (980905)
526 	 */
527 
528 	inet = inet_sk(sk);
529 	if (!sock_owned_by_user(sk) && inet->recverr) {
530 		sk->sk_err = err;
531 		sk->sk_error_report(sk);
532 	} else	{ /* Only an error on timeout */
533 		sk->sk_err_soft = err;
534 	}
535 
536 out:
537 	bh_unlock_sock(sk);
538 	sock_put(sk);
539 }
540 
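/* Fill in the TCP checksum field of an outgoing segment: either prime it
 * for hardware offload (CHECKSUM_PARTIAL) or compute the full checksum in
 * software.
 */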
541 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
542 {
543 	struct tcphdr *th = tcp_hdr(skb);
544 
545 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
546 		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
547 		skb->csum_start = skb_transport_header(skb) - skb->head;
548 		skb->csum_offset = offsetof(struct tcphdr, check);
549 	} else {
550 		th->check = tcp_v4_check(skb->len, saddr, daddr,
551 					 csum_partial(th,
552 						      th->doff << 2,
553 						      skb->csum));
554 	}
555 }
556 
557 /* This routine computes an IPv4 TCP checksum. */
558 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
559 {
560 	const struct inet_sock *inet = inet_sk(sk);
561 
562 	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
563 }
564 EXPORT_SYMBOL(tcp_v4_send_check);
565 
566 /*
567  *	This routine will send an RST to the other TCP endpoint.
568  *
569  *	Someone asks: why do I NEVER use socket parameters (TOS, TTL, etc.)
570  *		      for the reset?
571  *	Answer: if a packet caused an RST, it is not for a socket
572  *		existing in our system; if it is matched to a socket,
573  *		it is just a duplicate segment or a bug in the other side's TCP.
574  *		So we build the reply based only on the parameters that
575  *		arrived with the segment.
576  *	Exception: precedence violation. We do not implement it in any case.
577  */
578 
579 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
580 {
581 	const struct tcphdr *th = tcp_hdr(skb);
582 	struct {
583 		struct tcphdr th;
584 #ifdef CONFIG_TCP_MD5SIG
585 		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
586 #endif
587 	} rep;
588 	struct ip_reply_arg arg;
589 #ifdef CONFIG_TCP_MD5SIG
590 	struct tcp_md5sig_key *key;
591 	const __u8 *hash_location = NULL;
592 	unsigned char newhash[16];
593 	int genhash;
594 	struct sock *sk1 = NULL;
595 #endif
596 	struct net *net;
597 
598 	/* Never send a reset in response to a reset. */
599 	if (th->rst)
600 		return;
601 
602 	/* If sk is not NULL, it means we did a successful lookup and the
603 	 * incoming route had to be correct. prequeue might have dropped our dst.
604 	 */
605 	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
606 		return;
607 
608 	/* Swap the send and the receive. */
609 	memset(&rep, 0, sizeof(rep));
610 	rep.th.dest   = th->source;
611 	rep.th.source = th->dest;
612 	rep.th.doff   = sizeof(struct tcphdr) / 4;
613 	rep.th.rst    = 1;
614 
615 	if (th->ack) {
616 		rep.th.seq = th->ack_seq;
617 	} else {
618 		rep.th.ack = 1;
619 		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
620 				       skb->len - (th->doff << 2));
621 	}
622 
623 	memset(&arg, 0, sizeof(arg));
624 	arg.iov[0].iov_base = (unsigned char *)&rep;
625 	arg.iov[0].iov_len  = sizeof(rep.th);
626 
627 	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
628 #ifdef CONFIG_TCP_MD5SIG
629 	hash_location = tcp_parse_md5sig_option(th);
630 	if (!sk && hash_location) {
631 		/*
632 		 * The active side is lost. Try to find the listening socket
633 		 * through the source port, and then find the md5 key through
634 		 * the listening socket. We do not loosen security here:
635 		 * the incoming packet is checked against the md5 hash of the
636 		 * found key, and no RST is generated if the hash doesn't match.
637 		 */
638 		sk1 = __inet_lookup_listener(net,
639 					     &tcp_hashinfo, ip_hdr(skb)->saddr,
640 					     th->source, ip_hdr(skb)->daddr,
641 					     ntohs(th->source), inet_iif(skb));
642 		/* don't send an RST if we can't find the key */
643 		if (!sk1)
644 			return;
645 		rcu_read_lock();
646 		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
647 					&ip_hdr(skb)->saddr, AF_INET);
648 		if (!key)
649 			goto release_sk1;
650 
651 		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
652 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
653 			goto release_sk1;
654 	} else {
655 		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
656 					     &ip_hdr(skb)->saddr,
657 					     AF_INET) : NULL;
658 	}
659 
660 	if (key) {
661 		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
662 				   (TCPOPT_NOP << 16) |
663 				   (TCPOPT_MD5SIG << 8) |
664 				   TCPOLEN_MD5SIG);
665 		/* Update length and the length the header thinks exists */
666 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
667 		rep.th.doff = arg.iov[0].iov_len / 4;
668 
669 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
670 				     key, ip_hdr(skb)->saddr,
671 				     ip_hdr(skb)->daddr, &rep.th);
672 	}
673 #endif
674 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
675 				      ip_hdr(skb)->saddr, /* XXX */
676 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
677 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
678 	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
679 	/* When the socket is gone, all binding information is lost and
680 	 * routing might fail. No choice here: if we forced the input
681 	 * interface, we would misroute in the case of an asymmetric route.
682 	 */
683 	if (sk)
684 		arg.bound_dev_if = sk->sk_bound_dev_if;
685 
686 	arg.tos = ip_hdr(skb)->tos;
687 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
688 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
689 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
690 			      &arg, arg.iov[0].iov_len);
691 
692 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
693 	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
694 
695 #ifdef CONFIG_TCP_MD5SIG
696 release_sk1:
697 	if (sk1) {
698 		rcu_read_unlock();
699 		sock_put(sk1);
700 	}
701 #endif
702 }
703 
704 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
705    outside of socket context, is certainly ugly. What can I do?
706  */
707 
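/* Build and send a bare ACK from scratch; the caller supplies sequence
 * numbers, window, timestamps and an optional MD5 key, since there is no
 * full socket to take them from.
 */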
708 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
709 			    u32 win, u32 tsval, u32 tsecr, int oif,
710 			    struct tcp_md5sig_key *key,
711 			    int reply_flags, u8 tos)
712 {
713 	const struct tcphdr *th = tcp_hdr(skb);
714 	struct {
715 		struct tcphdr th;
716 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
717 #ifdef CONFIG_TCP_MD5SIG
718 			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
719 #endif
720 			];
721 	} rep;
722 	struct ip_reply_arg arg;
723 	struct net *net = dev_net(skb_dst(skb)->dev);
724 
725 	memset(&rep.th, 0, sizeof(struct tcphdr));
726 	memset(&arg, 0, sizeof(arg));
727 
728 	arg.iov[0].iov_base = (unsigned char *)&rep;
729 	arg.iov[0].iov_len  = sizeof(rep.th);
730 	if (tsecr) {
731 		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
732 				   (TCPOPT_TIMESTAMP << 8) |
733 				   TCPOLEN_TIMESTAMP);
734 		rep.opt[1] = htonl(tsval);
735 		rep.opt[2] = htonl(tsecr);
736 		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
737 	}
738 
739 	/* Swap the send and the receive. */
740 	rep.th.dest    = th->source;
741 	rep.th.source  = th->dest;
742 	rep.th.doff    = arg.iov[0].iov_len / 4;
743 	rep.th.seq     = htonl(seq);
744 	rep.th.ack_seq = htonl(ack);
745 	rep.th.ack     = 1;
746 	rep.th.window  = htons(win);
747 
748 #ifdef CONFIG_TCP_MD5SIG
749 	if (key) {
750 		int offset = (tsecr) ? 3 : 0;
751 
752 		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
753 					  (TCPOPT_NOP << 16) |
754 					  (TCPOPT_MD5SIG << 8) |
755 					  TCPOLEN_MD5SIG);
756 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
757 		rep.th.doff = arg.iov[0].iov_len/4;
758 
759 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
760 				    key, ip_hdr(skb)->saddr,
761 				    ip_hdr(skb)->daddr, &rep.th);
762 	}
763 #endif
764 	arg.flags = reply_flags;
765 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
766 				      ip_hdr(skb)->saddr, /* XXX */
767 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
768 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
769 	if (oif)
770 		arg.bound_dev_if = oif;
771 	arg.tos = tos;
772 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
773 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
774 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
775 			      &arg, arg.iov[0].iov_len);
776 
777 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
778 }
779 
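/* Answer a segment received for a TIME-WAIT socket with an ACK built from
 * the recorded timewait state, then drop our reference to the timewait sock.
 */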
780 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
781 {
782 	struct inet_timewait_sock *tw = inet_twsk(sk);
783 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
784 
785 	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
786 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
787 			tcp_time_stamp + tcptw->tw_ts_offset,
788 			tcptw->tw_ts_recent,
789 			tw->tw_bound_dev_if,
790 			tcp_twsk_md5_key(tcptw),
791 			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
792 			tw->tw_tos
793 			);
794 
795 	inet_twsk_put(tw);
796 }
797 
798 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
799 				  struct request_sock *req)
800 {
801 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
802 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
803 	 */
804 	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
805 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
806 			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
807 			tcp_time_stamp,
808 			req->ts_recent,
809 			0,
810 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
811 					  AF_INET),
812 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
813 			ip_hdr(skb)->tos);
814 }
815 
816 /*
817  *	Send a SYN-ACK after having received a SYN.
818  *	This still operates on a request_sock only, not on a big
819  *	socket.
820  */
821 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
822 			      struct flowi *fl,
823 			      struct request_sock *req,
824 			      u16 queue_mapping,
825 			      struct tcp_fastopen_cookie *foc)
826 {
827 	const struct inet_request_sock *ireq = inet_rsk(req);
828 	struct flowi4 fl4;
829 	int err = -1;
830 	struct sk_buff *skb;
831 
832 	/* First, grab a route. */
833 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
834 		return -1;
835 
836 	skb = tcp_make_synack(sk, dst, req, foc);
837 
838 	if (skb) {
839 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
840 
841 		skb_set_queue_mapping(skb, queue_mapping);
842 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
843 					    ireq->ir_rmt_addr,
844 					    ireq->opt);
845 		err = net_xmit_eval(err);
846 	}
847 
848 	return err;
849 }
850 
851 /*
852  *	IPv4 request_sock destructor.
853  */
854 static void tcp_v4_reqsk_destructor(struct request_sock *req)
855 {
856 	kfree(inet_rsk(req)->opt);
857 }
858 
859 /*
860  * Return true if a syncookie should be sent
861  */
862 bool tcp_syn_flood_action(struct sock *sk,
863 			 const struct sk_buff *skb,
864 			 const char *proto)
865 {
866 	const char *msg = "Dropping request";
867 	bool want_cookie = false;
868 	struct listen_sock *lopt;
869 
870 #ifdef CONFIG_SYN_COOKIES
871 	if (sysctl_tcp_syncookies) {
872 		msg = "Sending cookies";
873 		want_cookie = true;
874 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
875 	} else
876 #endif
877 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
878 
879 	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
880 	if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
881 		lopt->synflood_warned = 1;
882 		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
883 			proto, ntohs(tcp_hdr(skb)->dest), msg);
884 	}
885 	return want_cookie;
886 }
887 EXPORT_SYMBOL(tcp_syn_flood_action);
888 
889 #ifdef CONFIG_TCP_MD5SIG
890 /*
891  * RFC2385 MD5 checksumming requires a mapping of
892  * IP address->MD5 Key.
893  * We need to maintain these in the sk structure.
894  */
895 
896 /* Find the Key structure for an address.  */
897 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
898 					 const union tcp_md5_addr *addr,
899 					 int family)
900 {
901 	const struct tcp_sock *tp = tcp_sk(sk);
902 	struct tcp_md5sig_key *key;
903 	unsigned int size = sizeof(struct in_addr);
904 	const struct tcp_md5sig_info *md5sig;
905 
906 	/* caller either holds rcu_read_lock() or socket lock */
907 	md5sig = rcu_dereference_check(tp->md5sig_info,
908 				       sock_owned_by_user(sk) ||
909 				       lockdep_is_held(&sk->sk_lock.slock));
910 	if (!md5sig)
911 		return NULL;
912 #if IS_ENABLED(CONFIG_IPV6)
913 	if (family == AF_INET6)
914 		size = sizeof(struct in6_addr);
915 #endif
916 	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
917 		if (key->family != family)
918 			continue;
919 		if (!memcmp(&key->addr, addr, size))
920 			return key;
921 	}
922 	return NULL;
923 }
924 EXPORT_SYMBOL(tcp_md5_do_lookup);
925 
926 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
927 					 const struct sock *addr_sk)
928 {
929 	union tcp_md5_addr *addr;
930 
931 	addr = (union tcp_md5_addr *)&sk->sk_daddr;
932 	return tcp_md5_do_lookup(sk, addr, AF_INET);
933 }
934 EXPORT_SYMBOL(tcp_v4_md5_lookup);
935 
936 /* This can be called on a newly created socket, from other files */
937 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
938 		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
939 {
940 	/* Add Key to the list */
941 	struct tcp_md5sig_key *key;
942 	struct tcp_sock *tp = tcp_sk(sk);
943 	struct tcp_md5sig_info *md5sig;
944 
945 	key = tcp_md5_do_lookup(sk, addr, family);
946 	if (key) {
947 		/* Pre-existing entry - just update that one. */
948 		memcpy(key->key, newkey, newkeylen);
949 		key->keylen = newkeylen;
950 		return 0;
951 	}
952 
953 	md5sig = rcu_dereference_protected(tp->md5sig_info,
954 					   sock_owned_by_user(sk));
955 	if (!md5sig) {
956 		md5sig = kmalloc(sizeof(*md5sig), gfp);
957 		if (!md5sig)
958 			return -ENOMEM;
959 
960 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
961 		INIT_HLIST_HEAD(&md5sig->head);
962 		rcu_assign_pointer(tp->md5sig_info, md5sig);
963 	}
964 
965 	key = sock_kmalloc(sk, sizeof(*key), gfp);
966 	if (!key)
967 		return -ENOMEM;
968 	if (!tcp_alloc_md5sig_pool()) {
969 		sock_kfree_s(sk, key, sizeof(*key));
970 		return -ENOMEM;
971 	}
972 
973 	memcpy(key->key, newkey, newkeylen);
974 	key->keylen = newkeylen;
975 	key->family = family;
976 	memcpy(&key->addr, addr,
977 	       (family == AF_INET6) ? sizeof(struct in6_addr) :
978 				      sizeof(struct in_addr));
979 	hlist_add_head_rcu(&key->node, &md5sig->head);
980 	return 0;
981 }
982 EXPORT_SYMBOL(tcp_md5_do_add);
983 
984 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
985 {
986 	struct tcp_md5sig_key *key;
987 
988 	key = tcp_md5_do_lookup(sk, addr, family);
989 	if (!key)
990 		return -ENOENT;
991 	hlist_del_rcu(&key->node);
992 	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
993 	kfree_rcu(key, rcu);
994 	return 0;
995 }
996 EXPORT_SYMBOL(tcp_md5_do_del);
997 
998 static void tcp_clear_md5_list(struct sock *sk)
999 {
1000 	struct tcp_sock *tp = tcp_sk(sk);
1001 	struct tcp_md5sig_key *key;
1002 	struct hlist_node *n;
1003 	struct tcp_md5sig_info *md5sig;
1004 
1005 	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1006 
1007 	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1008 		hlist_del_rcu(&key->node);
1009 		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1010 		kfree_rcu(key, rcu);
1011 	}
1012 }
1013 
1014 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1015 				 int optlen)
1016 {
1017 	struct tcp_md5sig cmd;
1018 	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1019 
1020 	if (optlen < sizeof(cmd))
1021 		return -EINVAL;
1022 
1023 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1024 		return -EFAULT;
1025 
1026 	if (sin->sin_family != AF_INET)
1027 		return -EINVAL;
1028 
1029 	if (!cmd.tcpm_keylen)
1030 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1031 				      AF_INET);
1032 
1033 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1034 		return -EINVAL;
1035 
1036 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1037 			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1038 			      GFP_KERNEL);
1039 }
1040 
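/* Feed the TCP pseudo-header (source address, destination address,
 * zero-padded protocol number and segment length) into the MD5 hash.
 */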
1041 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1042 					__be32 daddr, __be32 saddr, int nbytes)
1043 {
1044 	struct tcp4_pseudohdr *bp;
1045 	struct scatterlist sg;
1046 
1047 	bp = &hp->md5_blk.ip4;
1048 
1049 	/*
1050 	 * 1. the TCP pseudo-header (in the order: source IP address,
1051 	 * destination IP address, zero-padded protocol number, and
1052 	 * segment length)
1053 	 */
1054 	bp->saddr = saddr;
1055 	bp->daddr = daddr;
1056 	bp->pad = 0;
1057 	bp->protocol = IPPROTO_TCP;
1058 	bp->len = cpu_to_be16(nbytes);
1059 
1060 	sg_init_one(&sg, bp, sizeof(*bp));
1061 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1062 }
1063 
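/* Compute the RFC 2385 MD5 signature over the pseudo-header, the TCP header
 * and the key only (no payload); used for replies built outside full socket
 * context, such as RSTs and bare ACKs.
 */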
1064 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1065 			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1066 {
1067 	struct tcp_md5sig_pool *hp;
1068 	struct hash_desc *desc;
1069 
1070 	hp = tcp_get_md5sig_pool();
1071 	if (!hp)
1072 		goto clear_hash_noput;
1073 	desc = &hp->md5_desc;
1074 
1075 	if (crypto_hash_init(desc))
1076 		goto clear_hash;
1077 	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1078 		goto clear_hash;
1079 	if (tcp_md5_hash_header(hp, th))
1080 		goto clear_hash;
1081 	if (tcp_md5_hash_key(hp, key))
1082 		goto clear_hash;
1083 	if (crypto_hash_final(desc, md5_hash))
1084 		goto clear_hash;
1085 
1086 	tcp_put_md5sig_pool();
1087 	return 0;
1088 
1089 clear_hash:
1090 	tcp_put_md5sig_pool();
1091 clear_hash_noput:
1092 	memset(md5_hash, 0, 16);
1093 	return 1;
1094 }
1095 
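/* Compute the RFC 2385 MD5 signature over the pseudo-header, the TCP header,
 * the payload of the skb and the key.
 */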
1096 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1097 			const struct sock *sk,
1098 			const struct sk_buff *skb)
1099 {
1100 	struct tcp_md5sig_pool *hp;
1101 	struct hash_desc *desc;
1102 	const struct tcphdr *th = tcp_hdr(skb);
1103 	__be32 saddr, daddr;
1104 
1105 	if (sk) { /* valid for establish/request sockets */
1106 		saddr = sk->sk_rcv_saddr;
1107 		daddr = sk->sk_daddr;
1108 	} else {
1109 		const struct iphdr *iph = ip_hdr(skb);
1110 		saddr = iph->saddr;
1111 		daddr = iph->daddr;
1112 	}
1113 
1114 	hp = tcp_get_md5sig_pool();
1115 	if (!hp)
1116 		goto clear_hash_noput;
1117 	desc = &hp->md5_desc;
1118 
1119 	if (crypto_hash_init(desc))
1120 		goto clear_hash;
1121 
1122 	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1123 		goto clear_hash;
1124 	if (tcp_md5_hash_header(hp, th))
1125 		goto clear_hash;
1126 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1127 		goto clear_hash;
1128 	if (tcp_md5_hash_key(hp, key))
1129 		goto clear_hash;
1130 	if (crypto_hash_final(desc, md5_hash))
1131 		goto clear_hash;
1132 
1133 	tcp_put_md5sig_pool();
1134 	return 0;
1135 
1136 clear_hash:
1137 	tcp_put_md5sig_pool();
1138 clear_hash_noput:
1139 	memset(md5_hash, 0, 16);
1140 	return 1;
1141 }
1142 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1143 
1144 /* Called with rcu_read_lock() */
1145 static bool tcp_v4_inbound_md5_hash(struct sock *sk,
1146 				    const struct sk_buff *skb)
1147 {
1148 	/*
1149 	 * This gets called for each TCP segment that arrives
1150 	 * so we want to be efficient.
1151 	 * We have 3 drop cases:
1152 	 * o No MD5 hash and one expected.
1153 	 * o MD5 hash and we're not expecting one.
1154 	 * o MD5 hash and it's wrong.
1155 	 */
1156 	const __u8 *hash_location = NULL;
1157 	struct tcp_md5sig_key *hash_expected;
1158 	const struct iphdr *iph = ip_hdr(skb);
1159 	const struct tcphdr *th = tcp_hdr(skb);
1160 	int genhash;
1161 	unsigned char newhash[16];
1162 
1163 	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1164 					  AF_INET);
1165 	hash_location = tcp_parse_md5sig_option(th);
1166 
1167 	/* We've parsed the options - do we have a hash? */
1168 	if (!hash_expected && !hash_location)
1169 		return false;
1170 
1171 	if (hash_expected && !hash_location) {
1172 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1173 		return true;
1174 	}
1175 
1176 	if (!hash_expected && hash_location) {
1177 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1178 		return true;
1179 	}
1180 
1181 	/* Okay, so this is hash_expected and hash_location -
1182 	 * so we need to calculate the checksum.
1183 	 */
1184 	genhash = tcp_v4_md5_hash_skb(newhash,
1185 				      hash_expected,
1186 				      NULL, skb);
1187 
1188 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1189 		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1190 				     &iph->saddr, ntohs(th->source),
1191 				     &iph->daddr, ntohs(th->dest),
1192 				     genhash ? " tcp_v4_calc_md5_hash failed"
1193 				     : "");
1194 		return true;
1195 	}
1196 	return false;
1197 }
1198 #endif
1199 
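/* Initialize the IPv4-specific part of a request sock from the incoming SYN:
 * addresses, the transparent-proxy flag and the saved IP options.
 */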
1200 static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener,
1201 			    struct sk_buff *skb)
1202 {
1203 	struct inet_request_sock *ireq = inet_rsk(req);
1204 
1205 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1206 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1207 	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
1208 	ireq->opt = tcp_v4_save_options(skb);
1209 }
1210 
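/* Route the SYN-ACK for a request sock; when *strict is requested, report
 * whether the routed destination still matches the peer address of the
 * request.
 */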
1211 static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
1212 					  const struct request_sock *req,
1213 					  bool *strict)
1214 {
1215 	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1216 
1217 	if (strict) {
1218 		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1219 			*strict = true;
1220 		else
1221 			*strict = false;
1222 	}
1223 
1224 	return dst;
1225 }
1226 
1227 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1228 	.family		=	PF_INET,
1229 	.obj_size	=	sizeof(struct tcp_request_sock),
1230 	.rtx_syn_ack	=	tcp_rtx_synack,
1231 	.send_ack	=	tcp_v4_reqsk_send_ack,
1232 	.destructor	=	tcp_v4_reqsk_destructor,
1233 	.send_reset	=	tcp_v4_send_reset,
1234 	.syn_ack_timeout =	tcp_syn_ack_timeout,
1235 };
1236 
1237 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1238 	.mss_clamp	=	TCP_MSS_DEFAULT,
1239 #ifdef CONFIG_TCP_MD5SIG
1240 	.req_md5_lookup	=	tcp_v4_md5_lookup,
1241 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1242 #endif
1243 	.init_req	=	tcp_v4_init_req,
1244 #ifdef CONFIG_SYN_COOKIES
1245 	.cookie_init_seq =	cookie_v4_init_sequence,
1246 #endif
1247 	.route_req	=	tcp_v4_route_req,
1248 	.init_seq	=	tcp_v4_init_sequence,
1249 	.send_synack	=	tcp_v4_send_synack,
1250 	.queue_hash_add =	inet_csk_reqsk_queue_hash_add,
1251 };
1252 
1253 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1254 {
1255 	/* Never answer SYNs sent to broadcast or multicast */
1256 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1257 		goto drop;
1258 
1259 	return tcp_conn_request(&tcp_request_sock_ops,
1260 				&tcp_request_sock_ipv4_ops, sk, skb);
1261 
1262 drop:
1263 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1264 	return 0;
1265 }
1266 EXPORT_SYMBOL(tcp_v4_conn_request);
1267 
1268 
1269 /*
1270  * The three way handshake has completed - we got a valid synack -
1271  * now create the new socket.
1272  */
1273 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1274 				  struct request_sock *req,
1275 				  struct dst_entry *dst)
1276 {
1277 	struct inet_request_sock *ireq;
1278 	struct inet_sock *newinet;
1279 	struct tcp_sock *newtp;
1280 	struct sock *newsk;
1281 #ifdef CONFIG_TCP_MD5SIG
1282 	struct tcp_md5sig_key *key;
1283 #endif
1284 	struct ip_options_rcu *inet_opt;
1285 
1286 	if (sk_acceptq_is_full(sk))
1287 		goto exit_overflow;
1288 
1289 	newsk = tcp_create_openreq_child(sk, req, skb);
1290 	if (!newsk)
1291 		goto exit_nonewsk;
1292 
1293 	newsk->sk_gso_type = SKB_GSO_TCPV4;
1294 	inet_sk_rx_dst_set(newsk, skb);
1295 
1296 	newtp		      = tcp_sk(newsk);
1297 	newinet		      = inet_sk(newsk);
1298 	ireq		      = inet_rsk(req);
1299 	sk_daddr_set(newsk, ireq->ir_rmt_addr);
1300 	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1301 	newinet->inet_saddr	      = ireq->ir_loc_addr;
1302 	inet_opt	      = ireq->opt;
1303 	rcu_assign_pointer(newinet->inet_opt, inet_opt);
1304 	ireq->opt	      = NULL;
1305 	newinet->mc_index     = inet_iif(skb);
1306 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1307 	newinet->rcv_tos      = ip_hdr(skb)->tos;
1308 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1309 	inet_set_txhash(newsk);
1310 	if (inet_opt)
1311 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1312 	newinet->inet_id = newtp->write_seq ^ jiffies;
1313 
1314 	if (!dst) {
1315 		dst = inet_csk_route_child_sock(sk, newsk, req);
1316 		if (!dst)
1317 			goto put_and_exit;
1318 	} else {
1319 		/* syncookie case: see end of cookie_v4_check() */
1320 	}
1321 	sk_setup_caps(newsk, dst);
1322 
1323 	tcp_ca_openreq_child(newsk, dst);
1324 
1325 	tcp_sync_mss(newsk, dst_mtu(dst));
1326 	newtp->advmss = dst_metric_advmss(dst);
1327 	if (tcp_sk(sk)->rx_opt.user_mss &&
1328 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1329 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1330 
1331 	tcp_initialize_rcv_mss(newsk);
1332 
1333 #ifdef CONFIG_TCP_MD5SIG
1334 	/* Copy over the MD5 key from the original socket */
1335 	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1336 				AF_INET);
1337 	if (key != NULL) {
1338 		/*
1339 		 * We're using one, so create a matching key
1340 		 * on the newsk structure. If we fail to get
1341 		 * memory, then we end up not copying the key
1342 		 * across. Shucks.
1343 		 */
1344 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1345 			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
1346 		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1347 	}
1348 #endif
1349 
1350 	if (__inet_inherit_port(sk, newsk) < 0)
1351 		goto put_and_exit;
1352 	__inet_hash_nolisten(newsk, NULL);
1353 
1354 	return newsk;
1355 
1356 exit_overflow:
1357 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1358 exit_nonewsk:
1359 	dst_release(dst);
1360 exit:
1361 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1362 	return NULL;
1363 put_and_exit:
1364 	inet_csk_prepare_forced_close(newsk);
1365 	tcp_done(newsk);
1366 	goto exit;
1367 }
1368 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1369 
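/* A segment arrived on a listening socket: match it against pending
 * connection requests, then against established children, and finally (if
 * syncookies are enabled) validate a possible syncookie ACK.
 */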
1370 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1371 {
1372 	const struct tcphdr *th = tcp_hdr(skb);
1373 	const struct iphdr *iph = ip_hdr(skb);
1374 	struct request_sock *req;
1375 	struct sock *nsk;
1376 
1377 	req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
1378 	if (req) {
1379 		nsk = tcp_check_req(sk, skb, req, false);
1380 		reqsk_put(req);
1381 		return nsk;
1382 	}
1383 
1384 	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1385 			th->source, iph->daddr, th->dest, inet_iif(skb));
1386 
1387 	if (nsk) {
1388 		if (nsk->sk_state != TCP_TIME_WAIT) {
1389 			bh_lock_sock(nsk);
1390 			return nsk;
1391 		}
1392 		inet_twsk_put(inet_twsk(nsk));
1393 		return NULL;
1394 	}
1395 
1396 #ifdef CONFIG_SYN_COOKIES
1397 	if (!th->syn)
1398 		sk = cookie_v4_check(sk, skb);
1399 #endif
1400 	return sk;
1401 }
1402 
1403 /* The socket must have its spinlock held when we get
1404  * here.
1405  *
1406  * We have a potential double-lock case here, so even when
1407  * doing backlog processing we use the BH locking scheme.
1408  * This is because we cannot sleep with the original spinlock
1409  * held.
1410  */
1411 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1412 {
1413 	struct sock *rsk;
1414 
1415 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1416 		struct dst_entry *dst = sk->sk_rx_dst;
1417 
1418 		sock_rps_save_rxhash(sk, skb);
1419 		sk_mark_napi_id(sk, skb);
1420 		if (dst) {
1421 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1422 			    dst->ops->check(dst, 0) == NULL) {
1423 				dst_release(dst);
1424 				sk->sk_rx_dst = NULL;
1425 			}
1426 		}
1427 		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1428 		return 0;
1429 	}
1430 
1431 	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1432 		goto csum_err;
1433 
1434 	if (sk->sk_state == TCP_LISTEN) {
1435 		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1436 		if (!nsk)
1437 			goto discard;
1438 
1439 		if (nsk != sk) {
1440 			sock_rps_save_rxhash(nsk, skb);
1441 			sk_mark_napi_id(sk, skb);
1442 			if (tcp_child_process(sk, nsk, skb)) {
1443 				rsk = nsk;
1444 				goto reset;
1445 			}
1446 			return 0;
1447 		}
1448 	} else
1449 		sock_rps_save_rxhash(sk, skb);
1450 
1451 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1452 		rsk = sk;
1453 		goto reset;
1454 	}
1455 	return 0;
1456 
1457 reset:
1458 	tcp_v4_send_reset(rsk, skb);
1459 discard:
1460 	kfree_skb(skb);
1461 	/* Be careful here. If this function gets more complicated and
1462 	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1463 	 * might be destroyed here. This current version compiles correctly,
1464 	 * but you have been warned.
1465 	 */
1466 	return 0;
1467 
1468 csum_err:
1469 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1470 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1471 	goto discard;
1472 }
1473 EXPORT_SYMBOL(tcp_v4_do_rcv);
1474 
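/* Early demux: look up an established socket for this packet before the
 * routing decision, attach it to the skb, and reuse the socket's cached
 * input route when it is still valid for the incoming interface.
 */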
1475 void tcp_v4_early_demux(struct sk_buff *skb)
1476 {
1477 	const struct iphdr *iph;
1478 	const struct tcphdr *th;
1479 	struct sock *sk;
1480 
1481 	if (skb->pkt_type != PACKET_HOST)
1482 		return;
1483 
1484 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1485 		return;
1486 
1487 	iph = ip_hdr(skb);
1488 	th = tcp_hdr(skb);
1489 
1490 	if (th->doff < sizeof(struct tcphdr) / 4)
1491 		return;
1492 
1493 	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1494 				       iph->saddr, th->source,
1495 				       iph->daddr, ntohs(th->dest),
1496 				       skb->skb_iif);
1497 	if (sk) {
1498 		skb->sk = sk;
1499 		skb->destructor = sock_edemux;
1500 		if (sk_fullsock(sk)) {
1501 			struct dst_entry *dst = sk->sk_rx_dst;
1502 
1503 			if (dst)
1504 				dst = dst_check(dst, 0);
1505 			if (dst &&
1506 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1507 				skb_dst_set_noref(skb, dst);
1508 		}
1509 	}
1510 }
1511 
1512 /* Packet is added to VJ-style prequeue for processing in process
1513  * context, if a reader task is waiting. Apparently, this exciting
1514  * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1515  * failed somewhere. Latency? Burstiness? Well, at least now we will
1516  * see why it failed. 8)8)				  --ANK
1517  *
1518  */
1519 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1520 {
1521 	struct tcp_sock *tp = tcp_sk(sk);
1522 
1523 	if (sysctl_tcp_low_latency || !tp->ucopy.task)
1524 		return false;
1525 
1526 	if (skb->len <= tcp_hdrlen(skb) &&
1527 	    skb_queue_len(&tp->ucopy.prequeue) == 0)
1528 		return false;
1529 
1530 	/* Before escaping the RCU protected region, we need to take care of
1531 	 * the skb dst. Prequeue is only enabled for established sockets, and
1532 	 * for such sockets we might need the skb dst only to set sk->sk_rx_dst.
1533 	 * Instead of doing a full sk_rx_dst validity check here, let's perform
1534 	 * an optimistic check.
1535 	 */
1536 	if (likely(sk->sk_rx_dst))
1537 		skb_dst_drop(skb);
1538 	else
1539 		skb_dst_force(skb);
1540 
1541 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
1542 	tp->ucopy.memory += skb->truesize;
1543 	if (tp->ucopy.memory > sk->sk_rcvbuf) {
1544 		struct sk_buff *skb1;
1545 
1546 		BUG_ON(sock_owned_by_user(sk));
1547 
1548 		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1549 			sk_backlog_rcv(sk, skb1);
1550 			NET_INC_STATS_BH(sock_net(sk),
1551 					 LINUX_MIB_TCPPREQUEUEDROPPED);
1552 		}
1553 
1554 		tp->ucopy.memory = 0;
1555 	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1556 		wake_up_interruptible_sync_poll(sk_sleep(sk),
1557 					   POLLIN | POLLRDNORM | POLLRDBAND);
1558 		if (!inet_csk_ack_scheduled(sk))
1559 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1560 						  (3 * tcp_rto_min(sk)) / 4,
1561 						  TCP_RTO_MAX);
1562 	}
1563 	return true;
1564 }
1565 EXPORT_SYMBOL(tcp_prequeue);
1566 
1567 /*
1568  *	From tcp_input.c
1569  */
1570 
1571 int tcp_v4_rcv(struct sk_buff *skb)
1572 {
1573 	const struct iphdr *iph;
1574 	const struct tcphdr *th;
1575 	struct sock *sk;
1576 	int ret;
1577 	struct net *net = dev_net(skb->dev);
1578 
1579 	if (skb->pkt_type != PACKET_HOST)
1580 		goto discard_it;
1581 
1582 	/* Count it even if it's bad */
1583 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1584 
1585 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1586 		goto discard_it;
1587 
1588 	th = tcp_hdr(skb);
1589 
1590 	if (th->doff < sizeof(struct tcphdr) / 4)
1591 		goto bad_packet;
1592 	if (!pskb_may_pull(skb, th->doff * 4))
1593 		goto discard_it;
1594 
1595 	/* An explanation is required here, I think.
1596 	 * Packet length and doff are validated by header prediction,
1597 	 * provided the case of th->doff == 0 is eliminated.
1598 	 * So, we defer the checks. */
1599 
1600 	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1601 		goto csum_error;
1602 
1603 	th = tcp_hdr(skb);
1604 	iph = ip_hdr(skb);
1605 	/* This is tricky: we move the IPCB to its correct location inside TCP_SKB_CB().
1606 	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1607 	 */
1608 	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1609 		sizeof(struct inet_skb_parm));
1610 	barrier();
1611 
1612 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1613 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1614 				    skb->len - th->doff * 4);
1615 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1616 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1617 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1618 	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1619 	TCP_SKB_CB(skb)->sacked	 = 0;
1620 
1621 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1622 	if (!sk)
1623 		goto no_tcp_socket;
1624 
1625 process:
1626 	if (sk->sk_state == TCP_TIME_WAIT)
1627 		goto do_time_wait;
1628 
1629 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1630 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1631 		goto discard_and_relse;
1632 	}
1633 
1634 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1635 		goto discard_and_relse;
1636 
1637 #ifdef CONFIG_TCP_MD5SIG
1638 	/*
1639 	 * We really want to reject the packet as early as possible
1640 	 * if:
1641 	 *  o We're expecting an MD5'd packet and there is no MD5 TCP option
1642 	 *  o There is an MD5 option and we're not expecting one
1643 	 */
1644 	if (tcp_v4_inbound_md5_hash(sk, skb))
1645 		goto discard_and_relse;
1646 #endif
1647 
1648 	nf_reset(skb);
1649 
1650 	if (sk_filter(sk, skb))
1651 		goto discard_and_relse;
1652 
1653 	sk_incoming_cpu_update(sk);
1654 	skb->dev = NULL;
1655 
1656 	bh_lock_sock_nested(sk);
1657 	ret = 0;
1658 	if (!sock_owned_by_user(sk)) {
1659 		if (!tcp_prequeue(sk, skb))
1660 			ret = tcp_v4_do_rcv(sk, skb);
1661 	} else if (unlikely(sk_add_backlog(sk, skb,
1662 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1663 		bh_unlock_sock(sk);
1664 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1665 		goto discard_and_relse;
1666 	}
1667 	bh_unlock_sock(sk);
1668 
1669 	sock_put(sk);
1670 
1671 	return ret;
1672 
1673 no_tcp_socket:
1674 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1675 		goto discard_it;
1676 
1677 	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1678 csum_error:
1679 		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1680 bad_packet:
1681 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1682 	} else {
1683 		tcp_v4_send_reset(NULL, skb);
1684 	}
1685 
1686 discard_it:
1687 	/* Discard frame. */
1688 	kfree_skb(skb);
1689 	return 0;
1690 
1691 discard_and_relse:
1692 	sock_put(sk);
1693 	goto discard_it;
1694 
1695 do_time_wait:
1696 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1697 		inet_twsk_put(inet_twsk(sk));
1698 		goto discard_it;
1699 	}
1700 
1701 	if (skb->len < (th->doff << 2)) {
1702 		inet_twsk_put(inet_twsk(sk));
1703 		goto bad_packet;
1704 	}
1705 	if (tcp_checksum_complete(skb)) {
1706 		inet_twsk_put(inet_twsk(sk));
1707 		goto csum_error;
1708 	}
1709 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1710 	case TCP_TW_SYN: {
1711 		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1712 							&tcp_hashinfo,
1713 							iph->saddr, th->source,
1714 							iph->daddr, th->dest,
1715 							inet_iif(skb));
1716 		if (sk2) {
1717 			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1718 			inet_twsk_put(inet_twsk(sk));
1719 			sk = sk2;
1720 			goto process;
1721 		}
1722 		/* Fall through to ACK */
1723 	}
1724 	case TCP_TW_ACK:
1725 		tcp_v4_timewait_ack(sk, skb);
1726 		break;
1727 	case TCP_TW_RST:
1728 		goto no_tcp_socket;
1729 	case TCP_TW_SUCCESS:;
1730 	}
1731 	goto discard_it;
1732 }
1733 
1734 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1735 	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
1736 	.twsk_unique	= tcp_twsk_unique,
1737 	.twsk_destructor= tcp_twsk_destructor,
1738 };
1739 
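/* Cache the input route of a received packet on the socket, together with
 * the incoming interface index, for later early-demux validation.
 */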
1740 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1741 {
1742 	struct dst_entry *dst = skb_dst(skb);
1743 
1744 	if (dst) {
1745 		dst_hold(dst);
1746 		sk->sk_rx_dst = dst;
1747 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1748 	}
1749 }
1750 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1751 
1752 const struct inet_connection_sock_af_ops ipv4_specific = {
1753 	.queue_xmit	   = ip_queue_xmit,
1754 	.send_check	   = tcp_v4_send_check,
1755 	.rebuild_header	   = inet_sk_rebuild_header,
1756 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1757 	.conn_request	   = tcp_v4_conn_request,
1758 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
1759 	.net_header_len	   = sizeof(struct iphdr),
1760 	.setsockopt	   = ip_setsockopt,
1761 	.getsockopt	   = ip_getsockopt,
1762 	.addr2sockaddr	   = inet_csk_addr2sockaddr,
1763 	.sockaddr_len	   = sizeof(struct sockaddr_in),
1764 	.bind_conflict	   = inet_csk_bind_conflict,
1765 #ifdef CONFIG_COMPAT
1766 	.compat_setsockopt = compat_ip_setsockopt,
1767 	.compat_getsockopt = compat_ip_getsockopt,
1768 #endif
1769 	.mtu_reduced	   = tcp_v4_mtu_reduced,
1770 };
1771 EXPORT_SYMBOL(ipv4_specific);
1772 
1773 #ifdef CONFIG_TCP_MD5SIG
1774 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1775 	.md5_lookup		= tcp_v4_md5_lookup,
1776 	.calc_md5_hash		= tcp_v4_md5_hash_skb,
1777 	.md5_parse		= tcp_v4_parse_md5_keys,
1778 };
1779 #endif
1780 
1781 /* NOTE: A lot of things are set to zero explicitly by the call to
1782  *       sk_alloc(), so they need not be done here.
1783  */
1784 static int tcp_v4_init_sock(struct sock *sk)
1785 {
1786 	struct inet_connection_sock *icsk = inet_csk(sk);
1787 
1788 	tcp_init_sock(sk);
1789 
1790 	icsk->icsk_af_ops = &ipv4_specific;
1791 
1792 #ifdef CONFIG_TCP_MD5SIG
1793 	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1794 #endif
1795 
1796 	return 0;
1797 }
1798 
1799 void tcp_v4_destroy_sock(struct sock *sk)
1800 {
1801 	struct tcp_sock *tp = tcp_sk(sk);
1802 
1803 	tcp_clear_xmit_timers(sk);
1804 
1805 	tcp_cleanup_congestion_control(sk);
1806 
1807 	/* Clean up the write buffer. */
1808 	tcp_write_queue_purge(sk);
1809 
1810 	/* Clean up our, hopefully empty, out_of_order_queue. */
1811 	__skb_queue_purge(&tp->out_of_order_queue);
1812 
1813 #ifdef CONFIG_TCP_MD5SIG
1814 	/* Clean up the MD5 key list, if any */
1815 	if (tp->md5sig_info) {
1816 		tcp_clear_md5_list(sk);
1817 		kfree_rcu(tp->md5sig_info, rcu);
1818 		tp->md5sig_info = NULL;
1819 	}
1820 #endif
1821 
1822 	/* Clean up the prequeue; it really must be empty. */
1823 	__skb_queue_purge(&tp->ucopy.prequeue);
1824 
1825 	/* Clean up a referenced TCP bind bucket. */
1826 	if (inet_csk(sk)->icsk_bind_hash)
1827 		inet_put_port(sk);
1828 
1829 	BUG_ON(tp->fastopen_rsk != NULL);
1830 
1831 	/* If socket is aborted during connect operation */
1832 	tcp_free_fastopen_req(tp);
1833 
1834 	sk_sockets_allocated_dec(sk);
1835 	sock_release_memcg(sk);
1836 }
1837 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1838 
1839 #ifdef CONFIG_PROC_FS
1840 /* Proc filesystem TCP sock list dumping. */
1841 
1842 /*
1843  * Get the next listener socket following cur.  If cur is NULL, get the first
1844  * socket starting from the bucket given in st->bucket; when st->bucket is
1845  * zero the very first socket in the hash table is returned.
1846  */
1847 static void *listening_get_next(struct seq_file *seq, void *cur)
1848 {
1849 	struct inet_connection_sock *icsk;
1850 	struct hlist_nulls_node *node;
1851 	struct sock *sk = cur;
1852 	struct inet_listen_hashbucket *ilb;
1853 	struct tcp_iter_state *st = seq->private;
1854 	struct net *net = seq_file_net(seq);
1855 
1856 	if (!sk) {
1857 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1858 		spin_lock_bh(&ilb->lock);
1859 		sk = sk_nulls_head(&ilb->head);
1860 		st->offset = 0;
1861 		goto get_sk;
1862 	}
1863 	ilb = &tcp_hashinfo.listening_hash[st->bucket];
1864 	++st->num;
1865 	++st->offset;
1866 
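     	/*
     	 * If we were walking a listener's SYN queue, continue with the next
     	 * request_sock; otherwise advance from the listening socket itself.
     	 */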
1867 	if (st->state == TCP_SEQ_STATE_OPENREQ) {
1868 		struct request_sock *req = cur;
1869 
1870 		icsk = inet_csk(st->syn_wait_sk);
1871 		req = req->dl_next;
1872 		while (1) {
1873 			while (req) {
1874 				if (req->rsk_ops->family == st->family) {
1875 					cur = req;
1876 					goto out;
1877 				}
1878 				req = req->dl_next;
1879 			}
1880 			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1881 				break;
1882 get_req:
1883 			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1884 		}
1885 		sk	  = sk_nulls_next(st->syn_wait_sk);
1886 		st->state = TCP_SEQ_STATE_LISTENING;
1887 		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1888 	} else {
1889 		icsk = inet_csk(sk);
1890 		spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1891 		if (reqsk_queue_len(&icsk->icsk_accept_queue))
1892 			goto start_req;
1893 		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1894 		sk = sk_nulls_next(sk);
1895 	}
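     	/*
     	 * Scan the rest of this listening bucket.  Listeners of our family
     	 * are returned directly; for other listeners we still look into their
     	 * SYN queues, which may hold request_socks of the family we want.
     	 */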
1896 get_sk:
1897 	sk_nulls_for_each_from(sk, node) {
1898 		if (!net_eq(sock_net(sk), net))
1899 			continue;
1900 		if (sk->sk_family == st->family) {
1901 			cur = sk;
1902 			goto out;
1903 		}
1904 		icsk = inet_csk(sk);
1905 		spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1906 		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1907 start_req:
1908 			st->uid		= sock_i_uid(sk);
1909 			st->syn_wait_sk = sk;
1910 			st->state	= TCP_SEQ_STATE_OPENREQ;
1911 			st->sbucket	= 0;
1912 			goto get_req;
1913 		}
1914 		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1915 	}
1916 	spin_unlock_bh(&ilb->lock);
1917 	st->offset = 0;
1918 	if (++st->bucket < INET_LHTABLE_SIZE) {
1919 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1920 		spin_lock_bh(&ilb->lock);
1921 		sk = sk_nulls_head(&ilb->head);
1922 		goto get_sk;
1923 	}
1924 	cur = NULL;
1925 out:
1926 	return cur;
1927 }
1928 
1929 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1930 {
1931 	struct tcp_iter_state *st = seq->private;
1932 	void *rc;
1933 
1934 	st->bucket = 0;
1935 	st->offset = 0;
1936 	rc = listening_get_next(seq, NULL);
1937 
1938 	while (rc && *pos) {
1939 		rc = listening_get_next(seq, rc);
1940 		--*pos;
1941 	}
1942 	return rc;
1943 }
1944 
1945 static inline bool empty_bucket(const struct tcp_iter_state *st)
1946 {
1947 	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1948 }
1949 
1950 /*
1951  * Get the first established socket, starting from the bucket given in st->bucket.
1952  * If st->bucket is zero, the very first socket in the hash is returned.
1953  */
1954 static void *established_get_first(struct seq_file *seq)
1955 {
1956 	struct tcp_iter_state *st = seq->private;
1957 	struct net *net = seq_file_net(seq);
1958 	void *rc = NULL;
1959 
1960 	st->offset = 0;
1961 	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1962 		struct sock *sk;
1963 		struct hlist_nulls_node *node;
1964 		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1965 
1966 		/* Lockless fast path for the common case of empty buckets */
1967 		if (empty_bucket(st))
1968 			continue;
1969 
1970 		spin_lock_bh(lock);
1971 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1972 			if (sk->sk_family != st->family ||
1973 			    !net_eq(sock_net(sk), net)) {
1974 				continue;
1975 			}
1976 			rc = sk;
1977 			goto out;
1978 		}
1979 		spin_unlock_bh(lock);
1980 	}
1981 out:
1982 	return rc;
1983 }
1984 
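     /*
      * Advance to the next established (or TIME_WAIT) socket in the current
      * ehash chain; when the chain is exhausted, drop its lock and continue
      * with the first socket of the next non-empty bucket.
      */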
1985 static void *established_get_next(struct seq_file *seq, void *cur)
1986 {
1987 	struct sock *sk = cur;
1988 	struct hlist_nulls_node *node;
1989 	struct tcp_iter_state *st = seq->private;
1990 	struct net *net = seq_file_net(seq);
1991 
1992 	++st->num;
1993 	++st->offset;
1994 
1995 	sk = sk_nulls_next(sk);
1996 
1997 	sk_nulls_for_each_from(sk, node) {
1998 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1999 			return sk;
2000 	}
2001 
2002 	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2003 	++st->bucket;
2004 	return established_get_first(seq);
2005 }
2006 
2007 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2008 {
2009 	struct tcp_iter_state *st = seq->private;
2010 	void *rc;
2011 
2012 	st->bucket = 0;
2013 	rc = established_get_first(seq);
2014 
2015 	while (rc && pos) {
2016 		rc = established_get_next(seq, rc);
2017 		--pos;
2018 	}
2019 	return rc;
2020 }
2021 
2022 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2023 {
2024 	void *rc;
2025 	struct tcp_iter_state *st = seq->private;
2026 
2027 	st->state = TCP_SEQ_STATE_LISTENING;
2028 	rc	  = listening_get_idx(seq, &pos);
2029 
2030 	if (!rc) {
2031 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2032 		rc	  = established_get_idx(seq, pos);
2033 	}
2034 
2035 	return rc;
2036 }
2037 
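     /*
      * Try to resume the walk at the bucket and in-bucket offset saved in the
      * iterator state instead of rescanning from the beginning; st->num is
      * restored afterwards so the displayed entry numbers stay consistent.
      */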
2038 static void *tcp_seek_last_pos(struct seq_file *seq)
2039 {
2040 	struct tcp_iter_state *st = seq->private;
2041 	int offset = st->offset;
2042 	int orig_num = st->num;
2043 	void *rc = NULL;
2044 
2045 	switch (st->state) {
2046 	case TCP_SEQ_STATE_OPENREQ:
2047 	case TCP_SEQ_STATE_LISTENING:
2048 		if (st->bucket >= INET_LHTABLE_SIZE)
2049 			break;
2050 		st->state = TCP_SEQ_STATE_LISTENING;
2051 		rc = listening_get_next(seq, NULL);
2052 		while (offset-- && rc)
2053 			rc = listening_get_next(seq, rc);
2054 		if (rc)
2055 			break;
2056 		st->bucket = 0;
2057 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2058 		/* Fallthrough */
2059 	case TCP_SEQ_STATE_ESTABLISHED:
2060 		if (st->bucket > tcp_hashinfo.ehash_mask)
2061 			break;
2062 		rc = established_get_first(seq);
2063 		while (offset-- && rc)
2064 			rc = established_get_next(seq, rc);
2065 	}
2066 
2067 	st->num = orig_num;
2068 
2069 	return rc;
2070 }
2071 
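     /*
      * seq_file ->start(): if we are continuing from the position we stopped
      * at last time, resume via tcp_seek_last_pos(); otherwise fall back to a
      * full walk up to *pos.
      */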
2072 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2073 {
2074 	struct tcp_iter_state *st = seq->private;
2075 	void *rc;
2076 
2077 	if (*pos && *pos == st->last_pos) {
2078 		rc = tcp_seek_last_pos(seq);
2079 		if (rc)
2080 			goto out;
2081 	}
2082 
2083 	st->state = TCP_SEQ_STATE_LISTENING;
2084 	st->num = 0;
2085 	st->bucket = 0;
2086 	st->offset = 0;
2087 	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2088 
2089 out:
2090 	st->last_pos = *pos;
2091 	return rc;
2092 }
2093 
2094 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2095 {
2096 	struct tcp_iter_state *st = seq->private;
2097 	void *rc = NULL;
2098 
2099 	if (v == SEQ_START_TOKEN) {
2100 		rc = tcp_get_idx(seq, 0);
2101 		goto out;
2102 	}
2103 
2104 	switch (st->state) {
2105 	case TCP_SEQ_STATE_OPENREQ:
2106 	case TCP_SEQ_STATE_LISTENING:
2107 		rc = listening_get_next(seq, v);
2108 		if (!rc) {
2109 			st->state = TCP_SEQ_STATE_ESTABLISHED;
2110 			st->bucket = 0;
2111 			st->offset = 0;
2112 			rc	  = established_get_first(seq);
2113 		}
2114 		break;
2115 	case TCP_SEQ_STATE_ESTABLISHED:
2116 		rc = established_get_next(seq, v);
2117 		break;
2118 	}
2119 out:
2120 	++*pos;
2121 	st->last_pos = *pos;
2122 	return rc;
2123 }
2124 
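     /*
      * seq_file ->stop(): release whichever lock the iterator is still
      * holding for its current state.
      */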
2125 static void tcp_seq_stop(struct seq_file *seq, void *v)
2126 {
2127 	struct tcp_iter_state *st = seq->private;
2128 
2129 	switch (st->state) {
2130 	case TCP_SEQ_STATE_OPENREQ:
2131 		if (v) {
2132 			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2133 			spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2134 		}
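     		/* Fall through to release the listening bucket lock too. */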
2135 	case TCP_SEQ_STATE_LISTENING:
2136 		if (v != SEQ_START_TOKEN)
2137 			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2138 		break;
2139 	case TCP_SEQ_STATE_ESTABLISHED:
2140 		if (v)
2141 			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2142 		break;
2143 	}
2144 }
2145 
2146 int tcp_seq_open(struct inode *inode, struct file *file)
2147 {
2148 	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2149 	struct tcp_iter_state *s;
2150 	int err;
2151 
2152 	err = seq_open_net(inode, file, &afinfo->seq_ops,
2153 			  sizeof(struct tcp_iter_state));
2154 	if (err < 0)
2155 		return err;
2156 
2157 	s = ((struct seq_file *)file->private_data)->private;
2158 	s->family		= afinfo->family;
2159 	s->last_pos		= 0;
2160 	return 0;
2161 }
2162 EXPORT_SYMBOL(tcp_seq_open);
2163 
2164 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2165 {
2166 	int rc = 0;
2167 	struct proc_dir_entry *p;
2168 
2169 	afinfo->seq_ops.start		= tcp_seq_start;
2170 	afinfo->seq_ops.next		= tcp_seq_next;
2171 	afinfo->seq_ops.stop		= tcp_seq_stop;
2172 
2173 	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2174 			     afinfo->seq_fops, afinfo);
2175 	if (!p)
2176 		rc = -ENOMEM;
2177 	return rc;
2178 }
2179 EXPORT_SYMBOL(tcp_proc_register);
2180 
2181 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2182 {
2183 	remove_proc_entry(afinfo->name, net->proc_net);
2184 }
2185 EXPORT_SYMBOL(tcp_proc_unregister);
2186 
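     /*
      * Format one pending connection request (SYN_RECV) in /proc/net/tcp
      * style; several fields are printed as constants because a request_sock
      * has no inode or full socket state of its own.
      */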
2187 static void get_openreq4(const struct request_sock *req,
2188 			 struct seq_file *f, int i, kuid_t uid)
2189 {
2190 	const struct inet_request_sock *ireq = inet_rsk(req);
2191 	long delta = req->rsk_timer.expires - jiffies;
2192 
2193 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2194 		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2195 		i,
2196 		ireq->ir_loc_addr,
2197 		ireq->ir_num,
2198 		ireq->ir_rmt_addr,
2199 		ntohs(ireq->ir_rmt_port),
2200 		TCP_SYN_RECV,
2201 		0, 0, /* could print option size, but that is af dependent. */
2202 		1,    /* timers active (only the expire timer) */
2203 		jiffies_delta_to_clock_t(delta),
2204 		req->num_timeout,
2205 		from_kuid_munged(seq_user_ns(f), uid),
2206 		0, /* non-standard timer */
2207 		0, /* open_requests have no inode */
2208 		0,
2209 		req);
2210 }
2211 
2212 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2213 {
2214 	int timer_active;
2215 	unsigned long timer_expires;
2216 	const struct tcp_sock *tp = tcp_sk(sk);
2217 	const struct inet_connection_sock *icsk = inet_csk(sk);
2218 	const struct inet_sock *inet = inet_sk(sk);
2219 	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2220 	__be32 dest = inet->inet_daddr;
2221 	__be32 src = inet->inet_rcv_saddr;
2222 	__u16 destp = ntohs(inet->inet_dport);
2223 	__u16 srcp = ntohs(inet->inet_sport);
2224 	int rx_queue;
2225 
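     	/*
     	 * Timer codes reported in the "tr" column: 1 = retransmit/loss-probe
     	 * timer, 4 = zero window probe timer, 2 = sk_timer (e.g. keepalive),
     	 * 0 = no timer pending.
     	 */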
2226 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2227 	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2228 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2229 		timer_active	= 1;
2230 		timer_expires	= icsk->icsk_timeout;
2231 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2232 		timer_active	= 4;
2233 		timer_expires	= icsk->icsk_timeout;
2234 	} else if (timer_pending(&sk->sk_timer)) {
2235 		timer_active	= 2;
2236 		timer_expires	= sk->sk_timer.expires;
2237 	} else {
2238 		timer_active	= 0;
2239 		timer_expires = jiffies;
2240 	}
2241 
2242 	if (sk->sk_state == TCP_LISTEN)
2243 		rx_queue = sk->sk_ack_backlog;
2244 	else
2245 		/*
2246 		 * Because we don't lock the socket, we might find a transient negative value.
2247 		 */
2248 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2249 
2250 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2251 			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2252 		i, src, srcp, dest, destp, sk->sk_state,
2253 		tp->write_seq - tp->snd_una,
2254 		rx_queue,
2255 		timer_active,
2256 		jiffies_delta_to_clock_t(timer_expires - jiffies),
2257 		icsk->icsk_retransmits,
2258 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2259 		icsk->icsk_probes_out,
2260 		sock_i_ino(sk),
2261 		atomic_read(&sk->sk_refcnt), sk,
2262 		jiffies_to_clock_t(icsk->icsk_rto),
2263 		jiffies_to_clock_t(icsk->icsk_ack.ato),
2264 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2265 		tp->snd_cwnd,
2266 		sk->sk_state == TCP_LISTEN ?
2267 		    (fastopenq ? fastopenq->max_qlen : 0) :
2268 		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2269 }
2270 
2271 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2272 			       struct seq_file *f, int i)
2273 {
2274 	__be32 dest, src;
2275 	__u16 destp, srcp;
2276 	s32 delta = tw->tw_ttd - inet_tw_time_stamp();
2277 
2278 	dest  = tw->tw_daddr;
2279 	src   = tw->tw_rcv_saddr;
2280 	destp = ntohs(tw->tw_dport);
2281 	srcp  = ntohs(tw->tw_sport);
2282 
2283 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2284 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2285 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2286 		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2287 		atomic_read(&tw->tw_refcnt), tw);
2288 }
2289 
2290 #define TMPSZ 150
2291 
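     /*
      * Every record written to /proc/net/tcp is padded to TMPSZ - 1
      * characters and terminated with a newline, so all lines have the same
      * fixed length.
      */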
2292 static int tcp4_seq_show(struct seq_file *seq, void *v)
2293 {
2294 	struct tcp_iter_state *st;
2295 	struct sock *sk = v;
2296 
2297 	seq_setwidth(seq, TMPSZ - 1);
2298 	if (v == SEQ_START_TOKEN) {
2299 		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2300 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2301 			   "inode");
2302 		goto out;
2303 	}
2304 	st = seq->private;
2305 
2306 	switch (st->state) {
2307 	case TCP_SEQ_STATE_LISTENING:
2308 	case TCP_SEQ_STATE_ESTABLISHED:
2309 		if (sk->sk_state == TCP_TIME_WAIT)
2310 			get_timewait4_sock(v, seq, st->num);
2311 		else
2312 			get_tcp4_sock(v, seq, st->num);
2313 		break;
2314 	case TCP_SEQ_STATE_OPENREQ:
2315 		get_openreq4(v, seq, st->num, st->uid);
2316 		break;
2317 	}
2318 out:
2319 	seq_pad(seq, '\n');
2320 	return 0;
2321 }
2322 
2323 static const struct file_operations tcp_afinfo_seq_fops = {
2324 	.owner   = THIS_MODULE,
2325 	.open    = tcp_seq_open,
2326 	.read    = seq_read,
2327 	.llseek  = seq_lseek,
2328 	.release = seq_release_net
2329 };
2330 
2331 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2332 	.name		= "tcp",
2333 	.family		= AF_INET,
2334 	.seq_fops	= &tcp_afinfo_seq_fops,
2335 	.seq_ops	= {
2336 		.show		= tcp4_seq_show,
2337 	},
2338 };
2339 
2340 static int __net_init tcp4_proc_init_net(struct net *net)
2341 {
2342 	return tcp_proc_register(net, &tcp4_seq_afinfo);
2343 }
2344 
2345 static void __net_exit tcp4_proc_exit_net(struct net *net)
2346 {
2347 	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2348 }
2349 
2350 static struct pernet_operations tcp4_net_ops = {
2351 	.init = tcp4_proc_init_net,
2352 	.exit = tcp4_proc_exit_net,
2353 };
2354 
2355 int __init tcp4_proc_init(void)
2356 {
2357 	return register_pernet_subsys(&tcp4_net_ops);
2358 }
2359 
2360 void tcp4_proc_exit(void)
2361 {
2362 	unregister_pernet_subsys(&tcp4_net_ops);
2363 }
2364 #endif /* CONFIG_PROC_FS */
2365 
2366 struct proto tcp_prot = {
2367 	.name			= "TCP",
2368 	.owner			= THIS_MODULE,
2369 	.close			= tcp_close,
2370 	.connect		= tcp_v4_connect,
2371 	.disconnect		= tcp_disconnect,
2372 	.accept			= inet_csk_accept,
2373 	.ioctl			= tcp_ioctl,
2374 	.init			= tcp_v4_init_sock,
2375 	.destroy		= tcp_v4_destroy_sock,
2376 	.shutdown		= tcp_shutdown,
2377 	.setsockopt		= tcp_setsockopt,
2378 	.getsockopt		= tcp_getsockopt,
2379 	.recvmsg		= tcp_recvmsg,
2380 	.sendmsg		= tcp_sendmsg,
2381 	.sendpage		= tcp_sendpage,
2382 	.backlog_rcv		= tcp_v4_do_rcv,
2383 	.release_cb		= tcp_release_cb,
2384 	.hash			= inet_hash,
2385 	.unhash			= inet_unhash,
2386 	.get_port		= inet_csk_get_port,
2387 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2388 	.stream_memory_free	= tcp_stream_memory_free,
2389 	.sockets_allocated	= &tcp_sockets_allocated,
2390 	.orphan_count		= &tcp_orphan_count,
2391 	.memory_allocated	= &tcp_memory_allocated,
2392 	.memory_pressure	= &tcp_memory_pressure,
2393 	.sysctl_mem		= sysctl_tcp_mem,
2394 	.sysctl_wmem		= sysctl_tcp_wmem,
2395 	.sysctl_rmem		= sysctl_tcp_rmem,
2396 	.max_header		= MAX_TCP_HEADER,
2397 	.obj_size		= sizeof(struct tcp_sock),
2398 	.slab_flags		= SLAB_DESTROY_BY_RCU,
2399 	.twsk_prot		= &tcp_timewait_sock_ops,
2400 	.rsk_prot		= &tcp_request_sock_ops,
2401 	.h.hashinfo		= &tcp_hashinfo,
2402 	.no_autobind		= true,
2403 #ifdef CONFIG_COMPAT
2404 	.compat_setsockopt	= compat_tcp_setsockopt,
2405 	.compat_getsockopt	= compat_tcp_getsockopt,
2406 #endif
2407 #ifdef CONFIG_MEMCG_KMEM
2408 	.init_cgroup		= tcp_init_cgroup,
2409 	.destroy_cgroup		= tcp_destroy_cgroup,
2410 	.proto_cgroup		= tcp_proto_cgroup,
2411 #endif
2412 };
2413 EXPORT_SYMBOL(tcp_prot);
2414 
2415 static void __net_exit tcp_sk_exit(struct net *net)
2416 {
2417 	int cpu;
2418 
2419 	for_each_possible_cpu(cpu)
2420 		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2421 	free_percpu(net->ipv4.tcp_sk);
2422 }
2423 
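     /*
      * Per-netns initialisation: create one TCP control socket per possible
      * CPU (used when TCP must transmit without an attached socket, e.g. for
      * resets) and set this namespace's TCP sysctl defaults.
      */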
2424 static int __net_init tcp_sk_init(struct net *net)
2425 {
2426 	int res, cpu;
2427 
2428 	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2429 	if (!net->ipv4.tcp_sk)
2430 		return -ENOMEM;
2431 
2432 	for_each_possible_cpu(cpu) {
2433 		struct sock *sk;
2434 
2435 		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2436 					   IPPROTO_TCP, net);
2437 		if (res)
2438 			goto fail;
2439 		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2440 	}
2441 	net->ipv4.sysctl_tcp_ecn = 2;
2442 	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2443 	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2444 	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2445 	return 0;
2446 
2447 fail:
2448 	tcp_sk_exit(net);
2449 
2450 	return res;
2451 }
2452 
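     /* Flush any remaining TIME_WAIT sockets when a batch of namespaces exits. */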
2453 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2454 {
2455 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2456 }
2457 
2458 static struct pernet_operations __net_initdata tcp_sk_ops = {
2459 	.init	   = tcp_sk_init,
2460 	.exit	   = tcp_sk_exit,
2461 	.exit_batch = tcp_sk_exit_batch,
2462 };
2463 
2464 void __init tcp_v4_init(void)
2465 {
2466 	inet_hashinfo_init(&tcp_hashinfo);
2467 	if (register_pernet_subsys(&tcp_sk_ops))
2468 		panic("Failed to create the TCP control socket.\n");
2469 }
2470