xref: /openbmc/linux/net/ipv4/tcp_ipv4.c (revision a8da474e)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Implementation of the Transmission Control Protocol(TCP).
7  *
8  *		IPv4 specific functions
9  *
10  *
11  *		code split from:
12  *		linux/ipv4/tcp.c
13  *		linux/ipv4/tcp_input.c
14  *		linux/ipv4/tcp_output.c
15  *
16  *		See tcp.c for author information
17  *
18  *	This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23 
24 /*
25  * Changes:
26  *		David S. Miller	:	New socket lookup architecture.
27  *					This code is dedicated to John Dyson.
28  *		David S. Miller :	Change semantics of established hash,
29  *					half is devoted to TIME_WAIT sockets
30  *					and the rest go in the other half.
31  *		Andi Kleen :		Add support for syncookies and fixed
32  *					some bugs: ip options weren't passed to
33  *					the TCP layer, missed a check for an
34  *					ACK bit.
35  *		Andi Kleen :		Implemented fast path mtu discovery.
36  *	     				Fixed many serious bugs in the
37  *					request_sock handling and moved
38  *					most of it into the af independent code.
39  *					Added tail drop and some other bugfixes.
40  *					Added new listen semantics.
41  *		Mike McLagan	:	Routing by source
42  *	Juan Jose Ciarlante:		ip_dynaddr bits
43  *		Andi Kleen:		various fixes.
44  *	Vitaly E. Lavrov	:	Transparent proxy revived after a
45  *					year-long coma.
46  *	Andi Kleen		:	Fix new listen.
47  *	Andi Kleen		:	Fix accept error reporting.
48  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
49  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
50  *					a single port at the same time.
51  */
52 
53 #define pr_fmt(fmt) "TCP: " fmt
54 
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65 
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/tcp_memcontrol.h>
77 #include <net/busy_poll.h>
78 
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84 
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87 
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91 
92 #ifdef CONFIG_TCP_MD5SIG
93 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94 			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
95 #endif
96 
97 struct inet_hashinfo tcp_hashinfo;
98 EXPORT_SYMBOL(tcp_hashinfo);
99 
100 static  __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
101 {
102 	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
103 					  ip_hdr(skb)->saddr,
104 					  tcp_hdr(skb)->dest,
105 					  tcp_hdr(skb)->source);
106 }
107 
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
109 {
110 	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
111 	struct tcp_sock *tp = tcp_sk(sk);
112 
113 	/* With PAWS, it is safe from the viewpoint
114 	   of data integrity. Even without PAWS it is safe provided the sequence
115 	   spaces do not overlap, i.e. at data rates <= 80Mbit/sec.
116 
117 	   Actually, the idea is close to VJ's, only the timestamp cache is
118 	   held not per host but per port pair, and the TW bucket is used as
119 	   the state holder.
120 
121 	   If the TW bucket has already been destroyed, we fall back to VJ's
122 	   scheme and use the initial timestamp retrieved from the peer table.
123 	 */
124 	if (tcptw->tw_ts_recent_stamp &&
125 	    (!twp || (sysctl_tcp_tw_reuse &&
126 			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
127 		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
128 		if (tp->write_seq == 0)
129 			tp->write_seq = 1;
130 		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
131 		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
132 		sock_hold(sktw);
133 		return 1;
134 	}
135 
136 	return 0;
137 }
138 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
139 
140 /* This will initiate an outgoing connection. */
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
142 {
143 	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
144 	struct inet_sock *inet = inet_sk(sk);
145 	struct tcp_sock *tp = tcp_sk(sk);
146 	__be16 orig_sport, orig_dport;
147 	__be32 daddr, nexthop;
148 	struct flowi4 *fl4;
149 	struct rtable *rt;
150 	int err;
151 	struct ip_options_rcu *inet_opt;
152 
153 	if (addr_len < sizeof(struct sockaddr_in))
154 		return -EINVAL;
155 
156 	if (usin->sin_family != AF_INET)
157 		return -EAFNOSUPPORT;
158 
159 	nexthop = daddr = usin->sin_addr.s_addr;
160 	inet_opt = rcu_dereference_protected(inet->inet_opt,
161 					     sock_owned_by_user(sk));
162 	if (inet_opt && inet_opt->opt.srr) {
163 		if (!daddr)
164 			return -EINVAL;
165 		nexthop = inet_opt->opt.faddr;
166 	}
167 
168 	orig_sport = inet->inet_sport;
169 	orig_dport = usin->sin_port;
170 	fl4 = &inet->cork.fl.u.ip4;
171 	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
172 			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
173 			      IPPROTO_TCP,
174 			      orig_sport, orig_dport, sk);
175 	if (IS_ERR(rt)) {
176 		err = PTR_ERR(rt);
177 		if (err == -ENETUNREACH)
178 			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
179 		return err;
180 	}
181 
182 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
183 		ip_rt_put(rt);
184 		return -ENETUNREACH;
185 	}
186 
187 	if (!inet_opt || !inet_opt->opt.srr)
188 		daddr = fl4->daddr;
189 
190 	if (!inet->inet_saddr)
191 		inet->inet_saddr = fl4->saddr;
192 	sk_rcv_saddr_set(sk, inet->inet_saddr);
193 
194 	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
195 		/* Reset inherited state */
196 		tp->rx_opt.ts_recent	   = 0;
197 		tp->rx_opt.ts_recent_stamp = 0;
198 		if (likely(!tp->repair))
199 			tp->write_seq	   = 0;
200 	}
201 
202 	if (tcp_death_row.sysctl_tw_recycle &&
203 	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
204 		tcp_fetch_timewait_stamp(sk, &rt->dst);
205 
206 	inet->inet_dport = usin->sin_port;
207 	sk_daddr_set(sk, daddr);
208 
209 	inet_csk(sk)->icsk_ext_hdr_len = 0;
210 	if (inet_opt)
211 		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
212 
213 	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
214 
215 	/* Socket identity is still unknown (sport may be zero).
216 	 * However, we set the state to SYN-SENT and, without releasing the
217 	 * socket lock, select a source port, enter ourselves into the hash
218 	 * tables and complete initialization after this.
219 	 */
220 	tcp_set_state(sk, TCP_SYN_SENT);
221 	err = inet_hash_connect(&tcp_death_row, sk);
222 	if (err)
223 		goto failure;
224 
225 	sk_set_txhash(sk);
226 
227 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228 			       inet->inet_sport, inet->inet_dport, sk);
229 	if (IS_ERR(rt)) {
230 		err = PTR_ERR(rt);
231 		rt = NULL;
232 		goto failure;
233 	}
234 	/* OK, now commit destination to socket.  */
235 	sk->sk_gso_type = SKB_GSO_TCPV4;
236 	sk_setup_caps(sk, &rt->dst);
237 
238 	if (!tp->write_seq && likely(!tp->repair))
239 		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
240 							   inet->inet_daddr,
241 							   inet->inet_sport,
242 							   usin->sin_port);
243 
244 	inet->inet_id = tp->write_seq ^ jiffies;
245 
246 	err = tcp_connect(sk);
247 
248 	rt = NULL;
249 	if (err)
250 		goto failure;
251 
252 	return 0;
253 
254 failure:
255 	/*
256 	 * This unhashes the socket and releases the local port,
257 	 * if necessary.
258 	 */
259 	tcp_set_state(sk, TCP_CLOSE);
260 	ip_rt_put(rt);
261 	sk->sk_route_caps = 0;
262 	inet->inet_dport = 0;
263 	return err;
264 }
265 EXPORT_SYMBOL(tcp_v4_connect);
266 
267 /*
268  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
269  * It can be called through tcp_release_cb() if the socket was owned by the
270  * user at the time tcp_v4_err() was called to handle the ICMP message.
271  */
272 void tcp_v4_mtu_reduced(struct sock *sk)
273 {
274 	struct dst_entry *dst;
275 	struct inet_sock *inet = inet_sk(sk);
276 	u32 mtu = tcp_sk(sk)->mtu_info;
277 
278 	dst = inet_csk_update_pmtu(sk, mtu);
279 	if (!dst)
280 		return;
281 
282 	/* Something is about to go wrong... Remember the soft error
283 	 * in case this connection is not able to recover.
284 	 */
285 	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
286 		sk->sk_err_soft = EMSGSIZE;
287 
288 	mtu = dst_mtu(dst);
289 
290 	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
291 	    ip_sk_accept_pmtu(sk) &&
292 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
293 		tcp_sync_mss(sk, mtu);
294 
295 		/* Resend the TCP packet because it's
296 		 * clear that the old packet has been
297 		 * dropped. This is the new "fast" path mtu
298 		 * discovery.
299 		 */
300 		tcp_simple_retransmit(sk);
301 	} /* else let the usual retransmit timer handle it */
302 }
303 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
304 
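/* Handle an ICMP redirect for this socket: if the cached route is still
 * valid, let it update itself via its redirect handler.
 */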
305 static void do_redirect(struct sk_buff *skb, struct sock *sk)
306 {
307 	struct dst_entry *dst = __sk_dst_check(sk, 0);
308 
309 	if (dst)
310 		dst->ops->redirect(dst, sk, skb);
311 }
312 
313 
314 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
315 void tcp_req_err(struct sock *sk, u32 seq)
316 {
317 	struct request_sock *req = inet_reqsk(sk);
318 	struct net *net = sock_net(sk);
319 
320 	/* ICMPs are not backlogged, hence we cannot get
321 	 * an established socket here.
322 	 */
323 	WARN_ON(req->sk);
324 
325 	if (seq != tcp_rsk(req)->snt_isn) {
326 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
327 	} else {
328 		/*
329 		 * Still in SYN_RECV, just remove it silently.
330 		 * There is no good way to pass the error to the newly
331 		 * created socket, and POSIX does not want network
332 		 * errors returned from accept().
333 		 */
334 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
335 		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
336 	}
337 	reqsk_put(req);
338 }
339 EXPORT_SYMBOL(tcp_req_err);
340 
341 /*
342  * This routine is called by the ICMP module when it gets some
343  * sort of error condition.  If err < 0 then the socket should
344  * be closed and the error returned to the user.  If err > 0
345  * it's just the icmp type << 8 | icmp code.  After adjustment,
346  * the header points to the first 8 bytes of the TCP header.  We need
347  * to find the appropriate port.
348  *
349  * The locking strategy used here is very "optimistic". When
350  * someone else accesses the socket the ICMP is just dropped
351  * and for some paths there is no check at all.
352  * A more general error queue to queue errors for later handling
353  * is probably better.
354  *
355  */
356 
357 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
358 {
359 	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
360 	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
361 	struct inet_connection_sock *icsk;
362 	struct tcp_sock *tp;
363 	struct inet_sock *inet;
364 	const int type = icmp_hdr(icmp_skb)->type;
365 	const int code = icmp_hdr(icmp_skb)->code;
366 	struct sock *sk;
367 	struct sk_buff *skb;
368 	struct request_sock *fastopen;
369 	__u32 seq, snd_una;
370 	__u32 remaining;
371 	int err;
372 	struct net *net = dev_net(icmp_skb->dev);
373 
374 	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
375 				       th->dest, iph->saddr, ntohs(th->source),
376 				       inet_iif(icmp_skb));
377 	if (!sk) {
378 		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
379 		return;
380 	}
381 	if (sk->sk_state == TCP_TIME_WAIT) {
382 		inet_twsk_put(inet_twsk(sk));
383 		return;
384 	}
385 	seq = ntohl(th->seq);
386 	if (sk->sk_state == TCP_NEW_SYN_RECV)
387 		return tcp_req_err(sk, seq);
388 
389 	bh_lock_sock(sk);
390 	/* If too many ICMPs get dropped on busy
391 	 * servers this needs to be solved differently.
392 	 * We do take care of the PMTU discovery (RFC 1191) special case:
393 	 * we can receive locally generated ICMP messages while the socket is held.
394 	 */
395 	if (sock_owned_by_user(sk)) {
396 		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
397 			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
398 	}
399 	if (sk->sk_state == TCP_CLOSE)
400 		goto out;
401 
402 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
403 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
404 		goto out;
405 	}
406 
407 	icsk = inet_csk(sk);
408 	tp = tcp_sk(sk);
409 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
410 	fastopen = tp->fastopen_rsk;
411 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
412 	if (sk->sk_state != TCP_LISTEN &&
413 	    !between(seq, snd_una, tp->snd_nxt)) {
414 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
415 		goto out;
416 	}
417 
418 	switch (type) {
419 	case ICMP_REDIRECT:
420 		do_redirect(icmp_skb, sk);
421 		goto out;
422 	case ICMP_SOURCE_QUENCH:
423 		/* Just silently ignore these. */
424 		goto out;
425 	case ICMP_PARAMETERPROB:
426 		err = EPROTO;
427 		break;
428 	case ICMP_DEST_UNREACH:
429 		if (code > NR_ICMP_UNREACH)
430 			goto out;
431 
432 		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
433 			/* We are not interested in TCP_LISTEN and open_requests
434 			 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
435 			 * they should go through unfragmented).
436 			 */
437 			if (sk->sk_state == TCP_LISTEN)
438 				goto out;
439 
440 			tp->mtu_info = info;
441 			if (!sock_owned_by_user(sk)) {
442 				tcp_v4_mtu_reduced(sk);
443 			} else {
444 				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
445 					sock_hold(sk);
446 			}
447 			goto out;
448 		}
449 
450 		err = icmp_err_convert[code].errno;
451 		/* check if icmp_skb allows revert of backoff
452 		 * (see draft-zimmermann-tcp-lcd) */
453 		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
454 			break;
455 		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
456 		    !icsk->icsk_backoff || fastopen)
457 			break;
458 
459 		if (sock_owned_by_user(sk))
460 			break;
461 
462 		icsk->icsk_backoff--;
463 		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
464 					       TCP_TIMEOUT_INIT;
465 		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
466 
467 		skb = tcp_write_queue_head(sk);
468 		BUG_ON(!skb);
469 
470 		remaining = icsk->icsk_rto -
471 			    min(icsk->icsk_rto,
472 				tcp_time_stamp - tcp_skb_timestamp(skb));
473 
474 		if (remaining) {
475 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
476 						  remaining, TCP_RTO_MAX);
477 		} else {
478 			/* RTO revert clocked out retransmission.
479 			 * Will retransmit now */
480 			tcp_retransmit_timer(sk);
481 		}
482 
483 		break;
484 	case ICMP_TIME_EXCEEDED:
485 		err = EHOSTUNREACH;
486 		break;
487 	default:
488 		goto out;
489 	}
490 
491 	switch (sk->sk_state) {
492 	case TCP_SYN_SENT:
493 	case TCP_SYN_RECV:
494 		/* Only in fast or simultaneous open. If a fast open socket
495 		 * is already accepted, it is treated as a connected one below.
496 		 */
497 		if (fastopen && !fastopen->sk)
498 			break;
499 
500 		if (!sock_owned_by_user(sk)) {
501 			sk->sk_err = err;
502 
503 			sk->sk_error_report(sk);
504 
505 			tcp_done(sk);
506 		} else {
507 			sk->sk_err_soft = err;
508 		}
509 		goto out;
510 	}
511 
512 	/* If we've already connected we will keep trying
513 	 * until we time out, or the user gives up.
514 	 *
515 	 * rfc1122 4.2.3.9 allows to consider as hard errors
516 	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
517 	 * but it is obsoleted by pmtu discovery).
518 	 *
519 	 * Note, that in modern internet, where routing is unreliable
520 	 * and in each dark corner broken firewalls sit, sending random
521 	 * errors ordered by their masters even this two messages finally lose
522 	 * their original sense (even Linux sends invalid PORT_UNREACHs)
523 	 *
524 	 * Now we are in compliance with RFCs.
525 	 *							--ANK (980905)
526 	 */
527 
528 	inet = inet_sk(sk);
529 	if (!sock_owned_by_user(sk) && inet->recverr) {
530 		sk->sk_err = err;
531 		sk->sk_error_report(sk);
532 	} else	{ /* Only an error on timeout */
533 		sk->sk_err_soft = err;
534 	}
535 
536 out:
537 	bh_unlock_sock(sk);
538 	sock_put(sk);
539 }
540 
541 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
542 {
543 	struct tcphdr *th = tcp_hdr(skb);
544 
545 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
546 		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
547 		skb->csum_start = skb_transport_header(skb) - skb->head;
548 		skb->csum_offset = offsetof(struct tcphdr, check);
549 	} else {
550 		th->check = tcp_v4_check(skb->len, saddr, daddr,
551 					 csum_partial(th,
552 						      th->doff << 2,
553 						      skb->csum));
554 	}
555 }
556 
557 /* This routine computes an IPv4 TCP checksum. */
558 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
559 {
560 	const struct inet_sock *inet = inet_sk(sk);
561 
562 	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
563 }
564 EXPORT_SYMBOL(tcp_v4_send_check);
565 
566 /*
567  *	This routine will send an RST to the other tcp.
568  *
569  *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
570  *		      for reset.
571  *	Answer: if a packet caused an RST, it is not for a socket
572  *		existing in our system; if it is matched to a socket,
573  *		it is just a duplicate segment or a bug in the other side's TCP.
574  *		So we build the reply based only on the parameters
575  *		that arrived with the segment.
576  *	Exception: precedence violation. We do not implement it in any case.
577  */
578 
579 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
580 {
581 	const struct tcphdr *th = tcp_hdr(skb);
582 	struct {
583 		struct tcphdr th;
584 #ifdef CONFIG_TCP_MD5SIG
585 		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
586 #endif
587 	} rep;
588 	struct ip_reply_arg arg;
589 #ifdef CONFIG_TCP_MD5SIG
590 	struct tcp_md5sig_key *key;
591 	const __u8 *hash_location = NULL;
592 	unsigned char newhash[16];
593 	int genhash;
594 	struct sock *sk1 = NULL;
595 #endif
596 	struct net *net;
597 
598 	/* Never send a reset in response to a reset. */
599 	if (th->rst)
600 		return;
601 
602 	/* If sk is not NULL, it means we did a successful lookup and the incoming
603 	 * route had to be correct. The prequeue might have dropped our dst.
604 	 */
605 	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
606 		return;
607 
608 	/* Swap the send and the receive. */
609 	memset(&rep, 0, sizeof(rep));
610 	rep.th.dest   = th->source;
611 	rep.th.source = th->dest;
612 	rep.th.doff   = sizeof(struct tcphdr) / 4;
613 	rep.th.rst    = 1;
614 
615 	if (th->ack) {
616 		rep.th.seq = th->ack_seq;
617 	} else {
618 		rep.th.ack = 1;
619 		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
620 				       skb->len - (th->doff << 2));
621 	}
622 
623 	memset(&arg, 0, sizeof(arg));
624 	arg.iov[0].iov_base = (unsigned char *)&rep;
625 	arg.iov[0].iov_len  = sizeof(rep.th);
626 
627 	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
628 #ifdef CONFIG_TCP_MD5SIG
629 	hash_location = tcp_parse_md5sig_option(th);
630 	if (!sk && hash_location) {
631 		/*
632 		 * The active side is lost. Try to find the listening socket through
633 		 * the source port, and then find the md5 key through that socket.
634 		 * We do not loosen security here:
635 		 * the incoming packet is checked with the md5 hash of the found key;
636 		 * no RST is generated if the md5 hash doesn't match.
637 		 */
638 		sk1 = __inet_lookup_listener(net,
639 					     &tcp_hashinfo, ip_hdr(skb)->saddr,
640 					     th->source, ip_hdr(skb)->daddr,
641 					     ntohs(th->source), inet_iif(skb));
642 		/* don't send an RST if we can't find a key */
643 		if (!sk1)
644 			return;
645 		rcu_read_lock();
646 		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
647 					&ip_hdr(skb)->saddr, AF_INET);
648 		if (!key)
649 			goto release_sk1;
650 
651 		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
652 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
653 			goto release_sk1;
654 	} else {
655 		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
656 					     &ip_hdr(skb)->saddr,
657 					     AF_INET) : NULL;
658 	}
659 
660 	if (key) {
661 		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
662 				   (TCPOPT_NOP << 16) |
663 				   (TCPOPT_MD5SIG << 8) |
664 				   TCPOLEN_MD5SIG);
665 		/* Update length and the length the header thinks exists */
666 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
667 		rep.th.doff = arg.iov[0].iov_len / 4;
668 
669 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
670 				     key, ip_hdr(skb)->saddr,
671 				     ip_hdr(skb)->daddr, &rep.th);
672 	}
673 #endif
674 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
675 				      ip_hdr(skb)->saddr, /* XXX */
676 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
677 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
678 	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
679 	/* When the socket is gone, all binding information is lost.
680 	 * Routing might fail in this case. No choice here: if we choose to force
681 	 * the input interface, we will misroute in the case of an asymmetric route.
682 	 */
683 	if (sk)
684 		arg.bound_dev_if = sk->sk_bound_dev_if;
685 
686 	arg.tos = ip_hdr(skb)->tos;
687 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
688 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
689 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
690 			      &arg, arg.iov[0].iov_len);
691 
692 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
693 	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
694 
695 #ifdef CONFIG_TCP_MD5SIG
696 release_sk1:
697 	if (sk1) {
698 		rcu_read_unlock();
699 		sock_put(sk1);
700 	}
701 #endif
702 }
703 
704 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
705    outside socket context, is certainly ugly. What can I do?
706  */
707 
708 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
709 			    u32 win, u32 tsval, u32 tsecr, int oif,
710 			    struct tcp_md5sig_key *key,
711 			    int reply_flags, u8 tos)
712 {
713 	const struct tcphdr *th = tcp_hdr(skb);
714 	struct {
715 		struct tcphdr th;
716 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
717 #ifdef CONFIG_TCP_MD5SIG
718 			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
719 #endif
720 			];
721 	} rep;
722 	struct ip_reply_arg arg;
723 	struct net *net = dev_net(skb_dst(skb)->dev);
724 
725 	memset(&rep.th, 0, sizeof(struct tcphdr));
726 	memset(&arg, 0, sizeof(arg));
727 
728 	arg.iov[0].iov_base = (unsigned char *)&rep;
729 	arg.iov[0].iov_len  = sizeof(rep.th);
730 	if (tsecr) {
731 		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
732 				   (TCPOPT_TIMESTAMP << 8) |
733 				   TCPOLEN_TIMESTAMP);
734 		rep.opt[1] = htonl(tsval);
735 		rep.opt[2] = htonl(tsecr);
736 		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
737 	}
738 
739 	/* Swap the send and the receive. */
740 	rep.th.dest    = th->source;
741 	rep.th.source  = th->dest;
742 	rep.th.doff    = arg.iov[0].iov_len / 4;
743 	rep.th.seq     = htonl(seq);
744 	rep.th.ack_seq = htonl(ack);
745 	rep.th.ack     = 1;
746 	rep.th.window  = htons(win);
747 
748 #ifdef CONFIG_TCP_MD5SIG
749 	if (key) {
750 		int offset = (tsecr) ? 3 : 0;
751 
752 		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
753 					  (TCPOPT_NOP << 16) |
754 					  (TCPOPT_MD5SIG << 8) |
755 					  TCPOLEN_MD5SIG);
756 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
757 		rep.th.doff = arg.iov[0].iov_len/4;
758 
759 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
760 				    key, ip_hdr(skb)->saddr,
761 				    ip_hdr(skb)->daddr, &rep.th);
762 	}
763 #endif
764 	arg.flags = reply_flags;
765 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
766 				      ip_hdr(skb)->saddr, /* XXX */
767 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
768 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
769 	if (oif)
770 		arg.bound_dev_if = oif;
771 	arg.tos = tos;
772 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
773 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
774 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
775 			      &arg, arg.iov[0].iov_len);
776 
777 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
778 }
779 
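/* Send an ACK on behalf of a TIME-WAIT socket, echoing the sequence, window
 * and timestamp state recorded in the timewait bucket, then drop the
 * reference taken by the lookup.
 */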
780 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
781 {
782 	struct inet_timewait_sock *tw = inet_twsk(sk);
783 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
784 
785 	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
786 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
787 			tcp_time_stamp + tcptw->tw_ts_offset,
788 			tcptw->tw_ts_recent,
789 			tw->tw_bound_dev_if,
790 			tcp_twsk_md5_key(tcptw),
791 			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
792 			tw->tw_tos
793 			);
794 
795 	inet_twsk_put(tw);
796 }
797 
798 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
799 				  struct request_sock *req)
800 {
801 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
802 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
803 	 */
804 	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
805 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
806 			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
807 			tcp_time_stamp,
808 			req->ts_recent,
809 			0,
810 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
811 					  AF_INET),
812 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
813 			ip_hdr(skb)->tos);
814 }
815 
816 /*
817  *	Send a SYN-ACK after having received a SYN.
818  *	This still operates on a request_sock only, not on a big
819  *	socket.
820  */
821 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
822 			      struct flowi *fl,
823 			      struct request_sock *req,
824 			      struct tcp_fastopen_cookie *foc,
825 				  bool attach_req)
826 {
827 	const struct inet_request_sock *ireq = inet_rsk(req);
828 	struct flowi4 fl4;
829 	int err = -1;
830 	struct sk_buff *skb;
831 
832 	/* First, grab a route. */
833 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
834 		return -1;
835 
836 	skb = tcp_make_synack(sk, dst, req, foc, attach_req);
837 
838 	if (skb) {
839 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
840 
841 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
842 					    ireq->ir_rmt_addr,
843 					    ireq->opt);
844 		err = net_xmit_eval(err);
845 	}
846 
847 	return err;
848 }
849 
850 /*
851  *	IPv4 request_sock destructor.
852  */
853 static void tcp_v4_reqsk_destructor(struct request_sock *req)
854 {
855 	kfree(inet_rsk(req)->opt);
856 }
857 
858 
859 #ifdef CONFIG_TCP_MD5SIG
860 /*
861  * RFC2385 MD5 checksumming requires a mapping of
862  * IP address->MD5 Key.
863  * We need to maintain these in the sk structure.
864  */
865 
866 /* Find the Key structure for an address.  */
867 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
868 					 const union tcp_md5_addr *addr,
869 					 int family)
870 {
871 	const struct tcp_sock *tp = tcp_sk(sk);
872 	struct tcp_md5sig_key *key;
873 	unsigned int size = sizeof(struct in_addr);
874 	const struct tcp_md5sig_info *md5sig;
875 
876 	/* caller either holds rcu_read_lock() or socket lock */
877 	md5sig = rcu_dereference_check(tp->md5sig_info,
878 				       sock_owned_by_user(sk) ||
879 				       lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
880 	if (!md5sig)
881 		return NULL;
882 #if IS_ENABLED(CONFIG_IPV6)
883 	if (family == AF_INET6)
884 		size = sizeof(struct in6_addr);
885 #endif
886 	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
887 		if (key->family != family)
888 			continue;
889 		if (!memcmp(&key->addr, addr, size))
890 			return key;
891 	}
892 	return NULL;
893 }
894 EXPORT_SYMBOL(tcp_md5_do_lookup);
895 
896 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
897 					 const struct sock *addr_sk)
898 {
899 	const union tcp_md5_addr *addr;
900 
901 	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
902 	return tcp_md5_do_lookup(sk, addr, AF_INET);
903 }
904 EXPORT_SYMBOL(tcp_v4_md5_lookup);
905 
906 /* This can be called on a newly created socket, from other files */
907 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
908 		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
909 {
910 	/* Add Key to the list */
911 	struct tcp_md5sig_key *key;
912 	struct tcp_sock *tp = tcp_sk(sk);
913 	struct tcp_md5sig_info *md5sig;
914 
915 	key = tcp_md5_do_lookup(sk, addr, family);
916 	if (key) {
917 		/* Pre-existing entry - just update that one. */
918 		memcpy(key->key, newkey, newkeylen);
919 		key->keylen = newkeylen;
920 		return 0;
921 	}
922 
923 	md5sig = rcu_dereference_protected(tp->md5sig_info,
924 					   sock_owned_by_user(sk));
925 	if (!md5sig) {
926 		md5sig = kmalloc(sizeof(*md5sig), gfp);
927 		if (!md5sig)
928 			return -ENOMEM;
929 
930 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
931 		INIT_HLIST_HEAD(&md5sig->head);
932 		rcu_assign_pointer(tp->md5sig_info, md5sig);
933 	}
934 
935 	key = sock_kmalloc(sk, sizeof(*key), gfp);
936 	if (!key)
937 		return -ENOMEM;
938 	if (!tcp_alloc_md5sig_pool()) {
939 		sock_kfree_s(sk, key, sizeof(*key));
940 		return -ENOMEM;
941 	}
942 
943 	memcpy(key->key, newkey, newkeylen);
944 	key->keylen = newkeylen;
945 	key->family = family;
946 	memcpy(&key->addr, addr,
947 	       (family == AF_INET6) ? sizeof(struct in6_addr) :
948 				      sizeof(struct in_addr));
949 	hlist_add_head_rcu(&key->node, &md5sig->head);
950 	return 0;
951 }
952 EXPORT_SYMBOL(tcp_md5_do_add);
953 
954 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
955 {
956 	struct tcp_md5sig_key *key;
957 
958 	key = tcp_md5_do_lookup(sk, addr, family);
959 	if (!key)
960 		return -ENOENT;
961 	hlist_del_rcu(&key->node);
962 	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
963 	kfree_rcu(key, rcu);
964 	return 0;
965 }
966 EXPORT_SYMBOL(tcp_md5_do_del);
967 
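/* Release every MD5 key attached to the socket. Called on socket teardown,
 * so no new entries can be added concurrently; freeing is deferred via RCU.
 */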
968 static void tcp_clear_md5_list(struct sock *sk)
969 {
970 	struct tcp_sock *tp = tcp_sk(sk);
971 	struct tcp_md5sig_key *key;
972 	struct hlist_node *n;
973 	struct tcp_md5sig_info *md5sig;
974 
975 	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
976 
977 	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
978 		hlist_del_rcu(&key->node);
979 		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
980 		kfree_rcu(key, rcu);
981 	}
982 }
983 
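/* setsockopt(TCP_MD5SIG) handler: a zero key length deletes the key for the
 * given peer address, otherwise the supplied key is added (or replaces an
 * existing one).
 */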
984 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
985 				 int optlen)
986 {
987 	struct tcp_md5sig cmd;
988 	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
989 
990 	if (optlen < sizeof(cmd))
991 		return -EINVAL;
992 
993 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
994 		return -EFAULT;
995 
996 	if (sin->sin_family != AF_INET)
997 		return -EINVAL;
998 
999 	if (!cmd.tcpm_keylen)
1000 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1001 				      AF_INET);
1002 
1003 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1004 		return -EINVAL;
1005 
1006 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1007 			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1008 			      GFP_KERNEL);
1009 }
1010 
1011 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1012 					__be32 daddr, __be32 saddr, int nbytes)
1013 {
1014 	struct tcp4_pseudohdr *bp;
1015 	struct scatterlist sg;
1016 
1017 	bp = &hp->md5_blk.ip4;
1018 
1019 	/*
1020 	 * 1. the TCP pseudo-header (in the order: source IP address,
1021 	 * destination IP address, zero-padded protocol number, and
1022 	 * segment length)
1023 	 */
1024 	bp->saddr = saddr;
1025 	bp->daddr = daddr;
1026 	bp->pad = 0;
1027 	bp->protocol = IPPROTO_TCP;
1028 	bp->len = cpu_to_be16(nbytes);
1029 
1030 	sg_init_one(&sg, bp, sizeof(*bp));
1031 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1032 }
1033 
1034 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1035 			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1036 {
1037 	struct tcp_md5sig_pool *hp;
1038 	struct hash_desc *desc;
1039 
1040 	hp = tcp_get_md5sig_pool();
1041 	if (!hp)
1042 		goto clear_hash_noput;
1043 	desc = &hp->md5_desc;
1044 
1045 	if (crypto_hash_init(desc))
1046 		goto clear_hash;
1047 	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1048 		goto clear_hash;
1049 	if (tcp_md5_hash_header(hp, th))
1050 		goto clear_hash;
1051 	if (tcp_md5_hash_key(hp, key))
1052 		goto clear_hash;
1053 	if (crypto_hash_final(desc, md5_hash))
1054 		goto clear_hash;
1055 
1056 	tcp_put_md5sig_pool();
1057 	return 0;
1058 
1059 clear_hash:
1060 	tcp_put_md5sig_pool();
1061 clear_hash_noput:
1062 	memset(md5_hash, 0, 16);
1063 	return 1;
1064 }
1065 
1066 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1067 			const struct sock *sk,
1068 			const struct sk_buff *skb)
1069 {
1070 	struct tcp_md5sig_pool *hp;
1071 	struct hash_desc *desc;
1072 	const struct tcphdr *th = tcp_hdr(skb);
1073 	__be32 saddr, daddr;
1074 
1075 	if (sk) { /* valid for established/request sockets */
1076 		saddr = sk->sk_rcv_saddr;
1077 		daddr = sk->sk_daddr;
1078 	} else {
1079 		const struct iphdr *iph = ip_hdr(skb);
1080 		saddr = iph->saddr;
1081 		daddr = iph->daddr;
1082 	}
1083 
1084 	hp = tcp_get_md5sig_pool();
1085 	if (!hp)
1086 		goto clear_hash_noput;
1087 	desc = &hp->md5_desc;
1088 
1089 	if (crypto_hash_init(desc))
1090 		goto clear_hash;
1091 
1092 	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1093 		goto clear_hash;
1094 	if (tcp_md5_hash_header(hp, th))
1095 		goto clear_hash;
1096 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1097 		goto clear_hash;
1098 	if (tcp_md5_hash_key(hp, key))
1099 		goto clear_hash;
1100 	if (crypto_hash_final(desc, md5_hash))
1101 		goto clear_hash;
1102 
1103 	tcp_put_md5sig_pool();
1104 	return 0;
1105 
1106 clear_hash:
1107 	tcp_put_md5sig_pool();
1108 clear_hash_noput:
1109 	memset(md5_hash, 0, 16);
1110 	return 1;
1111 }
1112 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1113 
1114 #endif
1115 
1116 /* Called with rcu_read_lock() */
1117 static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1118 				    const struct sk_buff *skb)
1119 {
1120 #ifdef CONFIG_TCP_MD5SIG
1121 	/*
1122 	 * This gets called for each TCP segment that arrives
1123 	 * so we want to be efficient.
1124 	 * We have 3 drop cases:
1125 	 * o No MD5 hash and one expected.
1126 	 * o MD5 hash and we're not expecting one.
1127 	 * o MD5 hash and it's wrong.
1128 	 */
1129 	const __u8 *hash_location = NULL;
1130 	struct tcp_md5sig_key *hash_expected;
1131 	const struct iphdr *iph = ip_hdr(skb);
1132 	const struct tcphdr *th = tcp_hdr(skb);
1133 	int genhash;
1134 	unsigned char newhash[16];
1135 
1136 	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1137 					  AF_INET);
1138 	hash_location = tcp_parse_md5sig_option(th);
1139 
1140 	/* We've parsed the options - do we have a hash? */
1141 	if (!hash_expected && !hash_location)
1142 		return false;
1143 
1144 	if (hash_expected && !hash_location) {
1145 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1146 		return true;
1147 	}
1148 
1149 	if (!hash_expected && hash_location) {
1150 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1151 		return true;
1152 	}
1153 
1154 	/* Okay, so this is hash_expected and hash_location -
1155 	 * so we need to calculate the checksum.
1156 	 */
1157 	genhash = tcp_v4_md5_hash_skb(newhash,
1158 				      hash_expected,
1159 				      NULL, skb);
1160 
1161 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1162 		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1163 				     &iph->saddr, ntohs(th->source),
1164 				     &iph->daddr, ntohs(th->dest),
1165 				     genhash ? " tcp_v4_calc_md5_hash failed"
1166 				     : "");
1167 		return true;
1168 	}
1169 	return false;
1170 #endif
1171 	return false;
1172 }
1173 
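/* Fill in the IPv4-specific fields of a new request sock from the incoming
 * SYN: addresses, the transparent-proxy flag and any saved IP options.
 */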
1174 static void tcp_v4_init_req(struct request_sock *req,
1175 			    const struct sock *sk_listener,
1176 			    struct sk_buff *skb)
1177 {
1178 	struct inet_request_sock *ireq = inet_rsk(req);
1179 
1180 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1181 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1182 	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
1183 	ireq->opt = tcp_v4_save_options(skb);
1184 }
1185 
1186 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1187 					  struct flowi *fl,
1188 					  const struct request_sock *req,
1189 					  bool *strict)
1190 {
1191 	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1192 
1193 	if (strict) {
1194 		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1195 			*strict = true;
1196 		else
1197 			*strict = false;
1198 	}
1199 
1200 	return dst;
1201 }
1202 
1203 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1204 	.family		=	PF_INET,
1205 	.obj_size	=	sizeof(struct tcp_request_sock),
1206 	.rtx_syn_ack	=	tcp_rtx_synack,
1207 	.send_ack	=	tcp_v4_reqsk_send_ack,
1208 	.destructor	=	tcp_v4_reqsk_destructor,
1209 	.send_reset	=	tcp_v4_send_reset,
1210 	.syn_ack_timeout =	tcp_syn_ack_timeout,
1211 };
1212 
1213 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1214 	.mss_clamp	=	TCP_MSS_DEFAULT,
1215 #ifdef CONFIG_TCP_MD5SIG
1216 	.req_md5_lookup	=	tcp_v4_md5_lookup,
1217 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1218 #endif
1219 	.init_req	=	tcp_v4_init_req,
1220 #ifdef CONFIG_SYN_COOKIES
1221 	.cookie_init_seq =	cookie_v4_init_sequence,
1222 #endif
1223 	.route_req	=	tcp_v4_route_req,
1224 	.init_seq	=	tcp_v4_init_sequence,
1225 	.send_synack	=	tcp_v4_send_synack,
1226 };
1227 
1228 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1229 {
1230 	/* Never answer SYNs sent to broadcast or multicast */
1231 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1232 		goto drop;
1233 
1234 	return tcp_conn_request(&tcp_request_sock_ops,
1235 				&tcp_request_sock_ipv4_ops, sk, skb);
1236 
1237 drop:
1238 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1239 	return 0;
1240 }
1241 EXPORT_SYMBOL(tcp_v4_conn_request);
1242 
1243 
1244 /*
1245  * The three-way handshake has completed - we got a valid synack -
1246  * now create the new socket.
1247  */
1248 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1249 				  struct request_sock *req,
1250 				  struct dst_entry *dst,
1251 				  struct request_sock *req_unhash,
1252 				  bool *own_req)
1253 {
1254 	struct inet_request_sock *ireq;
1255 	struct inet_sock *newinet;
1256 	struct tcp_sock *newtp;
1257 	struct sock *newsk;
1258 #ifdef CONFIG_TCP_MD5SIG
1259 	struct tcp_md5sig_key *key;
1260 #endif
1261 	struct ip_options_rcu *inet_opt;
1262 
1263 	if (sk_acceptq_is_full(sk))
1264 		goto exit_overflow;
1265 
1266 	newsk = tcp_create_openreq_child(sk, req, skb);
1267 	if (!newsk)
1268 		goto exit_nonewsk;
1269 
1270 	newsk->sk_gso_type = SKB_GSO_TCPV4;
1271 	inet_sk_rx_dst_set(newsk, skb);
1272 
1273 	newtp		      = tcp_sk(newsk);
1274 	newinet		      = inet_sk(newsk);
1275 	ireq		      = inet_rsk(req);
1276 	sk_daddr_set(newsk, ireq->ir_rmt_addr);
1277 	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1278 	newinet->inet_saddr	      = ireq->ir_loc_addr;
1279 	inet_opt	      = ireq->opt;
1280 	rcu_assign_pointer(newinet->inet_opt, inet_opt);
1281 	ireq->opt	      = NULL;
1282 	newinet->mc_index     = inet_iif(skb);
1283 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1284 	newinet->rcv_tos      = ip_hdr(skb)->tos;
1285 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1286 	if (inet_opt)
1287 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1288 	newinet->inet_id = newtp->write_seq ^ jiffies;
1289 
1290 	if (!dst) {
1291 		dst = inet_csk_route_child_sock(sk, newsk, req);
1292 		if (!dst)
1293 			goto put_and_exit;
1294 	} else {
1295 		/* syncookie case: see end of cookie_v4_check() */
1296 	}
1297 	sk_setup_caps(newsk, dst);
1298 
1299 	tcp_ca_openreq_child(newsk, dst);
1300 
1301 	tcp_sync_mss(newsk, dst_mtu(dst));
1302 	newtp->advmss = dst_metric_advmss(dst);
1303 	if (tcp_sk(sk)->rx_opt.user_mss &&
1304 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1305 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1306 
1307 	tcp_initialize_rcv_mss(newsk);
1308 
1309 #ifdef CONFIG_TCP_MD5SIG
1310 	/* Copy over the MD5 key from the original socket */
1311 	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1312 				AF_INET);
1313 	if (key) {
1314 		/*
1315 		 * We're using one, so create a matching key
1316 		 * on the newsk structure. If we fail to get
1317 		 * memory, then we end up not copying the key
1318 		 * across. Shucks.
1319 		 */
1320 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1321 			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
1322 		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1323 	}
1324 #endif
1325 
1326 	if (__inet_inherit_port(sk, newsk) < 0)
1327 		goto put_and_exit;
1328 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1329 	if (*own_req)
1330 		tcp_move_syn(newtp, req);
1331 
1332 	return newsk;
1333 
1334 exit_overflow:
1335 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1336 exit_nonewsk:
1337 	dst_release(dst);
1338 exit:
1339 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1340 	return NULL;
1341 put_and_exit:
1342 	inet_csk_prepare_forced_close(newsk);
1343 	tcp_done(newsk);
1344 	goto exit;
1345 }
1346 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1347 
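/* On a listening socket, a non-SYN segment may be the ACK that completes a
 * syncookie handshake; validate it and, if valid, return the new child socket.
 */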
1348 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1349 {
1350 #ifdef CONFIG_SYN_COOKIES
1351 	const struct tcphdr *th = tcp_hdr(skb);
1352 
1353 	if (!th->syn)
1354 		sk = cookie_v4_check(sk, skb);
1355 #endif
1356 	return sk;
1357 }
1358 
1359 /* The socket must have its spinlock held when we get
1360  * here, unless it is a TCP_LISTEN socket.
1361  *
1362  * We have a potential double-lock case here, so even when
1363  * doing backlog processing we use the BH locking scheme.
1364  * This is because we cannot sleep with the original spinlock
1365  * held.
1366  */
1367 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1368 {
1369 	struct sock *rsk;
1370 
1371 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1372 		struct dst_entry *dst = sk->sk_rx_dst;
1373 
1374 		sock_rps_save_rxhash(sk, skb);
1375 		sk_mark_napi_id(sk, skb);
1376 		if (dst) {
1377 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1378 			    !dst->ops->check(dst, 0)) {
1379 				dst_release(dst);
1380 				sk->sk_rx_dst = NULL;
1381 			}
1382 		}
1383 		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1384 		return 0;
1385 	}
1386 
1387 	if (tcp_checksum_complete(skb))
1388 		goto csum_err;
1389 
1390 	if (sk->sk_state == TCP_LISTEN) {
1391 		struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1392 
1393 		if (!nsk)
1394 			goto discard;
1395 		if (nsk != sk) {
1396 			sock_rps_save_rxhash(nsk, skb);
1397 			sk_mark_napi_id(nsk, skb);
1398 			if (tcp_child_process(sk, nsk, skb)) {
1399 				rsk = nsk;
1400 				goto reset;
1401 			}
1402 			return 0;
1403 		}
1404 	} else
1405 		sock_rps_save_rxhash(sk, skb);
1406 
1407 	if (tcp_rcv_state_process(sk, skb)) {
1408 		rsk = sk;
1409 		goto reset;
1410 	}
1411 	return 0;
1412 
1413 reset:
1414 	tcp_v4_send_reset(rsk, skb);
1415 discard:
1416 	kfree_skb(skb);
1417 	/* Be careful here. If this function gets more complicated and
1418 	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1419 	 * might be destroyed here. This current version compiles correctly,
1420 	 * but you have been warned.
1421 	 */
1422 	return 0;
1423 
1424 csum_err:
1425 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1426 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1427 	goto discard;
1428 }
1429 EXPORT_SYMBOL(tcp_v4_do_rcv);
1430 
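/* Early demux: look up an established socket for this segment before full IP
 * processing so that skb->sk and a cached input route can be attached early.
 */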
1431 void tcp_v4_early_demux(struct sk_buff *skb)
1432 {
1433 	const struct iphdr *iph;
1434 	const struct tcphdr *th;
1435 	struct sock *sk;
1436 
1437 	if (skb->pkt_type != PACKET_HOST)
1438 		return;
1439 
1440 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1441 		return;
1442 
1443 	iph = ip_hdr(skb);
1444 	th = tcp_hdr(skb);
1445 
1446 	if (th->doff < sizeof(struct tcphdr) / 4)
1447 		return;
1448 
1449 	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1450 				       iph->saddr, th->source,
1451 				       iph->daddr, ntohs(th->dest),
1452 				       skb->skb_iif);
1453 	if (sk) {
1454 		skb->sk = sk;
1455 		skb->destructor = sock_edemux;
1456 		if (sk_fullsock(sk)) {
1457 			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1458 
1459 			if (dst)
1460 				dst = dst_check(dst, 0);
1461 			if (dst &&
1462 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1463 				skb_dst_set_noref(skb, dst);
1464 		}
1465 	}
1466 }
1467 
1468 /* Packet is added to VJ-style prequeue for processing in process
1469  * context, if a reader task is waiting. Apparently, this exciting
1470  * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1471  * failed somewhere. Latency? Burstiness? Well, at least now we will
1472  * see, why it failed. 8)8)				  --ANK
1473  * see why it failed. 8)8)				  --ANK
1474  */
1475 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1476 {
1477 	struct tcp_sock *tp = tcp_sk(sk);
1478 
1479 	if (sysctl_tcp_low_latency || !tp->ucopy.task)
1480 		return false;
1481 
1482 	if (skb->len <= tcp_hdrlen(skb) &&
1483 	    skb_queue_len(&tp->ucopy.prequeue) == 0)
1484 		return false;
1485 
1486 	/* Before escaping the RCU-protected region, we need to take care of the
1487 	 * skb dst. Prequeue is only enabled for established sockets.
1488 	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
1489 	 * Instead of doing a full sk_rx_dst validity check here, let's perform
1490 	 * an optimistic check.
1491 	 */
1492 	if (likely(sk->sk_rx_dst))
1493 		skb_dst_drop(skb);
1494 	else
1495 		skb_dst_force(skb);
1496 
1497 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
1498 	tp->ucopy.memory += skb->truesize;
1499 	if (tp->ucopy.memory > sk->sk_rcvbuf) {
1500 		struct sk_buff *skb1;
1501 
1502 		BUG_ON(sock_owned_by_user(sk));
1503 
1504 		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1505 			sk_backlog_rcv(sk, skb1);
1506 			NET_INC_STATS_BH(sock_net(sk),
1507 					 LINUX_MIB_TCPPREQUEUEDROPPED);
1508 		}
1509 
1510 		tp->ucopy.memory = 0;
1511 	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1512 		wake_up_interruptible_sync_poll(sk_sleep(sk),
1513 					   POLLIN | POLLRDNORM | POLLRDBAND);
1514 		if (!inet_csk_ack_scheduled(sk))
1515 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1516 						  (3 * tcp_rto_min(sk)) / 4,
1517 						  TCP_RTO_MAX);
1518 	}
1519 	return true;
1520 }
1521 EXPORT_SYMBOL(tcp_prequeue);
1522 
1523 /*
1524  *	From tcp_input.c
1525  */
1526 
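/* Main IPv4 receive entry point: validate the header and checksum, look up
 * the owning socket and either process the segment directly, prequeue it, or
 * push it onto the socket backlog.
 */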
1527 int tcp_v4_rcv(struct sk_buff *skb)
1528 {
1529 	const struct iphdr *iph;
1530 	const struct tcphdr *th;
1531 	struct sock *sk;
1532 	int ret;
1533 	struct net *net = dev_net(skb->dev);
1534 
1535 	if (skb->pkt_type != PACKET_HOST)
1536 		goto discard_it;
1537 
1538 	/* Count it even if it's bad */
1539 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1540 
1541 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1542 		goto discard_it;
1543 
1544 	th = tcp_hdr(skb);
1545 
1546 	if (th->doff < sizeof(struct tcphdr) / 4)
1547 		goto bad_packet;
1548 	if (!pskb_may_pull(skb, th->doff * 4))
1549 		goto discard_it;
1550 
1551 	/* An explanation is required here, I think.
1552 	 * Packet length and doff are validated by header prediction,
1553 	 * provided the case of th->doff == 0 is eliminated.
1554 	 * So, we defer the checks. */
1555 
1556 	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1557 		goto csum_error;
1558 
1559 	th = tcp_hdr(skb);
1560 	iph = ip_hdr(skb);
1561 	/* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
1562 	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1563 	 */
1564 	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1565 		sizeof(struct inet_skb_parm));
1566 	barrier();
1567 
1568 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1569 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1570 				    skb->len - th->doff * 4);
1571 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1572 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1573 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1574 	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1575 	TCP_SKB_CB(skb)->sacked	 = 0;
1576 
1577 lookup:
1578 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1579 	if (!sk)
1580 		goto no_tcp_socket;
1581 
1582 process:
1583 	if (sk->sk_state == TCP_TIME_WAIT)
1584 		goto do_time_wait;
1585 
1586 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1587 		struct request_sock *req = inet_reqsk(sk);
1588 		struct sock *nsk = NULL;
1589 
1590 		sk = req->rsk_listener;
1591 		if (tcp_v4_inbound_md5_hash(sk, skb))
1592 			goto discard_and_relse;
1593 		if (likely(sk->sk_state == TCP_LISTEN)) {
1594 			nsk = tcp_check_req(sk, skb, req, false);
1595 		} else {
1596 			inet_csk_reqsk_queue_drop_and_put(sk, req);
1597 			goto lookup;
1598 		}
1599 		if (!nsk) {
1600 			reqsk_put(req);
1601 			goto discard_it;
1602 		}
1603 		if (nsk == sk) {
1604 			sock_hold(sk);
1605 			reqsk_put(req);
1606 		} else if (tcp_child_process(sk, nsk, skb)) {
1607 			tcp_v4_send_reset(nsk, skb);
1608 			goto discard_it;
1609 		} else {
1610 			return 0;
1611 		}
1612 	}
1613 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1614 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1615 		goto discard_and_relse;
1616 	}
1617 
1618 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1619 		goto discard_and_relse;
1620 
1621 	if (tcp_v4_inbound_md5_hash(sk, skb))
1622 		goto discard_and_relse;
1623 
1624 	nf_reset(skb);
1625 
1626 	if (sk_filter(sk, skb))
1627 		goto discard_and_relse;
1628 
1629 	skb->dev = NULL;
1630 
1631 	if (sk->sk_state == TCP_LISTEN) {
1632 		ret = tcp_v4_do_rcv(sk, skb);
1633 		goto put_and_return;
1634 	}
1635 
1636 	sk_incoming_cpu_update(sk);
1637 
1638 	bh_lock_sock_nested(sk);
1639 	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1640 	ret = 0;
1641 	if (!sock_owned_by_user(sk)) {
1642 		if (!tcp_prequeue(sk, skb))
1643 			ret = tcp_v4_do_rcv(sk, skb);
1644 	} else if (unlikely(sk_add_backlog(sk, skb,
1645 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1646 		bh_unlock_sock(sk);
1647 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1648 		goto discard_and_relse;
1649 	}
1650 	bh_unlock_sock(sk);
1651 
1652 put_and_return:
1653 	sock_put(sk);
1654 
1655 	return ret;
1656 
1657 no_tcp_socket:
1658 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1659 		goto discard_it;
1660 
1661 	if (tcp_checksum_complete(skb)) {
1662 csum_error:
1663 		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1664 bad_packet:
1665 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1666 	} else {
1667 		tcp_v4_send_reset(NULL, skb);
1668 	}
1669 
1670 discard_it:
1671 	/* Discard frame. */
1672 	kfree_skb(skb);
1673 	return 0;
1674 
1675 discard_and_relse:
1676 	sock_put(sk);
1677 	goto discard_it;
1678 
1679 do_time_wait:
1680 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1681 		inet_twsk_put(inet_twsk(sk));
1682 		goto discard_it;
1683 	}
1684 
1685 	if (tcp_checksum_complete(skb)) {
1686 		inet_twsk_put(inet_twsk(sk));
1687 		goto csum_error;
1688 	}
1689 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1690 	case TCP_TW_SYN: {
1691 		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1692 							&tcp_hashinfo,
1693 							iph->saddr, th->source,
1694 							iph->daddr, th->dest,
1695 							inet_iif(skb));
1696 		if (sk2) {
1697 			inet_twsk_deschedule_put(inet_twsk(sk));
1698 			sk = sk2;
1699 			goto process;
1700 		}
1701 		/* Fall through to ACK */
1702 	}
1703 	case TCP_TW_ACK:
1704 		tcp_v4_timewait_ack(sk, skb);
1705 		break;
1706 	case TCP_TW_RST:
1707 		goto no_tcp_socket;
1708 	case TCP_TW_SUCCESS:;
1709 	}
1710 	goto discard_it;
1711 }
1712 
1713 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1714 	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
1715 	.twsk_unique	= tcp_twsk_unique,
1716 	.twsk_destructor= tcp_twsk_destructor,
1717 };
1718 
1719 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1720 {
1721 	struct dst_entry *dst = skb_dst(skb);
1722 
1723 	if (dst) {
1724 		dst_hold(dst);
1725 		sk->sk_rx_dst = dst;
1726 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1727 	}
1728 }
1729 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1730 
1731 const struct inet_connection_sock_af_ops ipv4_specific = {
1732 	.queue_xmit	   = ip_queue_xmit,
1733 	.send_check	   = tcp_v4_send_check,
1734 	.rebuild_header	   = inet_sk_rebuild_header,
1735 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1736 	.conn_request	   = tcp_v4_conn_request,
1737 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
1738 	.net_header_len	   = sizeof(struct iphdr),
1739 	.setsockopt	   = ip_setsockopt,
1740 	.getsockopt	   = ip_getsockopt,
1741 	.addr2sockaddr	   = inet_csk_addr2sockaddr,
1742 	.sockaddr_len	   = sizeof(struct sockaddr_in),
1743 	.bind_conflict	   = inet_csk_bind_conflict,
1744 #ifdef CONFIG_COMPAT
1745 	.compat_setsockopt = compat_ip_setsockopt,
1746 	.compat_getsockopt = compat_ip_getsockopt,
1747 #endif
1748 	.mtu_reduced	   = tcp_v4_mtu_reduced,
1749 };
1750 EXPORT_SYMBOL(ipv4_specific);
1751 
1752 #ifdef CONFIG_TCP_MD5SIG
1753 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1754 	.md5_lookup		= tcp_v4_md5_lookup,
1755 	.calc_md5_hash		= tcp_v4_md5_hash_skb,
1756 	.md5_parse		= tcp_v4_parse_md5_keys,
1757 };
1758 #endif
1759 
1760 /* NOTE: A lot of things are set to zero explicitly by the call to
1761  *       sk_alloc(), so they need not be done here.
1762  */
1763 static int tcp_v4_init_sock(struct sock *sk)
1764 {
1765 	struct inet_connection_sock *icsk = inet_csk(sk);
1766 
1767 	tcp_init_sock(sk);
1768 
1769 	icsk->icsk_af_ops = &ipv4_specific;
1770 
1771 #ifdef CONFIG_TCP_MD5SIG
1772 	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1773 #endif
1774 
1775 	return 0;
1776 }
1777 
1778 void tcp_v4_destroy_sock(struct sock *sk)
1779 {
1780 	struct tcp_sock *tp = tcp_sk(sk);
1781 
1782 	tcp_clear_xmit_timers(sk);
1783 
1784 	tcp_cleanup_congestion_control(sk);
1785 
1786 	/* Clean up the write buffer. */
1787 	tcp_write_queue_purge(sk);
1788 
1789 	/* Cleans up our, hopefully empty, out_of_order_queue. */
1790 	__skb_queue_purge(&tp->out_of_order_queue);
1791 
1792 #ifdef CONFIG_TCP_MD5SIG
1793 	/* Clean up the MD5 key list, if any */
1794 	if (tp->md5sig_info) {
1795 		tcp_clear_md5_list(sk);
1796 		kfree_rcu(tp->md5sig_info, rcu);
1797 		tp->md5sig_info = NULL;
1798 	}
1799 #endif
1800 
1801 	/* Clean the prequeue; it really must be empty */
1802 	__skb_queue_purge(&tp->ucopy.prequeue);
1803 
1804 	/* Clean up a referenced TCP bind bucket. */
1805 	if (inet_csk(sk)->icsk_bind_hash)
1806 		inet_put_port(sk);
1807 
1808 	BUG_ON(tp->fastopen_rsk);
1809 
1810 	/* If socket is aborted during connect operation */
1811 	tcp_free_fastopen_req(tp);
1812 	tcp_saved_syn_free(tp);
1813 
1814 	sk_sockets_allocated_dec(sk);
1815 	sock_release_memcg(sk);
1816 }
1817 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1818 
1819 #ifdef CONFIG_PROC_FS
1820 /* Proc filesystem TCP sock list dumping. */
1821 
1822 /*
1823  * Get the next listener socket following cur.  If cur is NULL, get the first
1824  * socket starting from the bucket given in st->bucket; when st->bucket is zero
1825  * the very first socket in the hash table is returned.
1826  */
1827 static void *listening_get_next(struct seq_file *seq, void *cur)
1828 {
1829 	struct inet_connection_sock *icsk;
1830 	struct hlist_nulls_node *node;
1831 	struct sock *sk = cur;
1832 	struct inet_listen_hashbucket *ilb;
1833 	struct tcp_iter_state *st = seq->private;
1834 	struct net *net = seq_file_net(seq);
1835 
1836 	if (!sk) {
1837 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1838 		spin_lock_bh(&ilb->lock);
1839 		sk = sk_nulls_head(&ilb->head);
1840 		st->offset = 0;
1841 		goto get_sk;
1842 	}
1843 	ilb = &tcp_hashinfo.listening_hash[st->bucket];
1844 	++st->num;
1845 	++st->offset;
1846 
1847 	sk = sk_nulls_next(sk);
1848 get_sk:
1849 	sk_nulls_for_each_from(sk, node) {
1850 		if (!net_eq(sock_net(sk), net))
1851 			continue;
1852 		if (sk->sk_family == st->family) {
1853 			cur = sk;
1854 			goto out;
1855 		}
1856 		icsk = inet_csk(sk);
1857 	}
1858 	spin_unlock_bh(&ilb->lock);
1859 	st->offset = 0;
1860 	if (++st->bucket < INET_LHTABLE_SIZE) {
1861 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1862 		spin_lock_bh(&ilb->lock);
1863 		sk = sk_nulls_head(&ilb->head);
1864 		goto get_sk;
1865 	}
1866 	cur = NULL;
1867 out:
1868 	return cur;
1869 }
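/*
 * Note: the walk above takes the per-bucket lock of the listening hash,
 * skips sockets that belong to another network namespace or address family,
 * and moves to the next bucket (dropping one lock, taking the next) once a
 * chain is exhausted.  When a socket is returned, its bucket lock is still
 * held; tcp_seq_stop() releases it.
 */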
1870 
1871 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1872 {
1873 	struct tcp_iter_state *st = seq->private;
1874 	void *rc;
1875 
1876 	st->bucket = 0;
1877 	st->offset = 0;
1878 	rc = listening_get_next(seq, NULL);
1879 
1880 	while (rc && *pos) {
1881 		rc = listening_get_next(seq, rc);
1882 		--*pos;
1883 	}
1884 	return rc;
1885 }
1886 
1887 static inline bool empty_bucket(const struct tcp_iter_state *st)
1888 {
1889 	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1890 }
1891 
1892 /*
1893  * Get the first established socket, starting from the bucket given in st->bucket.
1894  * If st->bucket is zero, the very first socket in the hash is returned.
1895  */
1896 static void *established_get_first(struct seq_file *seq)
1897 {
1898 	struct tcp_iter_state *st = seq->private;
1899 	struct net *net = seq_file_net(seq);
1900 	void *rc = NULL;
1901 
1902 	st->offset = 0;
1903 	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1904 		struct sock *sk;
1905 		struct hlist_nulls_node *node;
1906 		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1907 
1908 		/* Lockless fast path for the common case of empty buckets */
1909 		if (empty_bucket(st))
1910 			continue;
1911 
1912 		spin_lock_bh(lock);
1913 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1914 			if (sk->sk_family != st->family ||
1915 			    !net_eq(sock_net(sk), net)) {
1916 				continue;
1917 			}
1918 			rc = sk;
1919 			goto out;
1920 		}
1921 		spin_unlock_bh(lock);
1922 	}
1923 out:
1924 	return rc;
1925 }
1926 
1927 static void *established_get_next(struct seq_file *seq, void *cur)
1928 {
1929 	struct sock *sk = cur;
1930 	struct hlist_nulls_node *node;
1931 	struct tcp_iter_state *st = seq->private;
1932 	struct net *net = seq_file_net(seq);
1933 
1934 	++st->num;
1935 	++st->offset;
1936 
1937 	sk = sk_nulls_next(sk);
1938 
1939 	sk_nulls_for_each_from(sk, node) {
1940 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1941 			return sk;
1942 	}
1943 
1944 	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1945 	++st->bucket;
1946 	return established_get_first(seq);
1947 }
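/*
 * The established-side walk mirrors the listening one, but over the larger
 * ehash table: empty buckets are skipped without locking, and the lock of a
 * non-empty bucket is held from established_get_first() until the chain is
 * exhausted here or the reader stops.
 */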
1948 
1949 static void *established_get_idx(struct seq_file *seq, loff_t pos)
1950 {
1951 	struct tcp_iter_state *st = seq->private;
1952 	void *rc;
1953 
1954 	st->bucket = 0;
1955 	rc = established_get_first(seq);
1956 
1957 	while (rc && pos) {
1958 		rc = established_get_next(seq, rc);
1959 		--pos;
1960 	}
1961 	return rc;
1962 }
1963 
1964 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1965 {
1966 	void *rc;
1967 	struct tcp_iter_state *st = seq->private;
1968 
1969 	st->state = TCP_SEQ_STATE_LISTENING;
1970 	rc	  = listening_get_idx(seq, &pos);
1971 
1972 	if (!rc) {
1973 		st->state = TCP_SEQ_STATE_ESTABLISHED;
1974 		rc	  = established_get_idx(seq, pos);
1975 	}
1976 
1977 	return rc;
1978 }
1979 
1980 static void *tcp_seek_last_pos(struct seq_file *seq)
1981 {
1982 	struct tcp_iter_state *st = seq->private;
1983 	int offset = st->offset;
1984 	int orig_num = st->num;
1985 	void *rc = NULL;
1986 
1987 	switch (st->state) {
1988 	case TCP_SEQ_STATE_LISTENING:
1989 		if (st->bucket >= INET_LHTABLE_SIZE)
1990 			break;
1991 		st->state = TCP_SEQ_STATE_LISTENING;
1992 		rc = listening_get_next(seq, NULL);
1993 		while (offset-- && rc)
1994 			rc = listening_get_next(seq, rc);
1995 		if (rc)
1996 			break;
1997 		st->bucket = 0;
1998 		st->state = TCP_SEQ_STATE_ESTABLISHED;
1999 		/* Fallthrough */
2000 	case TCP_SEQ_STATE_ESTABLISHED:
2001 		if (st->bucket > tcp_hashinfo.ehash_mask)
2002 			break;
2003 		rc = established_get_first(seq);
2004 		while (offset-- && rc)
2005 			rc = established_get_next(seq, rc);
2006 	}
2007 
2008 	st->num = orig_num;
2009 
2010 	return rc;
2011 }
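/*
 * tcp_seek_last_pos() resumes the dump from the bucket and in-bucket offset
 * recorded in the iterator state, so a reader issuing several consecutive
 * read() calls does not force a rescan from the first bucket each time.
 * st->num is restored afterwards because the helpers above increment it
 * while the offset is being replayed.
 */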
2012 
2013 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2014 {
2015 	struct tcp_iter_state *st = seq->private;
2016 	void *rc;
2017 
2018 	if (*pos && *pos == st->last_pos) {
2019 		rc = tcp_seek_last_pos(seq);
2020 		if (rc)
2021 			goto out;
2022 	}
2023 
2024 	st->state = TCP_SEQ_STATE_LISTENING;
2025 	st->num = 0;
2026 	st->bucket = 0;
2027 	st->offset = 0;
2028 	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2029 
2030 out:
2031 	st->last_pos = *pos;
2032 	return rc;
2033 }
2034 
2035 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2036 {
2037 	struct tcp_iter_state *st = seq->private;
2038 	void *rc = NULL;
2039 
2040 	if (v == SEQ_START_TOKEN) {
2041 		rc = tcp_get_idx(seq, 0);
2042 		goto out;
2043 	}
2044 
2045 	switch (st->state) {
2046 	case TCP_SEQ_STATE_LISTENING:
2047 		rc = listening_get_next(seq, v);
2048 		if (!rc) {
2049 			st->state = TCP_SEQ_STATE_ESTABLISHED;
2050 			st->bucket = 0;
2051 			st->offset = 0;
2052 			rc	  = established_get_first(seq);
2053 		}
2054 		break;
2055 	case TCP_SEQ_STATE_ESTABLISHED:
2056 		rc = established_get_next(seq, v);
2057 		break;
2058 	}
2059 out:
2060 	++*pos;
2061 	st->last_pos = *pos;
2062 	return rc;
2063 }
2064 
2065 static void tcp_seq_stop(struct seq_file *seq, void *v)
2066 {
2067 	struct tcp_iter_state *st = seq->private;
2068 
2069 	switch (st->state) {
2070 	case TCP_SEQ_STATE_LISTENING:
2071 		if (v != SEQ_START_TOKEN)
2072 			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2073 		break;
2074 	case TCP_SEQ_STATE_ESTABLISHED:
2075 		if (v)
2076 			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2077 		break;
2078 	}
2079 }
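/*
 * tcp_seq_start/next/stop implement the usual seq_file iterator contract.
 * Whichever bucket lock the listening or established walkers left held for
 * the entry being shown is dropped here, keyed off the state and bucket
 * recorded in the iterator.
 */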
2080 
2081 int tcp_seq_open(struct inode *inode, struct file *file)
2082 {
2083 	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2084 	struct tcp_iter_state *s;
2085 	int err;
2086 
2087 	err = seq_open_net(inode, file, &afinfo->seq_ops,
2088 			  sizeof(struct tcp_iter_state));
2089 	if (err < 0)
2090 		return err;
2091 
2092 	s = ((struct seq_file *)file->private_data)->private;
2093 	s->family		= afinfo->family;
2094 	s->last_pos		= 0;
2095 	return 0;
2096 }
2097 EXPORT_SYMBOL(tcp_seq_open);
2098 
2099 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2100 {
2101 	int rc = 0;
2102 	struct proc_dir_entry *p;
2103 
2104 	afinfo->seq_ops.start		= tcp_seq_start;
2105 	afinfo->seq_ops.next		= tcp_seq_next;
2106 	afinfo->seq_ops.stop		= tcp_seq_stop;
2107 
2108 	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2109 			     afinfo->seq_fops, afinfo);
2110 	if (!p)
2111 		rc = -ENOMEM;
2112 	return rc;
2113 }
2114 EXPORT_SYMBOL(tcp_proc_register);
2115 
2116 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2117 {
2118 	remove_proc_entry(afinfo->name, net->proc_net);
2119 }
2120 EXPORT_SYMBOL(tcp_proc_unregister);
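/*
 * tcp_proc_register()/tcp_proc_unregister() create and remove a
 * per-network-namespace proc entry named after afinfo->name, wiring the
 * generic start/next/stop iterators above into the caller's seq_operations.
 * The IPv4 instance further down registers "tcp", i.e. /proc/net/tcp.
 */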
2121 
2122 static void get_openreq4(const struct request_sock *req,
2123 			 struct seq_file *f, int i)
2124 {
2125 	const struct inet_request_sock *ireq = inet_rsk(req);
2126 	long delta = req->rsk_timer.expires - jiffies;
2127 
2128 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2129 		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2130 		i,
2131 		ireq->ir_loc_addr,
2132 		ireq->ir_num,
2133 		ireq->ir_rmt_addr,
2134 		ntohs(ireq->ir_rmt_port),
2135 		TCP_SYN_RECV,
2136 		0, 0, /* could print option size, but that is af dependent. */
2137 		1,    /* timers active (only the expire timer) */
2138 		jiffies_delta_to_clock_t(delta),
2139 		req->num_timeout,
2140 		from_kuid_munged(seq_user_ns(f),
2141 				 sock_i_uid(req->rsk_listener)),
2142 		0,  /* non standard timer */
2143 		0, /* open_requests have no inode */
2144 		0,
2145 		req);
2146 }
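/*
 * get_openreq4() prints one line per pending connection request in the same
 * column layout as get_tcp4_sock() below: addresses and ports in hex, the
 * state forced to TCP_SYN_RECV, a single "expire" timer showing the time
 * left on rsk_timer, the request's timeout count and the listener's uid;
 * queue lengths and the inode are printed as 0 since a request_sock has
 * none of its own.
 */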
2147 
2148 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2149 {
2150 	int timer_active;
2151 	unsigned long timer_expires;
2152 	const struct tcp_sock *tp = tcp_sk(sk);
2153 	const struct inet_connection_sock *icsk = inet_csk(sk);
2154 	const struct inet_sock *inet = inet_sk(sk);
2155 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2156 	__be32 dest = inet->inet_daddr;
2157 	__be32 src = inet->inet_rcv_saddr;
2158 	__u16 destp = ntohs(inet->inet_dport);
2159 	__u16 srcp = ntohs(inet->inet_sport);
2160 	int rx_queue;
2161 	int state;
2162 
2163 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2164 	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2165 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2166 		timer_active	= 1;
2167 		timer_expires	= icsk->icsk_timeout;
2168 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2169 		timer_active	= 4;
2170 		timer_expires	= icsk->icsk_timeout;
2171 	} else if (timer_pending(&sk->sk_timer)) {
2172 		timer_active	= 2;
2173 		timer_expires	= sk->sk_timer.expires;
2174 	} else {
2175 		timer_active	= 0;
2176 		timer_expires = jiffies;
2177 	}
2178 
2179 	state = sk_state_load(sk);
2180 	if (state == TCP_LISTEN)
2181 		rx_queue = sk->sk_ack_backlog;
2182 	else
2183 		/* Because we don't lock the socket,
2184 		 * we might find a transient negative value.
2185 		 */
2186 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2187 
2188 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2189 			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2190 		i, src, srcp, dest, destp, state,
2191 		tp->write_seq - tp->snd_una,
2192 		rx_queue,
2193 		timer_active,
2194 		jiffies_delta_to_clock_t(timer_expires - jiffies),
2195 		icsk->icsk_retransmits,
2196 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2197 		icsk->icsk_probes_out,
2198 		sock_i_ino(sk),
2199 		atomic_read(&sk->sk_refcnt), sk,
2200 		jiffies_to_clock_t(icsk->icsk_rto),
2201 		jiffies_to_clock_t(icsk->icsk_ack.ato),
2202 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2203 		tp->snd_cwnd,
2204 		state == TCP_LISTEN ?
2205 		    fastopenq->max_qlen :
2206 		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2207 }
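/*
 * The columns written above match the header emitted by tcp4_seq_show():
 * slot, local and remote address:port in hex, state, tx_queue (unacked send
 * queue), rx_queue (data not yet read, or the accept backlog for listeners),
 * the active timer and its expiry, the retransmit counter, uid, probe count,
 * inode, then the extra fields: refcount, socket pointer, rto, delayed-ack
 * state, cwnd and ssthresh (or the fastopen queue limit for listeners).
 */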
2208 
2209 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2210 			       struct seq_file *f, int i)
2211 {
2212 	long delta = tw->tw_timer.expires - jiffies;
2213 	__be32 dest, src;
2214 	__u16 destp, srcp;
2215 
2216 	dest  = tw->tw_daddr;
2217 	src   = tw->tw_rcv_saddr;
2218 	destp = ntohs(tw->tw_dport);
2219 	srcp  = ntohs(tw->tw_sport);
2220 
2221 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2222 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2223 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2224 		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2225 		atomic_read(&tw->tw_refcnt), tw);
2226 }
2227 
2228 #define TMPSZ 150
2229 
2230 static int tcp4_seq_show(struct seq_file *seq, void *v)
2231 {
2232 	struct tcp_iter_state *st;
2233 	struct sock *sk = v;
2234 
2235 	seq_setwidth(seq, TMPSZ - 1);
2236 	if (v == SEQ_START_TOKEN) {
2237 		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2238 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2239 			   "inode");
2240 		goto out;
2241 	}
2242 	st = seq->private;
2243 
2244 	if (sk->sk_state == TCP_TIME_WAIT)
2245 		get_timewait4_sock(v, seq, st->num);
2246 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
2247 		get_openreq4(v, seq, st->num);
2248 	else
2249 		get_tcp4_sock(v, seq, st->num);
2250 out:
2251 	seq_pad(seq, '\n');
2252 	return 0;
2253 }
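/*
 * The dispatch above keys off sk_state alone: TCP_TIME_WAIT entries are
 * really inet_timewait_socks and TCP_NEW_SYN_RECV entries are request_socks,
 * which share the ehash table with full sockets, so each kind gets its own
 * formatter.
 */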
2254 
2255 static const struct file_operations tcp_afinfo_seq_fops = {
2256 	.owner   = THIS_MODULE,
2257 	.open    = tcp_seq_open,
2258 	.read    = seq_read,
2259 	.llseek  = seq_lseek,
2260 	.release = seq_release_net
2261 };
2262 
2263 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2264 	.name		= "tcp",
2265 	.family		= AF_INET,
2266 	.seq_fops	= &tcp_afinfo_seq_fops,
2267 	.seq_ops	= {
2268 		.show		= tcp4_seq_show,
2269 	},
2270 };
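/*
 * tcp4_seq_afinfo only supplies the family filter, the proc name and the
 * show() callback; tcp_proc_register() fills in start/next/stop, and
 * tcp_afinfo_seq_fops routes open() through tcp_seq_open() so every reader
 * gets its own tcp_iter_state.
 */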
2271 
2272 static int __net_init tcp4_proc_init_net(struct net *net)
2273 {
2274 	return tcp_proc_register(net, &tcp4_seq_afinfo);
2275 }
2276 
2277 static void __net_exit tcp4_proc_exit_net(struct net *net)
2278 {
2279 	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2280 }
2281 
2282 static struct pernet_operations tcp4_net_ops = {
2283 	.init = tcp4_proc_init_net,
2284 	.exit = tcp4_proc_exit_net,
2285 };
2286 
2287 int __init tcp4_proc_init(void)
2288 {
2289 	return register_pernet_subsys(&tcp4_net_ops);
2290 }
2291 
2292 void tcp4_proc_exit(void)
2293 {
2294 	unregister_pernet_subsys(&tcp4_net_ops);
2295 }
2296 #endif /* CONFIG_PROC_FS */
2297 
2298 struct proto tcp_prot = {
2299 	.name			= "TCP",
2300 	.owner			= THIS_MODULE,
2301 	.close			= tcp_close,
2302 	.connect		= tcp_v4_connect,
2303 	.disconnect		= tcp_disconnect,
2304 	.accept			= inet_csk_accept,
2305 	.ioctl			= tcp_ioctl,
2306 	.init			= tcp_v4_init_sock,
2307 	.destroy		= tcp_v4_destroy_sock,
2308 	.shutdown		= tcp_shutdown,
2309 	.setsockopt		= tcp_setsockopt,
2310 	.getsockopt		= tcp_getsockopt,
2311 	.recvmsg		= tcp_recvmsg,
2312 	.sendmsg		= tcp_sendmsg,
2313 	.sendpage		= tcp_sendpage,
2314 	.backlog_rcv		= tcp_v4_do_rcv,
2315 	.release_cb		= tcp_release_cb,
2316 	.hash			= inet_hash,
2317 	.unhash			= inet_unhash,
2318 	.get_port		= inet_csk_get_port,
2319 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2320 	.stream_memory_free	= tcp_stream_memory_free,
2321 	.sockets_allocated	= &tcp_sockets_allocated,
2322 	.orphan_count		= &tcp_orphan_count,
2323 	.memory_allocated	= &tcp_memory_allocated,
2324 	.memory_pressure	= &tcp_memory_pressure,
2325 	.sysctl_mem		= sysctl_tcp_mem,
2326 	.sysctl_wmem		= sysctl_tcp_wmem,
2327 	.sysctl_rmem		= sysctl_tcp_rmem,
2328 	.max_header		= MAX_TCP_HEADER,
2329 	.obj_size		= sizeof(struct tcp_sock),
2330 	.slab_flags		= SLAB_DESTROY_BY_RCU,
2331 	.twsk_prot		= &tcp_timewait_sock_ops,
2332 	.rsk_prot		= &tcp_request_sock_ops,
2333 	.h.hashinfo		= &tcp_hashinfo,
2334 	.no_autobind		= true,
2335 #ifdef CONFIG_COMPAT
2336 	.compat_setsockopt	= compat_tcp_setsockopt,
2337 	.compat_getsockopt	= compat_tcp_getsockopt,
2338 #endif
2339 #ifdef CONFIG_MEMCG_KMEM
2340 	.init_cgroup		= tcp_init_cgroup,
2341 	.destroy_cgroup		= tcp_destroy_cgroup,
2342 	.proto_cgroup		= tcp_proto_cgroup,
2343 #endif
2344 };
2345 EXPORT_SYMBOL(tcp_prot);
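/*
 * Note: tcp_prot is the proto table attached to SOCK_STREAM/IPPROTO_TCP
 * sockets; the registration with the inet socket layer happens in the core
 * IPv4 setup code (af_inet.c), not here.  Everything above in this file is
 * reached through these callbacks or through the receive path.
 */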
2346 
2347 static void __net_exit tcp_sk_exit(struct net *net)
2348 {
2349 	int cpu;
2350 
2351 	for_each_possible_cpu(cpu)
2352 		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2353 	free_percpu(net->ipv4.tcp_sk);
2354 }
2355 
2356 static int __net_init tcp_sk_init(struct net *net)
2357 {
2358 	int res, cpu;
2359 
2360 	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2361 	if (!net->ipv4.tcp_sk)
2362 		return -ENOMEM;
2363 
2364 	for_each_possible_cpu(cpu) {
2365 		struct sock *sk;
2366 
2367 		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2368 					   IPPROTO_TCP, net);
2369 		if (res)
2370 			goto fail;
2371 		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2372 	}
2373 
2374 	net->ipv4.sysctl_tcp_ecn = 2;
2375 	net->ipv4.sysctl_tcp_ecn_fallback = 1;
2376 
2377 	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2378 	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2379 	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2380 
2381 	return 0;
2382 fail:
2383 	tcp_sk_exit(net);
2384 
2385 	return res;
2386 }
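/*
 * The per-cpu kernel sockets allocated in tcp_sk_init() are the control
 * sockets this file uses to send RSTs and ACKs that are not tied to a user
 * socket (see the reply paths earlier in the file); the remaining
 * assignments seed the per-namespace sysctl defaults.
 */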
2387 
2388 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2389 {
2390 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2391 }
2392 
2393 static struct pernet_operations __net_initdata tcp_sk_ops = {
2394        .init	   = tcp_sk_init,
2395        .exit	   = tcp_sk_exit,
2396        .exit_batch = tcp_sk_exit_batch,
2397 };
2398 
2399 void __init tcp_v4_init(void)
2400 {
2401 	inet_hashinfo_init(&tcp_hashinfo);
2402 	if (register_pernet_subsys(&tcp_sk_ops))
2403 		panic("Failed to create the TCP control socket.\n");
2404 }
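/*
 * Note: tcp_v4_init() runs once at boot, called from the inet
 * initialization code; a failure to register the per-namespace operations
 * (and hence to create the control sockets) is treated as fatal.
 */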
2405