xref: /openbmc/linux/net/ipv4/tcp_ipv4.c (revision b0f85fa11aefc4f3e03306b4cd47f113bd57dcba)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Implementation of the Transmission Control Protocol(TCP).
7  *
8  *		IPv4 specific functions
9  *
10  *
11  *		code split from:
12  *		linux/ipv4/tcp.c
13  *		linux/ipv4/tcp_input.c
14  *		linux/ipv4/tcp_output.c
15  *
16  *		See tcp.c for author information
17  *
18  *	This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23 
24 /*
25  * Changes:
26  *		David S. Miller	:	New socket lookup architecture.
27  *					This code is dedicated to John Dyson.
28  *		David S. Miller :	Change semantics of established hash,
29  *					half is devoted to TIME_WAIT sockets
30  *					and the rest go in the other half.
31  *		Andi Kleen :		Add support for syncookies and fixed
32  *					some bugs: ip options weren't passed to
33  *					the TCP layer, missed a check for an
34  *					ACK bit.
35  *		Andi Kleen :		Implemented fast path mtu discovery.
36  *	     				Fixed many serious bugs in the
37  *					request_sock handling and moved
38  *					most of it into the af independent code.
39  *					Added tail drop and some other bugfixes.
40  *					Added new listen semantics.
41  *		Mike McLagan	:	Routing by source
42  *	Juan Jose Ciarlante:		ip_dynaddr bits
43  *		Andi Kleen:		various fixes.
44  *	Vitaly E. Lavrov	:	Transparent proxy revived after year
45  *					coma.
46  *	Andi Kleen		:	Fix new listen.
47  *	Andi Kleen		:	Fix accept error reporting.
48  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
49  *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
50  *					a single port at the same time.
51  */
52 
53 #define pr_fmt(fmt) "TCP: " fmt
54 
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65 
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/tcp_memcontrol.h>
77 #include <net/busy_poll.h>
78 
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84 
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87 
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
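/* These knobs are exposed as /proc/sys/net/ipv4/tcp_tw_reuse and
 * /proc/sys/net/ipv4/tcp_low_latency; e.g. (illustrative shell usage):
 *
 *	sysctl -w net.ipv4.tcp_tw_reuse=1
 */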
91 
92 #ifdef CONFIG_TCP_MD5SIG
93 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94 			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
95 #endif
96 
97 struct inet_hashinfo tcp_hashinfo;
98 EXPORT_SYMBOL(tcp_hashinfo);
99 
100 static  __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
101 {
102 	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
103 					  ip_hdr(skb)->saddr,
104 					  tcp_hdr(skb)->dest,
105 					  tcp_hdr(skb)->source);
106 }
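/* A brief aside: secure_tcp_sequence_number() derives the ISN from the
 * connection 4-tuple mixed with a boot-time secret (in the spirit of
 * RFC 6528), so initial sequence numbers are hard for an off-path
 * attacker to guess.
 */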
107 
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
109 {
110 	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
111 	struct tcp_sock *tp = tcp_sk(sk);
112 
113 	/* With PAWS, it is safe from the viewpoint
114 	   of data integrity. Even without PAWS it is safe provided sequence
115 	   spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.
116 
117 	   Actually, the idea is close to VJ's, except that the timestamp
118 	   cache is held per port pair rather than per host, and the TW
119 	   bucket is used as the state holder.
120 
121 	   If the TW bucket has already been destroyed we fall back to VJ's
122 	   scheme and use the initial timestamp retrieved from the peer table.
123 	 */
124 	if (tcptw->tw_ts_recent_stamp &&
125 	    (!twp || (sysctl_tcp_tw_reuse &&
126 			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
127 		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
128 		if (tp->write_seq == 0)
129 			tp->write_seq = 1;
130 		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
131 		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
132 		sock_hold(sktw);
133 		return 1;
134 	}
135 
136 	return 0;
137 }
138 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
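/* A worked example of the offset used above: write_seq is placed
 * 65535 + 2 bytes past tw_snd_nxt, i.e. beyond anything the peer could
 * still accept in the old receive window (at most 64 KB without window
 * scaling), so the old and new sequence spaces cannot collide. For
 * tw_snd_nxt == 1000 this gives write_seq == 66537.
 */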
139 
140 /* This will initiate an outgoing connection. */
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
142 {
143 	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
144 	struct inet_sock *inet = inet_sk(sk);
145 	struct tcp_sock *tp = tcp_sk(sk);
146 	__be16 orig_sport, orig_dport;
147 	__be32 daddr, nexthop;
148 	struct flowi4 *fl4;
149 	struct rtable *rt;
150 	int err;
151 	struct ip_options_rcu *inet_opt;
152 
153 	if (addr_len < sizeof(struct sockaddr_in))
154 		return -EINVAL;
155 
156 	if (usin->sin_family != AF_INET)
157 		return -EAFNOSUPPORT;
158 
159 	nexthop = daddr = usin->sin_addr.s_addr;
160 	inet_opt = rcu_dereference_protected(inet->inet_opt,
161 					     sock_owned_by_user(sk));
162 	if (inet_opt && inet_opt->opt.srr) {
163 		if (!daddr)
164 			return -EINVAL;
165 		nexthop = inet_opt->opt.faddr;
166 	}
167 
168 	orig_sport = inet->inet_sport;
169 	orig_dport = usin->sin_port;
170 	fl4 = &inet->cork.fl.u.ip4;
171 	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
172 			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
173 			      IPPROTO_TCP,
174 			      orig_sport, orig_dport, sk);
175 	if (IS_ERR(rt)) {
176 		err = PTR_ERR(rt);
177 		if (err == -ENETUNREACH)
178 			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
179 		return err;
180 	}
181 
182 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
183 		ip_rt_put(rt);
184 		return -ENETUNREACH;
185 	}
186 
187 	if (!inet_opt || !inet_opt->opt.srr)
188 		daddr = fl4->daddr;
189 
190 	if (!inet->inet_saddr)
191 		inet->inet_saddr = fl4->saddr;
192 	sk_rcv_saddr_set(sk, inet->inet_saddr);
193 
194 	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
195 		/* Reset inherited state */
196 		tp->rx_opt.ts_recent	   = 0;
197 		tp->rx_opt.ts_recent_stamp = 0;
198 		if (likely(!tp->repair))
199 			tp->write_seq	   = 0;
200 	}
201 
202 	if (tcp_death_row.sysctl_tw_recycle &&
203 	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
204 		tcp_fetch_timewait_stamp(sk, &rt->dst);
205 
206 	inet->inet_dport = usin->sin_port;
207 	sk_daddr_set(sk, daddr);
208 
209 	inet_csk(sk)->icsk_ext_hdr_len = 0;
210 	if (inet_opt)
211 		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
212 
213 	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
214 
215 	/* Socket identity is still unknown (sport may be zero).
216 	 * However we set state to SYN-SENT and, without releasing the socket
217 	 * lock, select a source port, enter ourselves into the hash tables
218 	 * and complete initialization after this.
219 	 */
220 	tcp_set_state(sk, TCP_SYN_SENT);
221 	err = inet_hash_connect(&tcp_death_row, sk);
222 	if (err)
223 		goto failure;
224 
225 	sk_set_txhash(sk);
226 
227 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228 			       inet->inet_sport, inet->inet_dport, sk);
229 	if (IS_ERR(rt)) {
230 		err = PTR_ERR(rt);
231 		rt = NULL;
232 		goto failure;
233 	}
234 	/* OK, now commit destination to socket.  */
235 	sk->sk_gso_type = SKB_GSO_TCPV4;
236 	sk_setup_caps(sk, &rt->dst);
237 
238 	if (!tp->write_seq && likely(!tp->repair))
239 		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
240 							   inet->inet_daddr,
241 							   inet->inet_sport,
242 							   usin->sin_port);
243 
244 	inet->inet_id = tp->write_seq ^ jiffies;
245 
246 	err = tcp_connect(sk);
247 
248 	rt = NULL;
249 	if (err)
250 		goto failure;
251 
252 	return 0;
253 
254 failure:
255 	/*
256 	 * This unhashes the socket and releases the local port,
257 	 * if necessary.
258 	 */
259 	tcp_set_state(sk, TCP_CLOSE);
260 	ip_rt_put(rt);
261 	sk->sk_route_caps = 0;
262 	inet->inet_dport = 0;
263 	return err;
264 }
265 EXPORT_SYMBOL(tcp_v4_connect);
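/* For orientation, a minimal userspace sketch of what drives this path
 * (illustrative only; error handling omitted):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *		.sin_addr   = { .s_addr = inet_addr("192.0.2.1") },
 *	};
 *
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 * connect(2) on an AF_INET stream socket reaches tcp_v4_connect() via
 * inet_stream_connect().
 */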
266 
267 /*
268  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
269  * It can be called through tcp_release_cb() if socket was owned by user
270  * at the time tcp_v4_err() was called to handle ICMP message.
271  */
272 void tcp_v4_mtu_reduced(struct sock *sk)
273 {
274 	struct dst_entry *dst;
275 	struct inet_sock *inet = inet_sk(sk);
276 	u32 mtu = tcp_sk(sk)->mtu_info;
277 
278 	dst = inet_csk_update_pmtu(sk, mtu);
279 	if (!dst)
280 		return;
281 
282 	/* Something is about to go wrong... Remember the soft error
283 	 * in case this connection is not able to recover.
284 	 */
285 	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
286 		sk->sk_err_soft = EMSGSIZE;
287 
288 	mtu = dst_mtu(dst);
289 
290 	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
291 	    ip_sk_accept_pmtu(sk) &&
292 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
293 		tcp_sync_mss(sk, mtu);
294 
295 		/* Resend the TCP packet because it's
296 		 * clear that the old packet has been
297 		 * dropped. This is the new "fast" path mtu
298 		 * discovery.
299 		 */
300 		tcp_simple_retransmit(sk);
301 	} /* else let the usual retransmit timer handle it */
302 }
303 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
304 
305 static void do_redirect(struct sk_buff *skb, struct sock *sk)
306 {
307 	struct dst_entry *dst = __sk_dst_check(sk, 0);
308 
309 	if (dst)
310 		dst->ops->redirect(dst, sk, skb);
311 }
312 
313 
314 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
315 void tcp_req_err(struct sock *sk, u32 seq)
316 {
317 	struct request_sock *req = inet_reqsk(sk);
318 	struct net *net = sock_net(sk);
319 
320 	/* ICMPs are not backlogged, hence we cannot get
321 	 * an established socket here.
322 	 */
323 	WARN_ON(req->sk);
324 
325 	if (seq != tcp_rsk(req)->snt_isn) {
326 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
327 	} else {
328 		/*
329 		 * Still in SYN_RECV, just remove it silently.
330 		 * There is no good way to pass the error to the newly
331 		 * created socket, and POSIX does not want network
332 		 * errors returned from accept().
333 		 */
334 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
335 		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
336 	}
337 	reqsk_put(req);
338 }
339 EXPORT_SYMBOL(tcp_req_err);
340 
341 /*
342  * This routine is called by the ICMP module when it gets some
343  * sort of error condition.  If err < 0 then the socket should
344  * be closed and the error returned to the user.  If err > 0
345  * it's just the icmp type << 8 | icmp code.  After adjustment
346  * header points to the first 8 bytes of the tcp header.  We need
347  * to find the appropriate port.
348  *
349  * The locking strategy used here is very "optimistic". When
350  * someone else accesses the socket the ICMP is just dropped
351  * and for some paths there is no check at all.
352  * A more general error queue to queue errors for later handling
353  * is probably better.
354  *
355  */
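/* A worked example of the encoding described above: for an ICMP
 * "destination unreachable, port unreachable" message, type is
 * ICMP_DEST_UNREACH (3) and code is ICMP_PORT_UNREACH (3), so the
 * combined value is (3 << 8) | 3 == 0x303, and icmp_err_convert[]
 * maps the code to the errno we report, ECONNREFUSED here.
 */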
356 
357 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
358 {
359 	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
360 	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
361 	struct inet_connection_sock *icsk;
362 	struct tcp_sock *tp;
363 	struct inet_sock *inet;
364 	const int type = icmp_hdr(icmp_skb)->type;
365 	const int code = icmp_hdr(icmp_skb)->code;
366 	struct sock *sk;
367 	struct sk_buff *skb;
368 	struct request_sock *fastopen;
369 	__u32 seq, snd_una;
370 	__u32 remaining;
371 	int err;
372 	struct net *net = dev_net(icmp_skb->dev);
373 
374 	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
375 				       th->dest, iph->saddr, ntohs(th->source),
376 				       inet_iif(icmp_skb));
377 	if (!sk) {
378 		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
379 		return;
380 	}
381 	if (sk->sk_state == TCP_TIME_WAIT) {
382 		inet_twsk_put(inet_twsk(sk));
383 		return;
384 	}
385 	seq = ntohl(th->seq);
386 	if (sk->sk_state == TCP_NEW_SYN_RECV)
387 		return tcp_req_err(sk, seq);
388 
389 	bh_lock_sock(sk);
390 	/* If too many ICMPs get dropped on busy
391 	 * servers this needs to be solved differently.
392 	 * We do take care of the PMTU discovery (RFC1191) special case:
393 	 * we can receive locally generated ICMP messages while the socket is held.
394 	 */
395 	if (sock_owned_by_user(sk)) {
396 		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
397 			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
398 	}
399 	if (sk->sk_state == TCP_CLOSE)
400 		goto out;
401 
402 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
403 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
404 		goto out;
405 	}
406 
407 	icsk = inet_csk(sk);
408 	tp = tcp_sk(sk);
409 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
410 	fastopen = tp->fastopen_rsk;
411 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
412 	if (sk->sk_state != TCP_LISTEN &&
413 	    !between(seq, snd_una, tp->snd_nxt)) {
414 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
415 		goto out;
416 	}
417 
418 	switch (type) {
419 	case ICMP_REDIRECT:
420 		do_redirect(icmp_skb, sk);
421 		goto out;
422 	case ICMP_SOURCE_QUENCH:
423 		/* Just silently ignore these. */
424 		goto out;
425 	case ICMP_PARAMETERPROB:
426 		err = EPROTO;
427 		break;
428 	case ICMP_DEST_UNREACH:
429 		if (code > NR_ICMP_UNREACH)
430 			goto out;
431 
432 		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
433 			/* We are not interested in TCP_LISTEN and open_requests
434 			 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
435 			 * they should go through unfragmented).
436 			 */
437 			if (sk->sk_state == TCP_LISTEN)
438 				goto out;
439 
440 			tp->mtu_info = info;
441 			if (!sock_owned_by_user(sk)) {
442 				tcp_v4_mtu_reduced(sk);
443 			} else {
444 				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
445 					sock_hold(sk);
446 			}
447 			goto out;
448 		}
449 
450 		err = icmp_err_convert[code].errno;
451 		/* check if icmp_skb allows reverting the backoff
452 		 * (see draft-zimmermann-tcp-lcd) */
453 		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
454 			break;
455 		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
456 		    !icsk->icsk_backoff || fastopen)
457 			break;
458 
459 		if (sock_owned_by_user(sk))
460 			break;
461 
462 		icsk->icsk_backoff--;
463 		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
464 					       TCP_TIMEOUT_INIT;
465 		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
466 
467 		skb = tcp_write_queue_head(sk);
468 		BUG_ON(!skb);
469 
470 		remaining = icsk->icsk_rto -
471 			    min(icsk->icsk_rto,
472 				tcp_time_stamp - tcp_skb_timestamp(skb));
473 
474 		if (remaining) {
475 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
476 						  remaining, TCP_RTO_MAX);
477 		} else {
478 			/* The reverted RTO has already expired:
479 			 * retransmit immediately. */
480 			tcp_retransmit_timer(sk);
481 		}
482 
483 		break;
484 	case ICMP_TIME_EXCEEDED:
485 		err = EHOSTUNREACH;
486 		break;
487 	default:
488 		goto out;
489 	}
490 
491 	switch (sk->sk_state) {
492 	case TCP_SYN_SENT:
493 	case TCP_SYN_RECV:
494 		/* Only in fast or simultaneous open. If a fast open socket
495 		 * is already accepted it is treated as a connected one below.
496 		 */
497 		if (fastopen && !fastopen->sk)
498 			break;
499 
500 		if (!sock_owned_by_user(sk)) {
501 			sk->sk_err = err;
502 
503 			sk->sk_error_report(sk);
504 
505 			tcp_done(sk);
506 		} else {
507 			sk->sk_err_soft = err;
508 		}
509 		goto out;
510 	}
511 
512 	/* If we've already connected we will keep trying
513 	 * until we time out, or the user gives up.
514 	 *
515 	 * rfc1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH to be
516 	 * considered as hard errors (well, FRAG_FAILED too,
517 	 * but it is obsoleted by pmtu discovery).
518 	 *
519 	 * Note that in the modern internet, where routing is unreliable
520 	 * and broken firewalls sit in every dark corner, sending random
521 	 * errors ordered by their masters, even these two messages finally
522 	 * lose their original sense (even Linux sends invalid PORT_UNREACHs).
523 	 *
524 	 * Now we are in compliance with RFCs.
525 	 *							--ANK (980905)
526 	 */
527 
528 	inet = inet_sk(sk);
529 	if (!sock_owned_by_user(sk) && inet->recverr) {
530 		sk->sk_err = err;
531 		sk->sk_error_report(sk);
532 	} else	{ /* Only an error on timeout */
533 		sk->sk_err_soft = err;
534 	}
535 
536 out:
537 	bh_unlock_sock(sk);
538 	sock_put(sk);
539 }
540 
541 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
542 {
543 	struct tcphdr *th = tcp_hdr(skb);
544 
545 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
546 		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
547 		skb->csum_start = skb_transport_header(skb) - skb->head;
548 		skb->csum_offset = offsetof(struct tcphdr, check);
549 	} else {
550 		th->check = tcp_v4_check(skb->len, saddr, daddr,
551 					 csum_partial(th,
552 						      th->doff << 2,
553 						      skb->csum));
554 	}
555 }
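/* A note on the CHECKSUM_PARTIAL branch above: only the complemented
 * pseudo-header sum is stored in th->check, while csum_start/csum_offset
 * tell the NIC (or skb_checksum_help()) where to fold in the sum over the
 * TCP header and payload. The else branch computes the full checksum in
 * software instead.
 */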
556 
557 /* This routine computes an IPv4 TCP checksum. */
558 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
559 {
560 	const struct inet_sock *inet = inet_sk(sk);
561 
562 	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
563 }
564 EXPORT_SYMBOL(tcp_v4_send_check);
565 
566 /*
567  *	This routine will send an RST to the other tcp.
568  *
569  *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
570  *		      for the reset?
571  *	Answer: if a packet caused the RST, it is not for a socket
572  *		existing in our system; if it is matched to a socket,
573  *		it is just a duplicate segment or a bug in the other side's
574  *		TCP. So we build the reply based only on the parameters
575  *		that arrived with the segment.
576  *	Exception: precedence violation. We do not implement it in any case.
577  */
578 
579 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
580 {
581 	const struct tcphdr *th = tcp_hdr(skb);
582 	struct {
583 		struct tcphdr th;
584 #ifdef CONFIG_TCP_MD5SIG
585 		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
586 #endif
587 	} rep;
588 	struct ip_reply_arg arg;
589 #ifdef CONFIG_TCP_MD5SIG
590 	struct tcp_md5sig_key *key;
591 	const __u8 *hash_location = NULL;
592 	unsigned char newhash[16];
593 	int genhash;
594 	struct sock *sk1 = NULL;
595 #endif
596 	struct net *net;
597 
598 	/* Never send a reset in response to a reset. */
599 	if (th->rst)
600 		return;
601 
602 	/* If sk is not NULL, it means we did a successful lookup and the
603 	 * incoming route had to be correct. The prequeue might have dropped our dst.
604 	 */
605 	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
606 		return;
607 
608 	/* Swap the send and the receive. */
609 	memset(&rep, 0, sizeof(rep));
610 	rep.th.dest   = th->source;
611 	rep.th.source = th->dest;
612 	rep.th.doff   = sizeof(struct tcphdr) / 4;
613 	rep.th.rst    = 1;
614 
615 	if (th->ack) {
616 		rep.th.seq = th->ack_seq;
617 	} else {
618 		rep.th.ack = 1;
619 		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
620 				       skb->len - (th->doff << 2));
621 	}
622 
623 	memset(&arg, 0, sizeof(arg));
624 	arg.iov[0].iov_base = (unsigned char *)&rep;
625 	arg.iov[0].iov_len  = sizeof(rep.th);
626 
627 	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
628 #ifdef CONFIG_TCP_MD5SIG
629 	hash_location = tcp_parse_md5sig_option(th);
630 	if (!sk && hash_location) {
631 		/*
632 		 * The active side is lost. Try to find the listening socket
633 		 * through the source port, and then find the md5 key through
634 		 * the listening socket. We do not lose security here: the
635 		 * incoming packet is checked against the md5 hash computed with
636 		 * the found key; no RST is generated if the hash doesn't match.
637 		 */
638 		sk1 = __inet_lookup_listener(net,
639 					     &tcp_hashinfo, ip_hdr(skb)->saddr,
640 					     th->source, ip_hdr(skb)->daddr,
641 					     ntohs(th->source), inet_iif(skb));
642 		/* don't send an RST if we can't find a key */
643 		if (!sk1)
644 			return;
645 		rcu_read_lock();
646 		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
647 					&ip_hdr(skb)->saddr, AF_INET);
648 		if (!key)
649 			goto release_sk1;
650 
651 		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
652 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
653 			goto release_sk1;
654 	} else {
655 		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
656 					     &ip_hdr(skb)->saddr,
657 					     AF_INET) : NULL;
658 	}
659 
660 	if (key) {
661 		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
662 				   (TCPOPT_NOP << 16) |
663 				   (TCPOPT_MD5SIG << 8) |
664 				   TCPOLEN_MD5SIG);
665 		/* Update length and the length the header thinks exists */
666 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
667 		rep.th.doff = arg.iov[0].iov_len / 4;
668 
669 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
670 				     key, ip_hdr(skb)->saddr,
671 				     ip_hdr(skb)->daddr, &rep.th);
672 	}
673 #endif
674 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
675 				      ip_hdr(skb)->saddr, /* XXX */
676 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
677 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
678 	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
679 	/* When the socket is gone, all binding information is lost.
680 	 * Routing might fail in this case. No choice here: if we force the
681 	 * input interface, we will misroute in the case of an asymmetric route.
682 	 */
683 	if (sk)
684 		arg.bound_dev_if = sk->sk_bound_dev_if;
685 
686 	arg.tos = ip_hdr(skb)->tos;
687 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
688 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
689 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
690 			      &arg, arg.iov[0].iov_len);
691 
692 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
693 	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
694 
695 #ifdef CONFIG_TCP_MD5SIG
696 release_sk1:
697 	if (sk1) {
698 		rcu_read_unlock();
699 		sock_put(sk1);
700 	}
701 #endif
702 }
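/* A worked example of the ack_seq arithmetic above: for an incoming
 * segment with seq == 5000 carrying 100 bytes of payload plus a FIN, the
 * RST acks 5000 + 0 (syn) + 1 (fin) + 100 == 5101, i.e. one past
 * everything the segment occupied in sequence space.
 */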
703 
704 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
705    outside socket context, is certainly ugly. What can I do?
706  */
707 
708 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
709 			    u32 win, u32 tsval, u32 tsecr, int oif,
710 			    struct tcp_md5sig_key *key,
711 			    int reply_flags, u8 tos)
712 {
713 	const struct tcphdr *th = tcp_hdr(skb);
714 	struct {
715 		struct tcphdr th;
716 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
717 #ifdef CONFIG_TCP_MD5SIG
718 			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
719 #endif
720 			];
721 	} rep;
722 	struct ip_reply_arg arg;
723 	struct net *net = dev_net(skb_dst(skb)->dev);
724 
725 	memset(&rep.th, 0, sizeof(struct tcphdr));
726 	memset(&arg, 0, sizeof(arg));
727 
728 	arg.iov[0].iov_base = (unsigned char *)&rep;
729 	arg.iov[0].iov_len  = sizeof(rep.th);
730 	if (tsecr) {
731 		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
732 				   (TCPOPT_TIMESTAMP << 8) |
733 				   TCPOLEN_TIMESTAMP);
734 		rep.opt[1] = htonl(tsval);
735 		rep.opt[2] = htonl(tsecr);
736 		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
737 	}
738 
739 	/* Swap the send and the receive. */
740 	rep.th.dest    = th->source;
741 	rep.th.source  = th->dest;
742 	rep.th.doff    = arg.iov[0].iov_len / 4;
743 	rep.th.seq     = htonl(seq);
744 	rep.th.ack_seq = htonl(ack);
745 	rep.th.ack     = 1;
746 	rep.th.window  = htons(win);
747 
748 #ifdef CONFIG_TCP_MD5SIG
749 	if (key) {
750 		int offset = (tsecr) ? 3 : 0;
751 
752 		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
753 					  (TCPOPT_NOP << 16) |
754 					  (TCPOPT_MD5SIG << 8) |
755 					  TCPOLEN_MD5SIG);
756 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
757 		rep.th.doff = arg.iov[0].iov_len/4;
758 
759 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
760 				    key, ip_hdr(skb)->saddr,
761 				    ip_hdr(skb)->daddr, &rep.th);
762 	}
763 #endif
764 	arg.flags = reply_flags;
765 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
766 				      ip_hdr(skb)->saddr, /* XXX */
767 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
768 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
769 	if (oif)
770 		arg.bound_dev_if = oif;
771 	arg.tos = tos;
772 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
773 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
774 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
775 			      &arg, arg.iov[0].iov_len);
776 
777 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
778 }
779 
780 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
781 {
782 	struct inet_timewait_sock *tw = inet_twsk(sk);
783 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
784 
785 	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
786 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
787 			tcp_time_stamp + tcptw->tw_ts_offset,
788 			tcptw->tw_ts_recent,
789 			tw->tw_bound_dev_if,
790 			tcp_twsk_md5_key(tcptw),
791 			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
792 			tw->tw_tos
793 			);
794 
795 	inet_twsk_put(tw);
796 }
797 
798 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
799 				  struct request_sock *req)
800 {
801 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
802 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
803 	 */
804 	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
805 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
806 			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
807 			tcp_time_stamp,
808 			req->ts_recent,
809 			0,
810 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
811 					  AF_INET),
812 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
813 			ip_hdr(skb)->tos);
814 }
815 
816 /*
817  *	Send a SYN-ACK after having received a SYN.
818  *	This still operates on a request_sock only, not on a big
819  *	socket.
820  */
821 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
822 			      struct flowi *fl,
823 			      struct request_sock *req,
824 			      struct tcp_fastopen_cookie *foc,
825 				  bool attach_req)
826 {
827 	const struct inet_request_sock *ireq = inet_rsk(req);
828 	struct flowi4 fl4;
829 	int err = -1;
830 	struct sk_buff *skb;
831 
832 	/* First, grab a route. */
833 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
834 		return -1;
835 
836 	skb = tcp_make_synack(sk, dst, req, foc, attach_req);
837 
838 	if (skb) {
839 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
840 
841 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
842 					    ireq->ir_rmt_addr,
843 					    ireq->opt);
844 		err = net_xmit_eval(err);
845 	}
846 
847 	return err;
848 }
849 
850 /*
851  *	IPv4 request_sock destructor.
852  */
853 static void tcp_v4_reqsk_destructor(struct request_sock *req)
854 {
855 	kfree(inet_rsk(req)->opt);
856 }
857 
858 
859 #ifdef CONFIG_TCP_MD5SIG
860 /*
861  * RFC2385 MD5 checksumming requires a mapping of
862  * IP address->MD5 Key.
863  * We need to maintain these in the sk structure.
864  */
865 
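/* For orientation, keys are installed from userspace via the TCP_MD5SIG
 * socket option, roughly like this (illustrative only; error handling
 * omitted):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	peer->sin_family = AF_INET;
 *	peer->sin_addr.s_addr = inet_addr("192.0.2.1");
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * which lands in tcp_v4_parse_md5_keys() below.
 */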
866 /* Find the Key structure for an address.  */
867 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
868 					 const union tcp_md5_addr *addr,
869 					 int family)
870 {
871 	const struct tcp_sock *tp = tcp_sk(sk);
872 	struct tcp_md5sig_key *key;
873 	unsigned int size = sizeof(struct in_addr);
874 	const struct tcp_md5sig_info *md5sig;
875 
876 	/* caller either holds rcu_read_lock() or socket lock */
877 	md5sig = rcu_dereference_check(tp->md5sig_info,
878 				       sock_owned_by_user(sk) ||
879 				       lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
880 	if (!md5sig)
881 		return NULL;
882 #if IS_ENABLED(CONFIG_IPV6)
883 	if (family == AF_INET6)
884 		size = sizeof(struct in6_addr);
885 #endif
886 	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
887 		if (key->family != family)
888 			continue;
889 		if (!memcmp(&key->addr, addr, size))
890 			return key;
891 	}
892 	return NULL;
893 }
894 EXPORT_SYMBOL(tcp_md5_do_lookup);
895 
896 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
897 					 const struct sock *addr_sk)
898 {
899 	const union tcp_md5_addr *addr;
900 
901 	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
902 	return tcp_md5_do_lookup(sk, addr, AF_INET);
903 }
904 EXPORT_SYMBOL(tcp_v4_md5_lookup);
905 
906 /* This can be called on a newly created socket, from other files */
907 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
908 		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
909 {
910 	/* Add Key to the list */
911 	struct tcp_md5sig_key *key;
912 	struct tcp_sock *tp = tcp_sk(sk);
913 	struct tcp_md5sig_info *md5sig;
914 
915 	key = tcp_md5_do_lookup(sk, addr, family);
916 	if (key) {
917 		/* Pre-existing entry - just update that one. */
918 		memcpy(key->key, newkey, newkeylen);
919 		key->keylen = newkeylen;
920 		return 0;
921 	}
922 
923 	md5sig = rcu_dereference_protected(tp->md5sig_info,
924 					   sock_owned_by_user(sk));
925 	if (!md5sig) {
926 		md5sig = kmalloc(sizeof(*md5sig), gfp);
927 		if (!md5sig)
928 			return -ENOMEM;
929 
930 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
931 		INIT_HLIST_HEAD(&md5sig->head);
932 		rcu_assign_pointer(tp->md5sig_info, md5sig);
933 	}
934 
935 	key = sock_kmalloc(sk, sizeof(*key), gfp);
936 	if (!key)
937 		return -ENOMEM;
938 	if (!tcp_alloc_md5sig_pool()) {
939 		sock_kfree_s(sk, key, sizeof(*key));
940 		return -ENOMEM;
941 	}
942 
943 	memcpy(key->key, newkey, newkeylen);
944 	key->keylen = newkeylen;
945 	key->family = family;
946 	memcpy(&key->addr, addr,
947 	       (family == AF_INET6) ? sizeof(struct in6_addr) :
948 				      sizeof(struct in_addr));
949 	hlist_add_head_rcu(&key->node, &md5sig->head);
950 	return 0;
951 }
952 EXPORT_SYMBOL(tcp_md5_do_add);
953 
954 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
955 {
956 	struct tcp_md5sig_key *key;
957 
958 	key = tcp_md5_do_lookup(sk, addr, family);
959 	if (!key)
960 		return -ENOENT;
961 	hlist_del_rcu(&key->node);
962 	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
963 	kfree_rcu(key, rcu);
964 	return 0;
965 }
966 EXPORT_SYMBOL(tcp_md5_do_del);
967 
968 static void tcp_clear_md5_list(struct sock *sk)
969 {
970 	struct tcp_sock *tp = tcp_sk(sk);
971 	struct tcp_md5sig_key *key;
972 	struct hlist_node *n;
973 	struct tcp_md5sig_info *md5sig;
974 
975 	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
976 
977 	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
978 		hlist_del_rcu(&key->node);
979 		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
980 		kfree_rcu(key, rcu);
981 	}
982 }
983 
984 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
985 				 int optlen)
986 {
987 	struct tcp_md5sig cmd;
988 	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
989 
990 	if (optlen < sizeof(cmd))
991 		return -EINVAL;
992 
993 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
994 		return -EFAULT;
995 
996 	if (sin->sin_family != AF_INET)
997 		return -EINVAL;
998 
999 	if (!cmd.tcpm_keylen)
1000 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1001 				      AF_INET);
1002 
1003 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1004 		return -EINVAL;
1005 
1006 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1007 			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1008 			      GFP_KERNEL);
1009 }
1010 
1011 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1012 					__be32 daddr, __be32 saddr, int nbytes)
1013 {
1014 	struct tcp4_pseudohdr *bp;
1015 	struct scatterlist sg;
1016 
1017 	bp = &hp->md5_blk.ip4;
1018 
1019 	/*
1020 	 * 1. the TCP pseudo-header (in the order: source IP address,
1021 	 * destination IP address, zero-padded protocol number, and
1022 	 * segment length)
1023 	 */
1024 	bp->saddr = saddr;
1025 	bp->daddr = daddr;
1026 	bp->pad = 0;
1027 	bp->protocol = IPPROTO_TCP;
1028 	bp->len = cpu_to_be16(nbytes);
1029 
1030 	sg_init_one(&sg, bp, sizeof(*bp));
1031 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1032 }
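/* Per RFC2385 the digest is computed over, in order: the pseudo-header
 * built above, the TCP header with its checksum field zeroed, the segment
 * payload, and finally the key itself. That matches the sequence of
 * tcp_*_hash_* calls in tcp_v4_md5_hash_skb() below; the _hdr variant
 * omits the payload step since the replies it signs carry none.
 */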
1033 
1034 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1035 			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1036 {
1037 	struct tcp_md5sig_pool *hp;
1038 	struct hash_desc *desc;
1039 
1040 	hp = tcp_get_md5sig_pool();
1041 	if (!hp)
1042 		goto clear_hash_noput;
1043 	desc = &hp->md5_desc;
1044 
1045 	if (crypto_hash_init(desc))
1046 		goto clear_hash;
1047 	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1048 		goto clear_hash;
1049 	if (tcp_md5_hash_header(hp, th))
1050 		goto clear_hash;
1051 	if (tcp_md5_hash_key(hp, key))
1052 		goto clear_hash;
1053 	if (crypto_hash_final(desc, md5_hash))
1054 		goto clear_hash;
1055 
1056 	tcp_put_md5sig_pool();
1057 	return 0;
1058 
1059 clear_hash:
1060 	tcp_put_md5sig_pool();
1061 clear_hash_noput:
1062 	memset(md5_hash, 0, 16);
1063 	return 1;
1064 }
1065 
1066 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1067 			const struct sock *sk,
1068 			const struct sk_buff *skb)
1069 {
1070 	struct tcp_md5sig_pool *hp;
1071 	struct hash_desc *desc;
1072 	const struct tcphdr *th = tcp_hdr(skb);
1073 	__be32 saddr, daddr;
1074 
1075 	if (sk) { /* valid for establish/request sockets */
1076 		saddr = sk->sk_rcv_saddr;
1077 		daddr = sk->sk_daddr;
1078 	} else {
1079 		const struct iphdr *iph = ip_hdr(skb);
1080 		saddr = iph->saddr;
1081 		daddr = iph->daddr;
1082 	}
1083 
1084 	hp = tcp_get_md5sig_pool();
1085 	if (!hp)
1086 		goto clear_hash_noput;
1087 	desc = &hp->md5_desc;
1088 
1089 	if (crypto_hash_init(desc))
1090 		goto clear_hash;
1091 
1092 	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1093 		goto clear_hash;
1094 	if (tcp_md5_hash_header(hp, th))
1095 		goto clear_hash;
1096 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1097 		goto clear_hash;
1098 	if (tcp_md5_hash_key(hp, key))
1099 		goto clear_hash;
1100 	if (crypto_hash_final(desc, md5_hash))
1101 		goto clear_hash;
1102 
1103 	tcp_put_md5sig_pool();
1104 	return 0;
1105 
1106 clear_hash:
1107 	tcp_put_md5sig_pool();
1108 clear_hash_noput:
1109 	memset(md5_hash, 0, 16);
1110 	return 1;
1111 }
1112 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1113 
1114 #endif
1115 
1116 /* Called with rcu_read_lock() */
1117 static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1118 				    const struct sk_buff *skb)
1119 {
1120 #ifdef CONFIG_TCP_MD5SIG
1121 	/*
1122 	 * This gets called for each TCP segment that arrives,
1123 	 * so we want to be efficient.
1124 	 * We have 3 drop cases:
1125 	 * o No MD5 hash and one expected.
1126 	 * o MD5 hash and we're not expecting one.
1127 	 * o MD5 hash and it's wrong.
1128 	 */
1129 	const __u8 *hash_location = NULL;
1130 	struct tcp_md5sig_key *hash_expected;
1131 	const struct iphdr *iph = ip_hdr(skb);
1132 	const struct tcphdr *th = tcp_hdr(skb);
1133 	int genhash;
1134 	unsigned char newhash[16];
1135 
1136 	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1137 					  AF_INET);
1138 	hash_location = tcp_parse_md5sig_option(th);
1139 
1140 	/* We've parsed the options - do we have a hash? */
1141 	if (!hash_expected && !hash_location)
1142 		return false;
1143 
1144 	if (hash_expected && !hash_location) {
1145 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1146 		return true;
1147 	}
1148 
1149 	if (!hash_expected && hash_location) {
1150 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1151 		return true;
1152 	}
1153 
1154 	/* Okay, so we have both hash_expected and hash_location -
1155 	 * we need to calculate the hash.
1156 	 */
1157 	genhash = tcp_v4_md5_hash_skb(newhash,
1158 				      hash_expected,
1159 				      NULL, skb);
1160 
1161 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1162 		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1163 				     &iph->saddr, ntohs(th->source),
1164 				     &iph->daddr, ntohs(th->dest),
1165 				     genhash ? " tcp_v4_calc_md5_hash failed"
1166 				     : "");
1167 		return true;
1168 	}
1169 	return false;
1170 #endif
1171 	return false;
1172 }
1173 
1174 static void tcp_v4_init_req(struct request_sock *req,
1175 			    const struct sock *sk_listener,
1176 			    struct sk_buff *skb)
1177 {
1178 	struct inet_request_sock *ireq = inet_rsk(req);
1179 
1180 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1181 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1182 	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
1183 	ireq->opt = tcp_v4_save_options(skb);
1184 }
1185 
1186 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1187 					  struct flowi *fl,
1188 					  const struct request_sock *req,
1189 					  bool *strict)
1190 {
1191 	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1192 
1193 	if (strict) {
1194 		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1195 			*strict = true;
1196 		else
1197 			*strict = false;
1198 	}
1199 
1200 	return dst;
1201 }
1202 
1203 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1204 	.family		=	PF_INET,
1205 	.obj_size	=	sizeof(struct tcp_request_sock),
1206 	.rtx_syn_ack	=	tcp_rtx_synack,
1207 	.send_ack	=	tcp_v4_reqsk_send_ack,
1208 	.destructor	=	tcp_v4_reqsk_destructor,
1209 	.send_reset	=	tcp_v4_send_reset,
1210 	.syn_ack_timeout =	tcp_syn_ack_timeout,
1211 };
1212 
1213 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1214 	.mss_clamp	=	TCP_MSS_DEFAULT,
1215 #ifdef CONFIG_TCP_MD5SIG
1216 	.req_md5_lookup	=	tcp_v4_md5_lookup,
1217 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1218 #endif
1219 	.init_req	=	tcp_v4_init_req,
1220 #ifdef CONFIG_SYN_COOKIES
1221 	.cookie_init_seq =	cookie_v4_init_sequence,
1222 #endif
1223 	.route_req	=	tcp_v4_route_req,
1224 	.init_seq	=	tcp_v4_init_sequence,
1225 	.send_synack	=	tcp_v4_send_synack,
1226 };
1227 
1228 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1229 {
1230 	/* Never answer SYNs sent to broadcast or multicast */
1231 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1232 		goto drop;
1233 
1234 	return tcp_conn_request(&tcp_request_sock_ops,
1235 				&tcp_request_sock_ipv4_ops, sk, skb);
1236 
1237 drop:
1238 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1239 	return 0;
1240 }
1241 EXPORT_SYMBOL(tcp_v4_conn_request);
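/* For orientation, the listening side that feeds this function looks
 * roughly like this from userspace (illustrative only; error handling
 * omitted):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in addr = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, 128);
 *
 * An arriving SYN then reaches tcp_v4_conn_request() from
 * tcp_rcv_state_process() on the TCP_LISTEN socket.
 */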
1242 
1243 
1244 /*
1245  * The three way handshake has completed - we got a valid synack -
1246  * now create the new socket.
1247  */
1248 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1249 				  struct request_sock *req,
1250 				  struct dst_entry *dst,
1251 				  struct request_sock *req_unhash,
1252 				  bool *own_req)
1253 {
1254 	struct inet_request_sock *ireq;
1255 	struct inet_sock *newinet;
1256 	struct tcp_sock *newtp;
1257 	struct sock *newsk;
1258 #ifdef CONFIG_TCP_MD5SIG
1259 	struct tcp_md5sig_key *key;
1260 #endif
1261 	struct ip_options_rcu *inet_opt;
1262 
1263 	if (sk_acceptq_is_full(sk))
1264 		goto exit_overflow;
1265 
1266 	newsk = tcp_create_openreq_child(sk, req, skb);
1267 	if (!newsk)
1268 		goto exit_nonewsk;
1269 
1270 	newsk->sk_gso_type = SKB_GSO_TCPV4;
1271 	inet_sk_rx_dst_set(newsk, skb);
1272 
1273 	newtp		      = tcp_sk(newsk);
1274 	newinet		      = inet_sk(newsk);
1275 	ireq		      = inet_rsk(req);
1276 	sk_daddr_set(newsk, ireq->ir_rmt_addr);
1277 	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1278 	newinet->inet_saddr	      = ireq->ir_loc_addr;
1279 	inet_opt	      = ireq->opt;
1280 	rcu_assign_pointer(newinet->inet_opt, inet_opt);
1281 	ireq->opt	      = NULL;
1282 	newinet->mc_index     = inet_iif(skb);
1283 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1284 	newinet->rcv_tos      = ip_hdr(skb)->tos;
1285 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1286 	if (inet_opt)
1287 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1288 	newinet->inet_id = newtp->write_seq ^ jiffies;
1289 
1290 	if (!dst) {
1291 		dst = inet_csk_route_child_sock(sk, newsk, req);
1292 		if (!dst)
1293 			goto put_and_exit;
1294 	} else {
1295 		/* syncookie case : see end of cookie_v4_check() */
1296 	}
1297 	sk_setup_caps(newsk, dst);
1298 
1299 	tcp_ca_openreq_child(newsk, dst);
1300 
1301 	tcp_sync_mss(newsk, dst_mtu(dst));
1302 	newtp->advmss = dst_metric_advmss(dst);
1303 	if (tcp_sk(sk)->rx_opt.user_mss &&
1304 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1305 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1306 
1307 	tcp_initialize_rcv_mss(newsk);
1308 
1309 #ifdef CONFIG_TCP_MD5SIG
1310 	/* Copy over the MD5 key from the original socket */
1311 	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1312 				AF_INET);
1313 	if (key) {
1314 		/*
1315 		 * We're using one, so create a matching key
1316 		 * on the newsk structure. If we fail to get
1317 		 * memory, then we end up not copying the key
1318 		 * across. Shucks.
1319 		 */
1320 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1321 			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
1322 		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1323 	}
1324 #endif
1325 
1326 	if (__inet_inherit_port(sk, newsk) < 0)
1327 		goto put_and_exit;
1328 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1329 
1330 	return newsk;
1331 
1332 exit_overflow:
1333 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1334 exit_nonewsk:
1335 	dst_release(dst);
1336 exit:
1337 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1338 	return NULL;
1339 put_and_exit:
1340 	inet_csk_prepare_forced_close(newsk);
1341 	tcp_done(newsk);
1342 	goto exit;
1343 }
1344 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1345 
1346 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1347 {
1348 #ifdef CONFIG_SYN_COOKIES
1349 	const struct tcphdr *th = tcp_hdr(skb);
1350 
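	/* Cookies are only validated on the returning ACK, which carries
	 * no SYN bit; an initial SYN goes through the normal request path.
	 */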
1351 	if (!th->syn)
1352 		sk = cookie_v4_check(sk, skb);
1353 #endif
1354 	return sk;
1355 }
1356 
1357 /* The socket must have its spinlock held when we get
1358  * here, unless it is a TCP_LISTEN socket.
1359  *
1360  * We have a potential double-lock case here, so even when
1361  * doing backlog processing we use the BH locking scheme.
1362  * This is because we cannot sleep with the original spinlock
1363  * held.
1364  */
1365 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1366 {
1367 	struct sock *rsk;
1368 
1369 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1370 		struct dst_entry *dst = sk->sk_rx_dst;
1371 
1372 		sock_rps_save_rxhash(sk, skb);
1373 		sk_mark_napi_id(sk, skb);
1374 		if (dst) {
1375 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1376 			    !dst->ops->check(dst, 0)) {
1377 				dst_release(dst);
1378 				sk->sk_rx_dst = NULL;
1379 			}
1380 		}
1381 		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1382 		return 0;
1383 	}
1384 
1385 	if (tcp_checksum_complete(skb))
1386 		goto csum_err;
1387 
1388 	if (sk->sk_state == TCP_LISTEN) {
1389 		struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1390 
1391 		if (!nsk)
1392 			goto discard;
1393 		if (nsk != sk) {
1394 			sock_rps_save_rxhash(nsk, skb);
1395 			sk_mark_napi_id(nsk, skb);
1396 			if (tcp_child_process(sk, nsk, skb)) {
1397 				rsk = nsk;
1398 				goto reset;
1399 			}
1400 			return 0;
1401 		}
1402 	} else
1403 		sock_rps_save_rxhash(sk, skb);
1404 
1405 	if (tcp_rcv_state_process(sk, skb)) {
1406 		rsk = sk;
1407 		goto reset;
1408 	}
1409 	return 0;
1410 
1411 reset:
1412 	tcp_v4_send_reset(rsk, skb);
1413 discard:
1414 	kfree_skb(skb);
1415 	/* Be careful here. If this function gets more complicated and
1416 	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1417 	 * might be destroyed here. This current version compiles correctly,
1418 	 * but you have been warned.
1419 	 */
1420 	return 0;
1421 
1422 csum_err:
1423 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1424 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1425 	goto discard;
1426 }
1427 EXPORT_SYMBOL(tcp_v4_do_rcv);
1428 
1429 void tcp_v4_early_demux(struct sk_buff *skb)
1430 {
1431 	const struct iphdr *iph;
1432 	const struct tcphdr *th;
1433 	struct sock *sk;
1434 
1435 	if (skb->pkt_type != PACKET_HOST)
1436 		return;
1437 
1438 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1439 		return;
1440 
1441 	iph = ip_hdr(skb);
1442 	th = tcp_hdr(skb);
1443 
1444 	if (th->doff < sizeof(struct tcphdr) / 4)
1445 		return;
1446 
1447 	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1448 				       iph->saddr, th->source,
1449 				       iph->daddr, ntohs(th->dest),
1450 				       skb->skb_iif);
1451 	if (sk) {
1452 		skb->sk = sk;
1453 		skb->destructor = sock_edemux;
1454 		if (sk_fullsock(sk)) {
1455 			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1456 
1457 			if (dst)
1458 				dst = dst_check(dst, 0);
1459 			if (dst &&
1460 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1461 				skb_dst_set_noref(skb, dst);
1462 		}
1463 	}
1464 }
1465 
1466 /* Packet is added to the VJ-style prequeue for processing in process
1467  * context, if a reader task is waiting. Apparently, this exciting
1468  * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1469  * failed somewhere. Latency? Burstiness? Well, at least now we will
1470  * see why it failed. 8)8)				  --ANK
1471  *
1472  */
1473 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1474 {
1475 	struct tcp_sock *tp = tcp_sk(sk);
1476 
1477 	if (sysctl_tcp_low_latency || !tp->ucopy.task)
1478 		return false;
1479 
1480 	if (skb->len <= tcp_hdrlen(skb) &&
1481 	    skb_queue_len(&tp->ucopy.prequeue) == 0)
1482 		return false;
1483 
1484 	/* Before escaping the RCU protected region, we need to take care of
1485 	 * the skb dst. Prequeue is only enabled for established sockets.
1486 	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
1487 	 * Instead of doing a full sk_rx_dst validation here, let's perform
1488 	 * an optimistic check.
1489 	 */
1490 	if (likely(sk->sk_rx_dst))
1491 		skb_dst_drop(skb);
1492 	else
1493 		skb_dst_force(skb);
1494 
1495 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
1496 	tp->ucopy.memory += skb->truesize;
1497 	if (tp->ucopy.memory > sk->sk_rcvbuf) {
1498 		struct sk_buff *skb1;
1499 
1500 		BUG_ON(sock_owned_by_user(sk));
1501 
1502 		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1503 			sk_backlog_rcv(sk, skb1);
1504 			NET_INC_STATS_BH(sock_net(sk),
1505 					 LINUX_MIB_TCPPREQUEUEDROPPED);
1506 		}
1507 
1508 		tp->ucopy.memory = 0;
1509 	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1510 		wake_up_interruptible_sync_poll(sk_sleep(sk),
1511 					   POLLIN | POLLRDNORM | POLLRDBAND);
1512 		if (!inet_csk_ack_scheduled(sk))
1513 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1514 						  (3 * tcp_rto_min(sk)) / 4,
1515 						  TCP_RTO_MAX);
1516 	}
1517 	return true;
1518 }
1519 EXPORT_SYMBOL(tcp_prequeue);
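/* The prequeue can be bypassed entirely, e.g. (illustrative shell usage):
 *
 *	sysctl -w net.ipv4.tcp_low_latency=1
 *
 * which trades the batching done above for lower per-segment latency by
 * letting segments go straight through tcp_v4_do_rcv().
 */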
1520 
1521 /*
1522  *	From tcp_input.c
1523  */
1524 
1525 int tcp_v4_rcv(struct sk_buff *skb)
1526 {
1527 	const struct iphdr *iph;
1528 	const struct tcphdr *th;
1529 	struct sock *sk;
1530 	int ret;
1531 	struct net *net = dev_net(skb->dev);
1532 
1533 	if (skb->pkt_type != PACKET_HOST)
1534 		goto discard_it;
1535 
1536 	/* Count it even if it's bad */
1537 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1538 
1539 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1540 		goto discard_it;
1541 
1542 	th = tcp_hdr(skb);
1543 
1544 	if (th->doff < sizeof(struct tcphdr) / 4)
1545 		goto bad_packet;
1546 	if (!pskb_may_pull(skb, th->doff * 4))
1547 		goto discard_it;
1548 
1549 	/* An explanation is required here, I think.
1550 	 * Packet length and doff are validated by header prediction,
1551 	 * provided the case of th->doff == 0 is eliminated.
1552 	 * So, we defer the checks. */
1553 
1554 	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1555 		goto csum_error;
1556 
1557 	th = tcp_hdr(skb);
1558 	iph = ip_hdr(skb);
1559 	/* This is tricky: we move the IPCB to its correct location inside TCP_SKB_CB();
1560 	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1561 	 */
1562 	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1563 		sizeof(struct inet_skb_parm));
1564 	barrier();
1565 
1566 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1567 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1568 				    skb->len - th->doff * 4);
1569 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1570 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1571 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1572 	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1573 	TCP_SKB_CB(skb)->sacked	 = 0;
1574 
1575 lookup:
1576 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1577 	if (!sk)
1578 		goto no_tcp_socket;
1579 
1580 process:
1581 	if (sk->sk_state == TCP_TIME_WAIT)
1582 		goto do_time_wait;
1583 
1584 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1585 		struct request_sock *req = inet_reqsk(sk);
1586 		struct sock *nsk = NULL;
1587 
1588 		sk = req->rsk_listener;
1589 		if (tcp_v4_inbound_md5_hash(sk, skb))
1590 			goto discard_and_relse;
1591 		if (likely(sk->sk_state == TCP_LISTEN)) {
1592 			nsk = tcp_check_req(sk, skb, req, false);
1593 		} else {
1594 			inet_csk_reqsk_queue_drop_and_put(sk, req);
1595 			goto lookup;
1596 		}
1597 		if (!nsk) {
1598 			reqsk_put(req);
1599 			goto discard_it;
1600 		}
1601 		if (nsk == sk) {
1602 			sock_hold(sk);
1603 			reqsk_put(req);
1604 		} else if (tcp_child_process(sk, nsk, skb)) {
1605 			tcp_v4_send_reset(nsk, skb);
1606 			goto discard_it;
1607 		} else {
1608 			return 0;
1609 		}
1610 	}
1611 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1612 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1613 		goto discard_and_relse;
1614 	}
1615 
1616 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1617 		goto discard_and_relse;
1618 
1619 	if (tcp_v4_inbound_md5_hash(sk, skb))
1620 		goto discard_and_relse;
1621 
1622 	nf_reset(skb);
1623 
1624 	if (sk_filter(sk, skb))
1625 		goto discard_and_relse;
1626 
1627 	skb->dev = NULL;
1628 
1629 	if (sk->sk_state == TCP_LISTEN) {
1630 		ret = tcp_v4_do_rcv(sk, skb);
1631 		goto put_and_return;
1632 	}
1633 
1634 	sk_incoming_cpu_update(sk);
1635 
1636 	bh_lock_sock_nested(sk);
1637 	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1638 	ret = 0;
1639 	if (!sock_owned_by_user(sk)) {
1640 		if (!tcp_prequeue(sk, skb))
1641 			ret = tcp_v4_do_rcv(sk, skb);
1642 	} else if (unlikely(sk_add_backlog(sk, skb,
1643 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1644 		bh_unlock_sock(sk);
1645 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1646 		goto discard_and_relse;
1647 	}
1648 	bh_unlock_sock(sk);
1649 
1650 put_and_return:
1651 	sock_put(sk);
1652 
1653 	return ret;
1654 
1655 no_tcp_socket:
1656 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1657 		goto discard_it;
1658 
1659 	if (tcp_checksum_complete(skb)) {
1660 csum_error:
1661 		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1662 bad_packet:
1663 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1664 	} else {
1665 		tcp_v4_send_reset(NULL, skb);
1666 	}
1667 
1668 discard_it:
1669 	/* Discard frame. */
1670 	kfree_skb(skb);
1671 	return 0;
1672 
1673 discard_and_relse:
1674 	sock_put(sk);
1675 	goto discard_it;
1676 
1677 do_time_wait:
1678 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1679 		inet_twsk_put(inet_twsk(sk));
1680 		goto discard_it;
1681 	}
1682 
1683 	if (tcp_checksum_complete(skb)) {
1684 		inet_twsk_put(inet_twsk(sk));
1685 		goto csum_error;
1686 	}
1687 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1688 	case TCP_TW_SYN: {
1689 		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1690 							&tcp_hashinfo,
1691 							iph->saddr, th->source,
1692 							iph->daddr, th->dest,
1693 							inet_iif(skb));
1694 		if (sk2) {
1695 			inet_twsk_deschedule_put(inet_twsk(sk));
1696 			sk = sk2;
1697 			goto process;
1698 		}
1699 		/* Fall through to ACK */
1700 	}
1701 	case TCP_TW_ACK:
1702 		tcp_v4_timewait_ack(sk, skb);
1703 		break;
1704 	case TCP_TW_RST:
1705 		goto no_tcp_socket;
1706 	case TCP_TW_SUCCESS:;
1707 	}
1708 	goto discard_it;
1709 }
1710 
1711 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1712 	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
1713 	.twsk_unique	= tcp_twsk_unique,
1714 	.twsk_destructor= tcp_twsk_destructor,
1715 };
1716 
1717 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1718 {
1719 	struct dst_entry *dst = skb_dst(skb);
1720 
1721 	if (dst) {
1722 		dst_hold(dst);
1723 		sk->sk_rx_dst = dst;
1724 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1725 	}
1726 }
1727 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1728 
1729 const struct inet_connection_sock_af_ops ipv4_specific = {
1730 	.queue_xmit	   = ip_queue_xmit,
1731 	.send_check	   = tcp_v4_send_check,
1732 	.rebuild_header	   = inet_sk_rebuild_header,
1733 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1734 	.conn_request	   = tcp_v4_conn_request,
1735 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
1736 	.net_header_len	   = sizeof(struct iphdr),
1737 	.setsockopt	   = ip_setsockopt,
1738 	.getsockopt	   = ip_getsockopt,
1739 	.addr2sockaddr	   = inet_csk_addr2sockaddr,
1740 	.sockaddr_len	   = sizeof(struct sockaddr_in),
1741 	.bind_conflict	   = inet_csk_bind_conflict,
1742 #ifdef CONFIG_COMPAT
1743 	.compat_setsockopt = compat_ip_setsockopt,
1744 	.compat_getsockopt = compat_ip_getsockopt,
1745 #endif
1746 	.mtu_reduced	   = tcp_v4_mtu_reduced,
1747 };
1748 EXPORT_SYMBOL(ipv4_specific);
1749 
1750 #ifdef CONFIG_TCP_MD5SIG
1751 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1752 	.md5_lookup		= tcp_v4_md5_lookup,
1753 	.calc_md5_hash		= tcp_v4_md5_hash_skb,
1754 	.md5_parse		= tcp_v4_parse_md5_keys,
1755 };
1756 #endif
1757 
1758 /* NOTE: A lot of things are set to zero explicitly by the call to
1759  *       sk_alloc(), so they need not be done here.
1760  */
1761 static int tcp_v4_init_sock(struct sock *sk)
1762 {
1763 	struct inet_connection_sock *icsk = inet_csk(sk);
1764 
1765 	tcp_init_sock(sk);
1766 
1767 	icsk->icsk_af_ops = &ipv4_specific;
1768 
1769 #ifdef CONFIG_TCP_MD5SIG
1770 	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1771 #endif
1772 
1773 	return 0;
1774 }
1775 
1776 void tcp_v4_destroy_sock(struct sock *sk)
1777 {
1778 	struct tcp_sock *tp = tcp_sk(sk);
1779 
1780 	tcp_clear_xmit_timers(sk);
1781 
1782 	tcp_cleanup_congestion_control(sk);
1783 
1784 	/* Clean up the write buffer. */
1785 	tcp_write_queue_purge(sk);
1786 
1787 	/* Clean up our, hopefully empty, out_of_order_queue. */
1788 	__skb_queue_purge(&tp->out_of_order_queue);
1789 
1790 #ifdef CONFIG_TCP_MD5SIG
1791 	/* Clean up the MD5 key list, if any */
1792 	if (tp->md5sig_info) {
1793 		tcp_clear_md5_list(sk);
1794 		kfree_rcu(tp->md5sig_info, rcu);
1795 		tp->md5sig_info = NULL;
1796 	}
1797 #endif
1798 
1799 	/* Clean the prequeue; it really must be empty. */
1800 	__skb_queue_purge(&tp->ucopy.prequeue);
1801 
1802 	/* Clean up a referenced TCP bind bucket. */
1803 	if (inet_csk(sk)->icsk_bind_hash)
1804 		inet_put_port(sk);
1805 
1806 	BUG_ON(tp->fastopen_rsk);
1807 
1808 	/* If socket is aborted during connect operation */
1809 	tcp_free_fastopen_req(tp);
1810 	tcp_saved_syn_free(tp);
1811 
1812 	sk_sockets_allocated_dec(sk);
1813 	sock_release_memcg(sk);
1814 }
1815 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1816 
1817 #ifdef CONFIG_PROC_FS
1818 /* Proc filesystem TCP sock list dumping. */
1819 
1820 /*
1821  * Get the next listener socket following cur.  If cur is NULL, get the
1822  * first socket starting from the bucket given in st->bucket; when
1823  * st->bucket is zero the very first socket in the hash table is returned.
1824  */
1825 static void *listening_get_next(struct seq_file *seq, void *cur)
1826 {
1827 	struct inet_connection_sock *icsk;
1828 	struct hlist_nulls_node *node;
1829 	struct sock *sk = cur;
1830 	struct inet_listen_hashbucket *ilb;
1831 	struct tcp_iter_state *st = seq->private;
1832 	struct net *net = seq_file_net(seq);
1833 
1834 	if (!sk) {
1835 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1836 		spin_lock_bh(&ilb->lock);
1837 		sk = sk_nulls_head(&ilb->head);
1838 		st->offset = 0;
1839 		goto get_sk;
1840 	}
1841 	ilb = &tcp_hashinfo.listening_hash[st->bucket];
1842 	++st->num;
1843 	++st->offset;
1844 
1845 	sk = sk_nulls_next(sk);
1846 get_sk:
1847 	sk_nulls_for_each_from(sk, node) {
1848 		if (!net_eq(sock_net(sk), net))
1849 			continue;
1850 		if (sk->sk_family == st->family) {
1851 			cur = sk;
1852 			goto out;
1853 		}
1855 	}
1856 	spin_unlock_bh(&ilb->lock);
1857 	st->offset = 0;
1858 	if (++st->bucket < INET_LHTABLE_SIZE) {
1859 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1860 		spin_lock_bh(&ilb->lock);
1861 		sk = sk_nulls_head(&ilb->head);
1862 		goto get_sk;
1863 	}
1864 	cur = NULL;
1865 out:
1866 	return cur;
1867 }
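
/*
 * A note on the sk_nulls_* iterators above: listener chains are "nulls"
 * lists, terminating in a marker that encodes the bucket rather than in
 * NULL, so that lockless readers can detect a socket migrating to another
 * chain mid-walk.  A minimal walk over one bucket looks like:
 *
 *	sk_nulls_for_each(sk, node, &ilb->head)
 *		if (net_eq(sock_net(sk), net))
 *			...;
 *
 * The iteration here runs under ilb->lock, so it simply reuses the shared
 * iterator rather than relying on the marker.
 */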
1868 
1869 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1870 {
1871 	struct tcp_iter_state *st = seq->private;
1872 	void *rc;
1873 
1874 	st->bucket = 0;
1875 	st->offset = 0;
1876 	rc = listening_get_next(seq, NULL);
1877 
1878 	while (rc && *pos) {
1879 		rc = listening_get_next(seq, rc);
1880 		--*pos;
1881 	}
1882 	return rc;
1883 }
1884 
1885 static inline bool empty_bucket(const struct tcp_iter_state *st)
1886 {
1887 	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1888 }
1889 
1890 /*
1891  * Get first established socket starting from bucket given in st->bucket.
1892  * If st->bucket is zero, the very first socket in the hash is returned.
1893  */
1894 static void *established_get_first(struct seq_file *seq)
1895 {
1896 	struct tcp_iter_state *st = seq->private;
1897 	struct net *net = seq_file_net(seq);
1898 	void *rc = NULL;
1899 
1900 	st->offset = 0;
1901 	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1902 		struct sock *sk;
1903 		struct hlist_nulls_node *node;
1904 		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1905 
1906 		/* Lockless fast path for the common case of empty buckets */
1907 		if (empty_bucket(st))
1908 			continue;
1909 
1910 		spin_lock_bh(lock);
1911 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1912 			if (sk->sk_family != st->family ||
1913 			    !net_eq(sock_net(sk), net)) {
1914 				continue;
1915 			}
1916 			rc = sk;
1917 			goto out;
1918 		}
1919 		spin_unlock_bh(lock);
1920 	}
1921 out:
1922 	return rc;
1923 }
1924 
1925 static void *established_get_next(struct seq_file *seq, void *cur)
1926 {
1927 	struct sock *sk = cur;
1928 	struct hlist_nulls_node *node;
1929 	struct tcp_iter_state *st = seq->private;
1930 	struct net *net = seq_file_net(seq);
1931 
1932 	++st->num;
1933 	++st->offset;
1934 
1935 	sk = sk_nulls_next(sk);
1936 
1937 	sk_nulls_for_each_from(sk, node) {
1938 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1939 			return sk;
1940 	}
1941 
1942 	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1943 	++st->bucket;
1944 	return established_get_first(seq);
1945 }
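
/*
 * The lock discipline in the two helpers above is deliberately asymmetric:
 * established_get_first() returns with the bucket lock held, and
 * established_get_next() only drops it when it falls off the end of a
 * chain and advances to the next bucket.  Whoever stops mid-bucket must
 * release the lock, which is what tcp_seq_stop() does below.
 */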
1946 
1947 static void *established_get_idx(struct seq_file *seq, loff_t pos)
1948 {
1949 	struct tcp_iter_state *st = seq->private;
1950 	void *rc;
1951 
1952 	st->bucket = 0;
1953 	rc = established_get_first(seq);
1954 
1955 	while (rc && pos) {
1956 		rc = established_get_next(seq, rc);
1957 		--pos;
1958 	}
1959 	return rc;
1960 }
1961 
1962 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1963 {
1964 	void *rc;
1965 	struct tcp_iter_state *st = seq->private;
1966 
1967 	st->state = TCP_SEQ_STATE_LISTENING;
1968 	rc	  = listening_get_idx(seq, &pos);
1969 
1970 	if (!rc) {
1971 		st->state = TCP_SEQ_STATE_ESTABLISHED;
1972 		rc	  = established_get_idx(seq, pos);
1973 	}
1974 
1975 	return rc;
1976 }
1977 
1978 static void *tcp_seek_last_pos(struct seq_file *seq)
1979 {
1980 	struct tcp_iter_state *st = seq->private;
1981 	int offset = st->offset;
1982 	int orig_num = st->num;
1983 	void *rc = NULL;
1984 
1985 	switch (st->state) {
1986 	case TCP_SEQ_STATE_LISTENING:
1987 		if (st->bucket >= INET_LHTABLE_SIZE)
1988 			break;
1990 		rc = listening_get_next(seq, NULL);
1991 		while (offset-- && rc)
1992 			rc = listening_get_next(seq, rc);
1993 		if (rc)
1994 			break;
1995 		st->bucket = 0;
1996 		st->state = TCP_SEQ_STATE_ESTABLISHED;
1997 		/* Fallthrough */
1998 	case TCP_SEQ_STATE_ESTABLISHED:
1999 		if (st->bucket > tcp_hashinfo.ehash_mask)
2000 			break;
2001 		rc = established_get_first(seq);
2002 		while (offset-- && rc)
2003 			rc = established_get_next(seq, rc);
2004 	}
2005 
2006 	st->num = orig_num;
2007 
2008 	return rc;
2009 }
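
/*
 * tcp_seek_last_pos() makes sequential reads of a large /proc/net/tcp
 * cheap: without it, every read(2) chunk would restart the walk from
 * bucket 0 and skip *pos entries, turning a full dump into O(n^2) work.
 * st->bucket and st->offset remember where the previous chunk stopped so
 * the walk can resume in place.
 */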
2010 
2011 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2012 {
2013 	struct tcp_iter_state *st = seq->private;
2014 	void *rc;
2015 
2016 	if (*pos && *pos == st->last_pos) {
2017 		rc = tcp_seek_last_pos(seq);
2018 		if (rc)
2019 			goto out;
2020 	}
2021 
2022 	st->state = TCP_SEQ_STATE_LISTENING;
2023 	st->num = 0;
2024 	st->bucket = 0;
2025 	st->offset = 0;
2026 	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2027 
2028 out:
2029 	st->last_pos = *pos;
2030 	return rc;
2031 }
2032 
2033 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2034 {
2035 	struct tcp_iter_state *st = seq->private;
2036 	void *rc = NULL;
2037 
2038 	if (v == SEQ_START_TOKEN) {
2039 		rc = tcp_get_idx(seq, 0);
2040 		goto out;
2041 	}
2042 
2043 	switch (st->state) {
2044 	case TCP_SEQ_STATE_LISTENING:
2045 		rc = listening_get_next(seq, v);
2046 		if (!rc) {
2047 			st->state = TCP_SEQ_STATE_ESTABLISHED;
2048 			st->bucket = 0;
2049 			st->offset = 0;
2050 			rc	  = established_get_first(seq);
2051 		}
2052 		break;
2053 	case TCP_SEQ_STATE_ESTABLISHED:
2054 		rc = established_get_next(seq, v);
2055 		break;
2056 	}
2057 out:
2058 	++*pos;
2059 	st->last_pos = *pos;
2060 	return rc;
2061 }
2062 
2063 static void tcp_seq_stop(struct seq_file *seq, void *v)
2064 {
2065 	struct tcp_iter_state *st = seq->private;
2066 
2067 	switch (st->state) {
2068 	case TCP_SEQ_STATE_LISTENING:
2069 		if (v != SEQ_START_TOKEN)
2070 			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2071 		break;
2072 	case TCP_SEQ_STATE_ESTABLISHED:
2073 		if (v)
2074 			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2075 		break;
2076 	}
2077 }
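
/*
 * tcp_seq_start/next/stop above implement the standard seq_file protocol,
 * so the table can be consumed with plain buffered reads.  A minimal
 * userspace sketch (illustrative only):
 *
 *	FILE *f = fopen("/proc/net/tcp", "r");
 *	char line[256];
 *
 *	while (f && fgets(line, sizeof(line), f))
 *		parse_tcp_line(line);	/* one socket per line */
 *	if (f)
 *		fclose(f);
 *
 * where parse_tcp_line() is a hypothetical helper; the column layout it
 * would parse is described at get_tcp4_sock() below.
 */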
2078 
2079 int tcp_seq_open(struct inode *inode, struct file *file)
2080 {
2081 	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2082 	struct tcp_iter_state *s;
2083 	int err;
2084 
2085 	err = seq_open_net(inode, file, &afinfo->seq_ops,
2086 			  sizeof(struct tcp_iter_state));
2087 	if (err < 0)
2088 		return err;
2089 
2090 	s = ((struct seq_file *)file->private_data)->private;
2091 	s->family		= afinfo->family;
2092 	s->last_pos		= 0;
2093 	return 0;
2094 }
2095 EXPORT_SYMBOL(tcp_seq_open);
2096 
2097 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2098 {
2099 	int rc = 0;
2100 	struct proc_dir_entry *p;
2101 
2102 	afinfo->seq_ops.start		= tcp_seq_start;
2103 	afinfo->seq_ops.next		= tcp_seq_next;
2104 	afinfo->seq_ops.stop		= tcp_seq_stop;
2105 
2106 	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2107 			     afinfo->seq_fops, afinfo);
2108 	if (!p)
2109 		rc = -ENOMEM;
2110 	return rc;
2111 }
2112 EXPORT_SYMBOL(tcp_proc_register);
2113 
2114 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2115 {
2116 	remove_proc_entry(afinfo->name, net->proc_net);
2117 }
2118 EXPORT_SYMBOL(tcp_proc_unregister);
2119 
2120 static void get_openreq4(const struct request_sock *req,
2121 			 struct seq_file *f, int i)
2122 {
2123 	const struct inet_request_sock *ireq = inet_rsk(req);
2124 	long delta = req->rsk_timer.expires - jiffies;
2125 
2126 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2127 		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2128 		i,
2129 		ireq->ir_loc_addr,
2130 		ireq->ir_num,
2131 		ireq->ir_rmt_addr,
2132 		ntohs(ireq->ir_rmt_port),
2133 		TCP_SYN_RECV,
2134 		0, 0, /* could print option size, but that is af dependent. */
2135 		1,    /* timers active (only the expire timer) */
2136 		jiffies_delta_to_clock_t(delta),
2137 		req->num_timeout,
2138 		from_kuid_munged(seq_user_ns(f),
2139 				 sock_i_uid(req->rsk_listener)),
2140 		0,  /* non standard timer */
2141 		0, /* open_requests have no inode */
2142 		0,
2143 		req);
2144 }
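
/*
 * For reference, a SYN_RECV entry rendered by the format above looks
 * roughly like this (addresses, uid and pointer are made up):
 *
 *    0: 0100007F:1F90 0200007F:D2C4 03 00000000:00000000 01:00000014 00000000  1000        0 0 0 ffff88003a9c1234
 *
 * i.e. local 127.0.0.1:8080, remote 127.0.0.2, state 03 (TCP_SYN_RECV),
 * the expire timer pending, and no tx/rx queue accounting, since request
 * socks carry no data.
 */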
2145 
2146 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2147 {
2148 	int timer_active;
2149 	unsigned long timer_expires;
2150 	const struct tcp_sock *tp = tcp_sk(sk);
2151 	const struct inet_connection_sock *icsk = inet_csk(sk);
2152 	const struct inet_sock *inet = inet_sk(sk);
2153 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2154 	__be32 dest = inet->inet_daddr;
2155 	__be32 src = inet->inet_rcv_saddr;
2156 	__u16 destp = ntohs(inet->inet_dport);
2157 	__u16 srcp = ntohs(inet->inet_sport);
2158 	int rx_queue;
2159 
2160 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2161 	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2162 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2163 		timer_active	= 1;
2164 		timer_expires	= icsk->icsk_timeout;
2165 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2166 		timer_active	= 4;
2167 		timer_expires	= icsk->icsk_timeout;
2168 	} else if (timer_pending(&sk->sk_timer)) {
2169 		timer_active	= 2;
2170 		timer_expires	= sk->sk_timer.expires;
2171 	} else {
2172 		timer_active	= 0;
2173 		timer_expires = jiffies;
2174 	}
2175 
2176 	if (sk->sk_state == TCP_LISTEN)
2177 		rx_queue = sk->sk_ack_backlog;
2178 	else
2179 		/*
2180 		 * Because we don't lock the socket, we might find a transient negative value.
2181 		 */
2182 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2183 
2184 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2185 			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2186 		i, src, srcp, dest, destp, sk->sk_state,
2187 		tp->write_seq - tp->snd_una,
2188 		rx_queue,
2189 		timer_active,
2190 		jiffies_delta_to_clock_t(timer_expires - jiffies),
2191 		icsk->icsk_retransmits,
2192 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2193 		icsk->icsk_probes_out,
2194 		sock_i_ino(sk),
2195 		atomic_read(&sk->sk_refcnt), sk,
2196 		jiffies_to_clock_t(icsk->icsk_rto),
2197 		jiffies_to_clock_t(icsk->icsk_ack.ato),
2198 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2199 		tp->snd_cwnd,
2200 		sk->sk_state == TCP_LISTEN ?
2201 		    fastopenq->max_qlen :	/* fastopenq is embedded, never NULL */
2202 		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2203 }
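
/*
 * Column key for the line emitted above (header text in tcp4_seq_show()):
 * tx_queue is write_seq - snd_una, rx_queue is rcv_nxt - copied_seq (or
 * the accept backlog for listeners), tr/tm->when describe the pending
 * timer, and the trailing fields expose the RTO, the delayed-ACK ato,
 * quick-ack state, snd_cwnd, and ssthresh (-1 during initial slow start;
 * listeners report the fastopen queue limit instead).
 */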
2204 
2205 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2206 			       struct seq_file *f, int i)
2207 {
2208 	long delta = tw->tw_timer.expires - jiffies;
2209 	__be32 dest, src;
2210 	__u16 destp, srcp;
2211 
2212 	dest  = tw->tw_daddr;
2213 	src   = tw->tw_rcv_saddr;
2214 	destp = ntohs(tw->tw_dport);
2215 	srcp  = ntohs(tw->tw_sport);
2216 
2217 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2218 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2219 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2220 		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2221 		atomic_read(&tw->tw_refcnt), tw);
2222 }
2223 
2224 #define TMPSZ 150
2225 
2226 static int tcp4_seq_show(struct seq_file *seq, void *v)
2227 {
2228 	struct tcp_iter_state *st;
2229 	struct sock *sk = v;
2230 
2231 	seq_setwidth(seq, TMPSZ - 1);
2232 	if (v == SEQ_START_TOKEN) {
2233 		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2234 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2235 			   "inode");
2236 		goto out;
2237 	}
2238 	st = seq->private;
2239 
2240 	if (sk->sk_state == TCP_TIME_WAIT)
2241 		get_timewait4_sock(v, seq, st->num);
2242 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
2243 		get_openreq4(v, seq, st->num);
2244 	else
2245 		get_tcp4_sock(v, seq, st->num);
2246 out:
2247 	seq_pad(seq, '\n');
2248 	return 0;
2249 }
2250 
2251 static const struct file_operations tcp_afinfo_seq_fops = {
2252 	.owner   = THIS_MODULE,
2253 	.open    = tcp_seq_open,
2254 	.read    = seq_read,
2255 	.llseek  = seq_lseek,
2256 	.release = seq_release_net
2257 };
2258 
2259 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2260 	.name		= "tcp",
2261 	.family		= AF_INET,
2262 	.seq_fops	= &tcp_afinfo_seq_fops,
2263 	.seq_ops	= {
2264 		.show		= tcp4_seq_show,
2265 	},
2266 };
2267 
2268 static int __net_init tcp4_proc_init_net(struct net *net)
2269 {
2270 	return tcp_proc_register(net, &tcp4_seq_afinfo);
2271 }
2272 
2273 static void __net_exit tcp4_proc_exit_net(struct net *net)
2274 {
2275 	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2276 }
2277 
2278 static struct pernet_operations tcp4_net_ops = {
2279 	.init = tcp4_proc_init_net,
2280 	.exit = tcp4_proc_exit_net,
2281 };
2282 
2283 int __init tcp4_proc_init(void)
2284 {
2285 	return register_pernet_subsys(&tcp4_net_ops);
2286 }
2287 
2288 void tcp4_proc_exit(void)
2289 {
2290 	unregister_pernet_subsys(&tcp4_net_ops);
2291 }
2292 #endif /* CONFIG_PROC_FS */
2293 
2294 struct proto tcp_prot = {
2295 	.name			= "TCP",
2296 	.owner			= THIS_MODULE,
2297 	.close			= tcp_close,
2298 	.connect		= tcp_v4_connect,
2299 	.disconnect		= tcp_disconnect,
2300 	.accept			= inet_csk_accept,
2301 	.ioctl			= tcp_ioctl,
2302 	.init			= tcp_v4_init_sock,
2303 	.destroy		= tcp_v4_destroy_sock,
2304 	.shutdown		= tcp_shutdown,
2305 	.setsockopt		= tcp_setsockopt,
2306 	.getsockopt		= tcp_getsockopt,
2307 	.recvmsg		= tcp_recvmsg,
2308 	.sendmsg		= tcp_sendmsg,
2309 	.sendpage		= tcp_sendpage,
2310 	.backlog_rcv		= tcp_v4_do_rcv,
2311 	.release_cb		= tcp_release_cb,
2312 	.hash			= inet_hash,
2313 	.unhash			= inet_unhash,
2314 	.get_port		= inet_csk_get_port,
2315 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2316 	.stream_memory_free	= tcp_stream_memory_free,
2317 	.sockets_allocated	= &tcp_sockets_allocated,
2318 	.orphan_count		= &tcp_orphan_count,
2319 	.memory_allocated	= &tcp_memory_allocated,
2320 	.memory_pressure	= &tcp_memory_pressure,
2321 	.sysctl_mem		= sysctl_tcp_mem,
2322 	.sysctl_wmem		= sysctl_tcp_wmem,
2323 	.sysctl_rmem		= sysctl_tcp_rmem,
2324 	.max_header		= MAX_TCP_HEADER,
2325 	.obj_size		= sizeof(struct tcp_sock),
2326 	.slab_flags		= SLAB_DESTROY_BY_RCU,
2327 	.twsk_prot		= &tcp_timewait_sock_ops,
2328 	.rsk_prot		= &tcp_request_sock_ops,
2329 	.h.hashinfo		= &tcp_hashinfo,
2330 	.no_autobind		= true,
2331 #ifdef CONFIG_COMPAT
2332 	.compat_setsockopt	= compat_tcp_setsockopt,
2333 	.compat_getsockopt	= compat_tcp_getsockopt,
2334 #endif
2335 #ifdef CONFIG_MEMCG_KMEM
2336 	.init_cgroup		= tcp_init_cgroup,
2337 	.destroy_cgroup		= tcp_destroy_cgroup,
2338 	.proto_cgroup		= tcp_proto_cgroup,
2339 #endif
2340 };
2341 EXPORT_SYMBOL(tcp_prot);
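
/*
 * tcp_prot is not registered in this file; af_inet.c pulls it in at boot,
 * roughly (simplified from inet_init()):
 *
 *	rc = proto_register(&tcp_prot, 1);
 *
 * after which the inetsw entry for SOCK_STREAM/IPPROTO_TCP routes
 * socket(AF_INET, SOCK_STREAM, 0) to the callbacks above.
 */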
2342 
2343 static void __net_exit tcp_sk_exit(struct net *net)
2344 {
2345 	int cpu;
2346 
2347 	for_each_possible_cpu(cpu)
2348 		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2349 	free_percpu(net->ipv4.tcp_sk);
2350 }
2351 
2352 static int __net_init tcp_sk_init(struct net *net)
2353 {
2354 	int res, cpu;
2355 
2356 	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2357 	if (!net->ipv4.tcp_sk)
2358 		return -ENOMEM;
2359 
2360 	for_each_possible_cpu(cpu) {
2361 		struct sock *sk;
2362 
2363 		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2364 					   IPPROTO_TCP, net);
2365 		if (res)
2366 			goto fail;
2367 		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2368 	}
2369 
2370 	net->ipv4.sysctl_tcp_ecn = 2;
2371 	net->ipv4.sysctl_tcp_ecn_fallback = 1;
2372 
2373 	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2374 	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2375 	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2376 
2377 	return 0;
2378 fail:
2379 	tcp_sk_exit(net);
2380 
2381 	return res;
2382 }
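
/*
 * The per-cpu sockets created above are control sockets: they never carry
 * application data and exist so that replies like RSTs can be sent without
 * locking a shared socket.  tcp_v4_send_reset(), for example, transmits
 * via the current CPU's entry, roughly:
 *
 *	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), ...);
 */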
2383 
2384 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2385 {
2386 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2387 }
2388 
2389 static struct pernet_operations __net_initdata tcp_sk_ops = {
2390 	.init	    = tcp_sk_init,
2391 	.exit	    = tcp_sk_exit,
2392 	.exit_batch = tcp_sk_exit_batch,
2393 };
2394 
2395 void __init tcp_v4_init(void)
2396 {
2397 	inet_hashinfo_init(&tcp_hashinfo);
2398 	if (register_pernet_subsys(&tcp_sk_ops))
2399 		panic("Failed to create the TCP control socket.\n");
2400 }
2401