xref: /openbmc/linux/net/ipv4/tcp_ipv4.c (revision 1fba70e5b6bed53496ba1f1f16127f5be01b5fb6)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Implementation of the Transmission Control Protocol(TCP).
7  *
8  *		IPv4 specific functions
9  *
10  *
11  *		code split from:
12  *		linux/ipv4/tcp.c
13  *		linux/ipv4/tcp_input.c
14  *		linux/ipv4/tcp_output.c
15  *
16  *		See tcp.c for author information
17  *
18  *	This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23 
24 /*
25  * Changes:
26  *		David S. Miller	:	New socket lookup architecture.
27  *					This code is dedicated to John Dyson.
28  *		David S. Miller :	Change semantics of established hash,
29  *					half is devoted to TIME_WAIT sockets
30  *					and the rest go in the other half.
31  *		Andi Kleen :		Add support for syncookies and fixed
32  *					some bugs: ip options weren't passed to
33  *					the TCP layer, missed a check for an
34  *					ACK bit.
35  *		Andi Kleen :		Implemented fast path mtu discovery.
36  *	     				Fixed many serious bugs in the
37  *					request_sock handling and moved
38  *					most of it into the af independent code.
39  *					Added tail drop and some other bugfixes.
40  *					Added new listen semantics.
41  *		Mike McLagan	:	Routing by source
42  *	Juan Jose Ciarlante:		ip_dynaddr bits
43  *		Andi Kleen:		various fixes.
44  *	Vitaly E. Lavrov	:	Transparent proxy revived after a
45  *					year-long coma.
46  *	Andi Kleen		:	Fix new listen.
47  *	Andi Kleen		:	Fix accept error reporting.
48  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
49  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
50  *					a single port at the same time.
51  */
52 
53 #define pr_fmt(fmt) "TCP: " fmt
54 
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65 
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/busy_poll.h>
77 
78 #include <linux/inet.h>
79 #include <linux/ipv6.h>
80 #include <linux/stddef.h>
81 #include <linux/proc_fs.h>
82 #include <linux/seq_file.h>
83 #include <linux/inetdevice.h>
84 
85 #include <crypto/hash.h>
86 #include <linux/scatterlist.h>
87 
88 #ifdef CONFIG_TCP_MD5SIG
89 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
90 			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
91 #endif
92 
93 struct inet_hashinfo tcp_hashinfo;
94 EXPORT_SYMBOL(tcp_hashinfo);
95 
96 static u32 tcp_v4_init_seq(const struct sk_buff *skb)
97 {
98 	return secure_tcp_seq(ip_hdr(skb)->daddr,
99 			      ip_hdr(skb)->saddr,
100 			      tcp_hdr(skb)->dest,
101 			      tcp_hdr(skb)->source);
102 }
103 
104 static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
105 {
106 	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
107 }
108 
109 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
110 {
111 	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
112 	struct tcp_sock *tp = tcp_sk(sk);
113 
114 	/* With PAWS, it is safe from the viewpoint
115 	   of data integrity. Even without PAWS it is safe provided sequence
116 	   spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.
117 
118 	   Actually, the idea is close to VJ's: only the timestamp cache is
119 	   held not per host but per port pair, and the TW bucket is used as
120 	   the state holder.
121 
122 	   If the TW bucket has already been destroyed we fall back to VJ's
123 	   scheme and use the initial timestamp retrieved from the peer table.
124 	 */
125 	if (tcptw->tw_ts_recent_stamp &&
126 	    (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
127 			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
128 		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
129 		if (tp->write_seq == 0)
130 			tp->write_seq = 1;
131 		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
132 		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
133 		sock_hold(sktw);
134 		return 1;
135 	}
136 
137 	return 0;
138 }
139 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
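
/*
 * Worked example of the sequence bump above (illustrative numbers only):
 * if the old TIME-WAIT connection ended with tw_snd_nxt = 1000000, the
 * reincarnated connection starts its write_seq at
 * 1000000 + 65535 + 2 = 1065537, i.e. beyond any window (<= 65535 bytes
 * without window scaling) the peer could still hold open, so stray old
 * segments cannot be mistaken for new data.
 */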
140 
141 /* This will initiate an outgoing connection. */
142 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
143 {
144 	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
145 	struct inet_sock *inet = inet_sk(sk);
146 	struct tcp_sock *tp = tcp_sk(sk);
147 	__be16 orig_sport, orig_dport;
148 	__be32 daddr, nexthop;
149 	struct flowi4 *fl4;
150 	struct rtable *rt;
151 	int err;
152 	struct ip_options_rcu *inet_opt;
153 	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
154 
155 	if (addr_len < sizeof(struct sockaddr_in))
156 		return -EINVAL;
157 
158 	if (usin->sin_family != AF_INET)
159 		return -EAFNOSUPPORT;
160 
161 	nexthop = daddr = usin->sin_addr.s_addr;
162 	inet_opt = rcu_dereference_protected(inet->inet_opt,
163 					     lockdep_sock_is_held(sk));
164 	if (inet_opt && inet_opt->opt.srr) {
165 		if (!daddr)
166 			return -EINVAL;
167 		nexthop = inet_opt->opt.faddr;
168 	}
169 
170 	orig_sport = inet->inet_sport;
171 	orig_dport = usin->sin_port;
172 	fl4 = &inet->cork.fl.u.ip4;
173 	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
174 			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
175 			      IPPROTO_TCP,
176 			      orig_sport, orig_dport, sk);
177 	if (IS_ERR(rt)) {
178 		err = PTR_ERR(rt);
179 		if (err == -ENETUNREACH)
180 			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
181 		return err;
182 	}
183 
184 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
185 		ip_rt_put(rt);
186 		return -ENETUNREACH;
187 	}
188 
189 	if (!inet_opt || !inet_opt->opt.srr)
190 		daddr = fl4->daddr;
191 
192 	if (!inet->inet_saddr)
193 		inet->inet_saddr = fl4->saddr;
194 	sk_rcv_saddr_set(sk, inet->inet_saddr);
195 
196 	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
197 		/* Reset inherited state */
198 		tp->rx_opt.ts_recent	   = 0;
199 		tp->rx_opt.ts_recent_stamp = 0;
200 		if (likely(!tp->repair))
201 			tp->write_seq	   = 0;
202 	}
203 
204 	inet->inet_dport = usin->sin_port;
205 	sk_daddr_set(sk, daddr);
206 
207 	inet_csk(sk)->icsk_ext_hdr_len = 0;
208 	if (inet_opt)
209 		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
210 
211 	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
212 
213 	/* Socket identity is still unknown (sport may be zero).
214 	 * However we set the state to SYN-SENT and, without releasing the
215 	 * socket lock, select a source port, enter ourselves into the hash
216 	 * tables and complete initialization after this.
217 	 */
218 	tcp_set_state(sk, TCP_SYN_SENT);
219 	err = inet_hash_connect(tcp_death_row, sk);
220 	if (err)
221 		goto failure;
222 
223 	sk_set_txhash(sk);
224 
225 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
226 			       inet->inet_sport, inet->inet_dport, sk);
227 	if (IS_ERR(rt)) {
228 		err = PTR_ERR(rt);
229 		rt = NULL;
230 		goto failure;
231 	}
232 	/* OK, now commit destination to socket.  */
233 	sk->sk_gso_type = SKB_GSO_TCPV4;
234 	sk_setup_caps(sk, &rt->dst);
235 	rt = NULL;
236 
237 	if (likely(!tp->repair)) {
238 		if (!tp->write_seq)
239 			tp->write_seq = secure_tcp_seq(inet->inet_saddr,
240 						       inet->inet_daddr,
241 						       inet->inet_sport,
242 						       usin->sin_port);
243 		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
244 						 inet->inet_saddr,
245 						 inet->inet_daddr);
246 	}
247 
248 	inet->inet_id = tp->write_seq ^ jiffies;
249 
250 	if (tcp_fastopen_defer_connect(sk, &err))
251 		return err;
252 	if (err)
253 		goto failure;
254 
255 	err = tcp_connect(sk);
256 
257 	if (err)
258 		goto failure;
259 
260 	return 0;
261 
262 failure:
263 	/*
264 	 * This unhashes the socket and releases the local port,
265 	 * if necessary.
266 	 */
267 	tcp_set_state(sk, TCP_CLOSE);
268 	ip_rt_put(rt);
269 	sk->sk_route_caps = 0;
270 	inet->inet_dport = 0;
271 	return err;
272 }
273 EXPORT_SYMBOL(tcp_v4_connect);
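
/*
 * Illustrative userspace sketch (not part of this file; the address and
 * port are made up): the path that ends up in tcp_v4_connect().  A
 * connect() on an AF_INET stream socket reaches this function via
 * inet_stream_connect():
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
 *		perror("connect");	(errors above surface here, e.g. EINVAL)
 */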
274 
275 /*
276  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
277  * It can be called through tcp_release_cb() if the socket was owned by the
278  * user at the time tcp_v4_err() was called to handle the ICMP message.
279  */
280 void tcp_v4_mtu_reduced(struct sock *sk)
281 {
282 	struct inet_sock *inet = inet_sk(sk);
283 	struct dst_entry *dst;
284 	u32 mtu;
285 
286 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
287 		return;
288 	mtu = tcp_sk(sk)->mtu_info;
289 	dst = inet_csk_update_pmtu(sk, mtu);
290 	if (!dst)
291 		return;
292 
293 	/* Something is about to go wrong... Remember the soft error
294 	 * in case this connection is not able to recover.
295 	 */
296 	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
297 		sk->sk_err_soft = EMSGSIZE;
298 
299 	mtu = dst_mtu(dst);
300 
301 	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
302 	    ip_sk_accept_pmtu(sk) &&
303 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
304 		tcp_sync_mss(sk, mtu);
305 
306 		/* Resend the TCP packet because it's
307 		 * clear that the old packet has been
308 		 * dropped. This is the new "fast" path mtu
309 		 * discovery.
310 		 */
311 		tcp_simple_retransmit(sk);
312 	} /* else let the usual retransmit timer handle it */
313 }
314 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
315 
316 static void do_redirect(struct sk_buff *skb, struct sock *sk)
317 {
318 	struct dst_entry *dst = __sk_dst_check(sk, 0);
319 
320 	if (dst)
321 		dst->ops->redirect(dst, sk, skb);
322 }
323 
324 
325 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
326 void tcp_req_err(struct sock *sk, u32 seq, bool abort)
327 {
328 	struct request_sock *req = inet_reqsk(sk);
329 	struct net *net = sock_net(sk);
330 
331 	/* ICMPs are not backlogged, hence we cannot get
332 	 * an established socket here.
333 	 */
334 	if (seq != tcp_rsk(req)->snt_isn) {
335 		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
336 	} else if (abort) {
337 		/*
338 		 * Still in SYN_RECV, just remove it silently.
339 		 * There is no good way to pass the error to the newly
340 		 * created socket, and POSIX does not want network
341 		 * errors returned from accept().
342 		 */
343 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
344 		tcp_listendrop(req->rsk_listener);
345 	}
346 	reqsk_put(req);
347 }
348 EXPORT_SYMBOL(tcp_req_err);
349 
350 /*
351  * This routine is called by the ICMP module when it gets some
352  * sort of error condition.  If err < 0 then the socket should
353  * be closed and the error returned to the user.  If err > 0
354  * it's just the icmp type << 8 | icmp code.  After adjustment,
355  * the header points to the first 8 bytes of the tcp header.  We need
356  * to find the appropriate port.
357  *
358  * The locking strategy used here is very "optimistic". When
359  * someone else accesses the socket the ICMP is just dropped
360  * and for some paths there is no check at all.
361  * A more general error queue to queue errors for later handling
362  * is probably better.
363  *
364  */
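
/*
 * Sketch of the "err > 0" encoding described above (for clarity only):
 *
 *	int err  = (type << 8) | code;	e.g. (ICMP_DEST_UNREACH << 8) | ICMP_PORT_UNREACH
 *	int type = err >> 8;
 *	int code = err & 0xff;
 */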
365 
366 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
367 {
368 	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
369 	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
370 	struct inet_connection_sock *icsk;
371 	struct tcp_sock *tp;
372 	struct inet_sock *inet;
373 	const int type = icmp_hdr(icmp_skb)->type;
374 	const int code = icmp_hdr(icmp_skb)->code;
375 	struct sock *sk;
376 	struct sk_buff *skb;
377 	struct request_sock *fastopen;
378 	u32 seq, snd_una;
379 	s32 remaining;
380 	u32 delta_us;
381 	int err;
382 	struct net *net = dev_net(icmp_skb->dev);
383 
384 	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
385 				       th->dest, iph->saddr, ntohs(th->source),
386 				       inet_iif(icmp_skb), 0);
387 	if (!sk) {
388 		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
389 		return;
390 	}
391 	if (sk->sk_state == TCP_TIME_WAIT) {
392 		inet_twsk_put(inet_twsk(sk));
393 		return;
394 	}
395 	seq = ntohl(th->seq);
396 	if (sk->sk_state == TCP_NEW_SYN_RECV)
397 		return tcp_req_err(sk, seq,
398 				  type == ICMP_PARAMETERPROB ||
399 				  type == ICMP_TIME_EXCEEDED ||
400 				  (type == ICMP_DEST_UNREACH &&
401 				   (code == ICMP_NET_UNREACH ||
402 				    code == ICMP_HOST_UNREACH)));
403 
404 	bh_lock_sock(sk);
405 	/* If too many ICMPs get dropped on busy
406 	 * servers this needs to be solved differently.
407 	 * We do take care of the PMTU discovery (RFC 1191) special case:
408 	 * we can receive locally generated ICMP messages while the socket is held.
409 	 */
410 	if (sock_owned_by_user(sk)) {
411 		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
412 			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
413 	}
414 	if (sk->sk_state == TCP_CLOSE)
415 		goto out;
416 
417 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
418 		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
419 		goto out;
420 	}
421 
422 	icsk = inet_csk(sk);
423 	tp = tcp_sk(sk);
424 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
425 	fastopen = tp->fastopen_rsk;
426 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
427 	if (sk->sk_state != TCP_LISTEN &&
428 	    !between(seq, snd_una, tp->snd_nxt)) {
429 		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
430 		goto out;
431 	}
432 
433 	switch (type) {
434 	case ICMP_REDIRECT:
435 		if (!sock_owned_by_user(sk))
436 			do_redirect(icmp_skb, sk);
437 		goto out;
438 	case ICMP_SOURCE_QUENCH:
439 		/* Just silently ignore these. */
440 		goto out;
441 	case ICMP_PARAMETERPROB:
442 		err = EPROTO;
443 		break;
444 	case ICMP_DEST_UNREACH:
445 		if (code > NR_ICMP_UNREACH)
446 			goto out;
447 
448 		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
449 			/* We are not interested in TCP_LISTEN and open_requests
450 			 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
451 			 * they should go through unfragmented).
452 			 */
453 			if (sk->sk_state == TCP_LISTEN)
454 				goto out;
455 
456 			tp->mtu_info = info;
457 			if (!sock_owned_by_user(sk)) {
458 				tcp_v4_mtu_reduced(sk);
459 			} else {
460 				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
461 					sock_hold(sk);
462 			}
463 			goto out;
464 		}
465 
466 		err = icmp_err_convert[code].errno;
467 		/* check if icmp_skb allows reverting the backoff
468 		 * (see draft-zimmermann-tcp-lcd) */
469 		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
470 			break;
471 		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
472 		    !icsk->icsk_backoff || fastopen)
473 			break;
474 
475 		if (sock_owned_by_user(sk))
476 			break;
477 
478 		icsk->icsk_backoff--;
479 		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
480 					       TCP_TIMEOUT_INIT;
481 		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
482 
483 		skb = tcp_rtx_queue_head(sk);
484 		BUG_ON(!skb);
485 
486 		tcp_mstamp_refresh(tp);
487 		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
488 		remaining = icsk->icsk_rto -
489 			    usecs_to_jiffies(delta_us);
490 
491 		if (remaining > 0) {
492 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
493 						  remaining, TCP_RTO_MAX);
494 		} else {
495 			/* RTO revert clocked out the retransmission;
496 			 * retransmit now. */
497 			tcp_retransmit_timer(sk);
498 		}
499 
500 		break;
501 	case ICMP_TIME_EXCEEDED:
502 		err = EHOSTUNREACH;
503 		break;
504 	default:
505 		goto out;
506 	}
507 
508 	switch (sk->sk_state) {
509 	case TCP_SYN_SENT:
510 	case TCP_SYN_RECV:
511 		/* Only in fast or simultaneous open. If a fast open socket is
512 		 * already accepted it is treated as a connected one below.
513 		 */
514 		if (fastopen && !fastopen->sk)
515 			break;
516 
517 		if (!sock_owned_by_user(sk)) {
518 			sk->sk_err = err;
519 
520 			sk->sk_error_report(sk);
521 
522 			tcp_done(sk);
523 		} else {
524 			sk->sk_err_soft = err;
525 		}
526 		goto out;
527 	}
528 
529 	/* If we've already connected we will keep trying
530 	 * until we time out, or the user gives up.
531 	 *
532 	 * RFC 1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH
533 	 * to be considered hard errors (well, FRAG_FAILED too,
534 	 * but it is obsoleted by PMTU discovery).
535 	 *
536 	 * Note that on the modern internet, where routing is unreliable
537 	 * and broken firewalls sit in every dark corner, sending random
538 	 * errors at their masters' orders, even these two messages have
539 	 * finally lost their original sense (even Linux sends invalid PORT_UNREACHs).
540 	 *
541 	 * Now we are in compliance with RFCs.
542 	 *							--ANK (980905)
543 	 */
544 
545 	inet = inet_sk(sk);
546 	if (!sock_owned_by_user(sk) && inet->recverr) {
547 		sk->sk_err = err;
548 		sk->sk_error_report(sk);
549 	} else	{ /* Only an error on timeout */
550 		sk->sk_err_soft = err;
551 	}
552 
553 out:
554 	bh_unlock_sock(sk);
555 	sock_put(sk);
556 }
557 
558 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
559 {
560 	struct tcphdr *th = tcp_hdr(skb);
561 
562 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
563 		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
564 		skb->csum_start = skb_transport_header(skb) - skb->head;
565 		skb->csum_offset = offsetof(struct tcphdr, check);
566 	} else {
567 		th->check = tcp_v4_check(skb->len, saddr, daddr,
568 					 csum_partial(th,
569 						      th->doff << 2,
570 						      skb->csum));
571 	}
572 }
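
/*
 * Sketch of what finishes a CHECKSUM_PARTIAL packet set up above (an
 * approximation of skb_checksum_help(); the real work is usually done in
 * hardware).  The device checksums from csum_start and stores the folded
 * result at csum_offset, on top of the pseudo-header sum already placed
 * in th->check:
 *
 *	int start = skb_checksum_start_offset(skb);
 *	__wsum csum = skb_checksum(skb, start, skb->len - start, 0);
 *
 *	*(__sum16 *)(skb->data + start + skb->csum_offset) = csum_fold(csum);
 */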
573 
574 /* This routine computes an IPv4 TCP checksum. */
575 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
576 {
577 	const struct inet_sock *inet = inet_sk(sk);
578 
579 	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
580 }
581 EXPORT_SYMBOL(tcp_v4_send_check);
582 
583 /*
584  *	This routine will send an RST to the other tcp.
585  *
586  *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
587  *		      for the reset?
588  *	Answer: if a packet caused a RST, it is not for a socket
589  *		existing in our system; if it is matched to a socket,
590  *		it is just a duplicate segment or a bug in the other side's TCP.
591  *		So we build the reply based only on the parameters
592  *		that arrived with the segment.
593  *	Exception: precedence violation. We do not implement it in any case.
594  */
595 
596 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
597 {
598 	const struct tcphdr *th = tcp_hdr(skb);
599 	struct {
600 		struct tcphdr th;
601 #ifdef CONFIG_TCP_MD5SIG
602 		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
603 #endif
604 	} rep;
605 	struct ip_reply_arg arg;
606 #ifdef CONFIG_TCP_MD5SIG
607 	struct tcp_md5sig_key *key = NULL;
608 	const __u8 *hash_location = NULL;
609 	unsigned char newhash[16];
610 	int genhash;
611 	struct sock *sk1 = NULL;
612 #endif
613 	struct net *net;
614 
615 	/* Never send a reset in response to a reset. */
616 	if (th->rst)
617 		return;
618 
619 	/* If sk is not NULL, it means we did a successful lookup and the
620 	 * incoming route had to be correct. prequeue might have dropped our dst.
621 	 */
622 	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
623 		return;
624 
625 	/* Swap the send and the receive. */
626 	memset(&rep, 0, sizeof(rep));
627 	rep.th.dest   = th->source;
628 	rep.th.source = th->dest;
629 	rep.th.doff   = sizeof(struct tcphdr) / 4;
630 	rep.th.rst    = 1;
631 
632 	if (th->ack) {
633 		rep.th.seq = th->ack_seq;
634 	} else {
635 		rep.th.ack = 1;
636 		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
637 				       skb->len - (th->doff << 2));
638 	}
639 
640 	memset(&arg, 0, sizeof(arg));
641 	arg.iov[0].iov_base = (unsigned char *)&rep;
642 	arg.iov[0].iov_len  = sizeof(rep.th);
643 
644 	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
645 #ifdef CONFIG_TCP_MD5SIG
646 	rcu_read_lock();
647 	hash_location = tcp_parse_md5sig_option(th);
648 	if (sk && sk_fullsock(sk)) {
649 		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
650 					&ip_hdr(skb)->saddr, AF_INET);
651 	} else if (hash_location) {
652 		/*
653 		 * The active side is gone. Try to find the listening socket
654 		 * through the source port, and then find the md5 key through
655 		 * the listening socket. We do not lose security here:
656 		 * the incoming packet is checked against the md5 hash of the
657 		 * key we find, and no RST is generated if the hash doesn't match.
658 		 */
659 		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
660 					     ip_hdr(skb)->saddr,
661 					     th->source, ip_hdr(skb)->daddr,
662 					     ntohs(th->source), inet_iif(skb),
663 					     tcp_v4_sdif(skb));
664 		/* don't send a RST if we can't find a key */
665 		if (!sk1)
666 			goto out;
667 
668 		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
669 					&ip_hdr(skb)->saddr, AF_INET);
670 		if (!key)
671 			goto out;
672 
673 
674 		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
675 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
676 			goto out;
677 
678 	}
679 
680 	if (key) {
681 		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
682 				   (TCPOPT_NOP << 16) |
683 				   (TCPOPT_MD5SIG << 8) |
684 				   TCPOLEN_MD5SIG);
685 		/* Update length and the length the header thinks exists */
686 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
687 		rep.th.doff = arg.iov[0].iov_len / 4;
688 
689 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
690 				     key, ip_hdr(skb)->saddr,
691 				     ip_hdr(skb)->daddr, &rep.th);
692 	}
693 #endif
694 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
695 				      ip_hdr(skb)->saddr, /* XXX */
696 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
697 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
698 	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
699 
700 	/* When the socket is gone, all binding information is lost and
701 	 * routing might fail. No choice here: if we chose to force the
702 	 * input interface, we would misroute in the case of an asymmetric route.
703 	 */
704 	if (sk)
705 		arg.bound_dev_if = sk->sk_bound_dev_if;
706 
707 	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
708 		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
709 
710 	arg.tos = ip_hdr(skb)->tos;
711 	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
712 	local_bh_disable();
713 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
714 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
715 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
716 			      &arg, arg.iov[0].iov_len);
717 
718 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
719 	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
720 	local_bh_enable();
721 
722 #ifdef CONFIG_TCP_MD5SIG
723 out:
724 	rcu_read_unlock();
725 #endif
726 }
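
/*
 * Worked example of the seq/ack selection above (illustrative numbers):
 * an unexpected SYN with seq = 1000 and no payload carries no ACK, so we
 * answer with seq = 0, ack_seq = 1000 + 1 (the SYN occupies one sequence
 * number) and the ACK bit set; a stray ACK segment instead gets a RST
 * whose seq equals its ack_seq, with no ACK bit, per RFC 793.
 */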
727 
728 /* The code below, sending ACKs in SYN-RECV and TIME-WAIT states outside
729    of socket context, is certainly ugly. What can I do?
730  */
731 
732 static void tcp_v4_send_ack(const struct sock *sk,
733 			    struct sk_buff *skb, u32 seq, u32 ack,
734 			    u32 win, u32 tsval, u32 tsecr, int oif,
735 			    struct tcp_md5sig_key *key,
736 			    int reply_flags, u8 tos)
737 {
738 	const struct tcphdr *th = tcp_hdr(skb);
739 	struct {
740 		struct tcphdr th;
741 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
742 #ifdef CONFIG_TCP_MD5SIG
743 			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
744 #endif
745 			];
746 	} rep;
747 	struct net *net = sock_net(sk);
748 	struct ip_reply_arg arg;
749 
750 	memset(&rep.th, 0, sizeof(struct tcphdr));
751 	memset(&arg, 0, sizeof(arg));
752 
753 	arg.iov[0].iov_base = (unsigned char *)&rep;
754 	arg.iov[0].iov_len  = sizeof(rep.th);
755 	if (tsecr) {
756 		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
757 				   (TCPOPT_TIMESTAMP << 8) |
758 				   TCPOLEN_TIMESTAMP);
759 		rep.opt[1] = htonl(tsval);
760 		rep.opt[2] = htonl(tsecr);
761 		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
762 	}
763 
764 	/* Swap the send and the receive. */
765 	rep.th.dest    = th->source;
766 	rep.th.source  = th->dest;
767 	rep.th.doff    = arg.iov[0].iov_len / 4;
768 	rep.th.seq     = htonl(seq);
769 	rep.th.ack_seq = htonl(ack);
770 	rep.th.ack     = 1;
771 	rep.th.window  = htons(win);
772 
773 #ifdef CONFIG_TCP_MD5SIG
774 	if (key) {
775 		int offset = (tsecr) ? 3 : 0;
776 
777 		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
778 					  (TCPOPT_NOP << 16) |
779 					  (TCPOPT_MD5SIG << 8) |
780 					  TCPOLEN_MD5SIG);
781 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
782 		rep.th.doff = arg.iov[0].iov_len/4;
783 
784 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
785 				    key, ip_hdr(skb)->saddr,
786 				    ip_hdr(skb)->daddr, &rep.th);
787 	}
788 #endif
789 	arg.flags = reply_flags;
790 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
791 				      ip_hdr(skb)->saddr, /* XXX */
792 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
793 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
794 	if (oif)
795 		arg.bound_dev_if = oif;
796 	arg.tos = tos;
797 	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
798 	local_bh_disable();
799 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
800 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
801 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
802 			      &arg, arg.iov[0].iov_len);
803 
804 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
805 	local_bh_enable();
806 }
807 
808 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
809 {
810 	struct inet_timewait_sock *tw = inet_twsk(sk);
811 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
812 
813 	tcp_v4_send_ack(sk, skb,
814 			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
815 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
816 			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
817 			tcptw->tw_ts_recent,
818 			tw->tw_bound_dev_if,
819 			tcp_twsk_md5_key(tcptw),
820 			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
821 			tw->tw_tos
822 			);
823 
824 	inet_twsk_put(tw);
825 }
826 
827 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
828 				  struct request_sock *req)
829 {
830 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
831 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
832 	 */
833 	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
834 					     tcp_sk(sk)->snd_nxt;
835 
836 	/* RFC 7323 2.3
837 	 * The window field (SEG.WND) of every outgoing segment, with the
838 	 * exception of <SYN> segments, MUST be right-shifted by
839 	 * Rcv.Wind.Shift bits:
840 	 */
841 	tcp_v4_send_ack(sk, skb, seq,
842 			tcp_rsk(req)->rcv_nxt,
843 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
844 			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
845 			req->ts_recent,
846 			0,
847 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
848 					  AF_INET),
849 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
850 			ip_hdr(skb)->tos);
851 }
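
/*
 * Worked example for the RFC 7323 shift above (illustrative numbers):
 * with rcv_wscale = 7, an effective window of 1048576 bytes goes on the
 * wire as 1048576 >> 7 = 8192, and the peer reconstructs it as 8192 << 7.
 * Only <SYN> segments carry an unshifted window field.
 */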
852 
853 /*
854  *	Send a SYN-ACK after having received a SYN.
855  *	This still operates on a request_sock only, not on a big
856  *	socket.
857  */
858 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
859 			      struct flowi *fl,
860 			      struct request_sock *req,
861 			      struct tcp_fastopen_cookie *foc,
862 			      enum tcp_synack_type synack_type)
863 {
864 	const struct inet_request_sock *ireq = inet_rsk(req);
865 	struct flowi4 fl4;
866 	int err = -1;
867 	struct sk_buff *skb;
868 
869 	/* First, grab a route. */
870 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
871 		return -1;
872 
873 	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
874 
875 	if (skb) {
876 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
877 
878 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
879 					    ireq->ir_rmt_addr,
880 					    ireq->opt);
881 		err = net_xmit_eval(err);
882 	}
883 
884 	return err;
885 }
886 
887 /*
888  *	IPv4 request_sock destructor.
889  */
890 static void tcp_v4_reqsk_destructor(struct request_sock *req)
891 {
892 	kfree(inet_rsk(req)->opt);
893 }
894 
895 #ifdef CONFIG_TCP_MD5SIG
896 /*
897  * RFC2385 MD5 checksumming requires a mapping of
898  * IP address->MD5 Key.
899  * We need to maintain these in the sk structure.
900  */
901 
902 /* Find the Key structure for an address.  */
903 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
904 					 const union tcp_md5_addr *addr,
905 					 int family)
906 {
907 	const struct tcp_sock *tp = tcp_sk(sk);
908 	struct tcp_md5sig_key *key;
909 	const struct tcp_md5sig_info *md5sig;
910 	__be32 mask;
911 	struct tcp_md5sig_key *best_match = NULL;
912 	bool match;
913 
914 	/* caller either holds rcu_read_lock() or socket lock */
915 	md5sig = rcu_dereference_check(tp->md5sig_info,
916 				       lockdep_sock_is_held(sk));
917 	if (!md5sig)
918 		return NULL;
919 
920 	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
921 		if (key->family != family)
922 			continue;
923 
924 		if (family == AF_INET) {
925 			mask = inet_make_mask(key->prefixlen);
926 			match = (key->addr.a4.s_addr & mask) ==
927 				(addr->a4.s_addr & mask);
928 #if IS_ENABLED(CONFIG_IPV6)
929 		} else if (family == AF_INET6) {
930 			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
931 						  key->prefixlen);
932 #endif
933 		} else {
934 			match = false;
935 		}
936 
937 		if (match && (!best_match ||
938 			      key->prefixlen > best_match->prefixlen))
939 			best_match = key;
940 	}
941 	return best_match;
942 }
943 EXPORT_SYMBOL(tcp_md5_do_lookup);
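
/*
 * Illustrative sketch of the best-match rule above (example values are
 * made up): with keys configured for 10.0.0.0/8 and 10.1.2.0/24, a
 * lookup for peer 10.1.2.3 matches both, and the /24 key wins because
 * its prefixlen is larger:
 *
 *	__be32 mask = inet_make_mask(24);	yields htonl(0xffffff00)
 *	bool match  = (key_addr & mask) == (peer_addr & mask);
 */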
944 
945 static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
946 						      const union tcp_md5_addr *addr,
947 						      int family, u8 prefixlen)
948 {
949 	const struct tcp_sock *tp = tcp_sk(sk);
950 	struct tcp_md5sig_key *key;
951 	unsigned int size = sizeof(struct in_addr);
952 	const struct tcp_md5sig_info *md5sig;
953 
954 	/* caller either holds rcu_read_lock() or socket lock */
955 	md5sig = rcu_dereference_check(tp->md5sig_info,
956 				       lockdep_sock_is_held(sk));
957 	if (!md5sig)
958 		return NULL;
959 #if IS_ENABLED(CONFIG_IPV6)
960 	if (family == AF_INET6)
961 		size = sizeof(struct in6_addr);
962 #endif
963 	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
964 		if (key->family != family)
965 			continue;
966 		if (!memcmp(&key->addr, addr, size) &&
967 		    key->prefixlen == prefixlen)
968 			return key;
969 	}
970 	return NULL;
971 }
972 
973 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
974 					 const struct sock *addr_sk)
975 {
976 	const union tcp_md5_addr *addr;
977 
978 	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
979 	return tcp_md5_do_lookup(sk, addr, AF_INET);
980 }
981 EXPORT_SYMBOL(tcp_v4_md5_lookup);
982 
983 /* This can be called on a newly created socket, from other files */
984 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
985 		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
986 		   gfp_t gfp)
987 {
988 	/* Add Key to the list */
989 	struct tcp_md5sig_key *key;
990 	struct tcp_sock *tp = tcp_sk(sk);
991 	struct tcp_md5sig_info *md5sig;
992 
993 	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
994 	if (key) {
995 		/* Pre-existing entry - just update that one. */
996 		memcpy(key->key, newkey, newkeylen);
997 		key->keylen = newkeylen;
998 		return 0;
999 	}
1000 
1001 	md5sig = rcu_dereference_protected(tp->md5sig_info,
1002 					   lockdep_sock_is_held(sk));
1003 	if (!md5sig) {
1004 		md5sig = kmalloc(sizeof(*md5sig), gfp);
1005 		if (!md5sig)
1006 			return -ENOMEM;
1007 
1008 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1009 		INIT_HLIST_HEAD(&md5sig->head);
1010 		rcu_assign_pointer(tp->md5sig_info, md5sig);
1011 	}
1012 
1013 	key = sock_kmalloc(sk, sizeof(*key), gfp);
1014 	if (!key)
1015 		return -ENOMEM;
1016 	if (!tcp_alloc_md5sig_pool()) {
1017 		sock_kfree_s(sk, key, sizeof(*key));
1018 		return -ENOMEM;
1019 	}
1020 
1021 	memcpy(key->key, newkey, newkeylen);
1022 	key->keylen = newkeylen;
1023 	key->family = family;
1024 	key->prefixlen = prefixlen;
1025 	memcpy(&key->addr, addr,
1026 	       (family == AF_INET6) ? sizeof(struct in6_addr) :
1027 				      sizeof(struct in_addr));
1028 	hlist_add_head_rcu(&key->node, &md5sig->head);
1029 	return 0;
1030 }
1031 EXPORT_SYMBOL(tcp_md5_do_add);
1032 
1033 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1034 		   u8 prefixlen)
1035 {
1036 	struct tcp_md5sig_key *key;
1037 
1038 	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
1039 	if (!key)
1040 		return -ENOENT;
1041 	hlist_del_rcu(&key->node);
1042 	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1043 	kfree_rcu(key, rcu);
1044 	return 0;
1045 }
1046 EXPORT_SYMBOL(tcp_md5_do_del);
1047 
1048 static void tcp_clear_md5_list(struct sock *sk)
1049 {
1050 	struct tcp_sock *tp = tcp_sk(sk);
1051 	struct tcp_md5sig_key *key;
1052 	struct hlist_node *n;
1053 	struct tcp_md5sig_info *md5sig;
1054 
1055 	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1056 
1057 	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1058 		hlist_del_rcu(&key->node);
1059 		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1060 		kfree_rcu(key, rcu);
1061 	}
1062 }
1063 
1064 static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1065 				 char __user *optval, int optlen)
1066 {
1067 	struct tcp_md5sig cmd;
1068 	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1069 	u8 prefixlen = 32;
1070 
1071 	if (optlen < sizeof(cmd))
1072 		return -EINVAL;
1073 
1074 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1075 		return -EFAULT;
1076 
1077 	if (sin->sin_family != AF_INET)
1078 		return -EINVAL;
1079 
1080 	if (optname == TCP_MD5SIG_EXT &&
1081 	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1082 		prefixlen = cmd.tcpm_prefixlen;
1083 		if (prefixlen > 32)
1084 			return -EINVAL;
1085 	}
1086 
1087 	if (!cmd.tcpm_keylen)
1088 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1089 				      AF_INET, prefixlen);
1090 
1091 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1092 		return -EINVAL;
1093 
1094 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1095 			      AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
1096 			      GFP_KERNEL);
1097 }
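
/*
 * Illustrative userspace counterpart of the parser above (a sketch; fd,
 * the peer address and the key are made up).  A key is installed per
 * destination with TCP_MD5SIG, or with a prefix length via TCP_MD5SIG_EXT:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *a = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	a->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &a->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */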
1098 
1099 static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1100 				   __be32 daddr, __be32 saddr,
1101 				   const struct tcphdr *th, int nbytes)
1102 {
1103 	struct tcp4_pseudohdr *bp;
1104 	struct scatterlist sg;
1105 	struct tcphdr *_th;
1106 
1107 	bp = hp->scratch;
1108 	bp->saddr = saddr;
1109 	bp->daddr = daddr;
1110 	bp->pad = 0;
1111 	bp->protocol = IPPROTO_TCP;
1112 	bp->len = cpu_to_be16(nbytes);
1113 
1114 	_th = (struct tcphdr *)(bp + 1);
1115 	memcpy(_th, th, sizeof(*th));
1116 	_th->check = 0;
1117 
1118 	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1119 	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1120 				sizeof(*bp) + sizeof(*th));
1121 	return crypto_ahash_update(hp->md5_req);
1122 }
1123 
1124 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1125 			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1126 {
1127 	struct tcp_md5sig_pool *hp;
1128 	struct ahash_request *req;
1129 
1130 	hp = tcp_get_md5sig_pool();
1131 	if (!hp)
1132 		goto clear_hash_noput;
1133 	req = hp->md5_req;
1134 
1135 	if (crypto_ahash_init(req))
1136 		goto clear_hash;
1137 	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1138 		goto clear_hash;
1139 	if (tcp_md5_hash_key(hp, key))
1140 		goto clear_hash;
1141 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1142 	if (crypto_ahash_final(req))
1143 		goto clear_hash;
1144 
1145 	tcp_put_md5sig_pool();
1146 	return 0;
1147 
1148 clear_hash:
1149 	tcp_put_md5sig_pool();
1150 clear_hash_noput:
1151 	memset(md5_hash, 0, 16);
1152 	return 1;
1153 }
1154 
1155 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1156 			const struct sock *sk,
1157 			const struct sk_buff *skb)
1158 {
1159 	struct tcp_md5sig_pool *hp;
1160 	struct ahash_request *req;
1161 	const struct tcphdr *th = tcp_hdr(skb);
1162 	__be32 saddr, daddr;
1163 
1164 	if (sk) { /* valid for establish/request sockets */
1165 		saddr = sk->sk_rcv_saddr;
1166 		daddr = sk->sk_daddr;
1167 	} else {
1168 		const struct iphdr *iph = ip_hdr(skb);
1169 		saddr = iph->saddr;
1170 		daddr = iph->daddr;
1171 	}
1172 
1173 	hp = tcp_get_md5sig_pool();
1174 	if (!hp)
1175 		goto clear_hash_noput;
1176 	req = hp->md5_req;
1177 
1178 	if (crypto_ahash_init(req))
1179 		goto clear_hash;
1180 
1181 	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1182 		goto clear_hash;
1183 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1184 		goto clear_hash;
1185 	if (tcp_md5_hash_key(hp, key))
1186 		goto clear_hash;
1187 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1188 	if (crypto_ahash_final(req))
1189 		goto clear_hash;
1190 
1191 	tcp_put_md5sig_pool();
1192 	return 0;
1193 
1194 clear_hash:
1195 	tcp_put_md5sig_pool();
1196 clear_hash_noput:
1197 	memset(md5_hash, 0, 16);
1198 	return 1;
1199 }
1200 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
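
/*
 * Sketch of the digest computed above, in a generic MD5 API for clarity
 * (variable names are assumptions; the kernel uses the ahash interface).
 * Per RFC 2385 the hash covers, in order: the IPv4 pseudo-header, the
 * 20-byte TCP header with its checksum zeroed (options excluded), the
 * payload, and finally the key itself:
 *
 *	MD5_Init(&ctx);
 *	MD5_Update(&ctx, &pseudo_hdr, sizeof(pseudo_hdr));
 *	th_copy.check = 0;
 *	MD5_Update(&ctx, &th_copy, sizeof(struct tcphdr));
 *	MD5_Update(&ctx, payload, payload_len);
 *	MD5_Update(&ctx, key->key, key->keylen);
 *	MD5_Final(digest, &ctx);
 */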
1201 
1202 #endif
1203 
1204 /* Called with rcu_read_lock() */
1205 static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1206 				    const struct sk_buff *skb)
1207 {
1208 #ifdef CONFIG_TCP_MD5SIG
1209 	/*
1210 	 * This gets called for each TCP segment that arrives
1211 	 * so we want to be efficient.
1212 	 * We have 3 drop cases:
1213 	 * o No MD5 hash and one expected.
1214 	 * o MD5 hash and we're not expecting one.
1215 	 * o MD5 hash and it's wrong.
1216 	 */
1217 	const __u8 *hash_location = NULL;
1218 	struct tcp_md5sig_key *hash_expected;
1219 	const struct iphdr *iph = ip_hdr(skb);
1220 	const struct tcphdr *th = tcp_hdr(skb);
1221 	int genhash;
1222 	unsigned char newhash[16];
1223 
1224 	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1225 					  AF_INET);
1226 	hash_location = tcp_parse_md5sig_option(th);
1227 
1228 	/* We've parsed the options - do we have a hash? */
1229 	if (!hash_expected && !hash_location)
1230 		return false;
1231 
1232 	if (hash_expected && !hash_location) {
1233 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1234 		return true;
1235 	}
1236 
1237 	if (!hash_expected && hash_location) {
1238 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1239 		return true;
1240 	}
1241 
1242 	/* Okay, so we have both hash_expected and hash_location -
1243 	 * we need to calculate the hash.
1244 	 */
1245 	genhash = tcp_v4_md5_hash_skb(newhash,
1246 				      hash_expected,
1247 				      NULL, skb);
1248 
1249 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1250 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1251 		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1252 				     &iph->saddr, ntohs(th->source),
1253 				     &iph->daddr, ntohs(th->dest),
1254 				     genhash ? " tcp_v4_calc_md5_hash failed"
1255 				     : "");
1256 		return true;
1257 	}
1258 	return false;
1259 #endif
1260 	return false;
1261 }
1262 
1263 static void tcp_v4_init_req(struct request_sock *req,
1264 			    const struct sock *sk_listener,
1265 			    struct sk_buff *skb)
1266 {
1267 	struct inet_request_sock *ireq = inet_rsk(req);
1268 
1269 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1270 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1271 	ireq->opt = tcp_v4_save_options(sock_net(sk_listener), skb);
1272 }
1273 
1274 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1275 					  struct flowi *fl,
1276 					  const struct request_sock *req)
1277 {
1278 	return inet_csk_route_req(sk, &fl->u.ip4, req);
1279 }
1280 
1281 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1282 	.family		=	PF_INET,
1283 	.obj_size	=	sizeof(struct tcp_request_sock),
1284 	.rtx_syn_ack	=	tcp_rtx_synack,
1285 	.send_ack	=	tcp_v4_reqsk_send_ack,
1286 	.destructor	=	tcp_v4_reqsk_destructor,
1287 	.send_reset	=	tcp_v4_send_reset,
1288 	.syn_ack_timeout =	tcp_syn_ack_timeout,
1289 };
1290 
1291 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1292 	.mss_clamp	=	TCP_MSS_DEFAULT,
1293 #ifdef CONFIG_TCP_MD5SIG
1294 	.req_md5_lookup	=	tcp_v4_md5_lookup,
1295 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1296 #endif
1297 	.init_req	=	tcp_v4_init_req,
1298 #ifdef CONFIG_SYN_COOKIES
1299 	.cookie_init_seq =	cookie_v4_init_sequence,
1300 #endif
1301 	.route_req	=	tcp_v4_route_req,
1302 	.init_seq	=	tcp_v4_init_seq,
1303 	.init_ts_off	=	tcp_v4_init_ts_off,
1304 	.send_synack	=	tcp_v4_send_synack,
1305 };
1306 
1307 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1308 {
1309 	/* Never answer SYNs sent to broadcast or multicast */
1310 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1311 		goto drop;
1312 
1313 	return tcp_conn_request(&tcp_request_sock_ops,
1314 				&tcp_request_sock_ipv4_ops, sk, skb);
1315 
1316 drop:
1317 	tcp_listendrop(sk);
1318 	return 0;
1319 }
1320 EXPORT_SYMBOL(tcp_v4_conn_request);
1321 
1322 
1323 /*
1324  * The three-way handshake has completed - we got a valid ACK to our SYN-ACK -
1325  * now create the new socket.
1326  */
1327 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1328 				  struct request_sock *req,
1329 				  struct dst_entry *dst,
1330 				  struct request_sock *req_unhash,
1331 				  bool *own_req)
1332 {
1333 	struct inet_request_sock *ireq;
1334 	struct inet_sock *newinet;
1335 	struct tcp_sock *newtp;
1336 	struct sock *newsk;
1337 #ifdef CONFIG_TCP_MD5SIG
1338 	struct tcp_md5sig_key *key;
1339 #endif
1340 	struct ip_options_rcu *inet_opt;
1341 
1342 	if (sk_acceptq_is_full(sk))
1343 		goto exit_overflow;
1344 
1345 	newsk = tcp_create_openreq_child(sk, req, skb);
1346 	if (!newsk)
1347 		goto exit_nonewsk;
1348 
1349 	newsk->sk_gso_type = SKB_GSO_TCPV4;
1350 	inet_sk_rx_dst_set(newsk, skb);
1351 
1352 	newtp		      = tcp_sk(newsk);
1353 	newinet		      = inet_sk(newsk);
1354 	ireq		      = inet_rsk(req);
1355 	sk_daddr_set(newsk, ireq->ir_rmt_addr);
1356 	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1357 	newsk->sk_bound_dev_if = ireq->ir_iif;
1358 	newinet->inet_saddr	      = ireq->ir_loc_addr;
1359 	inet_opt	      = ireq->opt;
1360 	rcu_assign_pointer(newinet->inet_opt, inet_opt);
1361 	ireq->opt	      = NULL;
1362 	newinet->mc_index     = inet_iif(skb);
1363 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1364 	newinet->rcv_tos      = ip_hdr(skb)->tos;
1365 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1366 	if (inet_opt)
1367 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1368 	newinet->inet_id = newtp->write_seq ^ jiffies;
1369 
1370 	if (!dst) {
1371 		dst = inet_csk_route_child_sock(sk, newsk, req);
1372 		if (!dst)
1373 			goto put_and_exit;
1374 	} else {
1375 		/* syncookie case : see end of cookie_v4_check() */
1376 		/* syncookie case: see end of cookie_v4_check() */
1377 	sk_setup_caps(newsk, dst);
1378 
1379 	tcp_ca_openreq_child(newsk, dst);
1380 
1381 	tcp_sync_mss(newsk, dst_mtu(dst));
1382 	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1383 
1384 	tcp_initialize_rcv_mss(newsk);
1385 
1386 #ifdef CONFIG_TCP_MD5SIG
1387 	/* Copy over the MD5 key from the original socket */
1388 	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1389 				AF_INET);
1390 	if (key) {
1391 		/*
1392 		 * We're using one, so create a matching key
1393 		 * on the newsk structure. If we fail to get
1394 		 * memory, then we end up not copying the key
1395 		 * across. Shucks.
1396 		 */
1397 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1398 			       AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
1399 		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1400 	}
1401 #endif
1402 
1403 	if (__inet_inherit_port(sk, newsk) < 0)
1404 		goto put_and_exit;
1405 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1406 	if (*own_req)
1407 		tcp_move_syn(newtp, req);
1408 
1409 	return newsk;
1410 
1411 exit_overflow:
1412 	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1413 exit_nonewsk:
1414 	dst_release(dst);
1415 exit:
1416 	tcp_listendrop(sk);
1417 	return NULL;
1418 put_and_exit:
1419 	inet_csk_prepare_forced_close(newsk);
1420 	tcp_done(newsk);
1421 	goto exit;
1422 }
1423 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1424 
1425 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1426 {
1427 #ifdef CONFIG_SYN_COOKIES
1428 	const struct tcphdr *th = tcp_hdr(skb);
1429 
1430 	if (!th->syn)
1431 		sk = cookie_v4_check(sk, skb);
1432 #endif
1433 	return sk;
1434 }
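
/*
 * Context for the !th->syn test above (a sketch): with syncookies the
 * listener encodes the connection tuple and MSS into the ISN of its
 * SYN-ACK instead of keeping a request sock.  Only a non-SYN segment
 * (the returning ACK of the handshake) can therefore be a cookie echo;
 * cookie_v4_check() recovers the cookie from ntohl(th->ack_seq) - 1.
 */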
1435 
1436 /* The socket must have its spinlock held when we get
1437  * here, unless it is a TCP_LISTEN socket.
1438  *
1439  * We have a potential double-lock case here, so even when
1440  * doing backlog processing we use the BH locking scheme.
1441  * This is because we cannot sleep with the original spinlock
1442  * held.
1443  */
1444 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1445 {
1446 	struct sock *rsk;
1447 
1448 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1449 		struct dst_entry *dst = sk->sk_rx_dst;
1450 
1451 		sock_rps_save_rxhash(sk, skb);
1452 		sk_mark_napi_id(sk, skb);
1453 		if (dst) {
1454 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1455 			    !dst->ops->check(dst, 0)) {
1456 				dst_release(dst);
1457 				sk->sk_rx_dst = NULL;
1458 			}
1459 		}
1460 		tcp_rcv_established(sk, skb, tcp_hdr(skb));
1461 		return 0;
1462 	}
1463 
1464 	if (tcp_checksum_complete(skb))
1465 		goto csum_err;
1466 
1467 	if (sk->sk_state == TCP_LISTEN) {
1468 		struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1469 
1470 		if (!nsk)
1471 			goto discard;
1472 		if (nsk != sk) {
1473 			if (tcp_child_process(sk, nsk, skb)) {
1474 				rsk = nsk;
1475 				goto reset;
1476 			}
1477 			return 0;
1478 		}
1479 	} else
1480 		sock_rps_save_rxhash(sk, skb);
1481 
1482 	if (tcp_rcv_state_process(sk, skb)) {
1483 		rsk = sk;
1484 		goto reset;
1485 	}
1486 	return 0;
1487 
1488 reset:
1489 	tcp_v4_send_reset(rsk, skb);
1490 discard:
1491 	kfree_skb(skb);
1492 	/* Be careful here. If this function gets more complicated and
1493 	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1494 	 * might be destroyed here. This current version compiles correctly,
1495 	 * but you have been warned.
1496 	 */
1497 	return 0;
1498 
1499 csum_err:
1500 	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1501 	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1502 	goto discard;
1503 }
1504 EXPORT_SYMBOL(tcp_v4_do_rcv);
1505 
1506 int tcp_v4_early_demux(struct sk_buff *skb)
1507 {
1508 	const struct iphdr *iph;
1509 	const struct tcphdr *th;
1510 	struct sock *sk;
1511 
1512 	if (skb->pkt_type != PACKET_HOST)
1513 		return 0;
1514 
1515 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1516 		return 0;
1517 
1518 	iph = ip_hdr(skb);
1519 	th = tcp_hdr(skb);
1520 
1521 	if (th->doff < sizeof(struct tcphdr) / 4)
1522 		return 0;
1523 
1524 	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1525 				       iph->saddr, th->source,
1526 				       iph->daddr, ntohs(th->dest),
1527 				       skb->skb_iif, inet_sdif(skb));
1528 	if (sk) {
1529 		skb->sk = sk;
1530 		skb->destructor = sock_edemux;
1531 		if (sk_fullsock(sk)) {
1532 			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1533 
1534 			if (dst)
1535 				dst = dst_check(dst, 0);
1536 			if (dst &&
1537 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1538 				skb_dst_set_noref(skb, dst);
1539 		}
1540 	}
1541 	return 0;
1542 }
1543 
1544 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1545 {
1546 	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
1547 
1548 	/* Only the socket owner can try to collapse/prune rx queues
1549 	 * to reduce memory overhead, so add a little headroom here.
1550 	 * Few socket backlogs are likely to be non-empty concurrently.
1551 	 */
1552 	limit += 64*1024;
1553 
1554 	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1555 	 * we can fix skb->truesize to its real value to avoid future drops.
1556 	 * This is valid because skb is not yet charged to the socket.
1557 	 * It has been noticed that pure SACK packets were sometimes dropped
1558 	 * (if cooked by drivers without the copybreak feature).
1559 	 */
1560 	skb_condense(skb);
1561 
1562 	if (unlikely(sk_add_backlog(sk, skb, limit))) {
1563 		bh_unlock_sock(sk);
1564 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1565 		return true;
1566 	}
1567 	return false;
1568 }
1569 EXPORT_SYMBOL(tcp_add_backlog);
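
/*
 * Worked example of the limit above (typical defaults, for illustration):
 * with sk_rcvbuf = 87380 and sk_sndbuf = 16384 (tcp_rmem[1]/tcp_wmem[1]),
 * the backlog may hold up to 87380 + 16384 + 64*1024 = 169300 bytes of
 * truesize before segments are dropped and TCPBacklogDrop is bumped.
 */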
1570 
1571 int tcp_filter(struct sock *sk, struct sk_buff *skb)
1572 {
1573 	struct tcphdr *th = (struct tcphdr *)skb->data;
1574 	unsigned int eaten = skb->len;
1575 	int err;
1576 
1577 	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
1578 	if (!err) {
1579 		eaten -= skb->len;
1580 		TCP_SKB_CB(skb)->end_seq -= eaten;
1581 	}
1582 	return err;
1583 }
1584 EXPORT_SYMBOL(tcp_filter);
1585 
1586 /*
1587  *	From tcp_input.c
1588  */
1589 
1590 int tcp_v4_rcv(struct sk_buff *skb)
1591 {
1592 	struct net *net = dev_net(skb->dev);
1593 	int sdif = inet_sdif(skb);
1594 	const struct iphdr *iph;
1595 	const struct tcphdr *th;
1596 	bool refcounted;
1597 	struct sock *sk;
1598 	int ret;
1599 
1600 	if (skb->pkt_type != PACKET_HOST)
1601 		goto discard_it;
1602 
1603 	/* Count it even if it's bad */
1604 	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1605 
1606 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1607 		goto discard_it;
1608 
1609 	th = (const struct tcphdr *)skb->data;
1610 
1611 	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
1612 		goto bad_packet;
1613 	if (!pskb_may_pull(skb, th->doff * 4))
1614 		goto discard_it;
1615 
1616 	/* An explanation is required here, I think.
1617 	 * Packet length and doff are validated by header prediction,
1618 	 * provided the case of th->doff==0 is eliminated.
1619 	 * So, we defer the checks. */
1620 
1621 	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1622 		goto csum_error;
1623 
1624 	th = (const struct tcphdr *)skb->data;
1625 	iph = ip_hdr(skb);
1626 	/* This is tricky: we move the IPCB to its correct location inside TCP_SKB_CB().
1627 	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1628 	 */
1629 	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1630 		sizeof(struct inet_skb_parm));
1631 	barrier();
1632 
1633 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1634 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1635 				    skb->len - th->doff * 4);
1636 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1637 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1638 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1639 	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1640 	TCP_SKB_CB(skb)->sacked	 = 0;
1641 	TCP_SKB_CB(skb)->has_rxtstamp =
1642 			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1643 
1644 lookup:
1645 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1646 			       th->dest, sdif, &refcounted);
1647 	if (!sk)
1648 		goto no_tcp_socket;
1649 
1650 process:
1651 	if (sk->sk_state == TCP_TIME_WAIT)
1652 		goto do_time_wait;
1653 
1654 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1655 		struct request_sock *req = inet_reqsk(sk);
1656 		struct sock *nsk;
1657 
1658 		sk = req->rsk_listener;
1659 		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1660 			sk_drops_add(sk, skb);
1661 			reqsk_put(req);
1662 			goto discard_it;
1663 		}
1664 		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1665 			inet_csk_reqsk_queue_drop_and_put(sk, req);
1666 			goto lookup;
1667 		}
1668 		/* We own a reference on the listener, increase it again
1669 		 * as we might lose it too soon.
1670 		 */
1671 		sock_hold(sk);
1672 		refcounted = true;
1673 		nsk = NULL;
1674 		if (!tcp_filter(sk, skb))
1675 			nsk = tcp_check_req(sk, skb, req, false);
1676 		if (!nsk) {
1677 			reqsk_put(req);
1678 			goto discard_and_relse;
1679 		}
1680 		if (nsk == sk) {
1681 			reqsk_put(req);
1682 		} else if (tcp_child_process(sk, nsk, skb)) {
1683 			tcp_v4_send_reset(nsk, skb);
1684 			goto discard_and_relse;
1685 		} else {
1686 			sock_put(sk);
1687 			return 0;
1688 		}
1689 	}
1690 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1691 		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1692 		goto discard_and_relse;
1693 	}
1694 
1695 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1696 		goto discard_and_relse;
1697 
1698 	if (tcp_v4_inbound_md5_hash(sk, skb))
1699 		goto discard_and_relse;
1700 
1701 	nf_reset(skb);
1702 
1703 	if (tcp_filter(sk, skb))
1704 		goto discard_and_relse;
1705 	th = (const struct tcphdr *)skb->data;
1706 	iph = ip_hdr(skb);
1707 
1708 	skb->dev = NULL;
1709 
1710 	if (sk->sk_state == TCP_LISTEN) {
1711 		ret = tcp_v4_do_rcv(sk, skb);
1712 		goto put_and_return;
1713 	}
1714 
1715 	sk_incoming_cpu_update(sk);
1716 
1717 	bh_lock_sock_nested(sk);
1718 	tcp_segs_in(tcp_sk(sk), skb);
1719 	ret = 0;
1720 	if (!sock_owned_by_user(sk)) {
1721 		ret = tcp_v4_do_rcv(sk, skb);
1722 	} else if (tcp_add_backlog(sk, skb)) {
1723 		goto discard_and_relse;
1724 	}
1725 	bh_unlock_sock(sk);
1726 
1727 put_and_return:
1728 	if (refcounted)
1729 		sock_put(sk);
1730 
1731 	return ret;
1732 
1733 no_tcp_socket:
1734 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1735 		goto discard_it;
1736 
1737 	if (tcp_checksum_complete(skb)) {
1738 csum_error:
1739 		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1740 bad_packet:
1741 		__TCP_INC_STATS(net, TCP_MIB_INERRS);
1742 	} else {
1743 		tcp_v4_send_reset(NULL, skb);
1744 	}
1745 
1746 discard_it:
1747 	/* Discard frame. */
1748 	kfree_skb(skb);
1749 	return 0;
1750 
1751 discard_and_relse:
1752 	sk_drops_add(sk, skb);
1753 	if (refcounted)
1754 		sock_put(sk);
1755 	goto discard_it;
1756 
1757 do_time_wait:
1758 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1759 		inet_twsk_put(inet_twsk(sk));
1760 		goto discard_it;
1761 	}
1762 
1763 	if (tcp_checksum_complete(skb)) {
1764 		inet_twsk_put(inet_twsk(sk));
1765 		goto csum_error;
1766 	}
1767 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1768 	case TCP_TW_SYN: {
1769 		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1770 							&tcp_hashinfo, skb,
1771 							__tcp_hdrlen(th),
1772 							iph->saddr, th->source,
1773 							iph->daddr, th->dest,
1774 							inet_iif(skb),
1775 							sdif);
1776 		if (sk2) {
1777 			inet_twsk_deschedule_put(inet_twsk(sk));
1778 			sk = sk2;
1779 			refcounted = false;
1780 			goto process;
1781 		}
1782 	}
1783 		/* to ACK */
1784 		/* fall through */
1785 	case TCP_TW_ACK:
1786 		tcp_v4_timewait_ack(sk, skb);
1787 		break;
1788 	case TCP_TW_RST:
1789 		tcp_v4_send_reset(sk, skb);
1790 		inet_twsk_deschedule_put(inet_twsk(sk));
1791 		goto discard_it;
1792 	case TCP_TW_SUCCESS:;
1793 	}
1794 	goto discard_it;
1795 }
1796 
1797 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1798 	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
1799 	.twsk_unique	= tcp_twsk_unique,
1800 	.twsk_destructor= tcp_twsk_destructor,
1801 };
1802 
1803 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1804 {
1805 	struct dst_entry *dst = skb_dst(skb);
1806 
1807 	if (dst && dst_hold_safe(dst)) {
1808 		sk->sk_rx_dst = dst;
1809 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1810 	}
1811 }
1812 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1813 
1814 const struct inet_connection_sock_af_ops ipv4_specific = {
1815 	.queue_xmit	   = ip_queue_xmit,
1816 	.send_check	   = tcp_v4_send_check,
1817 	.rebuild_header	   = inet_sk_rebuild_header,
1818 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1819 	.conn_request	   = tcp_v4_conn_request,
1820 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
1821 	.net_header_len	   = sizeof(struct iphdr),
1822 	.setsockopt	   = ip_setsockopt,
1823 	.getsockopt	   = ip_getsockopt,
1824 	.addr2sockaddr	   = inet_csk_addr2sockaddr,
1825 	.sockaddr_len	   = sizeof(struct sockaddr_in),
1826 #ifdef CONFIG_COMPAT
1827 	.compat_setsockopt = compat_ip_setsockopt,
1828 	.compat_getsockopt = compat_ip_getsockopt,
1829 #endif
1830 	.mtu_reduced	   = tcp_v4_mtu_reduced,
1831 };
1832 EXPORT_SYMBOL(ipv4_specific);
1833 
1834 #ifdef CONFIG_TCP_MD5SIG
1835 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1836 	.md5_lookup		= tcp_v4_md5_lookup,
1837 	.calc_md5_hash		= tcp_v4_md5_hash_skb,
1838 	.md5_parse		= tcp_v4_parse_md5_keys,
1839 };
1840 #endif
1841 
1842 /* NOTE: A lot of things are set to zero explicitly by the call to
1843  *       sk_alloc(), so they need not be done here.
1844  */
1845 static int tcp_v4_init_sock(struct sock *sk)
1846 {
1847 	struct inet_connection_sock *icsk = inet_csk(sk);
1848 
1849 	tcp_init_sock(sk);
1850 
1851 	icsk->icsk_af_ops = &ipv4_specific;
1852 
1853 #ifdef CONFIG_TCP_MD5SIG
1854 	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1855 #endif
1856 
1857 	return 0;
1858 }
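/*
 * tcp_v4_init_sock() runs through the proto ->init() hook when an IPv4
 * TCP socket is created.  A rough sketch of the call path, simplified
 * from inet_create() in af_inet.c:
 *
 *	socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)
 *		-> inet_create()
 *			-> sk->sk_prot->init(sk), i.e. tcp_v4_init_sock()
 */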
1859 
1860 void tcp_v4_destroy_sock(struct sock *sk)
1861 {
1862 	struct tcp_sock *tp = tcp_sk(sk);
1863 
1864 	tcp_clear_xmit_timers(sk);
1865 
1866 	tcp_cleanup_congestion_control(sk);
1867 
1868 	tcp_cleanup_ulp(sk);
1869 
1870 	/* Clean up the write buffer. */
1871 	tcp_write_queue_purge(sk);
1872 
1873 	/* Check if we want to disable active TFO */
1874 	tcp_fastopen_active_disable_ofo_check(sk);
1875 
1876 	/* Clean up our, hopefully empty, out_of_order_queue. */
1877 	skb_rbtree_purge(&tp->out_of_order_queue);
1878 
1879 #ifdef CONFIG_TCP_MD5SIG
1880 	/* Clean up the MD5 key list, if any */
1881 	if (tp->md5sig_info) {
1882 		tcp_clear_md5_list(sk);
1883 		kfree_rcu(tp->md5sig_info, rcu);
1884 		tp->md5sig_info = NULL;
1885 	}
1886 #endif
1887 
1888 	/* Clean up a referenced TCP bind bucket. */
1889 	if (inet_csk(sk)->icsk_bind_hash)
1890 		inet_put_port(sk);
1891 
1892 	BUG_ON(tp->fastopen_rsk);
1893 
1894 	/* If the socket was aborted during the connect operation */
1895 	tcp_free_fastopen_req(tp);
1896 	tcp_fastopen_destroy_cipher(sk);
1897 	tcp_saved_syn_free(tp);
1898 
1899 	sk_sockets_allocated_dec(sk);
1900 }
1901 EXPORT_SYMBOL(tcp_v4_destroy_sock);
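/*
 * Note the teardown order in tcp_v4_destroy_sock() above: the xmit timers
 * are stopped first, so no retransmit or probe callback can fire while
 * the write queue, out-of-order queue, MD5 keys and Fast Open state
 * behind them are being released.
 */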
1902 
1903 #ifdef CONFIG_PROC_FS
1904 /* Proc filesystem TCP sock list dumping. */
1905 
1906 /*
1907  * Get the next listener socket following cur.  If cur is NULL, get the
1908  * first socket, starting from the bucket given in st->bucket; when
1909  * st->bucket is zero, the very first socket in the hash table is returned.
1910  */
1911 static void *listening_get_next(struct seq_file *seq, void *cur)
1912 {
1913 	struct tcp_iter_state *st = seq->private;
1914 	struct net *net = seq_file_net(seq);
1915 	struct inet_listen_hashbucket *ilb;
1916 	struct sock *sk = cur;
1917 
1918 	if (!sk) {
1919 get_head:
1920 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1921 		spin_lock(&ilb->lock);
1922 		sk = sk_head(&ilb->head);
1923 		st->offset = 0;
1924 		goto get_sk;
1925 	}
1926 	ilb = &tcp_hashinfo.listening_hash[st->bucket];
1927 	++st->num;
1928 	++st->offset;
1929 
1930 	sk = sk_next(sk);
1931 get_sk:
1932 	sk_for_each_from(sk) {
1933 		if (!net_eq(sock_net(sk), net))
1934 			continue;
1935 		if (sk->sk_family == st->family)
1936 			return sk;
1937 	}
1938 	spin_unlock(&ilb->lock);
1939 	st->offset = 0;
1940 	if (++st->bucket < INET_LHTABLE_SIZE)
1941 		goto get_head;
1942 	return NULL;
1943 }
1944 
1945 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1946 {
1947 	struct tcp_iter_state *st = seq->private;
1948 	void *rc;
1949 
1950 	st->bucket = 0;
1951 	st->offset = 0;
1952 	rc = listening_get_next(seq, NULL);
1953 
1954 	while (rc && *pos) {
1955 		rc = listening_get_next(seq, rc);
1956 		--*pos;
1957 	}
1958 	return rc;
1959 }
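/*
 * listening_get_idx() maps a flat seq_file position onto the listening
 * hash: *pos == 3, for example, yields the fourth matching listener,
 * walking the buckets in order and skipping sockets that belong to
 * another network namespace or address family along the way.
 */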
1960 
1961 static inline bool empty_bucket(const struct tcp_iter_state *st)
1962 {
1963 	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1964 }
1965 
1966 /*
1967  * Get the first established socket, starting from the bucket given in
1968  * st->bucket.  When st->bucket is zero, the very first socket is returned.
1969  */
1970 static void *established_get_first(struct seq_file *seq)
1971 {
1972 	struct tcp_iter_state *st = seq->private;
1973 	struct net *net = seq_file_net(seq);
1974 	void *rc = NULL;
1975 
1976 	st->offset = 0;
1977 	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1978 		struct sock *sk;
1979 		struct hlist_nulls_node *node;
1980 		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1981 
1982 		/* Lockless fast path for the common case of empty buckets */
1983 		if (empty_bucket(st))
1984 			continue;
1985 
1986 		spin_lock_bh(lock);
1987 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1988 			if (sk->sk_family != st->family ||
1989 			    !net_eq(sock_net(sk), net)) {
1990 				continue;
1991 			}
1992 			rc = sk;
1993 			goto out;
1994 		}
1995 		spin_unlock_bh(lock);
1996 	}
1997 out:
1998 	return rc;
1999 }
2000 
2001 static void *established_get_next(struct seq_file *seq, void *cur)
2002 {
2003 	struct sock *sk = cur;
2004 	struct hlist_nulls_node *node;
2005 	struct tcp_iter_state *st = seq->private;
2006 	struct net *net = seq_file_net(seq);
2007 
2008 	++st->num;
2009 	++st->offset;
2010 
2011 	sk = sk_nulls_next(sk);
2012 
2013 	sk_nulls_for_each_from(sk, node) {
2014 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2015 			return sk;
2016 	}
2017 
2018 	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2019 	++st->bucket;
2020 	return established_get_first(seq);
2021 }
2022 
2023 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2024 {
2025 	struct tcp_iter_state *st = seq->private;
2026 	void *rc;
2027 
2028 	st->bucket = 0;
2029 	rc = established_get_first(seq);
2030 
2031 	while (rc && pos) {
2032 		rc = established_get_next(seq, rc);
2033 		--pos;
2034 	}
2035 	return rc;
2036 }
2037 
2038 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2039 {
2040 	void *rc;
2041 	struct tcp_iter_state *st = seq->private;
2042 
2043 	st->state = TCP_SEQ_STATE_LISTENING;
2044 	rc	  = listening_get_idx(seq, &pos);
2045 
2046 	if (!rc) {
2047 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2048 		rc	  = established_get_idx(seq, pos);
2049 	}
2050 
2051 	return rc;
2052 }
2053 
2054 static void *tcp_seek_last_pos(struct seq_file *seq)
2055 {
2056 	struct tcp_iter_state *st = seq->private;
2057 	int offset = st->offset;
2058 	int orig_num = st->num;
2059 	void *rc = NULL;
2060 
2061 	switch (st->state) {
2062 	case TCP_SEQ_STATE_LISTENING:
2063 		if (st->bucket >= INET_LHTABLE_SIZE)
2064 			break;
2065 		st->state = TCP_SEQ_STATE_LISTENING;
2066 		rc = listening_get_next(seq, NULL);
2067 		while (offset-- && rc)
2068 			rc = listening_get_next(seq, rc);
2069 		if (rc)
2070 			break;
2071 		st->bucket = 0;
2072 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2073 		/* Fallthrough */
2074 	case TCP_SEQ_STATE_ESTABLISHED:
2075 		if (st->bucket > tcp_hashinfo.ehash_mask)
2076 			break;
2077 		rc = established_get_first(seq);
2078 		while (offset-- && rc)
2079 			rc = established_get_next(seq, rc);
2080 	}
2081 
2082 	st->num = orig_num;
2083 
2084 	return rc;
2085 }
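/*
 * tcp_seek_last_pos() is an optimization for sequential reads of a large
 * /proc/net/tcp: rather than rescanning every bucket from the start on
 * each read() chunk, it resumes from the bucket and intra-bucket offset
 * recorded when the previous chunk ended; tcp_seq_start() falls back to
 * the full tcp_get_idx() walk only when this cached state is stale.
 */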
2086 
2087 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2088 {
2089 	struct tcp_iter_state *st = seq->private;
2090 	void *rc;
2091 
2092 	if (*pos && *pos == st->last_pos) {
2093 		rc = tcp_seek_last_pos(seq);
2094 		if (rc)
2095 			goto out;
2096 	}
2097 
2098 	st->state = TCP_SEQ_STATE_LISTENING;
2099 	st->num = 0;
2100 	st->bucket = 0;
2101 	st->offset = 0;
2102 	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2103 
2104 out:
2105 	st->last_pos = *pos;
2106 	return rc;
2107 }
2108 
2109 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2110 {
2111 	struct tcp_iter_state *st = seq->private;
2112 	void *rc = NULL;
2113 
2114 	if (v == SEQ_START_TOKEN) {
2115 		rc = tcp_get_idx(seq, 0);
2116 		goto out;
2117 	}
2118 
2119 	switch (st->state) {
2120 	case TCP_SEQ_STATE_LISTENING:
2121 		rc = listening_get_next(seq, v);
2122 		if (!rc) {
2123 			st->state = TCP_SEQ_STATE_ESTABLISHED;
2124 			st->bucket = 0;
2125 			st->offset = 0;
2126 			rc	  = established_get_first(seq);
2127 		}
2128 		break;
2129 	case TCP_SEQ_STATE_ESTABLISHED:
2130 		rc = established_get_next(seq, v);
2131 		break;
2132 	}
2133 out:
2134 	++*pos;
2135 	st->last_pos = *pos;
2136 	return rc;
2137 }
2138 
2139 static void tcp_seq_stop(struct seq_file *seq, void *v)
2140 {
2141 	struct tcp_iter_state *st = seq->private;
2142 
2143 	switch (st->state) {
2144 	case TCP_SEQ_STATE_LISTENING:
2145 		if (v != SEQ_START_TOKEN)
2146 			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
2147 		break;
2148 	case TCP_SEQ_STATE_ESTABLISHED:
2149 		if (v)
2150 			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2151 		break;
2152 	}
2153 }
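/*
 * tcp_seq_start/next/stop above, together with the per-family ->show()
 * callback (tcp4_seq_show below), implement the usual seq_file iterator
 * contract: each read() of /proc/net/tcp drives one start/show.../stop
 * cycle.  A minimal user-space reader, as an illustrative sketch in
 * plain C (not part of the kernel build):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/proc/net/tcp", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);	// echo each line
 *		fclose(f);
 *		return 0;
 *	}
 */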
2154 
2155 int tcp_seq_open(struct inode *inode, struct file *file)
2156 {
2157 	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2158 	struct tcp_iter_state *s;
2159 	int err;
2160 
2161 	err = seq_open_net(inode, file, &afinfo->seq_ops,
2162 			  sizeof(struct tcp_iter_state));
2163 	if (err < 0)
2164 		return err;
2165 
2166 	s = ((struct seq_file *)file->private_data)->private;
2167 	s->family		= afinfo->family;
2168 	s->last_pos		= 0;
2169 	return 0;
2170 }
2171 EXPORT_SYMBOL(tcp_seq_open);
2172 
2173 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2174 {
2175 	int rc = 0;
2176 	struct proc_dir_entry *p;
2177 
2178 	afinfo->seq_ops.start		= tcp_seq_start;
2179 	afinfo->seq_ops.next		= tcp_seq_next;
2180 	afinfo->seq_ops.stop		= tcp_seq_stop;
2181 
2182 	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2183 			     afinfo->seq_fops, afinfo);
2184 	if (!p)
2185 		rc = -ENOMEM;
2186 	return rc;
2187 }
2188 EXPORT_SYMBOL(tcp_proc_register);
2189 
2190 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2191 {
2192 	remove_proc_entry(afinfo->name, net->proc_net);
2193 }
2194 EXPORT_SYMBOL(tcp_proc_unregister);
2195 
2196 static void get_openreq4(const struct request_sock *req,
2197 			 struct seq_file *f, int i)
2198 {
2199 	const struct inet_request_sock *ireq = inet_rsk(req);
2200 	long delta = req->rsk_timer.expires - jiffies;
2201 
2202 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2203 		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2204 		i,
2205 		ireq->ir_loc_addr,
2206 		ireq->ir_num,
2207 		ireq->ir_rmt_addr,
2208 		ntohs(ireq->ir_rmt_port),
2209 		TCP_SYN_RECV,
2210 		0, 0, /* could print option size, but that is af dependent. */
2211 		1,    /* timers active (only the expire timer) */
2212 		jiffies_delta_to_clock_t(delta),
2213 		req->num_timeout,
2214 		from_kuid_munged(seq_user_ns(f),
2215 				 sock_i_uid(req->rsk_listener)),
2216 		0,  /* non-standard timer */
2217 		0, /* open_requests have no inode */
2218 		0,
2219 		req);
2220 }
2221 
2222 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2223 {
2224 	int timer_active;
2225 	unsigned long timer_expires;
2226 	const struct tcp_sock *tp = tcp_sk(sk);
2227 	const struct inet_connection_sock *icsk = inet_csk(sk);
2228 	const struct inet_sock *inet = inet_sk(sk);
2229 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2230 	__be32 dest = inet->inet_daddr;
2231 	__be32 src = inet->inet_rcv_saddr;
2232 	__u16 destp = ntohs(inet->inet_dport);
2233 	__u16 srcp = ntohs(inet->inet_sport);
2234 	int rx_queue;
2235 	int state;
2236 
2237 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2238 	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2239 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2240 		timer_active	= 1;
2241 		timer_expires	= icsk->icsk_timeout;
2242 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2243 		timer_active	= 4;
2244 		timer_expires	= icsk->icsk_timeout;
2245 	} else if (timer_pending(&sk->sk_timer)) {
2246 		timer_active	= 2;
2247 		timer_expires	= sk->sk_timer.expires;
2248 	} else {
2249 		timer_active	= 0;
2250 		timer_expires = jiffies;
2251 	}
2252 
2253 	state = sk_state_load(sk);
2254 	if (state == TCP_LISTEN)
2255 		rx_queue = sk->sk_ack_backlog;
2256 	else
2257 		/* Because we don't lock the socket,
2258 		 * we might find a transient negative value.
2259 		 */
2260 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2261 
2262 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2263 			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2264 		i, src, srcp, dest, destp, state,
2265 		tp->write_seq - tp->snd_una,
2266 		rx_queue,
2267 		timer_active,
2268 		jiffies_delta_to_clock_t(timer_expires - jiffies),
2269 		icsk->icsk_retransmits,
2270 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2271 		icsk->icsk_probes_out,
2272 		sock_i_ino(sk),
2273 		refcount_read(&sk->sk_refcnt), sk,
2274 		jiffies_to_clock_t(icsk->icsk_rto),
2275 		jiffies_to_clock_t(icsk->icsk_ack.ato),
2276 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2277 		tp->snd_cwnd,
2278 		state == TCP_LISTEN ?
2279 		    fastopenq->max_qlen :
2280 		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2281 }
2282 
2283 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2284 			       struct seq_file *f, int i)
2285 {
2286 	long delta = tw->tw_timer.expires - jiffies;
2287 	__be32 dest, src;
2288 	__u16 destp, srcp;
2289 
2290 	dest  = tw->tw_daddr;
2291 	src   = tw->tw_rcv_saddr;
2292 	destp = ntohs(tw->tw_dport);
2293 	srcp  = ntohs(tw->tw_sport);
2294 
2295 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2296 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2297 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2298 		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2299 		refcount_read(&tw->tw_refcnt), tw);
2300 }
2301 
2302 #define TMPSZ 150
2303 
2304 static int tcp4_seq_show(struct seq_file *seq, void *v)
2305 {
2306 	struct tcp_iter_state *st;
2307 	struct sock *sk = v;
2308 
2309 	seq_setwidth(seq, TMPSZ - 1);
2310 	if (v == SEQ_START_TOKEN) {
2311 		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2312 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2313 			   "inode");
2314 		goto out;
2315 	}
2316 	st = seq->private;
2317 
2318 	if (sk->sk_state == TCP_TIME_WAIT)
2319 		get_timewait4_sock(v, seq, st->num);
2320 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
2321 		get_openreq4(v, seq, st->num);
2322 	else
2323 		get_tcp4_sock(v, seq, st->num);
2324 out:
2325 	seq_pad(seq, '\n');
2326 	return 0;
2327 }
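/*
 * Example of the resulting /proc/net/tcp layout (values illustrative
 * only, field widths per the seq_printf() formats above):
 *
 *   sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
 *    0: 0100007F:0CEA 00000000:0000 0A 00000000:00000000 00:00000000 00000000  1000        0 18616 ...
 *
 * i.e. a loopback listener (0100007F == 127.0.0.1 as printed on a
 * little-endian machine, port 0x0CEA == 3306) in state 0x0A (TCP_LISTEN),
 * formatted by get_tcp4_sock().
 */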
2328 
2329 static const struct file_operations tcp_afinfo_seq_fops = {
2330 	.owner   = THIS_MODULE,
2331 	.open    = tcp_seq_open,
2332 	.read    = seq_read,
2333 	.llseek  = seq_lseek,
2334 	.release = seq_release_net
2335 };
2336 
2337 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2338 	.name		= "tcp",
2339 	.family		= AF_INET,
2340 	.seq_fops	= &tcp_afinfo_seq_fops,
2341 	.seq_ops	= {
2342 		.show		= tcp4_seq_show,
2343 	},
2344 };
2345 
2346 static int __net_init tcp4_proc_init_net(struct net *net)
2347 {
2348 	return tcp_proc_register(net, &tcp4_seq_afinfo);
2349 }
2350 
2351 static void __net_exit tcp4_proc_exit_net(struct net *net)
2352 {
2353 	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2354 }
2355 
2356 static struct pernet_operations tcp4_net_ops = {
2357 	.init = tcp4_proc_init_net,
2358 	.exit = tcp4_proc_exit_net,
2359 };
2360 
2361 int __init tcp4_proc_init(void)
2362 {
2363 	return register_pernet_subsys(&tcp4_net_ops);
2364 }
2365 
2366 void tcp4_proc_exit(void)
2367 {
2368 	unregister_pernet_subsys(&tcp4_net_ops);
2369 }
2370 #endif /* CONFIG_PROC_FS */
2371 
2372 struct proto tcp_prot = {
2373 	.name			= "TCP",
2374 	.owner			= THIS_MODULE,
2375 	.close			= tcp_close,
2376 	.connect		= tcp_v4_connect,
2377 	.disconnect		= tcp_disconnect,
2378 	.accept			= inet_csk_accept,
2379 	.ioctl			= tcp_ioctl,
2380 	.init			= tcp_v4_init_sock,
2381 	.destroy		= tcp_v4_destroy_sock,
2382 	.shutdown		= tcp_shutdown,
2383 	.setsockopt		= tcp_setsockopt,
2384 	.getsockopt		= tcp_getsockopt,
2385 	.keepalive		= tcp_set_keepalive,
2386 	.recvmsg		= tcp_recvmsg,
2387 	.sendmsg		= tcp_sendmsg,
2388 	.sendpage		= tcp_sendpage,
2389 	.backlog_rcv		= tcp_v4_do_rcv,
2390 	.release_cb		= tcp_release_cb,
2391 	.hash			= inet_hash,
2392 	.unhash			= inet_unhash,
2393 	.get_port		= inet_csk_get_port,
2394 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2395 	.leave_memory_pressure	= tcp_leave_memory_pressure,
2396 	.stream_memory_free	= tcp_stream_memory_free,
2397 	.sockets_allocated	= &tcp_sockets_allocated,
2398 	.orphan_count		= &tcp_orphan_count,
2399 	.memory_allocated	= &tcp_memory_allocated,
2400 	.memory_pressure	= &tcp_memory_pressure,
2401 	.sysctl_mem		= sysctl_tcp_mem,
2402 	.sysctl_wmem		= sysctl_tcp_wmem,
2403 	.sysctl_rmem		= sysctl_tcp_rmem,
2404 	.max_header		= MAX_TCP_HEADER,
2405 	.obj_size		= sizeof(struct tcp_sock),
2406 	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
2407 	.twsk_prot		= &tcp_timewait_sock_ops,
2408 	.rsk_prot		= &tcp_request_sock_ops,
2409 	.h.hashinfo		= &tcp_hashinfo,
2410 	.no_autobind		= true,
2411 #ifdef CONFIG_COMPAT
2412 	.compat_setsockopt	= compat_tcp_setsockopt,
2413 	.compat_getsockopt	= compat_tcp_getsockopt,
2414 #endif
2415 	.diag_destroy		= tcp_abort,
2416 };
2417 EXPORT_SYMBOL(tcp_prot);
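/*
 * tcp_prot is hooked into the socket layer in af_inet.c; roughly, via an
 * inet_protosw entry of the form:
 *
 *	{
 *		.type     = SOCK_STREAM,
 *		.protocol = IPPROTO_TCP,
 *		.prot     = &tcp_prot,
 *		.ops      = &inet_stream_ops,
 *		.flags    = INET_PROTOSW_PERMANENT | INET_PROTOSW_ICSK,
 *	}
 *
 * so that socket(AF_INET, SOCK_STREAM, 0) resolves to the handlers above.
 */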
2418 
2419 static void __net_exit tcp_sk_exit(struct net *net)
2420 {
2421 	int cpu;
2422 
2423 	for_each_possible_cpu(cpu)
2424 		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2425 	free_percpu(net->ipv4.tcp_sk);
2426 }
2427 
2428 static int __net_init tcp_sk_init(struct net *net)
2429 {
2430 	int res, cpu, cnt;
2431 
2432 	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2433 	if (!net->ipv4.tcp_sk)
2434 		return -ENOMEM;
2435 
2436 	for_each_possible_cpu(cpu) {
2437 		struct sock *sk;
2438 
2439 		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2440 					   IPPROTO_TCP, net);
2441 		if (res)
2442 			goto fail;
2443 		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2444 		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2445 	}
2446 
2447 	net->ipv4.sysctl_tcp_ecn = 2;
2448 	net->ipv4.sysctl_tcp_ecn_fallback = 1;
2449 
2450 	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2451 	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2452 	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2453 
2454 	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2455 	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2456 	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2457 
2458 	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2459 	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2460 	net->ipv4.sysctl_tcp_syncookies = 1;
2461 	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2462 	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2463 	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2464 	net->ipv4.sysctl_tcp_orphan_retries = 0;
2465 	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2466 	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
2467 	net->ipv4.sysctl_tcp_tw_reuse = 0;
2468 
2469 	cnt = tcp_hashinfo.ehash_mask + 1;
2470 	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
2471 	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
2472 
2473 	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
2474 	net->ipv4.sysctl_tcp_sack = 1;
2475 	net->ipv4.sysctl_tcp_window_scaling = 1;
2476 	net->ipv4.sysctl_tcp_timestamps = 1;
2477 
2478 	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
2479 	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
2480 	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
2481 	atomic_set(&net->ipv4.tfo_active_disable_times, 0);
2482 
2483 	return 0;
2484 fail:
2485 	tcp_sk_exit(net);
2486 
2487 	return res;
2488 }
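/*
 * The defaults set up in tcp_sk_init() back the per-namespace sysctls
 * under /proc/sys/net/ipv4/: e.g. net->ipv4.sysctl_tcp_syncookies is
 * what "sysctl net.ipv4.tcp_syncookies" reads and writes within that
 * network namespace.
 */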
2489 
2490 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2491 {
2492 	struct net *net;
2493 
2494 	inet_twsk_purge(&tcp_hashinfo, AF_INET);
2495 
2496 	list_for_each_entry(net, net_exit_list, exit_list)
2497 		tcp_fastopen_ctx_destroy(net);
2498 }
2499 
2500 static struct pernet_operations __net_initdata tcp_sk_ops = {
2501 	.init		= tcp_sk_init,
2502 	.exit		= tcp_sk_exit,
2503 	.exit_batch	= tcp_sk_exit_batch,
2504 };
2505 
2506 void __init tcp_v4_init(void)
2507 {
2508 	if (register_pernet_subsys(&tcp_sk_ops))
2509 		panic("Failed to create the TCP control socket.\n");
2510 }
2511