xref: /openbmc/linux/net/ipv4/tcp_ipv4.c (revision 77d84ff8)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Implementation of the Transmission Control Protocol(TCP).
7  *
8  *		IPv4 specific functions
9  *
10  *
11  *		code split from:
12  *		linux/ipv4/tcp.c
13  *		linux/ipv4/tcp_input.c
14  *		linux/ipv4/tcp_output.c
15  *
16  *		See tcp.c for author information
17  *
18  *	This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23 
24 /*
25  * Changes:
26  *		David S. Miller	:	New socket lookup architecture.
27  *					This code is dedicated to John Dyson.
28  *		David S. Miller :	Change semantics of established hash,
29  *					half is devoted to TIME_WAIT sockets
30  *					and the rest go in the other half.
31  *		Andi Kleen :		Add support for syncookies and fixed
32  *					some bugs: ip options weren't passed to
33  *					the TCP layer, missed a check for an
34  *					ACK bit.
35  *		Andi Kleen :		Implemented fast path mtu discovery.
36  *	     				Fixed many serious bugs in the
37  *					request_sock handling and moved
38  *					most of it into the af independent code.
39  *					Added tail drop and some other bugfixes.
40  *					Added new listen semantics.
41  *		Mike McLagan	:	Routing by source
42  *	Juan Jose Ciarlante:		ip_dynaddr bits
43  *		Andi Kleen:		various fixes.
44  *	Vitaly E. Lavrov	:	Transparent proxy revived after year
45  *					coma.
46  *	Andi Kleen		:	Fix new listen.
47  *	Andi Kleen		:	Fix accept error reporting.
48  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
49  *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
50  *					a single port at the same time.
51  */
52 
53 #define pr_fmt(fmt) "TCP: " fmt
54 
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65 
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/netdma.h>
76 #include <net/secure_seq.h>
77 #include <net/tcp_memcontrol.h>
78 #include <net/busy_poll.h>
79 
80 #include <linux/inet.h>
81 #include <linux/ipv6.h>
82 #include <linux/stddef.h>
83 #include <linux/proc_fs.h>
84 #include <linux/seq_file.h>
85 
86 #include <linux/crypto.h>
87 #include <linux/scatterlist.h>
88 
89 int sysctl_tcp_tw_reuse __read_mostly;
90 int sysctl_tcp_low_latency __read_mostly;
91 EXPORT_SYMBOL(sysctl_tcp_low_latency);
92 
93 
94 #ifdef CONFIG_TCP_MD5SIG
95 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
96 			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
97 #endif
98 
99 struct inet_hashinfo tcp_hashinfo;
100 EXPORT_SYMBOL(tcp_hashinfo);
101 
102 static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
103 {
104 	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
105 					  ip_hdr(skb)->saddr,
106 					  tcp_hdr(skb)->dest,
107 					  tcp_hdr(skb)->source);
108 }
109 
110 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
111 {
112 	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
113 	struct tcp_sock *tp = tcp_sk(sk);
114 
115 	/* With PAWS, it is safe from the viewpoint
116 	   of data integrity. Even without PAWS it is safe provided sequence
117 	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.
118 
119 	   Actually, the idea is close to VJ's one, only timestamp cache is
120 	   held not per host, but per port pair and TW bucket is used as state
121 	   holder.
122 
123 	   If TW bucket has been already destroyed we fall back to VJ's scheme
124 	   and use initial timestamp retrieved from peer table.
125 	 */
126 	if (tcptw->tw_ts_recent_stamp &&
127 	    (twp == NULL || (sysctl_tcp_tw_reuse &&
128 			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
129 		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
130 		if (tp->write_seq == 0)
131 			tp->write_seq = 1;
132 		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
133 		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
134 		sock_hold(sktw);
135 		return 1;
136 	}
137 
138 	return 0;
139 }
140 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
141 
142 /* This will initiate an outgoing connection. */
143 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
144 {
145 	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
146 	struct inet_sock *inet = inet_sk(sk);
147 	struct tcp_sock *tp = tcp_sk(sk);
148 	__be16 orig_sport, orig_dport;
149 	__be32 daddr, nexthop;
150 	struct flowi4 *fl4;
151 	struct rtable *rt;
152 	int err;
153 	struct ip_options_rcu *inet_opt;
154 
155 	if (addr_len < sizeof(struct sockaddr_in))
156 		return -EINVAL;
157 
158 	if (usin->sin_family != AF_INET)
159 		return -EAFNOSUPPORT;
160 
161 	nexthop = daddr = usin->sin_addr.s_addr;
162 	inet_opt = rcu_dereference_protected(inet->inet_opt,
163 					     sock_owned_by_user(sk));
164 	if (inet_opt && inet_opt->opt.srr) {
165 		if (!daddr)
166 			return -EINVAL;
167 		nexthop = inet_opt->opt.faddr;
168 	}
169 
170 	orig_sport = inet->inet_sport;
171 	orig_dport = usin->sin_port;
172 	fl4 = &inet->cork.fl.u.ip4;
173 	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
174 			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
175 			      IPPROTO_TCP,
176 			      orig_sport, orig_dport, sk, true);
177 	if (IS_ERR(rt)) {
178 		err = PTR_ERR(rt);
179 		if (err == -ENETUNREACH)
180 			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
181 		return err;
182 	}
183 
184 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
185 		ip_rt_put(rt);
186 		return -ENETUNREACH;
187 	}
188 
189 	if (!inet_opt || !inet_opt->opt.srr)
190 		daddr = fl4->daddr;
191 
192 	if (!inet->inet_saddr)
193 		inet->inet_saddr = fl4->saddr;
194 	inet->inet_rcv_saddr = inet->inet_saddr;
195 
196 	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
197 		/* Reset inherited state */
198 		tp->rx_opt.ts_recent	   = 0;
199 		tp->rx_opt.ts_recent_stamp = 0;
200 		if (likely(!tp->repair))
201 			tp->write_seq	   = 0;
202 	}
203 
204 	if (tcp_death_row.sysctl_tw_recycle &&
205 	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
206 		tcp_fetch_timewait_stamp(sk, &rt->dst);
207 
208 	inet->inet_dport = usin->sin_port;
209 	inet->inet_daddr = daddr;
210 
211 	inet_csk(sk)->icsk_ext_hdr_len = 0;
212 	if (inet_opt)
213 		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
214 
215 	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
216 
217 	/* Socket identity is still unknown (sport may be zero).
218 	 * However we set state to SYN-SENT and not releasing socket
219 	 * lock select source port, enter ourselves into the hash tables and
220 	 * complete initialization after this.
221 	 */
222 	tcp_set_state(sk, TCP_SYN_SENT);
223 	err = inet_hash_connect(&tcp_death_row, sk);
224 	if (err)
225 		goto failure;
226 
227 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228 			       inet->inet_sport, inet->inet_dport, sk);
229 	if (IS_ERR(rt)) {
230 		err = PTR_ERR(rt);
231 		rt = NULL;
232 		goto failure;
233 	}
234 	/* OK, now commit destination to socket.  */
235 	sk->sk_gso_type = SKB_GSO_TCPV4;
236 	sk_setup_caps(sk, &rt->dst);
237 
238 	if (!tp->write_seq && likely(!tp->repair))
239 		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
240 							   inet->inet_daddr,
241 							   inet->inet_sport,
242 							   usin->sin_port);
243 
244 	inet->inet_id = tp->write_seq ^ jiffies;
245 
246 	err = tcp_connect(sk);
247 
248 	rt = NULL;
249 	if (err)
250 		goto failure;
251 
252 	return 0;
253 
254 failure:
255 	/*
256 	 * This unhashes the socket and releases the local port,
257 	 * if necessary.
258 	 */
259 	tcp_set_state(sk, TCP_CLOSE);
260 	ip_rt_put(rt);
261 	sk->sk_route_caps = 0;
262 	inet->inet_dport = 0;
263 	return err;
264 }
265 EXPORT_SYMBOL(tcp_v4_connect);
266 
267 /*
268  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
269  * It can be called through tcp_release_cb() if socket was owned by user
270  * at the time tcp_v4_err() was called to handle ICMP message.
271  */
272 static void tcp_v4_mtu_reduced(struct sock *sk)
273 {
274 	struct dst_entry *dst;
275 	struct inet_sock *inet = inet_sk(sk);
276 	u32 mtu = tcp_sk(sk)->mtu_info;
277 
278 	dst = inet_csk_update_pmtu(sk, mtu);
279 	if (!dst)
280 		return;
281 
282 	/* Something is about to be wrong... Remember soft error
283 	 * for the case, if this connection will not able to recover.
284 	 */
285 	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
286 		sk->sk_err_soft = EMSGSIZE;
287 
288 	mtu = dst_mtu(dst);
289 
290 	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
291 	    ip_sk_accept_pmtu(sk) &&
292 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
293 		tcp_sync_mss(sk, mtu);
294 
295 		/* Resend the TCP packet because it's
296 		 * clear that the old packet has been
297 		 * dropped. This is the new "fast" path mtu
298 		 * discovery.
299 		 */
300 		tcp_simple_retransmit(sk);
301 	} /* else let the usual retransmit timer handle it */
302 }
303 
304 static void do_redirect(struct sk_buff *skb, struct sock *sk)
305 {
306 	struct dst_entry *dst = __sk_dst_check(sk, 0);
307 
308 	if (dst)
309 		dst->ops->redirect(dst, sk, skb);
310 }
311 
312 /*
313  * This routine is called by the ICMP module when it gets some
314  * sort of error condition.  If err < 0 then the socket should
315  * be closed and the error returned to the user.  If err > 0
316  * it's just the icmp type << 8 | icmp code.  After adjustment
317  * header points to the first 8 bytes of the tcp header.  We need
318  * to find the appropriate port.
319  *
320  * The locking strategy used here is very "optimistic". When
321  * someone else accesses the socket the ICMP is just dropped
322  * and for some paths there is no check at all.
323  * A more general error queue to queue errors for later handling
324  * is probably better.
325  *
326  */
327 
328 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
329 {
330 	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
331 	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
332 	struct inet_connection_sock *icsk;
333 	struct tcp_sock *tp;
334 	struct inet_sock *inet;
335 	const int type = icmp_hdr(icmp_skb)->type;
336 	const int code = icmp_hdr(icmp_skb)->code;
337 	struct sock *sk;
338 	struct sk_buff *skb;
339 	struct request_sock *req;
340 	__u32 seq;
341 	__u32 remaining;
342 	int err;
343 	struct net *net = dev_net(icmp_skb->dev);
344 
345 	if (icmp_skb->len < (iph->ihl << 2) + 8) {
346 		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
347 		return;
348 	}
349 
350 	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
351 			iph->saddr, th->source, inet_iif(icmp_skb));
352 	if (!sk) {
353 		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
354 		return;
355 	}
356 	if (sk->sk_state == TCP_TIME_WAIT) {
357 		inet_twsk_put(inet_twsk(sk));
358 		return;
359 	}
360 
361 	bh_lock_sock(sk);
362 	/* If too many ICMPs get dropped on busy
363 	 * servers this needs to be solved differently.
364 	 * We do take care of PMTU discovery (RFC1191) special case :
365 	 * we can receive locally generated ICMP messages while socket is held.
366 	 */
367 	if (sock_owned_by_user(sk)) {
368 		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
369 			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
370 	}
371 	if (sk->sk_state == TCP_CLOSE)
372 		goto out;
373 
374 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
375 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
376 		goto out;
377 	}
378 
379 	icsk = inet_csk(sk);
380 	tp = tcp_sk(sk);
381 	req = tp->fastopen_rsk;
382 	seq = ntohl(th->seq);
383 	if (sk->sk_state != TCP_LISTEN &&
384 	    !between(seq, tp->snd_una, tp->snd_nxt) &&
385 	    (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
386 		/* For a Fast Open socket, allow seq to be snt_isn. */
387 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
388 		goto out;
389 	}
390 
391 	switch (type) {
392 	case ICMP_REDIRECT:
393 		do_redirect(icmp_skb, sk);
394 		goto out;
395 	case ICMP_SOURCE_QUENCH:
396 		/* Just silently ignore these. */
397 		goto out;
398 	case ICMP_PARAMETERPROB:
399 		err = EPROTO;
400 		break;
401 	case ICMP_DEST_UNREACH:
402 		if (code > NR_ICMP_UNREACH)
403 			goto out;
404 
405 		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
406 			/* We are not interested in TCP_LISTEN and open_requests
407 			 * (SYN-ACKs send out by Linux are always <576bytes so
408 			 * they should go through unfragmented).
409 			 */
410 			if (sk->sk_state == TCP_LISTEN)
411 				goto out;
412 
413 			tp->mtu_info = info;
414 			if (!sock_owned_by_user(sk)) {
415 				tcp_v4_mtu_reduced(sk);
416 			} else {
417 				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
418 					sock_hold(sk);
419 			}
420 			goto out;
421 		}
422 
423 		err = icmp_err_convert[code].errno;
424 		/* check if icmp_skb allows revert of backoff
425 		 * (see draft-zimmermann-tcp-lcd) */
426 		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
427 			break;
428 		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
429 		    !icsk->icsk_backoff)
430 			break;
431 
432 		/* XXX (TFO) - revisit the following logic for TFO */
433 
434 		if (sock_owned_by_user(sk))
435 			break;
436 
437 		icsk->icsk_backoff--;
438 		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
439 			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
440 		tcp_bound_rto(sk);
441 
442 		skb = tcp_write_queue_head(sk);
443 		BUG_ON(!skb);
444 
445 		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
446 				tcp_time_stamp - TCP_SKB_CB(skb)->when);
447 
448 		if (remaining) {
449 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
450 						  remaining, TCP_RTO_MAX);
451 		} else {
452 			/* RTO revert clocked out retransmission.
453 			 * Will retransmit now */
454 			tcp_retransmit_timer(sk);
455 		}
456 
457 		break;
458 	case ICMP_TIME_EXCEEDED:
459 		err = EHOSTUNREACH;
460 		break;
461 	default:
462 		goto out;
463 	}
464 
465 	/* XXX (TFO) - if it's a TFO socket and has been accepted, rather
466 	 * than following the TCP_SYN_RECV case and closing the socket,
467 	 * we ignore the ICMP error and keep trying like a fully established
468 	 * socket. Is this the right thing to do?
469 	 */
470 	if (req && req->sk == NULL)
471 		goto out;
472 
473 	switch (sk->sk_state) {
474 		struct request_sock *req, **prev;
475 	case TCP_LISTEN:
476 		if (sock_owned_by_user(sk))
477 			goto out;
478 
479 		req = inet_csk_search_req(sk, &prev, th->dest,
480 					  iph->daddr, iph->saddr);
481 		if (!req)
482 			goto out;
483 
484 		/* ICMPs are not backlogged, hence we cannot get
485 		   an established socket here.
486 		 */
487 		WARN_ON(req->sk);
488 
489 		if (seq != tcp_rsk(req)->snt_isn) {
490 			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
491 			goto out;
492 		}
493 
494 		/*
495 		 * Still in SYN_RECV, just remove it silently.
496 		 * There is no good way to pass the error to the newly
497 		 * created socket, and POSIX does not want network
498 		 * errors returned from accept().
499 		 */
500 		inet_csk_reqsk_queue_drop(sk, req, prev);
501 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
502 		goto out;
503 
504 	case TCP_SYN_SENT:
505 	case TCP_SYN_RECV:  /* Cannot happen.
506 			       It can f.e. if SYNs crossed,
507 			       or Fast Open.
508 			     */
509 		if (!sock_owned_by_user(sk)) {
510 			sk->sk_err = err;
511 
512 			sk->sk_error_report(sk);
513 
514 			tcp_done(sk);
515 		} else {
516 			sk->sk_err_soft = err;
517 		}
518 		goto out;
519 	}
520 
521 	/* If we've already connected we will keep trying
522 	 * until we time out, or the user gives up.
523 	 *
524 	 * rfc1122 4.2.3.9 allows to consider as hard errors
525 	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
526 	 * but it is obsoleted by pmtu discovery).
527 	 *
528 	 * Note, that in modern internet, where routing is unreliable
529 	 * and in each dark corner broken firewalls sit, sending random
530 	 * errors ordered by their masters even this two messages finally lose
531 	 * their original sense (even Linux sends invalid PORT_UNREACHs)
532 	 *
533 	 * Now we are in compliance with RFCs.
534 	 *							--ANK (980905)
535 	 */
536 
537 	inet = inet_sk(sk);
538 	if (!sock_owned_by_user(sk) && inet->recverr) {
539 		sk->sk_err = err;
540 		sk->sk_error_report(sk);
541 	} else	{ /* Only an error on timeout */
542 		sk->sk_err_soft = err;
543 	}
544 
545 out:
546 	bh_unlock_sock(sk);
547 	sock_put(sk);
548 }
549 
550 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
551 {
552 	struct tcphdr *th = tcp_hdr(skb);
553 
554 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
555 		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
556 		skb->csum_start = skb_transport_header(skb) - skb->head;
557 		skb->csum_offset = offsetof(struct tcphdr, check);
558 	} else {
559 		th->check = tcp_v4_check(skb->len, saddr, daddr,
560 					 csum_partial(th,
561 						      th->doff << 2,
562 						      skb->csum));
563 	}
564 }
565 
566 /* This routine computes an IPv4 TCP checksum. */
567 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
568 {
569 	const struct inet_sock *inet = inet_sk(sk);
570 
571 	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
572 }
573 EXPORT_SYMBOL(tcp_v4_send_check);
574 
575 /*
576  *	This routine will send an RST to the other tcp.
577  *
578  *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
579  *		      for reset.
580  *	Answer: if a packet caused RST, it is not for a socket
581  *		existing in our system, if it is matched to a socket,
582  *		it is just duplicate segment or bug in other side's TCP.
583  *		So that we build reply only basing on parameters
584  *		arrived with segment.
585  *	Exception: precedence violation. We do not implement it in any case.
586  */
587 
588 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
589 {
590 	const struct tcphdr *th = tcp_hdr(skb);
591 	struct {
592 		struct tcphdr th;
593 #ifdef CONFIG_TCP_MD5SIG
594 		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
595 #endif
596 	} rep;
597 	struct ip_reply_arg arg;
598 #ifdef CONFIG_TCP_MD5SIG
599 	struct tcp_md5sig_key *key;
600 	const __u8 *hash_location = NULL;
601 	unsigned char newhash[16];
602 	int genhash;
603 	struct sock *sk1 = NULL;
604 #endif
605 	struct net *net;
606 
607 	/* Never send a reset in response to a reset. */
608 	if (th->rst)
609 		return;
610 
611 	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
612 		return;
613 
614 	/* Swap the send and the receive. */
615 	memset(&rep, 0, sizeof(rep));
616 	rep.th.dest   = th->source;
617 	rep.th.source = th->dest;
618 	rep.th.doff   = sizeof(struct tcphdr) / 4;
619 	rep.th.rst    = 1;
620 
621 	if (th->ack) {
622 		rep.th.seq = th->ack_seq;
623 	} else {
624 		rep.th.ack = 1;
625 		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
626 				       skb->len - (th->doff << 2));
627 	}
628 
629 	memset(&arg, 0, sizeof(arg));
630 	arg.iov[0].iov_base = (unsigned char *)&rep;
631 	arg.iov[0].iov_len  = sizeof(rep.th);
632 
633 #ifdef CONFIG_TCP_MD5SIG
634 	hash_location = tcp_parse_md5sig_option(th);
635 	if (!sk && hash_location) {
636 		/*
637 		 * active side is lost. Try to find listening socket through
638 		 * source port, and then find md5 key through listening socket.
639 		 * we are not loose security here:
640 		 * Incoming packet is checked with md5 hash with finding key,
641 		 * no RST generated if md5 hash doesn't match.
642 		 */
643 		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
644 					     &tcp_hashinfo, ip_hdr(skb)->saddr,
645 					     th->source, ip_hdr(skb)->daddr,
646 					     ntohs(th->source), inet_iif(skb));
647 		/* don't send rst if it can't find key */
648 		if (!sk1)
649 			return;
650 		rcu_read_lock();
651 		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
652 					&ip_hdr(skb)->saddr, AF_INET);
653 		if (!key)
654 			goto release_sk1;
655 
656 		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
657 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
658 			goto release_sk1;
659 	} else {
660 		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
661 					     &ip_hdr(skb)->saddr,
662 					     AF_INET) : NULL;
663 	}
664 
665 	if (key) {
666 		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
667 				   (TCPOPT_NOP << 16) |
668 				   (TCPOPT_MD5SIG << 8) |
669 				   TCPOLEN_MD5SIG);
670 		/* Update length and the length the header thinks exists */
671 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
672 		rep.th.doff = arg.iov[0].iov_len / 4;
673 
674 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
675 				     key, ip_hdr(skb)->saddr,
676 				     ip_hdr(skb)->daddr, &rep.th);
677 	}
678 #endif
679 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
680 				      ip_hdr(skb)->saddr, /* XXX */
681 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
682 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
683 	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
684 	/* When socket is gone, all binding information is lost.
685 	 * routing might fail in this case. No choice here, if we choose to force
686 	 * input interface, we will misroute in case of asymmetric route.
687 	 */
688 	if (sk)
689 		arg.bound_dev_if = sk->sk_bound_dev_if;
690 
691 	net = dev_net(skb_dst(skb)->dev);
692 	arg.tos = ip_hdr(skb)->tos;
693 	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
694 			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
695 
696 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
697 	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
698 
699 #ifdef CONFIG_TCP_MD5SIG
700 release_sk1:
701 	if (sk1) {
702 		rcu_read_unlock();
703 		sock_put(sk1);
704 	}
705 #endif
706 }
707 
708 /* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
709    outside socket context is ugly, certainly. What can I do?
710  */
711 
712 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
713 			    u32 win, u32 tsval, u32 tsecr, int oif,
714 			    struct tcp_md5sig_key *key,
715 			    int reply_flags, u8 tos)
716 {
717 	const struct tcphdr *th = tcp_hdr(skb);
718 	struct {
719 		struct tcphdr th;
720 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
721 #ifdef CONFIG_TCP_MD5SIG
722 			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
723 #endif
724 			];
725 	} rep;
726 	struct ip_reply_arg arg;
727 	struct net *net = dev_net(skb_dst(skb)->dev);
728 
729 	memset(&rep.th, 0, sizeof(struct tcphdr));
730 	memset(&arg, 0, sizeof(arg));
731 
732 	arg.iov[0].iov_base = (unsigned char *)&rep;
733 	arg.iov[0].iov_len  = sizeof(rep.th);
734 	if (tsecr) {
735 		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
736 				   (TCPOPT_TIMESTAMP << 8) |
737 				   TCPOLEN_TIMESTAMP);
738 		rep.opt[1] = htonl(tsval);
739 		rep.opt[2] = htonl(tsecr);
740 		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
741 	}
742 
743 	/* Swap the send and the receive. */
744 	rep.th.dest    = th->source;
745 	rep.th.source  = th->dest;
746 	rep.th.doff    = arg.iov[0].iov_len / 4;
747 	rep.th.seq     = htonl(seq);
748 	rep.th.ack_seq = htonl(ack);
749 	rep.th.ack     = 1;
750 	rep.th.window  = htons(win);
751 
752 #ifdef CONFIG_TCP_MD5SIG
753 	if (key) {
754 		int offset = (tsecr) ? 3 : 0;
755 
756 		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
757 					  (TCPOPT_NOP << 16) |
758 					  (TCPOPT_MD5SIG << 8) |
759 					  TCPOLEN_MD5SIG);
760 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
761 		rep.th.doff = arg.iov[0].iov_len/4;
762 
763 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
764 				    key, ip_hdr(skb)->saddr,
765 				    ip_hdr(skb)->daddr, &rep.th);
766 	}
767 #endif
768 	arg.flags = reply_flags;
769 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
770 				      ip_hdr(skb)->saddr, /* XXX */
771 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
772 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
773 	if (oif)
774 		arg.bound_dev_if = oif;
775 	arg.tos = tos;
776 	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
777 			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
778 
779 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
780 }
781 
782 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
783 {
784 	struct inet_timewait_sock *tw = inet_twsk(sk);
785 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
786 
787 	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
788 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
789 			tcp_time_stamp + tcptw->tw_ts_offset,
790 			tcptw->tw_ts_recent,
791 			tw->tw_bound_dev_if,
792 			tcp_twsk_md5_key(tcptw),
793 			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
794 			tw->tw_tos
795 			);
796 
797 	inet_twsk_put(tw);
798 }
799 
800 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
801 				  struct request_sock *req)
802 {
803 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
804 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
805 	 */
806 	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
807 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
808 			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
809 			tcp_time_stamp,
810 			req->ts_recent,
811 			0,
812 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
813 					  AF_INET),
814 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
815 			ip_hdr(skb)->tos);
816 }
817 
818 /*
819  *	Send a SYN-ACK after having received a SYN.
820  *	This still operates on a request_sock only, not on a big
821  *	socket.
822  */
823 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
824 			      struct request_sock *req,
825 			      u16 queue_mapping)
826 {
827 	const struct inet_request_sock *ireq = inet_rsk(req);
828 	struct flowi4 fl4;
829 	int err = -1;
830 	struct sk_buff * skb;
831 
832 	/* First, grab a route. */
833 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
834 		return -1;
835 
836 	skb = tcp_make_synack(sk, dst, req, NULL);
837 
838 	if (skb) {
839 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
840 
841 		skb_set_queue_mapping(skb, queue_mapping);
842 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
843 					    ireq->ir_rmt_addr,
844 					    ireq->opt);
845 		err = net_xmit_eval(err);
846 		if (!tcp_rsk(req)->snt_synack && !err)
847 			tcp_rsk(req)->snt_synack = tcp_time_stamp;
848 	}
849 
850 	return err;
851 }
852 
853 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
854 {
855 	int res = tcp_v4_send_synack(sk, NULL, req, 0);
856 
857 	if (!res)
858 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
859 	return res;
860 }
861 
862 /*
863  *	IPv4 request_sock destructor.
864  */
865 static void tcp_v4_reqsk_destructor(struct request_sock *req)
866 {
867 	kfree(inet_rsk(req)->opt);
868 }
869 
870 /*
871  * Return true if a syncookie should be sent
872  */
873 bool tcp_syn_flood_action(struct sock *sk,
874 			 const struct sk_buff *skb,
875 			 const char *proto)
876 {
877 	const char *msg = "Dropping request";
878 	bool want_cookie = false;
879 	struct listen_sock *lopt;
880 
881 
882 
883 #ifdef CONFIG_SYN_COOKIES
884 	if (sysctl_tcp_syncookies) {
885 		msg = "Sending cookies";
886 		want_cookie = true;
887 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
888 	} else
889 #endif
890 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
891 
892 	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
893 	if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
894 		lopt->synflood_warned = 1;
895 		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
896 			proto, ntohs(tcp_hdr(skb)->dest), msg);
897 	}
898 	return want_cookie;
899 }
900 EXPORT_SYMBOL(tcp_syn_flood_action);
901 
902 /*
903  * Save and compile IPv4 options into the request_sock if needed.
904  */
905 static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
906 {
907 	const struct ip_options *opt = &(IPCB(skb)->opt);
908 	struct ip_options_rcu *dopt = NULL;
909 
910 	if (opt && opt->optlen) {
911 		int opt_size = sizeof(*dopt) + opt->optlen;
912 
913 		dopt = kmalloc(opt_size, GFP_ATOMIC);
914 		if (dopt) {
915 			if (ip_options_echo(&dopt->opt, skb)) {
916 				kfree(dopt);
917 				dopt = NULL;
918 			}
919 		}
920 	}
921 	return dopt;
922 }
923 
924 #ifdef CONFIG_TCP_MD5SIG
925 /*
926  * RFC2385 MD5 checksumming requires a mapping of
927  * IP address->MD5 Key.
928  * We need to maintain these in the sk structure.
929  */
930 
931 /* Find the Key structure for an address.  */
932 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
933 					 const union tcp_md5_addr *addr,
934 					 int family)
935 {
936 	struct tcp_sock *tp = tcp_sk(sk);
937 	struct tcp_md5sig_key *key;
938 	unsigned int size = sizeof(struct in_addr);
939 	struct tcp_md5sig_info *md5sig;
940 
941 	/* caller either holds rcu_read_lock() or socket lock */
942 	md5sig = rcu_dereference_check(tp->md5sig_info,
943 				       sock_owned_by_user(sk) ||
944 				       lockdep_is_held(&sk->sk_lock.slock));
945 	if (!md5sig)
946 		return NULL;
947 #if IS_ENABLED(CONFIG_IPV6)
948 	if (family == AF_INET6)
949 		size = sizeof(struct in6_addr);
950 #endif
951 	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
952 		if (key->family != family)
953 			continue;
954 		if (!memcmp(&key->addr, addr, size))
955 			return key;
956 	}
957 	return NULL;
958 }
959 EXPORT_SYMBOL(tcp_md5_do_lookup);
960 
961 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
962 					 struct sock *addr_sk)
963 {
964 	union tcp_md5_addr *addr;
965 
966 	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
967 	return tcp_md5_do_lookup(sk, addr, AF_INET);
968 }
969 EXPORT_SYMBOL(tcp_v4_md5_lookup);
970 
971 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
972 						      struct request_sock *req)
973 {
974 	union tcp_md5_addr *addr;
975 
976 	addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
977 	return tcp_md5_do_lookup(sk, addr, AF_INET);
978 }
979 
980 /* This can be called on a newly created socket, from other files */
981 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
982 		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
983 {
984 	/* Add Key to the list */
985 	struct tcp_md5sig_key *key;
986 	struct tcp_sock *tp = tcp_sk(sk);
987 	struct tcp_md5sig_info *md5sig;
988 
989 	key = tcp_md5_do_lookup(sk, addr, family);
990 	if (key) {
991 		/* Pre-existing entry - just update that one. */
992 		memcpy(key->key, newkey, newkeylen);
993 		key->keylen = newkeylen;
994 		return 0;
995 	}
996 
997 	md5sig = rcu_dereference_protected(tp->md5sig_info,
998 					   sock_owned_by_user(sk));
999 	if (!md5sig) {
1000 		md5sig = kmalloc(sizeof(*md5sig), gfp);
1001 		if (!md5sig)
1002 			return -ENOMEM;
1003 
1004 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1005 		INIT_HLIST_HEAD(&md5sig->head);
1006 		rcu_assign_pointer(tp->md5sig_info, md5sig);
1007 	}
1008 
1009 	key = sock_kmalloc(sk, sizeof(*key), gfp);
1010 	if (!key)
1011 		return -ENOMEM;
1012 	if (!tcp_alloc_md5sig_pool()) {
1013 		sock_kfree_s(sk, key, sizeof(*key));
1014 		return -ENOMEM;
1015 	}
1016 
1017 	memcpy(key->key, newkey, newkeylen);
1018 	key->keylen = newkeylen;
1019 	key->family = family;
1020 	memcpy(&key->addr, addr,
1021 	       (family == AF_INET6) ? sizeof(struct in6_addr) :
1022 				      sizeof(struct in_addr));
1023 	hlist_add_head_rcu(&key->node, &md5sig->head);
1024 	return 0;
1025 }
1026 EXPORT_SYMBOL(tcp_md5_do_add);
1027 
1028 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1029 {
1030 	struct tcp_md5sig_key *key;
1031 
1032 	key = tcp_md5_do_lookup(sk, addr, family);
1033 	if (!key)
1034 		return -ENOENT;
1035 	hlist_del_rcu(&key->node);
1036 	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1037 	kfree_rcu(key, rcu);
1038 	return 0;
1039 }
1040 EXPORT_SYMBOL(tcp_md5_do_del);
1041 
1042 static void tcp_clear_md5_list(struct sock *sk)
1043 {
1044 	struct tcp_sock *tp = tcp_sk(sk);
1045 	struct tcp_md5sig_key *key;
1046 	struct hlist_node *n;
1047 	struct tcp_md5sig_info *md5sig;
1048 
1049 	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1050 
1051 	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1052 		hlist_del_rcu(&key->node);
1053 		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1054 		kfree_rcu(key, rcu);
1055 	}
1056 }
1057 
1058 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1059 				 int optlen)
1060 {
1061 	struct tcp_md5sig cmd;
1062 	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1063 
1064 	if (optlen < sizeof(cmd))
1065 		return -EINVAL;
1066 
1067 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1068 		return -EFAULT;
1069 
1070 	if (sin->sin_family != AF_INET)
1071 		return -EINVAL;
1072 
1073 	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1074 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1075 				      AF_INET);
1076 
1077 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1078 		return -EINVAL;
1079 
1080 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1081 			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1082 			      GFP_KERNEL);
1083 }
1084 
1085 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1086 					__be32 daddr, __be32 saddr, int nbytes)
1087 {
1088 	struct tcp4_pseudohdr *bp;
1089 	struct scatterlist sg;
1090 
1091 	bp = &hp->md5_blk.ip4;
1092 
1093 	/*
1094 	 * 1. the TCP pseudo-header (in the order: source IP address,
1095 	 * destination IP address, zero-padded protocol number, and
1096 	 * segment length)
1097 	 */
1098 	bp->saddr = saddr;
1099 	bp->daddr = daddr;
1100 	bp->pad = 0;
1101 	bp->protocol = IPPROTO_TCP;
1102 	bp->len = cpu_to_be16(nbytes);
1103 
1104 	sg_init_one(&sg, bp, sizeof(*bp));
1105 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1106 }
1107 
1108 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1109 			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1110 {
1111 	struct tcp_md5sig_pool *hp;
1112 	struct hash_desc *desc;
1113 
1114 	hp = tcp_get_md5sig_pool();
1115 	if (!hp)
1116 		goto clear_hash_noput;
1117 	desc = &hp->md5_desc;
1118 
1119 	if (crypto_hash_init(desc))
1120 		goto clear_hash;
1121 	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1122 		goto clear_hash;
1123 	if (tcp_md5_hash_header(hp, th))
1124 		goto clear_hash;
1125 	if (tcp_md5_hash_key(hp, key))
1126 		goto clear_hash;
1127 	if (crypto_hash_final(desc, md5_hash))
1128 		goto clear_hash;
1129 
1130 	tcp_put_md5sig_pool();
1131 	return 0;
1132 
1133 clear_hash:
1134 	tcp_put_md5sig_pool();
1135 clear_hash_noput:
1136 	memset(md5_hash, 0, 16);
1137 	return 1;
1138 }
1139 
1140 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1141 			const struct sock *sk, const struct request_sock *req,
1142 			const struct sk_buff *skb)
1143 {
1144 	struct tcp_md5sig_pool *hp;
1145 	struct hash_desc *desc;
1146 	const struct tcphdr *th = tcp_hdr(skb);
1147 	__be32 saddr, daddr;
1148 
1149 	if (sk) {
1150 		saddr = inet_sk(sk)->inet_saddr;
1151 		daddr = inet_sk(sk)->inet_daddr;
1152 	} else if (req) {
1153 		saddr = inet_rsk(req)->ir_loc_addr;
1154 		daddr = inet_rsk(req)->ir_rmt_addr;
1155 	} else {
1156 		const struct iphdr *iph = ip_hdr(skb);
1157 		saddr = iph->saddr;
1158 		daddr = iph->daddr;
1159 	}
1160 
1161 	hp = tcp_get_md5sig_pool();
1162 	if (!hp)
1163 		goto clear_hash_noput;
1164 	desc = &hp->md5_desc;
1165 
1166 	if (crypto_hash_init(desc))
1167 		goto clear_hash;
1168 
1169 	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1170 		goto clear_hash;
1171 	if (tcp_md5_hash_header(hp, th))
1172 		goto clear_hash;
1173 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1174 		goto clear_hash;
1175 	if (tcp_md5_hash_key(hp, key))
1176 		goto clear_hash;
1177 	if (crypto_hash_final(desc, md5_hash))
1178 		goto clear_hash;
1179 
1180 	tcp_put_md5sig_pool();
1181 	return 0;
1182 
1183 clear_hash:
1184 	tcp_put_md5sig_pool();
1185 clear_hash_noput:
1186 	memset(md5_hash, 0, 16);
1187 	return 1;
1188 }
1189 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1190 
1191 static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1192 {
1193 	/*
1194 	 * This gets called for each TCP segment that arrives
1195 	 * so we want to be efficient.
1196 	 * We have 3 drop cases:
1197 	 * o No MD5 hash and one expected.
1198 	 * o MD5 hash and we're not expecting one.
1199 	 * o MD5 hash and its wrong.
1200 	 */
1201 	const __u8 *hash_location = NULL;
1202 	struct tcp_md5sig_key *hash_expected;
1203 	const struct iphdr *iph = ip_hdr(skb);
1204 	const struct tcphdr *th = tcp_hdr(skb);
1205 	int genhash;
1206 	unsigned char newhash[16];
1207 
1208 	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1209 					  AF_INET);
1210 	hash_location = tcp_parse_md5sig_option(th);
1211 
1212 	/* We've parsed the options - do we have a hash? */
1213 	if (!hash_expected && !hash_location)
1214 		return false;
1215 
1216 	if (hash_expected && !hash_location) {
1217 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1218 		return true;
1219 	}
1220 
1221 	if (!hash_expected && hash_location) {
1222 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1223 		return true;
1224 	}
1225 
1226 	/* Okay, so this is hash_expected and hash_location -
1227 	 * so we need to calculate the checksum.
1228 	 */
1229 	genhash = tcp_v4_md5_hash_skb(newhash,
1230 				      hash_expected,
1231 				      NULL, NULL, skb);
1232 
1233 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1234 		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1235 				     &iph->saddr, ntohs(th->source),
1236 				     &iph->daddr, ntohs(th->dest),
1237 				     genhash ? " tcp_v4_calc_md5_hash failed"
1238 				     : "");
1239 		return true;
1240 	}
1241 	return false;
1242 }
1243 
1244 #endif
1245 
1246 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1247 	.family		=	PF_INET,
1248 	.obj_size	=	sizeof(struct tcp_request_sock),
1249 	.rtx_syn_ack	=	tcp_v4_rtx_synack,
1250 	.send_ack	=	tcp_v4_reqsk_send_ack,
1251 	.destructor	=	tcp_v4_reqsk_destructor,
1252 	.send_reset	=	tcp_v4_send_reset,
1253 	.syn_ack_timeout = 	tcp_syn_ack_timeout,
1254 };
1255 
1256 #ifdef CONFIG_TCP_MD5SIG
1257 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1258 	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
1259 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1260 };
1261 #endif
1262 
1263 static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
1264 			       struct request_sock *req,
1265 			       struct tcp_fastopen_cookie *foc,
1266 			       struct tcp_fastopen_cookie *valid_foc)
1267 {
1268 	bool skip_cookie = false;
1269 	struct fastopen_queue *fastopenq;
1270 
1271 	if (likely(!fastopen_cookie_present(foc))) {
1272 		/* See include/net/tcp.h for the meaning of these knobs */
1273 		if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
1274 		    ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
1275 		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
1276 			skip_cookie = true; /* no cookie to validate */
1277 		else
1278 			return false;
1279 	}
1280 	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
1281 	/* A FO option is present; bump the counter. */
1282 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
1283 
1284 	/* Make sure the listener has enabled fastopen, and we don't
1285 	 * exceed the max # of pending TFO requests allowed before trying
1286 	 * to validating the cookie in order to avoid burning CPU cycles
1287 	 * unnecessarily.
1288 	 *
1289 	 * XXX (TFO) - The implication of checking the max_qlen before
1290 	 * processing a cookie request is that clients can't differentiate
1291 	 * between qlen overflow causing Fast Open to be disabled
1292 	 * temporarily vs a server not supporting Fast Open at all.
1293 	 */
1294 	if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
1295 	    fastopenq == NULL || fastopenq->max_qlen == 0)
1296 		return false;
1297 
1298 	if (fastopenq->qlen >= fastopenq->max_qlen) {
1299 		struct request_sock *req1;
1300 		spin_lock(&fastopenq->lock);
1301 		req1 = fastopenq->rskq_rst_head;
1302 		if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
1303 			spin_unlock(&fastopenq->lock);
1304 			NET_INC_STATS_BH(sock_net(sk),
1305 			    LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
1306 			/* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
1307 			foc->len = -1;
1308 			return false;
1309 		}
1310 		fastopenq->rskq_rst_head = req1->dl_next;
1311 		fastopenq->qlen--;
1312 		spin_unlock(&fastopenq->lock);
1313 		reqsk_free(req1);
1314 	}
1315 	if (skip_cookie) {
1316 		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1317 		return true;
1318 	}
1319 
1320 	if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
1321 		if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
1322 			tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
1323 						ip_hdr(skb)->daddr, valid_foc);
1324 			if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
1325 			    memcmp(&foc->val[0], &valid_foc->val[0],
1326 			    TCP_FASTOPEN_COOKIE_SIZE) != 0)
1327 				return false;
1328 			valid_foc->len = -1;
1329 		}
1330 		/* Acknowledge the data received from the peer. */
1331 		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1332 		return true;
1333 	} else if (foc->len == 0) { /* Client requesting a cookie */
1334 		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
1335 					ip_hdr(skb)->daddr, valid_foc);
1336 		NET_INC_STATS_BH(sock_net(sk),
1337 		    LINUX_MIB_TCPFASTOPENCOOKIEREQD);
1338 	} else {
1339 		/* Client sent a cookie with wrong size. Treat it
1340 		 * the same as invalid and return a valid one.
1341 		 */
1342 		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
1343 					ip_hdr(skb)->daddr, valid_foc);
1344 	}
1345 	return false;
1346 }
1347 
1348 static int tcp_v4_conn_req_fastopen(struct sock *sk,
1349 				    struct sk_buff *skb,
1350 				    struct sk_buff *skb_synack,
1351 				    struct request_sock *req)
1352 {
1353 	struct tcp_sock *tp = tcp_sk(sk);
1354 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1355 	const struct inet_request_sock *ireq = inet_rsk(req);
1356 	struct sock *child;
1357 	int err;
1358 
1359 	req->num_retrans = 0;
1360 	req->num_timeout = 0;
1361 	req->sk = NULL;
1362 
1363 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
1364 	if (child == NULL) {
1365 		NET_INC_STATS_BH(sock_net(sk),
1366 				 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1367 		kfree_skb(skb_synack);
1368 		return -1;
1369 	}
1370 	err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
1371 				    ireq->ir_rmt_addr, ireq->opt);
1372 	err = net_xmit_eval(err);
1373 	if (!err)
1374 		tcp_rsk(req)->snt_synack = tcp_time_stamp;
1375 	/* XXX (TFO) - is it ok to ignore error and continue? */
1376 
1377 	spin_lock(&queue->fastopenq->lock);
1378 	queue->fastopenq->qlen++;
1379 	spin_unlock(&queue->fastopenq->lock);
1380 
1381 	/* Initialize the child socket. Have to fix some values to take
1382 	 * into account the child is a Fast Open socket and is created
1383 	 * only out of the bits carried in the SYN packet.
1384 	 */
1385 	tp = tcp_sk(child);
1386 
1387 	tp->fastopen_rsk = req;
1388 	/* Do a hold on the listner sk so that if the listener is being
1389 	 * closed, the child that has been accepted can live on and still
1390 	 * access listen_lock.
1391 	 */
1392 	sock_hold(sk);
1393 	tcp_rsk(req)->listener = sk;
1394 
1395 	/* RFC1323: The window in SYN & SYN/ACK segments is never
1396 	 * scaled. So correct it appropriately.
1397 	 */
1398 	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
1399 
1400 	/* Activate the retrans timer so that SYNACK can be retransmitted.
1401 	 * The request socket is not added to the SYN table of the parent
1402 	 * because it's been added to the accept queue directly.
1403 	 */
1404 	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
1405 	    TCP_TIMEOUT_INIT, TCP_RTO_MAX);
1406 
1407 	/* Add the child socket directly into the accept queue */
1408 	inet_csk_reqsk_queue_add(sk, req, child);
1409 
1410 	/* Now finish processing the fastopen child socket. */
1411 	inet_csk(child)->icsk_af_ops->rebuild_header(child);
1412 	tcp_init_congestion_control(child);
1413 	tcp_mtup_init(child);
1414 	tcp_init_metrics(child);
1415 	tcp_init_buffer_space(child);
1416 
1417 	/* Queue the data carried in the SYN packet. We need to first
1418 	 * bump skb's refcnt because the caller will attempt to free it.
1419 	 *
1420 	 * XXX (TFO) - we honor a zero-payload TFO request for now.
1421 	 * (Any reason not to?)
1422 	 */
1423 	if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
1424 		/* Don't queue the skb if there is no payload in SYN.
1425 		 * XXX (TFO) - How about SYN+FIN?
1426 		 */
1427 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1428 	} else {
1429 		skb = skb_get(skb);
1430 		skb_dst_drop(skb);
1431 		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
1432 		skb_set_owner_r(skb, child);
1433 		__skb_queue_tail(&child->sk_receive_queue, skb);
1434 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1435 		tp->syn_data_acked = 1;
1436 	}
1437 	sk->sk_data_ready(sk, 0);
1438 	bh_unlock_sock(child);
1439 	sock_put(child);
1440 	WARN_ON(req->sk == NULL);
1441 	return 0;
1442 }
1443 
1444 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1445 {
1446 	struct tcp_options_received tmp_opt;
1447 	struct request_sock *req;
1448 	struct inet_request_sock *ireq;
1449 	struct tcp_sock *tp = tcp_sk(sk);
1450 	struct dst_entry *dst = NULL;
1451 	__be32 saddr = ip_hdr(skb)->saddr;
1452 	__be32 daddr = ip_hdr(skb)->daddr;
1453 	__u32 isn = TCP_SKB_CB(skb)->when;
1454 	bool want_cookie = false;
1455 	struct flowi4 fl4;
1456 	struct tcp_fastopen_cookie foc = { .len = -1 };
1457 	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
1458 	struct sk_buff *skb_synack;
1459 	int do_fastopen;
1460 
1461 	/* Never answer to SYNs send to broadcast or multicast */
1462 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1463 		goto drop;
1464 
1465 	/* TW buckets are converted to open requests without
1466 	 * limitations, they conserve resources and peer is
1467 	 * evidently real one.
1468 	 */
1469 	if ((sysctl_tcp_syncookies == 2 ||
1470 	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
1471 		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1472 		if (!want_cookie)
1473 			goto drop;
1474 	}
1475 
1476 	/* Accept backlog is full. If we have already queued enough
1477 	 * of warm entries in syn queue, drop request. It is better than
1478 	 * clogging syn queue with openreqs with exponentially increasing
1479 	 * timeout.
1480 	 */
1481 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
1482 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1483 		goto drop;
1484 	}
1485 
1486 	req = inet_reqsk_alloc(&tcp_request_sock_ops);
1487 	if (!req)
1488 		goto drop;
1489 
1490 #ifdef CONFIG_TCP_MD5SIG
1491 	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1492 #endif
1493 
1494 	tcp_clear_options(&tmp_opt);
1495 	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1496 	tmp_opt.user_mss  = tp->rx_opt.user_mss;
1497 	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
1498 
1499 	if (want_cookie && !tmp_opt.saw_tstamp)
1500 		tcp_clear_options(&tmp_opt);
1501 
1502 	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1503 	tcp_openreq_init(req, &tmp_opt, skb);
1504 
1505 	ireq = inet_rsk(req);
1506 	ireq->ir_loc_addr = daddr;
1507 	ireq->ir_rmt_addr = saddr;
1508 	ireq->no_srccheck = inet_sk(sk)->transparent;
1509 	ireq->opt = tcp_v4_save_options(skb);
1510 
1511 	if (security_inet_conn_request(sk, skb, req))
1512 		goto drop_and_free;
1513 
1514 	if (!want_cookie || tmp_opt.tstamp_ok)
1515 		TCP_ECN_create_request(req, skb, sock_net(sk));
1516 
1517 	if (want_cookie) {
1518 		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1519 		req->cookie_ts = tmp_opt.tstamp_ok;
1520 	} else if (!isn) {
1521 		/* VJ's idea. We save last timestamp seen
1522 		 * from the destination in peer table, when entering
1523 		 * state TIME-WAIT, and check against it before
1524 		 * accepting new connection request.
1525 		 *
1526 		 * If "isn" is not zero, this request hit alive
1527 		 * timewait bucket, so that all the necessary checks
1528 		 * are made in the function processing timewait state.
1529 		 */
1530 		if (tmp_opt.saw_tstamp &&
1531 		    tcp_death_row.sysctl_tw_recycle &&
1532 		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1533 		    fl4.daddr == saddr) {
1534 			if (!tcp_peer_is_proven(req, dst, true)) {
1535 				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1536 				goto drop_and_release;
1537 			}
1538 		}
1539 		/* Kill the following clause, if you dislike this way. */
1540 		else if (!sysctl_tcp_syncookies &&
1541 			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1542 			  (sysctl_max_syn_backlog >> 2)) &&
1543 			 !tcp_peer_is_proven(req, dst, false)) {
1544 			/* Without syncookies last quarter of
1545 			 * backlog is filled with destinations,
1546 			 * proven to be alive.
1547 			 * It means that we continue to communicate
1548 			 * to destinations, already remembered
1549 			 * to the moment of synflood.
1550 			 */
1551 			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1552 				       &saddr, ntohs(tcp_hdr(skb)->source));
1553 			goto drop_and_release;
1554 		}
1555 
1556 		isn = tcp_v4_init_sequence(skb);
1557 	}
1558 	tcp_rsk(req)->snt_isn = isn;
1559 
1560 	if (dst == NULL) {
1561 		dst = inet_csk_route_req(sk, &fl4, req);
1562 		if (dst == NULL)
1563 			goto drop_and_free;
1564 	}
1565 	do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
1566 
1567 	/* We don't call tcp_v4_send_synack() directly because we need
1568 	 * to make sure a child socket can be created successfully before
1569 	 * sending back synack!
1570 	 *
1571 	 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
1572 	 * (or better yet, call tcp_send_synack() in the child context
1573 	 * directly, but will have to fix bunch of other code first)
1574 	 * after syn_recv_sock() except one will need to first fix the
1575 	 * latter to remove its dependency on the current implementation
1576 	 * of tcp_v4_send_synack()->tcp_select_initial_window().
1577 	 */
1578 	skb_synack = tcp_make_synack(sk, dst, req,
1579 	    fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
1580 
1581 	if (skb_synack) {
1582 		__tcp_v4_send_check(skb_synack, ireq->ir_loc_addr, ireq->ir_rmt_addr);
1583 		skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
1584 	} else
1585 		goto drop_and_free;
1586 
1587 	if (likely(!do_fastopen)) {
1588 		int err;
1589 		err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
1590 		     ireq->ir_rmt_addr, ireq->opt);
1591 		err = net_xmit_eval(err);
1592 		if (err || want_cookie)
1593 			goto drop_and_free;
1594 
1595 		tcp_rsk(req)->snt_synack = tcp_time_stamp;
1596 		tcp_rsk(req)->listener = NULL;
1597 		/* Add the request_sock to the SYN table */
1598 		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1599 		if (fastopen_cookie_present(&foc) && foc.len != 0)
1600 			NET_INC_STATS_BH(sock_net(sk),
1601 			    LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1602 	} else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
1603 		goto drop_and_free;
1604 
1605 	return 0;
1606 
1607 drop_and_release:
1608 	dst_release(dst);
1609 drop_and_free:
1610 	reqsk_free(req);
1611 drop:
1612 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1613 	return 0;
1614 }
1615 EXPORT_SYMBOL(tcp_v4_conn_request);
1616 
1617 
1618 /*
1619  * The three way handshake has completed - we got a valid synack -
1620  * now create the new socket.
1621  */
1622 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1623 				  struct request_sock *req,
1624 				  struct dst_entry *dst)
1625 {
1626 	struct inet_request_sock *ireq;
1627 	struct inet_sock *newinet;
1628 	struct tcp_sock *newtp;
1629 	struct sock *newsk;
1630 #ifdef CONFIG_TCP_MD5SIG
1631 	struct tcp_md5sig_key *key;
1632 #endif
1633 	struct ip_options_rcu *inet_opt;
1634 
1635 	if (sk_acceptq_is_full(sk))
1636 		goto exit_overflow;
1637 
1638 	newsk = tcp_create_openreq_child(sk, req, skb);
1639 	if (!newsk)
1640 		goto exit_nonewsk;
1641 
1642 	newsk->sk_gso_type = SKB_GSO_TCPV4;
1643 	inet_sk_rx_dst_set(newsk, skb);
1644 
1645 	newtp		      = tcp_sk(newsk);
1646 	newinet		      = inet_sk(newsk);
1647 	ireq		      = inet_rsk(req);
1648 	newinet->inet_daddr   = ireq->ir_rmt_addr;
1649 	newinet->inet_rcv_saddr = ireq->ir_loc_addr;
1650 	newinet->inet_saddr	      = ireq->ir_loc_addr;
1651 	inet_opt	      = ireq->opt;
1652 	rcu_assign_pointer(newinet->inet_opt, inet_opt);
1653 	ireq->opt	      = NULL;
1654 	newinet->mc_index     = inet_iif(skb);
1655 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1656 	newinet->rcv_tos      = ip_hdr(skb)->tos;
1657 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1658 	if (inet_opt)
1659 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1660 	newinet->inet_id = newtp->write_seq ^ jiffies;
1661 
1662 	if (!dst) {
1663 		dst = inet_csk_route_child_sock(sk, newsk, req);
1664 		if (!dst)
1665 			goto put_and_exit;
1666 	} else {
1667 		/* syncookie case : see end of cookie_v4_check() */
1668 	}
1669 	sk_setup_caps(newsk, dst);
1670 
1671 	tcp_mtup_init(newsk);
1672 	tcp_sync_mss(newsk, dst_mtu(dst));
1673 	newtp->advmss = dst_metric_advmss(dst);
1674 	if (tcp_sk(sk)->rx_opt.user_mss &&
1675 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1676 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1677 
1678 	tcp_initialize_rcv_mss(newsk);
1679 
1680 #ifdef CONFIG_TCP_MD5SIG
1681 	/* Copy over the MD5 key from the original socket */
1682 	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1683 				AF_INET);
1684 	if (key != NULL) {
1685 		/*
1686 		 * We're using one, so create a matching key
1687 		 * on the newsk structure. If we fail to get
1688 		 * memory, then we end up not copying the key
1689 		 * across. Shucks.
1690 		 */
1691 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1692 			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
1693 		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1694 	}
1695 #endif
1696 
1697 	if (__inet_inherit_port(sk, newsk) < 0)
1698 		goto put_and_exit;
1699 	__inet_hash_nolisten(newsk, NULL);
1700 
1701 	return newsk;
1702 
1703 exit_overflow:
1704 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1705 exit_nonewsk:
1706 	dst_release(dst);
1707 exit:
1708 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1709 	return NULL;
1710 put_and_exit:
1711 	inet_csk_prepare_forced_close(newsk);
1712 	tcp_done(newsk);
1713 	goto exit;
1714 }
1715 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1716 
1717 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1718 {
1719 	struct tcphdr *th = tcp_hdr(skb);
1720 	const struct iphdr *iph = ip_hdr(skb);
1721 	struct sock *nsk;
1722 	struct request_sock **prev;
1723 	/* Find possible connection requests. */
1724 	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1725 						       iph->saddr, iph->daddr);
1726 	if (req)
1727 		return tcp_check_req(sk, skb, req, prev, false);
1728 
1729 	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1730 			th->source, iph->daddr, th->dest, inet_iif(skb));
1731 
1732 	if (nsk) {
1733 		if (nsk->sk_state != TCP_TIME_WAIT) {
1734 			bh_lock_sock(nsk);
1735 			return nsk;
1736 		}
1737 		inet_twsk_put(inet_twsk(nsk));
1738 		return NULL;
1739 	}
1740 
1741 #ifdef CONFIG_SYN_COOKIES
1742 	if (!th->syn)
1743 		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1744 #endif
1745 	return sk;
1746 }
1747 
1748 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1749 {
1750 	const struct iphdr *iph = ip_hdr(skb);
1751 
1752 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1753 		if (!tcp_v4_check(skb->len, iph->saddr,
1754 				  iph->daddr, skb->csum)) {
1755 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1756 			return 0;
1757 		}
1758 	}
1759 
1760 	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1761 				       skb->len, IPPROTO_TCP, 0);
1762 
1763 	if (skb->len <= 76) {
1764 		return __skb_checksum_complete(skb);
1765 	}
1766 	return 0;
1767 }
1768 
1769 
1770 /* The socket must have it's spinlock held when we get
1771  * here.
1772  *
1773  * We have a potential double-lock case here, so even when
1774  * doing backlog processing we use the BH locking scheme.
1775  * This is because we cannot sleep with the original spinlock
1776  * held.
1777  */
1778 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1779 {
1780 	struct sock *rsk;
1781 #ifdef CONFIG_TCP_MD5SIG
1782 	/*
1783 	 * We really want to reject the packet as early as possible
1784 	 * if:
1785 	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
1786 	 *  o There is an MD5 option and we're not expecting one
1787 	 */
1788 	if (tcp_v4_inbound_md5_hash(sk, skb))
1789 		goto discard;
1790 #endif
1791 
1792 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1793 		struct dst_entry *dst = sk->sk_rx_dst;
1794 
1795 		sock_rps_save_rxhash(sk, skb);
1796 		if (dst) {
1797 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1798 			    dst->ops->check(dst, 0) == NULL) {
1799 				dst_release(dst);
1800 				sk->sk_rx_dst = NULL;
1801 			}
1802 		}
1803 		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1804 		return 0;
1805 	}
1806 
1807 	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1808 		goto csum_err;
1809 
1810 	if (sk->sk_state == TCP_LISTEN) {
1811 		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1812 		if (!nsk)
1813 			goto discard;
1814 
1815 		if (nsk != sk) {
1816 			sock_rps_save_rxhash(nsk, skb);
1817 			if (tcp_child_process(sk, nsk, skb)) {
1818 				rsk = nsk;
1819 				goto reset;
1820 			}
1821 			return 0;
1822 		}
1823 	} else
1824 		sock_rps_save_rxhash(sk, skb);
1825 
1826 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1827 		rsk = sk;
1828 		goto reset;
1829 	}
1830 	return 0;
1831 
1832 reset:
1833 	tcp_v4_send_reset(rsk, skb);
1834 discard:
1835 	kfree_skb(skb);
1836 	/* Be careful here. If this function gets more complicated and
1837 	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1838 	 * might be destroyed here. This current version compiles correctly,
1839 	 * but you have been warned.
1840 	 */
1841 	return 0;
1842 
1843 csum_err:
1844 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1845 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1846 	goto discard;
1847 }
1848 EXPORT_SYMBOL(tcp_v4_do_rcv);
1849 
1850 void tcp_v4_early_demux(struct sk_buff *skb)
1851 {
1852 	const struct iphdr *iph;
1853 	const struct tcphdr *th;
1854 	struct sock *sk;
1855 
1856 	if (skb->pkt_type != PACKET_HOST)
1857 		return;
1858 
1859 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1860 		return;
1861 
1862 	iph = ip_hdr(skb);
1863 	th = tcp_hdr(skb);
1864 
1865 	if (th->doff < sizeof(struct tcphdr) / 4)
1866 		return;
1867 
1868 	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1869 				       iph->saddr, th->source,
1870 				       iph->daddr, ntohs(th->dest),
1871 				       skb->skb_iif);
1872 	if (sk) {
1873 		skb->sk = sk;
1874 		skb->destructor = sock_edemux;
1875 		if (sk->sk_state != TCP_TIME_WAIT) {
1876 			struct dst_entry *dst = sk->sk_rx_dst;
1877 
1878 			if (dst)
1879 				dst = dst_check(dst, 0);
1880 			if (dst &&
1881 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1882 				skb_dst_set_noref(skb, dst);
1883 		}
1884 	}
1885 }
1886 
1887 /* Packet is added to VJ-style prequeue for processing in process
1888  * context, if a reader task is waiting. Apparently, this exciting
1889  * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1890  * failed somewhere. Latency? Burstiness? Well, at least now we will
1891  * see, why it failed. 8)8)				  --ANK
1892  *
1893  */
1894 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1895 {
1896 	struct tcp_sock *tp = tcp_sk(sk);
1897 
1898 	if (sysctl_tcp_low_latency || !tp->ucopy.task)
1899 		return false;
1900 
1901 	if (skb->len <= tcp_hdrlen(skb) &&
1902 	    skb_queue_len(&tp->ucopy.prequeue) == 0)
1903 		return false;
1904 
1905 	skb_dst_force(skb);
1906 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
1907 	tp->ucopy.memory += skb->truesize;
1908 	if (tp->ucopy.memory > sk->sk_rcvbuf) {
1909 		struct sk_buff *skb1;
1910 
1911 		BUG_ON(sock_owned_by_user(sk));
1912 
1913 		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1914 			sk_backlog_rcv(sk, skb1);
1915 			NET_INC_STATS_BH(sock_net(sk),
1916 					 LINUX_MIB_TCPPREQUEUEDROPPED);
1917 		}
1918 
1919 		tp->ucopy.memory = 0;
1920 	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1921 		wake_up_interruptible_sync_poll(sk_sleep(sk),
1922 					   POLLIN | POLLRDNORM | POLLRDBAND);
1923 		if (!inet_csk_ack_scheduled(sk))
1924 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1925 						  (3 * tcp_rto_min(sk)) / 4,
1926 						  TCP_RTO_MAX);
1927 	}
1928 	return true;
1929 }
1930 EXPORT_SYMBOL(tcp_prequeue);
1931 
1932 /*
1933  *	From tcp_input.c
1934  */
1935 
1936 int tcp_v4_rcv(struct sk_buff *skb)
1937 {
1938 	const struct iphdr *iph;
1939 	const struct tcphdr *th;
1940 	struct sock *sk;
1941 	int ret;
1942 	struct net *net = dev_net(skb->dev);
1943 
1944 	if (skb->pkt_type != PACKET_HOST)
1945 		goto discard_it;
1946 
1947 	/* Count it even if it's bad */
1948 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1949 
1950 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1951 		goto discard_it;
1952 
1953 	th = tcp_hdr(skb);
1954 
1955 	if (th->doff < sizeof(struct tcphdr) / 4)
1956 		goto bad_packet;
1957 	if (!pskb_may_pull(skb, th->doff * 4))
1958 		goto discard_it;
1959 
1960 	/* An explanation is required here, I think.
1961 	 * Packet length and doff are validated by header prediction,
1962 	 * provided case of th->doff==0 is eliminated.
1963 	 * So, we defer the checks. */
1964 	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1965 		goto csum_error;
1966 
1967 	th = tcp_hdr(skb);
1968 	iph = ip_hdr(skb);
1969 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1970 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1971 				    skb->len - th->doff * 4);
1972 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1973 	TCP_SKB_CB(skb)->when	 = 0;
1974 	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1975 	TCP_SKB_CB(skb)->sacked	 = 0;
1976 
1977 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1978 	if (!sk)
1979 		goto no_tcp_socket;
1980 
1981 process:
1982 	if (sk->sk_state == TCP_TIME_WAIT)
1983 		goto do_time_wait;
1984 
1985 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1986 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1987 		goto discard_and_relse;
1988 	}
1989 
1990 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1991 		goto discard_and_relse;
1992 	nf_reset(skb);
1993 
1994 	if (sk_filter(sk, skb))
1995 		goto discard_and_relse;
1996 
1997 	sk_mark_napi_id(sk, skb);
1998 	skb->dev = NULL;
1999 
2000 	bh_lock_sock_nested(sk);
2001 	ret = 0;
2002 	if (!sock_owned_by_user(sk)) {
2003 #ifdef CONFIG_NET_DMA
2004 		struct tcp_sock *tp = tcp_sk(sk);
2005 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
2006 			tp->ucopy.dma_chan = net_dma_find_channel();
2007 		if (tp->ucopy.dma_chan)
2008 			ret = tcp_v4_do_rcv(sk, skb);
2009 		else
2010 #endif
2011 		{
2012 			if (!tcp_prequeue(sk, skb))
2013 				ret = tcp_v4_do_rcv(sk, skb);
2014 		}
2015 	} else if (unlikely(sk_add_backlog(sk, skb,
2016 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
2017 		bh_unlock_sock(sk);
2018 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
2019 		goto discard_and_relse;
2020 	}
2021 	bh_unlock_sock(sk);
2022 
2023 	sock_put(sk);
2024 
2025 	return ret;
2026 
2027 no_tcp_socket:
2028 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2029 		goto discard_it;
2030 
2031 	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
2032 csum_error:
2033 		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
2034 bad_packet:
2035 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
2036 	} else {
2037 		tcp_v4_send_reset(NULL, skb);
2038 	}
2039 
2040 discard_it:
2041 	/* Discard frame. */
2042 	kfree_skb(skb);
2043 	return 0;
2044 
2045 discard_and_relse:
2046 	sock_put(sk);
2047 	goto discard_it;
2048 
2049 do_time_wait:
2050 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2051 		inet_twsk_put(inet_twsk(sk));
2052 		goto discard_it;
2053 	}
2054 
2055 	if (skb->len < (th->doff << 2)) {
2056 		inet_twsk_put(inet_twsk(sk));
2057 		goto bad_packet;
2058 	}
2059 	if (tcp_checksum_complete(skb)) {
2060 		inet_twsk_put(inet_twsk(sk));
2061 		goto csum_error;
2062 	}
2063 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
2064 	case TCP_TW_SYN: {
2065 		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
2066 							&tcp_hashinfo,
2067 							iph->saddr, th->source,
2068 							iph->daddr, th->dest,
2069 							inet_iif(skb));
2070 		if (sk2) {
2071 			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
2072 			inet_twsk_put(inet_twsk(sk));
2073 			sk = sk2;
2074 			goto process;
2075 		}
2076 		/* Fall through to ACK */
2077 	}
2078 	case TCP_TW_ACK:
2079 		tcp_v4_timewait_ack(sk, skb);
2080 		break;
2081 	case TCP_TW_RST:
2082 		goto no_tcp_socket;
2083 	case TCP_TW_SUCCESS:;
2084 	}
2085 	goto discard_it;
2086 }
2087 
2088 static struct timewait_sock_ops tcp_timewait_sock_ops = {
2089 	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
2090 	.twsk_unique	= tcp_twsk_unique,
2091 	.twsk_destructor= tcp_twsk_destructor,
2092 };
2093 
2094 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2095 {
2096 	struct dst_entry *dst = skb_dst(skb);
2097 
2098 	dst_hold(dst);
2099 	sk->sk_rx_dst = dst;
2100 	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2101 }
2102 EXPORT_SYMBOL(inet_sk_rx_dst_set);
2103 
2104 const struct inet_connection_sock_af_ops ipv4_specific = {
2105 	.queue_xmit	   = ip_queue_xmit,
2106 	.send_check	   = tcp_v4_send_check,
2107 	.rebuild_header	   = inet_sk_rebuild_header,
2108 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
2109 	.conn_request	   = tcp_v4_conn_request,
2110 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
2111 	.net_header_len	   = sizeof(struct iphdr),
2112 	.setsockopt	   = ip_setsockopt,
2113 	.getsockopt	   = ip_getsockopt,
2114 	.addr2sockaddr	   = inet_csk_addr2sockaddr,
2115 	.sockaddr_len	   = sizeof(struct sockaddr_in),
2116 	.bind_conflict	   = inet_csk_bind_conflict,
2117 #ifdef CONFIG_COMPAT
2118 	.compat_setsockopt = compat_ip_setsockopt,
2119 	.compat_getsockopt = compat_ip_getsockopt,
2120 #endif
2121 };
2122 EXPORT_SYMBOL(ipv4_specific);
2123 
2124 #ifdef CONFIG_TCP_MD5SIG
2125 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2126 	.md5_lookup		= tcp_v4_md5_lookup,
2127 	.calc_md5_hash		= tcp_v4_md5_hash_skb,
2128 	.md5_parse		= tcp_v4_parse_md5_keys,
2129 };
2130 #endif
2131 
2132 /* NOTE: A lot of things set to zero explicitly by call to
2133  *       sk_alloc() so need not be done here.
2134  */
2135 static int tcp_v4_init_sock(struct sock *sk)
2136 {
2137 	struct inet_connection_sock *icsk = inet_csk(sk);
2138 
2139 	tcp_init_sock(sk);
2140 
2141 	icsk->icsk_af_ops = &ipv4_specific;
2142 
2143 #ifdef CONFIG_TCP_MD5SIG
2144 	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2145 #endif
2146 
2147 	return 0;
2148 }
2149 
2150 void tcp_v4_destroy_sock(struct sock *sk)
2151 {
2152 	struct tcp_sock *tp = tcp_sk(sk);
2153 
2154 	tcp_clear_xmit_timers(sk);
2155 
2156 	tcp_cleanup_congestion_control(sk);
2157 
2158 	/* Cleanup up the write buffer. */
2159 	tcp_write_queue_purge(sk);
2160 
2161 	/* Cleans up our, hopefully empty, out_of_order_queue. */
2162 	__skb_queue_purge(&tp->out_of_order_queue);
2163 
2164 #ifdef CONFIG_TCP_MD5SIG
2165 	/* Clean up the MD5 key list, if any */
2166 	if (tp->md5sig_info) {
2167 		tcp_clear_md5_list(sk);
2168 		kfree_rcu(tp->md5sig_info, rcu);
2169 		tp->md5sig_info = NULL;
2170 	}
2171 #endif
2172 
2173 #ifdef CONFIG_NET_DMA
2174 	/* Cleans up our sk_async_wait_queue */
2175 	__skb_queue_purge(&sk->sk_async_wait_queue);
2176 #endif
2177 
2178 	/* Clean prequeue, it must be empty really */
2179 	__skb_queue_purge(&tp->ucopy.prequeue);
2180 
2181 	/* Clean up a referenced TCP bind bucket. */
2182 	if (inet_csk(sk)->icsk_bind_hash)
2183 		inet_put_port(sk);
2184 
2185 	BUG_ON(tp->fastopen_rsk != NULL);
2186 
2187 	/* If socket is aborted during connect operation */
2188 	tcp_free_fastopen_req(tp);
2189 
2190 	sk_sockets_allocated_dec(sk);
2191 	sock_release_memcg(sk);
2192 }
2193 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2194 
2195 #ifdef CONFIG_PROC_FS
2196 /* Proc filesystem TCP sock list dumping. */
2197 
2198 /*
2199  * Get next listener socket follow cur.  If cur is NULL, get first socket
2200  * starting from bucket given in st->bucket; when st->bucket is zero the
2201  * very first socket in the hash table is returned.
2202  */
2203 static void *listening_get_next(struct seq_file *seq, void *cur)
2204 {
2205 	struct inet_connection_sock *icsk;
2206 	struct hlist_nulls_node *node;
2207 	struct sock *sk = cur;
2208 	struct inet_listen_hashbucket *ilb;
2209 	struct tcp_iter_state *st = seq->private;
2210 	struct net *net = seq_file_net(seq);
2211 
2212 	if (!sk) {
2213 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
2214 		spin_lock_bh(&ilb->lock);
2215 		sk = sk_nulls_head(&ilb->head);
2216 		st->offset = 0;
2217 		goto get_sk;
2218 	}
2219 	ilb = &tcp_hashinfo.listening_hash[st->bucket];
2220 	++st->num;
2221 	++st->offset;
2222 
2223 	if (st->state == TCP_SEQ_STATE_OPENREQ) {
2224 		struct request_sock *req = cur;
2225 
2226 		icsk = inet_csk(st->syn_wait_sk);
2227 		req = req->dl_next;
2228 		while (1) {
2229 			while (req) {
2230 				if (req->rsk_ops->family == st->family) {
2231 					cur = req;
2232 					goto out;
2233 				}
2234 				req = req->dl_next;
2235 			}
2236 			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2237 				break;
2238 get_req:
2239 			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2240 		}
2241 		sk	  = sk_nulls_next(st->syn_wait_sk);
2242 		st->state = TCP_SEQ_STATE_LISTENING;
2243 		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2244 	} else {
2245 		icsk = inet_csk(sk);
2246 		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2247 		if (reqsk_queue_len(&icsk->icsk_accept_queue))
2248 			goto start_req;
2249 		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2250 		sk = sk_nulls_next(sk);
2251 	}
2252 get_sk:
2253 	sk_nulls_for_each_from(sk, node) {
2254 		if (!net_eq(sock_net(sk), net))
2255 			continue;
2256 		if (sk->sk_family == st->family) {
2257 			cur = sk;
2258 			goto out;
2259 		}
2260 		icsk = inet_csk(sk);
2261 		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2262 		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2263 start_req:
2264 			st->uid		= sock_i_uid(sk);
2265 			st->syn_wait_sk = sk;
2266 			st->state	= TCP_SEQ_STATE_OPENREQ;
2267 			st->sbucket	= 0;
2268 			goto get_req;
2269 		}
2270 		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2271 	}
2272 	spin_unlock_bh(&ilb->lock);
2273 	st->offset = 0;
2274 	if (++st->bucket < INET_LHTABLE_SIZE) {
2275 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
2276 		spin_lock_bh(&ilb->lock);
2277 		sk = sk_nulls_head(&ilb->head);
2278 		goto get_sk;
2279 	}
2280 	cur = NULL;
2281 out:
2282 	return cur;
2283 }
2284 
2285 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2286 {
2287 	struct tcp_iter_state *st = seq->private;
2288 	void *rc;
2289 
2290 	st->bucket = 0;
2291 	st->offset = 0;
2292 	rc = listening_get_next(seq, NULL);
2293 
2294 	while (rc && *pos) {
2295 		rc = listening_get_next(seq, rc);
2296 		--*pos;
2297 	}
2298 	return rc;
2299 }
2300 
2301 static inline bool empty_bucket(const struct tcp_iter_state *st)
2302 {
2303 	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
2304 }
2305 
2306 /*
2307  * Get first established socket starting from bucket given in st->bucket.
2308  * If st->bucket is zero, the very first socket in the hash is returned.
2309  */
2310 static void *established_get_first(struct seq_file *seq)
2311 {
2312 	struct tcp_iter_state *st = seq->private;
2313 	struct net *net = seq_file_net(seq);
2314 	void *rc = NULL;
2315 
2316 	st->offset = 0;
2317 	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2318 		struct sock *sk;
2319 		struct hlist_nulls_node *node;
2320 		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2321 
2322 		/* Lockless fast path for the common case of empty buckets */
2323 		if (empty_bucket(st))
2324 			continue;
2325 
2326 		spin_lock_bh(lock);
2327 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2328 			if (sk->sk_family != st->family ||
2329 			    !net_eq(sock_net(sk), net)) {
2330 				continue;
2331 			}
2332 			rc = sk;
2333 			goto out;
2334 		}
2335 		spin_unlock_bh(lock);
2336 	}
2337 out:
2338 	return rc;
2339 }
2340 
2341 static void *established_get_next(struct seq_file *seq, void *cur)
2342 {
2343 	struct sock *sk = cur;
2344 	struct hlist_nulls_node *node;
2345 	struct tcp_iter_state *st = seq->private;
2346 	struct net *net = seq_file_net(seq);
2347 
2348 	++st->num;
2349 	++st->offset;
2350 
2351 	sk = sk_nulls_next(sk);
2352 
2353 	sk_nulls_for_each_from(sk, node) {
2354 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2355 			return sk;
2356 	}
2357 
2358 	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2359 	++st->bucket;
2360 	return established_get_first(seq);
2361 }
2362 
2363 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2364 {
2365 	struct tcp_iter_state *st = seq->private;
2366 	void *rc;
2367 
2368 	st->bucket = 0;
2369 	rc = established_get_first(seq);
2370 
2371 	while (rc && pos) {
2372 		rc = established_get_next(seq, rc);
2373 		--pos;
2374 	}
2375 	return rc;
2376 }
2377 
2378 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2379 {
2380 	void *rc;
2381 	struct tcp_iter_state *st = seq->private;
2382 
2383 	st->state = TCP_SEQ_STATE_LISTENING;
2384 	rc	  = listening_get_idx(seq, &pos);
2385 
2386 	if (!rc) {
2387 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2388 		rc	  = established_get_idx(seq, pos);
2389 	}
2390 
2391 	return rc;
2392 }
2393 
2394 static void *tcp_seek_last_pos(struct seq_file *seq)
2395 {
2396 	struct tcp_iter_state *st = seq->private;
2397 	int offset = st->offset;
2398 	int orig_num = st->num;
2399 	void *rc = NULL;
2400 
2401 	switch (st->state) {
2402 	case TCP_SEQ_STATE_OPENREQ:
2403 	case TCP_SEQ_STATE_LISTENING:
2404 		if (st->bucket >= INET_LHTABLE_SIZE)
2405 			break;
2406 		st->state = TCP_SEQ_STATE_LISTENING;
2407 		rc = listening_get_next(seq, NULL);
2408 		while (offset-- && rc)
2409 			rc = listening_get_next(seq, rc);
2410 		if (rc)
2411 			break;
2412 		st->bucket = 0;
2413 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2414 		/* Fallthrough */
2415 	case TCP_SEQ_STATE_ESTABLISHED:
2416 		if (st->bucket > tcp_hashinfo.ehash_mask)
2417 			break;
2418 		rc = established_get_first(seq);
2419 		while (offset-- && rc)
2420 			rc = established_get_next(seq, rc);
2421 	}
2422 
2423 	st->num = orig_num;
2424 
2425 	return rc;
2426 }
2427 
2428 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2429 {
2430 	struct tcp_iter_state *st = seq->private;
2431 	void *rc;
2432 
2433 	if (*pos && *pos == st->last_pos) {
2434 		rc = tcp_seek_last_pos(seq);
2435 		if (rc)
2436 			goto out;
2437 	}
2438 
2439 	st->state = TCP_SEQ_STATE_LISTENING;
2440 	st->num = 0;
2441 	st->bucket = 0;
2442 	st->offset = 0;
2443 	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2444 
2445 out:
2446 	st->last_pos = *pos;
2447 	return rc;
2448 }
2449 
2450 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2451 {
2452 	struct tcp_iter_state *st = seq->private;
2453 	void *rc = NULL;
2454 
2455 	if (v == SEQ_START_TOKEN) {
2456 		rc = tcp_get_idx(seq, 0);
2457 		goto out;
2458 	}
2459 
2460 	switch (st->state) {
2461 	case TCP_SEQ_STATE_OPENREQ:
2462 	case TCP_SEQ_STATE_LISTENING:
2463 		rc = listening_get_next(seq, v);
2464 		if (!rc) {
2465 			st->state = TCP_SEQ_STATE_ESTABLISHED;
2466 			st->bucket = 0;
2467 			st->offset = 0;
2468 			rc	  = established_get_first(seq);
2469 		}
2470 		break;
2471 	case TCP_SEQ_STATE_ESTABLISHED:
2472 		rc = established_get_next(seq, v);
2473 		break;
2474 	}
2475 out:
2476 	++*pos;
2477 	st->last_pos = *pos;
2478 	return rc;
2479 }
2480 
2481 static void tcp_seq_stop(struct seq_file *seq, void *v)
2482 {
2483 	struct tcp_iter_state *st = seq->private;
2484 
2485 	switch (st->state) {
2486 	case TCP_SEQ_STATE_OPENREQ:
2487 		if (v) {
2488 			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2489 			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2490 		}
2491 	case TCP_SEQ_STATE_LISTENING:
2492 		if (v != SEQ_START_TOKEN)
2493 			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2494 		break;
2495 	case TCP_SEQ_STATE_ESTABLISHED:
2496 		if (v)
2497 			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2498 		break;
2499 	}
2500 }
2501 
2502 int tcp_seq_open(struct inode *inode, struct file *file)
2503 {
2504 	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2505 	struct tcp_iter_state *s;
2506 	int err;
2507 
2508 	err = seq_open_net(inode, file, &afinfo->seq_ops,
2509 			  sizeof(struct tcp_iter_state));
2510 	if (err < 0)
2511 		return err;
2512 
2513 	s = ((struct seq_file *)file->private_data)->private;
2514 	s->family		= afinfo->family;
2515 	s->last_pos 		= 0;
2516 	return 0;
2517 }
2518 EXPORT_SYMBOL(tcp_seq_open);
2519 
2520 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2521 {
2522 	int rc = 0;
2523 	struct proc_dir_entry *p;
2524 
2525 	afinfo->seq_ops.start		= tcp_seq_start;
2526 	afinfo->seq_ops.next		= tcp_seq_next;
2527 	afinfo->seq_ops.stop		= tcp_seq_stop;
2528 
2529 	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2530 			     afinfo->seq_fops, afinfo);
2531 	if (!p)
2532 		rc = -ENOMEM;
2533 	return rc;
2534 }
2535 EXPORT_SYMBOL(tcp_proc_register);
2536 
2537 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2538 {
2539 	remove_proc_entry(afinfo->name, net->proc_net);
2540 }
2541 EXPORT_SYMBOL(tcp_proc_unregister);
2542 
2543 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2544 			 struct seq_file *f, int i, kuid_t uid)
2545 {
2546 	const struct inet_request_sock *ireq = inet_rsk(req);
2547 	long delta = req->expires - jiffies;
2548 
2549 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2550 		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2551 		i,
2552 		ireq->ir_loc_addr,
2553 		ntohs(inet_sk(sk)->inet_sport),
2554 		ireq->ir_rmt_addr,
2555 		ntohs(ireq->ir_rmt_port),
2556 		TCP_SYN_RECV,
2557 		0, 0, /* could print option size, but that is af dependent. */
2558 		1,    /* timers active (only the expire timer) */
2559 		jiffies_delta_to_clock_t(delta),
2560 		req->num_timeout,
2561 		from_kuid_munged(seq_user_ns(f), uid),
2562 		0,  /* non standard timer */
2563 		0, /* open_requests have no inode */
2564 		atomic_read(&sk->sk_refcnt),
2565 		req);
2566 }
2567 
2568 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2569 {
2570 	int timer_active;
2571 	unsigned long timer_expires;
2572 	const struct tcp_sock *tp = tcp_sk(sk);
2573 	const struct inet_connection_sock *icsk = inet_csk(sk);
2574 	const struct inet_sock *inet = inet_sk(sk);
2575 	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2576 	__be32 dest = inet->inet_daddr;
2577 	__be32 src = inet->inet_rcv_saddr;
2578 	__u16 destp = ntohs(inet->inet_dport);
2579 	__u16 srcp = ntohs(inet->inet_sport);
2580 	int rx_queue;
2581 
2582 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2583 	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2584 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2585 		timer_active	= 1;
2586 		timer_expires	= icsk->icsk_timeout;
2587 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2588 		timer_active	= 4;
2589 		timer_expires	= icsk->icsk_timeout;
2590 	} else if (timer_pending(&sk->sk_timer)) {
2591 		timer_active	= 2;
2592 		timer_expires	= sk->sk_timer.expires;
2593 	} else {
2594 		timer_active	= 0;
2595 		timer_expires = jiffies;
2596 	}
2597 
2598 	if (sk->sk_state == TCP_LISTEN)
2599 		rx_queue = sk->sk_ack_backlog;
2600 	else
2601 		/*
2602 		 * because we dont lock socket, we might find a transient negative value
2603 		 */
2604 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2605 
2606 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2607 			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2608 		i, src, srcp, dest, destp, sk->sk_state,
2609 		tp->write_seq - tp->snd_una,
2610 		rx_queue,
2611 		timer_active,
2612 		jiffies_delta_to_clock_t(timer_expires - jiffies),
2613 		icsk->icsk_retransmits,
2614 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2615 		icsk->icsk_probes_out,
2616 		sock_i_ino(sk),
2617 		atomic_read(&sk->sk_refcnt), sk,
2618 		jiffies_to_clock_t(icsk->icsk_rto),
2619 		jiffies_to_clock_t(icsk->icsk_ack.ato),
2620 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2621 		tp->snd_cwnd,
2622 		sk->sk_state == TCP_LISTEN ?
2623 		    (fastopenq ? fastopenq->max_qlen : 0) :
2624 		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2625 }
2626 
2627 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2628 			       struct seq_file *f, int i)
2629 {
2630 	__be32 dest, src;
2631 	__u16 destp, srcp;
2632 	long delta = tw->tw_ttd - jiffies;
2633 
2634 	dest  = tw->tw_daddr;
2635 	src   = tw->tw_rcv_saddr;
2636 	destp = ntohs(tw->tw_dport);
2637 	srcp  = ntohs(tw->tw_sport);
2638 
2639 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2640 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2641 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2642 		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2643 		atomic_read(&tw->tw_refcnt), tw);
2644 }
2645 
2646 #define TMPSZ 150
2647 
2648 static int tcp4_seq_show(struct seq_file *seq, void *v)
2649 {
2650 	struct tcp_iter_state *st;
2651 	struct sock *sk = v;
2652 
2653 	seq_setwidth(seq, TMPSZ - 1);
2654 	if (v == SEQ_START_TOKEN) {
2655 		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2656 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2657 			   "inode");
2658 		goto out;
2659 	}
2660 	st = seq->private;
2661 
2662 	switch (st->state) {
2663 	case TCP_SEQ_STATE_LISTENING:
2664 	case TCP_SEQ_STATE_ESTABLISHED:
2665 		if (sk->sk_state == TCP_TIME_WAIT)
2666 			get_timewait4_sock(v, seq, st->num);
2667 		else
2668 			get_tcp4_sock(v, seq, st->num);
2669 		break;
2670 	case TCP_SEQ_STATE_OPENREQ:
2671 		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
2672 		break;
2673 	}
2674 out:
2675 	seq_pad(seq, '\n');
2676 	return 0;
2677 }
2678 
2679 static const struct file_operations tcp_afinfo_seq_fops = {
2680 	.owner   = THIS_MODULE,
2681 	.open    = tcp_seq_open,
2682 	.read    = seq_read,
2683 	.llseek  = seq_lseek,
2684 	.release = seq_release_net
2685 };
2686 
2687 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2688 	.name		= "tcp",
2689 	.family		= AF_INET,
2690 	.seq_fops	= &tcp_afinfo_seq_fops,
2691 	.seq_ops	= {
2692 		.show		= tcp4_seq_show,
2693 	},
2694 };
2695 
2696 static int __net_init tcp4_proc_init_net(struct net *net)
2697 {
2698 	return tcp_proc_register(net, &tcp4_seq_afinfo);
2699 }
2700 
2701 static void __net_exit tcp4_proc_exit_net(struct net *net)
2702 {
2703 	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2704 }
2705 
2706 static struct pernet_operations tcp4_net_ops = {
2707 	.init = tcp4_proc_init_net,
2708 	.exit = tcp4_proc_exit_net,
2709 };
2710 
2711 int __init tcp4_proc_init(void)
2712 {
2713 	return register_pernet_subsys(&tcp4_net_ops);
2714 }
2715 
2716 void tcp4_proc_exit(void)
2717 {
2718 	unregister_pernet_subsys(&tcp4_net_ops);
2719 }
2720 #endif /* CONFIG_PROC_FS */
2721 
2722 struct proto tcp_prot = {
2723 	.name			= "TCP",
2724 	.owner			= THIS_MODULE,
2725 	.close			= tcp_close,
2726 	.connect		= tcp_v4_connect,
2727 	.disconnect		= tcp_disconnect,
2728 	.accept			= inet_csk_accept,
2729 	.ioctl			= tcp_ioctl,
2730 	.init			= tcp_v4_init_sock,
2731 	.destroy		= tcp_v4_destroy_sock,
2732 	.shutdown		= tcp_shutdown,
2733 	.setsockopt		= tcp_setsockopt,
2734 	.getsockopt		= tcp_getsockopt,
2735 	.recvmsg		= tcp_recvmsg,
2736 	.sendmsg		= tcp_sendmsg,
2737 	.sendpage		= tcp_sendpage,
2738 	.backlog_rcv		= tcp_v4_do_rcv,
2739 	.release_cb		= tcp_release_cb,
2740 	.mtu_reduced		= tcp_v4_mtu_reduced,
2741 	.hash			= inet_hash,
2742 	.unhash			= inet_unhash,
2743 	.get_port		= inet_csk_get_port,
2744 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2745 	.stream_memory_free	= tcp_stream_memory_free,
2746 	.sockets_allocated	= &tcp_sockets_allocated,
2747 	.orphan_count		= &tcp_orphan_count,
2748 	.memory_allocated	= &tcp_memory_allocated,
2749 	.memory_pressure	= &tcp_memory_pressure,
2750 	.sysctl_mem		= sysctl_tcp_mem,
2751 	.sysctl_wmem		= sysctl_tcp_wmem,
2752 	.sysctl_rmem		= sysctl_tcp_rmem,
2753 	.max_header		= MAX_TCP_HEADER,
2754 	.obj_size		= sizeof(struct tcp_sock),
2755 	.slab_flags		= SLAB_DESTROY_BY_RCU,
2756 	.twsk_prot		= &tcp_timewait_sock_ops,
2757 	.rsk_prot		= &tcp_request_sock_ops,
2758 	.h.hashinfo		= &tcp_hashinfo,
2759 	.no_autobind		= true,
2760 #ifdef CONFIG_COMPAT
2761 	.compat_setsockopt	= compat_tcp_setsockopt,
2762 	.compat_getsockopt	= compat_tcp_getsockopt,
2763 #endif
2764 #ifdef CONFIG_MEMCG_KMEM
2765 	.init_cgroup		= tcp_init_cgroup,
2766 	.destroy_cgroup		= tcp_destroy_cgroup,
2767 	.proto_cgroup		= tcp_proto_cgroup,
2768 #endif
2769 };
2770 EXPORT_SYMBOL(tcp_prot);
2771 
2772 static int __net_init tcp_sk_init(struct net *net)
2773 {
2774 	net->ipv4.sysctl_tcp_ecn = 2;
2775 	return 0;
2776 }
2777 
2778 static void __net_exit tcp_sk_exit(struct net *net)
2779 {
2780 }
2781 
2782 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2783 {
2784 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2785 }
2786 
2787 static struct pernet_operations __net_initdata tcp_sk_ops = {
2788        .init	   = tcp_sk_init,
2789        .exit	   = tcp_sk_exit,
2790        .exit_batch = tcp_sk_exit_batch,
2791 };
2792 
2793 void __init tcp_v4_init(void)
2794 {
2795 	inet_hashinfo_init(&tcp_hashinfo);
2796 	if (register_pernet_subsys(&tcp_sk_ops))
2797 		panic("Failed to create the TCP control socket.\n");
2798 }
2799