xref: /openbmc/linux/net/ipv4/tcp_ipv4.c (revision 4f693b55c3d2d2239b8a0094b518a1e533cf75d5)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Implementation of the Transmission Control Protocol(TCP).
7  *
8  *		IPv4 specific functions
9  *
10  *
11  *		code split from:
12  *		linux/ipv4/tcp.c
13  *		linux/ipv4/tcp_input.c
14  *		linux/ipv4/tcp_output.c
15  *
16  *		See tcp.c for author information
17  *
18  *	This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23 
24 /*
25  * Changes:
26  *		David S. Miller	:	New socket lookup architecture.
27  *					This code is dedicated to John Dyson.
28  *		David S. Miller :	Change semantics of established hash,
29  *					half is devoted to TIME_WAIT sockets
30  *					and the rest go in the other half.
31  *		Andi Kleen :		Add support for syncookies and fixed
32  *					some bugs: ip options weren't passed to
33  *					the TCP layer, missed a check for an
34  *					ACK bit.
35  *		Andi Kleen :		Implemented fast path mtu discovery.
36  *	     				Fixed many serious bugs in the
37  *					request_sock handling and moved
38  *					most of it into the af independent code.
39  *					Added tail drop and some other bugfixes.
40  *					Added new listen semantics.
41  *		Mike McLagan	:	Routing by source
42  *	Juan Jose Ciarlante:		ip_dynaddr bits
43  *		Andi Kleen:		various fixes.
44  *	Vitaly E. Lavrov	:	Transparent proxy revived after a
45  *					year-long coma.
46  *	Andi Kleen		:	Fix new listen.
47  *	Andi Kleen		:	Fix accept error reporting.
48  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
49  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
50  *					a single port at the same time.
51  */
52 
53 #define pr_fmt(fmt) "TCP: " fmt
54 
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65 
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/busy_poll.h>
77 
78 #include <linux/inet.h>
79 #include <linux/ipv6.h>
80 #include <linux/stddef.h>
81 #include <linux/proc_fs.h>
82 #include <linux/seq_file.h>
83 #include <linux/inetdevice.h>
84 
85 #include <crypto/hash.h>
86 #include <linux/scatterlist.h>
87 
88 #include <trace/events/tcp.h>
89 
90 #ifdef CONFIG_TCP_MD5SIG
91 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
92 			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
93 #endif
94 
95 struct inet_hashinfo tcp_hashinfo;
96 EXPORT_SYMBOL(tcp_hashinfo);
97 
98 static u32 tcp_v4_init_seq(const struct sk_buff *skb)
99 {
100 	return secure_tcp_seq(ip_hdr(skb)->daddr,
101 			      ip_hdr(skb)->saddr,
102 			      tcp_hdr(skb)->dest,
103 			      tcp_hdr(skb)->source);
104 }
105 
106 static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
107 {
108 	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
109 }
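
/* A rough model (per RFC 6528) of what the two helpers above provide - a
 * hedged sketch, not the exact implementation:
 *
 *	ISN = M + F(daddr, saddr, dport, sport, secret)
 *
 * where F is a keyed hash (siphash with a boot-time secret in current
 * kernels) and M is a fine-grained clock, so initial sequence numbers are
 * unpredictable to off-path attackers while successive connections on the
 * same 4-tuple still get advancing sequence spaces. tcp_v4_init_ts_off()
 * likewise derives a per-destination timestamp offset, so TCP timestamps
 * do not expose one global clock that could fingerprint the host.
 */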
110 
111 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
112 {
113 	const struct inet_timewait_sock *tw = inet_twsk(sktw);
114 	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
115 	struct tcp_sock *tp = tcp_sk(sk);
116 	int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;
117 
118 	if (reuse == 2) {
119 		/* Still does not detect *everything* that goes through
120 		 * lo, since we require a loopback src or dst address
121 		 * or a direct binding to the 'lo' interface.
122 		 */
123 		bool loopback = false;
124 		if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
125 			loopback = true;
126 #if IS_ENABLED(CONFIG_IPV6)
127 		if (tw->tw_family == AF_INET6) {
128 			if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
129 			    (ipv6_addr_v4mapped(&tw->tw_v6_daddr) &&
130 			     (tw->tw_v6_daddr.s6_addr[12] == 127)) ||
131 			    ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
132 			    (ipv6_addr_v4mapped(&tw->tw_v6_rcv_saddr) &&
133 			     (tw->tw_v6_rcv_saddr.s6_addr[12] == 127)))
134 				loopback = true;
135 		} else
136 #endif
137 		{
138 			if (ipv4_is_loopback(tw->tw_daddr) ||
139 			    ipv4_is_loopback(tw->tw_rcv_saddr))
140 				loopback = true;
141 		}
142 		if (!loopback)
143 			reuse = 0;
144 	}
145 
146 	/* With PAWS, it is safe from the viewpoint
147 	   of data integrity. Even without PAWS it is safe, provided sequence
148 	   spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.
149 
150 	   Actually, the idea is close to VJ's, except that the timestamp
151 	   cache is held per port pair rather than per host, and the TW
152 	   bucket is used as the state holder.
153 
154 	   If the TW bucket has already been destroyed we fall back to VJ's
155 	   scheme and use the initial timestamp retrieved from the peer table.
156 	 */
157 	if (tcptw->tw_ts_recent_stamp &&
158 	    (!twp || (reuse && time_after32(ktime_get_seconds(),
159 					    tcptw->tw_ts_recent_stamp)))) {
160 		/* In case of repair and re-using TIME-WAIT sockets we still
161 		 * want to be sure that it is safe as above but honor the
162 		 * sequence numbers and time stamps set as part of the repair
163 		 * process.
164 		 *
165 		 * Without this check re-using a TIME-WAIT socket with TCP
166 		 * repair would accumulate a -1 on the repair assigned
167 		 * sequence number. The first time it is reused the sequence
168 		 * is -1, the second time -2, etc. This fixes that issue
169 		 * without appearing to create any others.
170 		 */
171 		if (likely(!tp->repair)) {
172 			tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
173 			if (tp->write_seq == 0)
174 				tp->write_seq = 1;
175 			tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
176 			tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
177 		}
178 		sock_hold(sktw);
179 		return 1;
180 	}
181 
182 	return 0;
183 }
184 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
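
/* Usage note (not part of this file): the reuse value consumed above comes
 * from the net.ipv4.tcp_tw_reuse sysctl. At this revision:
 *
 *	sysctl -w net.ipv4.tcp_tw_reuse=0  # never reuse TIME-WAIT sockets
 *	sysctl -w net.ipv4.tcp_tw_reuse=1  # reuse when timestamps prove safe
 *	sysctl -w net.ipv4.tcp_tw_reuse=2  # like 1, but loopback traffic only
 *
 * Mode 2 is what the loopback-detection block at the top of
 * tcp_twsk_unique() implements.
 */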
185 
186 static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
187 			      int addr_len)
188 {
189 	/* This check is replicated from tcp_v4_connect() and intended to
190 	 * prevent BPF program called below from accessing bytes that are out
191 	 * of the bound specified by user in addr_len.
192 	 */
193 	if (addr_len < sizeof(struct sockaddr_in))
194 		return -EINVAL;
195 
196 	sock_owned_by_me(sk);
197 
198 	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
199 }
200 
201 /* This will initiate an outgoing connection. */
202 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
203 {
204 	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
205 	struct inet_sock *inet = inet_sk(sk);
206 	struct tcp_sock *tp = tcp_sk(sk);
207 	__be16 orig_sport, orig_dport;
208 	__be32 daddr, nexthop;
209 	struct flowi4 *fl4;
210 	struct rtable *rt;
211 	int err;
212 	struct ip_options_rcu *inet_opt;
213 	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
214 
215 	if (addr_len < sizeof(struct sockaddr_in))
216 		return -EINVAL;
217 
218 	if (usin->sin_family != AF_INET)
219 		return -EAFNOSUPPORT;
220 
221 	nexthop = daddr = usin->sin_addr.s_addr;
222 	inet_opt = rcu_dereference_protected(inet->inet_opt,
223 					     lockdep_sock_is_held(sk));
224 	if (inet_opt && inet_opt->opt.srr) {
225 		if (!daddr)
226 			return -EINVAL;
227 		nexthop = inet_opt->opt.faddr;
228 	}
229 
230 	orig_sport = inet->inet_sport;
231 	orig_dport = usin->sin_port;
232 	fl4 = &inet->cork.fl.u.ip4;
233 	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
234 			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
235 			      IPPROTO_TCP,
236 			      orig_sport, orig_dport, sk);
237 	if (IS_ERR(rt)) {
238 		err = PTR_ERR(rt);
239 		if (err == -ENETUNREACH)
240 			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
241 		return err;
242 	}
243 
244 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
245 		ip_rt_put(rt);
246 		return -ENETUNREACH;
247 	}
248 
249 	if (!inet_opt || !inet_opt->opt.srr)
250 		daddr = fl4->daddr;
251 
252 	if (!inet->inet_saddr)
253 		inet->inet_saddr = fl4->saddr;
254 	sk_rcv_saddr_set(sk, inet->inet_saddr);
255 
256 	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
257 		/* Reset inherited state */
258 		tp->rx_opt.ts_recent	   = 0;
259 		tp->rx_opt.ts_recent_stamp = 0;
260 		if (likely(!tp->repair))
261 			tp->write_seq	   = 0;
262 	}
263 
264 	inet->inet_dport = usin->sin_port;
265 	sk_daddr_set(sk, daddr);
266 
267 	inet_csk(sk)->icsk_ext_hdr_len = 0;
268 	if (inet_opt)
269 		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
270 
271 	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
272 
273 	/* Socket identity is still unknown (sport may be zero).
274 	 * However, we set the state to SYN-SENT and, without releasing the
275 	 * socket lock, select a source port, enter ourselves into the hash
276 	 * tables and complete initialization after this.
277 	 */
278 	tcp_set_state(sk, TCP_SYN_SENT);
279 	err = inet_hash_connect(tcp_death_row, sk);
280 	if (err)
281 		goto failure;
282 
283 	sk_set_txhash(sk);
284 
285 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
286 			       inet->inet_sport, inet->inet_dport, sk);
287 	if (IS_ERR(rt)) {
288 		err = PTR_ERR(rt);
289 		rt = NULL;
290 		goto failure;
291 	}
292 	/* OK, now commit destination to socket.  */
293 	sk->sk_gso_type = SKB_GSO_TCPV4;
294 	sk_setup_caps(sk, &rt->dst);
295 	rt = NULL;
296 
297 	if (likely(!tp->repair)) {
298 		if (!tp->write_seq)
299 			tp->write_seq = secure_tcp_seq(inet->inet_saddr,
300 						       inet->inet_daddr,
301 						       inet->inet_sport,
302 						       usin->sin_port);
303 		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
304 						 inet->inet_saddr,
305 						 inet->inet_daddr);
306 	}
307 
308 	inet->inet_id = tp->write_seq ^ jiffies;
309 
310 	if (tcp_fastopen_defer_connect(sk, &err))
311 		return err;
312 	if (err)
313 		goto failure;
314 
315 	err = tcp_connect(sk);
316 
317 	if (err)
318 		goto failure;
319 
320 	return 0;
321 
322 failure:
323 	/*
324 	 * This unhashes the socket and releases the local port,
325 	 * if necessary.
326 	 */
327 	tcp_set_state(sk, TCP_CLOSE);
328 	ip_rt_put(rt);
329 	sk->sk_route_caps = 0;
330 	inet->inet_dport = 0;
331 	return err;
332 }
333 EXPORT_SYMBOL(tcp_v4_connect);
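
/* Userspace view (a minimal sketch, not part of this file): a blocking
 * connect() on an AF_INET stream socket reaches tcp_v4_connect() via
 * inet_stream_connect():
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 * The addr_len < sizeof(struct sockaddr_in) check at the top of both
 * tcp_v4_pre_connect() and tcp_v4_connect() guards exactly this call.
 */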
334 
335 /*
336  * This routine reacts to ICMP_FRAG_NEEDED MTU indications as defined in RFC 1191.
337  * It can be called through tcp_release_cb() if the socket was owned by the
338  * user at the time tcp_v4_err() was called to handle the ICMP message.
339  */
340 void tcp_v4_mtu_reduced(struct sock *sk)
341 {
342 	struct inet_sock *inet = inet_sk(sk);
343 	struct dst_entry *dst;
344 	u32 mtu;
345 
346 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
347 		return;
348 	mtu = tcp_sk(sk)->mtu_info;
349 	dst = inet_csk_update_pmtu(sk, mtu);
350 	if (!dst)
351 		return;
352 
353 	/* Something is about to go wrong... Remember the soft error
354 	 * in case this connection is not able to recover.
355 	 */
356 	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
357 		sk->sk_err_soft = EMSGSIZE;
358 
359 	mtu = dst_mtu(dst);
360 
361 	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
362 	    ip_sk_accept_pmtu(sk) &&
363 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
364 		tcp_sync_mss(sk, mtu);
365 
366 		/* Resend the TCP packet because it's
367 		 * clear that the old packet has been
368 		 * dropped. This is the new "fast" path mtu
369 		 * discovery.
370 		 */
371 		tcp_simple_retransmit(sk);
372 	} /* else let the usual retransmit timer handle it */
373 }
374 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
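
/* The pmtudisc mode tested above is set from userspace; a hedged sketch:
 *
 *	int val = IP_PMTUDISC_DO;
 *	setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));
 *
 * With IP_PMTUDISC_DONT the DF bit is cleared and this function leaves the
 * MSS alone; with IP_PMTUDISC_DO the socket honors incoming FRAG_NEEDED
 * indications and tcp_sync_mss() shrinks the MSS to fit the reported MTU.
 */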
375 
376 static void do_redirect(struct sk_buff *skb, struct sock *sk)
377 {
378 	struct dst_entry *dst = __sk_dst_check(sk, 0);
379 
380 	if (dst)
381 		dst->ops->redirect(dst, sk, skb);
382 }
383 
384 
385 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
386 void tcp_req_err(struct sock *sk, u32 seq, bool abort)
387 {
388 	struct request_sock *req = inet_reqsk(sk);
389 	struct net *net = sock_net(sk);
390 
391 	/* ICMPs are not backlogged, hence we cannot get
392 	 * an established socket here.
393 	 */
394 	if (seq != tcp_rsk(req)->snt_isn) {
395 		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
396 	} else if (abort) {
397 		/*
398 		 * Still in SYN_RECV, just remove it silently.
399 		 * There is no good way to pass the error to the newly
400 		 * created socket, and POSIX does not want network
401 		 * errors returned from accept().
402 		 */
403 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
404 		tcp_listendrop(req->rsk_listener);
405 	}
406 	reqsk_put(req);
407 }
408 EXPORT_SYMBOL(tcp_req_err);
409 
410 /*
411  * This routine is called by the ICMP module when it gets some
412  * sort of error condition.  If err < 0 then the socket should
413  * be closed and the error returned to the user.  If err > 0
414  * it's just the icmp type << 8 | icmp code.  After adjustment, the
415  * header points to the first 8 bytes of the tcp header.  We need
416  * to find the appropriate port.
417  *
418  * The locking strategy used here is very "optimistic". When
419  * someone else accesses the socket, the ICMP is just dropped
420  * and for some paths there is no check at all.
421  * A more general error queue to queue errors for later handling
422  * is probably better.
423  *
424  */
425 
426 int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
427 {
428 	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
429 	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
430 	struct inet_connection_sock *icsk;
431 	struct tcp_sock *tp;
432 	struct inet_sock *inet;
433 	const int type = icmp_hdr(icmp_skb)->type;
434 	const int code = icmp_hdr(icmp_skb)->code;
435 	struct sock *sk;
436 	struct sk_buff *skb;
437 	struct request_sock *fastopen;
438 	u32 seq, snd_una;
439 	s32 remaining;
440 	u32 delta_us;
441 	int err;
442 	struct net *net = dev_net(icmp_skb->dev);
443 
444 	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
445 				       th->dest, iph->saddr, ntohs(th->source),
446 				       inet_iif(icmp_skb), 0);
447 	if (!sk) {
448 		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
449 		return -ENOENT;
450 	}
451 	if (sk->sk_state == TCP_TIME_WAIT) {
452 		inet_twsk_put(inet_twsk(sk));
453 		return 0;
454 	}
455 	seq = ntohl(th->seq);
456 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
457 		tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
458 				     type == ICMP_TIME_EXCEEDED ||
459 				     (type == ICMP_DEST_UNREACH &&
460 				      (code == ICMP_NET_UNREACH ||
461 				       code == ICMP_HOST_UNREACH)));
462 		return 0;
463 	}
464 
465 	bh_lock_sock(sk);
466 	/* If too many ICMPs get dropped on busy
467 	 * servers this needs to be solved differently.
468 	 * We do take care of the PMTU discovery (RFC 1191) special case:
469 	 * we can receive locally generated ICMP messages while the socket is held.
470 	 */
471 	if (sock_owned_by_user(sk)) {
472 		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
473 			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
474 	}
475 	if (sk->sk_state == TCP_CLOSE)
476 		goto out;
477 
478 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
479 		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
480 		goto out;
481 	}
482 
483 	icsk = inet_csk(sk);
484 	tp = tcp_sk(sk);
485 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
486 	fastopen = tp->fastopen_rsk;
487 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
488 	if (sk->sk_state != TCP_LISTEN &&
489 	    !between(seq, snd_una, tp->snd_nxt)) {
490 		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
491 		goto out;
492 	}
493 
494 	switch (type) {
495 	case ICMP_REDIRECT:
496 		if (!sock_owned_by_user(sk))
497 			do_redirect(icmp_skb, sk);
498 		goto out;
499 	case ICMP_SOURCE_QUENCH:
500 		/* Just silently ignore these. */
501 		goto out;
502 	case ICMP_PARAMETERPROB:
503 		err = EPROTO;
504 		break;
505 	case ICMP_DEST_UNREACH:
506 		if (code > NR_ICMP_UNREACH)
507 			goto out;
508 
509 		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
510 			/* We are not interested in TCP_LISTEN and open_requests
511 			 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
512 			 * they should go through unfragmented).
513 			 */
514 			if (sk->sk_state == TCP_LISTEN)
515 				goto out;
516 
517 			tp->mtu_info = info;
518 			if (!sock_owned_by_user(sk)) {
519 				tcp_v4_mtu_reduced(sk);
520 			} else {
521 				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
522 					sock_hold(sk);
523 			}
524 			goto out;
525 		}
526 
527 		err = icmp_err_convert[code].errno;
528 		/* check if icmp_skb allows revert of backoff
529 		 * (see draft-zimmermann-tcp-lcd) */
530 		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
531 			break;
532 		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
533 		    !icsk->icsk_backoff || fastopen)
534 			break;
535 
536 		if (sock_owned_by_user(sk))
537 			break;
538 
539 		icsk->icsk_backoff--;
540 		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
541 					       TCP_TIMEOUT_INIT;
542 		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
543 
544 		skb = tcp_rtx_queue_head(sk);
545 
546 		tcp_mstamp_refresh(tp);
547 		delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
548 		remaining = icsk->icsk_rto -
549 			    usecs_to_jiffies(delta_us);
550 
551 		if (remaining > 0) {
552 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
553 						  remaining, TCP_RTO_MAX);
554 		} else {
555 			/* The RTO revert clocked out the retransmission,
556 			 * so retransmit now. */
557 			tcp_retransmit_timer(sk);
558 		}
559 
560 		break;
561 	case ICMP_TIME_EXCEEDED:
562 		err = EHOSTUNREACH;
563 		break;
564 	default:
565 		goto out;
566 	}
567 
568 	switch (sk->sk_state) {
569 	case TCP_SYN_SENT:
570 	case TCP_SYN_RECV:
571 		/* Only in fast or simultaneous open. If a fast open socket
572 		 * is already accepted it is treated as a connected one below.
573 		 */
574 		if (fastopen && !fastopen->sk)
575 			break;
576 
577 		if (!sock_owned_by_user(sk)) {
578 			sk->sk_err = err;
579 
580 			sk->sk_error_report(sk);
581 
582 			tcp_done(sk);
583 		} else {
584 			sk->sk_err_soft = err;
585 		}
586 		goto out;
587 	}
588 
589 	/* If we've already connected we will keep trying
590 	 * until we time out, or the user gives up.
591 	 *
592 	 * RFC 1122 4.2.3.9 allows us to consider as hard errors
593 	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
594 	 * but it is obsoleted by PMTU discovery).
595 	 *
596 	 * Note that in the modern internet, where routing is unreliable
597 	 * and broken firewalls sit in every dark corner sending random
598 	 * errors ordered by their masters, even these two messages have
599 	 * finally lost their original sense (even Linux sends invalid PORT_UNREACHs).
600 	 *
601 	 * Now we are in compliance with RFCs.
602 	 *							--ANK (980905)
603 	 */
604 
605 	inet = inet_sk(sk);
606 	if (!sock_owned_by_user(sk) && inet->recverr) {
607 		sk->sk_err = err;
608 		sk->sk_error_report(sk);
609 	} else	{ /* Only an error on timeout */
610 		sk->sk_err_soft = err;
611 	}
612 
613 out:
614 	bh_unlock_sock(sk);
615 	sock_put(sk);
616 	return 0;
617 }
618 
619 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
620 {
621 	struct tcphdr *th = tcp_hdr(skb);
622 
623 	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
624 	skb->csum_start = skb_transport_header(skb) - skb->head;
625 	skb->csum_offset = offsetof(struct tcphdr, check);
626 }
627 
628 /* This routine computes an IPv4 TCP checksum. */
629 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
630 {
631 	const struct inet_sock *inet = inet_sk(sk);
632 
633 	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
634 }
635 EXPORT_SYMBOL(tcp_v4_send_check);
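
/* What the fields set in __tcp_v4_send_check() mean (a rough sketch of
 * CHECKSUM_PARTIAL): th->check is seeded with only the complemented
 * pseudo-header sum, and csum_start/csum_offset tell the NIC - or the
 * software fallback in skb_checksum_help() - where to fold in the rest:
 *
 *	csum = csum_partial(skb->head + skb->csum_start, len, 0);
 *	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) =
 *			csum_fold(csum);
 *
 * so the expensive pass over the payload is deferred to hardware whenever
 * the device advertises checksum offload.
 */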
636 
637 /*
638  *	This routine will send an RST to the other tcp.
639  *
640  *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
641  *		      for the reset?
642  *	Answer: if a packet caused an RST, it is not for a socket
643  *		existing in our system; if it is matched to a socket,
644  *		it is just a duplicate segment or a bug in the other side's TCP.
645  *		So we build the reply based only on the parameters that
646  *		arrived with the segment.
647  *	Exception: precedence violation. We do not implement it in any case.
648  */
649 
650 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
651 {
652 	const struct tcphdr *th = tcp_hdr(skb);
653 	struct {
654 		struct tcphdr th;
655 #ifdef CONFIG_TCP_MD5SIG
656 		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
657 #endif
658 	} rep;
659 	struct ip_reply_arg arg;
660 #ifdef CONFIG_TCP_MD5SIG
661 	struct tcp_md5sig_key *key = NULL;
662 	const __u8 *hash_location = NULL;
663 	unsigned char newhash[16];
664 	int genhash;
665 	struct sock *sk1 = NULL;
666 #endif
667 	struct net *net;
668 	struct sock *ctl_sk;
669 
670 	/* Never send a reset in response to a reset. */
671 	if (th->rst)
672 		return;
673 
674 	/* If sk is not NULL, it means we did a successful lookup and the
675 	 * incoming route had to be correct. prequeue might have dropped our dst.
676 	 */
677 	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
678 		return;
679 
680 	/* Swap the send and the receive. */
681 	memset(&rep, 0, sizeof(rep));
682 	rep.th.dest   = th->source;
683 	rep.th.source = th->dest;
684 	rep.th.doff   = sizeof(struct tcphdr) / 4;
685 	rep.th.rst    = 1;
686 
687 	if (th->ack) {
688 		rep.th.seq = th->ack_seq;
689 	} else {
690 		rep.th.ack = 1;
691 		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
692 				       skb->len - (th->doff << 2));
693 	}
694 
695 	memset(&arg, 0, sizeof(arg));
696 	arg.iov[0].iov_base = (unsigned char *)&rep;
697 	arg.iov[0].iov_len  = sizeof(rep.th);
698 
699 	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
700 #ifdef CONFIG_TCP_MD5SIG
701 	rcu_read_lock();
702 	hash_location = tcp_parse_md5sig_option(th);
703 	if (sk && sk_fullsock(sk)) {
704 		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
705 					&ip_hdr(skb)->saddr, AF_INET);
706 	} else if (hash_location) {
707 		/*
708 		 * The active side is lost. Try to find the listening socket
709 		 * through the source port, and then find the MD5 key through
710 		 * the listening socket. We do not lose security here:
711 		 * the incoming packet is checked against the MD5 hash of the
712 		 * key we find, and no RST is generated if the hash doesn't match.
713 		 */
714 		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
715 					     ip_hdr(skb)->saddr,
716 					     th->source, ip_hdr(skb)->daddr,
717 					     ntohs(th->source), inet_iif(skb),
718 					     tcp_v4_sdif(skb));
719 		/* don't send an RST if we can't find a key */
720 		if (!sk1)
721 			goto out;
722 
723 		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
724 					&ip_hdr(skb)->saddr, AF_INET);
725 		if (!key)
726 			goto out;
727 
728 
729 		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
730 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
731 			goto out;
732 
733 	}
734 
735 	if (key) {
736 		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
737 				   (TCPOPT_NOP << 16) |
738 				   (TCPOPT_MD5SIG << 8) |
739 				   TCPOLEN_MD5SIG);
740 		/* Update length and the length the header thinks exists */
741 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
742 		rep.th.doff = arg.iov[0].iov_len / 4;
743 
744 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
745 				     key, ip_hdr(skb)->saddr,
746 				     ip_hdr(skb)->daddr, &rep.th);
747 	}
748 #endif
749 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
750 				      ip_hdr(skb)->saddr, /* XXX */
751 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
752 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
753 	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
754 
755 	/* When the socket is gone, all binding information is lost and
756 	 * routing might fail. No choice here: if we force the input
757 	 * interface, we will misroute in the case of an asymmetric route.
758 	 */
759 	if (sk) {
760 		arg.bound_dev_if = sk->sk_bound_dev_if;
761 		if (sk_fullsock(sk))
762 			trace_tcp_send_reset(sk, skb);
763 	}
764 
765 	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
766 		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
767 
768 	arg.tos = ip_hdr(skb)->tos;
769 	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
770 	local_bh_disable();
771 	ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
772 	if (sk)
773 		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
774 				   inet_twsk(sk)->tw_mark : sk->sk_mark;
775 	ip_send_unicast_reply(ctl_sk,
776 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
777 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
778 			      &arg, arg.iov[0].iov_len);
779 
780 	ctl_sk->sk_mark = 0;
781 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
782 	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
783 	local_bh_enable();
784 
785 #ifdef CONFIG_TCP_MD5SIG
786 out:
787 	rcu_read_unlock();
788 #endif
789 }
790 
791 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
792    outside socket context, is certainly ugly. What can I do?
793  */
794 
795 static void tcp_v4_send_ack(const struct sock *sk,
796 			    struct sk_buff *skb, u32 seq, u32 ack,
797 			    u32 win, u32 tsval, u32 tsecr, int oif,
798 			    struct tcp_md5sig_key *key,
799 			    int reply_flags, u8 tos)
800 {
801 	const struct tcphdr *th = tcp_hdr(skb);
802 	struct {
803 		struct tcphdr th;
804 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
805 #ifdef CONFIG_TCP_MD5SIG
806 			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
807 #endif
808 			];
809 	} rep;
810 	struct net *net = sock_net(sk);
811 	struct ip_reply_arg arg;
812 	struct sock *ctl_sk;
813 
814 	memset(&rep.th, 0, sizeof(struct tcphdr));
815 	memset(&arg, 0, sizeof(arg));
816 
817 	arg.iov[0].iov_base = (unsigned char *)&rep;
818 	arg.iov[0].iov_len  = sizeof(rep.th);
819 	if (tsecr) {
820 		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
821 				   (TCPOPT_TIMESTAMP << 8) |
822 				   TCPOLEN_TIMESTAMP);
823 		rep.opt[1] = htonl(tsval);
824 		rep.opt[2] = htonl(tsecr);
825 		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
826 	}
827 
828 	/* Swap the send and the receive. */
829 	rep.th.dest    = th->source;
830 	rep.th.source  = th->dest;
831 	rep.th.doff    = arg.iov[0].iov_len / 4;
832 	rep.th.seq     = htonl(seq);
833 	rep.th.ack_seq = htonl(ack);
834 	rep.th.ack     = 1;
835 	rep.th.window  = htons(win);
836 
837 #ifdef CONFIG_TCP_MD5SIG
838 	if (key) {
839 		int offset = (tsecr) ? 3 : 0;
840 
841 		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
842 					  (TCPOPT_NOP << 16) |
843 					  (TCPOPT_MD5SIG << 8) |
844 					  TCPOLEN_MD5SIG);
845 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
846 		rep.th.doff = arg.iov[0].iov_len/4;
847 
848 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
849 				    key, ip_hdr(skb)->saddr,
850 				    ip_hdr(skb)->daddr, &rep.th);
851 	}
852 #endif
853 	arg.flags = reply_flags;
854 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
855 				      ip_hdr(skb)->saddr, /* XXX */
856 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
857 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
858 	if (oif)
859 		arg.bound_dev_if = oif;
860 	arg.tos = tos;
861 	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
862 	local_bh_disable();
863 	ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
864 	if (sk)
865 		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
866 				   inet_twsk(sk)->tw_mark : sk->sk_mark;
867 	ip_send_unicast_reply(ctl_sk,
868 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
869 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
870 			      &arg, arg.iov[0].iov_len);
871 
872 	ctl_sk->sk_mark = 0;
873 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
874 	local_bh_enable();
875 }
876 
877 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
878 {
879 	struct inet_timewait_sock *tw = inet_twsk(sk);
880 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
881 
882 	tcp_v4_send_ack(sk, skb,
883 			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
884 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
885 			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
886 			tcptw->tw_ts_recent,
887 			tw->tw_bound_dev_if,
888 			tcp_twsk_md5_key(tcptw),
889 			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
890 			tw->tw_tos
891 			);
892 
893 	inet_twsk_put(tw);
894 }
895 
896 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
897 				  struct request_sock *req)
898 {
899 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
900 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
901 	 */
902 	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
903 					     tcp_sk(sk)->snd_nxt;
904 
905 	/* RFC 7323 2.3
906 	 * The window field (SEG.WND) of every outgoing segment, with the
907 	 * exception of <SYN> segments, MUST be right-shifted by
908 	 * Rcv.Wind.Shift bits:
909 	 */
910 	tcp_v4_send_ack(sk, skb, seq,
911 			tcp_rsk(req)->rcv_nxt,
912 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
913 			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
914 			req->ts_recent,
915 			0,
916 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
917 					  AF_INET),
918 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
919 			ip_hdr(skb)->tos);
920 }
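
/* Worked example for the RFC 7323 shift above: with rcv_wscale = 7, an
 * advertised window of 262144 bytes goes on the wire as
 * 262144 >> 7 = 2048 in the 16-bit window field, and the peer shifts it
 * back. Only <SYN> segments carry an unshifted window, which is why this
 * SYN-ACK/ACK path must remember to apply the shift itself.
 */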
921 
922 /*
923  *	Send a SYN-ACK after having received a SYN.
924  *	This still operates on a request_sock only, not on a big
925  *	socket.
926  */
927 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
928 			      struct flowi *fl,
929 			      struct request_sock *req,
930 			      struct tcp_fastopen_cookie *foc,
931 			      enum tcp_synack_type synack_type)
932 {
933 	const struct inet_request_sock *ireq = inet_rsk(req);
934 	struct flowi4 fl4;
935 	int err = -1;
936 	struct sk_buff *skb;
937 
938 	/* First, grab a route. */
939 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
940 		return -1;
941 
942 	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
943 
944 	if (skb) {
945 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
946 
947 		rcu_read_lock();
948 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
949 					    ireq->ir_rmt_addr,
950 					    rcu_dereference(ireq->ireq_opt));
951 		rcu_read_unlock();
952 		err = net_xmit_eval(err);
953 	}
954 
955 	return err;
956 }
957 
958 /*
959  *	IPv4 request_sock destructor.
960  */
961 static void tcp_v4_reqsk_destructor(struct request_sock *req)
962 {
963 	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
964 }
965 
966 #ifdef CONFIG_TCP_MD5SIG
967 /*
968  * RFC2385 MD5 checksumming requires a mapping of
969  * IP address->MD5 Key.
970  * We need to maintain these in the sk structure.
971  */
972 
973 /* Find the Key structure for an address.  */
974 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
975 					 const union tcp_md5_addr *addr,
976 					 int family)
977 {
978 	const struct tcp_sock *tp = tcp_sk(sk);
979 	struct tcp_md5sig_key *key;
980 	const struct tcp_md5sig_info *md5sig;
981 	__be32 mask;
982 	struct tcp_md5sig_key *best_match = NULL;
983 	bool match;
984 
985 	/* caller either holds rcu_read_lock() or socket lock */
986 	md5sig = rcu_dereference_check(tp->md5sig_info,
987 				       lockdep_sock_is_held(sk));
988 	if (!md5sig)
989 		return NULL;
990 
991 	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
992 		if (key->family != family)
993 			continue;
994 
995 		if (family == AF_INET) {
996 			mask = inet_make_mask(key->prefixlen);
997 			match = (key->addr.a4.s_addr & mask) ==
998 				(addr->a4.s_addr & mask);
999 #if IS_ENABLED(CONFIG_IPV6)
1000 		} else if (family == AF_INET6) {
1001 			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
1002 						  key->prefixlen);
1003 #endif
1004 		} else {
1005 			match = false;
1006 		}
1007 
1008 		if (match && (!best_match ||
1009 			      key->prefixlen > best_match->prefixlen))
1010 			best_match = key;
1011 	}
1012 	return best_match;
1013 }
1014 EXPORT_SYMBOL(tcp_md5_do_lookup);
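
/* Example of the best-match rule above: with keys installed for
 * 10.0.0.0/8 and 10.1.0.0/16, a lookup for peer 10.1.2.3 matches both
 * entries, and the /16 key wins because its prefixlen is larger; an
 * exact-host key (prefixlen 32) always beats any shorter prefix.
 */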
1015 
1016 static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
1017 						      const union tcp_md5_addr *addr,
1018 						      int family, u8 prefixlen)
1019 {
1020 	const struct tcp_sock *tp = tcp_sk(sk);
1021 	struct tcp_md5sig_key *key;
1022 	unsigned int size = sizeof(struct in_addr);
1023 	const struct tcp_md5sig_info *md5sig;
1024 
1025 	/* caller either holds rcu_read_lock() or socket lock */
1026 	md5sig = rcu_dereference_check(tp->md5sig_info,
1027 				       lockdep_sock_is_held(sk));
1028 	if (!md5sig)
1029 		return NULL;
1030 #if IS_ENABLED(CONFIG_IPV6)
1031 	if (family == AF_INET6)
1032 		size = sizeof(struct in6_addr);
1033 #endif
1034 	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
1035 		if (key->family != family)
1036 			continue;
1037 		if (!memcmp(&key->addr, addr, size) &&
1038 		    key->prefixlen == prefixlen)
1039 			return key;
1040 	}
1041 	return NULL;
1042 }
1043 
1044 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1045 					 const struct sock *addr_sk)
1046 {
1047 	const union tcp_md5_addr *addr;
1048 
1049 	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
1050 	return tcp_md5_do_lookup(sk, addr, AF_INET);
1051 }
1052 EXPORT_SYMBOL(tcp_v4_md5_lookup);
1053 
1054 /* This can be called on a newly created socket, from other files */
1055 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1056 		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
1057 		   gfp_t gfp)
1058 {
1059 	/* Add Key to the list */
1060 	struct tcp_md5sig_key *key;
1061 	struct tcp_sock *tp = tcp_sk(sk);
1062 	struct tcp_md5sig_info *md5sig;
1063 
1064 	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
1065 	if (key) {
1066 		/* Pre-existing entry - just update that one. */
1067 		memcpy(key->key, newkey, newkeylen);
1068 		key->keylen = newkeylen;
1069 		return 0;
1070 	}
1071 
1072 	md5sig = rcu_dereference_protected(tp->md5sig_info,
1073 					   lockdep_sock_is_held(sk));
1074 	if (!md5sig) {
1075 		md5sig = kmalloc(sizeof(*md5sig), gfp);
1076 		if (!md5sig)
1077 			return -ENOMEM;
1078 
1079 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1080 		INIT_HLIST_HEAD(&md5sig->head);
1081 		rcu_assign_pointer(tp->md5sig_info, md5sig);
1082 	}
1083 
1084 	key = sock_kmalloc(sk, sizeof(*key), gfp);
1085 	if (!key)
1086 		return -ENOMEM;
1087 	if (!tcp_alloc_md5sig_pool()) {
1088 		sock_kfree_s(sk, key, sizeof(*key));
1089 		return -ENOMEM;
1090 	}
1091 
1092 	memcpy(key->key, newkey, newkeylen);
1093 	key->keylen = newkeylen;
1094 	key->family = family;
1095 	key->prefixlen = prefixlen;
1096 	memcpy(&key->addr, addr,
1097 	       (family == AF_INET6) ? sizeof(struct in6_addr) :
1098 				      sizeof(struct in_addr));
1099 	hlist_add_head_rcu(&key->node, &md5sig->head);
1100 	return 0;
1101 }
1102 EXPORT_SYMBOL(tcp_md5_do_add);
1103 
1104 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1105 		   u8 prefixlen)
1106 {
1107 	struct tcp_md5sig_key *key;
1108 
1109 	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
1110 	if (!key)
1111 		return -ENOENT;
1112 	hlist_del_rcu(&key->node);
1113 	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1114 	kfree_rcu(key, rcu);
1115 	return 0;
1116 }
1117 EXPORT_SYMBOL(tcp_md5_do_del);
1118 
1119 static void tcp_clear_md5_list(struct sock *sk)
1120 {
1121 	struct tcp_sock *tp = tcp_sk(sk);
1122 	struct tcp_md5sig_key *key;
1123 	struct hlist_node *n;
1124 	struct tcp_md5sig_info *md5sig;
1125 
1126 	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1127 
1128 	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1129 		hlist_del_rcu(&key->node);
1130 		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1131 		kfree_rcu(key, rcu);
1132 	}
1133 }
1134 
1135 static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1136 				 char __user *optval, int optlen)
1137 {
1138 	struct tcp_md5sig cmd;
1139 	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1140 	u8 prefixlen = 32;
1141 
1142 	if (optlen < sizeof(cmd))
1143 		return -EINVAL;
1144 
1145 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1146 		return -EFAULT;
1147 
1148 	if (sin->sin_family != AF_INET)
1149 		return -EINVAL;
1150 
1151 	if (optname == TCP_MD5SIG_EXT &&
1152 	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1153 		prefixlen = cmd.tcpm_prefixlen;
1154 		if (prefixlen > 32)
1155 			return -EINVAL;
1156 	}
1157 
1158 	if (!cmd.tcpm_keylen)
1159 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1160 				      AF_INET, prefixlen);
1161 
1162 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1163 		return -EINVAL;
1164 
1165 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1166 			      AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
1167 			      GFP_KERNEL);
1168 }
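
/* Userspace sketch (hedged, not from this file) of how a key reaches the
 * parser above:
 *
 *	struct tcp_md5sig md5 = {};
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * TCP_MD5SIG_EXT additionally honors tcpm_prefixlen when
 * TCP_MD5SIG_FLAG_PREFIX is set in tcpm_flags, matching the prefixlen
 * handling at the top of tcp_v4_parse_md5_keys().
 */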
1169 
1170 static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1171 				   __be32 daddr, __be32 saddr,
1172 				   const struct tcphdr *th, int nbytes)
1173 {
1174 	struct tcp4_pseudohdr *bp;
1175 	struct scatterlist sg;
1176 	struct tcphdr *_th;
1177 
1178 	bp = hp->scratch;
1179 	bp->saddr = saddr;
1180 	bp->daddr = daddr;
1181 	bp->pad = 0;
1182 	bp->protocol = IPPROTO_TCP;
1183 	bp->len = cpu_to_be16(nbytes);
1184 
1185 	_th = (struct tcphdr *)(bp + 1);
1186 	memcpy(_th, th, sizeof(*th));
1187 	_th->check = 0;
1188 
1189 	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1190 	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1191 				sizeof(*bp) + sizeof(*th));
1192 	return crypto_ahash_update(hp->md5_req);
1193 }
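
/* Per RFC 2385, the digest built by the helpers here covers, in order:
 *
 *	1. the IPv4 pseudo-header (saddr, daddr, zero pad, proto, length)
 *	2. the fixed TCP header with its checksum zeroed (options are
 *	   excluded; tcp_v4_md5_hash_skb() below also feeds in the payload)
 *	3. the connection's key itself
 *
 * which matches the sequence of crypto_ahash_update() calls made here.
 */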
1194 
1195 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1196 			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1197 {
1198 	struct tcp_md5sig_pool *hp;
1199 	struct ahash_request *req;
1200 
1201 	hp = tcp_get_md5sig_pool();
1202 	if (!hp)
1203 		goto clear_hash_noput;
1204 	req = hp->md5_req;
1205 
1206 	if (crypto_ahash_init(req))
1207 		goto clear_hash;
1208 	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1209 		goto clear_hash;
1210 	if (tcp_md5_hash_key(hp, key))
1211 		goto clear_hash;
1212 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1213 	if (crypto_ahash_final(req))
1214 		goto clear_hash;
1215 
1216 	tcp_put_md5sig_pool();
1217 	return 0;
1218 
1219 clear_hash:
1220 	tcp_put_md5sig_pool();
1221 clear_hash_noput:
1222 	memset(md5_hash, 0, 16);
1223 	return 1;
1224 }
1225 
1226 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1227 			const struct sock *sk,
1228 			const struct sk_buff *skb)
1229 {
1230 	struct tcp_md5sig_pool *hp;
1231 	struct ahash_request *req;
1232 	const struct tcphdr *th = tcp_hdr(skb);
1233 	__be32 saddr, daddr;
1234 
1235 	if (sk) { /* valid for establish/request sockets */
1236 		saddr = sk->sk_rcv_saddr;
1237 		daddr = sk->sk_daddr;
1238 	} else {
1239 		const struct iphdr *iph = ip_hdr(skb);
1240 		saddr = iph->saddr;
1241 		daddr = iph->daddr;
1242 	}
1243 
1244 	hp = tcp_get_md5sig_pool();
1245 	if (!hp)
1246 		goto clear_hash_noput;
1247 	req = hp->md5_req;
1248 
1249 	if (crypto_ahash_init(req))
1250 		goto clear_hash;
1251 
1252 	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1253 		goto clear_hash;
1254 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1255 		goto clear_hash;
1256 	if (tcp_md5_hash_key(hp, key))
1257 		goto clear_hash;
1258 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1259 	if (crypto_ahash_final(req))
1260 		goto clear_hash;
1261 
1262 	tcp_put_md5sig_pool();
1263 	return 0;
1264 
1265 clear_hash:
1266 	tcp_put_md5sig_pool();
1267 clear_hash_noput:
1268 	memset(md5_hash, 0, 16);
1269 	return 1;
1270 }
1271 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1272 
1273 #endif
1274 
1275 /* Called with rcu_read_lock() */
1276 static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1277 				    const struct sk_buff *skb)
1278 {
1279 #ifdef CONFIG_TCP_MD5SIG
1280 	/*
1281 	 * This gets called for each TCP segment that arrives
1282 	 * so we want to be efficient.
1283 	 * We have 3 drop cases:
1284 	 * o No MD5 hash and one expected.
1285 	 * o MD5 hash and we're not expecting one.
1286 	 * o MD5 hash and it's wrong.
1287 	 */
1288 	const __u8 *hash_location = NULL;
1289 	struct tcp_md5sig_key *hash_expected;
1290 	const struct iphdr *iph = ip_hdr(skb);
1291 	const struct tcphdr *th = tcp_hdr(skb);
1292 	int genhash;
1293 	unsigned char newhash[16];
1294 
1295 	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1296 					  AF_INET);
1297 	hash_location = tcp_parse_md5sig_option(th);
1298 
1299 	/* We've parsed the options - do we have a hash? */
1300 	if (!hash_expected && !hash_location)
1301 		return false;
1302 
1303 	if (hash_expected && !hash_location) {
1304 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1305 		return true;
1306 	}
1307 
1308 	if (!hash_expected && hash_location) {
1309 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1310 		return true;
1311 	}
1312 
1313 	/* Okay, so this is hash_expected and hash_location -
1314 	 * so we need to calculate the checksum.
1315 	 */
1316 	genhash = tcp_v4_md5_hash_skb(newhash,
1317 				      hash_expected,
1318 				      NULL, skb);
1319 
1320 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1321 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1322 		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1323 				     &iph->saddr, ntohs(th->source),
1324 				     &iph->daddr, ntohs(th->dest),
1325 				     genhash ? " tcp_v4_calc_md5_hash failed"
1326 				     : "");
1327 		return true;
1328 	}
1329 	return false;
1330 #endif
1331 	return false;
1332 }
1333 
1334 static void tcp_v4_init_req(struct request_sock *req,
1335 			    const struct sock *sk_listener,
1336 			    struct sk_buff *skb)
1337 {
1338 	struct inet_request_sock *ireq = inet_rsk(req);
1339 	struct net *net = sock_net(sk_listener);
1340 
1341 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1342 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1343 	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1344 }
1345 
1346 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1347 					  struct flowi *fl,
1348 					  const struct request_sock *req)
1349 {
1350 	return inet_csk_route_req(sk, &fl->u.ip4, req);
1351 }
1352 
1353 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1354 	.family		=	PF_INET,
1355 	.obj_size	=	sizeof(struct tcp_request_sock),
1356 	.rtx_syn_ack	=	tcp_rtx_synack,
1357 	.send_ack	=	tcp_v4_reqsk_send_ack,
1358 	.destructor	=	tcp_v4_reqsk_destructor,
1359 	.send_reset	=	tcp_v4_send_reset,
1360 	.syn_ack_timeout =	tcp_syn_ack_timeout,
1361 };
1362 
1363 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1364 	.mss_clamp	=	TCP_MSS_DEFAULT,
1365 #ifdef CONFIG_TCP_MD5SIG
1366 	.req_md5_lookup	=	tcp_v4_md5_lookup,
1367 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1368 #endif
1369 	.init_req	=	tcp_v4_init_req,
1370 #ifdef CONFIG_SYN_COOKIES
1371 	.cookie_init_seq =	cookie_v4_init_sequence,
1372 #endif
1373 	.route_req	=	tcp_v4_route_req,
1374 	.init_seq	=	tcp_v4_init_seq,
1375 	.init_ts_off	=	tcp_v4_init_ts_off,
1376 	.send_synack	=	tcp_v4_send_synack,
1377 };
1378 
1379 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1380 {
1381 	/* Never answer SYNs sent to broadcast or multicast addresses */
1382 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1383 		goto drop;
1384 
1385 	return tcp_conn_request(&tcp_request_sock_ops,
1386 				&tcp_request_sock_ipv4_ops, sk, skb);
1387 
1388 drop:
1389 	tcp_listendrop(sk);
1390 	return 0;
1391 }
1392 EXPORT_SYMBOL(tcp_v4_conn_request);
1393 
1394 
1395 /*
1396  * The three-way handshake has completed - we got a valid final ACK -
1397  * now create the new socket.
1398  */
1399 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1400 				  struct request_sock *req,
1401 				  struct dst_entry *dst,
1402 				  struct request_sock *req_unhash,
1403 				  bool *own_req)
1404 {
1405 	struct inet_request_sock *ireq;
1406 	struct inet_sock *newinet;
1407 	struct tcp_sock *newtp;
1408 	struct sock *newsk;
1409 #ifdef CONFIG_TCP_MD5SIG
1410 	struct tcp_md5sig_key *key;
1411 #endif
1412 	struct ip_options_rcu *inet_opt;
1413 
1414 	if (sk_acceptq_is_full(sk))
1415 		goto exit_overflow;
1416 
1417 	newsk = tcp_create_openreq_child(sk, req, skb);
1418 	if (!newsk)
1419 		goto exit_nonewsk;
1420 
1421 	newsk->sk_gso_type = SKB_GSO_TCPV4;
1422 	inet_sk_rx_dst_set(newsk, skb);
1423 
1424 	newtp		      = tcp_sk(newsk);
1425 	newinet		      = inet_sk(newsk);
1426 	ireq		      = inet_rsk(req);
1427 	sk_daddr_set(newsk, ireq->ir_rmt_addr);
1428 	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1429 	newsk->sk_bound_dev_if = ireq->ir_iif;
1430 	newinet->inet_saddr   = ireq->ir_loc_addr;
1431 	inet_opt	      = rcu_dereference(ireq->ireq_opt);
1432 	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1433 	newinet->mc_index     = inet_iif(skb);
1434 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1435 	newinet->rcv_tos      = ip_hdr(skb)->tos;
1436 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1437 	if (inet_opt)
1438 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1439 	newinet->inet_id = newtp->write_seq ^ jiffies;
1440 
1441 	if (!dst) {
1442 		dst = inet_csk_route_child_sock(sk, newsk, req);
1443 		if (!dst)
1444 			goto put_and_exit;
1445 	} else {
1446 		/* syncookie case : see end of cookie_v4_check() */
1447 	}
1448 	sk_setup_caps(newsk, dst);
1449 
1450 	tcp_ca_openreq_child(newsk, dst);
1451 
1452 	tcp_sync_mss(newsk, dst_mtu(dst));
1453 	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1454 
1455 	tcp_initialize_rcv_mss(newsk);
1456 
1457 #ifdef CONFIG_TCP_MD5SIG
1458 	/* Copy over the MD5 key from the original socket */
1459 	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1460 				AF_INET);
1461 	if (key) {
1462 		/*
1463 		 * We're using one, so create a matching key
1464 		 * on the newsk structure. If we fail to get
1465 		 * memory, then we end up not copying the key
1466 		 * across. Shucks.
1467 		 */
1468 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1469 			       AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
1470 		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1471 	}
1472 #endif
1473 
1474 	if (__inet_inherit_port(sk, newsk) < 0)
1475 		goto put_and_exit;
1476 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1477 	if (likely(*own_req)) {
1478 		tcp_move_syn(newtp, req);
1479 		ireq->ireq_opt = NULL;
1480 	} else {
1481 		newinet->inet_opt = NULL;
1482 	}
1483 	return newsk;
1484 
1485 exit_overflow:
1486 	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1487 exit_nonewsk:
1488 	dst_release(dst);
1489 exit:
1490 	tcp_listendrop(sk);
1491 	return NULL;
1492 put_and_exit:
1493 	newinet->inet_opt = NULL;
1494 	inet_csk_prepare_forced_close(newsk);
1495 	tcp_done(newsk);
1496 	goto exit;
1497 }
1498 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1499 
1500 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1501 {
1502 #ifdef CONFIG_SYN_COOKIES
1503 	const struct tcphdr *th = tcp_hdr(skb);
1504 
1505 	if (!th->syn)
1506 		sk = cookie_v4_check(sk, skb);
1507 #endif
1508 	return sk;
1509 }
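
/* Brief note on syncookies: when enabled with
 *
 *	sysctl -w net.ipv4.tcp_syncookies=1
 *
 * an overflowing listener encodes the connection parameters into the
 * SYN-ACK sequence number instead of keeping a request_sock, and the
 * state comes back in the peer's final ACK. That is why only non-SYN
 * segments (!th->syn) are candidates for cookie_v4_check() above.
 */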
1510 
1511 /* The socket must have its spinlock held when we get
1512  * here, unless it is a TCP_LISTEN socket.
1513  *
1514  * We have a potential double-lock case here, so even when
1515  * doing backlog processing we use the BH locking scheme.
1516  * This is because we cannot sleep with the original spinlock
1517  * held.
1518  */
1519 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1520 {
1521 	struct sock *rsk;
1522 
1523 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1524 		struct dst_entry *dst = sk->sk_rx_dst;
1525 
1526 		sock_rps_save_rxhash(sk, skb);
1527 		sk_mark_napi_id(sk, skb);
1528 		if (dst) {
1529 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1530 			    !dst->ops->check(dst, 0)) {
1531 				dst_release(dst);
1532 				sk->sk_rx_dst = NULL;
1533 			}
1534 		}
1535 		tcp_rcv_established(sk, skb);
1536 		return 0;
1537 	}
1538 
1539 	if (tcp_checksum_complete(skb))
1540 		goto csum_err;
1541 
1542 	if (sk->sk_state == TCP_LISTEN) {
1543 		struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1544 
1545 		if (!nsk)
1546 			goto discard;
1547 		if (nsk != sk) {
1548 			if (tcp_child_process(sk, nsk, skb)) {
1549 				rsk = nsk;
1550 				goto reset;
1551 			}
1552 			return 0;
1553 		}
1554 	} else
1555 		sock_rps_save_rxhash(sk, skb);
1556 
1557 	if (tcp_rcv_state_process(sk, skb)) {
1558 		rsk = sk;
1559 		goto reset;
1560 	}
1561 	return 0;
1562 
1563 reset:
1564 	tcp_v4_send_reset(rsk, skb);
1565 discard:
1566 	kfree_skb(skb);
1567 	/* Be careful here. If this function gets more complicated and
1568 	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1569 	 * might be destroyed here. This current version compiles correctly,
1570 	 * but you have been warned.
1571 	 */
1572 	return 0;
1573 
1574 csum_err:
1575 	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1576 	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1577 	goto discard;
1578 }
1579 EXPORT_SYMBOL(tcp_v4_do_rcv);
1580 
1581 int tcp_v4_early_demux(struct sk_buff *skb)
1582 {
1583 	const struct iphdr *iph;
1584 	const struct tcphdr *th;
1585 	struct sock *sk;
1586 
1587 	if (skb->pkt_type != PACKET_HOST)
1588 		return 0;
1589 
1590 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1591 		return 0;
1592 
1593 	iph = ip_hdr(skb);
1594 	th = tcp_hdr(skb);
1595 
1596 	if (th->doff < sizeof(struct tcphdr) / 4)
1597 		return 0;
1598 
1599 	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1600 				       iph->saddr, th->source,
1601 				       iph->daddr, ntohs(th->dest),
1602 				       skb->skb_iif, inet_sdif(skb));
1603 	if (sk) {
1604 		skb->sk = sk;
1605 		skb->destructor = sock_edemux;
1606 		if (sk_fullsock(sk)) {
1607 			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1608 
1609 			if (dst)
1610 				dst = dst_check(dst, 0);
1611 			if (dst &&
1612 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1613 				skb_dst_set_noref(skb, dst);
1614 		}
1615 	}
1616 	return 0;
1617 }
1618 
1619 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1620 {
1621 	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
1622 	struct skb_shared_info *shinfo;
1623 	const struct tcphdr *th;
1624 	struct tcphdr *thtail;
1625 	struct sk_buff *tail;
1626 	unsigned int hdrlen;
1627 	bool fragstolen;
1628 	u32 gso_segs;
1629 	int delta;
1630 
1631 	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1632 	 * we can fix skb->truesize to its real value to avoid future drops.
1633 	 * This is valid because skb is not yet charged to the socket.
1634 	 * It has been noticed that pure SACK packets were sometimes dropped
1635 	 * (if cooked by drivers without the copybreak feature).
1636 	 */
1637 	skb_condense(skb);
1638 
1639 	skb_dst_drop(skb);
1640 
1641 	if (unlikely(tcp_checksum_complete(skb))) {
1642 		bh_unlock_sock(sk);
1643 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1644 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1645 		return true;
1646 	}
1647 
1648 	/* Attempt coalescing to last skb in backlog, even if we are
1649 	 * above the limits.
1650 	 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
1651 	 */
1652 	th = (const struct tcphdr *)skb->data;
1653 	hdrlen = th->doff * 4;
1654 	shinfo = skb_shinfo(skb);
1655 
1656 	if (!shinfo->gso_size)
1657 		shinfo->gso_size = skb->len - hdrlen;
1658 
1659 	if (!shinfo->gso_segs)
1660 		shinfo->gso_segs = 1;
1661 
1662 	tail = sk->sk_backlog.tail;
1663 	if (!tail)
1664 		goto no_coalesce;
1665 	thtail = (struct tcphdr *)tail->data;
1666 
1667 	if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
1668 	    TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
1669 	    ((TCP_SKB_CB(tail)->tcp_flags |
1670 	      TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_URG) ||
1671 	    ((TCP_SKB_CB(tail)->tcp_flags ^
1672 	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
1673 #ifdef CONFIG_TLS_DEVICE
1674 	    tail->decrypted != skb->decrypted ||
1675 #endif
1676 	    thtail->doff != th->doff ||
1677 	    memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
1678 		goto no_coalesce;
1679 
1680 	__skb_pull(skb, hdrlen);
1681 	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
1682 		thtail->window = th->window;
1683 
1684 		TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
1685 
1686 		if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
1687 			TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
1688 
1689 		TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1690 
1691 		if (TCP_SKB_CB(skb)->has_rxtstamp) {
1692 			TCP_SKB_CB(tail)->has_rxtstamp = true;
1693 			tail->tstamp = skb->tstamp;
1694 			skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
1695 		}
1696 
1697 		/* Not as strict as GRO. We only need to carry mss max value */
1698 		skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
1699 						 skb_shinfo(tail)->gso_size);
1700 
1701 		gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
1702 		skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
1703 
1704 		sk->sk_backlog.len += delta;
1705 		__NET_INC_STATS(sock_net(sk),
1706 				LINUX_MIB_TCPBACKLOGCOALESCE);
1707 		kfree_skb_partial(skb, fragstolen);
1708 		return false;
1709 	}
1710 	__skb_push(skb, hdrlen);
1711 
1712 no_coalesce:
1713 	/* Only the socket owner can try to collapse/prune rx queues
1714 	 * to reduce memory overhead, so add a little headroom here.
1715 	 * Few socket backlogs are likely to be non-empty concurrently.
1716 	 */
1717 	limit += 64*1024;
1718 
1719 	if (unlikely(sk_add_backlog(sk, skb, limit))) {
1720 		bh_unlock_sock(sk);
1721 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1722 		return true;
1723 	}
1724 	return false;
1725 }
1726 EXPORT_SYMBOL(tcp_add_backlog);
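
/* A note on the limit used above: while the owner holds the socket, the
 * backlog may grow to roughly sk_rcvbuf + sk_sndbuf bytes, plus the 64 KB
 * of headroom added in the no_coalesce path so the owner still has room
 * to collapse/prune its queues. Coalescing into the backlog tail keeps
 * long trains of small segments (e.g. pure ACKs) from exhausting that
 * budget one tiny skb at a time.
 */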
1727 
1728 int tcp_filter(struct sock *sk, struct sk_buff *skb)
1729 {
1730 	struct tcphdr *th = (struct tcphdr *)skb->data;
1731 	unsigned int eaten = skb->len;
1732 	int err;
1733 
1734 	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
1735 	if (!err) {
1736 		eaten -= skb->len;
1737 		TCP_SKB_CB(skb)->end_seq -= eaten;
1738 	}
1739 	return err;
1740 }
1741 EXPORT_SYMBOL(tcp_filter);
1742 
1743 static void tcp_v4_restore_cb(struct sk_buff *skb)
1744 {
1745 	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1746 		sizeof(struct inet_skb_parm));
1747 }
1748 
1749 static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1750 			   const struct tcphdr *th)
1751 {
1752 	/* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB();
1753 	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1754 	 */
1755 	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1756 		sizeof(struct inet_skb_parm));
1757 	barrier();
1758 
1759 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1760 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1761 				    skb->len - th->doff * 4);
1762 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1763 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1764 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1765 	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1766 	TCP_SKB_CB(skb)->sacked	 = 0;
1767 	TCP_SKB_CB(skb)->has_rxtstamp =
1768 			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1769 }
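
/* Worked example for the end_seq arithmetic above: a segment with
 * seq = 1000 carrying 100 payload bytes and the FIN flag set yields
 * end_seq = 1000 + 0 (syn) + 1 (fin) + 100 = 1101, since SYN and FIN
 * each consume one sequence number in addition to the payload bytes.
 */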
1770 
1771 /*
1772  *	From tcp_input.c
1773  */
1774 
1775 int tcp_v4_rcv(struct sk_buff *skb)
1776 {
1777 	struct net *net = dev_net(skb->dev);
1778 	int sdif = inet_sdif(skb);
1779 	const struct iphdr *iph;
1780 	const struct tcphdr *th;
1781 	bool refcounted;
1782 	struct sock *sk;
1783 	int ret;
1784 
1785 	if (skb->pkt_type != PACKET_HOST)
1786 		goto discard_it;
1787 
1788 	/* Count it even if it's bad */
1789 	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1790 
1791 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1792 		goto discard_it;
1793 
1794 	th = (const struct tcphdr *)skb->data;
1795 
1796 	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
1797 		goto bad_packet;
1798 	if (!pskb_may_pull(skb, th->doff * 4))
1799 		goto discard_it;
1800 
1801 	/* An explanation is required here, I think.
1802 	 * Packet length and doff are validated by header prediction,
1803 	 * provided the th->doff == 0 case was eliminated above,
1804 	 * so we defer the remaining checks. */
1805 
1806 	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1807 		goto csum_error;
1808 
1809 	th = (const struct tcphdr *)skb->data;
1810 	iph = ip_hdr(skb);
1811 lookup:
1812 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1813 			       th->dest, sdif, &refcounted);
1814 	if (!sk)
1815 		goto no_tcp_socket;
1816 
1817 process:
1818 	if (sk->sk_state == TCP_TIME_WAIT)
1819 		goto do_time_wait;
1820 
1821 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1822 		struct request_sock *req = inet_reqsk(sk);
1823 		bool req_stolen = false;
1824 		struct sock *nsk;
1825 
1826 		sk = req->rsk_listener;
1827 		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1828 			sk_drops_add(sk, skb);
1829 			reqsk_put(req);
1830 			goto discard_it;
1831 		}
1832 		if (tcp_checksum_complete(skb)) {
1833 			reqsk_put(req);
1834 			goto csum_error;
1835 		}
1836 		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1837 			inet_csk_reqsk_queue_drop_and_put(sk, req);
1838 			goto lookup;
1839 		}
1840 		/* We own a reference on the listener; take another one,
1841 		 * as we might lose it too soon.
1842 		 */
1843 		sock_hold(sk);
1844 		refcounted = true;
1845 		nsk = NULL;
1846 		if (!tcp_filter(sk, skb)) {
1847 			th = (const struct tcphdr *)skb->data;
1848 			iph = ip_hdr(skb);
1849 			tcp_v4_fill_cb(skb, iph, th);
1850 			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1851 		}
1852 		if (!nsk) {
1853 			reqsk_put(req);
1854 			if (req_stolen) {
1855 				/* Another CPU got exclusive access to req
1856 				 * and created a full-blown socket.
1857 				 * Try to feed this packet to that socket
1858 				 * instead of discarding it.
1859 				 */
1860 				tcp_v4_restore_cb(skb);
1861 				sock_put(sk);
1862 				goto lookup;
1863 			}
1864 			goto discard_and_relse;
1865 		}
1866 		if (nsk == sk) {
1867 			reqsk_put(req);
1868 			tcp_v4_restore_cb(skb);
1869 		} else if (tcp_child_process(sk, nsk, skb)) {
1870 			tcp_v4_send_reset(nsk, skb);
1871 			goto discard_and_relse;
1872 		} else {
1873 			sock_put(sk);
1874 			return 0;
1875 		}
1876 	}
1877 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1878 		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1879 		goto discard_and_relse;
1880 	}
1881 
1882 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1883 		goto discard_and_relse;
1884 
1885 	if (tcp_v4_inbound_md5_hash(sk, skb))
1886 		goto discard_and_relse;
1887 
1888 	nf_reset(skb);
1889 
1890 	if (tcp_filter(sk, skb))
1891 		goto discard_and_relse;
1892 	th = (const struct tcphdr *)skb->data;
1893 	iph = ip_hdr(skb);
1894 	tcp_v4_fill_cb(skb, iph, th);
1895 
1896 	skb->dev = NULL;
1897 
1898 	if (sk->sk_state == TCP_LISTEN) {
1899 		ret = tcp_v4_do_rcv(sk, skb);
1900 		goto put_and_return;
1901 	}
1902 
1903 	sk_incoming_cpu_update(sk);
1904 
1905 	bh_lock_sock_nested(sk);
1906 	tcp_segs_in(tcp_sk(sk), skb);
1907 	ret = 0;
1908 	if (!sock_owned_by_user(sk)) {
1909 		ret = tcp_v4_do_rcv(sk, skb);
1910 	} else if (tcp_add_backlog(sk, skb)) {
1911 		goto discard_and_relse;
1912 	}
1913 	bh_unlock_sock(sk);
1914 
1915 put_and_return:
1916 	if (refcounted)
1917 		sock_put(sk);
1918 
1919 	return ret;
1920 
1921 no_tcp_socket:
1922 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1923 		goto discard_it;
1924 
1925 	tcp_v4_fill_cb(skb, iph, th);
1926 
1927 	if (tcp_checksum_complete(skb)) {
1928 csum_error:
1929 		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1930 bad_packet:
1931 		__TCP_INC_STATS(net, TCP_MIB_INERRS);
1932 	} else {
1933 		tcp_v4_send_reset(NULL, skb);
1934 	}
1935 
1936 discard_it:
1937 	/* Discard frame. */
1938 	kfree_skb(skb);
1939 	return 0;
1940 
1941 discard_and_relse:
1942 	sk_drops_add(sk, skb);
1943 	if (refcounted)
1944 		sock_put(sk);
1945 	goto discard_it;
1946 
1947 do_time_wait:
1948 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1949 		inet_twsk_put(inet_twsk(sk));
1950 		goto discard_it;
1951 	}
1952 
1953 	tcp_v4_fill_cb(skb, iph, th);
1954 
1955 	if (tcp_checksum_complete(skb)) {
1956 		inet_twsk_put(inet_twsk(sk));
1957 		goto csum_error;
1958 	}
1959 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1960 	case TCP_TW_SYN: {
1961 		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1962 							&tcp_hashinfo, skb,
1963 							__tcp_hdrlen(th),
1964 							iph->saddr, th->source,
1965 							iph->daddr, th->dest,
1966 							inet_iif(skb),
1967 							sdif);
1968 		if (sk2) {
1969 			inet_twsk_deschedule_put(inet_twsk(sk));
1970 			sk = sk2;
1971 			tcp_v4_restore_cb(skb);
1972 			refcounted = false;
1973 			goto process;
1974 		}
1975 	}
1976 		/* to ACK */
1977 		/* fall through */
1978 	case TCP_TW_ACK:
1979 		tcp_v4_timewait_ack(sk, skb);
1980 		break;
1981 	case TCP_TW_RST:
1982 		tcp_v4_send_reset(sk, skb);
1983 		inet_twsk_deschedule_put(inet_twsk(sk));
1984 		goto discard_it;
1985 	case TCP_TW_SUCCESS:;
1986 	}
1987 	goto discard_it;
1988 }
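
/* Receive-path overview (informal): tcp_v4_rcv() validates the header and
 * checksum, looks the segment up in tcp_hashinfo, then dispatches on socket
 * state: TIME_WAIT sockets go through tcp_timewait_state_process(),
 * NEW_SYN_RECV request sockets through tcp_check_req(), and everything else
 * through tcp_v4_do_rcv(), either directly or deferred via the backlog when
 * the socket is owned by user context.
 */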
1989 
1990 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1991 	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
1992 	.twsk_unique	= tcp_twsk_unique,
1993 	.twsk_destructor= tcp_twsk_destructor,
1994 };
1995 
1996 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1997 {
1998 	struct dst_entry *dst = skb_dst(skb);
1999 
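	/* dst_hold_safe() only takes a reference if the entry is not already
	 * on its way to destruction; when it fails, sk_rx_dst is simply left
	 * unset and can be populated again on a later packet.
	 */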
2000 	if (dst && dst_hold_safe(dst)) {
2001 		sk->sk_rx_dst = dst;
2002 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2003 	}
2004 }
2005 EXPORT_SYMBOL(inet_sk_rx_dst_set);
2006 
2007 const struct inet_connection_sock_af_ops ipv4_specific = {
2008 	.queue_xmit	   = ip_queue_xmit,
2009 	.send_check	   = tcp_v4_send_check,
2010 	.rebuild_header	   = inet_sk_rebuild_header,
2011 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
2012 	.conn_request	   = tcp_v4_conn_request,
2013 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
2014 	.net_header_len	   = sizeof(struct iphdr),
2015 	.setsockopt	   = ip_setsockopt,
2016 	.getsockopt	   = ip_getsockopt,
2017 	.addr2sockaddr	   = inet_csk_addr2sockaddr,
2018 	.sockaddr_len	   = sizeof(struct sockaddr_in),
2019 #ifdef CONFIG_COMPAT
2020 	.compat_setsockopt = compat_ip_setsockopt,
2021 	.compat_getsockopt = compat_ip_getsockopt,
2022 #endif
2023 	.mtu_reduced	   = tcp_v4_mtu_reduced,
2024 };
2025 EXPORT_SYMBOL(ipv4_specific);
2026 
2027 #ifdef CONFIG_TCP_MD5SIG
2028 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2029 	.md5_lookup		= tcp_v4_md5_lookup,
2030 	.calc_md5_hash		= tcp_v4_md5_hash_skb,
2031 	.md5_parse		= tcp_v4_parse_md5_keys,
2032 };
2033 #endif
2034 
2035 /* NOTE: A lot of things are set to zero explicitly by the call to
2036  *       sk_alloc(), so they need not be done here.
2037  */
2038 static int tcp_v4_init_sock(struct sock *sk)
2039 {
2040 	struct inet_connection_sock *icsk = inet_csk(sk);
2041 
2042 	tcp_init_sock(sk);
2043 
2044 	icsk->icsk_af_ops = &ipv4_specific;
2045 
2046 #ifdef CONFIG_TCP_MD5SIG
2047 	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2048 #endif
2049 
2050 	return 0;
2051 }
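
/* Illustrative user-space counterpart (not kernel code): tcp_v4_init_sock()
 * runs via tcp_prot.init whenever a process creates an IPv4 TCP socket, e.g.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 * The generic setup lives in tcp_init_sock(); only the IPv4-specific ops
 * hookup happens here.
 */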
2052 
2053 void tcp_v4_destroy_sock(struct sock *sk)
2054 {
2055 	struct tcp_sock *tp = tcp_sk(sk);
2056 
2057 	trace_tcp_destroy_sock(sk);
2058 
2059 	tcp_clear_xmit_timers(sk);
2060 
2061 	tcp_cleanup_congestion_control(sk);
2062 
2063 	tcp_cleanup_ulp(sk);
2064 
2065 	/* Clean up the write buffer. */
2066 	tcp_write_queue_purge(sk);
2067 
2068 	/* Check if we want to disable active TFO */
2069 	tcp_fastopen_active_disable_ofo_check(sk);
2070 
2071 	/* Cleans up our, hopefully empty, out_of_order_queue. */
2072 	skb_rbtree_purge(&tp->out_of_order_queue);
2073 
2074 #ifdef CONFIG_TCP_MD5SIG
2075 	/* Clean up the MD5 key list, if any */
2076 	if (tp->md5sig_info) {
2077 		tcp_clear_md5_list(sk);
2078 		kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
2079 		tp->md5sig_info = NULL;
2080 	}
2081 #endif
2082 
2083 	/* Clean up a referenced TCP bind bucket. */
2084 	if (inet_csk(sk)->icsk_bind_hash)
2085 		inet_put_port(sk);
2086 
2087 	BUG_ON(tp->fastopen_rsk);
2088 
2089 	/* If the socket was aborted during a connect() operation */
2090 	tcp_free_fastopen_req(tp);
2091 	tcp_fastopen_destroy_cipher(sk);
2092 	tcp_saved_syn_free(tp);
2093 
2094 	sk_sockets_allocated_dec(sk);
2095 }
2096 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2097 
2098 #ifdef CONFIG_PROC_FS
2099 /* Proc filesystem TCP sock list dumping. */
2100 
2101 /*
2102  * Get the next listener socket following cur.  If cur is NULL, get the first
2103  * socket starting from the bucket given in st->bucket; when st->bucket is
2104  * zero, the very first socket in the hash table is returned.
2105  */
2106 static void *listening_get_next(struct seq_file *seq, void *cur)
2107 {
2108 	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
2109 	struct tcp_iter_state *st = seq->private;
2110 	struct net *net = seq_file_net(seq);
2111 	struct inet_listen_hashbucket *ilb;
2112 	struct sock *sk = cur;
2113 
2114 	if (!sk) {
2115 get_head:
2116 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
2117 		spin_lock(&ilb->lock);
2118 		sk = sk_head(&ilb->head);
2119 		st->offset = 0;
2120 		goto get_sk;
2121 	}
2122 	ilb = &tcp_hashinfo.listening_hash[st->bucket];
2123 	++st->num;
2124 	++st->offset;
2125 
2126 	sk = sk_next(sk);
2127 get_sk:
2128 	sk_for_each_from(sk) {
2129 		if (!net_eq(sock_net(sk), net))
2130 			continue;
2131 		if (sk->sk_family == afinfo->family)
2132 			return sk;
2133 	}
2134 	spin_unlock(&ilb->lock);
2135 	st->offset = 0;
2136 	if (++st->bucket < INET_LHTABLE_SIZE)
2137 		goto get_head;
2138 	return NULL;
2139 }
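
/* Locking note (informal): when listening_get_next() returns a socket it
 * leaves that bucket's spinlock held; the lock is released either above,
 * when the walk moves past the bucket, or in tcp_seq_stop() when the
 * sequence is torn down mid-bucket.
 */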
2140 
2141 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2142 {
2143 	struct tcp_iter_state *st = seq->private;
2144 	void *rc;
2145 
2146 	st->bucket = 0;
2147 	st->offset = 0;
2148 	rc = listening_get_next(seq, NULL);
2149 
2150 	while (rc && *pos) {
2151 		rc = listening_get_next(seq, rc);
2152 		--*pos;
2153 	}
2154 	return rc;
2155 }
2156 
2157 static inline bool empty_bucket(const struct tcp_iter_state *st)
2158 {
2159 	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
2160 }
2161 
2162 /*
2163  * Get the first established socket, starting from the bucket given in st->bucket.
2164  * If st->bucket is zero, the very first socket in the hash is returned.
2165  */
2166 static void *established_get_first(struct seq_file *seq)
2167 {
2168 	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
2169 	struct tcp_iter_state *st = seq->private;
2170 	struct net *net = seq_file_net(seq);
2171 	void *rc = NULL;
2172 
2173 	st->offset = 0;
2174 	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2175 		struct sock *sk;
2176 		struct hlist_nulls_node *node;
2177 		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2178 
2179 		/* Lockless fast path for the common case of empty buckets */
2180 		if (empty_bucket(st))
2181 			continue;
2182 
2183 		spin_lock_bh(lock);
2184 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2185 			if (sk->sk_family != afinfo->family ||
2186 			    !net_eq(sock_net(sk), net)) {
2187 				continue;
2188 			}
2189 			rc = sk;
2190 			goto out;
2191 		}
2192 		spin_unlock_bh(lock);
2193 	}
2194 out:
2195 	return rc;
2196 }
2197 
2198 static void *established_get_next(struct seq_file *seq, void *cur)
2199 {
2200 	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
2201 	struct sock *sk = cur;
2202 	struct hlist_nulls_node *node;
2203 	struct tcp_iter_state *st = seq->private;
2204 	struct net *net = seq_file_net(seq);
2205 
2206 	++st->num;
2207 	++st->offset;
2208 
2209 	sk = sk_nulls_next(sk);
2210 
2211 	sk_nulls_for_each_from(sk, node) {
2212 		if (sk->sk_family == afinfo->family &&
2213 		    net_eq(sock_net(sk), net))
2214 			return sk;
2215 	}
2216 
2217 	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2218 	++st->bucket;
2219 	return established_get_first(seq);
2220 }
2221 
2222 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2223 {
2224 	struct tcp_iter_state *st = seq->private;
2225 	void *rc;
2226 
2227 	st->bucket = 0;
2228 	rc = established_get_first(seq);
2229 
2230 	while (rc && pos) {
2231 		rc = established_get_next(seq, rc);
2232 		--pos;
2233 	}
2234 	return rc;
2235 }
2236 
2237 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2238 {
2239 	void *rc;
2240 	struct tcp_iter_state *st = seq->private;
2241 
2242 	st->state = TCP_SEQ_STATE_LISTENING;
2243 	rc	  = listening_get_idx(seq, &pos);
2244 
2245 	if (!rc) {
2246 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2247 		rc	  = established_get_idx(seq, pos);
2248 	}
2249 
2250 	return rc;
2251 }
2252 
2253 static void *tcp_seek_last_pos(struct seq_file *seq)
2254 {
2255 	struct tcp_iter_state *st = seq->private;
2256 	int offset = st->offset;
2257 	int orig_num = st->num;
2258 	void *rc = NULL;
2259 
2260 	switch (st->state) {
2261 	case TCP_SEQ_STATE_LISTENING:
2262 		if (st->bucket >= INET_LHTABLE_SIZE)
2263 			break;
2264 		st->state = TCP_SEQ_STATE_LISTENING;
2265 		rc = listening_get_next(seq, NULL);
2266 		while (offset-- && rc)
2267 			rc = listening_get_next(seq, rc);
2268 		if (rc)
2269 			break;
2270 		st->bucket = 0;
2271 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2272 		/* Fallthrough */
2273 	case TCP_SEQ_STATE_ESTABLISHED:
2274 		if (st->bucket > tcp_hashinfo.ehash_mask)
2275 			break;
2276 		rc = established_get_first(seq);
2277 		while (offset-- && rc)
2278 			rc = established_get_next(seq, rc);
2279 	}
2280 
2281 	st->num = orig_num;
2282 
2283 	return rc;
2284 }
2285 
2286 void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2287 {
2288 	struct tcp_iter_state *st = seq->private;
2289 	void *rc;
2290 
2291 	if (*pos && *pos == st->last_pos) {
2292 		rc = tcp_seek_last_pos(seq);
2293 		if (rc)
2294 			goto out;
2295 	}
2296 
2297 	st->state = TCP_SEQ_STATE_LISTENING;
2298 	st->num = 0;
2299 	st->bucket = 0;
2300 	st->offset = 0;
2301 	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2302 
2303 out:
2304 	st->last_pos = *pos;
2305 	return rc;
2306 }
2307 EXPORT_SYMBOL(tcp_seq_start);
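
/* The last_pos shortcut above matters on busy machines: without it, every
 * read(2) chunk of a large /proc/net/tcp dump would restart the walk from
 * bucket zero, turning a sequential read into quadratic work.
 */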
2308 
2309 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2310 {
2311 	struct tcp_iter_state *st = seq->private;
2312 	void *rc = NULL;
2313 
2314 	if (v == SEQ_START_TOKEN) {
2315 		rc = tcp_get_idx(seq, 0);
2316 		goto out;
2317 	}
2318 
2319 	switch (st->state) {
2320 	case TCP_SEQ_STATE_LISTENING:
2321 		rc = listening_get_next(seq, v);
2322 		if (!rc) {
2323 			st->state = TCP_SEQ_STATE_ESTABLISHED;
2324 			st->bucket = 0;
2325 			st->offset = 0;
2326 			rc	  = established_get_first(seq);
2327 		}
2328 		break;
2329 	case TCP_SEQ_STATE_ESTABLISHED:
2330 		rc = established_get_next(seq, v);
2331 		break;
2332 	}
2333 out:
2334 	++*pos;
2335 	st->last_pos = *pos;
2336 	return rc;
2337 }
2338 EXPORT_SYMBOL(tcp_seq_next);
2339 
2340 void tcp_seq_stop(struct seq_file *seq, void *v)
2341 {
2342 	struct tcp_iter_state *st = seq->private;
2343 
2344 	switch (st->state) {
2345 	case TCP_SEQ_STATE_LISTENING:
2346 		if (v != SEQ_START_TOKEN)
2347 			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
2348 		break;
2349 	case TCP_SEQ_STATE_ESTABLISHED:
2350 		if (v)
2351 			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2352 		break;
2353 	}
2354 }
2355 EXPORT_SYMBOL(tcp_seq_stop);
2356 
2357 static void get_openreq4(const struct request_sock *req,
2358 			 struct seq_file *f, int i)
2359 {
2360 	const struct inet_request_sock *ireq = inet_rsk(req);
2361 	long delta = req->rsk_timer.expires - jiffies;
2362 
2363 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2364 		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2365 		i,
2366 		ireq->ir_loc_addr,
2367 		ireq->ir_num,
2368 		ireq->ir_rmt_addr,
2369 		ntohs(ireq->ir_rmt_port),
2370 		TCP_SYN_RECV,
2371 		0, 0, /* could print option size, but that is af dependent. */
2372 		1,    /* timers active (only the expire timer) */
2373 		jiffies_delta_to_clock_t(delta),
2374 		req->num_timeout,
2375 		from_kuid_munged(seq_user_ns(f),
2376 				 sock_i_uid(req->rsk_listener)),
2377 		0,  /* non-standard timer */
2378 		0, /* open_requests have no inode */
2379 		0,
2380 		req);
2381 }
2382 
2383 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2384 {
2385 	int timer_active;
2386 	unsigned long timer_expires;
2387 	const struct tcp_sock *tp = tcp_sk(sk);
2388 	const struct inet_connection_sock *icsk = inet_csk(sk);
2389 	const struct inet_sock *inet = inet_sk(sk);
2390 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2391 	__be32 dest = inet->inet_daddr;
2392 	__be32 src = inet->inet_rcv_saddr;
2393 	__u16 destp = ntohs(inet->inet_dport);
2394 	__u16 srcp = ntohs(inet->inet_sport);
2395 	int rx_queue;
2396 	int state;
2397 
2398 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2399 	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2400 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2401 		timer_active	= 1;
2402 		timer_expires	= icsk->icsk_timeout;
2403 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2404 		timer_active	= 4;
2405 		timer_expires	= icsk->icsk_timeout;
2406 	} else if (timer_pending(&sk->sk_timer)) {
2407 		timer_active	= 2;
2408 		timer_expires	= sk->sk_timer.expires;
2409 	} else {
2410 		timer_active	= 0;
2411 		timer_expires = jiffies;
2412 	}
2413 
2414 	state = inet_sk_state_load(sk);
2415 	if (state == TCP_LISTEN)
2416 		rx_queue = sk->sk_ack_backlog;
2417 	else
2418 		/* Because we don't lock the socket,
2419 		 * we might find a transient negative value.
2420 		 */
2421 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2422 
2423 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2424 			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2425 		i, src, srcp, dest, destp, state,
2426 		tp->write_seq - tp->snd_una,
2427 		rx_queue,
2428 		timer_active,
2429 		jiffies_delta_to_clock_t(timer_expires - jiffies),
2430 		icsk->icsk_retransmits,
2431 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2432 		icsk->icsk_probes_out,
2433 		sock_i_ino(sk),
2434 		refcount_read(&sk->sk_refcnt), sk,
2435 		jiffies_to_clock_t(icsk->icsk_rto),
2436 		jiffies_to_clock_t(icsk->icsk_ack.ato),
2437 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2438 		tp->snd_cwnd,
2439 		state == TCP_LISTEN ?
2440 		    fastopenq->max_qlen :
2441 		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2442 }
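
/* Example output line (illustrative), as consumed by tools such as netstat:
 *
 *	0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 ...
 *
 * i.e. 127.0.0.1:22 in TCP_LISTEN (0x0A): addresses are hex-encoded __be32
 * values (so they appear byte-swapped on little-endian hosts), while ports
 * are printed in host order.
 */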
2443 
2444 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2445 			       struct seq_file *f, int i)
2446 {
2447 	long delta = tw->tw_timer.expires - jiffies;
2448 	__be32 dest, src;
2449 	__u16 destp, srcp;
2450 
2451 	dest  = tw->tw_daddr;
2452 	src   = tw->tw_rcv_saddr;
2453 	destp = ntohs(tw->tw_dport);
2454 	srcp  = ntohs(tw->tw_sport);
2455 
2456 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2457 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2458 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2459 		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2460 		refcount_read(&tw->tw_refcnt), tw);
2461 }
2462 
2463 #define TMPSZ 150
2464 
2465 static int tcp4_seq_show(struct seq_file *seq, void *v)
2466 {
2467 	struct tcp_iter_state *st;
2468 	struct sock *sk = v;
2469 
2470 	seq_setwidth(seq, TMPSZ - 1);
2471 	if (v == SEQ_START_TOKEN) {
2472 		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2473 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2474 			   "inode");
2475 		goto out;
2476 	}
2477 	st = seq->private;
2478 
2479 	if (sk->sk_state == TCP_TIME_WAIT)
2480 		get_timewait4_sock(v, seq, st->num);
2481 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
2482 		get_openreq4(v, seq, st->num);
2483 	else
2484 		get_tcp4_sock(v, seq, st->num);
2485 out:
2486 	seq_pad(seq, '\n');
2487 	return 0;
2488 }
2489 
2490 static const struct seq_operations tcp4_seq_ops = {
2491 	.show		= tcp4_seq_show,
2492 	.start		= tcp_seq_start,
2493 	.next		= tcp_seq_next,
2494 	.stop		= tcp_seq_stop,
2495 };
2496 
2497 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2498 	.family		= AF_INET,
2499 };
2500 
2501 static int __net_init tcp4_proc_init_net(struct net *net)
2502 {
2503 	if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
2504 			sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
2505 		return -ENOMEM;
2506 	return 0;
2507 }
2508 
2509 static void __net_exit tcp4_proc_exit_net(struct net *net)
2510 {
2511 	remove_proc_entry("tcp", net->proc_net);
2512 }
2513 
2514 static struct pernet_operations tcp4_net_ops = {
2515 	.init = tcp4_proc_init_net,
2516 	.exit = tcp4_proc_exit_net,
2517 };
2518 
2519 int __init tcp4_proc_init(void)
2520 {
2521 	return register_pernet_subsys(&tcp4_net_ops);
2522 }
2523 
2524 void tcp4_proc_exit(void)
2525 {
2526 	unregister_pernet_subsys(&tcp4_net_ops);
2527 }
2528 #endif /* CONFIG_PROC_FS */
2529 
2530 struct proto tcp_prot = {
2531 	.name			= "TCP",
2532 	.owner			= THIS_MODULE,
2533 	.close			= tcp_close,
2534 	.pre_connect		= tcp_v4_pre_connect,
2535 	.connect		= tcp_v4_connect,
2536 	.disconnect		= tcp_disconnect,
2537 	.accept			= inet_csk_accept,
2538 	.ioctl			= tcp_ioctl,
2539 	.init			= tcp_v4_init_sock,
2540 	.destroy		= tcp_v4_destroy_sock,
2541 	.shutdown		= tcp_shutdown,
2542 	.setsockopt		= tcp_setsockopt,
2543 	.getsockopt		= tcp_getsockopt,
2544 	.keepalive		= tcp_set_keepalive,
2545 	.recvmsg		= tcp_recvmsg,
2546 	.sendmsg		= tcp_sendmsg,
2547 	.sendpage		= tcp_sendpage,
2548 	.backlog_rcv		= tcp_v4_do_rcv,
2549 	.release_cb		= tcp_release_cb,
2550 	.hash			= inet_hash,
2551 	.unhash			= inet_unhash,
2552 	.get_port		= inet_csk_get_port,
2553 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2554 	.leave_memory_pressure	= tcp_leave_memory_pressure,
2555 	.stream_memory_free	= tcp_stream_memory_free,
2556 	.sockets_allocated	= &tcp_sockets_allocated,
2557 	.orphan_count		= &tcp_orphan_count,
2558 	.memory_allocated	= &tcp_memory_allocated,
2559 	.memory_pressure	= &tcp_memory_pressure,
2560 	.sysctl_mem		= sysctl_tcp_mem,
2561 	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
2562 	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
2563 	.max_header		= MAX_TCP_HEADER,
2564 	.obj_size		= sizeof(struct tcp_sock),
2565 	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
2566 	.twsk_prot		= &tcp_timewait_sock_ops,
2567 	.rsk_prot		= &tcp_request_sock_ops,
2568 	.h.hashinfo		= &tcp_hashinfo,
2569 	.no_autobind		= true,
2570 #ifdef CONFIG_COMPAT
2571 	.compat_setsockopt	= compat_tcp_setsockopt,
2572 	.compat_getsockopt	= compat_tcp_getsockopt,
2573 #endif
2574 	.diag_destroy		= tcp_abort,
2575 };
2576 EXPORT_SYMBOL(tcp_prot);
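
/* Dispatch sketch (informal): most of the ops above are reached through the
 * AF_INET socket layer rather than called directly, e.g.
 *
 *	connect(2) -> inet_stream_connect() -> tcp_prot.connect
 *	accept(2)  -> inet_accept()         -> tcp_prot.accept
 *	sendmsg(2) -> inet_sendmsg()        -> tcp_prot.sendmsg
 */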
2577 
2578 static void __net_exit tcp_sk_exit(struct net *net)
2579 {
2580 	int cpu;
2581 
2582 	module_put(net->ipv4.tcp_congestion_control->owner);
2583 
2584 	for_each_possible_cpu(cpu)
2585 		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2586 	free_percpu(net->ipv4.tcp_sk);
2587 }
2588 
2589 static int __net_init tcp_sk_init(struct net *net)
2590 {
2591 	int res, cpu, cnt;
2592 
2593 	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2594 	if (!net->ipv4.tcp_sk)
2595 		return -ENOMEM;
2596 
2597 	for_each_possible_cpu(cpu) {
2598 		struct sock *sk;
2599 
2600 		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2601 					   IPPROTO_TCP, net);
2602 		if (res)
2603 			goto fail;
2604 		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2605 
2606 		/* Enforce IP_DF and IPID == 0 for RSTs and
2607 		 * ACKs sent in SYN-RECV and TIME-WAIT state.
2608 		 */
2609 		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
2610 
2611 		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2612 	}
2613 
2614 	net->ipv4.sysctl_tcp_ecn = 2;
2615 	net->ipv4.sysctl_tcp_ecn_fallback = 1;
2616 
2617 	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2618 	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2619 	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2620 
2621 	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2622 	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2623 	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2624 
2625 	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2626 	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2627 	net->ipv4.sysctl_tcp_syncookies = 1;
2628 	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2629 	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2630 	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2631 	net->ipv4.sysctl_tcp_orphan_retries = 0;
2632 	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2633 	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
2634 	net->ipv4.sysctl_tcp_tw_reuse = 2;
2635 
2636 	cnt = tcp_hashinfo.ehash_mask + 1;
2637 	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
2638 	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
2639 
2640 	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
2641 	net->ipv4.sysctl_tcp_sack = 1;
2642 	net->ipv4.sysctl_tcp_window_scaling = 1;
2643 	net->ipv4.sysctl_tcp_timestamps = 1;
2644 	net->ipv4.sysctl_tcp_early_retrans = 3;
2645 	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
2646 	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior.  */
2647 	net->ipv4.sysctl_tcp_retrans_collapse = 1;
2648 	net->ipv4.sysctl_tcp_max_reordering = 300;
2649 	net->ipv4.sysctl_tcp_dsack = 1;
2650 	net->ipv4.sysctl_tcp_app_win = 31;
2651 	net->ipv4.sysctl_tcp_adv_win_scale = 1;
2652 	net->ipv4.sysctl_tcp_frto = 2;
2653 	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
2654 	/* This limits the percentage of the congestion window which we
2655 	 * will allow a single TSO frame to consume.  Building TSO frames
2656 	 * which are too large can cause TCP streams to be bursty.
2657 	 */
2658 	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
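	/* e.g. with the default divisor of 3 and a congestion window of 30
	 * packets, a single TSO frame is limited to roughly 10 packets'
	 * worth of payload.
	 */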
2659 	/* Default TSQ limit of 16 TSO segments */
2660 	net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
2661 	/* rfc5961 challenge ack rate limiting */
2662 	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
2663 	net->ipv4.sysctl_tcp_min_tso_segs = 2;
2664 	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
2665 	net->ipv4.sysctl_tcp_autocorking = 1;
2666 	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
2667 	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
2668 	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
2669 	if (net != &init_net) {
2670 		memcpy(net->ipv4.sysctl_tcp_rmem,
2671 		       init_net.ipv4.sysctl_tcp_rmem,
2672 		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
2673 		memcpy(net->ipv4.sysctl_tcp_wmem,
2674 		       init_net.ipv4.sysctl_tcp_wmem,
2675 		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
2676 	}
2677 	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
2678 	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
2679 	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
2680 	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
2681 	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
2682 	atomic_set(&net->ipv4.tfo_active_disable_times, 0);
2683 
2684 	/* Reno is always built in */
2685 	if (!net_eq(net, &init_net) &&
2686 	    try_module_get(init_net.ipv4.tcp_congestion_control->owner))
2687 		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
2688 	else
2689 		net->ipv4.tcp_congestion_control = &tcp_reno;
2690 
2691 	return 0;
2692 fail:
2693 	tcp_sk_exit(net);
2694 
2695 	return res;
2696 }
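
/* Illustrative: the per-netns defaults above surface under /proc/sys, e.g.
 *
 *	$ cat /proc/sys/net/ipv4/tcp_syncookies
 *	1
 *
 * so a freshly created namespace starts with syncookies enabled and
 * tcp_tw_reuse == 2 (reuse TIME-WAIT sockets for loopback-routed
 * connections only).
 */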
2697 
2698 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2699 {
2700 	struct net *net;
2701 
2702 	inet_twsk_purge(&tcp_hashinfo, AF_INET);
2703 
2704 	list_for_each_entry(net, net_exit_list, exit_list)
2705 		tcp_fastopen_ctx_destroy(net);
2706 }
2707 
2708 static struct pernet_operations __net_initdata tcp_sk_ops = {
2709        .init	   = tcp_sk_init,
2710        .exit	   = tcp_sk_exit,
2711        .exit_batch = tcp_sk_exit_batch,
2712 };
2713 
2714 void __init tcp_v4_init(void)
2715 {
2716 	if (register_pernet_subsys(&tcp_sk_ops))
2717 		panic("Failed to create the TCP control socket.\n");
2718 }
2719