xref: /openbmc/linux/net/ipv6/tcp_ipv6.c (revision f0702555)
1 /*
2  *	TCP over IPv6
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	Based on:
9  *	linux/net/ipv4/tcp.c
10  *	linux/net/ipv4/tcp_input.c
11  *	linux/net/ipv4/tcp_output.c
12  *
13  *	Fixes:
14  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
15  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
16  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
17  *					a single port at the same time.
18  *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
19  *
20  *	This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25 
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46 
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/busy_poll.h>
65 
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
68 
69 #include <crypto/hash.h>
70 #include <linux/scatterlist.h>
71 
72 static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
73 static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
74 				      struct request_sock *req);
75 
76 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
77 
78 static const struct inet_connection_sock_af_ops ipv6_mapped;
79 static const struct inet_connection_sock_af_ops ipv6_specific;
80 #ifdef CONFIG_TCP_MD5SIG
81 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
83 #else
84 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
85 						   const struct in6_addr *addr)
86 {
87 	return NULL;
88 }
89 #endif
90 
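/* Cache the validated input route on the socket so the receive fast path
 * can reuse it: remember the dst itself, the incoming device index, and
 * the route cookie used later to revalidate the cached dst.
 */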
91 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
92 {
93 	struct dst_entry *dst = skb_dst(skb);
94 
95 	if (dst && dst_hold_safe(dst)) {
96 		const struct rt6_info *rt = (const struct rt6_info *)dst;
97 
98 		sk->sk_rx_dst = dst;
99 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
100 		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
101 	}
102 }
103 
104 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
105 {
106 	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
107 					    ipv6_hdr(skb)->saddr.s6_addr32,
108 					    tcp_hdr(skb)->dest,
109 					    tcp_hdr(skb)->source);
110 }
111 
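/* Active open. Roughly: validate the sockaddr and optional flow label,
 * map an unspecified destination to loopback, fall back to
 * tcp_v4_connect() for v4-mapped destinations, route the flow, choose a
 * source address, reserve a local port via inet6_hash_connect(), pick a
 * secure initial sequence number and finally send the SYN.
 *
 * For reference, the userspace view of this path is just the following
 * minimal sketch (hypothetical address, error handling omitted),
 * entered via inet_stream_connect():
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 sa = { .sin6_family = AF_INET6,
 *				   .sin6_port = htons(80) };
 *	inet_pton(AF_INET6, "2001:db8::1", &sa.sin6_addr);
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 */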
112 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
113 			  int addr_len)
114 {
115 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
116 	struct inet_sock *inet = inet_sk(sk);
117 	struct inet_connection_sock *icsk = inet_csk(sk);
118 	struct ipv6_pinfo *np = inet6_sk(sk);
119 	struct tcp_sock *tp = tcp_sk(sk);
120 	struct in6_addr *saddr = NULL, *final_p, final;
121 	struct ipv6_txoptions *opt;
122 	struct flowi6 fl6;
123 	struct dst_entry *dst;
124 	int addr_type;
125 	int err;
126 
127 	if (addr_len < SIN6_LEN_RFC2133)
128 		return -EINVAL;
129 
130 	if (usin->sin6_family != AF_INET6)
131 		return -EAFNOSUPPORT;
132 
133 	memset(&fl6, 0, sizeof(fl6));
134 
135 	if (np->sndflow) {
136 		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
137 		IP6_ECN_flow_init(fl6.flowlabel);
138 		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
139 			struct ip6_flowlabel *flowlabel;
140 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
141 			if (!flowlabel)
142 				return -EINVAL;
143 			fl6_sock_release(flowlabel);
144 		}
145 	}
146 
147 	/*
148 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
149 	 */
150 
151 	if (ipv6_addr_any(&usin->sin6_addr))
152 		usin->sin6_addr.s6_addr[15] = 0x1;
153 
154 	addr_type = ipv6_addr_type(&usin->sin6_addr);
155 
156 	if (addr_type & IPV6_ADDR_MULTICAST)
157 		return -ENETUNREACH;
158 
159 	if (addr_type&IPV6_ADDR_LINKLOCAL) {
160 		if (addr_len >= sizeof(struct sockaddr_in6) &&
161 		    usin->sin6_scope_id) {
162 			/* If an interface was set while binding, the
163 			 * indices must coincide.
164 			 */
165 			if (sk->sk_bound_dev_if &&
166 			    sk->sk_bound_dev_if != usin->sin6_scope_id)
167 				return -EINVAL;
168 
169 			sk->sk_bound_dev_if = usin->sin6_scope_id;
170 		}
171 
172 		/* Connecting to a link-local address requires an interface. */
173 		if (!sk->sk_bound_dev_if)
174 			return -EINVAL;
175 	}
176 
177 	if (tp->rx_opt.ts_recent_stamp &&
178 	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
179 		tp->rx_opt.ts_recent = 0;
180 		tp->rx_opt.ts_recent_stamp = 0;
181 		tp->write_seq = 0;
182 	}
183 
184 	sk->sk_v6_daddr = usin->sin6_addr;
185 	np->flow_label = fl6.flowlabel;
186 
187 	/*
188 	 *	TCP over IPv4
189 	 */
190 
191 	if (addr_type == IPV6_ADDR_MAPPED) {
192 		u32 exthdrlen = icsk->icsk_ext_hdr_len;
193 		struct sockaddr_in sin;
194 
195 		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
196 
197 		if (__ipv6_only_sock(sk))
198 			return -ENETUNREACH;
199 
200 		sin.sin_family = AF_INET;
201 		sin.sin_port = usin->sin6_port;
202 		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
203 
204 		icsk->icsk_af_ops = &ipv6_mapped;
205 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
206 #ifdef CONFIG_TCP_MD5SIG
207 		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
208 #endif
209 
210 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
211 
212 		if (err) {
213 			icsk->icsk_ext_hdr_len = exthdrlen;
214 			icsk->icsk_af_ops = &ipv6_specific;
215 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
216 #ifdef CONFIG_TCP_MD5SIG
217 			tp->af_specific = &tcp_sock_ipv6_specific;
218 #endif
219 			goto failure;
220 		}
221 		np->saddr = sk->sk_v6_rcv_saddr;
222 
223 		return err;
224 	}
225 
226 	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
227 		saddr = &sk->sk_v6_rcv_saddr;
228 
229 	fl6.flowi6_proto = IPPROTO_TCP;
230 	fl6.daddr = sk->sk_v6_daddr;
231 	fl6.saddr = saddr ? *saddr : np->saddr;
232 	fl6.flowi6_oif = sk->sk_bound_dev_if;
233 	fl6.flowi6_mark = sk->sk_mark;
234 	fl6.fl6_dport = usin->sin6_port;
235 	fl6.fl6_sport = inet->inet_sport;
236 
237 	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
238 	final_p = fl6_update_dst(&fl6, opt, &final);
239 
240 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
241 
242 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
243 	if (IS_ERR(dst)) {
244 		err = PTR_ERR(dst);
245 		goto failure;
246 	}
247 
248 	if (!saddr) {
249 		saddr = &fl6.saddr;
250 		sk->sk_v6_rcv_saddr = *saddr;
251 	}
252 
253 	/* set the source address */
254 	np->saddr = *saddr;
255 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
256 
257 	sk->sk_gso_type = SKB_GSO_TCPV6;
258 	ip6_dst_store(sk, dst, NULL, NULL);
259 
260 	if (tcp_death_row.sysctl_tw_recycle &&
261 	    !tp->rx_opt.ts_recent_stamp &&
262 	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
263 		tcp_fetch_timewait_stamp(sk, dst);
264 
265 	icsk->icsk_ext_hdr_len = 0;
266 	if (opt)
267 		icsk->icsk_ext_hdr_len = opt->opt_flen +
268 					 opt->opt_nflen;
269 
270 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
271 
272 	inet->inet_dport = usin->sin6_port;
273 
274 	tcp_set_state(sk, TCP_SYN_SENT);
275 	err = inet6_hash_connect(&tcp_death_row, sk);
276 	if (err)
277 		goto late_failure;
278 
279 	sk_set_txhash(sk);
280 
281 	if (!tp->write_seq && likely(!tp->repair))
282 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
283 							     sk->sk_v6_daddr.s6_addr32,
284 							     inet->inet_sport,
285 							     inet->inet_dport);
286 
287 	err = tcp_connect(sk);
288 	if (err)
289 		goto late_failure;
290 
291 	return 0;
292 
293 late_failure:
294 	tcp_set_state(sk, TCP_CLOSE);
295 	__sk_dst_reset(sk);
296 failure:
297 	inet->inet_dport = 0;
298 	sk->sk_route_caps = 0;
299 	return err;
300 }
301 
302 static void tcp_v6_mtu_reduced(struct sock *sk)
303 {
304 	struct dst_entry *dst;
305 
306 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
307 		return;
308 
309 	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
310 	if (!dst)
311 		return;
312 
313 	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
314 		tcp_sync_mss(sk, dst_mtu(dst));
315 		tcp_simple_retransmit(sk);
316 	}
317 }
318 
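/* ICMPv6 error handler. Looks up the socket the quoted TCP segment
 * belongs to, then acts on the error: NDISC_REDIRECT updates the cached
 * route, ICMPV6_PKT_TOOBIG feeds PMTU discovery (deferred if the socket
 * is owned by the user), and anything fatal during the handshake is
 * reported through sk_err.
 */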
319 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
320 		u8 type, u8 code, int offset, __be32 info)
321 {
322 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
323 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
324 	struct net *net = dev_net(skb->dev);
325 	struct request_sock *fastopen;
326 	struct ipv6_pinfo *np;
327 	struct tcp_sock *tp;
328 	__u32 seq, snd_una;
329 	struct sock *sk;
330 	bool fatal;
331 	int err;
332 
333 	sk = __inet6_lookup_established(net, &tcp_hashinfo,
334 					&hdr->daddr, th->dest,
335 					&hdr->saddr, ntohs(th->source),
336 					skb->dev->ifindex);
337 
338 	if (!sk) {
339 		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
340 				  ICMP6_MIB_INERRORS);
341 		return;
342 	}
343 
344 	if (sk->sk_state == TCP_TIME_WAIT) {
345 		inet_twsk_put(inet_twsk(sk));
346 		return;
347 	}
348 	seq = ntohl(th->seq);
349 	fatal = icmpv6_err_convert(type, code, &err);
350 	if (sk->sk_state == TCP_NEW_SYN_RECV)
351 		return tcp_req_err(sk, seq, fatal);
352 
353 	bh_lock_sock(sk);
354 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
355 		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
356 
357 	if (sk->sk_state == TCP_CLOSE)
358 		goto out;
359 
360 	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
361 		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
362 		goto out;
363 	}
364 
365 	tp = tcp_sk(sk);
366 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
367 	fastopen = tp->fastopen_rsk;
368 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
369 	if (sk->sk_state != TCP_LISTEN &&
370 	    !between(seq, snd_una, tp->snd_nxt)) {
371 		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
372 		goto out;
373 	}
374 
375 	np = inet6_sk(sk);
376 
377 	if (type == NDISC_REDIRECT) {
378 		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
379 
380 		if (dst)
381 			dst->ops->redirect(dst, sk, skb);
382 		goto out;
383 	}
384 
385 	if (type == ICMPV6_PKT_TOOBIG) {
386 		/* We are not interested in TCP_LISTEN and open_requests
387 		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
388 		 * they should go through unfragmented).
389 		 */
390 		if (sk->sk_state == TCP_LISTEN)
391 			goto out;
392 
393 		if (!ip6_sk_accept_pmtu(sk))
394 			goto out;
395 
396 		tp->mtu_info = ntohl(info);
397 		if (!sock_owned_by_user(sk))
398 			tcp_v6_mtu_reduced(sk);
399 		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
400 					   &tp->tsq_flags))
401 			sock_hold(sk);
402 		goto out;
403 	}
404 
405 
406 	/* Might be for a request_sock */
407 	switch (sk->sk_state) {
408 	case TCP_SYN_SENT:
409 	case TCP_SYN_RECV:
410 		/* Only in fast or simultaneous open. If a fast open socket
411 		 * is already accepted, it is treated as a connected one below.
412 		 */
413 		if (fastopen && !fastopen->sk)
414 			break;
415 
416 		if (!sock_owned_by_user(sk)) {
417 			sk->sk_err = err;
418 			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
419 
420 			tcp_done(sk);
421 		} else
422 			sk->sk_err_soft = err;
423 		goto out;
424 	}
425 
426 	if (!sock_owned_by_user(sk) && np->recverr) {
427 		sk->sk_err = err;
428 		sk->sk_error_report(sk);
429 	} else
430 		sk->sk_err_soft = err;
431 
432 out:
433 	bh_unlock_sock(sk);
434 	sock_put(sk);
435 }
436 
437 
438 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
439 			      struct flowi *fl,
440 			      struct request_sock *req,
441 			      struct tcp_fastopen_cookie *foc,
442 			      enum tcp_synack_type synack_type)
443 {
444 	struct inet_request_sock *ireq = inet_rsk(req);
445 	struct ipv6_pinfo *np = inet6_sk(sk);
446 	struct flowi6 *fl6 = &fl->u.ip6;
447 	struct sk_buff *skb;
448 	int err = -ENOMEM;
449 
450 	/* First, grab a route. */
451 	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
452 					       IPPROTO_TCP)) == NULL)
453 		goto done;
454 
455 	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
456 
457 	if (skb) {
458 		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
459 				    &ireq->ir_v6_rmt_addr);
460 
461 		fl6->daddr = ireq->ir_v6_rmt_addr;
462 		if (np->repflow && ireq->pktopts)
463 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
464 
465 		rcu_read_lock();
466 		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
467 			       np->tclass);
468 		rcu_read_unlock();
469 		err = net_xmit_eval(err);
470 	}
471 
472 done:
473 	return err;
474 }
475 
476 
477 static void tcp_v6_reqsk_destructor(struct request_sock *req)
478 {
479 	kfree_skb(inet_rsk(req)->pktopts);
480 }
481 
482 #ifdef CONFIG_TCP_MD5SIG
483 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
484 						   const struct in6_addr *addr)
485 {
486 	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
487 }
488 
489 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
490 						const struct sock *addr_sk)
491 {
492 	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
493 }
494 
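/* Userspace installs and removes keys through setsockopt(TCP_MD5SIG).
 * A minimal sketch (hypothetical peer address, error handling omitted);
 * a zero tcpm_keylen deletes the key for that peer:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */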
495 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
496 				 int optlen)
497 {
498 	struct tcp_md5sig cmd;
499 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
500 
501 	if (optlen < sizeof(cmd))
502 		return -EINVAL;
503 
504 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
505 		return -EFAULT;
506 
507 	if (sin6->sin6_family != AF_INET6)
508 		return -EINVAL;
509 
510 	if (!cmd.tcpm_keylen) {
511 		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
512 			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
513 					      AF_INET);
514 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
515 				      AF_INET6);
516 	}
517 
518 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
519 		return -EINVAL;
520 
521 	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
522 		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
523 				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
524 
525 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
526 			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
527 }
528 
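/* The RFC 2385 digest covers, in order: an IPv6 pseudo-header (saddr,
 * daddr, length, protocol), the TCP header with its checksum field
 * zeroed, the payload when a full skb is signed, and finally the key
 * itself. The helpers below feed those pieces into the per-CPU md5
 * pool.
 */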
529 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
530 					const struct in6_addr *daddr,
531 					const struct in6_addr *saddr, int nbytes)
532 {
533 	struct tcp6_pseudohdr *bp;
534 	struct scatterlist sg;
535 
536 	bp = &hp->md5_blk.ip6;
537 	/* 1. TCP pseudo-header (RFC2460) */
538 	bp->saddr = *saddr;
539 	bp->daddr = *daddr;
540 	bp->protocol = cpu_to_be32(IPPROTO_TCP);
541 	bp->len = cpu_to_be32(nbytes);
542 
543 	sg_init_one(&sg, bp, sizeof(*bp));
544 	ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
545 	return crypto_ahash_update(hp->md5_req);
546 }
547 
548 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
549 			       const struct in6_addr *daddr, struct in6_addr *saddr,
550 			       const struct tcphdr *th)
551 {
552 	struct tcp_md5sig_pool *hp;
553 	struct ahash_request *req;
554 
555 	hp = tcp_get_md5sig_pool();
556 	if (!hp)
557 		goto clear_hash_noput;
558 	req = hp->md5_req;
559 
560 	if (crypto_ahash_init(req))
561 		goto clear_hash;
562 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
563 		goto clear_hash;
564 	if (tcp_md5_hash_header(hp, th))
565 		goto clear_hash;
566 	if (tcp_md5_hash_key(hp, key))
567 		goto clear_hash;
568 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
569 	if (crypto_ahash_final(req))
570 		goto clear_hash;
571 
572 	tcp_put_md5sig_pool();
573 	return 0;
574 
575 clear_hash:
576 	tcp_put_md5sig_pool();
577 clear_hash_noput:
578 	memset(md5_hash, 0, 16);
579 	return 1;
580 }
581 
582 static int tcp_v6_md5_hash_skb(char *md5_hash,
583 			       const struct tcp_md5sig_key *key,
584 			       const struct sock *sk,
585 			       const struct sk_buff *skb)
586 {
587 	const struct in6_addr *saddr, *daddr;
588 	struct tcp_md5sig_pool *hp;
589 	struct ahash_request *req;
590 	const struct tcphdr *th = tcp_hdr(skb);
591 
592 	if (sk) { /* valid for establish/request sockets */
593 		saddr = &sk->sk_v6_rcv_saddr;
594 		daddr = &sk->sk_v6_daddr;
595 	} else {
596 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
597 		saddr = &ip6h->saddr;
598 		daddr = &ip6h->daddr;
599 	}
600 
601 	hp = tcp_get_md5sig_pool();
602 	if (!hp)
603 		goto clear_hash_noput;
604 	req = hp->md5_req;
605 
606 	if (crypto_ahash_init(req))
607 		goto clear_hash;
608 
609 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
610 		goto clear_hash;
611 	if (tcp_md5_hash_header(hp, th))
612 		goto clear_hash;
613 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
614 		goto clear_hash;
615 	if (tcp_md5_hash_key(hp, key))
616 		goto clear_hash;
617 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
618 	if (crypto_ahash_final(req))
619 		goto clear_hash;
620 
621 	tcp_put_md5sig_pool();
622 	return 0;
623 
624 clear_hash:
625 	tcp_put_md5sig_pool();
626 clear_hash_noput:
627 	memset(md5_hash, 0, 16);
628 	return 1;
629 }
630 
631 #endif
632 
633 static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
634 				    const struct sk_buff *skb)
635 {
636 #ifdef CONFIG_TCP_MD5SIG
637 	const __u8 *hash_location = NULL;
638 	struct tcp_md5sig_key *hash_expected;
639 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
640 	const struct tcphdr *th = tcp_hdr(skb);
641 	int genhash;
642 	u8 newhash[16];
643 
644 	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
645 	hash_location = tcp_parse_md5sig_option(th);
646 
647 	/* We've parsed the options - do we have a hash? */
648 	if (!hash_expected && !hash_location)
649 		return false;
650 
651 	if (hash_expected && !hash_location) {
652 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
653 		return true;
654 	}
655 
656 	if (!hash_expected && hash_location) {
657 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
658 		return true;
659 	}
660 
661 	/* check the signature */
662 	genhash = tcp_v6_md5_hash_skb(newhash,
663 				      hash_expected,
664 				      NULL, skb);
665 
666 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
667 		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
668 				     genhash ? "failed" : "mismatch",
669 				     &ip6h->saddr, ntohs(th->source),
670 				     &ip6h->daddr, ntohs(th->dest));
671 		return true;
672 	}
673 #endif
674 	return false;
675 }
676 
677 static void tcp_v6_init_req(struct request_sock *req,
678 			    const struct sock *sk_listener,
679 			    struct sk_buff *skb)
680 {
681 	struct inet_request_sock *ireq = inet_rsk(req);
682 	const struct ipv6_pinfo *np = inet6_sk(sk_listener);
683 
684 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
685 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
686 
687 	/* So that link-local addresses have meaning. */
688 	if (!sk_listener->sk_bound_dev_if &&
689 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
690 		ireq->ir_iif = tcp_v6_iif(skb);
691 
692 	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
693 	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
694 	     np->rxopt.bits.rxinfo ||
695 	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
696 	     np->rxopt.bits.rxohlim || np->repflow)) {
697 		atomic_inc(&skb->users);
698 		ireq->pktopts = skb;
699 	}
700 }
701 
702 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
703 					  struct flowi *fl,
704 					  const struct request_sock *req,
705 					  bool *strict)
706 {
707 	if (strict)
708 		*strict = true;
709 	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
710 }
711 
712 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
713 	.family		=	AF_INET6,
714 	.obj_size	=	sizeof(struct tcp6_request_sock),
715 	.rtx_syn_ack	=	tcp_rtx_synack,
716 	.send_ack	=	tcp_v6_reqsk_send_ack,
717 	.destructor	=	tcp_v6_reqsk_destructor,
718 	.send_reset	=	tcp_v6_send_reset,
719 	.syn_ack_timeout =	tcp_syn_ack_timeout,
720 };
721 
722 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
723 	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
724 				sizeof(struct ipv6hdr),
725 #ifdef CONFIG_TCP_MD5SIG
726 	.req_md5_lookup	=	tcp_v6_md5_lookup,
727 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
728 #endif
729 	.init_req	=	tcp_v6_init_req,
730 #ifdef CONFIG_SYN_COOKIES
731 	.cookie_init_seq =	cookie_v6_init_sequence,
732 #endif
733 	.route_req	=	tcp_v6_route_req,
734 	.init_seq	=	tcp_v6_init_sequence,
735 	.send_synack	=	tcp_v6_send_synack,
736 };
737 
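/* Build and send a bare ACK or RST in reply to @skb without relying on
 * any full socket state. The reply is assembled in a fresh skb sized
 * for the TCP header plus the optional aligned timestamp and MD5 option
 * blocks, addresses and ports are swapped from the incoming segment,
 * and the result is pushed out through the per-netns control socket.
 */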
738 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
739 				 u32 ack, u32 win, u32 tsval, u32 tsecr,
740 				 int oif, struct tcp_md5sig_key *key, int rst,
741 				 u8 tclass, u32 label)
742 {
743 	const struct tcphdr *th = tcp_hdr(skb);
744 	struct tcphdr *t1;
745 	struct sk_buff *buff;
746 	struct flowi6 fl6;
747 	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
748 	struct sock *ctl_sk = net->ipv6.tcp_sk;
749 	unsigned int tot_len = sizeof(struct tcphdr);
750 	struct dst_entry *dst;
751 	__be32 *topt;
752 
753 	if (tsecr)
754 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
755 #ifdef CONFIG_TCP_MD5SIG
756 	if (key)
757 		tot_len += TCPOLEN_MD5SIG_ALIGNED;
758 #endif
759 
760 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
761 			 GFP_ATOMIC);
762 	if (!buff)
763 		return;
764 
765 	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
766 
767 	t1 = (struct tcphdr *) skb_push(buff, tot_len);
768 	skb_reset_transport_header(buff);
769 
770 	/* Swap the source and the destination. */
771 	memset(t1, 0, sizeof(*t1));
772 	t1->dest = th->source;
773 	t1->source = th->dest;
774 	t1->doff = tot_len / 4;
775 	t1->seq = htonl(seq);
776 	t1->ack_seq = htonl(ack);
777 	t1->ack = !rst || !th->ack;
778 	t1->rst = rst;
779 	t1->window = htons(win);
780 
781 	topt = (__be32 *)(t1 + 1);
782 
783 	if (tsecr) {
784 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
785 				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
786 		*topt++ = htonl(tsval);
787 		*topt++ = htonl(tsecr);
788 	}
789 
790 #ifdef CONFIG_TCP_MD5SIG
791 	if (key) {
792 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
793 				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
794 		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
795 				    &ipv6_hdr(skb)->saddr,
796 				    &ipv6_hdr(skb)->daddr, t1);
797 	}
798 #endif
799 
800 	memset(&fl6, 0, sizeof(fl6));
801 	fl6.daddr = ipv6_hdr(skb)->saddr;
802 	fl6.saddr = ipv6_hdr(skb)->daddr;
803 	fl6.flowlabel = label;
804 
805 	buff->ip_summed = CHECKSUM_PARTIAL;
806 	buff->csum = 0;
807 
808 	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
809 
810 	fl6.flowi6_proto = IPPROTO_TCP;
811 	if (rt6_need_strict(&fl6.daddr) && !oif)
812 		fl6.flowi6_oif = tcp_v6_iif(skb);
813 	else {
814 		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
815 			oif = skb->skb_iif;
816 
817 		fl6.flowi6_oif = oif;
818 	}
819 
820 	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
821 	fl6.fl6_dport = t1->dest;
822 	fl6.fl6_sport = t1->source;
823 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
824 
825 	/* Pass the control socket to ip6_dst_lookup_flow even when the
826 	 * reply is a RST; the underlying function uses it to retrieve
827 	 * the network namespace.
828 	 */
829 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
830 	if (!IS_ERR(dst)) {
831 		skb_dst_set(buff, dst);
832 		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
833 		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
834 		if (rst)
835 			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
836 		return;
837 	}
838 
839 	kfree_skb(buff);
840 }
841 
842 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
843 {
844 	const struct tcphdr *th = tcp_hdr(skb);
845 	u32 seq = 0, ack_seq = 0;
846 	struct tcp_md5sig_key *key = NULL;
847 #ifdef CONFIG_TCP_MD5SIG
848 	const __u8 *hash_location = NULL;
849 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
850 	unsigned char newhash[16];
851 	int genhash;
852 	struct sock *sk1 = NULL;
853 #endif
854 	int oif;
855 
856 	if (th->rst)
857 		return;
858 
859 	/* If sk is not NULL, we did a successful lookup and the incoming
860 	 * route had to be correct; the prequeue might have dropped our dst.
861 	 */
862 	if (!sk && !ipv6_unicast_destination(skb))
863 		return;
864 
865 #ifdef CONFIG_TCP_MD5SIG
866 	rcu_read_lock();
867 	hash_location = tcp_parse_md5sig_option(th);
868 	if (sk && sk_fullsock(sk)) {
869 		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
870 	} else if (hash_location) {
871 		/*
872 		 * The active side is gone. Try to find the listening socket
873 		 * through the source port, then find the md5 key through it.
874 		 * We do not lose security here: the incoming packet is
875 		 * checked against the key we find, and no RST is generated
876 		 * if the md5 hash doesn't match.
877 		 */
878 		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
879 					   &tcp_hashinfo, NULL, 0,
880 					   &ipv6h->saddr,
881 					   th->source, &ipv6h->daddr,
882 					   ntohs(th->source), tcp_v6_iif(skb));
883 		if (!sk1)
884 			goto out;
885 
886 		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
887 		if (!key)
888 			goto out;
889 
890 		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
891 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
892 			goto out;
893 	}
894 #endif
895 
896 	if (th->ack)
897 		seq = ntohl(th->ack_seq);
898 	else
899 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
900 			  (th->doff << 2);
901 
902 	oif = sk ? sk->sk_bound_dev_if : 0;
903 	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
904 
905 #ifdef CONFIG_TCP_MD5SIG
906 out:
907 	rcu_read_unlock();
908 #endif
909 }
910 
911 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
912 			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
913 			    struct tcp_md5sig_key *key, u8 tclass,
914 			    u32 label)
915 {
916 	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
917 			     tclass, label);
918 }
919 
920 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
921 {
922 	struct inet_timewait_sock *tw = inet_twsk(sk);
923 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
924 
925 	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
926 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
927 			tcp_time_stamp + tcptw->tw_ts_offset,
928 			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
929 			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
930 
931 	inet_twsk_put(tw);
932 }
933 
934 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
935 				  struct request_sock *req)
936 {
937 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
938 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
939 	 */
940 	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
941 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
942 			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
943 			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
944 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
945 			0, 0);
946 }
947 
948 
949 static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
950 {
951 #ifdef CONFIG_SYN_COOKIES
952 	const struct tcphdr *th = tcp_hdr(skb);
953 
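	/* Syncookies are validated on the returning ACK; a SYN carries
	 * no cookie, so only non-SYN segments are checked here.
	 */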
954 	if (!th->syn)
955 		sk = cookie_v6_check(sk, skb);
956 #endif
957 	return sk;
958 }
959 
960 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
961 {
962 	if (skb->protocol == htons(ETH_P_IP))
963 		return tcp_v4_conn_request(sk, skb);
964 
965 	if (!ipv6_unicast_destination(skb))
966 		goto drop;
967 
968 	return tcp_conn_request(&tcp6_request_sock_ops,
969 				&tcp_request_sock_ipv6_ops, sk, skb);
970 
971 drop:
972 	tcp_listendrop(sk);
973 	return 0; /* don't send reset */
974 }
975 
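/* Create the child socket once the final ACK of the handshake (or a
 * valid Fast Open SYN) has arrived. For a v4-mapped peer the work is
 * delegated to tcp_v4_syn_recv_sock() and the child is then patched to
 * use the mapped af_ops; otherwise the request's addresses, options and
 * (optionally) MD5 key are copied onto the freshly created socket.
 */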
976 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
977 					 struct request_sock *req,
978 					 struct dst_entry *dst,
979 					 struct request_sock *req_unhash,
980 					 bool *own_req)
981 {
982 	struct inet_request_sock *ireq;
983 	struct ipv6_pinfo *newnp;
984 	const struct ipv6_pinfo *np = inet6_sk(sk);
985 	struct ipv6_txoptions *opt;
986 	struct tcp6_sock *newtcp6sk;
987 	struct inet_sock *newinet;
988 	struct tcp_sock *newtp;
989 	struct sock *newsk;
990 #ifdef CONFIG_TCP_MD5SIG
991 	struct tcp_md5sig_key *key;
992 #endif
993 	struct flowi6 fl6;
994 
995 	if (skb->protocol == htons(ETH_P_IP)) {
996 		/*
997 		 *	v6 mapped
998 		 */
999 
1000 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1001 					     req_unhash, own_req);
1002 
1003 		if (!newsk)
1004 			return NULL;
1005 
1006 		newtcp6sk = (struct tcp6_sock *)newsk;
1007 		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1008 
1009 		newinet = inet_sk(newsk);
1010 		newnp = inet6_sk(newsk);
1011 		newtp = tcp_sk(newsk);
1012 
1013 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1014 
1015 		newnp->saddr = newsk->sk_v6_rcv_saddr;
1016 
1017 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1018 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1019 #ifdef CONFIG_TCP_MD5SIG
1020 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1021 #endif
1022 
1023 		newnp->ipv6_ac_list = NULL;
1024 		newnp->ipv6_fl_list = NULL;
1025 		newnp->pktoptions  = NULL;
1026 		newnp->opt	   = NULL;
1027 		newnp->mcast_oif   = tcp_v6_iif(skb);
1028 		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1029 		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1030 		if (np->repflow)
1031 			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1032 
1033 		/*
1034 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1035 		 * here, tcp_create_openreq_child now does this for us, see the comment in
1036 		 * that function for the gory details. -acme
1037 		 */
1038 
1039 		/* This is a tricky place. Until this moment the IPv4 tcp
1040 		   code worked with the IPv6 icsk.icsk_af_ops.
1041 		   Sync it now.
1042 		 */
1043 		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1044 
1045 		return newsk;
1046 	}
1047 
1048 	ireq = inet_rsk(req);
1049 
1050 	if (sk_acceptq_is_full(sk))
1051 		goto out_overflow;
1052 
1053 	if (!dst) {
1054 		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1055 		if (!dst)
1056 			goto out;
1057 	}
1058 
1059 	newsk = tcp_create_openreq_child(sk, req, skb);
1060 	if (!newsk)
1061 		goto out_nonewsk;
1062 
1063 	/*
1064 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1065 	 * count here, tcp_create_openreq_child now does this for us, see the
1066 	 * comment in that function for the gory details. -acme
1067 	 */
1068 
1069 	newsk->sk_gso_type = SKB_GSO_TCPV6;
1070 	ip6_dst_store(newsk, dst, NULL, NULL);
1071 	inet6_sk_rx_dst_set(newsk, skb);
1072 
1073 	newtcp6sk = (struct tcp6_sock *)newsk;
1074 	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1075 
1076 	newtp = tcp_sk(newsk);
1077 	newinet = inet_sk(newsk);
1078 	newnp = inet6_sk(newsk);
1079 
1080 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1081 
1082 	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1083 	newnp->saddr = ireq->ir_v6_loc_addr;
1084 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1085 	newsk->sk_bound_dev_if = ireq->ir_iif;
1086 
1087 	/* Now IPv6 options...
1088 
1089 	   First: no IPv4 options.
1090 	 */
1091 	newinet->inet_opt = NULL;
1092 	newnp->ipv6_ac_list = NULL;
1093 	newnp->ipv6_fl_list = NULL;
1094 
1095 	/* Clone RX bits */
1096 	newnp->rxopt.all = np->rxopt.all;
1097 
1098 	newnp->pktoptions = NULL;
1099 	newnp->opt	  = NULL;
1100 	newnp->mcast_oif  = tcp_v6_iif(skb);
1101 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1102 	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1103 	if (np->repflow)
1104 		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1105 
1106 	/* Clone native IPv6 options from listening socket (if any)
1107 
1108 	   Yes, keeping a reference count would be much more clever,
1109 	   but we do one more thing here: we reattach optmem
1110 	   to newsk.
1111 	 */
1112 	opt = rcu_dereference(np->opt);
1113 	if (opt) {
1114 		opt = ipv6_dup_options(newsk, opt);
1115 		RCU_INIT_POINTER(newnp->opt, opt);
1116 	}
1117 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1118 	if (opt)
1119 		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1120 						    opt->opt_flen;
1121 
1122 	tcp_ca_openreq_child(newsk, dst);
1123 
1124 	tcp_sync_mss(newsk, dst_mtu(dst));
1125 	newtp->advmss = dst_metric_advmss(dst);
1126 	if (tcp_sk(sk)->rx_opt.user_mss &&
1127 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1128 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1129 
1130 	tcp_initialize_rcv_mss(newsk);
1131 
1132 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1133 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1134 
1135 #ifdef CONFIG_TCP_MD5SIG
1136 	/* Copy over the MD5 key from the original socket */
1137 	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1138 	if (key) {
1139 		/* We're using one, so create a matching key
1140 		 * on the newsk structure. If we fail to get
1141 		 * memory, then we end up not copying the key
1142 		 * across. Shucks.
1143 		 */
1144 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1145 			       AF_INET6, key->key, key->keylen,
1146 			       sk_gfp_mask(sk, GFP_ATOMIC));
1147 	}
1148 #endif
1149 
1150 	if (__inet_inherit_port(sk, newsk) < 0) {
1151 		inet_csk_prepare_forced_close(newsk);
1152 		tcp_done(newsk);
1153 		goto out;
1154 	}
1155 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1156 	if (*own_req) {
1157 		tcp_move_syn(newtp, req);
1158 
1159 		/* Clone pktoptions received with SYN, if we own the req */
1160 		if (ireq->pktopts) {
1161 			newnp->pktoptions = skb_clone(ireq->pktopts,
1162 						      sk_gfp_mask(sk, GFP_ATOMIC));
1163 			consume_skb(ireq->pktopts);
1164 			ireq->pktopts = NULL;
1165 			if (newnp->pktoptions)
1166 				skb_set_owner_r(newnp->pktoptions, newsk);
1167 		}
1168 	}
1169 
1170 	return newsk;
1171 
1172 out_overflow:
1173 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1174 out_nonewsk:
1175 	dst_release(dst);
1176 out:
1177 	tcp_listendrop(sk);
1178 	return NULL;
1179 }
1180 
1181 /* The socket must have its spinlock held when we get
1182  * here, unless it is a TCP_LISTEN socket.
1183  *
1184  * We have a potential double-lock case here, so even when
1185  * doing backlog processing we use the BH locking scheme.
1186  * This is because we cannot sleep with the original spinlock
1187  * held.
1188  */
1189 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1190 {
1191 	struct ipv6_pinfo *np = inet6_sk(sk);
1192 	struct tcp_sock *tp;
1193 	struct sk_buff *opt_skb = NULL;
1194 
1195 	/* Imagine: the socket is IPv6, but an IPv4 packet arrives,
1196 	   goes to the IPv4 receive handler and is backlogged.
1197 	   From the backlog it always ends up here. Kerboom...
1198 	   Fortunately, tcp_rcv_established() handles
1199 	   these correctly, but that is not the case with
1200 	   tcp_v6_hnd_req() and tcp_v6_send_reset().   --ANK
1201 	 */
1202 
1203 	if (skb->protocol == htons(ETH_P_IP))
1204 		return tcp_v4_do_rcv(sk, skb);
1205 
1206 	if (sk_filter(sk, skb))
1207 		goto discard;
1208 
1209 	/*
1210 	 *	socket locking is here for SMP purposes as backlog rcv
1211 	 *	is currently called with bh processing disabled.
1212 	 */
1213 
1214 	/* Do Stevens' IPV6_PKTOPTIONS.
1215 
1216 	   Yes, guys, this is the only place in our code where we
1217 	   can do this without affecting IPv4.
1218 	   The rest of the code is protocol independent,
1219 	   and I do not like the idea of uglifying IPv4.
1220 
1221 	   Actually, the whole idea behind IPV6_PKTOPTIONS
1222 	   does not look very well thought out. For now we latch
1223 	   the options received in the last packet enqueued
1224 	   by tcp. Feel free to propose a better solution.
1225 					       --ANK (980728)
1226 	 */
1227 	if (np->rxopt.all)
1228 		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1229 
1230 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1231 		struct dst_entry *dst = sk->sk_rx_dst;
1232 
1233 		sock_rps_save_rxhash(sk, skb);
1234 		sk_mark_napi_id(sk, skb);
1235 		if (dst) {
1236 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1237 			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1238 				dst_release(dst);
1239 				sk->sk_rx_dst = NULL;
1240 			}
1241 		}
1242 
1243 		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1244 		if (opt_skb)
1245 			goto ipv6_pktoptions;
1246 		return 0;
1247 	}
1248 
1249 	if (tcp_checksum_complete(skb))
1250 		goto csum_err;
1251 
1252 	if (sk->sk_state == TCP_LISTEN) {
1253 		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1254 
1255 		if (!nsk)
1256 			goto discard;
1257 
1258 		if (nsk != sk) {
1259 			sock_rps_save_rxhash(nsk, skb);
1260 			sk_mark_napi_id(nsk, skb);
1261 			if (tcp_child_process(sk, nsk, skb))
1262 				goto reset;
1263 			if (opt_skb)
1264 				__kfree_skb(opt_skb);
1265 			return 0;
1266 		}
1267 	} else
1268 		sock_rps_save_rxhash(sk, skb);
1269 
1270 	if (tcp_rcv_state_process(sk, skb))
1271 		goto reset;
1272 	if (opt_skb)
1273 		goto ipv6_pktoptions;
1274 	return 0;
1275 
1276 reset:
1277 	tcp_v6_send_reset(sk, skb);
1278 discard:
1279 	if (opt_skb)
1280 		__kfree_skb(opt_skb);
1281 	kfree_skb(skb);
1282 	return 0;
1283 csum_err:
1284 	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1285 	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1286 	goto discard;
1287 
1288 
1289 ipv6_pktoptions:
1290 	/* What is going on here?
1291 
1292 	   1. skb was enqueued by tcp.
1293 	   2. skb was added to the tail of the read queue, not out of order.
1294 	   3. The socket is not in a passive state.
1295 	   4. Finally, it really contains options the user wants to receive.
1296 	 */
1297 	tp = tcp_sk(sk);
1298 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1299 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1300 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1301 			np->mcast_oif = tcp_v6_iif(opt_skb);
1302 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1303 			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1304 		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1305 			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1306 		if (np->repflow)
1307 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1308 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1309 			skb_set_owner_r(opt_skb, sk);
1310 			opt_skb = xchg(&np->pktoptions, opt_skb);
1311 		} else {
1312 			__kfree_skb(opt_skb);
1313 			opt_skb = xchg(&np->pktoptions, NULL);
1314 		}
1315 	}
1316 
1317 	kfree_skb(opt_skb);
1318 	return 0;
1319 }
1320 
1321 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1322 			   const struct tcphdr *th)
1323 {
1324 	/* This is tricky: we move IP6CB to its correct location inside
1325 	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1326 	 * _decode_session6() uses IP6CB().
1327 	 * barrier() makes sure compiler won't play aliasing games.
1328 	 */
1329 	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1330 		sizeof(struct inet6_skb_parm));
1331 	barrier();
1332 
1333 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1334 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1335 				    skb->len - th->doff*4);
1336 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1337 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1338 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1339 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1340 	TCP_SKB_CB(skb)->sacked = 0;
1341 }
1342 
1343 static void tcp_v6_restore_cb(struct sk_buff *skb)
1344 {
1345 	/* We need to move the header back to the beginning if xfrm6_policy_check()
1346 	 * and tcp_v6_fill_cb() are going to be called again.
1347 	 */
1348 	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1349 		sizeof(struct inet6_skb_parm));
1350 }
1351 
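/* Main receive path, called for every TCP segment that arrives on an
 * IPv6 interface: validate and checksum the header, look the owning
 * socket up in the established/listening hashes, let TIME_WAIT and
 * NEW_SYN_RECV pseudo-sockets handle their special cases, and deliver
 * the segment directly, via the prequeue, or via the socket backlog
 * depending on who currently owns the socket.
 */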
1352 static int tcp_v6_rcv(struct sk_buff *skb)
1353 {
1354 	const struct tcphdr *th;
1355 	const struct ipv6hdr *hdr;
1356 	bool refcounted;
1357 	struct sock *sk;
1358 	int ret;
1359 	struct net *net = dev_net(skb->dev);
1360 
1361 	if (skb->pkt_type != PACKET_HOST)
1362 		goto discard_it;
1363 
1364 	/*
1365 	 *	Count it even if it's bad.
1366 	 */
1367 	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1368 
1369 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1370 		goto discard_it;
1371 
1372 	th = (const struct tcphdr *)skb->data;
1373 
1374 	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1375 		goto bad_packet;
1376 	if (!pskb_may_pull(skb, th->doff*4))
1377 		goto discard_it;
1378 
1379 	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1380 		goto csum_error;
1381 
1382 	th = (const struct tcphdr *)skb->data;
1383 	hdr = ipv6_hdr(skb);
1384 
1385 lookup:
1386 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1387 				th->source, th->dest, inet6_iif(skb),
1388 				&refcounted);
1389 	if (!sk)
1390 		goto no_tcp_socket;
1391 
1392 process:
1393 	if (sk->sk_state == TCP_TIME_WAIT)
1394 		goto do_time_wait;
1395 
1396 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1397 		struct request_sock *req = inet_reqsk(sk);
1398 		struct sock *nsk;
1399 
1400 		sk = req->rsk_listener;
1401 		tcp_v6_fill_cb(skb, hdr, th);
1402 		if (tcp_v6_inbound_md5_hash(sk, skb)) {
1403 			reqsk_put(req);
1404 			goto discard_it;
1405 		}
1406 		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1407 			inet_csk_reqsk_queue_drop_and_put(sk, req);
1408 			goto lookup;
1409 		}
1410 		sock_hold(sk);
1411 		refcounted = true;
1412 		nsk = tcp_check_req(sk, skb, req, false);
1413 		if (!nsk) {
1414 			reqsk_put(req);
1415 			goto discard_and_relse;
1416 		}
1417 		if (nsk == sk) {
1418 			reqsk_put(req);
1419 			tcp_v6_restore_cb(skb);
1420 		} else if (tcp_child_process(sk, nsk, skb)) {
1421 			tcp_v6_send_reset(nsk, skb);
1422 			goto discard_and_relse;
1423 		} else {
1424 			sock_put(sk);
1425 			return 0;
1426 		}
1427 	}
1428 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1429 		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1430 		goto discard_and_relse;
1431 	}
1432 
1433 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1434 		goto discard_and_relse;
1435 
1436 	tcp_v6_fill_cb(skb, hdr, th);
1437 
1438 	if (tcp_v6_inbound_md5_hash(sk, skb))
1439 		goto discard_and_relse;
1440 
1441 	if (sk_filter(sk, skb))
1442 		goto discard_and_relse;
1443 
1444 	skb->dev = NULL;
1445 
1446 	if (sk->sk_state == TCP_LISTEN) {
1447 		ret = tcp_v6_do_rcv(sk, skb);
1448 		goto put_and_return;
1449 	}
1450 
1451 	sk_incoming_cpu_update(sk);
1452 
1453 	bh_lock_sock_nested(sk);
1454 	tcp_segs_in(tcp_sk(sk), skb);
1455 	ret = 0;
1456 	if (!sock_owned_by_user(sk)) {
1457 		if (!tcp_prequeue(sk, skb))
1458 			ret = tcp_v6_do_rcv(sk, skb);
1459 	} else if (unlikely(sk_add_backlog(sk, skb,
1460 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1461 		bh_unlock_sock(sk);
1462 		__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
1463 		goto discard_and_relse;
1464 	}
1465 	bh_unlock_sock(sk);
1466 
1467 put_and_return:
1468 	if (refcounted)
1469 		sock_put(sk);
1470 	return ret ? -1 : 0;
1471 
1472 no_tcp_socket:
1473 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1474 		goto discard_it;
1475 
1476 	tcp_v6_fill_cb(skb, hdr, th);
1477 
1478 	if (tcp_checksum_complete(skb)) {
1479 csum_error:
1480 		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1481 bad_packet:
1482 		__TCP_INC_STATS(net, TCP_MIB_INERRS);
1483 	} else {
1484 		tcp_v6_send_reset(NULL, skb);
1485 	}
1486 
1487 discard_it:
1488 	kfree_skb(skb);
1489 	return 0;
1490 
1491 discard_and_relse:
1492 	sk_drops_add(sk, skb);
1493 	if (refcounted)
1494 		sock_put(sk);
1495 	goto discard_it;
1496 
1497 do_time_wait:
1498 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1499 		inet_twsk_put(inet_twsk(sk));
1500 		goto discard_it;
1501 	}
1502 
1503 	tcp_v6_fill_cb(skb, hdr, th);
1504 
1505 	if (tcp_checksum_complete(skb)) {
1506 		inet_twsk_put(inet_twsk(sk));
1507 		goto csum_error;
1508 	}
1509 
1510 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1511 	case TCP_TW_SYN:
1512 	{
1513 		struct sock *sk2;
1514 
1515 		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1516 					    skb, __tcp_hdrlen(th),
1517 					    &ipv6_hdr(skb)->saddr, th->source,
1518 					    &ipv6_hdr(skb)->daddr,
1519 					    ntohs(th->dest), tcp_v6_iif(skb));
1520 		if (sk2) {
1521 			struct inet_timewait_sock *tw = inet_twsk(sk);
1522 			inet_twsk_deschedule_put(tw);
1523 			sk = sk2;
1524 			tcp_v6_restore_cb(skb);
1525 			refcounted = false;
1526 			goto process;
1527 		}
1528 		/* Fall through to ACK */
1529 	}
1530 	case TCP_TW_ACK:
1531 		tcp_v6_timewait_ack(sk, skb);
1532 		break;
1533 	case TCP_TW_RST:
1534 		tcp_v6_restore_cb(skb);
1535 		tcp_v6_send_reset(sk, skb);
1536 		inet_twsk_deschedule_put(inet_twsk(sk));
1537 		goto discard_it;
1538 	case TCP_TW_SUCCESS:
1539 		;
1540 	}
1541 	goto discard_it;
1542 }
1543 
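/* Early demux: at IP input time, try to find the established socket a
 * packet belongs to so the skb can carry both the socket and its cached
 * dst into tcp_v6_rcv(), saving a second hash lookup and a route lookup
 * in the common case.
 */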
1544 static void tcp_v6_early_demux(struct sk_buff *skb)
1545 {
1546 	const struct ipv6hdr *hdr;
1547 	const struct tcphdr *th;
1548 	struct sock *sk;
1549 
1550 	if (skb->pkt_type != PACKET_HOST)
1551 		return;
1552 
1553 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1554 		return;
1555 
1556 	hdr = ipv6_hdr(skb);
1557 	th = tcp_hdr(skb);
1558 
1559 	if (th->doff < sizeof(struct tcphdr) / 4)
1560 		return;
1561 
1562 	/* Note: we use inet6_iif() here, not tcp_v6_iif(). */
1563 	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1564 					&hdr->saddr, th->source,
1565 					&hdr->daddr, ntohs(th->dest),
1566 					inet6_iif(skb));
1567 	if (sk) {
1568 		skb->sk = sk;
1569 		skb->destructor = sock_edemux;
1570 		if (sk_fullsock(sk)) {
1571 			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1572 
1573 			if (dst)
1574 				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1575 			if (dst &&
1576 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1577 				skb_dst_set_noref(skb, dst);
1578 		}
1579 	}
1580 }
1581 
1582 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1583 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1584 	.twsk_unique	= tcp_twsk_unique,
1585 	.twsk_destructor = tcp_twsk_destructor,
1586 };
1587 
1588 static const struct inet_connection_sock_af_ops ipv6_specific = {
1589 	.queue_xmit	   = inet6_csk_xmit,
1590 	.send_check	   = tcp_v6_send_check,
1591 	.rebuild_header	   = inet6_sk_rebuild_header,
1592 	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1593 	.conn_request	   = tcp_v6_conn_request,
1594 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1595 	.net_header_len	   = sizeof(struct ipv6hdr),
1596 	.net_frag_header_len = sizeof(struct frag_hdr),
1597 	.setsockopt	   = ipv6_setsockopt,
1598 	.getsockopt	   = ipv6_getsockopt,
1599 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1600 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1601 	.bind_conflict	   = inet6_csk_bind_conflict,
1602 #ifdef CONFIG_COMPAT
1603 	.compat_setsockopt = compat_ipv6_setsockopt,
1604 	.compat_getsockopt = compat_ipv6_getsockopt,
1605 #endif
1606 	.mtu_reduced	   = tcp_v6_mtu_reduced,
1607 };
1608 
1609 #ifdef CONFIG_TCP_MD5SIG
1610 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1611 	.md5_lookup	=	tcp_v6_md5_lookup,
1612 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1613 	.md5_parse	=	tcp_v6_parse_md5_keys,
1614 };
1615 #endif
1616 
1617 /*
1618  *	TCP over IPv4 via INET6 API
1619  */
1620 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1621 	.queue_xmit	   = ip_queue_xmit,
1622 	.send_check	   = tcp_v4_send_check,
1623 	.rebuild_header	   = inet_sk_rebuild_header,
1624 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1625 	.conn_request	   = tcp_v6_conn_request,
1626 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1627 	.net_header_len	   = sizeof(struct iphdr),
1628 	.setsockopt	   = ipv6_setsockopt,
1629 	.getsockopt	   = ipv6_getsockopt,
1630 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1631 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1632 	.bind_conflict	   = inet6_csk_bind_conflict,
1633 #ifdef CONFIG_COMPAT
1634 	.compat_setsockopt = compat_ipv6_setsockopt,
1635 	.compat_getsockopt = compat_ipv6_getsockopt,
1636 #endif
1637 	.mtu_reduced	   = tcp_v4_mtu_reduced,
1638 };
1639 
1640 #ifdef CONFIG_TCP_MD5SIG
1641 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1642 	.md5_lookup	=	tcp_v4_md5_lookup,
1643 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1644 	.md5_parse	=	tcp_v6_parse_md5_keys,
1645 };
1646 #endif
1647 
1648 /* NOTE: A lot of things are set to zero explicitly by the call to
1649  *       sk_alloc(), so they need not be done here.
1650  */
1651 static int tcp_v6_init_sock(struct sock *sk)
1652 {
1653 	struct inet_connection_sock *icsk = inet_csk(sk);
1654 
1655 	tcp_init_sock(sk);
1656 
1657 	icsk->icsk_af_ops = &ipv6_specific;
1658 
1659 #ifdef CONFIG_TCP_MD5SIG
1660 	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1661 #endif
1662 
1663 	return 0;
1664 }
1665 
1666 static void tcp_v6_destroy_sock(struct sock *sk)
1667 {
1668 	tcp_v4_destroy_sock(sk);
1669 	inet6_destroy_sock(sk);
1670 }
1671 
1672 #ifdef CONFIG_PROC_FS
1673 /* Proc filesystem TCPv6 sock list dumping. */
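/* Each row prints the 128-bit addresses as four native-endian 32-bit
 * hex words, so on a little-endian host ::1 appears as
 * 00000000000000000000000001000000, followed by the hex port, the
 * socket state and the timer/uid/inode columns named in the header
 * emitted by tcp6_seq_show().
 */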
1674 static void get_openreq6(struct seq_file *seq,
1675 			 const struct request_sock *req, int i)
1676 {
1677 	long ttd = req->rsk_timer.expires - jiffies;
1678 	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1679 	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1680 
1681 	if (ttd < 0)
1682 		ttd = 0;
1683 
1684 	seq_printf(seq,
1685 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1686 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1687 		   i,
1688 		   src->s6_addr32[0], src->s6_addr32[1],
1689 		   src->s6_addr32[2], src->s6_addr32[3],
1690 		   inet_rsk(req)->ir_num,
1691 		   dest->s6_addr32[0], dest->s6_addr32[1],
1692 		   dest->s6_addr32[2], dest->s6_addr32[3],
1693 		   ntohs(inet_rsk(req)->ir_rmt_port),
1694 		   TCP_SYN_RECV,
1695 		   0, 0, /* could print option size, but that is af dependent. */
1696 		   1,   /* timers active (only the expire timer) */
1697 		   jiffies_to_clock_t(ttd),
1698 		   req->num_timeout,
1699 		   from_kuid_munged(seq_user_ns(seq),
1700 				    sock_i_uid(req->rsk_listener)),
1701 		   0,  /* non standard timer */
1702 		   0, /* open_requests have no inode */
1703 		   0, req);
1704 }
1705 
1706 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1707 {
1708 	const struct in6_addr *dest, *src;
1709 	__u16 destp, srcp;
1710 	int timer_active;
1711 	unsigned long timer_expires;
1712 	const struct inet_sock *inet = inet_sk(sp);
1713 	const struct tcp_sock *tp = tcp_sk(sp);
1714 	const struct inet_connection_sock *icsk = inet_csk(sp);
1715 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1716 	int rx_queue;
1717 	int state;
1718 
1719 	dest  = &sp->sk_v6_daddr;
1720 	src   = &sp->sk_v6_rcv_saddr;
1721 	destp = ntohs(inet->inet_dport);
1722 	srcp  = ntohs(inet->inet_sport);
1723 
1724 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1725 	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
1726 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1727 		timer_active	= 1;
1728 		timer_expires	= icsk->icsk_timeout;
1729 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1730 		timer_active	= 4;
1731 		timer_expires	= icsk->icsk_timeout;
1732 	} else if (timer_pending(&sp->sk_timer)) {
1733 		timer_active	= 2;
1734 		timer_expires	= sp->sk_timer.expires;
1735 	} else {
1736 		timer_active	= 0;
1737 		timer_expires = jiffies;
1738 	}
1739 
1740 	state = sk_state_load(sp);
1741 	if (state == TCP_LISTEN)
1742 		rx_queue = sp->sk_ack_backlog;
1743 	else
1744 		/* Because we don't lock the socket,
1745 		 * we might find a transient negative value.
1746 		 */
1747 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1748 
1749 	seq_printf(seq,
1750 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1751 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1752 		   i,
1753 		   src->s6_addr32[0], src->s6_addr32[1],
1754 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1755 		   dest->s6_addr32[0], dest->s6_addr32[1],
1756 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1757 		   state,
1758 		   tp->write_seq - tp->snd_una,
1759 		   rx_queue,
1760 		   timer_active,
1761 		   jiffies_delta_to_clock_t(timer_expires - jiffies),
1762 		   icsk->icsk_retransmits,
1763 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1764 		   icsk->icsk_probes_out,
1765 		   sock_i_ino(sp),
1766 		   atomic_read(&sp->sk_refcnt), sp,
1767 		   jiffies_to_clock_t(icsk->icsk_rto),
1768 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1769 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1770 		   tp->snd_cwnd,
1771 		   state == TCP_LISTEN ?
1772 			fastopenq->max_qlen :
1773 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1774 		   );
1775 }
1776 
1777 static void get_timewait6_sock(struct seq_file *seq,
1778 			       struct inet_timewait_sock *tw, int i)
1779 {
1780 	long delta = tw->tw_timer.expires - jiffies;
1781 	const struct in6_addr *dest, *src;
1782 	__u16 destp, srcp;
1783 
1784 	dest = &tw->tw_v6_daddr;
1785 	src  = &tw->tw_v6_rcv_saddr;
1786 	destp = ntohs(tw->tw_dport);
1787 	srcp  = ntohs(tw->tw_sport);
1788 
1789 	seq_printf(seq,
1790 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1791 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1792 		   i,
1793 		   src->s6_addr32[0], src->s6_addr32[1],
1794 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1795 		   dest->s6_addr32[0], dest->s6_addr32[1],
1796 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1797 		   tw->tw_substate, 0, 0,
1798 		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1799 		   atomic_read(&tw->tw_refcnt), tw);
1800 }
1801 
1802 static int tcp6_seq_show(struct seq_file *seq, void *v)
1803 {
1804 	struct tcp_iter_state *st;
1805 	struct sock *sk = v;
1806 
1807 	if (v == SEQ_START_TOKEN) {
1808 		seq_puts(seq,
1809 			 "  sl  "
1810 			 "local_address                         "
1811 			 "remote_address                        "
1812 			 "st tx_queue rx_queue tr tm->when retrnsmt"
1813 			 "   uid  timeout inode\n");
1814 		goto out;
1815 	}
1816 	st = seq->private;
1817 
1818 	if (sk->sk_state == TCP_TIME_WAIT)
1819 		get_timewait6_sock(seq, v, st->num);
1820 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
1821 		get_openreq6(seq, v, st->num);
1822 	else
1823 		get_tcp6_sock(seq, v, st->num);
1824 out:
1825 	return 0;
1826 }
1827 
1828 static const struct file_operations tcp6_afinfo_seq_fops = {
1829 	.owner   = THIS_MODULE,
1830 	.open    = tcp_seq_open,
1831 	.read    = seq_read,
1832 	.llseek  = seq_lseek,
1833 	.release = seq_release_net
1834 };
1835 
1836 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1837 	.name		= "tcp6",
1838 	.family		= AF_INET6,
1839 	.seq_fops	= &tcp6_afinfo_seq_fops,
1840 	.seq_ops	= {
1841 		.show		= tcp6_seq_show,
1842 	},
1843 };
1844 
1845 int __net_init tcp6_proc_init(struct net *net)
1846 {
1847 	return tcp_proc_register(net, &tcp6_seq_afinfo);
1848 }
1849 
1850 void tcp6_proc_exit(struct net *net)
1851 {
1852 	tcp_proc_unregister(net, &tcp6_seq_afinfo);
1853 }
1854 #endif
1855 
1856 static void tcp_v6_clear_sk(struct sock *sk, int size)
1857 {
1858 	struct inet_sock *inet = inet_sk(sk);
1859 
1860 	/* We do not want to clear the pinet6 field, because of RCU lookups. */
1861 	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1862 
1863 	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1864 	memset(&inet->pinet6 + 1, 0, size);
1865 }
1866 
1867 struct proto tcpv6_prot = {
1868 	.name			= "TCPv6",
1869 	.owner			= THIS_MODULE,
1870 	.close			= tcp_close,
1871 	.connect		= tcp_v6_connect,
1872 	.disconnect		= tcp_disconnect,
1873 	.accept			= inet_csk_accept,
1874 	.ioctl			= tcp_ioctl,
1875 	.init			= tcp_v6_init_sock,
1876 	.destroy		= tcp_v6_destroy_sock,
1877 	.shutdown		= tcp_shutdown,
1878 	.setsockopt		= tcp_setsockopt,
1879 	.getsockopt		= tcp_getsockopt,
1880 	.recvmsg		= tcp_recvmsg,
1881 	.sendmsg		= tcp_sendmsg,
1882 	.sendpage		= tcp_sendpage,
1883 	.backlog_rcv		= tcp_v6_do_rcv,
1884 	.release_cb		= tcp_release_cb,
1885 	.hash			= inet6_hash,
1886 	.unhash			= inet_unhash,
1887 	.get_port		= inet_csk_get_port,
1888 	.enter_memory_pressure	= tcp_enter_memory_pressure,
1889 	.stream_memory_free	= tcp_stream_memory_free,
1890 	.sockets_allocated	= &tcp_sockets_allocated,
1891 	.memory_allocated	= &tcp_memory_allocated,
1892 	.memory_pressure	= &tcp_memory_pressure,
1893 	.orphan_count		= &tcp_orphan_count,
1894 	.sysctl_mem		= sysctl_tcp_mem,
1895 	.sysctl_wmem		= sysctl_tcp_wmem,
1896 	.sysctl_rmem		= sysctl_tcp_rmem,
1897 	.max_header		= MAX_TCP_HEADER,
1898 	.obj_size		= sizeof(struct tcp6_sock),
1899 	.slab_flags		= SLAB_DESTROY_BY_RCU,
1900 	.twsk_prot		= &tcp6_timewait_sock_ops,
1901 	.rsk_prot		= &tcp6_request_sock_ops,
1902 	.h.hashinfo		= &tcp_hashinfo,
1903 	.no_autobind		= true,
1904 #ifdef CONFIG_COMPAT
1905 	.compat_setsockopt	= compat_tcp_setsockopt,
1906 	.compat_getsockopt	= compat_tcp_getsockopt,
1907 #endif
1908 	.clear_sk		= tcp_v6_clear_sk,
1909 	.diag_destroy		= tcp_abort,
1910 };
1911 
1912 static const struct inet6_protocol tcpv6_protocol = {
1913 	.early_demux	=	tcp_v6_early_demux,
1914 	.handler	=	tcp_v6_rcv,
1915 	.err_handler	=	tcp_v6_err,
1916 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1917 };
1918 
1919 static struct inet_protosw tcpv6_protosw = {
1920 	.type		=	SOCK_STREAM,
1921 	.protocol	=	IPPROTO_TCP,
1922 	.prot		=	&tcpv6_prot,
1923 	.ops		=	&inet6_stream_ops,
1924 	.flags		=	INET_PROTOSW_PERMANENT |
1925 				INET_PROTOSW_ICSK,
1926 };
1927 
1928 static int __net_init tcpv6_net_init(struct net *net)
1929 {
1930 	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1931 				    SOCK_RAW, IPPROTO_TCP, net);
1932 }
1933 
1934 static void __net_exit tcpv6_net_exit(struct net *net)
1935 {
1936 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1937 }
1938 
1939 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1940 {
1941 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1942 }
1943 
1944 static struct pernet_operations tcpv6_net_ops = {
1945 	.init	    = tcpv6_net_init,
1946 	.exit	    = tcpv6_net_exit,
1947 	.exit_batch = tcpv6_net_exit_batch,
1948 };
1949 
1950 int __init tcpv6_init(void)
1951 {
1952 	int ret;
1953 
1954 	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1955 	if (ret)
1956 		goto out;
1957 
1958 	/* register inet6 protocol */
1959 	ret = inet6_register_protosw(&tcpv6_protosw);
1960 	if (ret)
1961 		goto out_tcpv6_protocol;
1962 
1963 	ret = register_pernet_subsys(&tcpv6_net_ops);
1964 	if (ret)
1965 		goto out_tcpv6_protosw;
1966 out:
1967 	return ret;
1968 
1969 out_tcpv6_protosw:
1970 	inet6_unregister_protosw(&tcpv6_protosw);
1971 out_tcpv6_protocol:
1972 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1973 	goto out;
1974 }
1975 
1976 void tcpv6_exit(void)
1977 {
1978 	unregister_pernet_subsys(&tcpv6_net_ops);
1979 	inet6_unregister_protosw(&tcpv6_protosw);
1980 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1981 }
1982