xref: /openbmc/linux/net/ipv6/tcp_ipv6.c (revision 6a613ac6)
1 /*
2  *	TCP over IPv6
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	Based on:
9  *	linux/net/ipv4/tcp.c
10  *	linux/net/ipv4/tcp_input.c
11  *	linux/net/ipv4/tcp_output.c
12  *
13  *	Fixes:
14  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
15  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
16  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
17  *					to a single port at the same time.
18  *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
19  *
20  *	This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25 
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46 
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/tcp_memcontrol.h>
65 #include <net/busy_poll.h>
66 
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
69 
70 #include <linux/crypto.h>
71 #include <linux/scatterlist.h>
72 
73 static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
74 static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
75 				      struct request_sock *req);
76 
77 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78 
79 static const struct inet_connection_sock_af_ops ipv6_mapped;
80 static const struct inet_connection_sock_af_ops ipv6_specific;
81 #ifdef CONFIG_TCP_MD5SIG
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
83 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
84 #else
85 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
86 						   const struct in6_addr *addr)
87 {
88 	return NULL;
89 }
90 #endif
91 
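/* Cache the inbound route on the socket: take a reference on the skb's dst,
 * remember the incoming ifindex and the IPv6 route cookie so later packets
 * can validate the cached dst cheaply.
 */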
92 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
93 {
94 	struct dst_entry *dst = skb_dst(skb);
95 
96 	if (dst) {
97 		const struct rt6_info *rt = (const struct rt6_info *)dst;
98 
99 		dst_hold(dst);
100 		sk->sk_rx_dst = dst;
101 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
102 		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
103 	}
104 }
105 
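/* Derive the initial sequence number from the connection 4-tuple via
 * secure_tcpv6_sequence_number(), which mixes in a random secret so ISNs
 * are hard to predict.
 */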
106 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
107 {
108 	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
109 					    ipv6_hdr(skb)->saddr.s6_addr32,
110 					    tcp_hdr(skb)->dest,
111 					    tcp_hdr(skb)->source);
112 }
113 
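/* Active open: validate the destination, resolve flow labels and scope ids,
 * hand v4-mapped destinations over to tcp_v4_connect(), otherwise route the
 * flow, choose a source address, bind a local port via inet6_hash_connect()
 * and finally send the SYN with tcp_connect().
 */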
114 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
115 			  int addr_len)
116 {
117 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
118 	struct inet_sock *inet = inet_sk(sk);
119 	struct inet_connection_sock *icsk = inet_csk(sk);
120 	struct ipv6_pinfo *np = inet6_sk(sk);
121 	struct tcp_sock *tp = tcp_sk(sk);
122 	struct in6_addr *saddr = NULL, *final_p, final;
123 	struct ipv6_txoptions *opt;
124 	struct flowi6 fl6;
125 	struct dst_entry *dst;
126 	int addr_type;
127 	int err;
128 
129 	if (addr_len < SIN6_LEN_RFC2133)
130 		return -EINVAL;
131 
132 	if (usin->sin6_family != AF_INET6)
133 		return -EAFNOSUPPORT;
134 
135 	memset(&fl6, 0, sizeof(fl6));
136 
137 	if (np->sndflow) {
138 		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
139 		IP6_ECN_flow_init(fl6.flowlabel);
140 		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
141 			struct ip6_flowlabel *flowlabel;
142 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
143 			if (!flowlabel)
144 				return -EINVAL;
145 			fl6_sock_release(flowlabel);
146 		}
147 	}
148 
149 	/*
150 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
151 	 */
152 
153 	if (ipv6_addr_any(&usin->sin6_addr))
154 		usin->sin6_addr.s6_addr[15] = 0x1;
155 
156 	addr_type = ipv6_addr_type(&usin->sin6_addr);
157 
158 	if (addr_type & IPV6_ADDR_MULTICAST)
159 		return -ENETUNREACH;
160 
161 	if (addr_type&IPV6_ADDR_LINKLOCAL) {
162 		if (addr_len >= sizeof(struct sockaddr_in6) &&
163 		    usin->sin6_scope_id) {
164 			/* If interface is set while binding, indices
165 			 * must coincide.
166 			 */
167 			if (sk->sk_bound_dev_if &&
168 			    sk->sk_bound_dev_if != usin->sin6_scope_id)
169 				return -EINVAL;
170 
171 			sk->sk_bound_dev_if = usin->sin6_scope_id;
172 		}
173 
174 		/* Connecting to a link-local address requires an interface */
175 		if (!sk->sk_bound_dev_if)
176 			return -EINVAL;
177 	}
178 
179 	if (tp->rx_opt.ts_recent_stamp &&
180 	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
181 		tp->rx_opt.ts_recent = 0;
182 		tp->rx_opt.ts_recent_stamp = 0;
183 		tp->write_seq = 0;
184 	}
185 
186 	sk->sk_v6_daddr = usin->sin6_addr;
187 	np->flow_label = fl6.flowlabel;
188 
189 	/*
190 	 *	TCP over IPv4
191 	 */
192 
193 	if (addr_type == IPV6_ADDR_MAPPED) {
194 		u32 exthdrlen = icsk->icsk_ext_hdr_len;
195 		struct sockaddr_in sin;
196 
197 		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
198 
199 		if (__ipv6_only_sock(sk))
200 			return -ENETUNREACH;
201 
202 		sin.sin_family = AF_INET;
203 		sin.sin_port = usin->sin6_port;
204 		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
205 
206 		icsk->icsk_af_ops = &ipv6_mapped;
207 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
208 #ifdef CONFIG_TCP_MD5SIG
209 		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
210 #endif
211 
212 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
213 
214 		if (err) {
215 			icsk->icsk_ext_hdr_len = exthdrlen;
216 			icsk->icsk_af_ops = &ipv6_specific;
217 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
218 #ifdef CONFIG_TCP_MD5SIG
219 			tp->af_specific = &tcp_sock_ipv6_specific;
220 #endif
221 			goto failure;
222 		}
223 		np->saddr = sk->sk_v6_rcv_saddr;
224 
225 		return err;
226 	}
227 
228 	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
229 		saddr = &sk->sk_v6_rcv_saddr;
230 
231 	fl6.flowi6_proto = IPPROTO_TCP;
232 	fl6.daddr = sk->sk_v6_daddr;
233 	fl6.saddr = saddr ? *saddr : np->saddr;
234 	fl6.flowi6_oif = sk->sk_bound_dev_if;
235 	fl6.flowi6_mark = sk->sk_mark;
236 	fl6.fl6_dport = usin->sin6_port;
237 	fl6.fl6_sport = inet->inet_sport;
238 
239 	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
240 	final_p = fl6_update_dst(&fl6, opt, &final);
241 
242 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
243 
244 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
245 	if (IS_ERR(dst)) {
246 		err = PTR_ERR(dst);
247 		goto failure;
248 	}
249 
250 	if (!saddr) {
251 		saddr = &fl6.saddr;
252 		sk->sk_v6_rcv_saddr = *saddr;
253 	}
254 
255 	/* set the source address */
256 	np->saddr = *saddr;
257 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
258 
259 	sk->sk_gso_type = SKB_GSO_TCPV6;
260 	ip6_dst_store(sk, dst, NULL, NULL);
261 
262 	if (tcp_death_row.sysctl_tw_recycle &&
263 	    !tp->rx_opt.ts_recent_stamp &&
264 	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
265 		tcp_fetch_timewait_stamp(sk, dst);
266 
267 	icsk->icsk_ext_hdr_len = 0;
268 	if (opt)
269 		icsk->icsk_ext_hdr_len = opt->opt_flen +
270 					 opt->opt_nflen;
271 
272 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
273 
274 	inet->inet_dport = usin->sin6_port;
275 
276 	tcp_set_state(sk, TCP_SYN_SENT);
277 	err = inet6_hash_connect(&tcp_death_row, sk);
278 	if (err)
279 		goto late_failure;
280 
281 	sk_set_txhash(sk);
282 
283 	if (!tp->write_seq && likely(!tp->repair))
284 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
285 							     sk->sk_v6_daddr.s6_addr32,
286 							     inet->inet_sport,
287 							     inet->inet_dport);
288 
289 	err = tcp_connect(sk);
290 	if (err)
291 		goto late_failure;
292 
293 	return 0;
294 
295 late_failure:
296 	tcp_set_state(sk, TCP_CLOSE);
297 	__sk_dst_reset(sk);
298 failure:
299 	inet->inet_dport = 0;
300 	sk->sk_route_caps = 0;
301 	return err;
302 }
303 
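/* Deferred PMTU handling: refresh the route with the new MTU recorded in
 * tp->mtu_info and, if our cached path MTU shrank, re-sync the MSS and
 * retransmit what no longer fits.
 */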
304 static void tcp_v6_mtu_reduced(struct sock *sk)
305 {
306 	struct dst_entry *dst;
307 
308 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
309 		return;
310 
311 	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
312 	if (!dst)
313 		return;
314 
315 	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
316 		tcp_sync_mss(sk, dst_mtu(dst));
317 		tcp_simple_retransmit(sk);
318 	}
319 }
320 
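/* ICMPv6 error handler: locate the socket the offending segment belongs to,
 * then act on the error type - follow redirects, record a smaller path MTU
 * for ICMPV6_PKT_TOOBIG (deferring the work if the socket is owned by the
 * user), or translate other errors into sk_err / sk_err_soft.
 */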
321 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
322 		u8 type, u8 code, int offset, __be32 info)
323 {
324 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
325 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
326 	struct net *net = dev_net(skb->dev);
327 	struct request_sock *fastopen;
328 	struct ipv6_pinfo *np;
329 	struct tcp_sock *tp;
330 	__u32 seq, snd_una;
331 	struct sock *sk;
332 	int err;
333 
334 	sk = __inet6_lookup_established(net, &tcp_hashinfo,
335 					&hdr->daddr, th->dest,
336 					&hdr->saddr, ntohs(th->source),
337 					skb->dev->ifindex);
338 
339 	if (!sk) {
340 		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
341 				   ICMP6_MIB_INERRORS);
342 		return;
343 	}
344 
345 	if (sk->sk_state == TCP_TIME_WAIT) {
346 		inet_twsk_put(inet_twsk(sk));
347 		return;
348 	}
349 	seq = ntohl(th->seq);
350 	if (sk->sk_state == TCP_NEW_SYN_RECV)
351 		return tcp_req_err(sk, seq);
352 
353 	bh_lock_sock(sk);
354 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
355 		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
356 
357 	if (sk->sk_state == TCP_CLOSE)
358 		goto out;
359 
360 	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
361 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
362 		goto out;
363 	}
364 
365 	tp = tcp_sk(sk);
366 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
367 	fastopen = tp->fastopen_rsk;
368 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
369 	if (sk->sk_state != TCP_LISTEN &&
370 	    !between(seq, snd_una, tp->snd_nxt)) {
371 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
372 		goto out;
373 	}
374 
375 	np = inet6_sk(sk);
376 
377 	if (type == NDISC_REDIRECT) {
378 		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
379 
380 		if (dst)
381 			dst->ops->redirect(dst, sk, skb);
382 		goto out;
383 	}
384 
385 	if (type == ICMPV6_PKT_TOOBIG) {
386 		/* We are not interested in TCP_LISTEN and open_requests
387 		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
388 		 * they should go through unfragmented).
389 		 */
390 		if (sk->sk_state == TCP_LISTEN)
391 			goto out;
392 
393 		if (!ip6_sk_accept_pmtu(sk))
394 			goto out;
395 
396 		tp->mtu_info = ntohl(info);
397 		if (!sock_owned_by_user(sk))
398 			tcp_v6_mtu_reduced(sk);
399 		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
400 					   &tp->tsq_flags))
401 			sock_hold(sk);
402 		goto out;
403 	}
404 
405 	icmpv6_err_convert(type, code, &err);
406 
407 	/* Might be for a request_sock */
408 	switch (sk->sk_state) {
409 	case TCP_SYN_SENT:
410 	case TCP_SYN_RECV:
411 		/* Only in fast or simultaneous open. If a fast open socket is
412 		/* Only in fast or simultaneous open. If a fast open socket
413 		 */
414 		if (fastopen && !fastopen->sk)
415 			break;
416 
417 		if (!sock_owned_by_user(sk)) {
418 			sk->sk_err = err;
419 			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
420 
421 			tcp_done(sk);
422 		} else
423 			sk->sk_err_soft = err;
424 		goto out;
425 	}
426 
427 	if (!sock_owned_by_user(sk) && np->recverr) {
428 		sk->sk_err = err;
429 		sk->sk_error_report(sk);
430 	} else
431 		sk->sk_err_soft = err;
432 
433 out:
434 	bh_unlock_sock(sk);
435 	sock_put(sk);
436 }
437 
438 
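/* Build and transmit a SYN-ACK for a pending request: grab a route if the
 * caller did not supply one, let tcp_make_synack() construct the segment,
 * checksum it and send it with ip6_xmit().
 */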
439 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
440 			      struct flowi *fl,
441 			      struct request_sock *req,
442 			      struct tcp_fastopen_cookie *foc,
443 			      bool attach_req)
444 {
445 	struct inet_request_sock *ireq = inet_rsk(req);
446 	struct ipv6_pinfo *np = inet6_sk(sk);
447 	struct flowi6 *fl6 = &fl->u.ip6;
448 	struct sk_buff *skb;
449 	int err = -ENOMEM;
450 
451 	/* First, grab a route. */
452 	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
453 					       IPPROTO_TCP)) == NULL)
454 		goto done;
455 
456 	skb = tcp_make_synack(sk, dst, req, foc, attach_req);
457 
458 	if (skb) {
459 		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
460 				    &ireq->ir_v6_rmt_addr);
461 
462 		fl6->daddr = ireq->ir_v6_rmt_addr;
463 		if (np->repflow && ireq->pktopts)
464 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
465 
466 		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
467 			       np->tclass);
468 		err = net_xmit_eval(err);
469 	}
470 
471 done:
472 	return err;
473 }
474 
475 
476 static void tcp_v6_reqsk_destructor(struct request_sock *req)
477 {
478 	kfree_skb(inet_rsk(req)->pktopts);
479 }
480 
481 #ifdef CONFIG_TCP_MD5SIG
482 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
483 						   const struct in6_addr *addr)
484 {
485 	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
486 }
487 
488 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
489 						const struct sock *addr_sk)
490 {
491 	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
492 }
493 
494 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
495 				 int optlen)
496 {
497 	struct tcp_md5sig cmd;
498 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
499 
500 	if (optlen < sizeof(cmd))
501 		return -EINVAL;
502 
503 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
504 		return -EFAULT;
505 
506 	if (sin6->sin6_family != AF_INET6)
507 		return -EINVAL;
508 
509 	if (!cmd.tcpm_keylen) {
510 		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
511 			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
512 					      AF_INET);
513 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
514 				      AF_INET6);
515 	}
516 
517 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
518 		return -EINVAL;
519 
520 	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
521 		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
522 				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
523 
524 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
525 			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
526 }
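/* The keys parsed above are installed by userspace with the TCP_MD5SIG
 * socket option. A minimal sketch (address and key are hypothetical,
 * error handling omitted):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */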
527 
528 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
529 					const struct in6_addr *daddr,
530 					const struct in6_addr *saddr, int nbytes)
531 {
532 	struct tcp6_pseudohdr *bp;
533 	struct scatterlist sg;
534 
535 	bp = &hp->md5_blk.ip6;
536 	/* 1. TCP pseudo-header (RFC2460) */
537 	bp->saddr = *saddr;
538 	bp->daddr = *daddr;
539 	bp->protocol = cpu_to_be32(IPPROTO_TCP);
540 	bp->len = cpu_to_be32(nbytes);
541 
542 	sg_init_one(&sg, bp, sizeof(*bp));
543 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
544 }
545 
546 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
547 			       const struct in6_addr *daddr, struct in6_addr *saddr,
548 			       const struct tcphdr *th)
549 {
550 	struct tcp_md5sig_pool *hp;
551 	struct hash_desc *desc;
552 
553 	hp = tcp_get_md5sig_pool();
554 	if (!hp)
555 		goto clear_hash_noput;
556 	desc = &hp->md5_desc;
557 
558 	if (crypto_hash_init(desc))
559 		goto clear_hash;
560 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
561 		goto clear_hash;
562 	if (tcp_md5_hash_header(hp, th))
563 		goto clear_hash;
564 	if (tcp_md5_hash_key(hp, key))
565 		goto clear_hash;
566 	if (crypto_hash_final(desc, md5_hash))
567 		goto clear_hash;
568 
569 	tcp_put_md5sig_pool();
570 	return 0;
571 
572 clear_hash:
573 	tcp_put_md5sig_pool();
574 clear_hash_noput:
575 	memset(md5_hash, 0, 16);
576 	return 1;
577 }
578 
579 static int tcp_v6_md5_hash_skb(char *md5_hash,
580 			       const struct tcp_md5sig_key *key,
581 			       const struct sock *sk,
582 			       const struct sk_buff *skb)
583 {
584 	const struct in6_addr *saddr, *daddr;
585 	struct tcp_md5sig_pool *hp;
586 	struct hash_desc *desc;
587 	const struct tcphdr *th = tcp_hdr(skb);
588 
589 	if (sk) { /* valid for established/request sockets */
590 		saddr = &sk->sk_v6_rcv_saddr;
591 		daddr = &sk->sk_v6_daddr;
592 	} else {
593 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
594 		saddr = &ip6h->saddr;
595 		daddr = &ip6h->daddr;
596 	}
597 
598 	hp = tcp_get_md5sig_pool();
599 	if (!hp)
600 		goto clear_hash_noput;
601 	desc = &hp->md5_desc;
602 
603 	if (crypto_hash_init(desc))
604 		goto clear_hash;
605 
606 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
607 		goto clear_hash;
608 	if (tcp_md5_hash_header(hp, th))
609 		goto clear_hash;
610 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
611 		goto clear_hash;
612 	if (tcp_md5_hash_key(hp, key))
613 		goto clear_hash;
614 	if (crypto_hash_final(desc, md5_hash))
615 		goto clear_hash;
616 
617 	tcp_put_md5sig_pool();
618 	return 0;
619 
620 clear_hash:
621 	tcp_put_md5sig_pool();
622 clear_hash_noput:
623 	memset(md5_hash, 0, 16);
624 	return 1;
625 }
626 
627 #endif
628 
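/* Verify the TCP MD5 signature option on an inbound segment against the key
 * (if any) configured for the peer address. Returns true when the segment
 * must be dropped: expected-but-missing hash, unexpected hash, or a hash
 * that does not match what we compute.
 */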
629 static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
630 				    const struct sk_buff *skb)
631 {
632 #ifdef CONFIG_TCP_MD5SIG
633 	const __u8 *hash_location = NULL;
634 	struct tcp_md5sig_key *hash_expected;
635 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
636 	const struct tcphdr *th = tcp_hdr(skb);
637 	int genhash;
638 	u8 newhash[16];
639 
640 	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
641 	hash_location = tcp_parse_md5sig_option(th);
642 
643 	/* We've parsed the options - do we have a hash? */
644 	if (!hash_expected && !hash_location)
645 		return false;
646 
647 	if (hash_expected && !hash_location) {
648 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
649 		return true;
650 	}
651 
652 	if (!hash_expected && hash_location) {
653 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
654 		return true;
655 	}
656 
657 	/* check the signature */
658 	genhash = tcp_v6_md5_hash_skb(newhash,
659 				      hash_expected,
660 				      NULL, skb);
661 
662 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
663 		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
664 				     genhash ? "failed" : "mismatch",
665 				     &ip6h->saddr, ntohs(th->source),
666 				     &ip6h->daddr, ntohs(th->dest));
667 		return true;
668 	}
669 #endif
670 	return false;
671 }
672 
673 static void tcp_v6_init_req(struct request_sock *req,
674 			    const struct sock *sk_listener,
675 			    struct sk_buff *skb)
676 {
677 	struct inet_request_sock *ireq = inet_rsk(req);
678 	const struct ipv6_pinfo *np = inet6_sk(sk_listener);
679 
680 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
681 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
682 
683 	/* So that link locals have meaning */
684 	if (!sk_listener->sk_bound_dev_if &&
685 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
686 		ireq->ir_iif = tcp_v6_iif(skb);
687 
688 	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
689 	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
690 	     np->rxopt.bits.rxinfo ||
691 	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
692 	     np->rxopt.bits.rxohlim || np->repflow)) {
693 		atomic_inc(&skb->users);
694 		ireq->pktopts = skb;
695 	}
696 }
697 
698 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
699 					  struct flowi *fl,
700 					  const struct request_sock *req,
701 					  bool *strict)
702 {
703 	if (strict)
704 		*strict = true;
705 	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
706 }
707 
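/* Per-family request_sock hooks; tcp_v6_conn_request() feeds these to the
 * generic tcp_conn_request() machinery for every incoming SYN.
 */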
708 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
709 	.family		=	AF_INET6,
710 	.obj_size	=	sizeof(struct tcp6_request_sock),
711 	.rtx_syn_ack	=	tcp_rtx_synack,
712 	.send_ack	=	tcp_v6_reqsk_send_ack,
713 	.destructor	=	tcp_v6_reqsk_destructor,
714 	.send_reset	=	tcp_v6_send_reset,
715 	.syn_ack_timeout =	tcp_syn_ack_timeout,
716 };
717 
718 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
719 	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
720 				sizeof(struct ipv6hdr),
721 #ifdef CONFIG_TCP_MD5SIG
722 	.req_md5_lookup	=	tcp_v6_md5_lookup,
723 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
724 #endif
725 	.init_req	=	tcp_v6_init_req,
726 #ifdef CONFIG_SYN_COOKIES
727 	.cookie_init_seq =	cookie_v6_init_sequence,
728 #endif
729 	.route_req	=	tcp_v6_route_req,
730 	.init_seq	=	tcp_v6_init_sequence,
731 	.send_synack	=	tcp_v6_send_synack,
732 };
733 
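/* Build and send a bare ACK or RST (no data) on behalf of a socket, or with
 * no socket at all, using the per-netns control socket. Fills in timestamps
 * and an MD5 signature option when requested.
 */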
734 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
735 				 u32 ack, u32 win, u32 tsval, u32 tsecr,
736 				 int oif, struct tcp_md5sig_key *key, int rst,
737 				 u8 tclass, u32 label)
738 {
739 	const struct tcphdr *th = tcp_hdr(skb);
740 	struct tcphdr *t1;
741 	struct sk_buff *buff;
742 	struct flowi6 fl6;
743 	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
744 	struct sock *ctl_sk = net->ipv6.tcp_sk;
745 	unsigned int tot_len = sizeof(struct tcphdr);
746 	struct dst_entry *dst;
747 	__be32 *topt;
748 
749 	if (tsecr)
750 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
751 #ifdef CONFIG_TCP_MD5SIG
752 	if (key)
753 		tot_len += TCPOLEN_MD5SIG_ALIGNED;
754 #endif
755 
756 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
757 			 GFP_ATOMIC);
758 	if (!buff)
759 		return;
760 
761 	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
762 
763 	t1 = (struct tcphdr *) skb_push(buff, tot_len);
764 	skb_reset_transport_header(buff);
765 
766 	/* Swap the send and the receive. */
767 	memset(t1, 0, sizeof(*t1));
768 	t1->dest = th->source;
769 	t1->source = th->dest;
770 	t1->doff = tot_len / 4;
771 	t1->seq = htonl(seq);
772 	t1->ack_seq = htonl(ack);
773 	t1->ack = !rst || !th->ack;
774 	t1->rst = rst;
775 	t1->window = htons(win);
776 
777 	topt = (__be32 *)(t1 + 1);
778 
779 	if (tsecr) {
780 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
781 				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
782 		*topt++ = htonl(tsval);
783 		*topt++ = htonl(tsecr);
784 	}
785 
786 #ifdef CONFIG_TCP_MD5SIG
787 	if (key) {
788 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
789 				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
790 		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
791 				    &ipv6_hdr(skb)->saddr,
792 				    &ipv6_hdr(skb)->daddr, t1);
793 	}
794 #endif
795 
796 	memset(&fl6, 0, sizeof(fl6));
797 	fl6.daddr = ipv6_hdr(skb)->saddr;
798 	fl6.saddr = ipv6_hdr(skb)->daddr;
799 	fl6.flowlabel = label;
800 
801 	buff->ip_summed = CHECKSUM_PARTIAL;
802 	buff->csum = 0;
803 
804 	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
805 
806 	fl6.flowi6_proto = IPPROTO_TCP;
807 	if (rt6_need_strict(&fl6.daddr) && !oif)
808 		fl6.flowi6_oif = tcp_v6_iif(skb);
809 	else
810 		fl6.flowi6_oif = oif;
811 	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
812 	fl6.fl6_dport = t1->dest;
813 	fl6.fl6_sport = t1->source;
814 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
815 
816 	/* Pass the control socket to ip6_dst_lookup_flow whether the reply is
817 	 * an RST or an ACK; the underlying function uses it to retrieve the
818 	 * network namespace.
819 	 */
820 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
821 	if (!IS_ERR(dst)) {
822 		skb_dst_set(buff, dst);
823 		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
824 		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
825 		if (rst)
826 			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
827 		return;
828 	}
829 
830 	kfree_skb(buff);
831 }
832 
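/* Answer a bogus segment with an RST, deriving the reply's sequence numbers
 * from the offending segment. When MD5 is enabled and we have no socket,
 * try to locate the signing key via a listener lookup so the RST itself can
 * carry a valid MD5 option.
 */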
833 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
834 {
835 	const struct tcphdr *th = tcp_hdr(skb);
836 	u32 seq = 0, ack_seq = 0;
837 	struct tcp_md5sig_key *key = NULL;
838 #ifdef CONFIG_TCP_MD5SIG
839 	const __u8 *hash_location = NULL;
840 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
841 	unsigned char newhash[16];
842 	int genhash;
843 	struct sock *sk1 = NULL;
844 #endif
845 	int oif;
846 
847 	if (th->rst)
848 		return;
849 
850 	/* If sk is not NULL, it means we did a successful lookup and the
851 	 * incoming route had to be correct. prequeue might have dropped our dst.
852 	 */
853 	if (!sk && !ipv6_unicast_destination(skb))
854 		return;
855 
856 #ifdef CONFIG_TCP_MD5SIG
857 	hash_location = tcp_parse_md5sig_option(th);
858 	if (!sk && hash_location) {
859 		/*
860 		 * The active side is gone. Try to find the listening socket via the
861 		 * source port, and then find the md5 key through that listening socket.
862 		 * We do not lose security here:
863 		 * the incoming packet is checked against the md5 hash of the key we
864 		 * find, and no RST is generated if the hash doesn't match.
865 		 */
866 		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
867 					   &tcp_hashinfo, &ipv6h->saddr,
868 					   th->source, &ipv6h->daddr,
869 					   ntohs(th->source), tcp_v6_iif(skb));
870 		if (!sk1)
871 			return;
872 
873 		rcu_read_lock();
874 		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
875 		if (!key)
876 			goto release_sk1;
877 
878 		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
879 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
880 			goto release_sk1;
881 	} else {
882 		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
883 	}
884 #endif
885 
886 	if (th->ack)
887 		seq = ntohl(th->ack_seq);
888 	else
889 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
890 			  (th->doff << 2);
891 
892 	oif = sk ? sk->sk_bound_dev_if : 0;
893 	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
894 
895 #ifdef CONFIG_TCP_MD5SIG
896 release_sk1:
897 	if (sk1) {
898 		rcu_read_unlock();
899 		sock_put(sk1);
900 	}
901 #endif
902 }
903 
904 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
905 			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
906 			    struct tcp_md5sig_key *key, u8 tclass,
907 			    u32 label)
908 {
909 	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
910 			     tclass, label);
911 }
912 
913 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
914 {
915 	struct inet_timewait_sock *tw = inet_twsk(sk);
916 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
917 
918 	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
919 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
920 			tcp_time_stamp + tcptw->tw_ts_offset,
921 			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
922 			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
923 
924 	inet_twsk_put(tw);
925 }
926 
927 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
928 				  struct request_sock *req)
929 {
930 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
931 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
932 	 */
933 	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
934 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
935 			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
936 			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
937 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
938 			0, 0);
939 }
940 
941 
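/* On a listening socket a non-SYN segment may be the ACK completing a
 * SYN-cookie handshake; with CONFIG_SYN_COOKIES let cookie_v6_check()
 * validate it and, if valid, hand back the newly created child socket.
 */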
942 static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
943 {
944 #ifdef CONFIG_SYN_COOKIES
945 	const struct tcphdr *th = tcp_hdr(skb);
946 
947 	if (!th->syn)
948 		sk = cookie_v6_check(sk, skb);
949 #endif
950 	return sk;
951 }
952 
953 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
954 {
955 	if (skb->protocol == htons(ETH_P_IP))
956 		return tcp_v4_conn_request(sk, skb);
957 
958 	if (!ipv6_unicast_destination(skb))
959 		goto drop;
960 
961 	return tcp_conn_request(&tcp6_request_sock_ops,
962 				&tcp_request_sock_ipv6_ops, sk, skb);
963 
964 drop:
965 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
966 	return 0; /* don't send reset */
967 }
968 
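/* Create the child socket once the handshake completes. The ETH_P_IP branch
 * handles v4-mapped connections by delegating to tcp_v4_syn_recv_sock() and
 * then patching in the IPv6-specific callbacks; the native path routes the
 * request, clones IPv6 options, copies any MD5 key and inserts the child
 * into the established hash.
 */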
969 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
970 					 struct request_sock *req,
971 					 struct dst_entry *dst,
972 					 struct request_sock *req_unhash,
973 					 bool *own_req)
974 {
975 	struct inet_request_sock *ireq;
976 	struct ipv6_pinfo *newnp;
977 	const struct ipv6_pinfo *np = inet6_sk(sk);
978 	struct ipv6_txoptions *opt;
979 	struct tcp6_sock *newtcp6sk;
980 	struct inet_sock *newinet;
981 	struct tcp_sock *newtp;
982 	struct sock *newsk;
983 #ifdef CONFIG_TCP_MD5SIG
984 	struct tcp_md5sig_key *key;
985 #endif
986 	struct flowi6 fl6;
987 
988 	if (skb->protocol == htons(ETH_P_IP)) {
989 		/*
990 		 *	v6 mapped
991 		 */
992 
993 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
994 					     req_unhash, own_req);
995 
996 		if (!newsk)
997 			return NULL;
998 
999 		newtcp6sk = (struct tcp6_sock *)newsk;
1000 		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1001 
1002 		newinet = inet_sk(newsk);
1003 		newnp = inet6_sk(newsk);
1004 		newtp = tcp_sk(newsk);
1005 
1006 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1007 
1008 		newnp->saddr = newsk->sk_v6_rcv_saddr;
1009 
1010 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1011 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1012 #ifdef CONFIG_TCP_MD5SIG
1013 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1014 #endif
1015 
1016 		newnp->ipv6_ac_list = NULL;
1017 		newnp->ipv6_fl_list = NULL;
1018 		newnp->pktoptions  = NULL;
1019 		newnp->opt	   = NULL;
1020 		newnp->mcast_oif   = tcp_v6_iif(skb);
1021 		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1022 		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1023 		if (np->repflow)
1024 			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1025 
1026 		/*
1027 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1028 		 * here, tcp_create_openreq_child now does this for us, see the comment in
1029 		 * that function for the gory details. -acme
1030 		 */
1031 
1032 		/* This is a tricky place. Until this moment the IPv4 TCP code
1033 		   worked with the IPv6 icsk.icsk_af_ops.
1034 		   Sync it now.
1035 		 */
1036 		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1037 
1038 		return newsk;
1039 	}
1040 
1041 	ireq = inet_rsk(req);
1042 
1043 	if (sk_acceptq_is_full(sk))
1044 		goto out_overflow;
1045 
1046 	if (!dst) {
1047 		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1048 		if (!dst)
1049 			goto out;
1050 	}
1051 
1052 	newsk = tcp_create_openreq_child(sk, req, skb);
1053 	if (!newsk)
1054 		goto out_nonewsk;
1055 
1056 	/*
1057 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1058 	 * count here, tcp_create_openreq_child now does this for us, see the
1059 	 * comment in that function for the gory details. -acme
1060 	 */
1061 
1062 	newsk->sk_gso_type = SKB_GSO_TCPV6;
1063 	ip6_dst_store(newsk, dst, NULL, NULL);
1064 	inet6_sk_rx_dst_set(newsk, skb);
1065 
1066 	newtcp6sk = (struct tcp6_sock *)newsk;
1067 	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1068 
1069 	newtp = tcp_sk(newsk);
1070 	newinet = inet_sk(newsk);
1071 	newnp = inet6_sk(newsk);
1072 
1073 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1074 
1075 	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1076 	newnp->saddr = ireq->ir_v6_loc_addr;
1077 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1078 	newsk->sk_bound_dev_if = ireq->ir_iif;
1079 
1080 	/* Now IPv6 options...
1081 
1082 	   First: no IPv4 options.
1083 	 */
1084 	newinet->inet_opt = NULL;
1085 	newnp->ipv6_ac_list = NULL;
1086 	newnp->ipv6_fl_list = NULL;
1087 
1088 	/* Clone RX bits */
1089 	newnp->rxopt.all = np->rxopt.all;
1090 
1091 	newnp->pktoptions = NULL;
1092 	newnp->opt	  = NULL;
1093 	newnp->mcast_oif  = tcp_v6_iif(skb);
1094 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1095 	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1096 	if (np->repflow)
1097 		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1098 
1099 	/* Clone native IPv6 options from listening socket (if any)
1100 
1101 	   Yes, keeping a reference count would be much more clever,
1102 	   but we do one more thing here: reattach optmem
1103 	   to newsk.
1104 	 */
1105 	opt = rcu_dereference(np->opt);
1106 	if (opt) {
1107 		opt = ipv6_dup_options(newsk, opt);
1108 		RCU_INIT_POINTER(newnp->opt, opt);
1109 	}
1110 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1111 	if (opt)
1112 		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1113 						    opt->opt_flen;
1114 
1115 	tcp_ca_openreq_child(newsk, dst);
1116 
1117 	tcp_sync_mss(newsk, dst_mtu(dst));
1118 	newtp->advmss = dst_metric_advmss(dst);
1119 	if (tcp_sk(sk)->rx_opt.user_mss &&
1120 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1121 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1122 
1123 	tcp_initialize_rcv_mss(newsk);
1124 
1125 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1126 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1127 
1128 #ifdef CONFIG_TCP_MD5SIG
1129 	/* Copy over the MD5 key from the original socket */
1130 	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1131 	if (key) {
1132 		/* We're using one, so create a matching key
1133 		 * on the newsk structure. If we fail to get
1134 		 * memory, then we end up not copying the key
1135 		 * across. Shucks.
1136 		 */
1137 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1138 			       AF_INET6, key->key, key->keylen,
1139 			       sk_gfp_atomic(sk, GFP_ATOMIC));
1140 	}
1141 #endif
1142 
1143 	if (__inet_inherit_port(sk, newsk) < 0) {
1144 		inet_csk_prepare_forced_close(newsk);
1145 		tcp_done(newsk);
1146 		goto out;
1147 	}
1148 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1149 	if (*own_req) {
1150 		tcp_move_syn(newtp, req);
1151 
1152 		/* Clone pktoptions received with SYN, if we own the req */
1153 		if (ireq->pktopts) {
1154 			newnp->pktoptions = skb_clone(ireq->pktopts,
1155 						      sk_gfp_atomic(sk, GFP_ATOMIC));
1156 			consume_skb(ireq->pktopts);
1157 			ireq->pktopts = NULL;
1158 			if (newnp->pktoptions)
1159 				skb_set_owner_r(newnp->pktoptions, newsk);
1160 		}
1161 	}
1162 
1163 	return newsk;
1164 
1165 out_overflow:
1166 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1167 out_nonewsk:
1168 	dst_release(dst);
1169 out:
1170 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1171 	return NULL;
1172 }
1173 
1174 /* The socket must have its spinlock held when we get
1175  * here, unless it is a TCP_LISTEN socket.
1176  *
1177  * We have a potential double-lock case here, so even when
1178  * doing backlog processing we use the BH locking scheme.
1179  * This is because we cannot sleep with the original spinlock
1180  * held.
1181  */
1182 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1183 {
1184 	struct ipv6_pinfo *np = inet6_sk(sk);
1185 	struct tcp_sock *tp;
1186 	struct sk_buff *opt_skb = NULL;
1187 
1188 	/* Imagine: socket is IPv6. IPv4 packet arrives,
1189 	   goes to the IPv4 receive handler and is backlogged.
1190 	   From backlog it always goes here. Kerboom...
1191 	   Fortunately, tcp_rcv_established and rcv_established
1192 	   handle them correctly, but that is not the case with
1193 	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1194 	 */
1195 
1196 	if (skb->protocol == htons(ETH_P_IP))
1197 		return tcp_v4_do_rcv(sk, skb);
1198 
1199 	if (sk_filter(sk, skb))
1200 		goto discard;
1201 
1202 	/*
1203 	 *	socket locking is here for SMP purposes as backlog rcv
1204 	 *	is currently called with bh processing disabled.
1205 	 */
1206 
1207 	/* Do Stevens' IPV6_PKTOPTIONS.
1208 
1209 	   Yes, guys, this is the only place in our code where we
1210 	   can make it without affecting IPv4.
1211 	   The rest of the code is protocol independent,
1212 	   and I do not like the idea of uglifying IPv4.
1213 
1214 	   Actually, the whole idea behind IPV6_PKTOPTIONS
1215 	   looks not very well thought out. For now we latch the
1216 	   options received in the last packet enqueued
1217 	   by tcp. Feel free to propose a better solution.
1218 					       --ANK (980728)
1219 	 */
1220 	if (np->rxopt.all)
1221 		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
1222 
1223 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1224 		struct dst_entry *dst = sk->sk_rx_dst;
1225 
1226 		sock_rps_save_rxhash(sk, skb);
1227 		sk_mark_napi_id(sk, skb);
1228 		if (dst) {
1229 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1230 			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1231 				dst_release(dst);
1232 				sk->sk_rx_dst = NULL;
1233 			}
1234 		}
1235 
1236 		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1237 		if (opt_skb)
1238 			goto ipv6_pktoptions;
1239 		return 0;
1240 	}
1241 
1242 	if (tcp_checksum_complete(skb))
1243 		goto csum_err;
1244 
1245 	if (sk->sk_state == TCP_LISTEN) {
1246 		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1247 
1248 		if (!nsk)
1249 			goto discard;
1250 
1251 		if (nsk != sk) {
1252 			sock_rps_save_rxhash(nsk, skb);
1253 			sk_mark_napi_id(nsk, skb);
1254 			if (tcp_child_process(sk, nsk, skb))
1255 				goto reset;
1256 			if (opt_skb)
1257 				__kfree_skb(opt_skb);
1258 			return 0;
1259 		}
1260 	} else
1261 		sock_rps_save_rxhash(sk, skb);
1262 
1263 	if (tcp_rcv_state_process(sk, skb))
1264 		goto reset;
1265 	if (opt_skb)
1266 		goto ipv6_pktoptions;
1267 	return 0;
1268 
1269 reset:
1270 	tcp_v6_send_reset(sk, skb);
1271 discard:
1272 	if (opt_skb)
1273 		__kfree_skb(opt_skb);
1274 	kfree_skb(skb);
1275 	return 0;
1276 csum_err:
1277 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1278 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1279 	goto discard;
1280 
1281 
1282 ipv6_pktoptions:
1283 	/* You may ask: what is this?
1284 
1285 	   1. skb was enqueued by tcp.
1286 	   2. skb is added to tail of read queue, rather than out of order.
1287 	   3. socket is not in passive state.
1288 	   4. Finally, it really contains options which the user wants to receive.
1289 	 */
1290 	tp = tcp_sk(sk);
1291 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1292 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1293 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1294 			np->mcast_oif = tcp_v6_iif(opt_skb);
1295 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1296 			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1297 		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1298 			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1299 		if (np->repflow)
1300 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1301 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1302 			skb_set_owner_r(opt_skb, sk);
1303 			opt_skb = xchg(&np->pktoptions, opt_skb);
1304 		} else {
1305 			__kfree_skb(opt_skb);
1306 			opt_skb = xchg(&np->pktoptions, NULL);
1307 		}
1308 	}
1309 
1310 	kfree_skb(opt_skb);
1311 	return 0;
1312 }
1313 
1314 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1315 			   const struct tcphdr *th)
1316 {
1317 	/* This is tricky: we move IP6CB to its correct location inside
1318 	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1319 	 * _decode_session6() uses IP6CB().
1320 	 * barrier() makes sure compiler won't play aliasing games.
1321 	 */
1322 	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1323 		sizeof(struct inet6_skb_parm));
1324 	barrier();
1325 
1326 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1327 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1328 				    skb->len - th->doff*4);
1329 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1330 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1331 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1332 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1333 	TCP_SKB_CB(skb)->sacked = 0;
1334 }
1335 
1336 static void tcp_v6_restore_cb(struct sk_buff *skb)
1337 {
1338 	/* We need to move the header back to the beginning if xfrm6_policy_check()
1339 	 * and tcp_v6_fill_cb() are going to be called again.
1340 	 */
1341 	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1342 		sizeof(struct inet6_skb_parm));
1343 }
1344 
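/* Main receive entry point: validate the TCP header and checksum, look up
 * the owning socket, let request (NEW_SYN_RECV) and TIME_WAIT mini-sockets
 * take their special paths, and otherwise process the segment directly,
 * prequeue it, or push it onto the socket backlog.
 */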
1345 static int tcp_v6_rcv(struct sk_buff *skb)
1346 {
1347 	const struct tcphdr *th;
1348 	const struct ipv6hdr *hdr;
1349 	struct sock *sk;
1350 	int ret;
1351 	struct net *net = dev_net(skb->dev);
1352 
1353 	if (skb->pkt_type != PACKET_HOST)
1354 		goto discard_it;
1355 
1356 	/*
1357 	 *	Count it even if it's bad.
1358 	 */
1359 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1360 
1361 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1362 		goto discard_it;
1363 
1364 	th = tcp_hdr(skb);
1365 
1366 	if (th->doff < sizeof(struct tcphdr)/4)
1367 		goto bad_packet;
1368 	if (!pskb_may_pull(skb, th->doff*4))
1369 		goto discard_it;
1370 
1371 	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1372 		goto csum_error;
1373 
1374 	th = tcp_hdr(skb);
1375 	hdr = ipv6_hdr(skb);
1376 
1377 lookup:
1378 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
1379 				inet6_iif(skb));
1380 	if (!sk)
1381 		goto no_tcp_socket;
1382 
1383 process:
1384 	if (sk->sk_state == TCP_TIME_WAIT)
1385 		goto do_time_wait;
1386 
1387 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1388 		struct request_sock *req = inet_reqsk(sk);
1389 		struct sock *nsk = NULL;
1390 
1391 		sk = req->rsk_listener;
1392 		tcp_v6_fill_cb(skb, hdr, th);
1393 		if (tcp_v6_inbound_md5_hash(sk, skb)) {
1394 			reqsk_put(req);
1395 			goto discard_it;
1396 		}
1397 		if (likely(sk->sk_state == TCP_LISTEN)) {
1398 			nsk = tcp_check_req(sk, skb, req, false);
1399 		} else {
1400 			inet_csk_reqsk_queue_drop_and_put(sk, req);
1401 			goto lookup;
1402 		}
1403 		if (!nsk) {
1404 			reqsk_put(req);
1405 			goto discard_it;
1406 		}
1407 		if (nsk == sk) {
1408 			sock_hold(sk);
1409 			reqsk_put(req);
1410 			tcp_v6_restore_cb(skb);
1411 		} else if (tcp_child_process(sk, nsk, skb)) {
1412 			tcp_v6_send_reset(nsk, skb);
1413 			goto discard_it;
1414 		} else {
1415 			return 0;
1416 		}
1417 	}
1418 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1419 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1420 		goto discard_and_relse;
1421 	}
1422 
1423 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1424 		goto discard_and_relse;
1425 
1426 	tcp_v6_fill_cb(skb, hdr, th);
1427 
1428 	if (tcp_v6_inbound_md5_hash(sk, skb))
1429 		goto discard_and_relse;
1430 
1431 	if (sk_filter(sk, skb))
1432 		goto discard_and_relse;
1433 
1434 	skb->dev = NULL;
1435 
1436 	if (sk->sk_state == TCP_LISTEN) {
1437 		ret = tcp_v6_do_rcv(sk, skb);
1438 		goto put_and_return;
1439 	}
1440 
1441 	sk_incoming_cpu_update(sk);
1442 
1443 	bh_lock_sock_nested(sk);
1444 	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1445 	ret = 0;
1446 	if (!sock_owned_by_user(sk)) {
1447 		if (!tcp_prequeue(sk, skb))
1448 			ret = tcp_v6_do_rcv(sk, skb);
1449 	} else if (unlikely(sk_add_backlog(sk, skb,
1450 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1451 		bh_unlock_sock(sk);
1452 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1453 		goto discard_and_relse;
1454 	}
1455 	bh_unlock_sock(sk);
1456 
1457 put_and_return:
1458 	sock_put(sk);
1459 	return ret ? -1 : 0;
1460 
1461 no_tcp_socket:
1462 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1463 		goto discard_it;
1464 
1465 	tcp_v6_fill_cb(skb, hdr, th);
1466 
1467 	if (tcp_checksum_complete(skb)) {
1468 csum_error:
1469 		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1470 bad_packet:
1471 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1472 	} else {
1473 		tcp_v6_send_reset(NULL, skb);
1474 	}
1475 
1476 discard_it:
1477 	kfree_skb(skb);
1478 	return 0;
1479 
1480 discard_and_relse:
1481 	sock_put(sk);
1482 	goto discard_it;
1483 
1484 do_time_wait:
1485 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1486 		inet_twsk_put(inet_twsk(sk));
1487 		goto discard_it;
1488 	}
1489 
1490 	tcp_v6_fill_cb(skb, hdr, th);
1491 
1492 	if (tcp_checksum_complete(skb)) {
1493 		inet_twsk_put(inet_twsk(sk));
1494 		goto csum_error;
1495 	}
1496 
1497 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1498 	case TCP_TW_SYN:
1499 	{
1500 		struct sock *sk2;
1501 
1502 		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1503 					    &ipv6_hdr(skb)->saddr, th->source,
1504 					    &ipv6_hdr(skb)->daddr,
1505 					    ntohs(th->dest), tcp_v6_iif(skb));
1506 		if (sk2) {
1507 			struct inet_timewait_sock *tw = inet_twsk(sk);
1508 			inet_twsk_deschedule_put(tw);
1509 			sk = sk2;
1510 			tcp_v6_restore_cb(skb);
1511 			goto process;
1512 		}
1513 		/* Fall through to ACK */
1514 	}
1515 	case TCP_TW_ACK:
1516 		tcp_v6_timewait_ack(sk, skb);
1517 		break;
1518 	case TCP_TW_RST:
1519 		tcp_v6_restore_cb(skb);
1520 		goto no_tcp_socket;
1521 	case TCP_TW_SUCCESS:
1522 		;
1523 	}
1524 	goto discard_it;
1525 }
1526 
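/* Early demux: before routing, try to match the packet to an established
 * socket so its cached rx dst can be attached to the skb and a full route
 * lookup avoided.
 */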
1527 static void tcp_v6_early_demux(struct sk_buff *skb)
1528 {
1529 	const struct ipv6hdr *hdr;
1530 	const struct tcphdr *th;
1531 	struct sock *sk;
1532 
1533 	if (skb->pkt_type != PACKET_HOST)
1534 		return;
1535 
1536 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1537 		return;
1538 
1539 	hdr = ipv6_hdr(skb);
1540 	th = tcp_hdr(skb);
1541 
1542 	if (th->doff < sizeof(struct tcphdr) / 4)
1543 		return;
1544 
1545 	/* Note: We use inet6_iif() here, not tcp_v6_iif() */
1546 	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1547 					&hdr->saddr, th->source,
1548 					&hdr->daddr, ntohs(th->dest),
1549 					inet6_iif(skb));
1550 	if (sk) {
1551 		skb->sk = sk;
1552 		skb->destructor = sock_edemux;
1553 		if (sk_fullsock(sk)) {
1554 			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1555 
1556 			if (dst)
1557 				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1558 			if (dst &&
1559 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1560 				skb_dst_set_noref(skb, dst);
1561 		}
1562 	}
1563 }
1564 
1565 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1566 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1567 	.twsk_unique	= tcp_twsk_unique,
1568 	.twsk_destructor = tcp_twsk_destructor,
1569 };
1570 
1571 static const struct inet_connection_sock_af_ops ipv6_specific = {
1572 	.queue_xmit	   = inet6_csk_xmit,
1573 	.send_check	   = tcp_v6_send_check,
1574 	.rebuild_header	   = inet6_sk_rebuild_header,
1575 	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1576 	.conn_request	   = tcp_v6_conn_request,
1577 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1578 	.net_header_len	   = sizeof(struct ipv6hdr),
1579 	.net_frag_header_len = sizeof(struct frag_hdr),
1580 	.setsockopt	   = ipv6_setsockopt,
1581 	.getsockopt	   = ipv6_getsockopt,
1582 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1583 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1584 	.bind_conflict	   = inet6_csk_bind_conflict,
1585 #ifdef CONFIG_COMPAT
1586 	.compat_setsockopt = compat_ipv6_setsockopt,
1587 	.compat_getsockopt = compat_ipv6_getsockopt,
1588 #endif
1589 	.mtu_reduced	   = tcp_v6_mtu_reduced,
1590 };
1591 
1592 #ifdef CONFIG_TCP_MD5SIG
1593 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1594 	.md5_lookup	=	tcp_v6_md5_lookup,
1595 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1596 	.md5_parse	=	tcp_v6_parse_md5_keys,
1597 };
1598 #endif
1599 
1600 /*
1601  *	TCP over IPv4 via INET6 API
1602  */
1603 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1604 	.queue_xmit	   = ip_queue_xmit,
1605 	.send_check	   = tcp_v4_send_check,
1606 	.rebuild_header	   = inet_sk_rebuild_header,
1607 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1608 	.conn_request	   = tcp_v6_conn_request,
1609 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1610 	.net_header_len	   = sizeof(struct iphdr),
1611 	.setsockopt	   = ipv6_setsockopt,
1612 	.getsockopt	   = ipv6_getsockopt,
1613 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1614 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1615 	.bind_conflict	   = inet6_csk_bind_conflict,
1616 #ifdef CONFIG_COMPAT
1617 	.compat_setsockopt = compat_ipv6_setsockopt,
1618 	.compat_getsockopt = compat_ipv6_getsockopt,
1619 #endif
1620 	.mtu_reduced	   = tcp_v4_mtu_reduced,
1621 };
1622 
1623 #ifdef CONFIG_TCP_MD5SIG
1624 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1625 	.md5_lookup	=	tcp_v4_md5_lookup,
1626 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1627 	.md5_parse	=	tcp_v6_parse_md5_keys,
1628 };
1629 #endif
1630 
1631 /* NOTE: A lot of things are set to zero explicitly by the call to
1632  *       sk_alloc(), so they need not be done here.
1633  */
1634 static int tcp_v6_init_sock(struct sock *sk)
1635 {
1636 	struct inet_connection_sock *icsk = inet_csk(sk);
1637 
1638 	tcp_init_sock(sk);
1639 
1640 	icsk->icsk_af_ops = &ipv6_specific;
1641 
1642 #ifdef CONFIG_TCP_MD5SIG
1643 	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1644 #endif
1645 
1646 	return 0;
1647 }
1648 
1649 static void tcp_v6_destroy_sock(struct sock *sk)
1650 {
1651 	tcp_v4_destroy_sock(sk);
1652 	inet6_destroy_sock(sk);
1653 }
1654 
1655 #ifdef CONFIG_PROC_FS
1656 /* Proc filesystem TCPv6 sock list dumping. */
1657 static void get_openreq6(struct seq_file *seq,
1658 			 const struct request_sock *req, int i)
1659 {
1660 	long ttd = req->rsk_timer.expires - jiffies;
1661 	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1662 	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1663 
1664 	if (ttd < 0)
1665 		ttd = 0;
1666 
1667 	seq_printf(seq,
1668 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1669 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1670 		   i,
1671 		   src->s6_addr32[0], src->s6_addr32[1],
1672 		   src->s6_addr32[2], src->s6_addr32[3],
1673 		   inet_rsk(req)->ir_num,
1674 		   dest->s6_addr32[0], dest->s6_addr32[1],
1675 		   dest->s6_addr32[2], dest->s6_addr32[3],
1676 		   ntohs(inet_rsk(req)->ir_rmt_port),
1677 		   TCP_SYN_RECV,
1678 		   0, 0, /* could print option size, but that is af dependent. */
1679 		   1,   /* timers active (only the expire timer) */
1680 		   jiffies_to_clock_t(ttd),
1681 		   req->num_timeout,
1682 		   from_kuid_munged(seq_user_ns(seq),
1683 				    sock_i_uid(req->rsk_listener)),
1684 		   0,  /* non standard timer */
1685 		   0, /* open_requests have no inode */
1686 		   0, req);
1687 }
1688 
1689 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1690 {
1691 	const struct in6_addr *dest, *src;
1692 	__u16 destp, srcp;
1693 	int timer_active;
1694 	unsigned long timer_expires;
1695 	const struct inet_sock *inet = inet_sk(sp);
1696 	const struct tcp_sock *tp = tcp_sk(sp);
1697 	const struct inet_connection_sock *icsk = inet_csk(sp);
1698 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1699 	int rx_queue;
1700 	int state;
1701 
1702 	dest  = &sp->sk_v6_daddr;
1703 	src   = &sp->sk_v6_rcv_saddr;
1704 	destp = ntohs(inet->inet_dport);
1705 	srcp  = ntohs(inet->inet_sport);
1706 
1707 	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1708 		timer_active	= 1;
1709 		timer_expires	= icsk->icsk_timeout;
1710 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1711 		timer_active	= 4;
1712 		timer_expires	= icsk->icsk_timeout;
1713 	} else if (timer_pending(&sp->sk_timer)) {
1714 		timer_active	= 2;
1715 		timer_expires	= sp->sk_timer.expires;
1716 	} else {
1717 		timer_active	= 0;
1718 		timer_expires = jiffies;
1719 	}
1720 
1721 	state = sk_state_load(sp);
1722 	if (state == TCP_LISTEN)
1723 		rx_queue = sp->sk_ack_backlog;
1724 	else
1725 		/* Because we don't lock the socket,
1726 		 * we might find a transient negative value.
1727 		 */
1728 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1729 
1730 	seq_printf(seq,
1731 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1732 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1733 		   i,
1734 		   src->s6_addr32[0], src->s6_addr32[1],
1735 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1736 		   dest->s6_addr32[0], dest->s6_addr32[1],
1737 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1738 		   state,
1739 		   tp->write_seq - tp->snd_una,
1740 		   rx_queue,
1741 		   timer_active,
1742 		   jiffies_delta_to_clock_t(timer_expires - jiffies),
1743 		   icsk->icsk_retransmits,
1744 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1745 		   icsk->icsk_probes_out,
1746 		   sock_i_ino(sp),
1747 		   atomic_read(&sp->sk_refcnt), sp,
1748 		   jiffies_to_clock_t(icsk->icsk_rto),
1749 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1750 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1751 		   tp->snd_cwnd,
1752 		   state == TCP_LISTEN ?
1753 			fastopenq->max_qlen :
1754 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1755 		   );
1756 }
1757 
1758 static void get_timewait6_sock(struct seq_file *seq,
1759 			       struct inet_timewait_sock *tw, int i)
1760 {
1761 	long delta = tw->tw_timer.expires - jiffies;
1762 	const struct in6_addr *dest, *src;
1763 	__u16 destp, srcp;
1764 
1765 	dest = &tw->tw_v6_daddr;
1766 	src  = &tw->tw_v6_rcv_saddr;
1767 	destp = ntohs(tw->tw_dport);
1768 	srcp  = ntohs(tw->tw_sport);
1769 
1770 	seq_printf(seq,
1771 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1772 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1773 		   i,
1774 		   src->s6_addr32[0], src->s6_addr32[1],
1775 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1776 		   dest->s6_addr32[0], dest->s6_addr32[1],
1777 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1778 		   tw->tw_substate, 0, 0,
1779 		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1780 		   atomic_read(&tw->tw_refcnt), tw);
1781 }
1782 
1783 static int tcp6_seq_show(struct seq_file *seq, void *v)
1784 {
1785 	struct tcp_iter_state *st;
1786 	struct sock *sk = v;
1787 
1788 	if (v == SEQ_START_TOKEN) {
1789 		seq_puts(seq,
1790 			 "  sl  "
1791 			 "local_address                         "
1792 			 "remote_address                        "
1793 			 "st tx_queue rx_queue tr tm->when retrnsmt"
1794 			 "   uid  timeout inode\n");
1795 		goto out;
1796 	}
1797 	st = seq->private;
1798 
1799 	if (sk->sk_state == TCP_TIME_WAIT)
1800 		get_timewait6_sock(seq, v, st->num);
1801 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
1802 		get_openreq6(seq, v, st->num);
1803 	else
1804 		get_tcp6_sock(seq, v, st->num);
1805 out:
1806 	return 0;
1807 }
1808 
1809 static const struct file_operations tcp6_afinfo_seq_fops = {
1810 	.owner   = THIS_MODULE,
1811 	.open    = tcp_seq_open,
1812 	.read    = seq_read,
1813 	.llseek  = seq_lseek,
1814 	.release = seq_release_net
1815 };
1816 
1817 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1818 	.name		= "tcp6",
1819 	.family		= AF_INET6,
1820 	.seq_fops	= &tcp6_afinfo_seq_fops,
1821 	.seq_ops	= {
1822 		.show		= tcp6_seq_show,
1823 	},
1824 };
1825 
1826 int __net_init tcp6_proc_init(struct net *net)
1827 {
1828 	return tcp_proc_register(net, &tcp6_seq_afinfo);
1829 }
1830 
1831 void tcp6_proc_exit(struct net *net)
1832 {
1833 	tcp_proc_unregister(net, &tcp6_seq_afinfo);
1834 }
1835 #endif
1836 
1837 static void tcp_v6_clear_sk(struct sock *sk, int size)
1838 {
1839 	struct inet_sock *inet = inet_sk(sk);
1840 
1841 	/* we do not want to clear the pinet6 field, because of RCU lookups */
1842 	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1843 
1844 	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1845 	memset(&inet->pinet6 + 1, 0, size);
1846 }
1847 
1848 struct proto tcpv6_prot = {
1849 	.name			= "TCPv6",
1850 	.owner			= THIS_MODULE,
1851 	.close			= tcp_close,
1852 	.connect		= tcp_v6_connect,
1853 	.disconnect		= tcp_disconnect,
1854 	.accept			= inet_csk_accept,
1855 	.ioctl			= tcp_ioctl,
1856 	.init			= tcp_v6_init_sock,
1857 	.destroy		= tcp_v6_destroy_sock,
1858 	.shutdown		= tcp_shutdown,
1859 	.setsockopt		= tcp_setsockopt,
1860 	.getsockopt		= tcp_getsockopt,
1861 	.recvmsg		= tcp_recvmsg,
1862 	.sendmsg		= tcp_sendmsg,
1863 	.sendpage		= tcp_sendpage,
1864 	.backlog_rcv		= tcp_v6_do_rcv,
1865 	.release_cb		= tcp_release_cb,
1866 	.hash			= inet_hash,
1867 	.unhash			= inet_unhash,
1868 	.get_port		= inet_csk_get_port,
1869 	.enter_memory_pressure	= tcp_enter_memory_pressure,
1870 	.stream_memory_free	= tcp_stream_memory_free,
1871 	.sockets_allocated	= &tcp_sockets_allocated,
1872 	.memory_allocated	= &tcp_memory_allocated,
1873 	.memory_pressure	= &tcp_memory_pressure,
1874 	.orphan_count		= &tcp_orphan_count,
1875 	.sysctl_mem		= sysctl_tcp_mem,
1876 	.sysctl_wmem		= sysctl_tcp_wmem,
1877 	.sysctl_rmem		= sysctl_tcp_rmem,
1878 	.max_header		= MAX_TCP_HEADER,
1879 	.obj_size		= sizeof(struct tcp6_sock),
1880 	.slab_flags		= SLAB_DESTROY_BY_RCU,
1881 	.twsk_prot		= &tcp6_timewait_sock_ops,
1882 	.rsk_prot		= &tcp6_request_sock_ops,
1883 	.h.hashinfo		= &tcp_hashinfo,
1884 	.no_autobind		= true,
1885 #ifdef CONFIG_COMPAT
1886 	.compat_setsockopt	= compat_tcp_setsockopt,
1887 	.compat_getsockopt	= compat_tcp_getsockopt,
1888 #endif
1889 #ifdef CONFIG_MEMCG_KMEM
1890 	.proto_cgroup		= tcp_proto_cgroup,
1891 #endif
1892 	.clear_sk		= tcp_v6_clear_sk,
1893 };
1894 
1895 static const struct inet6_protocol tcpv6_protocol = {
1896 	.early_demux	=	tcp_v6_early_demux,
1897 	.handler	=	tcp_v6_rcv,
1898 	.err_handler	=	tcp_v6_err,
1899 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1900 };
1901 
1902 static struct inet_protosw tcpv6_protosw = {
1903 	.type		=	SOCK_STREAM,
1904 	.protocol	=	IPPROTO_TCP,
1905 	.prot		=	&tcpv6_prot,
1906 	.ops		=	&inet6_stream_ops,
1907 	.flags		=	INET_PROTOSW_PERMANENT |
1908 				INET_PROTOSW_ICSK,
1909 };
1910 
1911 static int __net_init tcpv6_net_init(struct net *net)
1912 {
1913 	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1914 				    SOCK_RAW, IPPROTO_TCP, net);
1915 }
1916 
1917 static void __net_exit tcpv6_net_exit(struct net *net)
1918 {
1919 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1920 }
1921 
1922 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1923 {
1924 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1925 }
1926 
1927 static struct pernet_operations tcpv6_net_ops = {
1928 	.init	    = tcpv6_net_init,
1929 	.exit	    = tcpv6_net_exit,
1930 	.exit_batch = tcpv6_net_exit_batch,
1931 };
1932 
1933 int __init tcpv6_init(void)
1934 {
1935 	int ret;
1936 
1937 	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1938 	if (ret)
1939 		goto out;
1940 
1941 	/* register inet6 protocol */
1942 	ret = inet6_register_protosw(&tcpv6_protosw);
1943 	if (ret)
1944 		goto out_tcpv6_protocol;
1945 
1946 	ret = register_pernet_subsys(&tcpv6_net_ops);
1947 	if (ret)
1948 		goto out_tcpv6_protosw;
1949 out:
1950 	return ret;
1951 
1952 out_tcpv6_protosw:
1953 	inet6_unregister_protosw(&tcpv6_protosw);
1954 out_tcpv6_protocol:
1955 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1956 	goto out;
1957 }
1958 
1959 void tcpv6_exit(void)
1960 {
1961 	unregister_pernet_subsys(&tcpv6_net_ops);
1962 	inet6_unregister_protosw(&tcpv6_protosw);
1963 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1964 }
1965