xref: /openbmc/linux/net/ipv6/tcp_ipv6.c (revision 6d8e62c3)
1 /*
2  *	TCP over IPv6
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	Based on:
9  *	linux/net/ipv4/tcp.c
10  *	linux/net/ipv4/tcp_input.c
11  *	linux/net/ipv4/tcp_output.c
12  *
13  *	Fixes:
14  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
15  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
16  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
17  *					a single port at the same time.
18  *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
19  *
20  *	This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25 
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46 
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/tcp_memcontrol.h>
65 #include <net/busy_poll.h>
66 
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
69 
70 #include <linux/crypto.h>
71 #include <linux/scatterlist.h>
72 
73 static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
74 static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
75 				      struct request_sock *req);
76 
77 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78 
79 static const struct inet_connection_sock_af_ops ipv6_mapped;
80 static const struct inet_connection_sock_af_ops ipv6_specific;
81 #ifdef CONFIG_TCP_MD5SIG
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
83 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
84 #else
85 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
86 						   const struct in6_addr *addr)
87 {
88 	return NULL;
89 }
90 #endif
91 
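/* Cache the validated input route on the socket so later packets of the
 * same flow can skip a full route lookup.  The fib6 node's sernum is
 * stored as a cookie and later checked (see tcp_v6_do_rcv() and the
 * early demux path) to detect a stale cached route.
 */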
92 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
93 {
94 	struct dst_entry *dst = skb_dst(skb);
95 
96 	if (dst) {
97 		const struct rt6_info *rt = (const struct rt6_info *)dst;
98 
99 		dst_hold(dst);
100 		sk->sk_rx_dst = dst;
101 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
102 		if (rt->rt6i_node)
103 			inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
104 	}
105 }
106 
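/* Hash the socket into the TCP lookup tables.  Sockets that were
 * switched to the v4-mapped ops (see tcp_v6_connect()) are hashed by
 * the IPv4 code; native IPv6 sockets use __inet6_hash() with bottom
 * halves disabled.
 */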
107 static void tcp_v6_hash(struct sock *sk)
108 {
109 	if (sk->sk_state != TCP_CLOSE) {
110 		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
111 			tcp_prot.hash(sk);
112 			return;
113 		}
114 		local_bh_disable();
115 		__inet6_hash(sk, NULL);
116 		local_bh_enable();
117 	}
118 }
119 
120 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
121 {
122 	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
123 					    ipv6_hdr(skb)->saddr.s6_addr32,
124 					    tcp_hdr(skb)->dest,
125 					    tcp_hdr(skb)->source);
126 }
127 
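/* Active open for an AF_INET6 TCP socket: validate the destination,
 * honour flow labels and link-local scope ids, fall back to
 * tcp_v4_connect() for v4-mapped destinations, then route the flow,
 * pick a source address, bind a local port via inet6_hash_connect(),
 * and finally send the SYN through tcp_connect().
 */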
128 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
129 			  int addr_len)
130 {
131 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
132 	struct inet_sock *inet = inet_sk(sk);
133 	struct inet_connection_sock *icsk = inet_csk(sk);
134 	struct ipv6_pinfo *np = inet6_sk(sk);
135 	struct tcp_sock *tp = tcp_sk(sk);
136 	struct in6_addr *saddr = NULL, *final_p, final;
137 	struct rt6_info *rt;
138 	struct flowi6 fl6;
139 	struct dst_entry *dst;
140 	int addr_type;
141 	int err;
142 
143 	if (addr_len < SIN6_LEN_RFC2133)
144 		return -EINVAL;
145 
146 	if (usin->sin6_family != AF_INET6)
147 		return -EAFNOSUPPORT;
148 
149 	memset(&fl6, 0, sizeof(fl6));
150 
151 	if (np->sndflow) {
152 		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
153 		IP6_ECN_flow_init(fl6.flowlabel);
154 		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
155 			struct ip6_flowlabel *flowlabel;
156 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
157 			if (flowlabel == NULL)
158 				return -EINVAL;
159 			fl6_sock_release(flowlabel);
160 		}
161 	}
162 
163 	/*
164 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
165 	 */
166 
167 	if (ipv6_addr_any(&usin->sin6_addr))
168 		usin->sin6_addr.s6_addr[15] = 0x1;
169 
170 	addr_type = ipv6_addr_type(&usin->sin6_addr);
171 
172 	if (addr_type & IPV6_ADDR_MULTICAST)
173 		return -ENETUNREACH;
174 
175 	if (addr_type&IPV6_ADDR_LINKLOCAL) {
176 		if (addr_len >= sizeof(struct sockaddr_in6) &&
177 		    usin->sin6_scope_id) {
178 			/* If interface is set while binding, indices
179 			 * must coincide.
180 			 */
181 			if (sk->sk_bound_dev_if &&
182 			    sk->sk_bound_dev_if != usin->sin6_scope_id)
183 				return -EINVAL;
184 
185 			sk->sk_bound_dev_if = usin->sin6_scope_id;
186 		}
187 
188 		/* Connect to link-local address requires an interface */
189 		if (!sk->sk_bound_dev_if)
190 			return -EINVAL;
191 	}
192 
193 	if (tp->rx_opt.ts_recent_stamp &&
194 	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
195 		tp->rx_opt.ts_recent = 0;
196 		tp->rx_opt.ts_recent_stamp = 0;
197 		tp->write_seq = 0;
198 	}
199 
200 	sk->sk_v6_daddr = usin->sin6_addr;
201 	np->flow_label = fl6.flowlabel;
202 
203 	/*
204 	 *	TCP over IPv4
205 	 */
206 
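	/* For example, connecting an AF_INET6 socket to the v4-mapped
	 * address ::ffff:192.0.2.1 takes this branch: the connection is
	 * handed to tcp_v4_connect(), unless IPV6_V6ONLY is set, in
	 * which case it fails with -ENETUNREACH below.
	 */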
207 	if (addr_type == IPV6_ADDR_MAPPED) {
208 		u32 exthdrlen = icsk->icsk_ext_hdr_len;
209 		struct sockaddr_in sin;
210 
211 		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
212 
213 		if (__ipv6_only_sock(sk))
214 			return -ENETUNREACH;
215 
216 		sin.sin_family = AF_INET;
217 		sin.sin_port = usin->sin6_port;
218 		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
219 
220 		icsk->icsk_af_ops = &ipv6_mapped;
221 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
222 #ifdef CONFIG_TCP_MD5SIG
223 		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
224 #endif
225 
226 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
227 
228 		if (err) {
229 			icsk->icsk_ext_hdr_len = exthdrlen;
230 			icsk->icsk_af_ops = &ipv6_specific;
231 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
232 #ifdef CONFIG_TCP_MD5SIG
233 			tp->af_specific = &tcp_sock_ipv6_specific;
234 #endif
235 			goto failure;
236 		} else {
237 			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
238 			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
239 					       &sk->sk_v6_rcv_saddr);
240 		}
241 
242 		return err;
243 	}
244 
245 	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
246 		saddr = &sk->sk_v6_rcv_saddr;
247 
248 	fl6.flowi6_proto = IPPROTO_TCP;
249 	fl6.daddr = sk->sk_v6_daddr;
250 	fl6.saddr = saddr ? *saddr : np->saddr;
251 	fl6.flowi6_oif = sk->sk_bound_dev_if;
252 	fl6.flowi6_mark = sk->sk_mark;
253 	fl6.fl6_dport = usin->sin6_port;
254 	fl6.fl6_sport = inet->inet_sport;
255 
256 	final_p = fl6_update_dst(&fl6, np->opt, &final);
257 
258 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
259 
260 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
261 	if (IS_ERR(dst)) {
262 		err = PTR_ERR(dst);
263 		goto failure;
264 	}
265 
266 	if (saddr == NULL) {
267 		saddr = &fl6.saddr;
268 		sk->sk_v6_rcv_saddr = *saddr;
269 	}
270 
271 	/* set the source address */
272 	np->saddr = *saddr;
273 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
274 
275 	sk->sk_gso_type = SKB_GSO_TCPV6;
276 	__ip6_dst_store(sk, dst, NULL, NULL);
277 
278 	rt = (struct rt6_info *) dst;
279 	if (tcp_death_row.sysctl_tw_recycle &&
280 	    !tp->rx_opt.ts_recent_stamp &&
281 	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
282 		tcp_fetch_timewait_stamp(sk, dst);
283 
284 	icsk->icsk_ext_hdr_len = 0;
285 	if (np->opt)
286 		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
287 					  np->opt->opt_nflen);
288 
289 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
290 
291 	inet->inet_dport = usin->sin6_port;
292 
293 	tcp_set_state(sk, TCP_SYN_SENT);
294 	err = inet6_hash_connect(&tcp_death_row, sk);
295 	if (err)
296 		goto late_failure;
297 
298 	ip6_set_txhash(sk);
299 
300 	if (!tp->write_seq && likely(!tp->repair))
301 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
302 							     sk->sk_v6_daddr.s6_addr32,
303 							     inet->inet_sport,
304 							     inet->inet_dport);
305 
306 	err = tcp_connect(sk);
307 	if (err)
308 		goto late_failure;
309 
310 	return 0;
311 
312 late_failure:
313 	tcp_set_state(sk, TCP_CLOSE);
314 	__sk_dst_reset(sk);
315 failure:
316 	inet->inet_dport = 0;
317 	sk->sk_route_caps = 0;
318 	return err;
319 }
320 
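/* Apply a PMTU update recorded in tp->mtu_info (set by tcp_v6_err()
 * on ICMPV6_PKT_TOOBIG).  If the new path MTU is smaller than the one
 * in use, shrink the MSS and retransmit what no longer fits.
 */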
321 static void tcp_v6_mtu_reduced(struct sock *sk)
322 {
323 	struct dst_entry *dst;
324 
325 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
326 		return;
327 
328 	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
329 	if (!dst)
330 		return;
331 
332 	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
333 		tcp_sync_mss(sk, dst_mtu(dst));
334 		tcp_simple_retransmit(sk);
335 	}
336 }
337 
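/* ICMPv6 error handler for TCP.  Looks up the socket the error refers
 * to, handles redirects and Packet Too Big messages specially, drops
 * pending request socks whose SYN-ACK triggered the error, and
 * otherwise converts the ICMPv6 type/code into an errno reported
 * through sk_err / sk_err_soft.
 */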
338 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
339 		u8 type, u8 code, int offset, __be32 info)
340 {
341 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
342 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
343 	struct ipv6_pinfo *np;
344 	struct sock *sk;
345 	int err;
346 	struct tcp_sock *tp;
347 	struct request_sock *fastopen;
348 	__u32 seq, snd_una;
349 	struct net *net = dev_net(skb->dev);
350 
351 	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
352 			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
353 
354 	if (sk == NULL) {
355 		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
356 				   ICMP6_MIB_INERRORS);
357 		return;
358 	}
359 
360 	if (sk->sk_state == TCP_TIME_WAIT) {
361 		inet_twsk_put(inet_twsk(sk));
362 		return;
363 	}
364 
365 	bh_lock_sock(sk);
366 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
367 		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
368 
369 	if (sk->sk_state == TCP_CLOSE)
370 		goto out;
371 
372 	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
373 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
374 		goto out;
375 	}
376 
377 	tp = tcp_sk(sk);
378 	seq = ntohl(th->seq);
379 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
380 	fastopen = tp->fastopen_rsk;
381 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
382 	if (sk->sk_state != TCP_LISTEN &&
383 	    !between(seq, snd_una, tp->snd_nxt)) {
384 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
385 		goto out;
386 	}
387 
388 	np = inet6_sk(sk);
389 
390 	if (type == NDISC_REDIRECT) {
391 		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
392 
393 		if (dst)
394 			dst->ops->redirect(dst, sk, skb);
395 		goto out;
396 	}
397 
398 	if (type == ICMPV6_PKT_TOOBIG) {
399 		/* We are not interested in TCP_LISTEN and open_requests
400 		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
401 		 * they should go through unfragmented).
402 		 */
403 		if (sk->sk_state == TCP_LISTEN)
404 			goto out;
405 
406 		if (!ip6_sk_accept_pmtu(sk))
407 			goto out;
408 
409 		tp->mtu_info = ntohl(info);
410 		if (!sock_owned_by_user(sk))
411 			tcp_v6_mtu_reduced(sk);
412 		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
413 					   &tp->tsq_flags))
414 			sock_hold(sk);
415 		goto out;
416 	}
417 
418 	icmpv6_err_convert(type, code, &err);
419 
420 	/* Might be for a request_sock */
421 	switch (sk->sk_state) {
422 		struct request_sock *req, **prev;
423 	case TCP_LISTEN:
424 		if (sock_owned_by_user(sk))
425 			goto out;
426 
427 		/* Note: we use inet6_iif() here, not tcp_v6_iif() */
428 		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
429 					   &hdr->saddr, inet6_iif(skb));
430 		if (!req)
431 			goto out;
432 
433 		/* ICMPs are not backlogged, hence we cannot get
434 		 * an established socket here.
435 		 */
436 		WARN_ON(req->sk != NULL);
437 
438 		if (seq != tcp_rsk(req)->snt_isn) {
439 			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
440 			goto out;
441 		}
442 
443 		inet_csk_reqsk_queue_drop(sk, req, prev);
444 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
445 		goto out;
446 
447 	case TCP_SYN_SENT:
448 	case TCP_SYN_RECV:
449 		/* Only in fast or simultaneous open. If a fast open socket
450 		 * is already accepted, it is treated as a connected one below.
451 		 */
452 		if (fastopen && fastopen->sk == NULL)
453 			break;
454 
455 		if (!sock_owned_by_user(sk)) {
456 			sk->sk_err = err;
457 			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
458 
459 			tcp_done(sk);
460 		} else
461 			sk->sk_err_soft = err;
462 		goto out;
463 	}
464 
465 	if (!sock_owned_by_user(sk) && np->recverr) {
466 		sk->sk_err = err;
467 		sk->sk_error_report(sk);
468 	} else
469 		sk->sk_err_soft = err;
470 
471 out:
472 	bh_unlock_sock(sk);
473 	sock_put(sk);
474 }
475 
476 
477 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
478 			      struct flowi *fl,
479 			      struct request_sock *req,
480 			      u16 queue_mapping,
481 			      struct tcp_fastopen_cookie *foc)
482 {
483 	struct inet_request_sock *ireq = inet_rsk(req);
484 	struct ipv6_pinfo *np = inet6_sk(sk);
485 	struct flowi6 *fl6 = &fl->u.ip6;
486 	struct sk_buff *skb;
487 	int err = -ENOMEM;
488 
489 	/* First, grab a route. */
490 	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
491 		goto done;
492 
493 	skb = tcp_make_synack(sk, dst, req, foc);
494 
495 	if (skb) {
496 		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
497 				    &ireq->ir_v6_rmt_addr);
498 
499 		fl6->daddr = ireq->ir_v6_rmt_addr;
500 		if (np->repflow && (ireq->pktopts != NULL))
501 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
502 
503 		skb_set_queue_mapping(skb, queue_mapping);
504 		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
505 		err = net_xmit_eval(err);
506 	}
507 
508 done:
509 	return err;
510 }
511 
512 
513 static void tcp_v6_reqsk_destructor(struct request_sock *req)
514 {
515 	kfree_skb(inet_rsk(req)->pktopts);
516 }
517 
518 #ifdef CONFIG_TCP_MD5SIG
519 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
520 						   const struct in6_addr *addr)
521 {
522 	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
523 }
524 
525 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
526 						struct sock *addr_sk)
527 {
528 	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
529 }
530 
531 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
532 						      struct request_sock *req)
533 {
534 	return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
535 }
536 
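/* Parse a TCP_MD5SIG setsockopt() request.  For reference, userspace
 * installs a key for an IPv6 peer roughly like this (a minimal sketch,
 * not taken from this file; error handling omitted):
 *
 *	struct tcp_md5sig md5sig;
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5sig.tcpm_addr;
 *
 *	memset(&md5sig, 0, sizeof(md5sig));
 *	a->sin6_family = AF_INET6;
 *	a->sin6_addr = peer_addr;		// struct in6_addr of the peer
 *	md5sig.tcpm_keylen = strlen(secret);	// <= TCP_MD5SIG_MAXKEYLEN
 *	memcpy(md5sig.tcpm_key, secret, md5sig.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5sig, sizeof(md5sig));
 *
 * A zero tcpm_keylen deletes the key instead, and v4-mapped addresses
 * are stored as AF_INET keys, as handled below.
 */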
537 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
538 				 int optlen)
539 {
540 	struct tcp_md5sig cmd;
541 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
542 
543 	if (optlen < sizeof(cmd))
544 		return -EINVAL;
545 
546 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
547 		return -EFAULT;
548 
549 	if (sin6->sin6_family != AF_INET6)
550 		return -EINVAL;
551 
552 	if (!cmd.tcpm_keylen) {
553 		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
554 			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
555 					      AF_INET);
556 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
557 				      AF_INET6);
558 	}
559 
560 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
561 		return -EINVAL;
562 
563 	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
564 		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
565 				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
566 
567 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
568 			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
569 }
570 
571 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
572 					const struct in6_addr *daddr,
573 					const struct in6_addr *saddr, int nbytes)
574 {
575 	struct tcp6_pseudohdr *bp;
576 	struct scatterlist sg;
577 
578 	bp = &hp->md5_blk.ip6;
579 	/* 1. TCP pseudo-header (RFC2460) */
580 	bp->saddr = *saddr;
581 	bp->daddr = *daddr;
582 	bp->protocol = cpu_to_be32(IPPROTO_TCP);
583 	bp->len = cpu_to_be32(nbytes);
584 
585 	sg_init_one(&sg, bp, sizeof(*bp));
586 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
587 }
588 
589 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
590 			       const struct in6_addr *daddr, struct in6_addr *saddr,
591 			       const struct tcphdr *th)
592 {
593 	struct tcp_md5sig_pool *hp;
594 	struct hash_desc *desc;
595 
596 	hp = tcp_get_md5sig_pool();
597 	if (!hp)
598 		goto clear_hash_noput;
599 	desc = &hp->md5_desc;
600 
601 	if (crypto_hash_init(desc))
602 		goto clear_hash;
603 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
604 		goto clear_hash;
605 	if (tcp_md5_hash_header(hp, th))
606 		goto clear_hash;
607 	if (tcp_md5_hash_key(hp, key))
608 		goto clear_hash;
609 	if (crypto_hash_final(desc, md5_hash))
610 		goto clear_hash;
611 
612 	tcp_put_md5sig_pool();
613 	return 0;
614 
615 clear_hash:
616 	tcp_put_md5sig_pool();
617 clear_hash_noput:
618 	memset(md5_hash, 0, 16);
619 	return 1;
620 }
621 
622 static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
623 			       const struct sock *sk,
624 			       const struct request_sock *req,
625 			       const struct sk_buff *skb)
626 {
627 	const struct in6_addr *saddr, *daddr;
628 	struct tcp_md5sig_pool *hp;
629 	struct hash_desc *desc;
630 	const struct tcphdr *th = tcp_hdr(skb);
631 
632 	if (sk) {
633 		saddr = &inet6_sk(sk)->saddr;
634 		daddr = &sk->sk_v6_daddr;
635 	} else if (req) {
636 		saddr = &inet_rsk(req)->ir_v6_loc_addr;
637 		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
638 	} else {
639 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
640 		saddr = &ip6h->saddr;
641 		daddr = &ip6h->daddr;
642 	}
643 
644 	hp = tcp_get_md5sig_pool();
645 	if (!hp)
646 		goto clear_hash_noput;
647 	desc = &hp->md5_desc;
648 
649 	if (crypto_hash_init(desc))
650 		goto clear_hash;
651 
652 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
653 		goto clear_hash;
654 	if (tcp_md5_hash_header(hp, th))
655 		goto clear_hash;
656 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
657 		goto clear_hash;
658 	if (tcp_md5_hash_key(hp, key))
659 		goto clear_hash;
660 	if (crypto_hash_final(desc, md5_hash))
661 		goto clear_hash;
662 
663 	tcp_put_md5sig_pool();
664 	return 0;
665 
666 clear_hash:
667 	tcp_put_md5sig_pool();
668 clear_hash_noput:
669 	memset(md5_hash, 0, 16);
670 	return 1;
671 }
672 
673 static int __tcp_v6_inbound_md5_hash(struct sock *sk,
674 				     const struct sk_buff *skb)
675 {
676 	const __u8 *hash_location = NULL;
677 	struct tcp_md5sig_key *hash_expected;
678 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
679 	const struct tcphdr *th = tcp_hdr(skb);
680 	int genhash;
681 	u8 newhash[16];
682 
683 	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
684 	hash_location = tcp_parse_md5sig_option(th);
685 
686 	/* We've parsed the options - do we have a hash? */
687 	if (!hash_expected && !hash_location)
688 		return 0;
689 
690 	if (hash_expected && !hash_location) {
691 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
692 		return 1;
693 	}
694 
695 	if (!hash_expected && hash_location) {
696 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
697 		return 1;
698 	}
699 
700 	/* check the signature */
701 	genhash = tcp_v6_md5_hash_skb(newhash,
702 				      hash_expected,
703 				      NULL, NULL, skb);
704 
705 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
706 		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
707 				     genhash ? "failed" : "mismatch",
708 				     &ip6h->saddr, ntohs(th->source),
709 				     &ip6h->daddr, ntohs(th->dest));
710 		return 1;
711 	}
712 	return 0;
713 }
714 
715 static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
716 {
717 	int ret;
718 
719 	rcu_read_lock();
720 	ret = __tcp_v6_inbound_md5_hash(sk, skb);
721 	rcu_read_unlock();
722 
723 	return ret;
724 }
725 
726 #endif
727 
728 static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
729 			    struct sk_buff *skb)
730 {
731 	struct inet_request_sock *ireq = inet_rsk(req);
732 	struct ipv6_pinfo *np = inet6_sk(sk);
733 
734 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
735 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
736 
737 	ireq->ir_iif = sk->sk_bound_dev_if;
738 
739 	/* So that link-local addresses have meaning */
740 	if (!sk->sk_bound_dev_if &&
741 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
742 		ireq->ir_iif = tcp_v6_iif(skb);
743 
744 	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
745 	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
746 	     np->rxopt.bits.rxinfo ||
747 	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
748 	     np->rxopt.bits.rxohlim || np->repflow)) {
749 		atomic_inc(&skb->users);
750 		ireq->pktopts = skb;
751 	}
752 }
753 
754 static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
755 					  const struct request_sock *req,
756 					  bool *strict)
757 {
758 	if (strict)
759 		*strict = true;
760 	return inet6_csk_route_req(sk, &fl->u.ip6, req);
761 }
762 
763 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
764 	.family		=	AF_INET6,
765 	.obj_size	=	sizeof(struct tcp6_request_sock),
766 	.rtx_syn_ack	=	tcp_rtx_synack,
767 	.send_ack	=	tcp_v6_reqsk_send_ack,
768 	.destructor	=	tcp_v6_reqsk_destructor,
769 	.send_reset	=	tcp_v6_send_reset,
770 	.syn_ack_timeout =	tcp_syn_ack_timeout,
771 };
772 
773 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
774 	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
775 				sizeof(struct ipv6hdr),
776 #ifdef CONFIG_TCP_MD5SIG
777 	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
778 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
779 #endif
780 	.init_req	=	tcp_v6_init_req,
781 #ifdef CONFIG_SYN_COOKIES
782 	.cookie_init_seq =	cookie_v6_init_sequence,
783 #endif
784 	.route_req	=	tcp_v6_route_req,
785 	.init_seq	=	tcp_v6_init_sequence,
786 	.send_synack	=	tcp_v6_send_synack,
787 	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
788 };
789 
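/* Build and send a bare TCP segment (a RST when rst is set, otherwise
 * an ACK) in reply to skb.  The segment is constructed on a freshly
 * allocated skb, optionally carrying timestamp and MD5 options, and is
 * transmitted via the per-netns control socket, so it works even when
 * no full socket exists for the flow.
 */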
790 static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
791 				 u32 ack, u32 win, u32 tsval, u32 tsecr,
792 				 int oif, struct tcp_md5sig_key *key, int rst,
793 				 u8 tclass, u32 label)
794 {
795 	const struct tcphdr *th = tcp_hdr(skb);
796 	struct tcphdr *t1;
797 	struct sk_buff *buff;
798 	struct flowi6 fl6;
799 	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
800 	struct sock *ctl_sk = net->ipv6.tcp_sk;
801 	unsigned int tot_len = sizeof(struct tcphdr);
802 	struct dst_entry *dst;
803 	__be32 *topt;
804 
805 	if (tsecr)
806 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
807 #ifdef CONFIG_TCP_MD5SIG
808 	if (key)
809 		tot_len += TCPOLEN_MD5SIG_ALIGNED;
810 #endif
811 
812 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
813 			 GFP_ATOMIC);
814 	if (buff == NULL)
815 		return;
816 
817 	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
818 
819 	t1 = (struct tcphdr *) skb_push(buff, tot_len);
820 	skb_reset_transport_header(buff);
821 
822 	/* Swap the send and the receive. */
823 	memset(t1, 0, sizeof(*t1));
824 	t1->dest = th->source;
825 	t1->source = th->dest;
826 	t1->doff = tot_len / 4;
827 	t1->seq = htonl(seq);
828 	t1->ack_seq = htonl(ack);
829 	t1->ack = !rst || !th->ack;
830 	t1->rst = rst;
831 	t1->window = htons(win);
832 
833 	topt = (__be32 *)(t1 + 1);
834 
835 	if (tsecr) {
836 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
837 				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
838 		*topt++ = htonl(tsval);
839 		*topt++ = htonl(tsecr);
840 	}
841 
842 #ifdef CONFIG_TCP_MD5SIG
843 	if (key) {
844 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
845 				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
846 		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
847 				    &ipv6_hdr(skb)->saddr,
848 				    &ipv6_hdr(skb)->daddr, t1);
849 	}
850 #endif
851 
852 	memset(&fl6, 0, sizeof(fl6));
853 	fl6.daddr = ipv6_hdr(skb)->saddr;
854 	fl6.saddr = ipv6_hdr(skb)->daddr;
855 	fl6.flowlabel = label;
856 
857 	buff->ip_summed = CHECKSUM_PARTIAL;
858 	buff->csum = 0;
859 
860 	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
861 
862 	fl6.flowi6_proto = IPPROTO_TCP;
863 	if (rt6_need_strict(&fl6.daddr) && !oif)
864 		fl6.flowi6_oif = tcp_v6_iif(skb);
865 	else
866 		fl6.flowi6_oif = oif;
867 	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
868 	fl6.fl6_dport = t1->dest;
869 	fl6.fl6_sport = t1->source;
870 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
871 
872 	/* Pass a socket to ip6_dst_lookup_flow whether or not this is a RST;
873 	 * the underlying function will use it to retrieve the network
874 	 * namespace.
875 	 */
876 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
877 	if (!IS_ERR(dst)) {
878 		skb_dst_set(buff, dst);
879 		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
880 		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
881 		if (rst)
882 			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
883 		return;
884 	}
885 
886 	kfree_skb(buff);
887 }
888 
889 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
890 {
891 	const struct tcphdr *th = tcp_hdr(skb);
892 	u32 seq = 0, ack_seq = 0;
893 	struct tcp_md5sig_key *key = NULL;
894 #ifdef CONFIG_TCP_MD5SIG
895 	const __u8 *hash_location = NULL;
896 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
897 	unsigned char newhash[16];
898 	int genhash;
899 	struct sock *sk1 = NULL;
900 #endif
901 	int oif;
902 
903 	if (th->rst)
904 		return;
905 
906 	/* If sk is not NULL, it means we did a successful lookup and the
907 	 * incoming route had to be correct. prequeue might have dropped our dst.
908 	 */
909 	if (!sk && !ipv6_unicast_destination(skb))
910 		return;
911 
912 #ifdef CONFIG_TCP_MD5SIG
913 	hash_location = tcp_parse_md5sig_option(th);
914 	if (!sk && hash_location) {
915 		/*
916 		 * The active side is gone. Try to find the listening socket
917 		 * through the source port, and then find the md5 key through
918 		 * the listening socket. We do not lose security here:
919 		 * the incoming packet is checked against the md5 hash of the
920 		 * key we find, and no RST is generated if the hash doesn't match.
921 		 */
922 		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
923 					   &tcp_hashinfo, &ipv6h->saddr,
924 					   th->source, &ipv6h->daddr,
925 					   ntohs(th->source), tcp_v6_iif(skb));
926 		if (!sk1)
927 			return;
928 
929 		rcu_read_lock();
930 		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
931 		if (!key)
932 			goto release_sk1;
933 
934 		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
935 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
936 			goto release_sk1;
937 	} else {
938 		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
939 	}
940 #endif
941 
942 	if (th->ack)
943 		seq = ntohl(th->ack_seq);
944 	else
945 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
946 			  (th->doff << 2);
947 
948 	oif = sk ? sk->sk_bound_dev_if : 0;
949 	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
950 
951 #ifdef CONFIG_TCP_MD5SIG
952 release_sk1:
953 	if (sk1) {
954 		rcu_read_unlock();
955 		sock_put(sk1);
956 	}
957 #endif
958 }
959 
960 static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
961 			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
962 			    struct tcp_md5sig_key *key, u8 tclass,
963 			    u32 label)
964 {
965 	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
966 			     tclass, label);
967 }
968 
969 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
970 {
971 	struct inet_timewait_sock *tw = inet_twsk(sk);
972 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
973 
974 	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
975 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
976 			tcp_time_stamp + tcptw->tw_ts_offset,
977 			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
978 			tw->tw_tclass, (tw->tw_flowlabel << 12));
979 
980 	inet_twsk_put(tw);
981 }
982 
983 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
984 				  struct request_sock *req)
985 {
986 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
987 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
988 	 */
989 	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
990 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
991 			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
992 			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
993 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
994 			0, 0);
995 }
996 
997 
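/* For a segment received on a listening socket, find the socket that
 * should process it: a pending request sock completing the handshake,
 * an established socket matching the 4-tuple (a TIME_WAIT match causes
 * the segment to be dropped), or the listener itself, with a SYN
 * cookie check for bare ACKs when syncookies are enabled.
 */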
998 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
999 {
1000 	struct request_sock *req, **prev;
1001 	const struct tcphdr *th = tcp_hdr(skb);
1002 	struct sock *nsk;
1003 
1004 	/* Find possible connection requests. */
1005 	req = inet6_csk_search_req(sk, &prev, th->source,
1006 				   &ipv6_hdr(skb)->saddr,
1007 				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
1008 	if (req)
1009 		return tcp_check_req(sk, skb, req, prev, false);
1010 
1011 	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1012 					 &ipv6_hdr(skb)->saddr, th->source,
1013 					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
1014 					 tcp_v6_iif(skb));
1015 
1016 	if (nsk) {
1017 		if (nsk->sk_state != TCP_TIME_WAIT) {
1018 			bh_lock_sock(nsk);
1019 			return nsk;
1020 		}
1021 		inet_twsk_put(inet_twsk(nsk));
1022 		return NULL;
1023 	}
1024 
1025 #ifdef CONFIG_SYN_COOKIES
1026 	if (!th->syn)
1027 		sk = cookie_v6_check(sk, skb);
1028 #endif
1029 	return sk;
1030 }
1031 
1032 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1033 {
1034 	if (skb->protocol == htons(ETH_P_IP))
1035 		return tcp_v4_conn_request(sk, skb);
1036 
1037 	if (!ipv6_unicast_destination(skb))
1038 		goto drop;
1039 
1040 	return tcp_conn_request(&tcp6_request_sock_ops,
1041 				&tcp_request_sock_ipv6_ops, sk, skb);
1042 
1043 drop:
1044 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1045 	return 0; /* don't send reset */
1046 }
1047 
1048 static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1049 					 struct request_sock *req,
1050 					 struct dst_entry *dst)
1051 {
1052 	struct inet_request_sock *ireq;
1053 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1054 	struct tcp6_sock *newtcp6sk;
1055 	struct inet_sock *newinet;
1056 	struct tcp_sock *newtp;
1057 	struct sock *newsk;
1058 #ifdef CONFIG_TCP_MD5SIG
1059 	struct tcp_md5sig_key *key;
1060 #endif
1061 	struct flowi6 fl6;
1062 
1063 	if (skb->protocol == htons(ETH_P_IP)) {
1064 		/*
1065 		 *	v6 mapped
1066 		 */
1067 
1068 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1069 
1070 		if (newsk == NULL)
1071 			return NULL;
1072 
1073 		newtcp6sk = (struct tcp6_sock *)newsk;
1074 		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1075 
1076 		newinet = inet_sk(newsk);
1077 		newnp = inet6_sk(newsk);
1078 		newtp = tcp_sk(newsk);
1079 
1080 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1081 
1082 		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
1083 
1084 		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1085 
1086 		newsk->sk_v6_rcv_saddr = newnp->saddr;
1087 
1088 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1089 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1090 #ifdef CONFIG_TCP_MD5SIG
1091 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1092 #endif
1093 
1094 		newnp->ipv6_ac_list = NULL;
1095 		newnp->ipv6_fl_list = NULL;
1096 		newnp->pktoptions  = NULL;
1097 		newnp->opt	   = NULL;
1098 		newnp->mcast_oif   = tcp_v6_iif(skb);
1099 		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1100 		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1101 		if (np->repflow)
1102 			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1103 
1104 		/*
1105 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1106 		 * here, tcp_create_openreq_child now does this for us, see the comment in
1107 		 * that function for the gory details. -acme
1108 		 */
1109 
1110 		/* This is a tricky place. Until this moment the IPv4 tcp code
1111 		   worked with the IPv6 icsk.icsk_af_ops.
1112 		   Sync it now.
1113 		 */
1114 		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1115 
1116 		return newsk;
1117 	}
1118 
1119 	ireq = inet_rsk(req);
1120 
1121 	if (sk_acceptq_is_full(sk))
1122 		goto out_overflow;
1123 
1124 	if (!dst) {
1125 		dst = inet6_csk_route_req(sk, &fl6, req);
1126 		if (!dst)
1127 			goto out;
1128 	}
1129 
1130 	newsk = tcp_create_openreq_child(sk, req, skb);
1131 	if (newsk == NULL)
1132 		goto out_nonewsk;
1133 
1134 	/*
1135 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1136 	 * count here, tcp_create_openreq_child now does this for us, see the
1137 	 * comment in that function for the gory details. -acme
1138 	 */
1139 
1140 	newsk->sk_gso_type = SKB_GSO_TCPV6;
1141 	__ip6_dst_store(newsk, dst, NULL, NULL);
1142 	inet6_sk_rx_dst_set(newsk, skb);
1143 
1144 	newtcp6sk = (struct tcp6_sock *)newsk;
1145 	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1146 
1147 	newtp = tcp_sk(newsk);
1148 	newinet = inet_sk(newsk);
1149 	newnp = inet6_sk(newsk);
1150 
1151 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1152 
1153 	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1154 	newnp->saddr = ireq->ir_v6_loc_addr;
1155 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1156 	newsk->sk_bound_dev_if = ireq->ir_iif;
1157 
1158 	ip6_set_txhash(newsk);
1159 
1160 	/* Now IPv6 options...
1161 
1162 	   First: no IPv4 options.
1163 	 */
1164 	newinet->inet_opt = NULL;
1165 	newnp->ipv6_ac_list = NULL;
1166 	newnp->ipv6_fl_list = NULL;
1167 
1168 	/* Clone RX bits */
1169 	newnp->rxopt.all = np->rxopt.all;
1170 
1171 	/* Clone pktoptions received with SYN */
1172 	newnp->pktoptions = NULL;
1173 	if (ireq->pktopts != NULL) {
1174 		newnp->pktoptions = skb_clone(ireq->pktopts,
1175 					      sk_gfp_atomic(sk, GFP_ATOMIC));
1176 		consume_skb(ireq->pktopts);
1177 		ireq->pktopts = NULL;
1178 		if (newnp->pktoptions)
1179 			skb_set_owner_r(newnp->pktoptions, newsk);
1180 	}
1181 	newnp->opt	  = NULL;
1182 	newnp->mcast_oif  = tcp_v6_iif(skb);
1183 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1184 	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1185 	if (np->repflow)
1186 		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1187 
1188 	/* Clone native IPv6 options from the listening socket (if any).
1189 
1190 	   Yes, keeping a reference count would be much more clever,
1191 	   but we do one more thing here: reattach optmem
1192 	   to newsk.
1193 	 */
1194 	if (np->opt)
1195 		newnp->opt = ipv6_dup_options(newsk, np->opt);
1196 
1197 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1198 	if (newnp->opt)
1199 		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1200 						     newnp->opt->opt_flen);
1201 
1202 	tcp_sync_mss(newsk, dst_mtu(dst));
1203 	newtp->advmss = dst_metric_advmss(dst);
1204 	if (tcp_sk(sk)->rx_opt.user_mss &&
1205 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1206 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1207 
1208 	tcp_initialize_rcv_mss(newsk);
1209 
1210 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1211 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1212 
1213 #ifdef CONFIG_TCP_MD5SIG
1214 	/* Copy over the MD5 key from the original socket */
1215 	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1216 	if (key != NULL) {
1217 		/* We're using one, so create a matching key
1218 		 * on the newsk structure. If we fail to get
1219 		 * memory, then we end up not copying the key
1220 		 * across. Shucks.
1221 		 */
1222 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1223 			       AF_INET6, key->key, key->keylen,
1224 			       sk_gfp_atomic(sk, GFP_ATOMIC));
1225 	}
1226 #endif
1227 
1228 	if (__inet_inherit_port(sk, newsk) < 0) {
1229 		inet_csk_prepare_forced_close(newsk);
1230 		tcp_done(newsk);
1231 		goto out;
1232 	}
1233 	__inet6_hash(newsk, NULL);
1234 
1235 	return newsk;
1236 
1237 out_overflow:
1238 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1239 out_nonewsk:
1240 	dst_release(dst);
1241 out:
1242 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1243 	return NULL;
1244 }
1245 
1246 /* The socket must have its spinlock held when we get
1247  * here.
1248  *
1249  * We have a potential double-lock case here, so even when
1250  * doing backlog processing we use the BH locking scheme.
1251  * This is because we cannot sleep with the original spinlock
1252  * held.
1253  */
1254 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1255 {
1256 	struct ipv6_pinfo *np = inet6_sk(sk);
1257 	struct tcp_sock *tp;
1258 	struct sk_buff *opt_skb = NULL;
1259 
1260 	/* Imagine: the socket is IPv6. An IPv4 packet arrives,
1261 	   goes to the IPv4 receive handler and is backlogged.
1262 	   From the backlog it always goes here. Kerboom...
1263 	   Fortunately, tcp_rcv_established and rcv_established
1264 	   handle them correctly, but that is not the case with
1265 	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1266 	 */
1267 
1268 	if (skb->protocol == htons(ETH_P_IP))
1269 		return tcp_v4_do_rcv(sk, skb);
1270 
1271 	if (sk_filter(sk, skb))
1272 		goto discard;
1273 
1274 	/*
1275 	 *	socket locking is here for SMP purposes as backlog rcv
1276 	 *	is currently called with bh processing disabled.
1277 	 */
1278 
1279 	/* Do Stevens' IPV6_PKTOPTIONS.
1280 
1281 	   Yes, guys, it is the only place in our code where we
1282 	   can do this without affecting IPv4.
1283 	   The rest of the code is protocol independent,
1284 	   and I do not like the idea of uglifying IPv4.
1285 
1286 	   Actually, the whole idea behind IPV6_PKTOPTIONS
1287 	   does not look very well thought out. For now we latch
1288 	   the options received in the last packet enqueued
1289 	   by tcp. Feel free to propose a better solution.
1290 					       --ANK (980728)
1291 	 */
1292 	if (np->rxopt.all)
1293 		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
1294 
1295 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1296 		struct dst_entry *dst = sk->sk_rx_dst;
1297 
1298 		sock_rps_save_rxhash(sk, skb);
1299 		sk_mark_napi_id(sk, skb);
1300 		if (dst) {
1301 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1302 			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1303 				dst_release(dst);
1304 				sk->sk_rx_dst = NULL;
1305 			}
1306 		}
1307 
1308 		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1309 		if (opt_skb)
1310 			goto ipv6_pktoptions;
1311 		return 0;
1312 	}
1313 
1314 	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1315 		goto csum_err;
1316 
1317 	if (sk->sk_state == TCP_LISTEN) {
1318 		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1319 		if (!nsk)
1320 			goto discard;
1321 
1322 		/*
1323 		 * Queue it on the new socket if the new socket is active,
1324 		 * otherwise we just short-circuit this and continue with
1325 		 * the new socket.
1326 		 */
1327 		if (nsk != sk) {
1328 			sock_rps_save_rxhash(nsk, skb);
1329 			sk_mark_napi_id(sk, skb);
1330 			if (tcp_child_process(sk, nsk, skb))
1331 				goto reset;
1332 			if (opt_skb)
1333 				__kfree_skb(opt_skb);
1334 			return 0;
1335 		}
1336 	} else
1337 		sock_rps_save_rxhash(sk, skb);
1338 
1339 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1340 		goto reset;
1341 	if (opt_skb)
1342 		goto ipv6_pktoptions;
1343 	return 0;
1344 
1345 reset:
1346 	tcp_v6_send_reset(sk, skb);
1347 discard:
1348 	if (opt_skb)
1349 		__kfree_skb(opt_skb);
1350 	kfree_skb(skb);
1351 	return 0;
1352 csum_err:
1353 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1354 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1355 	goto discard;
1356 
1357 
1358 ipv6_pktoptions:
1359 	/* You may ask, what is this about?
1360 
1361 	   1. The skb was enqueued by tcp.
1362 	   2. The skb was added to the tail of the read queue, not out of order.
1363 	   3. The socket is not in a passive state.
1364 	   4. Finally, it really contains options that the user wants to receive.
1365 	 */
1366 	tp = tcp_sk(sk);
1367 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1368 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1369 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1370 			np->mcast_oif = tcp_v6_iif(opt_skb);
1371 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1372 			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1373 		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1374 			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1375 		if (np->repflow)
1376 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1377 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1378 			skb_set_owner_r(opt_skb, sk);
1379 			opt_skb = xchg(&np->pktoptions, opt_skb);
1380 		} else {
1381 			__kfree_skb(opt_skb);
1382 			opt_skb = xchg(&np->pktoptions, NULL);
1383 		}
1384 	}
1385 
1386 	kfree_skb(opt_skb);
1387 	return 0;
1388 }
1389 
1390 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1391 			   const struct tcphdr *th)
1392 {
1393 	/* This is tricky: we move IP6CB to its correct location inside
1394 	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1395 	 * _decode_session6() uses IP6CB().
1396 	 * barrier() makes sure the compiler won't play aliasing games.
1397 	 */
1398 	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1399 		sizeof(struct inet6_skb_parm));
1400 	barrier();
1401 
1402 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1403 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1404 				    skb->len - th->doff*4);
1405 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1406 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1407 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1408 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1409 	TCP_SKB_CB(skb)->sacked = 0;
1410 }
1411 
1412 static int tcp_v6_rcv(struct sk_buff *skb)
1413 {
1414 	const struct tcphdr *th;
1415 	const struct ipv6hdr *hdr;
1416 	struct sock *sk;
1417 	int ret;
1418 	struct net *net = dev_net(skb->dev);
1419 
1420 	if (skb->pkt_type != PACKET_HOST)
1421 		goto discard_it;
1422 
1423 	/*
1424 	 *	Count it even if it's bad.
1425 	 */
1426 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1427 
1428 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1429 		goto discard_it;
1430 
1431 	th = tcp_hdr(skb);
1432 
1433 	if (th->doff < sizeof(struct tcphdr)/4)
1434 		goto bad_packet;
1435 	if (!pskb_may_pull(skb, th->doff*4))
1436 		goto discard_it;
1437 
1438 	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1439 		goto csum_error;
1440 
1441 	th = tcp_hdr(skb);
1442 	hdr = ipv6_hdr(skb);
1443 
1444 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
1445 				inet6_iif(skb));
1446 	if (!sk)
1447 		goto no_tcp_socket;
1448 
1449 process:
1450 	if (sk->sk_state == TCP_TIME_WAIT)
1451 		goto do_time_wait;
1452 
1453 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1454 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1455 		goto discard_and_relse;
1456 	}
1457 
1458 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1459 		goto discard_and_relse;
1460 
1461 	tcp_v6_fill_cb(skb, hdr, th);
1462 
1463 #ifdef CONFIG_TCP_MD5SIG
1464 	if (tcp_v6_inbound_md5_hash(sk, skb))
1465 		goto discard_and_relse;
1466 #endif
1467 
1468 	if (sk_filter(sk, skb))
1469 		goto discard_and_relse;
1470 
1471 	sk_incoming_cpu_update(sk);
1472 	skb->dev = NULL;
1473 
1474 	bh_lock_sock_nested(sk);
1475 	ret = 0;
1476 	if (!sock_owned_by_user(sk)) {
1477 		if (!tcp_prequeue(sk, skb))
1478 			ret = tcp_v6_do_rcv(sk, skb);
1479 	} else if (unlikely(sk_add_backlog(sk, skb,
1480 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1481 		bh_unlock_sock(sk);
1482 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1483 		goto discard_and_relse;
1484 	}
1485 	bh_unlock_sock(sk);
1486 
1487 	sock_put(sk);
1488 	return ret ? -1 : 0;
1489 
1490 no_tcp_socket:
1491 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1492 		goto discard_it;
1493 
1494 	tcp_v6_fill_cb(skb, hdr, th);
1495 
1496 	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1497 csum_error:
1498 		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1499 bad_packet:
1500 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1501 	} else {
1502 		tcp_v6_send_reset(NULL, skb);
1503 	}
1504 
1505 discard_it:
1506 	kfree_skb(skb);
1507 	return 0;
1508 
1509 discard_and_relse:
1510 	sock_put(sk);
1511 	goto discard_it;
1512 
1513 do_time_wait:
1514 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1515 		inet_twsk_put(inet_twsk(sk));
1516 		goto discard_it;
1517 	}
1518 
1519 	tcp_v6_fill_cb(skb, hdr, th);
1520 
1521 	if (skb->len < (th->doff<<2)) {
1522 		inet_twsk_put(inet_twsk(sk));
1523 		goto bad_packet;
1524 	}
1525 	if (tcp_checksum_complete(skb)) {
1526 		inet_twsk_put(inet_twsk(sk));
1527 		goto csum_error;
1528 	}
1529 
1530 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1531 	case TCP_TW_SYN:
1532 	{
1533 		struct sock *sk2;
1534 
1535 		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1536 					    &ipv6_hdr(skb)->saddr, th->source,
1537 					    &ipv6_hdr(skb)->daddr,
1538 					    ntohs(th->dest), tcp_v6_iif(skb));
1539 		if (sk2 != NULL) {
1540 			struct inet_timewait_sock *tw = inet_twsk(sk);
1541 			inet_twsk_deschedule(tw, &tcp_death_row);
1542 			inet_twsk_put(tw);
1543 			sk = sk2;
1544 			goto process;
1545 		}
1546 		/* Fall through to ACK */
1547 	}
1548 	case TCP_TW_ACK:
1549 		tcp_v6_timewait_ack(sk, skb);
1550 		break;
1551 	case TCP_TW_RST:
1552 		goto no_tcp_socket;
1553 	case TCP_TW_SUCCESS:
1554 		;
1555 	}
1556 	goto discard_it;
1557 }
1558 
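/* Early demultiplexing: before routing, try to match the packet to an
 * established socket and, if that socket has a validated cached input
 * route, attach it to the skb so the normal input path can skip the
 * route lookup.
 */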
1559 static void tcp_v6_early_demux(struct sk_buff *skb)
1560 {
1561 	const struct ipv6hdr *hdr;
1562 	const struct tcphdr *th;
1563 	struct sock *sk;
1564 
1565 	if (skb->pkt_type != PACKET_HOST)
1566 		return;
1567 
1568 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1569 		return;
1570 
1571 	hdr = ipv6_hdr(skb);
1572 	th = tcp_hdr(skb);
1573 
1574 	if (th->doff < sizeof(struct tcphdr) / 4)
1575 		return;
1576 
1577 	/* Note: we use inet6_iif() here, not tcp_v6_iif() */
1578 	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1579 					&hdr->saddr, th->source,
1580 					&hdr->daddr, ntohs(th->dest),
1581 					inet6_iif(skb));
1582 	if (sk) {
1583 		skb->sk = sk;
1584 		skb->destructor = sock_edemux;
1585 		if (sk->sk_state != TCP_TIME_WAIT) {
1586 			struct dst_entry *dst = sk->sk_rx_dst;
1587 
1588 			if (dst)
1589 				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1590 			if (dst &&
1591 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1592 				skb_dst_set_noref(skb, dst);
1593 		}
1594 	}
1595 }
1596 
1597 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1598 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1599 	.twsk_unique	= tcp_twsk_unique,
1600 	.twsk_destructor = tcp_twsk_destructor,
1601 };
1602 
1603 static const struct inet_connection_sock_af_ops ipv6_specific = {
1604 	.queue_xmit	   = inet6_csk_xmit,
1605 	.send_check	   = tcp_v6_send_check,
1606 	.rebuild_header	   = inet6_sk_rebuild_header,
1607 	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1608 	.conn_request	   = tcp_v6_conn_request,
1609 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1610 	.net_header_len	   = sizeof(struct ipv6hdr),
1611 	.net_frag_header_len = sizeof(struct frag_hdr),
1612 	.setsockopt	   = ipv6_setsockopt,
1613 	.getsockopt	   = ipv6_getsockopt,
1614 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1615 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1616 	.bind_conflict	   = inet6_csk_bind_conflict,
1617 #ifdef CONFIG_COMPAT
1618 	.compat_setsockopt = compat_ipv6_setsockopt,
1619 	.compat_getsockopt = compat_ipv6_getsockopt,
1620 #endif
1621 	.mtu_reduced	   = tcp_v6_mtu_reduced,
1622 };
1623 
1624 #ifdef CONFIG_TCP_MD5SIG
1625 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1626 	.md5_lookup	=	tcp_v6_md5_lookup,
1627 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1628 	.md5_parse	=	tcp_v6_parse_md5_keys,
1629 };
1630 #endif
1631 
1632 /*
1633  *	TCP over IPv4 via INET6 API
1634  */
1635 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1636 	.queue_xmit	   = ip_queue_xmit,
1637 	.send_check	   = tcp_v4_send_check,
1638 	.rebuild_header	   = inet_sk_rebuild_header,
1639 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1640 	.conn_request	   = tcp_v6_conn_request,
1641 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1642 	.net_header_len	   = sizeof(struct iphdr),
1643 	.setsockopt	   = ipv6_setsockopt,
1644 	.getsockopt	   = ipv6_getsockopt,
1645 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1646 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1647 	.bind_conflict	   = inet6_csk_bind_conflict,
1648 #ifdef CONFIG_COMPAT
1649 	.compat_setsockopt = compat_ipv6_setsockopt,
1650 	.compat_getsockopt = compat_ipv6_getsockopt,
1651 #endif
1652 	.mtu_reduced	   = tcp_v4_mtu_reduced,
1653 };
1654 
1655 #ifdef CONFIG_TCP_MD5SIG
1656 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1657 	.md5_lookup	=	tcp_v4_md5_lookup,
1658 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1659 	.md5_parse	=	tcp_v6_parse_md5_keys,
1660 };
1661 #endif
1662 
1663 /* NOTE: A lot of things are set to zero explicitly by the call to
1664  *       sk_alloc(), so they need not be done here.
1665  */
1666 static int tcp_v6_init_sock(struct sock *sk)
1667 {
1668 	struct inet_connection_sock *icsk = inet_csk(sk);
1669 
1670 	tcp_init_sock(sk);
1671 
1672 	icsk->icsk_af_ops = &ipv6_specific;
1673 
1674 #ifdef CONFIG_TCP_MD5SIG
1675 	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1676 #endif
1677 
1678 	return 0;
1679 }
1680 
1681 static void tcp_v6_destroy_sock(struct sock *sk)
1682 {
1683 	tcp_v4_destroy_sock(sk);
1684 	inet6_destroy_sock(sk);
1685 }
1686 
1687 #ifdef CONFIG_PROC_FS
1688 /* Proc filesystem TCPv6 sock list dumping. */
1689 static void get_openreq6(struct seq_file *seq,
1690 			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
1691 {
1692 	int ttd = req->expires - jiffies;
1693 	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1694 	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1695 
1696 	if (ttd < 0)
1697 		ttd = 0;
1698 
1699 	seq_printf(seq,
1700 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1701 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1702 		   i,
1703 		   src->s6_addr32[0], src->s6_addr32[1],
1704 		   src->s6_addr32[2], src->s6_addr32[3],
1705 		   inet_rsk(req)->ir_num,
1706 		   dest->s6_addr32[0], dest->s6_addr32[1],
1707 		   dest->s6_addr32[2], dest->s6_addr32[3],
1708 		   ntohs(inet_rsk(req)->ir_rmt_port),
1709 		   TCP_SYN_RECV,
1710 		   0, 0, /* could print option size, but that is af dependent. */
1711 		   1,   /* timers active (only the expire timer) */
1712 		   jiffies_to_clock_t(ttd),
1713 		   req->num_timeout,
1714 		   from_kuid_munged(seq_user_ns(seq), uid),
1715 		   0,  /* non standard timer */
1716 		   0, /* open_requests have no inode */
1717 		   0, req);
1718 }
1719 
1720 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1721 {
1722 	const struct in6_addr *dest, *src;
1723 	__u16 destp, srcp;
1724 	int timer_active;
1725 	unsigned long timer_expires;
1726 	const struct inet_sock *inet = inet_sk(sp);
1727 	const struct tcp_sock *tp = tcp_sk(sp);
1728 	const struct inet_connection_sock *icsk = inet_csk(sp);
1729 	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
1730 
1731 	dest  = &sp->sk_v6_daddr;
1732 	src   = &sp->sk_v6_rcv_saddr;
1733 	destp = ntohs(inet->inet_dport);
1734 	srcp  = ntohs(inet->inet_sport);
1735 
1736 	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1737 		timer_active	= 1;
1738 		timer_expires	= icsk->icsk_timeout;
1739 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1740 		timer_active	= 4;
1741 		timer_expires	= icsk->icsk_timeout;
1742 	} else if (timer_pending(&sp->sk_timer)) {
1743 		timer_active	= 2;
1744 		timer_expires	= sp->sk_timer.expires;
1745 	} else {
1746 		timer_active	= 0;
1747 		timer_expires = jiffies;
1748 	}
1749 
1750 	seq_printf(seq,
1751 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1752 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1753 		   i,
1754 		   src->s6_addr32[0], src->s6_addr32[1],
1755 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1756 		   dest->s6_addr32[0], dest->s6_addr32[1],
1757 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1758 		   sp->sk_state,
1759 		   tp->write_seq-tp->snd_una,
1760 		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1761 		   timer_active,
1762 		   jiffies_delta_to_clock_t(timer_expires - jiffies),
1763 		   icsk->icsk_retransmits,
1764 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1765 		   icsk->icsk_probes_out,
1766 		   sock_i_ino(sp),
1767 		   atomic_read(&sp->sk_refcnt), sp,
1768 		   jiffies_to_clock_t(icsk->icsk_rto),
1769 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1770 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1771 		   tp->snd_cwnd,
1772 		   sp->sk_state == TCP_LISTEN ?
1773 			(fastopenq ? fastopenq->max_qlen : 0) :
1774 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1775 		   );
1776 }
1777 
1778 static void get_timewait6_sock(struct seq_file *seq,
1779 			       struct inet_timewait_sock *tw, int i)
1780 {
1781 	const struct in6_addr *dest, *src;
1782 	__u16 destp, srcp;
1783 	s32 delta = tw->tw_ttd - inet_tw_time_stamp();
1784 
1785 	dest = &tw->tw_v6_daddr;
1786 	src  = &tw->tw_v6_rcv_saddr;
1787 	destp = ntohs(tw->tw_dport);
1788 	srcp  = ntohs(tw->tw_sport);
1789 
1790 	seq_printf(seq,
1791 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1792 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1793 		   i,
1794 		   src->s6_addr32[0], src->s6_addr32[1],
1795 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1796 		   dest->s6_addr32[0], dest->s6_addr32[1],
1797 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1798 		   tw->tw_substate, 0, 0,
1799 		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1800 		   atomic_read(&tw->tw_refcnt), tw);
1801 }
1802 
1803 static int tcp6_seq_show(struct seq_file *seq, void *v)
1804 {
1805 	struct tcp_iter_state *st;
1806 	struct sock *sk = v;
1807 
1808 	if (v == SEQ_START_TOKEN) {
1809 		seq_puts(seq,
1810 			 "  sl  "
1811 			 "local_address                         "
1812 			 "remote_address                        "
1813 			 "st tx_queue rx_queue tr tm->when retrnsmt"
1814 			 "   uid  timeout inode\n");
1815 		goto out;
1816 	}
1817 	st = seq->private;
1818 
1819 	switch (st->state) {
1820 	case TCP_SEQ_STATE_LISTENING:
1821 	case TCP_SEQ_STATE_ESTABLISHED:
1822 		if (sk->sk_state == TCP_TIME_WAIT)
1823 			get_timewait6_sock(seq, v, st->num);
1824 		else
1825 			get_tcp6_sock(seq, v, st->num);
1826 		break;
1827 	case TCP_SEQ_STATE_OPENREQ:
1828 		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1829 		break;
1830 	}
1831 out:
1832 	return 0;
1833 }
1834 
1835 static const struct file_operations tcp6_afinfo_seq_fops = {
1836 	.owner   = THIS_MODULE,
1837 	.open    = tcp_seq_open,
1838 	.read    = seq_read,
1839 	.llseek  = seq_lseek,
1840 	.release = seq_release_net
1841 };
1842 
1843 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1844 	.name		= "tcp6",
1845 	.family		= AF_INET6,
1846 	.seq_fops	= &tcp6_afinfo_seq_fops,
1847 	.seq_ops	= {
1848 		.show		= tcp6_seq_show,
1849 	},
1850 };
1851 
1852 int __net_init tcp6_proc_init(struct net *net)
1853 {
1854 	return tcp_proc_register(net, &tcp6_seq_afinfo);
1855 }
1856 
1857 void tcp6_proc_exit(struct net *net)
1858 {
1859 	tcp_proc_unregister(net, &tcp6_seq_afinfo);
1860 }
1861 #endif
1862 
1863 static void tcp_v6_clear_sk(struct sock *sk, int size)
1864 {
1865 	struct inet_sock *inet = inet_sk(sk);
1866 
1867 	/* we do not want to clear the pinet6 field, because of RCU lookups */
1868 	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1869 
1870 	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1871 	memset(&inet->pinet6 + 1, 0, size);
1872 }
1873 
1874 struct proto tcpv6_prot = {
1875 	.name			= "TCPv6",
1876 	.owner			= THIS_MODULE,
1877 	.close			= tcp_close,
1878 	.connect		= tcp_v6_connect,
1879 	.disconnect		= tcp_disconnect,
1880 	.accept			= inet_csk_accept,
1881 	.ioctl			= tcp_ioctl,
1882 	.init			= tcp_v6_init_sock,
1883 	.destroy		= tcp_v6_destroy_sock,
1884 	.shutdown		= tcp_shutdown,
1885 	.setsockopt		= tcp_setsockopt,
1886 	.getsockopt		= tcp_getsockopt,
1887 	.recvmsg		= tcp_recvmsg,
1888 	.sendmsg		= tcp_sendmsg,
1889 	.sendpage		= tcp_sendpage,
1890 	.backlog_rcv		= tcp_v6_do_rcv,
1891 	.release_cb		= tcp_release_cb,
1892 	.hash			= tcp_v6_hash,
1893 	.unhash			= inet_unhash,
1894 	.get_port		= inet_csk_get_port,
1895 	.enter_memory_pressure	= tcp_enter_memory_pressure,
1896 	.stream_memory_free	= tcp_stream_memory_free,
1897 	.sockets_allocated	= &tcp_sockets_allocated,
1898 	.memory_allocated	= &tcp_memory_allocated,
1899 	.memory_pressure	= &tcp_memory_pressure,
1900 	.orphan_count		= &tcp_orphan_count,
1901 	.sysctl_mem		= sysctl_tcp_mem,
1902 	.sysctl_wmem		= sysctl_tcp_wmem,
1903 	.sysctl_rmem		= sysctl_tcp_rmem,
1904 	.max_header		= MAX_TCP_HEADER,
1905 	.obj_size		= sizeof(struct tcp6_sock),
1906 	.slab_flags		= SLAB_DESTROY_BY_RCU,
1907 	.twsk_prot		= &tcp6_timewait_sock_ops,
1908 	.rsk_prot		= &tcp6_request_sock_ops,
1909 	.h.hashinfo		= &tcp_hashinfo,
1910 	.no_autobind		= true,
1911 #ifdef CONFIG_COMPAT
1912 	.compat_setsockopt	= compat_tcp_setsockopt,
1913 	.compat_getsockopt	= compat_tcp_getsockopt,
1914 #endif
1915 #ifdef CONFIG_MEMCG_KMEM
1916 	.proto_cgroup		= tcp_proto_cgroup,
1917 #endif
1918 	.clear_sk		= tcp_v6_clear_sk,
1919 };
1920 
1921 static const struct inet6_protocol tcpv6_protocol = {
1922 	.early_demux	=	tcp_v6_early_demux,
1923 	.handler	=	tcp_v6_rcv,
1924 	.err_handler	=	tcp_v6_err,
1925 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1926 };
1927 
1928 static struct inet_protosw tcpv6_protosw = {
1929 	.type		=	SOCK_STREAM,
1930 	.protocol	=	IPPROTO_TCP,
1931 	.prot		=	&tcpv6_prot,
1932 	.ops		=	&inet6_stream_ops,
1933 	.flags		=	INET_PROTOSW_PERMANENT |
1934 				INET_PROTOSW_ICSK,
1935 };
1936 
1937 static int __net_init tcpv6_net_init(struct net *net)
1938 {
1939 	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1940 				    SOCK_RAW, IPPROTO_TCP, net);
1941 }
1942 
1943 static void __net_exit tcpv6_net_exit(struct net *net)
1944 {
1945 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1946 }
1947 
1948 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1949 {
1950 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1951 }
1952 
1953 static struct pernet_operations tcpv6_net_ops = {
1954 	.init	    = tcpv6_net_init,
1955 	.exit	    = tcpv6_net_exit,
1956 	.exit_batch = tcpv6_net_exit_batch,
1957 };
1958 
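/* Module init: register the IPPROTO_TCP handler, then the SOCK_STREAM
 * protosw entry, then the per-namespace control sockets; the error
 * paths unwind the registrations in reverse order.
 */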
1959 int __init tcpv6_init(void)
1960 {
1961 	int ret;
1962 
1963 	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1964 	if (ret)
1965 		goto out;
1966 
1967 	/* register inet6 protocol */
1968 	ret = inet6_register_protosw(&tcpv6_protosw);
1969 	if (ret)
1970 		goto out_tcpv6_protocol;
1971 
1972 	ret = register_pernet_subsys(&tcpv6_net_ops);
1973 	if (ret)
1974 		goto out_tcpv6_protosw;
1975 out:
1976 	return ret;
1977 
1978 out_tcpv6_protosw:
1979 	inet6_unregister_protosw(&tcpv6_protosw);
1980 out_tcpv6_protocol:
1981 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1982 	goto out;
1983 }
1984 
1985 void tcpv6_exit(void)
1986 {
1987 	unregister_pernet_subsys(&tcpv6_net_ops);
1988 	inet6_unregister_protosw(&tcpv6_protosw);
1989 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1990 }
1991