/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

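/* Cache the incoming route on the socket so the established fast path
 * can reuse it; the fib node's serial number is saved as a cookie so a
 * stale dst can be detected and dropped later.
 */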
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		dst_hold(dst);
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		if (rt->rt6i_node)
			inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
	}
}

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

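/* Pick the initial sequence number from a keyed hash over the address
 * and port 4-tuple (cf. RFC 6528-style ISN generation).
 */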
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

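/* Active open: validate the destination address, hand v4-mapped
 * destinations off to tcp_v4_connect(), route the flow, pick a source
 * address and port, and send the SYN via tcp_connect().
 */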
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &sk->sk_v6_rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	ip6_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

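/* Apply a deferred ICMPV6_PKT_TOOBIG notification: update the cached
 * path MTU and retransmit if our current MSS no longer fits.
 */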
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

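/* ICMPv6 error handler: look up the socket the errored packet belonged
 * to and act on the error type (redirect, packet-too-big, or a hard
 * error that kills a connection still in the SYN states).
 */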
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		/* Note : We use inet6_iif() here, not tcp_v6_iif() */
		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted, it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


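/* Build and transmit a SYN-ACK for the given request, routing it first
 * if no dst was supplied by the caller.
 */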
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && (ireq->pktopts != NULL))
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

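/* Feed the IPv6 pseudo-header (saddr, daddr, length, next header) into
 * the MD5 digest, as required by the TCP MD5 signature option.
 */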
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &sk->sk_v6_daddr;
	} else if (req) {
		saddr = &inet_rsk(req)->ir_v6_loc_addr;
		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int __tcp_v6_inbound_md5_hash(struct sock *sk,
				     const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}

static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	int ret;

	rcu_read_lock();
	ret = __tcp_v6_inbound_md5_hash(sk, skb);
	rcu_read_unlock();

	return ret;
}

#endif

static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
	ireq->ireq_family = AF_INET6;
}

static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
};

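/* Build and send a bare ACK or RST on the per-netns control socket,
 * echoing the addressing of the packet we are responding to; used for
 * resets and timewait/request ACKs that have no full socket to send
 * from.
 */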
static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for RST.
	 * The underlying function will use this to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * that listening socket. We do not lose security here: the
		 * incoming packet is checked against the md5 hash of the key
		 * we find, and no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, (tw->tw_flowlabel << 12));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}


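/* For a packet hitting a listener: match it against pending connection
 * requests, an already-established child, or a SYN cookie before
 * falling back to the listening socket itself.
 */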
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
					 tcp_v6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}

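/* Create the child socket for a completed handshake. Connections that
 * arrived as IPv4 (v4-mapped) are handed to tcp_v4_syn_recv_sock() and
 * then patched up to look like IPv6 sockets.
 */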
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newsk->sk_v6_rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment the IPv4 tcp code
		   worked with the IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	ip6_set_txhash(newsk);

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may do this without affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   the options received in the last packet enqueued
	   by tcp. Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(sk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which the user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}

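/* Main softirq entry point for IPv6 TCP segments: validate the header
 * and checksum, look up the owning socket, and either process the skb
 * directly, prequeue it, or push it onto the socket backlog.
 */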
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
				inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_incoming_cpu_update(sk);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (skb->len < (th->doff<<2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

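/* Early demux: before routing, try to match the packet to an
 * established socket so its cached rx dst can be used for input.
 */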
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct request_sock *req, int i, kuid_t uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   sp->sk_state == TCP_LISTEN ?
			(fastopenq ? fastopenq->max_qlen : 0) :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait6_sock(seq, v, st->num);
		else
			get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, v, st->num, st->uid);
		break;
	}
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

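/* Clear the socket for reuse in two spans, skipping over the pinet6
 * pointer so that concurrent RCU lookups still see a valid value.
 */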
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}