xref: /openbmc/linux/net/ipv6/tcp_ipv6.c (revision 79f08d9e)
1 /*
2  *	TCP over IPv6
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	Based on:
9  *	linux/net/ipv4/tcp.c
10  *	linux/net/ipv4/tcp_input.c
11  *	linux/net/ipv4/tcp_output.c
12  *
13  *	Fixes:
14  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
15  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
16  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
17  *					a single port at the same time.
18  *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
19  *
20  *	This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25 
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46 
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63 #include <net/inet_common.h>
64 #include <net/secure_seq.h>
65 #include <net/tcp_memcontrol.h>
66 #include <net/busy_poll.h>
67 
68 #include <asm/uaccess.h>
69 
70 #include <linux/proc_fs.h>
71 #include <linux/seq_file.h>
72 
73 #include <linux/crypto.h>
74 #include <linux/scatterlist.h>
75 
76 static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
77 static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
78 				      struct request_sock *req);
79 
80 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
81 
82 static const struct inet_connection_sock_af_ops ipv6_mapped;
83 static const struct inet_connection_sock_af_ops ipv6_specific;
84 #ifdef CONFIG_TCP_MD5SIG
85 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
86 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
87 #else
88 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
89 						   const struct in6_addr *addr)
90 {
91 	return NULL;
92 }
93 #endif
94 
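/* Cache the received route on the socket for the established-state RX
 * fast path, together with the incoming interface index and the FIB
 * node serial number used as a validity cookie.
 */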
95 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
96 {
97 	struct dst_entry *dst = skb_dst(skb);
98 	const struct rt6_info *rt = (const struct rt6_info *)dst;
99 
100 	dst_hold(dst);
101 	sk->sk_rx_dst = dst;
102 	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
103 	if (rt->rt6i_node)
104 		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
105 }
106 
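/* Hash the socket into the TCP lookup tables; sockets using the
 * v4-mapped ops are delegated to the IPv4 hash routine.
 */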
107 static void tcp_v6_hash(struct sock *sk)
108 {
109 	if (sk->sk_state != TCP_CLOSE) {
110 		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
111 			tcp_prot.hash(sk);
112 			return;
113 		}
114 		local_bh_disable();
115 		__inet6_hash(sk, NULL);
116 		local_bh_enable();
117 	}
118 }
119 
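/* Derive the initial sequence number from the connection 4-tuple. */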
120 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
121 {
122 	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
123 					    ipv6_hdr(skb)->saddr.s6_addr32,
124 					    tcp_hdr(skb)->dest,
125 					    tcp_hdr(skb)->source);
126 }
127 
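/* Active open: validate the destination (flow label, scope id,
 * multicast), fall back to tcp_v4_connect() for v4-mapped addresses,
 * perform the route lookup, bind a local port and send the SYN.
 */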
128 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
129 			  int addr_len)
130 {
131 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
132 	struct inet_sock *inet = inet_sk(sk);
133 	struct inet_connection_sock *icsk = inet_csk(sk);
134 	struct ipv6_pinfo *np = inet6_sk(sk);
135 	struct tcp_sock *tp = tcp_sk(sk);
136 	struct in6_addr *saddr = NULL, *final_p, final;
137 	struct rt6_info *rt;
138 	struct flowi6 fl6;
139 	struct dst_entry *dst;
140 	int addr_type;
141 	int err;
142 
143 	if (addr_len < SIN6_LEN_RFC2133)
144 		return -EINVAL;
145 
146 	if (usin->sin6_family != AF_INET6)
147 		return -EAFNOSUPPORT;
148 
149 	memset(&fl6, 0, sizeof(fl6));
150 
151 	if (np->sndflow) {
152 		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
153 		IP6_ECN_flow_init(fl6.flowlabel);
154 		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
155 			struct ip6_flowlabel *flowlabel;
156 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
157 			if (flowlabel == NULL)
158 				return -EINVAL;
159 			usin->sin6_addr = flowlabel->dst;
160 			fl6_sock_release(flowlabel);
161 		}
162 	}
163 
164 	/*
165 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
166 	 */
167 
168 	if (ipv6_addr_any(&usin->sin6_addr))
169 		usin->sin6_addr.s6_addr[15] = 0x1;
170 
171 	addr_type = ipv6_addr_type(&usin->sin6_addr);
172 
173 	if (addr_type & IPV6_ADDR_MULTICAST)
174 		return -ENETUNREACH;
175 
176 	if (addr_type & IPV6_ADDR_LINKLOCAL) {
177 		if (addr_len >= sizeof(struct sockaddr_in6) &&
178 		    usin->sin6_scope_id) {
179 			/* If interface is set while binding, indices
180 			 * must coincide.
181 			 */
182 			if (sk->sk_bound_dev_if &&
183 			    sk->sk_bound_dev_if != usin->sin6_scope_id)
184 				return -EINVAL;
185 
186 			sk->sk_bound_dev_if = usin->sin6_scope_id;
187 		}
188 
189 		/* Connecting to a link-local address requires an interface */
190 		if (!sk->sk_bound_dev_if)
191 			return -EINVAL;
192 	}
193 
194 	if (tp->rx_opt.ts_recent_stamp &&
195 	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
196 		tp->rx_opt.ts_recent = 0;
197 		tp->rx_opt.ts_recent_stamp = 0;
198 		tp->write_seq = 0;
199 	}
200 
201 	sk->sk_v6_daddr = usin->sin6_addr;
202 	np->flow_label = fl6.flowlabel;
203 
204 	/*
205 	 *	TCP over IPv4
206 	 */
207 
208 	if (addr_type == IPV6_ADDR_MAPPED) {
209 		u32 exthdrlen = icsk->icsk_ext_hdr_len;
210 		struct sockaddr_in sin;
211 
212 		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
213 
214 		if (__ipv6_only_sock(sk))
215 			return -ENETUNREACH;
216 
217 		sin.sin_family = AF_INET;
218 		sin.sin_port = usin->sin6_port;
219 		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
220 
221 		icsk->icsk_af_ops = &ipv6_mapped;
222 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
223 #ifdef CONFIG_TCP_MD5SIG
224 		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
225 #endif
226 
227 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
228 
229 		if (err) {
230 			icsk->icsk_ext_hdr_len = exthdrlen;
231 			icsk->icsk_af_ops = &ipv6_specific;
232 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
233 #ifdef CONFIG_TCP_MD5SIG
234 			tp->af_specific = &tcp_sock_ipv6_specific;
235 #endif
236 			goto failure;
237 		} else {
238 			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
239 			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
240 					       &sk->sk_v6_rcv_saddr);
241 		}
242 
243 		return err;
244 	}
245 
246 	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
247 		saddr = &sk->sk_v6_rcv_saddr;
248 
249 	fl6.flowi6_proto = IPPROTO_TCP;
250 	fl6.daddr = sk->sk_v6_daddr;
251 	fl6.saddr = saddr ? *saddr : np->saddr;
252 	fl6.flowi6_oif = sk->sk_bound_dev_if;
253 	fl6.flowi6_mark = sk->sk_mark;
254 	fl6.fl6_dport = usin->sin6_port;
255 	fl6.fl6_sport = inet->inet_sport;
256 
257 	final_p = fl6_update_dst(&fl6, np->opt, &final);
258 
259 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
260 
261 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
262 	if (IS_ERR(dst)) {
263 		err = PTR_ERR(dst);
264 		goto failure;
265 	}
266 
267 	if (saddr == NULL) {
268 		saddr = &fl6.saddr;
269 		sk->sk_v6_rcv_saddr = *saddr;
270 	}
271 
272 	/* set the source address */
273 	np->saddr = *saddr;
274 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
275 
276 	sk->sk_gso_type = SKB_GSO_TCPV6;
277 	__ip6_dst_store(sk, dst, NULL, NULL);
278 
279 	rt = (struct rt6_info *) dst;
280 	if (tcp_death_row.sysctl_tw_recycle &&
281 	    !tp->rx_opt.ts_recent_stamp &&
282 	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
283 		tcp_fetch_timewait_stamp(sk, dst);
284 
285 	icsk->icsk_ext_hdr_len = 0;
286 	if (np->opt)
287 		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
288 					  np->opt->opt_nflen);
289 
290 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
291 
292 	inet->inet_dport = usin->sin6_port;
293 
294 	tcp_set_state(sk, TCP_SYN_SENT);
295 	err = inet6_hash_connect(&tcp_death_row, sk);
296 	if (err)
297 		goto late_failure;
298 
299 	if (!tp->write_seq && likely(!tp->repair))
300 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
301 							     sk->sk_v6_daddr.s6_addr32,
302 							     inet->inet_sport,
303 							     inet->inet_dport);
304 
305 	err = tcp_connect(sk);
306 	if (err)
307 		goto late_failure;
308 
309 	return 0;
310 
311 late_failure:
312 	tcp_set_state(sk, TCP_CLOSE);
313 	__sk_dst_reset(sk);
314 failure:
315 	inet->inet_dport = 0;
316 	sk->sk_route_caps = 0;
317 	return err;
318 }
319 
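/* Apply a PMTU reduction reported by ICMPv6: refresh the cached route
 * and, if the new MTU is smaller than the cached one, shrink the MSS
 * and retransmit what no longer fits.
 */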
320 static void tcp_v6_mtu_reduced(struct sock *sk)
321 {
322 	struct dst_entry *dst;
323 
324 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
325 		return;
326 
327 	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
328 	if (!dst)
329 		return;
330 
331 	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
332 		tcp_sync_mss(sk, dst_mtu(dst));
333 		tcp_simple_retransmit(sk);
334 	}
335 }
336 
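/* ICMPv6 error handler: locate the socket for the offending segment
 * and handle redirects, packet-too-big and hard errors, deferring
 * work when the socket is owned by user context.
 */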
337 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
338 		u8 type, u8 code, int offset, __be32 info)
339 {
340 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
341 	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
342 	struct ipv6_pinfo *np;
343 	struct sock *sk;
344 	int err;
345 	struct tcp_sock *tp;
346 	__u32 seq;
347 	struct net *net = dev_net(skb->dev);
348 
349 	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
350 			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
351 
352 	if (sk == NULL) {
353 		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
354 				   ICMP6_MIB_INERRORS);
355 		return;
356 	}
357 
358 	if (sk->sk_state == TCP_TIME_WAIT) {
359 		inet_twsk_put(inet_twsk(sk));
360 		return;
361 	}
362 
363 	bh_lock_sock(sk);
364 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
365 		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
366 
367 	if (sk->sk_state == TCP_CLOSE)
368 		goto out;
369 
370 	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
371 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
372 		goto out;
373 	}
374 
375 	tp = tcp_sk(sk);
376 	seq = ntohl(th->seq);
377 	if (sk->sk_state != TCP_LISTEN &&
378 	    !between(seq, tp->snd_una, tp->snd_nxt)) {
379 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
380 		goto out;
381 	}
382 
383 	np = inet6_sk(sk);
384 
385 	if (type == NDISC_REDIRECT) {
386 		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
387 
388 		if (dst)
389 			dst->ops->redirect(dst, sk, skb);
390 		goto out;
391 	}
392 
393 	if (type == ICMPV6_PKT_TOOBIG) {
394 		/* We are not interested in TCP_LISTEN and open_requests
395 		 * (SYN-ACKs sent out by Linux are always < 576 bytes so
396 		 * they should go through unfragmented).
397 		 */
398 		if (sk->sk_state == TCP_LISTEN)
399 			goto out;
400 
401 		tp->mtu_info = ntohl(info);
402 		if (!sock_owned_by_user(sk))
403 			tcp_v6_mtu_reduced(sk);
404 		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
405 					   &tp->tsq_flags))
406 			sock_hold(sk);
407 		goto out;
408 	}
409 
410 	icmpv6_err_convert(type, code, &err);
411 
412 	/* Might be for a request_sock */
413 	switch (sk->sk_state) {
414 		struct request_sock *req, **prev;
415 	case TCP_LISTEN:
416 		if (sock_owned_by_user(sk))
417 			goto out;
418 
419 		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
420 					   &hdr->saddr, inet6_iif(skb));
421 		if (!req)
422 			goto out;
423 
424 		/* ICMPs are not backlogged, hence we cannot get
425 		 * an established socket here.
426 		 */
427 		WARN_ON(req->sk != NULL);
428 
429 		if (seq != tcp_rsk(req)->snt_isn) {
430 			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
431 			goto out;
432 		}
433 
434 		inet_csk_reqsk_queue_drop(sk, req, prev);
435 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
436 		goto out;
437 
438 	case TCP_SYN_SENT:
439 	case TCP_SYN_RECV:  /* Cannot happen.
440 			       It can, if SYNs are crossed. --ANK */
441 		if (!sock_owned_by_user(sk)) {
442 			sk->sk_err = err;
443 			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
444 
445 			tcp_done(sk);
446 		} else
447 			sk->sk_err_soft = err;
448 		goto out;
449 	}
450 
451 	if (!sock_owned_by_user(sk) && np->recverr) {
452 		sk->sk_err = err;
453 		sk->sk_error_report(sk);
454 	} else
455 		sk->sk_err_soft = err;
456 
457 out:
458 	bh_unlock_sock(sk);
459 	sock_put(sk);
460 }
461 
462 
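/* Build and transmit a SYN-ACK for a pending connection request,
 * grabbing a route first if the caller did not supply one.
 */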
463 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
464 			      struct flowi6 *fl6,
465 			      struct request_sock *req,
466 			      u16 queue_mapping)
467 {
468 	struct inet_request_sock *ireq = inet_rsk(req);
469 	struct ipv6_pinfo *np = inet6_sk(sk);
470 	struct sk_buff *skb;
471 	int err = -ENOMEM;
472 
473 	/* First, grab a route. */
474 	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
475 		goto done;
476 
477 	skb = tcp_make_synack(sk, dst, req, NULL);
478 
479 	if (skb) {
480 		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
481 				    &ireq->ir_v6_rmt_addr);
482 
483 		fl6->daddr = ireq->ir_v6_rmt_addr;
484 		skb_set_queue_mapping(skb, queue_mapping);
485 		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
486 		err = net_xmit_eval(err);
487 	}
488 
489 done:
490 	return err;
491 }
492 
493 static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
494 {
495 	struct flowi6 fl6;
496 	int res;
497 
498 	res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0);
499 	if (!res)
500 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
501 	return res;
502 }
503 
504 static void tcp_v6_reqsk_destructor(struct request_sock *req)
505 {
506 	kfree_skb(inet_rsk(req)->pktopts);
507 }
508 
509 #ifdef CONFIG_TCP_MD5SIG
510 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
511 						   const struct in6_addr *addr)
512 {
513 	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
514 }
515 
516 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
517 						struct sock *addr_sk)
518 {
519 	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
520 }
521 
522 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
523 						      struct request_sock *req)
524 {
525 	return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
526 }
527 
528 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
529 				  int optlen)
530 {
531 	struct tcp_md5sig cmd;
532 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
533 
534 	if (optlen < sizeof(cmd))
535 		return -EINVAL;
536 
537 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
538 		return -EFAULT;
539 
540 	if (sin6->sin6_family != AF_INET6)
541 		return -EINVAL;
542 
543 	if (!cmd.tcpm_keylen) {
544 		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
545 			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
546 					      AF_INET);
547 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
548 				      AF_INET6);
549 	}
550 
551 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
552 		return -EINVAL;
553 
554 	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
555 		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
556 				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
557 
558 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
559 			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
560 }
561 
562 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
563 					const struct in6_addr *daddr,
564 					const struct in6_addr *saddr, int nbytes)
565 {
566 	struct tcp6_pseudohdr *bp;
567 	struct scatterlist sg;
568 
569 	bp = &hp->md5_blk.ip6;
570 	/* 1. TCP pseudo-header (RFC2460) */
571 	bp->saddr = *saddr;
572 	bp->daddr = *daddr;
573 	bp->protocol = cpu_to_be32(IPPROTO_TCP);
574 	bp->len = cpu_to_be32(nbytes);
575 
576 	sg_init_one(&sg, bp, sizeof(*bp));
577 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
578 }
579 
580 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
581 			       const struct in6_addr *daddr, struct in6_addr *saddr,
582 			       const struct tcphdr *th)
583 {
584 	struct tcp_md5sig_pool *hp;
585 	struct hash_desc *desc;
586 
587 	hp = tcp_get_md5sig_pool();
588 	if (!hp)
589 		goto clear_hash_noput;
590 	desc = &hp->md5_desc;
591 
592 	if (crypto_hash_init(desc))
593 		goto clear_hash;
594 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
595 		goto clear_hash;
596 	if (tcp_md5_hash_header(hp, th))
597 		goto clear_hash;
598 	if (tcp_md5_hash_key(hp, key))
599 		goto clear_hash;
600 	if (crypto_hash_final(desc, md5_hash))
601 		goto clear_hash;
602 
603 	tcp_put_md5sig_pool();
604 	return 0;
605 
606 clear_hash:
607 	tcp_put_md5sig_pool();
608 clear_hash_noput:
609 	memset(md5_hash, 0, 16);
610 	return 1;
611 }
612 
613 static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
614 			       const struct sock *sk,
615 			       const struct request_sock *req,
616 			       const struct sk_buff *skb)
617 {
618 	const struct in6_addr *saddr, *daddr;
619 	struct tcp_md5sig_pool *hp;
620 	struct hash_desc *desc;
621 	const struct tcphdr *th = tcp_hdr(skb);
622 
623 	if (sk) {
624 		saddr = &inet6_sk(sk)->saddr;
625 		daddr = &sk->sk_v6_daddr;
626 	} else if (req) {
627 		saddr = &inet_rsk(req)->ir_v6_loc_addr;
628 		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
629 	} else {
630 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
631 		saddr = &ip6h->saddr;
632 		daddr = &ip6h->daddr;
633 	}
634 
635 	hp = tcp_get_md5sig_pool();
636 	if (!hp)
637 		goto clear_hash_noput;
638 	desc = &hp->md5_desc;
639 
640 	if (crypto_hash_init(desc))
641 		goto clear_hash;
642 
643 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
644 		goto clear_hash;
645 	if (tcp_md5_hash_header(hp, th))
646 		goto clear_hash;
647 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
648 		goto clear_hash;
649 	if (tcp_md5_hash_key(hp, key))
650 		goto clear_hash;
651 	if (crypto_hash_final(desc, md5_hash))
652 		goto clear_hash;
653 
654 	tcp_put_md5sig_pool();
655 	return 0;
656 
657 clear_hash:
658 	tcp_put_md5sig_pool();
659 clear_hash_noput:
660 	memset(md5_hash, 0, 16);
661 	return 1;
662 }
663 
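/* Verify the TCP MD5 signature option of an incoming segment against
 * the key configured for the peer address; returns 1 when the segment
 * should be dropped.
 */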
664 static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
665 {
666 	const __u8 *hash_location = NULL;
667 	struct tcp_md5sig_key *hash_expected;
668 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
669 	const struct tcphdr *th = tcp_hdr(skb);
670 	int genhash;
671 	u8 newhash[16];
672 
673 	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
674 	hash_location = tcp_parse_md5sig_option(th);
675 
676 	/* We've parsed the options - do we have a hash? */
677 	if (!hash_expected && !hash_location)
678 		return 0;
679 
680 	if (hash_expected && !hash_location) {
681 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
682 		return 1;
683 	}
684 
685 	if (!hash_expected && hash_location) {
686 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
687 		return 1;
688 	}
689 
690 	/* check the signature */
691 	genhash = tcp_v6_md5_hash_skb(newhash,
692 				      hash_expected,
693 				      NULL, NULL, skb);
694 
695 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
696 		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
697 				     genhash ? "failed" : "mismatch",
698 				     &ip6h->saddr, ntohs(th->source),
699 				     &ip6h->daddr, ntohs(th->dest));
700 		return 1;
701 	}
702 	return 0;
703 }
704 #endif
705 
706 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
707 	.family		=	AF_INET6,
708 	.obj_size	=	sizeof(struct tcp6_request_sock),
709 	.rtx_syn_ack	=	tcp_v6_rtx_synack,
710 	.send_ack	=	tcp_v6_reqsk_send_ack,
711 	.destructor	=	tcp_v6_reqsk_destructor,
712 	.send_reset	=	tcp_v6_send_reset,
713 	.syn_ack_timeout = 	tcp_syn_ack_timeout,
714 };
715 
716 #ifdef CONFIG_TCP_MD5SIG
717 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
718 	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
719 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
720 };
721 #endif
722 
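/* Build and send a bare ACK or RST on the per-netns control socket,
 * echoing the addresses of the packet that triggered it; optionally
 * carries timestamp and MD5 signature options.
 */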
723 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
724 				 u32 tsval, u32 tsecr,
725 				 struct tcp_md5sig_key *key, int rst, u8 tclass)
726 {
727 	const struct tcphdr *th = tcp_hdr(skb);
728 	struct tcphdr *t1;
729 	struct sk_buff *buff;
730 	struct flowi6 fl6;
731 	struct net *net = dev_net(skb_dst(skb)->dev);
732 	struct sock *ctl_sk = net->ipv6.tcp_sk;
733 	unsigned int tot_len = sizeof(struct tcphdr);
734 	struct dst_entry *dst;
735 	__be32 *topt;
736 
737 	if (tsecr)
738 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
739 #ifdef CONFIG_TCP_MD5SIG
740 	if (key)
741 		tot_len += TCPOLEN_MD5SIG_ALIGNED;
742 #endif
743 
744 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
745 			 GFP_ATOMIC);
746 	if (buff == NULL)
747 		return;
748 
749 	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
750 
751 	t1 = (struct tcphdr *) skb_push(buff, tot_len);
752 	skb_reset_transport_header(buff);
753 
754 	/* Swap the source and the destination. */
755 	memset(t1, 0, sizeof(*t1));
756 	t1->dest = th->source;
757 	t1->source = th->dest;
758 	t1->doff = tot_len / 4;
759 	t1->seq = htonl(seq);
760 	t1->ack_seq = htonl(ack);
761 	t1->ack = !rst || !th->ack;
762 	t1->rst = rst;
763 	t1->window = htons(win);
764 
765 	topt = (__be32 *)(t1 + 1);
766 
767 	if (tsecr) {
768 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
769 				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
770 		*topt++ = htonl(tsval);
771 		*topt++ = htonl(tsecr);
772 	}
773 
774 #ifdef CONFIG_TCP_MD5SIG
775 	if (key) {
776 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
777 				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
778 		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
779 				    &ipv6_hdr(skb)->saddr,
780 				    &ipv6_hdr(skb)->daddr, t1);
781 	}
782 #endif
783 
784 	memset(&fl6, 0, sizeof(fl6));
785 	fl6.daddr = ipv6_hdr(skb)->saddr;
786 	fl6.saddr = ipv6_hdr(skb)->daddr;
787 
788 	buff->ip_summed = CHECKSUM_PARTIAL;
789 	buff->csum = 0;
790 
791 	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
792 
793 	fl6.flowi6_proto = IPPROTO_TCP;
794 	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
795 		fl6.flowi6_oif = inet6_iif(skb);
796 	fl6.fl6_dport = t1->dest;
797 	fl6.fl6_sport = t1->source;
798 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
799 
800 	/* Pass a socket to ip6_dst_lookup_flow even when it is for a RST;
801 	 * the underlying function will use it to retrieve the network
802 	 * namespace.
803 	 */
804 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
805 	if (!IS_ERR(dst)) {
806 		skb_dst_set(buff, dst);
807 		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
808 		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
809 		if (rst)
810 			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
811 		return;
812 	}
813 
814 	kfree_skb(buff);
815 }
816 
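/* Send a RST in reply to skb.  A RST is never answered with a RST,
 * and with MD5 enabled the reply is signed with the peer's key when
 * one can be found.
 */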
817 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
818 {
819 	const struct tcphdr *th = tcp_hdr(skb);
820 	u32 seq = 0, ack_seq = 0;
821 	struct tcp_md5sig_key *key = NULL;
822 #ifdef CONFIG_TCP_MD5SIG
823 	const __u8 *hash_location = NULL;
824 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
825 	unsigned char newhash[16];
826 	int genhash;
827 	struct sock *sk1 = NULL;
828 #endif
829 
830 	if (th->rst)
831 		return;
832 
833 	if (!ipv6_unicast_destination(skb))
834 		return;
835 
836 #ifdef CONFIG_TCP_MD5SIG
837 	hash_location = tcp_parse_md5sig_option(th);
838 	if (!sk && hash_location) {
839 		/*
840 		 * The active side is gone. Try to find the listening socket
841 		 * via the source port, then find the md5 key through it.
842 		 * We do not lose security here:
843 		 * the incoming packet is checked against the md5 hash of the
844 		 * key we find, and no RST is generated if the hash doesn't match.
845 		 */
846 		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
847 					   &tcp_hashinfo, &ipv6h->saddr,
848 					   th->source, &ipv6h->daddr,
849 					   ntohs(th->source), inet6_iif(skb));
850 		if (!sk1)
851 			return;
852 
853 		rcu_read_lock();
854 		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
855 		if (!key)
856 			goto release_sk1;
857 
858 		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
859 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
860 			goto release_sk1;
861 	} else {
862 		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
863 	}
864 #endif
865 
866 	if (th->ack)
867 		seq = ntohl(th->ack_seq);
868 	else
869 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
870 			  (th->doff << 2);
871 
872 	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, key, 1, 0);
873 
874 #ifdef CONFIG_TCP_MD5SIG
875 release_sk1:
876 	if (sk1) {
877 		rcu_read_unlock();
878 		sock_put(sk1);
879 	}
880 #endif
881 }
882 
883 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
884 			    u32 win, u32 tsval, u32 tsecr,
885 			    struct tcp_md5sig_key *key, u8 tclass)
886 {
887 	tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, key, 0, tclass);
888 }
889 
890 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
891 {
892 	struct inet_timewait_sock *tw = inet_twsk(sk);
893 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
894 
895 	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
896 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
897 			tcp_time_stamp + tcptw->tw_ts_offset,
898 			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
899 			tw->tw_tclass);
900 
901 	inet_twsk_put(tw);
902 }
903 
904 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
905 				  struct request_sock *req)
906 {
907 	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
908 			req->rcv_wnd, tcp_time_stamp, req->ts_recent,
909 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
910 }
911 
912 
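/* For a segment arriving on a listening socket, look for a matching
 * half-open request or an already established connection; fall back
 * to syncookie validation when enabled.
 */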
913 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
914 {
915 	struct request_sock *req, **prev;
916 	const struct tcphdr *th = tcp_hdr(skb);
917 	struct sock *nsk;
918 
919 	/* Find possible connection requests. */
920 	req = inet6_csk_search_req(sk, &prev, th->source,
921 				   &ipv6_hdr(skb)->saddr,
922 				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
923 	if (req)
924 		return tcp_check_req(sk, skb, req, prev, false);
925 
926 	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
927 			&ipv6_hdr(skb)->saddr, th->source,
928 			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
929 
930 	if (nsk) {
931 		if (nsk->sk_state != TCP_TIME_WAIT) {
932 			bh_lock_sock(nsk);
933 			return nsk;
934 		}
935 		inet_twsk_put(inet_twsk(nsk));
936 		return NULL;
937 	}
938 
939 #ifdef CONFIG_SYN_COOKIES
940 	if (!th->syn)
941 		sk = cookie_v6_check(sk, skb);
942 #endif
943 	return sk;
944 }
945 
946 /* FIXME: this is substantially similar to the ipv4 code.
947  * Can some kind of merge be done? -- erics
948  */
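/* Handle an incoming SYN on a listening socket: allocate a request
 * sock, parse the options, choose an ISN (or a syncookie) and reply
 * with a SYN-ACK.
 */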
949 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
950 {
951 	struct tcp_options_received tmp_opt;
952 	struct request_sock *req;
953 	struct inet_request_sock *ireq;
954 	struct ipv6_pinfo *np = inet6_sk(sk);
955 	struct tcp_sock *tp = tcp_sk(sk);
956 	__u32 isn = TCP_SKB_CB(skb)->when;
957 	struct dst_entry *dst = NULL;
958 	struct flowi6 fl6;
959 	bool want_cookie = false;
960 
961 	if (skb->protocol == htons(ETH_P_IP))
962 		return tcp_v4_conn_request(sk, skb);
963 
964 	if (!ipv6_unicast_destination(skb))
965 		goto drop;
966 
967 	if ((sysctl_tcp_syncookies == 2 ||
968 	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
969 		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
970 		if (!want_cookie)
971 			goto drop;
972 	}
973 
974 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
975 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
976 		goto drop;
977 	}
978 
979 	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
980 	if (req == NULL)
981 		goto drop;
982 
983 #ifdef CONFIG_TCP_MD5SIG
984 	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
985 #endif
986 
987 	tcp_clear_options(&tmp_opt);
988 	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
989 	tmp_opt.user_mss = tp->rx_opt.user_mss;
990 	tcp_parse_options(skb, &tmp_opt, 0, NULL);
991 
992 	if (want_cookie && !tmp_opt.saw_tstamp)
993 		tcp_clear_options(&tmp_opt);
994 
995 	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
996 	tcp_openreq_init(req, &tmp_opt, skb);
997 
998 	ireq = inet_rsk(req);
999 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
1000 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
1001 	if (!want_cookie || tmp_opt.tstamp_ok)
1002 		TCP_ECN_create_request(req, skb, sock_net(sk));
1003 
1004 	ireq->ir_iif = sk->sk_bound_dev_if;
1005 
1006 	/* So that link locals have meaning */
1007 	if (!sk->sk_bound_dev_if &&
1008 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
1009 		ireq->ir_iif = inet6_iif(skb);
1010 
1011 	if (!isn) {
1012 		if (ipv6_opt_accepted(sk, skb) ||
1013 		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1014 		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1015 			atomic_inc(&skb->users);
1016 			ireq->pktopts = skb;
1017 		}
1018 
1019 		if (want_cookie) {
1020 			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1021 			req->cookie_ts = tmp_opt.tstamp_ok;
1022 			goto have_isn;
1023 		}
1024 
1025 		/* VJ's idea. We save the last timestamp seen
1026 		 * from the destination in the peer table, when entering
1027 		 * TIME-WAIT state, and check against it before
1028 		 * accepting a new connection request.
1029 		 *
1030 		 * If "isn" is not zero, this request hit an alive
1031 		 * timewait bucket, so all the necessary checks
1032 		 * are made in the function processing the timewait state.
1033 		 */
1034 		if (tmp_opt.saw_tstamp &&
1035 		    tcp_death_row.sysctl_tw_recycle &&
1036 		    (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
1037 			if (!tcp_peer_is_proven(req, dst, true)) {
1038 				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1039 				goto drop_and_release;
1040 			}
1041 		}
1042 		/* Kill the following clause, if you dislike this way. */
1043 		else if (!sysctl_tcp_syncookies &&
1044 			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1045 			  (sysctl_max_syn_backlog >> 2)) &&
1046 			 !tcp_peer_is_proven(req, dst, false)) {
1047 			/* Without syncookies, the last quarter of the
1048 			 * backlog is filled with destinations proven
1049 			 * to be alive.
1050 			 * It means that we continue to communicate with
1051 			 * destinations we already remembered by the
1052 			 * moment the synflood started.
1053 			 */
1054 			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1055 				       &ireq->ir_v6_rmt_addr, ntohs(tcp_hdr(skb)->source));
1056 			goto drop_and_release;
1057 		}
1058 
1059 		isn = tcp_v6_init_sequence(skb);
1060 	}
1061 have_isn:
1062 	tcp_rsk(req)->snt_isn = isn;
1063 
1064 	if (security_inet_conn_request(sk, skb, req))
1065 		goto drop_and_release;
1066 
1067 	if (tcp_v6_send_synack(sk, dst, &fl6, req,
1068 			       skb_get_queue_mapping(skb)) ||
1069 	    want_cookie)
1070 		goto drop_and_free;
1071 
1072 	tcp_rsk(req)->snt_synack = tcp_time_stamp;
1073 	tcp_rsk(req)->listener = NULL;
1074 	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1075 	return 0;
1076 
1077 drop_and_release:
1078 	dst_release(dst);
1079 drop_and_free:
1080 	reqsk_free(req);
1081 drop:
1082 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1083 	return 0; /* don't send reset */
1084 }
1085 
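/* Create the child socket once the handshake is complete.  The
 * v4-mapped case is delegated to tcp_v4_syn_recv_sock() and the
 * result patched up with IPv6 state.
 */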
1086 static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1087 					  struct request_sock *req,
1088 					  struct dst_entry *dst)
1089 {
1090 	struct inet_request_sock *ireq;
1091 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1092 	struct tcp6_sock *newtcp6sk;
1093 	struct inet_sock *newinet;
1094 	struct tcp_sock *newtp;
1095 	struct sock *newsk;
1096 #ifdef CONFIG_TCP_MD5SIG
1097 	struct tcp_md5sig_key *key;
1098 #endif
1099 	struct flowi6 fl6;
1100 
1101 	if (skb->protocol == htons(ETH_P_IP)) {
1102 		/*
1103 		 *	v6 mapped
1104 		 */
1105 
1106 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1107 
1108 		if (newsk == NULL)
1109 			return NULL;
1110 
1111 		newtcp6sk = (struct tcp6_sock *)newsk;
1112 		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1113 
1114 		newinet = inet_sk(newsk);
1115 		newnp = inet6_sk(newsk);
1116 		newtp = tcp_sk(newsk);
1117 
1118 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1119 
1120 		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
1121 
1122 		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1123 
1124 		newsk->sk_v6_rcv_saddr = newnp->saddr;
1125 
1126 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1127 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1128 #ifdef CONFIG_TCP_MD5SIG
1129 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1130 #endif
1131 
1132 		newnp->ipv6_ac_list = NULL;
1133 		newnp->ipv6_fl_list = NULL;
1134 		newnp->pktoptions  = NULL;
1135 		newnp->opt	   = NULL;
1136 		newnp->mcast_oif   = inet6_iif(skb);
1137 		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1138 		newnp->rcv_tclass  = ipv6_get_dsfield(ipv6_hdr(skb));
1139 
1140 		/*
1141 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1142 		 * here, tcp_create_openreq_child now does this for us, see the comment in
1143 		 * that function for the gory details. -acme
1144 		 */
1145 
1146 		/* This is a tricky place. Until this moment the IPv4 tcp code
1147 		   worked with the IPv6 icsk.icsk_af_ops.
1148 		   Sync it now.
1149 		 */
1150 		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1151 
1152 		return newsk;
1153 	}
1154 
1155 	ireq = inet_rsk(req);
1156 
1157 	if (sk_acceptq_is_full(sk))
1158 		goto out_overflow;
1159 
1160 	if (!dst) {
1161 		dst = inet6_csk_route_req(sk, &fl6, req);
1162 		if (!dst)
1163 			goto out;
1164 	}
1165 
1166 	newsk = tcp_create_openreq_child(sk, req, skb);
1167 	if (newsk == NULL)
1168 		goto out_nonewsk;
1169 
1170 	/*
1171 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1172 	 * count here, tcp_create_openreq_child now does this for us, see the
1173 	 * comment in that function for the gory details. -acme
1174 	 */
1175 
1176 	newsk->sk_gso_type = SKB_GSO_TCPV6;
1177 	__ip6_dst_store(newsk, dst, NULL, NULL);
1178 	inet6_sk_rx_dst_set(newsk, skb);
1179 
1180 	newtcp6sk = (struct tcp6_sock *)newsk;
1181 	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1182 
1183 	newtp = tcp_sk(newsk);
1184 	newinet = inet_sk(newsk);
1185 	newnp = inet6_sk(newsk);
1186 
1187 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1188 
1189 	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1190 	newnp->saddr = ireq->ir_v6_loc_addr;
1191 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1192 	newsk->sk_bound_dev_if = ireq->ir_iif;
1193 
1194 	/* Now IPv6 options...
1195 
1196 	   First: no IPv4 options.
1197 	 */
1198 	newinet->inet_opt = NULL;
1199 	newnp->ipv6_ac_list = NULL;
1200 	newnp->ipv6_fl_list = NULL;
1201 
1202 	/* Clone RX bits */
1203 	newnp->rxopt.all = np->rxopt.all;
1204 
1205 	/* Clone pktoptions received with SYN */
1206 	newnp->pktoptions = NULL;
1207 	if (ireq->pktopts != NULL) {
1208 		newnp->pktoptions = skb_clone(ireq->pktopts,
1209 					      sk_gfp_atomic(sk, GFP_ATOMIC));
1210 		consume_skb(ireq->pktopts);
1211 		ireq->pktopts = NULL;
1212 		if (newnp->pktoptions)
1213 			skb_set_owner_r(newnp->pktoptions, newsk);
1214 	}
1215 	newnp->opt	  = NULL;
1216 	newnp->mcast_oif  = inet6_iif(skb);
1217 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1218 	newnp->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb));
1219 
1220 	/* Clone native IPv6 options from listening socket (if any)
1221 
1222 	   Yes, keeping a reference count would be much more clever,
1223 	   but we do one more thing here: reattach optmem
1224 	   to newsk.
1225 	 */
1226 	if (np->opt)
1227 		newnp->opt = ipv6_dup_options(newsk, np->opt);
1228 
1229 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1230 	if (newnp->opt)
1231 		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1232 						     newnp->opt->opt_flen);
1233 
1234 	tcp_mtup_init(newsk);
1235 	tcp_sync_mss(newsk, dst_mtu(dst));
1236 	newtp->advmss = dst_metric_advmss(dst);
1237 	if (tcp_sk(sk)->rx_opt.user_mss &&
1238 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1239 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1240 
1241 	tcp_initialize_rcv_mss(newsk);
1242 
1243 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1244 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1245 
1246 #ifdef CONFIG_TCP_MD5SIG
1247 	/* Copy over the MD5 key from the original socket */
1248 	if ((key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr)) != NULL) {
1249 		/* We're using one, so create a matching key
1250 		 * on the newsk structure. If we fail to get
1251 		 * memory, then we end up not copying the key
1252 		 * across. Shucks.
1253 		 */
1254 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1255 			       AF_INET6, key->key, key->keylen,
1256 			       sk_gfp_atomic(sk, GFP_ATOMIC));
1257 	}
1258 #endif
1259 
1260 	if (__inet_inherit_port(sk, newsk) < 0) {
1261 		inet_csk_prepare_forced_close(newsk);
1262 		tcp_done(newsk);
1263 		goto out;
1264 	}
1265 	__inet6_hash(newsk, NULL);
1266 
1267 	return newsk;
1268 
1269 out_overflow:
1270 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1271 out_nonewsk:
1272 	dst_release(dst);
1273 out:
1274 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1275 	return NULL;
1276 }
1277 
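/* Validate or set up the checksum of an incoming segment; short
 * packets are verified in full immediately.
 */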
1278 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1279 {
1280 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1281 		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1282 				  &ipv6_hdr(skb)->daddr, skb->csum)) {
1283 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1284 			return 0;
1285 		}
1286 	}
1287 
1288 	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1289 					      &ipv6_hdr(skb)->saddr,
1290 					      &ipv6_hdr(skb)->daddr, 0));
1291 
1292 	if (skb->len <= 76) {
1293 		return __skb_checksum_complete(skb);
1294 	}
1295 	return 0;
1296 }
1297 
1298 /* The socket must have its spinlock held when we get
1299  * here.
1300  *
1301  * We have a potential double-lock case here, so even when
1302  * doing backlog processing we use the BH locking scheme.
1303  * This is because we cannot sleep with the original spinlock
1304  * held.
1305  */
1306 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1307 {
1308 	struct ipv6_pinfo *np = inet6_sk(sk);
1309 	struct tcp_sock *tp;
1310 	struct sk_buff *opt_skb = NULL;
1311 
1312 	/* Imagine: a socket is IPv6. An IPv4 packet arrives,
1313 	   goes to the IPv4 receive handler and is backlogged.
1314 	   From the backlog it always goes here. Kerboom...
1315 	   Fortunately, tcp_rcv_established and rcv_established
1316 	   handle them correctly, but that is not the case with
1317 	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1318 	 */
1319 
1320 	if (skb->protocol == htons(ETH_P_IP))
1321 		return tcp_v4_do_rcv(sk, skb);
1322 
1323 #ifdef CONFIG_TCP_MD5SIG
1324 	if (tcp_v6_inbound_md5_hash(sk, skb))
1325 		goto discard;
1326 #endif
1327 
1328 	if (sk_filter(sk, skb))
1329 		goto discard;
1330 
1331 	/*
1332 	 *	socket locking is here for SMP purposes as backlog rcv
1333 	 *	is currently called with bh processing disabled.
1334 	 */
1335 
1336 	/* Do Stevens' IPV6_PKTOPTIONS.
1337 
1338 	   Yes, guys, it is the only place in our code where we
1339 	   can do this without affecting IPv4.
1340 	   The rest of the code is protocol independent,
1341 	   and I do not like the idea of uglifying IPv4.
1342 
1343 	   Actually, the whole idea behind IPV6_PKTOPTIONS
1344 	   looks not very well thought out. For now we latch the
1345 	   options received in the last packet enqueued
1346 	   by tcp. Feel free to propose a better solution.
1347 					       --ANK (980728)
1348 	 */
1349 	if (np->rxopt.all)
1350 		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
1351 
1352 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1353 		struct dst_entry *dst = sk->sk_rx_dst;
1354 
1355 		sock_rps_save_rxhash(sk, skb);
1356 		if (dst) {
1357 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1358 			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1359 				dst_release(dst);
1360 				sk->sk_rx_dst = NULL;
1361 			}
1362 		}
1363 
1364 		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1365 		if (opt_skb)
1366 			goto ipv6_pktoptions;
1367 		return 0;
1368 	}
1369 
1370 	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1371 		goto csum_err;
1372 
1373 	if (sk->sk_state == TCP_LISTEN) {
1374 		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1375 		if (!nsk)
1376 			goto discard;
1377 
1378 		/*
1379 		 * Queue it on the new socket if the new socket is active,
1380 		 * otherwise we just short-circuit this and continue with
1381 		 * the new socket.
1382 		 */
1383 		if (nsk != sk) {
1384 			sock_rps_save_rxhash(nsk, skb);
1385 			if (tcp_child_process(sk, nsk, skb))
1386 				goto reset;
1387 			if (opt_skb)
1388 				__kfree_skb(opt_skb);
1389 			return 0;
1390 		}
1391 	} else
1392 		sock_rps_save_rxhash(sk, skb);
1393 
1394 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1395 		goto reset;
1396 	if (opt_skb)
1397 		goto ipv6_pktoptions;
1398 	return 0;
1399 
1400 reset:
1401 	tcp_v6_send_reset(sk, skb);
1402 discard:
1403 	if (opt_skb)
1404 		__kfree_skb(opt_skb);
1405 	kfree_skb(skb);
1406 	return 0;
1407 csum_err:
1408 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1409 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1410 	goto discard;
1411 
1412 
1413 ipv6_pktoptions:
1414 	/* Do you ask, what is it?
1415 
1416 	   1. skb was enqueued by tcp.
1417 	   2. skb is added to the tail of the read queue, rather than out of order.
1418 	   3. the socket is not in a passive state.
1419 	   4. Finally, it really contains options that the user wants to receive.
1420 	 */
1421 	tp = tcp_sk(sk);
1422 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1423 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1424 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1425 			np->mcast_oif = inet6_iif(opt_skb);
1426 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1427 			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1428 		if (np->rxopt.bits.rxtclass)
1429 			np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(opt_skb));
1430 		if (ipv6_opt_accepted(sk, opt_skb)) {
1431 			skb_set_owner_r(opt_skb, sk);
1432 			opt_skb = xchg(&np->pktoptions, opt_skb);
1433 		} else {
1434 			__kfree_skb(opt_skb);
1435 			opt_skb = xchg(&np->pktoptions, NULL);
1436 		}
1437 	}
1438 
1439 	kfree_skb(opt_skb);
1440 	return 0;
1441 }
1442 
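/* Entry point for every received TCP/IPv6 segment: perform header
 * sanity checks and the socket lookup, then process the segment
 * directly, prequeue it, or push it onto the backlog.
 */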
1443 static int tcp_v6_rcv(struct sk_buff *skb)
1444 {
1445 	const struct tcphdr *th;
1446 	const struct ipv6hdr *hdr;
1447 	struct sock *sk;
1448 	int ret;
1449 	struct net *net = dev_net(skb->dev);
1450 
1451 	if (skb->pkt_type != PACKET_HOST)
1452 		goto discard_it;
1453 
1454 	/*
1455 	 *	Count it even if it's bad.
1456 	 */
1457 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1458 
1459 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1460 		goto discard_it;
1461 
1462 	th = tcp_hdr(skb);
1463 
1464 	if (th->doff < sizeof(struct tcphdr)/4)
1465 		goto bad_packet;
1466 	if (!pskb_may_pull(skb, th->doff*4))
1467 		goto discard_it;
1468 
1469 	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1470 		goto csum_error;
1471 
1472 	th = tcp_hdr(skb);
1473 	hdr = ipv6_hdr(skb);
1474 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1475 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1476 				    skb->len - th->doff*4);
1477 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1478 	TCP_SKB_CB(skb)->when = 0;
1479 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1480 	TCP_SKB_CB(skb)->sacked = 0;
1481 
1482 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1483 	if (!sk)
1484 		goto no_tcp_socket;
1485 
1486 process:
1487 	if (sk->sk_state == TCP_TIME_WAIT)
1488 		goto do_time_wait;
1489 
1490 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1491 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1492 		goto discard_and_relse;
1493 	}
1494 
1495 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1496 		goto discard_and_relse;
1497 
1498 	if (sk_filter(sk, skb))
1499 		goto discard_and_relse;
1500 
1501 	sk_mark_napi_id(sk, skb);
1502 	skb->dev = NULL;
1503 
1504 	bh_lock_sock_nested(sk);
1505 	ret = 0;
1506 	if (!sock_owned_by_user(sk)) {
1507 #ifdef CONFIG_NET_DMA
1508 		struct tcp_sock *tp = tcp_sk(sk);
1509 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1510 			tp->ucopy.dma_chan = net_dma_find_channel();
1511 		if (tp->ucopy.dma_chan)
1512 			ret = tcp_v6_do_rcv(sk, skb);
1513 		else
1514 #endif
1515 		{
1516 			if (!tcp_prequeue(sk, skb))
1517 				ret = tcp_v6_do_rcv(sk, skb);
1518 		}
1519 	} else if (unlikely(sk_add_backlog(sk, skb,
1520 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1521 		bh_unlock_sock(sk);
1522 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1523 		goto discard_and_relse;
1524 	}
1525 	bh_unlock_sock(sk);
1526 
1527 	sock_put(sk);
1528 	return ret ? -1 : 0;
1529 
1530 no_tcp_socket:
1531 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1532 		goto discard_it;
1533 
1534 	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1535 csum_error:
1536 		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1537 bad_packet:
1538 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1539 	} else {
1540 		tcp_v6_send_reset(NULL, skb);
1541 	}
1542 
1543 discard_it:
1544 	kfree_skb(skb);
1545 	return 0;
1546 
1547 discard_and_relse:
1548 	sock_put(sk);
1549 	goto discard_it;
1550 
1551 do_time_wait:
1552 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1553 		inet_twsk_put(inet_twsk(sk));
1554 		goto discard_it;
1555 	}
1556 
1557 	if (skb->len < (th->doff<<2)) {
1558 		inet_twsk_put(inet_twsk(sk));
1559 		goto bad_packet;
1560 	}
1561 	if (tcp_checksum_complete(skb)) {
1562 		inet_twsk_put(inet_twsk(sk));
1563 		goto csum_error;
1564 	}
1565 
1566 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1567 	case TCP_TW_SYN:
1568 	{
1569 		struct sock *sk2;
1570 
1571 		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1572 					    &ipv6_hdr(skb)->saddr, th->source,
1573 					    &ipv6_hdr(skb)->daddr,
1574 					    ntohs(th->dest), inet6_iif(skb));
1575 		if (sk2 != NULL) {
1576 			struct inet_timewait_sock *tw = inet_twsk(sk);
1577 			inet_twsk_deschedule(tw, &tcp_death_row);
1578 			inet_twsk_put(tw);
1579 			sk = sk2;
1580 			goto process;
1581 		}
1582 		/* Fall through to ACK */
1583 	}
1584 	case TCP_TW_ACK:
1585 		tcp_v6_timewait_ack(sk, skb);
1586 		break;
1587 	case TCP_TW_RST:
1588 		goto no_tcp_socket;
1589 	case TCP_TW_SUCCESS:;
1590 	}
1591 	goto discard_it;
1592 }
1593 
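/* Early demux: look up an established socket before routing so that
 * the cached rx_dst can be reused and a second lookup avoided.
 */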
1594 static void tcp_v6_early_demux(struct sk_buff *skb)
1595 {
1596 	const struct ipv6hdr *hdr;
1597 	const struct tcphdr *th;
1598 	struct sock *sk;
1599 
1600 	if (skb->pkt_type != PACKET_HOST)
1601 		return;
1602 
1603 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1604 		return;
1605 
1606 	hdr = ipv6_hdr(skb);
1607 	th = tcp_hdr(skb);
1608 
1609 	if (th->doff < sizeof(struct tcphdr) / 4)
1610 		return;
1611 
1612 	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1613 					&hdr->saddr, th->source,
1614 					&hdr->daddr, ntohs(th->dest),
1615 					inet6_iif(skb));
1616 	if (sk) {
1617 		skb->sk = sk;
1618 		skb->destructor = sock_edemux;
1619 		if (sk->sk_state != TCP_TIME_WAIT) {
1620 			struct dst_entry *dst = sk->sk_rx_dst;
1621 
1622 			if (dst)
1623 				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1624 			if (dst &&
1625 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1626 				skb_dst_set_noref(skb, dst);
1627 		}
1628 	}
1629 }
1630 
1631 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1632 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1633 	.twsk_unique	= tcp_twsk_unique,
1634 	.twsk_destructor= tcp_twsk_destructor,
1635 };
1636 
1637 static const struct inet_connection_sock_af_ops ipv6_specific = {
1638 	.queue_xmit	   = inet6_csk_xmit,
1639 	.send_check	   = tcp_v6_send_check,
1640 	.rebuild_header	   = inet6_sk_rebuild_header,
1641 	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1642 	.conn_request	   = tcp_v6_conn_request,
1643 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1644 	.net_header_len	   = sizeof(struct ipv6hdr),
1645 	.net_frag_header_len = sizeof(struct frag_hdr),
1646 	.setsockopt	   = ipv6_setsockopt,
1647 	.getsockopt	   = ipv6_getsockopt,
1648 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1649 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1650 	.bind_conflict	   = inet6_csk_bind_conflict,
1651 #ifdef CONFIG_COMPAT
1652 	.compat_setsockopt = compat_ipv6_setsockopt,
1653 	.compat_getsockopt = compat_ipv6_getsockopt,
1654 #endif
1655 };
1656 
1657 #ifdef CONFIG_TCP_MD5SIG
1658 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1659 	.md5_lookup	=	tcp_v6_md5_lookup,
1660 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1661 	.md5_parse	=	tcp_v6_parse_md5_keys,
1662 };
1663 #endif
1664 
1665 /*
1666  *	TCP over IPv4 via INET6 API
1667  */
1668 
1669 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1670 	.queue_xmit	   = ip_queue_xmit,
1671 	.send_check	   = tcp_v4_send_check,
1672 	.rebuild_header	   = inet_sk_rebuild_header,
1673 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1674 	.conn_request	   = tcp_v6_conn_request,
1675 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1676 	.net_header_len	   = sizeof(struct iphdr),
1677 	.setsockopt	   = ipv6_setsockopt,
1678 	.getsockopt	   = ipv6_getsockopt,
1679 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1680 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1681 	.bind_conflict	   = inet6_csk_bind_conflict,
1682 #ifdef CONFIG_COMPAT
1683 	.compat_setsockopt = compat_ipv6_setsockopt,
1684 	.compat_getsockopt = compat_ipv6_getsockopt,
1685 #endif
1686 };
1687 
1688 #ifdef CONFIG_TCP_MD5SIG
1689 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1690 	.md5_lookup	=	tcp_v4_md5_lookup,
1691 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1692 	.md5_parse	=	tcp_v6_parse_md5_keys,
1693 };
1694 #endif
1695 
1696 /* NOTE: A lot of things are set to zero explicitly by the call to
1697  *       sk_alloc(), so they need not be done here.
1698  */
1699 static int tcp_v6_init_sock(struct sock *sk)
1700 {
1701 	struct inet_connection_sock *icsk = inet_csk(sk);
1702 
1703 	tcp_init_sock(sk);
1704 
1705 	icsk->icsk_af_ops = &ipv6_specific;
1706 
1707 #ifdef CONFIG_TCP_MD5SIG
1708 	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1709 #endif
1710 
1711 	return 0;
1712 }
1713 
1714 static void tcp_v6_destroy_sock(struct sock *sk)
1715 {
1716 	tcp_v4_destroy_sock(sk);
1717 	inet6_destroy_sock(sk);
1718 }
1719 
1720 #ifdef CONFIG_PROC_FS
1721 /* Proc filesystem TCPv6 sock list dumping. */
1722 static void get_openreq6(struct seq_file *seq,
1723 			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
1724 {
1725 	int ttd = req->expires - jiffies;
1726 	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1727 	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1728 
1729 	if (ttd < 0)
1730 		ttd = 0;
1731 
1732 	seq_printf(seq,
1733 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1734 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1735 		   i,
1736 		   src->s6_addr32[0], src->s6_addr32[1],
1737 		   src->s6_addr32[2], src->s6_addr32[3],
1738 		   inet_rsk(req)->ir_num,
1739 		   dest->s6_addr32[0], dest->s6_addr32[1],
1740 		   dest->s6_addr32[2], dest->s6_addr32[3],
1741 		   ntohs(inet_rsk(req)->ir_rmt_port),
1742 		   TCP_SYN_RECV,
1743 		   0, 0, /* could print option size, but that is af dependent. */
1744 		   1,   /* timers active (only the expire timer) */
1745 		   jiffies_to_clock_t(ttd),
1746 		   req->num_timeout,
1747 		   from_kuid_munged(seq_user_ns(seq), uid),
1748 		   0,  /* non standard timer */
1749 		   0, /* open_requests have no inode */
1750 		   0, req);
1751 }
1752 
1753 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1754 {
1755 	const struct in6_addr *dest, *src;
1756 	__u16 destp, srcp;
1757 	int timer_active;
1758 	unsigned long timer_expires;
1759 	const struct inet_sock *inet = inet_sk(sp);
1760 	const struct tcp_sock *tp = tcp_sk(sp);
1761 	const struct inet_connection_sock *icsk = inet_csk(sp);
1762 
1763 	dest  = &sp->sk_v6_daddr;
1764 	src   = &sp->sk_v6_rcv_saddr;
1765 	destp = ntohs(inet->inet_dport);
1766 	srcp  = ntohs(inet->inet_sport);
1767 
1768 	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1769 		timer_active	= 1;
1770 		timer_expires	= icsk->icsk_timeout;
1771 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1772 		timer_active	= 4;
1773 		timer_expires	= icsk->icsk_timeout;
1774 	} else if (timer_pending(&sp->sk_timer)) {
1775 		timer_active	= 2;
1776 		timer_expires	= sp->sk_timer.expires;
1777 	} else {
1778 		timer_active	= 0;
1779 		timer_expires = jiffies;
1780 	}
1781 
1782 	seq_printf(seq,
1783 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1784 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1785 		   i,
1786 		   src->s6_addr32[0], src->s6_addr32[1],
1787 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1788 		   dest->s6_addr32[0], dest->s6_addr32[1],
1789 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1790 		   sp->sk_state,
1791 		   tp->write_seq-tp->snd_una,
1792 		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1793 		   timer_active,
1794 		   jiffies_delta_to_clock_t(timer_expires - jiffies),
1795 		   icsk->icsk_retransmits,
1796 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1797 		   icsk->icsk_probes_out,
1798 		   sock_i_ino(sp),
1799 		   atomic_read(&sp->sk_refcnt), sp,
1800 		   jiffies_to_clock_t(icsk->icsk_rto),
1801 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1802 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1803 		   tp->snd_cwnd,
1804 		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1805 		   );
1806 }
1807 
1808 static void get_timewait6_sock(struct seq_file *seq,
1809 			       struct inet_timewait_sock *tw, int i)
1810 {
1811 	const struct in6_addr *dest, *src;
1812 	__u16 destp, srcp;
1813 	s32 delta = tw->tw_ttd - inet_tw_time_stamp();
1814 
1815 	dest = &tw->tw_v6_daddr;
1816 	src  = &tw->tw_v6_rcv_saddr;
1817 	destp = ntohs(tw->tw_dport);
1818 	srcp  = ntohs(tw->tw_sport);
1819 
1820 	seq_printf(seq,
1821 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1822 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1823 		   i,
1824 		   src->s6_addr32[0], src->s6_addr32[1],
1825 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1826 		   dest->s6_addr32[0], dest->s6_addr32[1],
1827 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1828 		   tw->tw_substate, 0, 0,
1829 		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1830 		   atomic_read(&tw->tw_refcnt), tw);
1831 }
1832 
1833 static int tcp6_seq_show(struct seq_file *seq, void *v)
1834 {
1835 	struct tcp_iter_state *st;
1836 	struct sock *sk = v;
1837 
1838 	if (v == SEQ_START_TOKEN) {
1839 		seq_puts(seq,
1840 			 "  sl  "
1841 			 "local_address                         "
1842 			 "remote_address                        "
1843 			 "st tx_queue rx_queue tr tm->when retrnsmt"
1844 			 "   uid  timeout inode\n");
1845 		goto out;
1846 	}
1847 	st = seq->private;
1848 
1849 	switch (st->state) {
1850 	case TCP_SEQ_STATE_LISTENING:
1851 	case TCP_SEQ_STATE_ESTABLISHED:
1852 		if (sk->sk_state == TCP_TIME_WAIT)
1853 			get_timewait6_sock(seq, v, st->num);
1854 		else
1855 			get_tcp6_sock(seq, v, st->num);
1856 		break;
1857 	case TCP_SEQ_STATE_OPENREQ:
1858 		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1859 		break;
1860 	}
1861 out:
1862 	return 0;
1863 }
1864 
1865 static const struct file_operations tcp6_afinfo_seq_fops = {
1866 	.owner   = THIS_MODULE,
1867 	.open    = tcp_seq_open,
1868 	.read    = seq_read,
1869 	.llseek  = seq_lseek,
1870 	.release = seq_release_net
1871 };
1872 
1873 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1874 	.name		= "tcp6",
1875 	.family		= AF_INET6,
1876 	.seq_fops	= &tcp6_afinfo_seq_fops,
1877 	.seq_ops	= {
1878 		.show		= tcp6_seq_show,
1879 	},
1880 };
1881 
1882 int __net_init tcp6_proc_init(struct net *net)
1883 {
1884 	return tcp_proc_register(net, &tcp6_seq_afinfo);
1885 }
1886 
1887 void tcp6_proc_exit(struct net *net)
1888 {
1889 	tcp_proc_unregister(net, &tcp6_seq_afinfo);
1890 }
1891 #endif
1892 
1893 static void tcp_v6_clear_sk(struct sock *sk, int size)
1894 {
1895 	struct inet_sock *inet = inet_sk(sk);
1896 
1897 	/* we do not want to clear pinet6 field, because of RCU lookups */
1898 	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1899 
1900 	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1901 	memset(&inet->pinet6 + 1, 0, size);
1902 }
1903 
1904 struct proto tcpv6_prot = {
1905 	.name			= "TCPv6",
1906 	.owner			= THIS_MODULE,
1907 	.close			= tcp_close,
1908 	.connect		= tcp_v6_connect,
1909 	.disconnect		= tcp_disconnect,
1910 	.accept			= inet_csk_accept,
1911 	.ioctl			= tcp_ioctl,
1912 	.init			= tcp_v6_init_sock,
1913 	.destroy		= tcp_v6_destroy_sock,
1914 	.shutdown		= tcp_shutdown,
1915 	.setsockopt		= tcp_setsockopt,
1916 	.getsockopt		= tcp_getsockopt,
1917 	.recvmsg		= tcp_recvmsg,
1918 	.sendmsg		= tcp_sendmsg,
1919 	.sendpage		= tcp_sendpage,
1920 	.backlog_rcv		= tcp_v6_do_rcv,
1921 	.release_cb		= tcp_release_cb,
1922 	.mtu_reduced		= tcp_v6_mtu_reduced,
1923 	.hash			= tcp_v6_hash,
1924 	.unhash			= inet_unhash,
1925 	.get_port		= inet_csk_get_port,
1926 	.enter_memory_pressure	= tcp_enter_memory_pressure,
1927 	.stream_memory_free	= tcp_stream_memory_free,
1928 	.sockets_allocated	= &tcp_sockets_allocated,
1929 	.memory_allocated	= &tcp_memory_allocated,
1930 	.memory_pressure	= &tcp_memory_pressure,
1931 	.orphan_count		= &tcp_orphan_count,
1932 	.sysctl_mem		= sysctl_tcp_mem,
1933 	.sysctl_wmem		= sysctl_tcp_wmem,
1934 	.sysctl_rmem		= sysctl_tcp_rmem,
1935 	.max_header		= MAX_TCP_HEADER,
1936 	.obj_size		= sizeof(struct tcp6_sock),
1937 	.slab_flags		= SLAB_DESTROY_BY_RCU,
1938 	.twsk_prot		= &tcp6_timewait_sock_ops,
1939 	.rsk_prot		= &tcp6_request_sock_ops,
1940 	.h.hashinfo		= &tcp_hashinfo,
1941 	.no_autobind		= true,
1942 #ifdef CONFIG_COMPAT
1943 	.compat_setsockopt	= compat_tcp_setsockopt,
1944 	.compat_getsockopt	= compat_tcp_getsockopt,
1945 #endif
1946 #ifdef CONFIG_MEMCG_KMEM
1947 	.proto_cgroup		= tcp_proto_cgroup,
1948 #endif
1949 	.clear_sk		= tcp_v6_clear_sk,
1950 };
1951 
1952 static const struct inet6_protocol tcpv6_protocol = {
1953 	.early_demux	=	tcp_v6_early_demux,
1954 	.handler	=	tcp_v6_rcv,
1955 	.err_handler	=	tcp_v6_err,
1956 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1957 };
1958 
1959 static struct inet_protosw tcpv6_protosw = {
1960 	.type		=	SOCK_STREAM,
1961 	.protocol	=	IPPROTO_TCP,
1962 	.prot		=	&tcpv6_prot,
1963 	.ops		=	&inet6_stream_ops,
1964 	.no_check	=	0,
1965 	.flags		=	INET_PROTOSW_PERMANENT |
1966 				INET_PROTOSW_ICSK,
1967 };
1968 
1969 static int __net_init tcpv6_net_init(struct net *net)
1970 {
1971 	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1972 				    SOCK_RAW, IPPROTO_TCP, net);
1973 }
1974 
1975 static void __net_exit tcpv6_net_exit(struct net *net)
1976 {
1977 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1978 }
1979 
1980 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1981 {
1982 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1983 }
1984 
1985 static struct pernet_operations tcpv6_net_ops = {
1986 	.init	    = tcpv6_net_init,
1987 	.exit	    = tcpv6_net_exit,
1988 	.exit_batch = tcpv6_net_exit_batch,
1989 };
1990 
1991 int __init tcpv6_init(void)
1992 {
1993 	int ret;
1994 
1995 	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1996 	if (ret)
1997 		goto out;
1998 
1999 	/* register inet6 protocol */
2000 	ret = inet6_register_protosw(&tcpv6_protosw);
2001 	if (ret)
2002 		goto out_tcpv6_protocol;
2003 
2004 	ret = register_pernet_subsys(&tcpv6_net_ops);
2005 	if (ret)
2006 		goto out_tcpv6_protosw;
2007 out:
2008 	return ret;
2009 
2010 out_tcpv6_protosw:
2011 	inet6_unregister_protosw(&tcpv6_protosw);
2012 out_tcpv6_protocol:
2013 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2014 	goto out;
2015 }
2016 
2017 void tcpv6_exit(void)
2018 {
2019 	unregister_pernet_subsys(&tcpv6_net_ops);
2020 	inet6_unregister_protosw(&tcpv6_protosw);
2021 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2022 }
2023