xref: /openbmc/linux/net/ipv6/tcp_ipv6.c (revision 4bce6fce)
1 /*
2  *	TCP over IPv6
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	Based on:
9  *	linux/net/ipv4/tcp.c
10  *	linux/net/ipv4/tcp_input.c
11  *	linux/net/ipv4/tcp_output.c
12  *
13  *	Fixes:
14  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
15  *	YOSHIFUJI Hideaki @USAGI and:	Support the IPV6_V6ONLY socket option, which
16  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
17  *					to a single port at the same time.
18  *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
19  *
20  *	This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25 
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46 
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/tcp_memcontrol.h>
65 #include <net/busy_poll.h>
66 
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
69 
70 #include <linux/crypto.h>
71 #include <linux/scatterlist.h>
72 
73 static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
74 static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
75 				      struct request_sock *req);
76 
77 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78 
79 static const struct inet_connection_sock_af_ops ipv6_mapped;
80 static const struct inet_connection_sock_af_ops ipv6_specific;
81 #ifdef CONFIG_TCP_MD5SIG
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
83 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
84 #else
85 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
86 						   const struct in6_addr *addr)
87 {
88 	return NULL;
89 }
90 #endif
91 
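/* Cache the incoming skb's route on the socket, remember the interface it
 * arrived on, and record the fib6 node serial number so the cached dst can
 * be revalidated cheaply for later packets.
 */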
92 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
93 {
94 	struct dst_entry *dst = skb_dst(skb);
95 
96 	if (dst) {
97 		const struct rt6_info *rt = (const struct rt6_info *)dst;
98 
99 		dst_hold(dst);
100 		sk->sk_rx_dst = dst;
101 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
102 		if (rt->rt6i_node)
103 			inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
104 	}
105 }
106 
107 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
108 {
109 	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
110 					    ipv6_hdr(skb)->saddr.s6_addr32,
111 					    tcp_hdr(skb)->dest,
112 					    tcp_hdr(skb)->source);
113 }
114 
115 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
116 			  int addr_len)
117 {
118 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
119 	struct inet_sock *inet = inet_sk(sk);
120 	struct inet_connection_sock *icsk = inet_csk(sk);
121 	struct ipv6_pinfo *np = inet6_sk(sk);
122 	struct tcp_sock *tp = tcp_sk(sk);
123 	struct in6_addr *saddr = NULL, *final_p, final;
124 	struct rt6_info *rt;
125 	struct flowi6 fl6;
126 	struct dst_entry *dst;
127 	int addr_type;
128 	int err;
129 
130 	if (addr_len < SIN6_LEN_RFC2133)
131 		return -EINVAL;
132 
133 	if (usin->sin6_family != AF_INET6)
134 		return -EAFNOSUPPORT;
135 
136 	memset(&fl6, 0, sizeof(fl6));
137 
138 	if (np->sndflow) {
139 		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
140 		IP6_ECN_flow_init(fl6.flowlabel);
141 		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
142 			struct ip6_flowlabel *flowlabel;
143 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
144 			if (!flowlabel)
145 				return -EINVAL;
146 			fl6_sock_release(flowlabel);
147 		}
148 	}
149 
150 	/*
151 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
152 	 */
153 
154 	if (ipv6_addr_any(&usin->sin6_addr))
155 		usin->sin6_addr.s6_addr[15] = 0x1;
156 
157 	addr_type = ipv6_addr_type(&usin->sin6_addr);
158 
159 	if (addr_type & IPV6_ADDR_MULTICAST)
160 		return -ENETUNREACH;
161 
162 	if (addr_type & IPV6_ADDR_LINKLOCAL) {
163 		if (addr_len >= sizeof(struct sockaddr_in6) &&
164 		    usin->sin6_scope_id) {
165 			/* If interface is set while binding, indices
166 			 * must coincide.
167 			 */
168 			if (sk->sk_bound_dev_if &&
169 			    sk->sk_bound_dev_if != usin->sin6_scope_id)
170 				return -EINVAL;
171 
172 			sk->sk_bound_dev_if = usin->sin6_scope_id;
173 		}
174 
175 		/* Connect to link-local address requires an interface */
176 		if (!sk->sk_bound_dev_if)
177 			return -EINVAL;
178 	}
179 
180 	if (tp->rx_opt.ts_recent_stamp &&
181 	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
182 		tp->rx_opt.ts_recent = 0;
183 		tp->rx_opt.ts_recent_stamp = 0;
184 		tp->write_seq = 0;
185 	}
186 
187 	sk->sk_v6_daddr = usin->sin6_addr;
188 	np->flow_label = fl6.flowlabel;
189 
190 	/*
191 	 *	TCP over IPv4
192 	 */
193 
194 	if (addr_type == IPV6_ADDR_MAPPED) {
195 		u32 exthdrlen = icsk->icsk_ext_hdr_len;
196 		struct sockaddr_in sin;
197 
198 		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
199 
200 		if (__ipv6_only_sock(sk))
201 			return -ENETUNREACH;
202 
203 		sin.sin_family = AF_INET;
204 		sin.sin_port = usin->sin6_port;
205 		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
206 
207 		icsk->icsk_af_ops = &ipv6_mapped;
208 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
209 #ifdef CONFIG_TCP_MD5SIG
210 		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
211 #endif
212 
213 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
214 
215 		if (err) {
216 			icsk->icsk_ext_hdr_len = exthdrlen;
217 			icsk->icsk_af_ops = &ipv6_specific;
218 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
219 #ifdef CONFIG_TCP_MD5SIG
220 			tp->af_specific = &tcp_sock_ipv6_specific;
221 #endif
222 			goto failure;
223 		}
224 		np->saddr = sk->sk_v6_rcv_saddr;
225 
226 		return err;
227 	}
228 
229 	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
230 		saddr = &sk->sk_v6_rcv_saddr;
231 
232 	fl6.flowi6_proto = IPPROTO_TCP;
233 	fl6.daddr = sk->sk_v6_daddr;
234 	fl6.saddr = saddr ? *saddr : np->saddr;
235 	fl6.flowi6_oif = sk->sk_bound_dev_if;
236 	fl6.flowi6_mark = sk->sk_mark;
237 	fl6.fl6_dport = usin->sin6_port;
238 	fl6.fl6_sport = inet->inet_sport;
239 
240 	final_p = fl6_update_dst(&fl6, np->opt, &final);
241 
242 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
243 
244 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
245 	if (IS_ERR(dst)) {
246 		err = PTR_ERR(dst);
247 		goto failure;
248 	}
249 
250 	if (!saddr) {
251 		saddr = &fl6.saddr;
252 		sk->sk_v6_rcv_saddr = *saddr;
253 	}
254 
255 	/* set the source address */
256 	np->saddr = *saddr;
257 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
258 
259 	sk->sk_gso_type = SKB_GSO_TCPV6;
260 	__ip6_dst_store(sk, dst, NULL, NULL);
261 
262 	rt = (struct rt6_info *) dst;
263 	if (tcp_death_row.sysctl_tw_recycle &&
264 	    !tp->rx_opt.ts_recent_stamp &&
265 	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
266 		tcp_fetch_timewait_stamp(sk, dst);
267 
268 	icsk->icsk_ext_hdr_len = 0;
269 	if (np->opt)
270 		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
271 					  np->opt->opt_nflen);
272 
273 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
274 
275 	inet->inet_dport = usin->sin6_port;
276 
277 	tcp_set_state(sk, TCP_SYN_SENT);
278 	err = inet6_hash_connect(&tcp_death_row, sk);
279 	if (err)
280 		goto late_failure;
281 
282 	ip6_set_txhash(sk);
283 
284 	if (!tp->write_seq && likely(!tp->repair))
285 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
286 							     sk->sk_v6_daddr.s6_addr32,
287 							     inet->inet_sport,
288 							     inet->inet_dport);
289 
290 	err = tcp_connect(sk);
291 	if (err)
292 		goto late_failure;
293 
294 	return 0;
295 
296 late_failure:
297 	tcp_set_state(sk, TCP_CLOSE);
298 	__sk_dst_reset(sk);
299 failure:
300 	inet->inet_dport = 0;
301 	sk->sk_route_caps = 0;
302 	return err;
303 }
304 
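/* Apply a PMTU update signalled via tp->mtu_info (set by tcp_v6_err() for
 * ICMPV6_PKT_TOOBIG): look up the updated route, and if our cached path MTU
 * is now too large, shrink the MSS and retransmit the segments that no
 * longer fit.
 */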
305 static void tcp_v6_mtu_reduced(struct sock *sk)
306 {
307 	struct dst_entry *dst;
308 
309 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
310 		return;
311 
312 	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
313 	if (!dst)
314 		return;
315 
316 	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
317 		tcp_sync_mss(sk, dst_mtu(dst));
318 		tcp_simple_retransmit(sk);
319 	}
320 }
321 
322 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
323 		u8 type, u8 code, int offset, __be32 info)
324 {
325 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
326 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
327 	struct net *net = dev_net(skb->dev);
328 	struct request_sock *fastopen;
329 	struct ipv6_pinfo *np;
330 	struct tcp_sock *tp;
331 	__u32 seq, snd_una;
332 	struct sock *sk;
333 	int err;
334 
335 	sk = __inet6_lookup_established(net, &tcp_hashinfo,
336 					&hdr->daddr, th->dest,
337 					&hdr->saddr, ntohs(th->source),
338 					skb->dev->ifindex);
339 
340 	if (!sk) {
341 		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
342 				   ICMP6_MIB_INERRORS);
343 		return;
344 	}
345 
346 	if (sk->sk_state == TCP_TIME_WAIT) {
347 		inet_twsk_put(inet_twsk(sk));
348 		return;
349 	}
350 	seq = ntohl(th->seq);
351 	if (sk->sk_state == TCP_NEW_SYN_RECV)
352 		return tcp_req_err(sk, seq);
353 
354 	bh_lock_sock(sk);
355 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
356 		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
357 
358 	if (sk->sk_state == TCP_CLOSE)
359 		goto out;
360 
361 	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
362 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
363 		goto out;
364 	}
365 
366 	tp = tcp_sk(sk);
367 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
368 	fastopen = tp->fastopen_rsk;
369 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
370 	if (sk->sk_state != TCP_LISTEN &&
371 	    !between(seq, snd_una, tp->snd_nxt)) {
372 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
373 		goto out;
374 	}
375 
376 	np = inet6_sk(sk);
377 
378 	if (type == NDISC_REDIRECT) {
379 		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
380 
381 		if (dst)
382 			dst->ops->redirect(dst, sk, skb);
383 		goto out;
384 	}
385 
386 	if (type == ICMPV6_PKT_TOOBIG) {
387 		/* We are not interested in TCP_LISTEN and open_requests
388 		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
389 		 * they should go through unfragmented).
390 		 */
391 		if (sk->sk_state == TCP_LISTEN)
392 			goto out;
393 
394 		if (!ip6_sk_accept_pmtu(sk))
395 			goto out;
396 
397 		tp->mtu_info = ntohl(info);
398 		if (!sock_owned_by_user(sk))
399 			tcp_v6_mtu_reduced(sk);
400 		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
401 					   &tp->tsq_flags))
402 			sock_hold(sk);
403 		goto out;
404 	}
405 
406 	icmpv6_err_convert(type, code, &err);
407 
408 	/* Might be for a request_sock */
409 	switch (sk->sk_state) {
410 	case TCP_SYN_SENT:
411 	case TCP_SYN_RECV:
412 		/* Only in fast or simultaneous open. If a fast open socket is
413 		 * already accepted it is treated as a connected one below.
414 		 */
415 		if (fastopen && !fastopen->sk)
416 			break;
417 
418 		if (!sock_owned_by_user(sk)) {
419 			sk->sk_err = err;
420 			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
421 
422 			tcp_done(sk);
423 		} else
424 			sk->sk_err_soft = err;
425 		goto out;
426 	}
427 
428 	if (!sock_owned_by_user(sk) && np->recverr) {
429 		sk->sk_err = err;
430 		sk->sk_error_report(sk);
431 	} else
432 		sk->sk_err_soft = err;
433 
434 out:
435 	bh_unlock_sock(sk);
436 	sock_put(sk);
437 }
438 
439 
440 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
441 			      struct flowi *fl,
442 			      struct request_sock *req,
443 			      u16 queue_mapping,
444 			      struct tcp_fastopen_cookie *foc)
445 {
446 	struct inet_request_sock *ireq = inet_rsk(req);
447 	struct ipv6_pinfo *np = inet6_sk(sk);
448 	struct flowi6 *fl6 = &fl->u.ip6;
449 	struct sk_buff *skb;
450 	int err = -ENOMEM;
451 
452 	/* First, grab a route. */
453 	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
454 		goto done;
455 
456 	skb = tcp_make_synack(sk, dst, req, foc);
457 
458 	if (skb) {
459 		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
460 				    &ireq->ir_v6_rmt_addr);
461 
462 		fl6->daddr = ireq->ir_v6_rmt_addr;
463 		if (np->repflow && ireq->pktopts)
464 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
465 
466 		skb_set_queue_mapping(skb, queue_mapping);
467 		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
468 		err = net_xmit_eval(err);
469 	}
470 
471 done:
472 	return err;
473 }
474 
475 
476 static void tcp_v6_reqsk_destructor(struct request_sock *req)
477 {
478 	kfree_skb(inet_rsk(req)->pktopts);
479 }
480 
481 #ifdef CONFIG_TCP_MD5SIG
482 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
483 						   const struct in6_addr *addr)
484 {
485 	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
486 }
487 
488 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
489 						const struct sock *addr_sk)
490 {
491 	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
492 }
493 
494 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
495 				 int optlen)
496 {
497 	struct tcp_md5sig cmd;
498 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
499 
500 	if (optlen < sizeof(cmd))
501 		return -EINVAL;
502 
503 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
504 		return -EFAULT;
505 
506 	if (sin6->sin6_family != AF_INET6)
507 		return -EINVAL;
508 
509 	if (!cmd.tcpm_keylen) {
510 		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
511 			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
512 					      AF_INET);
513 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
514 				      AF_INET6);
515 	}
516 
517 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
518 		return -EINVAL;
519 
520 	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
521 		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
522 				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
523 
524 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
525 			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
526 }
527 
528 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
529 					const struct in6_addr *daddr,
530 					const struct in6_addr *saddr, int nbytes)
531 {
532 	struct tcp6_pseudohdr *bp;
533 	struct scatterlist sg;
534 
535 	bp = &hp->md5_blk.ip6;
536 	/* 1. TCP pseudo-header (RFC2460) */
537 	bp->saddr = *saddr;
538 	bp->daddr = *daddr;
539 	bp->protocol = cpu_to_be32(IPPROTO_TCP);
540 	bp->len = cpu_to_be32(nbytes);
541 
542 	sg_init_one(&sg, bp, sizeof(*bp));
543 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
544 }
545 
546 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
547 			       const struct in6_addr *daddr, struct in6_addr *saddr,
548 			       const struct tcphdr *th)
549 {
550 	struct tcp_md5sig_pool *hp;
551 	struct hash_desc *desc;
552 
553 	hp = tcp_get_md5sig_pool();
554 	if (!hp)
555 		goto clear_hash_noput;
556 	desc = &hp->md5_desc;
557 
558 	if (crypto_hash_init(desc))
559 		goto clear_hash;
560 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
561 		goto clear_hash;
562 	if (tcp_md5_hash_header(hp, th))
563 		goto clear_hash;
564 	if (tcp_md5_hash_key(hp, key))
565 		goto clear_hash;
566 	if (crypto_hash_final(desc, md5_hash))
567 		goto clear_hash;
568 
569 	tcp_put_md5sig_pool();
570 	return 0;
571 
572 clear_hash:
573 	tcp_put_md5sig_pool();
574 clear_hash_noput:
575 	memset(md5_hash, 0, 16);
576 	return 1;
577 }
578 
579 static int tcp_v6_md5_hash_skb(char *md5_hash,
580 			       const struct tcp_md5sig_key *key,
581 			       const struct sock *sk,
582 			       const struct sk_buff *skb)
583 {
584 	const struct in6_addr *saddr, *daddr;
585 	struct tcp_md5sig_pool *hp;
586 	struct hash_desc *desc;
587 	const struct tcphdr *th = tcp_hdr(skb);
588 
589 	if (sk) { /* valid for established/request sockets */
590 		saddr = &sk->sk_v6_rcv_saddr;
591 		daddr = &sk->sk_v6_daddr;
592 	} else {
593 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
594 		saddr = &ip6h->saddr;
595 		daddr = &ip6h->daddr;
596 	}
597 
598 	hp = tcp_get_md5sig_pool();
599 	if (!hp)
600 		goto clear_hash_noput;
601 	desc = &hp->md5_desc;
602 
603 	if (crypto_hash_init(desc))
604 		goto clear_hash;
605 
606 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
607 		goto clear_hash;
608 	if (tcp_md5_hash_header(hp, th))
609 		goto clear_hash;
610 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
611 		goto clear_hash;
612 	if (tcp_md5_hash_key(hp, key))
613 		goto clear_hash;
614 	if (crypto_hash_final(desc, md5_hash))
615 		goto clear_hash;
616 
617 	tcp_put_md5sig_pool();
618 	return 0;
619 
620 clear_hash:
621 	tcp_put_md5sig_pool();
622 clear_hash_noput:
623 	memset(md5_hash, 0, 16);
624 	return 1;
625 }
626 
627 static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
628 {
629 	const __u8 *hash_location = NULL;
630 	struct tcp_md5sig_key *hash_expected;
631 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
632 	const struct tcphdr *th = tcp_hdr(skb);
633 	int genhash;
634 	u8 newhash[16];
635 
636 	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
637 	hash_location = tcp_parse_md5sig_option(th);
638 
639 	/* We've parsed the options - do we have a hash? */
640 	if (!hash_expected && !hash_location)
641 		return false;
642 
643 	if (hash_expected && !hash_location) {
644 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
645 		return true;
646 	}
647 
648 	if (!hash_expected && hash_location) {
649 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
650 		return true;
651 	}
652 
653 	/* check the signature */
654 	genhash = tcp_v6_md5_hash_skb(newhash,
655 				      hash_expected,
656 				      NULL, skb);
657 
658 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
659 		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
660 				     genhash ? "failed" : "mismatch",
661 				     &ip6h->saddr, ntohs(th->source),
662 				     &ip6h->daddr, ntohs(th->dest));
663 		return true;
664 	}
665 	return false;
666 }
667 #endif
668 
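/* Fill in the IPv6 half of a request sock from the incoming SYN: record the
 * peer and local addresses, pin the inbound interface for link-local peers,
 * and stash the SYN skb when the listener asked for packet options.
 */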
669 static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
670 			    struct sk_buff *skb)
671 {
672 	struct inet_request_sock *ireq = inet_rsk(req);
673 	struct ipv6_pinfo *np = inet6_sk(sk);
674 
675 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
676 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
677 
678 	/* So that link locals have meaning */
679 	if (!sk->sk_bound_dev_if &&
680 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
681 		ireq->ir_iif = tcp_v6_iif(skb);
682 
683 	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
684 	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
685 	     np->rxopt.bits.rxinfo ||
686 	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
687 	     np->rxopt.bits.rxohlim || np->repflow)) {
688 		atomic_inc(&skb->users);
689 		ireq->pktopts = skb;
690 	}
691 }
692 
693 static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
694 					  const struct request_sock *req,
695 					  bool *strict)
696 {
697 	if (strict)
698 		*strict = true;
699 	return inet6_csk_route_req(sk, &fl->u.ip6, req);
700 }
701 
702 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
703 	.family		=	AF_INET6,
704 	.obj_size	=	sizeof(struct tcp6_request_sock),
705 	.rtx_syn_ack	=	tcp_rtx_synack,
706 	.send_ack	=	tcp_v6_reqsk_send_ack,
707 	.destructor	=	tcp_v6_reqsk_destructor,
708 	.send_reset	=	tcp_v6_send_reset,
709 	.syn_ack_timeout =	tcp_syn_ack_timeout,
710 };
711 
712 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
713 	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
714 				sizeof(struct ipv6hdr),
715 #ifdef CONFIG_TCP_MD5SIG
716 	.req_md5_lookup	=	tcp_v6_md5_lookup,
717 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
718 #endif
719 	.init_req	=	tcp_v6_init_req,
720 #ifdef CONFIG_SYN_COOKIES
721 	.cookie_init_seq =	cookie_v6_init_sequence,
722 #endif
723 	.route_req	=	tcp_v6_route_req,
724 	.init_seq	=	tcp_v6_init_sequence,
725 	.send_synack	=	tcp_v6_send_synack,
726 	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
727 };
728 
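/* Build and send a bare ACK or RST on the per-netns TCP control socket.
 * This path is also used when no full socket is available (resets,
 * timewait and request-sock ACKs), so the reply is derived from the
 * incoming skb.
 */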
729 static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
730 				 u32 ack, u32 win, u32 tsval, u32 tsecr,
731 				 int oif, struct tcp_md5sig_key *key, int rst,
732 				 u8 tclass, u32 label)
733 {
734 	const struct tcphdr *th = tcp_hdr(skb);
735 	struct tcphdr *t1;
736 	struct sk_buff *buff;
737 	struct flowi6 fl6;
738 	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
739 	struct sock *ctl_sk = net->ipv6.tcp_sk;
740 	unsigned int tot_len = sizeof(struct tcphdr);
741 	struct dst_entry *dst;
742 	__be32 *topt;
743 
744 	if (tsecr)
745 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
746 #ifdef CONFIG_TCP_MD5SIG
747 	if (key)
748 		tot_len += TCPOLEN_MD5SIG_ALIGNED;
749 #endif
750 
751 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
752 			 GFP_ATOMIC);
753 	if (!buff)
754 		return;
755 
756 	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
757 
758 	t1 = (struct tcphdr *) skb_push(buff, tot_len);
759 	skb_reset_transport_header(buff);
760 
761 	/* Swap the send and the receive. */
762 	memset(t1, 0, sizeof(*t1));
763 	t1->dest = th->source;
764 	t1->source = th->dest;
765 	t1->doff = tot_len / 4;
766 	t1->seq = htonl(seq);
767 	t1->ack_seq = htonl(ack);
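	/* Per RFC 793, a reset sent in reply to a segment that itself carried
	 * an ACK must not set the ACK bit; every other reply acknowledges the
	 * incoming segment.
	 */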
768 	t1->ack = !rst || !th->ack;
769 	t1->rst = rst;
770 	t1->window = htons(win);
771 
772 	topt = (__be32 *)(t1 + 1);
773 
774 	if (tsecr) {
775 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
776 				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
777 		*topt++ = htonl(tsval);
778 		*topt++ = htonl(tsecr);
779 	}
780 
781 #ifdef CONFIG_TCP_MD5SIG
782 	if (key) {
783 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
784 				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
785 		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
786 				    &ipv6_hdr(skb)->saddr,
787 				    &ipv6_hdr(skb)->daddr, t1);
788 	}
789 #endif
790 
791 	memset(&fl6, 0, sizeof(fl6));
792 	fl6.daddr = ipv6_hdr(skb)->saddr;
793 	fl6.saddr = ipv6_hdr(skb)->daddr;
794 	fl6.flowlabel = label;
795 
796 	buff->ip_summed = CHECKSUM_PARTIAL;
797 	buff->csum = 0;
798 
799 	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
800 
801 	fl6.flowi6_proto = IPPROTO_TCP;
802 	if (rt6_need_strict(&fl6.daddr) && !oif)
803 		fl6.flowi6_oif = tcp_v6_iif(skb);
804 	else
805 		fl6.flowi6_oif = oif;
806 	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
807 	fl6.fl6_dport = t1->dest;
808 	fl6.fl6_sport = t1->source;
809 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
810 
811 	/* Pass a socket to ip6_dst_lookup whether or not it is for a RST;
812 	 * the underlying function will use it to retrieve the network
813 	 * namespace.
814 	 */
815 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
816 	if (!IS_ERR(dst)) {
817 		skb_dst_set(buff, dst);
818 		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
819 		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
820 		if (rst)
821 			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
822 		return;
823 	}
824 
825 	kfree_skb(buff);
826 }
827 
828 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
829 {
830 	const struct tcphdr *th = tcp_hdr(skb);
831 	u32 seq = 0, ack_seq = 0;
832 	struct tcp_md5sig_key *key = NULL;
833 #ifdef CONFIG_TCP_MD5SIG
834 	const __u8 *hash_location = NULL;
835 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
836 	unsigned char newhash[16];
837 	int genhash;
838 	struct sock *sk1 = NULL;
839 #endif
840 	int oif;
841 
842 	if (th->rst)
843 		return;
844 
845 	/* If sk is not NULL, it means we did a successful lookup and the incoming
846 	 * route had to be correct. prequeue might have dropped our dst.
847 	 */
848 	if (!sk && !ipv6_unicast_destination(skb))
849 		return;
850 
851 #ifdef CONFIG_TCP_MD5SIG
852 	hash_location = tcp_parse_md5sig_option(th);
853 	if (!sk && hash_location) {
854 		/*
855 		 * The active side is lost. Try to find the listening socket through
856 		 * the source port, and then find the md5 key through the listening socket.
857 		 * We are not losing security here:
858 		 * the incoming packet is checked against the md5 hash of the key we find,
859 		 * and no RST is generated if the md5 hash doesn't match.
860 		 */
861 		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
862 					   &tcp_hashinfo, &ipv6h->saddr,
863 					   th->source, &ipv6h->daddr,
864 					   ntohs(th->source), tcp_v6_iif(skb));
865 		if (!sk1)
866 			return;
867 
868 		rcu_read_lock();
869 		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
870 		if (!key)
871 			goto release_sk1;
872 
873 		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
874 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
875 			goto release_sk1;
876 	} else {
877 		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
878 	}
879 #endif
880 
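	/* RFC 793: if the offending segment carried an ACK, the RST takes its
	 * sequence number from that ACK; otherwise the RST acknowledges exactly
	 * the data (plus SYN/FIN) covered by the segment.
	 */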
881 	if (th->ack)
882 		seq = ntohl(th->ack_seq);
883 	else
884 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
885 			  (th->doff << 2);
886 
887 	oif = sk ? sk->sk_bound_dev_if : 0;
888 	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
889 
890 #ifdef CONFIG_TCP_MD5SIG
891 release_sk1:
892 	if (sk1) {
893 		rcu_read_unlock();
894 		sock_put(sk1);
895 	}
896 #endif
897 }
898 
899 static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
900 			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
901 			    struct tcp_md5sig_key *key, u8 tclass,
902 			    u32 label)
903 {
904 	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
905 			     tclass, label);
906 }
907 
908 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
909 {
910 	struct inet_timewait_sock *tw = inet_twsk(sk);
911 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
912 
913 	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
914 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
915 			tcp_time_stamp + tcptw->tw_ts_offset,
916 			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
917 			tw->tw_tclass, (tw->tw_flowlabel << 12));
918 
919 	inet_twsk_put(tw);
920 }
921 
922 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
923 				  struct request_sock *req)
924 {
925 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
926 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
927 	 */
928 	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
929 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
930 			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
931 			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
932 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
933 			0, 0);
934 }
935 
936 
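/* For a segment arriving on a listening socket: first look for a matching
 * pending connection request, then for an already established or timewait
 * socket, and finally (if SYN cookies are enabled) try to validate a cookie
 * carried by a bare ACK.
 */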
937 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
938 {
939 	const struct tcphdr *th = tcp_hdr(skb);
940 	struct request_sock *req;
941 	struct sock *nsk;
942 
943 	/* Find possible connection requests. */
944 	req = inet6_csk_search_req(sk, th->source,
945 				   &ipv6_hdr(skb)->saddr,
946 				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
947 	if (req) {
948 		nsk = tcp_check_req(sk, skb, req, false);
949 		reqsk_put(req);
950 		return nsk;
951 	}
952 	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
953 					 &ipv6_hdr(skb)->saddr, th->source,
954 					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
955 					 tcp_v6_iif(skb));
956 
957 	if (nsk) {
958 		if (nsk->sk_state != TCP_TIME_WAIT) {
959 			bh_lock_sock(nsk);
960 			return nsk;
961 		}
962 		inet_twsk_put(inet_twsk(nsk));
963 		return NULL;
964 	}
965 
966 #ifdef CONFIG_SYN_COOKIES
967 	if (!th->syn)
968 		sk = cookie_v6_check(sk, skb);
969 #endif
970 	return sk;
971 }
972 
973 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
974 {
975 	if (skb->protocol == htons(ETH_P_IP))
976 		return tcp_v4_conn_request(sk, skb);
977 
978 	if (!ipv6_unicast_destination(skb))
979 		goto drop;
980 
981 	return tcp_conn_request(&tcp6_request_sock_ops,
982 				&tcp_request_sock_ipv6_ops, sk, skb);
983 
984 drop:
985 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
986 	return 0; /* don't send reset */
987 }
988 
989 static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
990 					 struct request_sock *req,
991 					 struct dst_entry *dst)
992 {
993 	struct inet_request_sock *ireq;
994 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
995 	struct tcp6_sock *newtcp6sk;
996 	struct inet_sock *newinet;
997 	struct tcp_sock *newtp;
998 	struct sock *newsk;
999 #ifdef CONFIG_TCP_MD5SIG
1000 	struct tcp_md5sig_key *key;
1001 #endif
1002 	struct flowi6 fl6;
1003 
1004 	if (skb->protocol == htons(ETH_P_IP)) {
1005 		/*
1006 		 *	v6 mapped
1007 		 */
1008 
1009 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1010 
1011 		if (!newsk)
1012 			return NULL;
1013 
1014 		newtcp6sk = (struct tcp6_sock *)newsk;
1015 		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1016 
1017 		newinet = inet_sk(newsk);
1018 		newnp = inet6_sk(newsk);
1019 		newtp = tcp_sk(newsk);
1020 
1021 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1022 
1023 		newnp->saddr = newsk->sk_v6_rcv_saddr;
1024 
1025 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1026 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1027 #ifdef CONFIG_TCP_MD5SIG
1028 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1029 #endif
1030 
1031 		newnp->ipv6_ac_list = NULL;
1032 		newnp->ipv6_fl_list = NULL;
1033 		newnp->pktoptions  = NULL;
1034 		newnp->opt	   = NULL;
1035 		newnp->mcast_oif   = tcp_v6_iif(skb);
1036 		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1037 		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1038 		if (np->repflow)
1039 			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1040 
1041 		/*
1042 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1043 		 * here, tcp_create_openreq_child now does this for us, see the comment in
1044 		 * that function for the gory details. -acme
1045 		 */
1046 
1047 		/* It is a tricky place. Until this moment the IPv4 tcp code
1048 		   worked with the IPv6 icsk.icsk_af_ops.
1049 		   Sync it now.
1050 		 */
1051 		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1052 
1053 		return newsk;
1054 	}
1055 
1056 	ireq = inet_rsk(req);
1057 
1058 	if (sk_acceptq_is_full(sk))
1059 		goto out_overflow;
1060 
1061 	if (!dst) {
1062 		dst = inet6_csk_route_req(sk, &fl6, req);
1063 		if (!dst)
1064 			goto out;
1065 	}
1066 
1067 	newsk = tcp_create_openreq_child(sk, req, skb);
1068 	if (!newsk)
1069 		goto out_nonewsk;
1070 
1071 	/*
1072 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1073 	 * count here, tcp_create_openreq_child now does this for us, see the
1074 	 * comment in that function for the gory details. -acme
1075 	 */
1076 
1077 	newsk->sk_gso_type = SKB_GSO_TCPV6;
1078 	__ip6_dst_store(newsk, dst, NULL, NULL);
1079 	inet6_sk_rx_dst_set(newsk, skb);
1080 
1081 	newtcp6sk = (struct tcp6_sock *)newsk;
1082 	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1083 
1084 	newtp = tcp_sk(newsk);
1085 	newinet = inet_sk(newsk);
1086 	newnp = inet6_sk(newsk);
1087 
1088 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1089 
1090 	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1091 	newnp->saddr = ireq->ir_v6_loc_addr;
1092 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1093 	newsk->sk_bound_dev_if = ireq->ir_iif;
1094 
1095 	ip6_set_txhash(newsk);
1096 
1097 	/* Now IPv6 options...
1098 
1099 	   First: no IPv4 options.
1100 	 */
1101 	newinet->inet_opt = NULL;
1102 	newnp->ipv6_ac_list = NULL;
1103 	newnp->ipv6_fl_list = NULL;
1104 
1105 	/* Clone RX bits */
1106 	newnp->rxopt.all = np->rxopt.all;
1107 
1108 	/* Clone pktoptions received with SYN */
1109 	newnp->pktoptions = NULL;
1110 	if (ireq->pktopts) {
1111 		newnp->pktoptions = skb_clone(ireq->pktopts,
1112 					      sk_gfp_atomic(sk, GFP_ATOMIC));
1113 		consume_skb(ireq->pktopts);
1114 		ireq->pktopts = NULL;
1115 		if (newnp->pktoptions)
1116 			skb_set_owner_r(newnp->pktoptions, newsk);
1117 	}
1118 	newnp->opt	  = NULL;
1119 	newnp->mcast_oif  = tcp_v6_iif(skb);
1120 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1121 	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1122 	if (np->repflow)
1123 		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1124 
1125 	/* Clone native IPv6 options from the listening socket (if any).
1126 
1127 	   Yes, keeping a reference count would be much more clever,
1128 	   but we do one more thing here: reattach optmem
1129 	   to newsk.
1130 	 */
1131 	if (np->opt)
1132 		newnp->opt = ipv6_dup_options(newsk, np->opt);
1133 
1134 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1135 	if (newnp->opt)
1136 		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1137 						     newnp->opt->opt_flen);
1138 
1139 	tcp_ca_openreq_child(newsk, dst);
1140 
1141 	tcp_sync_mss(newsk, dst_mtu(dst));
1142 	newtp->advmss = dst_metric_advmss(dst);
1143 	if (tcp_sk(sk)->rx_opt.user_mss &&
1144 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1145 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1146 
1147 	tcp_initialize_rcv_mss(newsk);
1148 
1149 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1150 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1151 
1152 #ifdef CONFIG_TCP_MD5SIG
1153 	/* Copy over the MD5 key from the original socket */
1154 	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1155 	if (key) {
1156 		/* We're using one, so create a matching key
1157 		 * on the newsk structure. If we fail to get
1158 		 * memory, then we end up not copying the key
1159 		 * across. Shucks.
1160 		 */
1161 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1162 			       AF_INET6, key->key, key->keylen,
1163 			       sk_gfp_atomic(sk, GFP_ATOMIC));
1164 	}
1165 #endif
1166 
1167 	if (__inet_inherit_port(sk, newsk) < 0) {
1168 		inet_csk_prepare_forced_close(newsk);
1169 		tcp_done(newsk);
1170 		goto out;
1171 	}
1172 	__inet_hash(newsk, NULL);
1173 
1174 	return newsk;
1175 
1176 out_overflow:
1177 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1178 out_nonewsk:
1179 	dst_release(dst);
1180 out:
1181 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1182 	return NULL;
1183 }
1184 
1185 /* The socket must have its spinlock held when we get
1186  * here.
1187  *
1188  * We have a potential double-lock case here, so even when
1189  * doing backlog processing we use the BH locking scheme.
1190  * This is because we cannot sleep with the original spinlock
1191  * held.
1192  */
1193 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1194 {
1195 	struct ipv6_pinfo *np = inet6_sk(sk);
1196 	struct tcp_sock *tp;
1197 	struct sk_buff *opt_skb = NULL;
1198 
1199 	/* Imagine: socket is IPv6. IPv4 packet arrives,
1200 	   goes to the IPv4 receive handler and is backlogged.
1201 	   From the backlog it always goes here. Kerboom...
1202 	   Fortunately, tcp_rcv_established and rcv_established
1203 	   handle them correctly, but it is not the case with
1204 	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1205 	 */
1206 
1207 	if (skb->protocol == htons(ETH_P_IP))
1208 		return tcp_v4_do_rcv(sk, skb);
1209 
1210 	if (sk_filter(sk, skb))
1211 		goto discard;
1212 
1213 	/*
1214 	 *	socket locking is here for SMP purposes as backlog rcv
1215 	 *	is currently called with bh processing disabled.
1216 	 */
1217 
1218 	/* Do Stevens' IPV6_PKTOPTIONS.
1219 
1220 	   Yes, guys, it is the only place in our code where we
1221 	   may make it not affect IPv4.
1222 	   The rest of the code is protocol independent,
1223 	   and I do not like the idea of uglifying IPv4.
1224 
1225 	   Actually, the whole idea behind IPV6_PKTOPTIONS
1226 	   looks not very well thought out. For now we latch the
1227 	   options received in the last packet enqueued
1228 	   by tcp. Feel free to propose a better solution.
1229 					       --ANK (980728)
1230 	 */
1231 	if (np->rxopt.all)
1232 		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
1233 
1234 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1235 		struct dst_entry *dst = sk->sk_rx_dst;
1236 
1237 		sock_rps_save_rxhash(sk, skb);
1238 		sk_mark_napi_id(sk, skb);
1239 		if (dst) {
1240 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1241 			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1242 				dst_release(dst);
1243 				sk->sk_rx_dst = NULL;
1244 			}
1245 		}
1246 
1247 		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1248 		if (opt_skb)
1249 			goto ipv6_pktoptions;
1250 		return 0;
1251 	}
1252 
1253 	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1254 		goto csum_err;
1255 
1256 	if (sk->sk_state == TCP_LISTEN) {
1257 		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1258 		if (!nsk)
1259 			goto discard;
1260 
1261 		/*
1262 		 * Queue it on the new socket if the new socket is active,
1263 		 * otherwise we just short-circuit this and continue with
1264 		 * the new socket.
1265 		 */
1266 		if (nsk != sk) {
1267 			sock_rps_save_rxhash(nsk, skb);
1268 			sk_mark_napi_id(sk, skb);
1269 			if (tcp_child_process(sk, nsk, skb))
1270 				goto reset;
1271 			if (opt_skb)
1272 				__kfree_skb(opt_skb);
1273 			return 0;
1274 		}
1275 	} else
1276 		sock_rps_save_rxhash(sk, skb);
1277 
1278 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1279 		goto reset;
1280 	if (opt_skb)
1281 		goto ipv6_pktoptions;
1282 	return 0;
1283 
1284 reset:
1285 	tcp_v6_send_reset(sk, skb);
1286 discard:
1287 	if (opt_skb)
1288 		__kfree_skb(opt_skb);
1289 	kfree_skb(skb);
1290 	return 0;
1291 csum_err:
1292 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1293 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1294 	goto discard;
1295 
1296 
1297 ipv6_pktoptions:
1298 	/* You may ask: what is this?
1299 
1300 	   1. skb was enqueued by tcp.
1301 	   2. skb is added to tail of read queue, rather than out of order.
1302 	   3. socket is not in passive state.
1303 	   4. Finally, it really contains options which the user wants to receive.
1304 	 */
1305 	tp = tcp_sk(sk);
1306 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1307 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1308 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1309 			np->mcast_oif = tcp_v6_iif(opt_skb);
1310 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1311 			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1312 		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1313 			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1314 		if (np->repflow)
1315 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1316 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1317 			skb_set_owner_r(opt_skb, sk);
1318 			opt_skb = xchg(&np->pktoptions, opt_skb);
1319 		} else {
1320 			__kfree_skb(opt_skb);
1321 			opt_skb = xchg(&np->pktoptions, NULL);
1322 		}
1323 	}
1324 
1325 	kfree_skb(opt_skb);
1326 	return 0;
1327 }
1328 
1329 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1330 			   const struct tcphdr *th)
1331 {
1332 	/* This is tricky: we move IP6CB at its correct location into
1333 	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1334 	 * _decode_session6() uses IP6CB().
1335 	 * barrier() makes sure compiler won't play aliasing games.
1336 	 */
1337 	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1338 		sizeof(struct inet6_skb_parm));
1339 	barrier();
1340 
1341 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1342 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1343 				    skb->len - th->doff*4);
1344 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1345 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1346 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1347 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1348 	TCP_SKB_CB(skb)->sacked = 0;
1349 }
1350 
1351 static void tcp_v6_restore_cb(struct sk_buff *skb)
1352 {
1353 	/* We need to move header back to the beginning if xfrm6_policy_check()
1354 	 * and tcp_v6_fill_cb() are going to be called again.
1355 	 */
1356 	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1357 		sizeof(struct inet6_skb_parm));
1358 }
1359 
1360 static int tcp_v6_rcv(struct sk_buff *skb)
1361 {
1362 	const struct tcphdr *th;
1363 	const struct ipv6hdr *hdr;
1364 	struct sock *sk;
1365 	int ret;
1366 	struct net *net = dev_net(skb->dev);
1367 
1368 	if (skb->pkt_type != PACKET_HOST)
1369 		goto discard_it;
1370 
1371 	/*
1372 	 *	Count it even if it's bad.
1373 	 */
1374 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1375 
1376 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1377 		goto discard_it;
1378 
1379 	th = tcp_hdr(skb);
1380 
1381 	if (th->doff < sizeof(struct tcphdr)/4)
1382 		goto bad_packet;
1383 	if (!pskb_may_pull(skb, th->doff*4))
1384 		goto discard_it;
1385 
1386 	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1387 		goto csum_error;
1388 
1389 	th = tcp_hdr(skb);
1390 	hdr = ipv6_hdr(skb);
1391 
1392 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
1393 				inet6_iif(skb));
1394 	if (!sk)
1395 		goto no_tcp_socket;
1396 
1397 process:
1398 	if (sk->sk_state == TCP_TIME_WAIT)
1399 		goto do_time_wait;
1400 
1401 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1402 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1403 		goto discard_and_relse;
1404 	}
1405 
1406 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1407 		goto discard_and_relse;
1408 
1409 	tcp_v6_fill_cb(skb, hdr, th);
1410 
1411 #ifdef CONFIG_TCP_MD5SIG
1412 	if (tcp_v6_inbound_md5_hash(sk, skb))
1413 		goto discard_and_relse;
1414 #endif
1415 
1416 	if (sk_filter(sk, skb))
1417 		goto discard_and_relse;
1418 
1419 	sk_incoming_cpu_update(sk);
1420 	skb->dev = NULL;
1421 
1422 	bh_lock_sock_nested(sk);
1423 	ret = 0;
1424 	if (!sock_owned_by_user(sk)) {
1425 		if (!tcp_prequeue(sk, skb))
1426 			ret = tcp_v6_do_rcv(sk, skb);
1427 	} else if (unlikely(sk_add_backlog(sk, skb,
1428 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1429 		bh_unlock_sock(sk);
1430 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1431 		goto discard_and_relse;
1432 	}
1433 	bh_unlock_sock(sk);
1434 
1435 	sock_put(sk);
1436 	return ret ? -1 : 0;
1437 
1438 no_tcp_socket:
1439 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1440 		goto discard_it;
1441 
1442 	tcp_v6_fill_cb(skb, hdr, th);
1443 
1444 	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1445 csum_error:
1446 		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1447 bad_packet:
1448 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1449 	} else {
1450 		tcp_v6_send_reset(NULL, skb);
1451 	}
1452 
1453 discard_it:
1454 	kfree_skb(skb);
1455 	return 0;
1456 
1457 discard_and_relse:
1458 	sock_put(sk);
1459 	goto discard_it;
1460 
1461 do_time_wait:
1462 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1463 		inet_twsk_put(inet_twsk(sk));
1464 		goto discard_it;
1465 	}
1466 
1467 	tcp_v6_fill_cb(skb, hdr, th);
1468 
1469 	if (skb->len < (th->doff<<2)) {
1470 		inet_twsk_put(inet_twsk(sk));
1471 		goto bad_packet;
1472 	}
1473 	if (tcp_checksum_complete(skb)) {
1474 		inet_twsk_put(inet_twsk(sk));
1475 		goto csum_error;
1476 	}
1477 
1478 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1479 	case TCP_TW_SYN:
1480 	{
1481 		struct sock *sk2;
1482 
1483 		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1484 					    &ipv6_hdr(skb)->saddr, th->source,
1485 					    &ipv6_hdr(skb)->daddr,
1486 					    ntohs(th->dest), tcp_v6_iif(skb));
1487 		if (sk2) {
1488 			struct inet_timewait_sock *tw = inet_twsk(sk);
1489 			inet_twsk_deschedule(tw);
1490 			inet_twsk_put(tw);
1491 			sk = sk2;
1492 			tcp_v6_restore_cb(skb);
1493 			goto process;
1494 		}
1495 		/* Fall through to ACK */
1496 	}
1497 	case TCP_TW_ACK:
1498 		tcp_v6_timewait_ack(sk, skb);
1499 		break;
1500 	case TCP_TW_RST:
1501 		tcp_v6_restore_cb(skb);
1502 		goto no_tcp_socket;
1503 	case TCP_TW_SUCCESS:
1504 		;
1505 	}
1506 	goto discard_it;
1507 }
1508 
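/* Early demux: before routing, try to match the segment to an established
 * socket and, when the socket's cached rx dst is still valid for this
 * interface, attach it to the skb so the route lookup can be skipped.
 */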
1509 static void tcp_v6_early_demux(struct sk_buff *skb)
1510 {
1511 	const struct ipv6hdr *hdr;
1512 	const struct tcphdr *th;
1513 	struct sock *sk;
1514 
1515 	if (skb->pkt_type != PACKET_HOST)
1516 		return;
1517 
1518 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1519 		return;
1520 
1521 	hdr = ipv6_hdr(skb);
1522 	th = tcp_hdr(skb);
1523 
1524 	if (th->doff < sizeof(struct tcphdr) / 4)
1525 		return;
1526 
1527 	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
1528 	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1529 					&hdr->saddr, th->source,
1530 					&hdr->daddr, ntohs(th->dest),
1531 					inet6_iif(skb));
1532 	if (sk) {
1533 		skb->sk = sk;
1534 		skb->destructor = sock_edemux;
1535 		if (sk_fullsock(sk)) {
1536 			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1537 
1538 			if (dst)
1539 				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1540 			if (dst &&
1541 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1542 				skb_dst_set_noref(skb, dst);
1543 		}
1544 	}
1545 }
1546 
1547 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1548 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1549 	.twsk_unique	= tcp_twsk_unique,
1550 	.twsk_destructor = tcp_twsk_destructor,
1551 };
1552 
1553 static const struct inet_connection_sock_af_ops ipv6_specific = {
1554 	.queue_xmit	   = inet6_csk_xmit,
1555 	.send_check	   = tcp_v6_send_check,
1556 	.rebuild_header	   = inet6_sk_rebuild_header,
1557 	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1558 	.conn_request	   = tcp_v6_conn_request,
1559 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1560 	.net_header_len	   = sizeof(struct ipv6hdr),
1561 	.net_frag_header_len = sizeof(struct frag_hdr),
1562 	.setsockopt	   = ipv6_setsockopt,
1563 	.getsockopt	   = ipv6_getsockopt,
1564 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1565 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1566 	.bind_conflict	   = inet6_csk_bind_conflict,
1567 #ifdef CONFIG_COMPAT
1568 	.compat_setsockopt = compat_ipv6_setsockopt,
1569 	.compat_getsockopt = compat_ipv6_getsockopt,
1570 #endif
1571 	.mtu_reduced	   = tcp_v6_mtu_reduced,
1572 };
1573 
1574 #ifdef CONFIG_TCP_MD5SIG
1575 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1576 	.md5_lookup	=	tcp_v6_md5_lookup,
1577 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1578 	.md5_parse	=	tcp_v6_parse_md5_keys,
1579 };
1580 #endif
1581 
1582 /*
1583  *	TCP over IPv4 via INET6 API
1584  */
1585 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1586 	.queue_xmit	   = ip_queue_xmit,
1587 	.send_check	   = tcp_v4_send_check,
1588 	.rebuild_header	   = inet_sk_rebuild_header,
1589 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1590 	.conn_request	   = tcp_v6_conn_request,
1591 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1592 	.net_header_len	   = sizeof(struct iphdr),
1593 	.setsockopt	   = ipv6_setsockopt,
1594 	.getsockopt	   = ipv6_getsockopt,
1595 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1596 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1597 	.bind_conflict	   = inet6_csk_bind_conflict,
1598 #ifdef CONFIG_COMPAT
1599 	.compat_setsockopt = compat_ipv6_setsockopt,
1600 	.compat_getsockopt = compat_ipv6_getsockopt,
1601 #endif
1602 	.mtu_reduced	   = tcp_v4_mtu_reduced,
1603 };
1604 
1605 #ifdef CONFIG_TCP_MD5SIG
1606 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1607 	.md5_lookup	=	tcp_v4_md5_lookup,
1608 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1609 	.md5_parse	=	tcp_v6_parse_md5_keys,
1610 };
1611 #endif
1612 
1613 /* NOTE: A lot of things are set to zero explicitly by the call to
1614  *       sk_alloc(), so they need not be done here.
1615  */
1616 static int tcp_v6_init_sock(struct sock *sk)
1617 {
1618 	struct inet_connection_sock *icsk = inet_csk(sk);
1619 
1620 	tcp_init_sock(sk);
1621 
1622 	icsk->icsk_af_ops = &ipv6_specific;
1623 
1624 #ifdef CONFIG_TCP_MD5SIG
1625 	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1626 #endif
1627 
1628 	return 0;
1629 }
1630 
1631 static void tcp_v6_destroy_sock(struct sock *sk)
1632 {
1633 	tcp_v4_destroy_sock(sk);
1634 	inet6_destroy_sock(sk);
1635 }
1636 
1637 #ifdef CONFIG_PROC_FS
1638 /* Proc filesystem TCPv6 sock list dumping. */
1639 static void get_openreq6(struct seq_file *seq,
1640 			 struct request_sock *req, int i, kuid_t uid)
1641 {
1642 	long ttd = req->rsk_timer.expires - jiffies;
1643 	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1644 	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1645 
1646 	if (ttd < 0)
1647 		ttd = 0;
1648 
1649 	seq_printf(seq,
1650 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1651 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1652 		   i,
1653 		   src->s6_addr32[0], src->s6_addr32[1],
1654 		   src->s6_addr32[2], src->s6_addr32[3],
1655 		   inet_rsk(req)->ir_num,
1656 		   dest->s6_addr32[0], dest->s6_addr32[1],
1657 		   dest->s6_addr32[2], dest->s6_addr32[3],
1658 		   ntohs(inet_rsk(req)->ir_rmt_port),
1659 		   TCP_SYN_RECV,
1660 		   0, 0, /* could print option size, but that is af dependent. */
1661 		   1,   /* timers active (only the expire timer) */
1662 		   jiffies_to_clock_t(ttd),
1663 		   req->num_timeout,
1664 		   from_kuid_munged(seq_user_ns(seq), uid),
1665 		   0,  /* non standard timer */
1666 		   0, /* open_requests have no inode */
1667 		   0, req);
1668 }
1669 
1670 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1671 {
1672 	const struct in6_addr *dest, *src;
1673 	__u16 destp, srcp;
1674 	int timer_active;
1675 	unsigned long timer_expires;
1676 	const struct inet_sock *inet = inet_sk(sp);
1677 	const struct tcp_sock *tp = tcp_sk(sp);
1678 	const struct inet_connection_sock *icsk = inet_csk(sp);
1679 	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
1680 
1681 	dest  = &sp->sk_v6_daddr;
1682 	src   = &sp->sk_v6_rcv_saddr;
1683 	destp = ntohs(inet->inet_dport);
1684 	srcp  = ntohs(inet->inet_sport);
1685 
1686 	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1687 		timer_active	= 1;
1688 		timer_expires	= icsk->icsk_timeout;
1689 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1690 		timer_active	= 4;
1691 		timer_expires	= icsk->icsk_timeout;
1692 	} else if (timer_pending(&sp->sk_timer)) {
1693 		timer_active	= 2;
1694 		timer_expires	= sp->sk_timer.expires;
1695 	} else {
1696 		timer_active	= 0;
1697 		timer_expires = jiffies;
1698 	}
1699 
1700 	seq_printf(seq,
1701 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1702 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1703 		   i,
1704 		   src->s6_addr32[0], src->s6_addr32[1],
1705 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1706 		   dest->s6_addr32[0], dest->s6_addr32[1],
1707 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1708 		   sp->sk_state,
1709 		   tp->write_seq-tp->snd_una,
1710 		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1711 		   timer_active,
1712 		   jiffies_delta_to_clock_t(timer_expires - jiffies),
1713 		   icsk->icsk_retransmits,
1714 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1715 		   icsk->icsk_probes_out,
1716 		   sock_i_ino(sp),
1717 		   atomic_read(&sp->sk_refcnt), sp,
1718 		   jiffies_to_clock_t(icsk->icsk_rto),
1719 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1720 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1721 		   tp->snd_cwnd,
1722 		   sp->sk_state == TCP_LISTEN ?
1723 			(fastopenq ? fastopenq->max_qlen : 0) :
1724 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1725 		   );
1726 }
1727 
1728 static void get_timewait6_sock(struct seq_file *seq,
1729 			       struct inet_timewait_sock *tw, int i)
1730 {
1731 	long delta = tw->tw_timer.expires - jiffies;
1732 	const struct in6_addr *dest, *src;
1733 	__u16 destp, srcp;
1734 
1735 	dest = &tw->tw_v6_daddr;
1736 	src  = &tw->tw_v6_rcv_saddr;
1737 	destp = ntohs(tw->tw_dport);
1738 	srcp  = ntohs(tw->tw_sport);
1739 
1740 	seq_printf(seq,
1741 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1742 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1743 		   i,
1744 		   src->s6_addr32[0], src->s6_addr32[1],
1745 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1746 		   dest->s6_addr32[0], dest->s6_addr32[1],
1747 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1748 		   tw->tw_substate, 0, 0,
1749 		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1750 		   atomic_read(&tw->tw_refcnt), tw);
1751 }
1752 
1753 static int tcp6_seq_show(struct seq_file *seq, void *v)
1754 {
1755 	struct tcp_iter_state *st;
1756 	struct sock *sk = v;
1757 
1758 	if (v == SEQ_START_TOKEN) {
1759 		seq_puts(seq,
1760 			 "  sl  "
1761 			 "local_address                         "
1762 			 "remote_address                        "
1763 			 "st tx_queue rx_queue tr tm->when retrnsmt"
1764 			 "   uid  timeout inode\n");
1765 		goto out;
1766 	}
1767 	st = seq->private;
1768 
1769 	switch (st->state) {
1770 	case TCP_SEQ_STATE_LISTENING:
1771 	case TCP_SEQ_STATE_ESTABLISHED:
1772 		if (sk->sk_state == TCP_TIME_WAIT)
1773 			get_timewait6_sock(seq, v, st->num);
1774 		else
1775 			get_tcp6_sock(seq, v, st->num);
1776 		break;
1777 	case TCP_SEQ_STATE_OPENREQ:
1778 		get_openreq6(seq, v, st->num, st->uid);
1779 		break;
1780 	}
1781 out:
1782 	return 0;
1783 }
1784 
1785 static const struct file_operations tcp6_afinfo_seq_fops = {
1786 	.owner   = THIS_MODULE,
1787 	.open    = tcp_seq_open,
1788 	.read    = seq_read,
1789 	.llseek  = seq_lseek,
1790 	.release = seq_release_net
1791 };
1792 
1793 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1794 	.name		= "tcp6",
1795 	.family		= AF_INET6,
1796 	.seq_fops	= &tcp6_afinfo_seq_fops,
1797 	.seq_ops	= {
1798 		.show		= tcp6_seq_show,
1799 	},
1800 };
1801 
1802 int __net_init tcp6_proc_init(struct net *net)
1803 {
1804 	return tcp_proc_register(net, &tcp6_seq_afinfo);
1805 }
1806 
1807 void tcp6_proc_exit(struct net *net)
1808 {
1809 	tcp_proc_unregister(net, &tcp6_seq_afinfo);
1810 }
1811 #endif
1812 
1813 static void tcp_v6_clear_sk(struct sock *sk, int size)
1814 {
1815 	struct inet_sock *inet = inet_sk(sk);
1816 
1817 	/* we do not want to clear pinet6 field, because of RCU lookups */
1818 	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1819 
1820 	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1821 	memset(&inet->pinet6 + 1, 0, size);
1822 }
1823 
1824 struct proto tcpv6_prot = {
1825 	.name			= "TCPv6",
1826 	.owner			= THIS_MODULE,
1827 	.close			= tcp_close,
1828 	.connect		= tcp_v6_connect,
1829 	.disconnect		= tcp_disconnect,
1830 	.accept			= inet_csk_accept,
1831 	.ioctl			= tcp_ioctl,
1832 	.init			= tcp_v6_init_sock,
1833 	.destroy		= tcp_v6_destroy_sock,
1834 	.shutdown		= tcp_shutdown,
1835 	.setsockopt		= tcp_setsockopt,
1836 	.getsockopt		= tcp_getsockopt,
1837 	.recvmsg		= tcp_recvmsg,
1838 	.sendmsg		= tcp_sendmsg,
1839 	.sendpage		= tcp_sendpage,
1840 	.backlog_rcv		= tcp_v6_do_rcv,
1841 	.release_cb		= tcp_release_cb,
1842 	.hash			= inet_hash,
1843 	.unhash			= inet_unhash,
1844 	.get_port		= inet_csk_get_port,
1845 	.enter_memory_pressure	= tcp_enter_memory_pressure,
1846 	.stream_memory_free	= tcp_stream_memory_free,
1847 	.sockets_allocated	= &tcp_sockets_allocated,
1848 	.memory_allocated	= &tcp_memory_allocated,
1849 	.memory_pressure	= &tcp_memory_pressure,
1850 	.orphan_count		= &tcp_orphan_count,
1851 	.sysctl_mem		= sysctl_tcp_mem,
1852 	.sysctl_wmem		= sysctl_tcp_wmem,
1853 	.sysctl_rmem		= sysctl_tcp_rmem,
1854 	.max_header		= MAX_TCP_HEADER,
1855 	.obj_size		= sizeof(struct tcp6_sock),
1856 	.slab_flags		= SLAB_DESTROY_BY_RCU,
1857 	.twsk_prot		= &tcp6_timewait_sock_ops,
1858 	.rsk_prot		= &tcp6_request_sock_ops,
1859 	.h.hashinfo		= &tcp_hashinfo,
1860 	.no_autobind		= true,
1861 #ifdef CONFIG_COMPAT
1862 	.compat_setsockopt	= compat_tcp_setsockopt,
1863 	.compat_getsockopt	= compat_tcp_getsockopt,
1864 #endif
1865 #ifdef CONFIG_MEMCG_KMEM
1866 	.proto_cgroup		= tcp_proto_cgroup,
1867 #endif
1868 	.clear_sk		= tcp_v6_clear_sk,
1869 };
1870 
1871 static const struct inet6_protocol tcpv6_protocol = {
1872 	.early_demux	=	tcp_v6_early_demux,
1873 	.handler	=	tcp_v6_rcv,
1874 	.err_handler	=	tcp_v6_err,
1875 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1876 };
1877 
1878 static struct inet_protosw tcpv6_protosw = {
1879 	.type		=	SOCK_STREAM,
1880 	.protocol	=	IPPROTO_TCP,
1881 	.prot		=	&tcpv6_prot,
1882 	.ops		=	&inet6_stream_ops,
1883 	.flags		=	INET_PROTOSW_PERMANENT |
1884 				INET_PROTOSW_ICSK,
1885 };
1886 
1887 static int __net_init tcpv6_net_init(struct net *net)
1888 {
1889 	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1890 				    SOCK_RAW, IPPROTO_TCP, net);
1891 }
1892 
1893 static void __net_exit tcpv6_net_exit(struct net *net)
1894 {
1895 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1896 }
1897 
1898 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1899 {
1900 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1901 }
1902 
1903 static struct pernet_operations tcpv6_net_ops = {
1904 	.init	    = tcpv6_net_init,
1905 	.exit	    = tcpv6_net_exit,
1906 	.exit_batch = tcpv6_net_exit_batch,
1907 };
1908 
1909 int __init tcpv6_init(void)
1910 {
1911 	int ret;
1912 
1913 	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1914 	if (ret)
1915 		goto out;
1916 
1917 	/* register inet6 protocol */
1918 	ret = inet6_register_protosw(&tcpv6_protosw);
1919 	if (ret)
1920 		goto out_tcpv6_protocol;
1921 
1922 	ret = register_pernet_subsys(&tcpv6_net_ops);
1923 	if (ret)
1924 		goto out_tcpv6_protosw;
1925 out:
1926 	return ret;
1927 
1928 out_tcpv6_protosw:
1929 	inet6_unregister_protosw(&tcpv6_protosw);
1930 out_tcpv6_protocol:
1931 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1932 	goto out;
1933 }
1934 
1935 void tcpv6_exit(void)
1936 {
1937 	unregister_pernet_subsys(&tcpv6_net_ops);
1938 	inet6_unregister_protosw(&tcpv6_protosw);
1939 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1940 }
1941