xref: /openbmc/linux/net/ipv6/tcp_ipv6.c (revision 372892ec1151c895c7dec362f3246f089690cfc7)
1 /*
2  *	TCP over IPv6
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	Based on:
9  *	linux/net/ipv4/tcp.c
10  *	linux/net/ipv4/tcp_input.c
11  *	linux/net/ipv4/tcp_output.c
12  *
13  *	Fixes:
14  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
15  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
16  *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
17  *					a single port at the same time.
18  *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
19  *
20  *	This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25 
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46 
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/tcp_memcontrol.h>
65 #include <net/busy_poll.h>
66 
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
69 
70 #include <linux/crypto.h>
71 #include <linux/scatterlist.h>
72 
73 static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
74 static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
75 				      struct request_sock *req);
76 
77 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78 
79 static const struct inet_connection_sock_af_ops ipv6_mapped;
80 static const struct inet_connection_sock_af_ops ipv6_specific;
81 #ifdef CONFIG_TCP_MD5SIG
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
83 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
84 #else
85 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
86 						   const struct in6_addr *addr)
87 {
88 	return NULL;
89 }
90 #endif
91 
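/* Cache the validated RX dst (plus the incoming ifindex and a routing
 * cookie) on the socket so later packets for this flow can skip a route
 * lookup; tcp_v6_do_rcv() and tcp_v6_early_demux() below revalidate the
 * cached entry against rx_dst_cookie before trusting it.
 */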
92 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
93 {
94 	struct dst_entry *dst = skb_dst(skb);
95 
96 	if (dst) {
97 		const struct rt6_info *rt = (const struct rt6_info *)dst;
98 
99 		dst_hold(dst);
100 		sk->sk_rx_dst = dst;
101 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
102 		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
103 	}
104 }
105 
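/* Initial sequence numbers are derived from the connection 4-tuple via a
 * keyed hash plus a clock component (see secure_tcpv6_sequence_number()),
 * roughly along the lines of RFC 6528, so ISNs are hard to predict
 * off-path while a reused 4-tuple still gets a fresh, advancing ISN.
 */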
106 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
107 {
108 	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
109 					    ipv6_hdr(skb)->saddr.s6_addr32,
110 					    tcp_hdr(skb)->dest,
111 					    tcp_hdr(skb)->source);
112 }
113 
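/* Active open: validate the destination, handle the v4-mapped fallback,
 * build a flowi6 and route it, bind a local port via inet6_hash_connect(),
 * pick an initial sequence number and finally send the SYN through
 * tcp_connect().  Failures past the port hash unwind through late_failure
 * back to TCP_CLOSE.
 */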
114 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
115 			  int addr_len)
116 {
117 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
118 	struct inet_sock *inet = inet_sk(sk);
119 	struct inet_connection_sock *icsk = inet_csk(sk);
120 	struct ipv6_pinfo *np = inet6_sk(sk);
121 	struct tcp_sock *tp = tcp_sk(sk);
122 	struct in6_addr *saddr = NULL, *final_p, final;
123 	struct flowi6 fl6;
124 	struct dst_entry *dst;
125 	int addr_type;
126 	int err;
127 
128 	if (addr_len < SIN6_LEN_RFC2133)
129 		return -EINVAL;
130 
131 	if (usin->sin6_family != AF_INET6)
132 		return -EAFNOSUPPORT;
133 
134 	memset(&fl6, 0, sizeof(fl6));
135 
136 	if (np->sndflow) {
137 		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
138 		IP6_ECN_flow_init(fl6.flowlabel);
139 		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
140 			struct ip6_flowlabel *flowlabel;
141 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
142 			if (!flowlabel)
143 				return -EINVAL;
144 			fl6_sock_release(flowlabel);
145 		}
146 	}
147 
148 	/*
149 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
150 	 */
151 
152 	if (ipv6_addr_any(&usin->sin6_addr))
153 		usin->sin6_addr.s6_addr[15] = 0x1;
154 
155 	addr_type = ipv6_addr_type(&usin->sin6_addr);
156 
157 	if (addr_type & IPV6_ADDR_MULTICAST)
158 		return -ENETUNREACH;
159 
160 	if (addr_type&IPV6_ADDR_LINKLOCAL) {
161 		if (addr_len >= sizeof(struct sockaddr_in6) &&
162 		    usin->sin6_scope_id) {
163 			/* If an interface was set while binding, the
164 			 * indices must coincide.
165 			 */
166 			if (sk->sk_bound_dev_if &&
167 			    sk->sk_bound_dev_if != usin->sin6_scope_id)
168 				return -EINVAL;
169 
170 			sk->sk_bound_dev_if = usin->sin6_scope_id;
171 		}
172 
173 		/* Connecting to a link-local address requires an interface */
174 		if (!sk->sk_bound_dev_if)
175 			return -EINVAL;
176 	}
177 
178 	if (tp->rx_opt.ts_recent_stamp &&
179 	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
180 		tp->rx_opt.ts_recent = 0;
181 		tp->rx_opt.ts_recent_stamp = 0;
182 		tp->write_seq = 0;
183 	}
184 
185 	sk->sk_v6_daddr = usin->sin6_addr;
186 	np->flow_label = fl6.flowlabel;
187 
188 	/*
189 	 *	TCP over IPv4
190 	 */
191 
192 	if (addr_type == IPV6_ADDR_MAPPED) {
193 		u32 exthdrlen = icsk->icsk_ext_hdr_len;
194 		struct sockaddr_in sin;
195 
196 		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
197 
198 		if (__ipv6_only_sock(sk))
199 			return -ENETUNREACH;
200 
201 		sin.sin_family = AF_INET;
202 		sin.sin_port = usin->sin6_port;
203 		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
204 
205 		icsk->icsk_af_ops = &ipv6_mapped;
206 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
207 #ifdef CONFIG_TCP_MD5SIG
208 		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
209 #endif
210 
211 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
212 
213 		if (err) {
214 			icsk->icsk_ext_hdr_len = exthdrlen;
215 			icsk->icsk_af_ops = &ipv6_specific;
216 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
217 #ifdef CONFIG_TCP_MD5SIG
218 			tp->af_specific = &tcp_sock_ipv6_specific;
219 #endif
220 			goto failure;
221 		}
222 		np->saddr = sk->sk_v6_rcv_saddr;
223 
224 		return err;
225 	}
226 
227 	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
228 		saddr = &sk->sk_v6_rcv_saddr;
229 
230 	fl6.flowi6_proto = IPPROTO_TCP;
231 	fl6.daddr = sk->sk_v6_daddr;
232 	fl6.saddr = saddr ? *saddr : np->saddr;
233 	fl6.flowi6_oif = sk->sk_bound_dev_if;
234 	fl6.flowi6_mark = sk->sk_mark;
235 	fl6.fl6_dport = usin->sin6_port;
236 	fl6.fl6_sport = inet->inet_sport;
237 
238 	final_p = fl6_update_dst(&fl6, np->opt, &final);
239 
240 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
241 
242 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
243 	if (IS_ERR(dst)) {
244 		err = PTR_ERR(dst);
245 		goto failure;
246 	}
247 
248 	if (!saddr) {
249 		saddr = &fl6.saddr;
250 		sk->sk_v6_rcv_saddr = *saddr;
251 	}
252 
253 	/* set the source address */
254 	np->saddr = *saddr;
255 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
256 
257 	sk->sk_gso_type = SKB_GSO_TCPV6;
258 	__ip6_dst_store(sk, dst, NULL, NULL);
259 
260 	if (tcp_death_row.sysctl_tw_recycle &&
261 	    !tp->rx_opt.ts_recent_stamp &&
262 	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
263 		tcp_fetch_timewait_stamp(sk, dst);
264 
265 	icsk->icsk_ext_hdr_len = 0;
266 	if (np->opt)
267 		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
268 					  np->opt->opt_nflen);
269 
270 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
271 
272 	inet->inet_dport = usin->sin6_port;
273 
274 	tcp_set_state(sk, TCP_SYN_SENT);
275 	err = inet6_hash_connect(&tcp_death_row, sk);
276 	if (err)
277 		goto late_failure;
278 
279 	sk_set_txhash(sk);
280 
281 	if (!tp->write_seq && likely(!tp->repair))
282 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
283 							     sk->sk_v6_daddr.s6_addr32,
284 							     inet->inet_sport,
285 							     inet->inet_dport);
286 
287 	err = tcp_connect(sk);
288 	if (err)
289 		goto late_failure;
290 
291 	return 0;
292 
293 late_failure:
294 	tcp_set_state(sk, TCP_CLOSE);
295 	__sk_dst_reset(sk);
296 failure:
297 	inet->inet_dport = 0;
298 	sk->sk_route_caps = 0;
299 	return err;
300 }
301 
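/* Invoked (possibly deferred from the ICMPv6 handler via tsq_flags) once
 * the socket lock is available: update the cached path MTU and, if the
 * current MSS no longer fits, shrink it and retransmit what is in flight.
 */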
302 static void tcp_v6_mtu_reduced(struct sock *sk)
303 {
304 	struct dst_entry *dst;
305 
306 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
307 		return;
308 
309 	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
310 	if (!dst)
311 		return;
312 
313 	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
314 		tcp_sync_mss(sk, dst_mtu(dst));
315 		tcp_simple_retransmit(sk);
316 	}
317 }
318 
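/* ICMPv6 error handler.  Looks up the socket for the offending segment
 * and dispatches on the error type: redirects refresh the cached route,
 * PKT_TOOBIG feeds path-MTU discovery (deferred if the socket is owned by
 * user context), and the remaining errors are converted to errno values
 * and reported on the socket.
 */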
319 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
320 		u8 type, u8 code, int offset, __be32 info)
321 {
322 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
323 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
324 	struct net *net = dev_net(skb->dev);
325 	struct request_sock *fastopen;
326 	struct ipv6_pinfo *np;
327 	struct tcp_sock *tp;
328 	__u32 seq, snd_una;
329 	struct sock *sk;
330 	int err;
331 
332 	sk = __inet6_lookup_established(net, &tcp_hashinfo,
333 					&hdr->daddr, th->dest,
334 					&hdr->saddr, ntohs(th->source),
335 					skb->dev->ifindex);
336 
337 	if (!sk) {
338 		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
339 				   ICMP6_MIB_INERRORS);
340 		return;
341 	}
342 
343 	if (sk->sk_state == TCP_TIME_WAIT) {
344 		inet_twsk_put(inet_twsk(sk));
345 		return;
346 	}
347 	seq = ntohl(th->seq);
348 	if (sk->sk_state == TCP_NEW_SYN_RECV)
349 		return tcp_req_err(sk, seq);
350 
351 	bh_lock_sock(sk);
352 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
353 		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
354 
355 	if (sk->sk_state == TCP_CLOSE)
356 		goto out;
357 
358 	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
359 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
360 		goto out;
361 	}
362 
363 	tp = tcp_sk(sk);
364 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
365 	fastopen = tp->fastopen_rsk;
366 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
367 	if (sk->sk_state != TCP_LISTEN &&
368 	    !between(seq, snd_una, tp->snd_nxt)) {
369 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
370 		goto out;
371 	}
372 
373 	np = inet6_sk(sk);
374 
375 	if (type == NDISC_REDIRECT) {
376 		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
377 
378 		if (dst)
379 			dst->ops->redirect(dst, sk, skb);
380 		goto out;
381 	}
382 
383 	if (type == ICMPV6_PKT_TOOBIG) {
384 		/* We are not interested in TCP_LISTEN and open_requests
385 		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
386 		 * they should go through unfragmented).
387 		 */
388 		if (sk->sk_state == TCP_LISTEN)
389 			goto out;
390 
391 		if (!ip6_sk_accept_pmtu(sk))
392 			goto out;
393 
394 		tp->mtu_info = ntohl(info);
395 		if (!sock_owned_by_user(sk))
396 			tcp_v6_mtu_reduced(sk);
397 		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
398 					   &tp->tsq_flags))
399 			sock_hold(sk);
400 		goto out;
401 	}
402 
403 	icmpv6_err_convert(type, code, &err);
404 
405 	/* Might be for a request_sock */
406 	switch (sk->sk_state) {
407 	case TCP_SYN_SENT:
408 	case TCP_SYN_RECV:
409 		/* Only in fast or simultaneous open. If a fast open socket
410 		 * is already accepted, it is treated as a connected one below.
411 		 */
412 		if (fastopen && !fastopen->sk)
413 			break;
414 
415 		if (!sock_owned_by_user(sk)) {
416 			sk->sk_err = err;
417 			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
418 
419 			tcp_done(sk);
420 		} else
421 			sk->sk_err_soft = err;
422 		goto out;
423 	}
424 
425 	if (!sock_owned_by_user(sk) && np->recverr) {
426 		sk->sk_err = err;
427 		sk->sk_error_report(sk);
428 	} else
429 		sk->sk_err_soft = err;
430 
431 out:
432 	bh_unlock_sock(sk);
433 	sock_put(sk);
434 }
435 
436 
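/* Build and transmit a SYN-ACK for a pending request: grab a route if the
 * caller did not pass one, let tcp_make_synack() construct the segment,
 * checksum it against the v6 pseudo-header and push it out via ip6_xmit().
 */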
437 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
438 			      struct flowi *fl,
439 			      struct request_sock *req,
440 			      u16 queue_mapping,
441 			      struct tcp_fastopen_cookie *foc)
442 {
443 	struct inet_request_sock *ireq = inet_rsk(req);
444 	struct ipv6_pinfo *np = inet6_sk(sk);
445 	struct flowi6 *fl6 = &fl->u.ip6;
446 	struct sk_buff *skb;
447 	int err = -ENOMEM;
448 
449 	/* First, grab a route. */
450 	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
451 		goto done;
452 
453 	skb = tcp_make_synack(sk, dst, req, foc);
454 
455 	if (skb) {
456 		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
457 				    &ireq->ir_v6_rmt_addr);
458 
459 		fl6->daddr = ireq->ir_v6_rmt_addr;
460 		if (np->repflow && ireq->pktopts)
461 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
462 
463 		skb_set_queue_mapping(skb, queue_mapping);
464 		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
465 		err = net_xmit_eval(err);
466 	}
467 
468 done:
469 	return err;
470 }
471 
472 
473 static void tcp_v6_reqsk_destructor(struct request_sock *req)
474 {
475 	kfree_skb(inet_rsk(req)->pktopts);
476 }
477 
478 #ifdef CONFIG_TCP_MD5SIG
479 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
480 						   const struct in6_addr *addr)
481 {
482 	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
483 }
484 
485 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
486 						const struct sock *addr_sk)
487 {
488 	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
489 }
490 
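/* Parse a TCP_MD5SIG setsockopt request.  A zero key length deletes the
 * key for the given peer; otherwise the key is added (v4-mapped peers are
 * stored as AF_INET keys so both stacks agree).  A userspace sketch of
 * the matching call, assuming a socket "fd", a peer address "peer_sin6"
 * and a key "secret" (illustrative only, error handling omitted):
 *
 *	struct tcp_md5sig md5;
 *
 *	memset(&md5, 0, sizeof(md5));
 *	memcpy(&md5.tcpm_addr, &peer_sin6, sizeof(peer_sin6));
 *	md5.tcpm_keylen = strlen(secret);
 *	memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */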
491 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
492 				 int optlen)
493 {
494 	struct tcp_md5sig cmd;
495 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
496 
497 	if (optlen < sizeof(cmd))
498 		return -EINVAL;
499 
500 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
501 		return -EFAULT;
502 
503 	if (sin6->sin6_family != AF_INET6)
504 		return -EINVAL;
505 
506 	if (!cmd.tcpm_keylen) {
507 		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
508 			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
509 					      AF_INET);
510 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
511 				      AF_INET6);
512 	}
513 
514 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
515 		return -EINVAL;
516 
517 	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
518 		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
519 				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
520 
521 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
522 			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
523 }
524 
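/* Feed the TCP-over-IPv6 pseudo-header into the MD5 transform (the TCP
 * MD5 option of RFC 2385 covers a pseudo-header just as the checksum
 * does).  Per RFC 2460 the pseudo-header consists of the source and
 * destination addresses, the upper-layer payload length and the
 * next-header value, which is what tcp6_pseudohdr lays out below.
 */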
525 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
526 					const struct in6_addr *daddr,
527 					const struct in6_addr *saddr, int nbytes)
528 {
529 	struct tcp6_pseudohdr *bp;
530 	struct scatterlist sg;
531 
532 	bp = &hp->md5_blk.ip6;
533 	/* 1. TCP pseudo-header (RFC2460) */
534 	bp->saddr = *saddr;
535 	bp->daddr = *daddr;
536 	bp->protocol = cpu_to_be32(IPPROTO_TCP);
537 	bp->len = cpu_to_be32(nbytes);
538 
539 	sg_init_one(&sg, bp, sizeof(*bp));
540 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
541 }
542 
543 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
544 			       const struct in6_addr *daddr, struct in6_addr *saddr,
545 			       const struct tcphdr *th)
546 {
547 	struct tcp_md5sig_pool *hp;
548 	struct hash_desc *desc;
549 
550 	hp = tcp_get_md5sig_pool();
551 	if (!hp)
552 		goto clear_hash_noput;
553 	desc = &hp->md5_desc;
554 
555 	if (crypto_hash_init(desc))
556 		goto clear_hash;
557 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
558 		goto clear_hash;
559 	if (tcp_md5_hash_header(hp, th))
560 		goto clear_hash;
561 	if (tcp_md5_hash_key(hp, key))
562 		goto clear_hash;
563 	if (crypto_hash_final(desc, md5_hash))
564 		goto clear_hash;
565 
566 	tcp_put_md5sig_pool();
567 	return 0;
568 
569 clear_hash:
570 	tcp_put_md5sig_pool();
571 clear_hash_noput:
572 	memset(md5_hash, 0, 16);
573 	return 1;
574 }
575 
576 static int tcp_v6_md5_hash_skb(char *md5_hash,
577 			       const struct tcp_md5sig_key *key,
578 			       const struct sock *sk,
579 			       const struct sk_buff *skb)
580 {
581 	const struct in6_addr *saddr, *daddr;
582 	struct tcp_md5sig_pool *hp;
583 	struct hash_desc *desc;
584 	const struct tcphdr *th = tcp_hdr(skb);
585 
586 	if (sk) { /* valid for establish/request sockets */
587 		saddr = &sk->sk_v6_rcv_saddr;
588 		daddr = &sk->sk_v6_daddr;
589 	} else {
590 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
591 		saddr = &ip6h->saddr;
592 		daddr = &ip6h->daddr;
593 	}
594 
595 	hp = tcp_get_md5sig_pool();
596 	if (!hp)
597 		goto clear_hash_noput;
598 	desc = &hp->md5_desc;
599 
600 	if (crypto_hash_init(desc))
601 		goto clear_hash;
602 
603 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
604 		goto clear_hash;
605 	if (tcp_md5_hash_header(hp, th))
606 		goto clear_hash;
607 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
608 		goto clear_hash;
609 	if (tcp_md5_hash_key(hp, key))
610 		goto clear_hash;
611 	if (crypto_hash_final(desc, md5_hash))
612 		goto clear_hash;
613 
614 	tcp_put_md5sig_pool();
615 	return 0;
616 
617 clear_hash:
618 	tcp_put_md5sig_pool();
619 clear_hash_noput:
620 	memset(md5_hash, 0, 16);
621 	return 1;
622 }
623 
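/* Validate the MD5 signature on an incoming segment.  Four cases: neither
 * side uses MD5 (accept), only one side does (drop and count), or both
 * do, in which case the segment is dropped unless the recomputed digest
 * matches the one carried in the option.  Returns true when the packet
 * should be dropped.
 */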
624 static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
625 {
626 	const __u8 *hash_location = NULL;
627 	struct tcp_md5sig_key *hash_expected;
628 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
629 	const struct tcphdr *th = tcp_hdr(skb);
630 	int genhash;
631 	u8 newhash[16];
632 
633 	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
634 	hash_location = tcp_parse_md5sig_option(th);
635 
636 	/* We've parsed the options - do we have a hash? */
637 	if (!hash_expected && !hash_location)
638 		return false;
639 
640 	if (hash_expected && !hash_location) {
641 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
642 		return true;
643 	}
644 
645 	if (!hash_expected && hash_location) {
646 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
647 		return true;
648 	}
649 
650 	/* check the signature */
651 	genhash = tcp_v6_md5_hash_skb(newhash,
652 				      hash_expected,
653 				      NULL, skb);
654 
655 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
656 		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
657 				     genhash ? "failed" : "mismatch",
658 				     &ip6h->saddr, ntohs(th->source),
659 				     &ip6h->daddr, ntohs(th->dest));
660 		return true;
661 	}
662 	return false;
663 }
664 #endif
665 
666 static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
667 			    struct sk_buff *skb)
668 {
669 	struct inet_request_sock *ireq = inet_rsk(req);
670 	struct ipv6_pinfo *np = inet6_sk(sk);
671 
672 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
673 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
674 
675 	/* So that link-locals have meaning */
676 	if (!sk->sk_bound_dev_if &&
677 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
678 		ireq->ir_iif = tcp_v6_iif(skb);
679 
680 	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
681 	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
682 	     np->rxopt.bits.rxinfo ||
683 	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
684 	     np->rxopt.bits.rxohlim || np->repflow)) {
685 		atomic_inc(&skb->users);
686 		ireq->pktopts = skb;
687 	}
688 }
689 
690 static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
691 					  const struct request_sock *req,
692 					  bool *strict)
693 {
694 	if (strict)
695 		*strict = true;
696 	return inet6_csk_route_req(sk, &fl->u.ip6, req);
697 }
698 
699 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
700 	.family		=	AF_INET6,
701 	.obj_size	=	sizeof(struct tcp6_request_sock),
702 	.rtx_syn_ack	=	tcp_rtx_synack,
703 	.send_ack	=	tcp_v6_reqsk_send_ack,
704 	.destructor	=	tcp_v6_reqsk_destructor,
705 	.send_reset	=	tcp_v6_send_reset,
706 	.syn_ack_timeout =	tcp_syn_ack_timeout,
707 };
708 
709 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
710 	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
711 				sizeof(struct ipv6hdr),
712 #ifdef CONFIG_TCP_MD5SIG
713 	.req_md5_lookup	=	tcp_v6_md5_lookup,
714 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
715 #endif
716 	.init_req	=	tcp_v6_init_req,
717 #ifdef CONFIG_SYN_COOKIES
718 	.cookie_init_seq =	cookie_v6_init_sequence,
719 #endif
720 	.route_req	=	tcp_v6_route_req,
721 	.init_seq	=	tcp_v6_init_sequence,
722 	.send_synack	=	tcp_v6_send_synack,
723 	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
724 };
725 
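/* Transmit a bare ACK or RST that is not tied to a full socket: the reply
 * is built from the offending segment with addresses and ports swapped,
 * optionally carrying timestamp and MD5 options, and is sent through the
 * per-namespace control socket (net->ipv6.tcp_sk).
 */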
726 static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
727 				 u32 ack, u32 win, u32 tsval, u32 tsecr,
728 				 int oif, struct tcp_md5sig_key *key, int rst,
729 				 u8 tclass, u32 label)
730 {
731 	const struct tcphdr *th = tcp_hdr(skb);
732 	struct tcphdr *t1;
733 	struct sk_buff *buff;
734 	struct flowi6 fl6;
735 	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
736 	struct sock *ctl_sk = net->ipv6.tcp_sk;
737 	unsigned int tot_len = sizeof(struct tcphdr);
738 	struct dst_entry *dst;
739 	__be32 *topt;
740 
741 	if (tsecr)
742 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
743 #ifdef CONFIG_TCP_MD5SIG
744 	if (key)
745 		tot_len += TCPOLEN_MD5SIG_ALIGNED;
746 #endif
747 
748 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
749 			 GFP_ATOMIC);
750 	if (!buff)
751 		return;
752 
753 	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
754 
755 	t1 = (struct tcphdr *) skb_push(buff, tot_len);
756 	skb_reset_transport_header(buff);
757 
758 	/* Swap the send and the receive. */
759 	memset(t1, 0, sizeof(*t1));
760 	t1->dest = th->source;
761 	t1->source = th->dest;
762 	t1->doff = tot_len / 4;
763 	t1->seq = htonl(seq);
764 	t1->ack_seq = htonl(ack);
765 	t1->ack = !rst || !th->ack;
766 	t1->rst = rst;
767 	t1->window = htons(win);
768 
769 	topt = (__be32 *)(t1 + 1);
770 
771 	if (tsecr) {
772 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
773 				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
774 		*topt++ = htonl(tsval);
775 		*topt++ = htonl(tsecr);
776 	}
777 
778 #ifdef CONFIG_TCP_MD5SIG
779 	if (key) {
780 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
781 				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
782 		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
783 				    &ipv6_hdr(skb)->saddr,
784 				    &ipv6_hdr(skb)->daddr, t1);
785 	}
786 #endif
787 
788 	memset(&fl6, 0, sizeof(fl6));
789 	fl6.daddr = ipv6_hdr(skb)->saddr;
790 	fl6.saddr = ipv6_hdr(skb)->daddr;
791 	fl6.flowlabel = label;
792 
793 	buff->ip_summed = CHECKSUM_PARTIAL;
794 	buff->csum = 0;
795 
796 	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
797 
798 	fl6.flowi6_proto = IPPROTO_TCP;
799 	if (rt6_need_strict(&fl6.daddr) && !oif)
800 		fl6.flowi6_oif = tcp_v6_iif(skb);
801 	else
802 		fl6.flowi6_oif = oif;
803 	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
804 	fl6.fl6_dport = t1->dest;
805 	fl6.fl6_sport = t1->source;
806 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
807 
808 	/* Pass a socket to ip6_dst_lookup_flow even when it is for a RST;
809 	 * the underlying function will use it to retrieve the network
810 	 * namespace.
811 	 */
812 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
813 	if (!IS_ERR(dst)) {
814 		skb_dst_set(buff, dst);
815 		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
816 		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
817 		if (rst)
818 			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
819 		return;
820 	}
821 
822 	kfree_skb(buff);
823 }
824 
825 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
826 {
827 	const struct tcphdr *th = tcp_hdr(skb);
828 	u32 seq = 0, ack_seq = 0;
829 	struct tcp_md5sig_key *key = NULL;
830 #ifdef CONFIG_TCP_MD5SIG
831 	const __u8 *hash_location = NULL;
832 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
833 	unsigned char newhash[16];
834 	int genhash;
835 	struct sock *sk1 = NULL;
836 #endif
837 	int oif;
838 
839 	if (th->rst)
840 		return;
841 
842 	/* If sk is not NULL, we did a successful lookup and the incoming
843 	 * route had to be correct; prequeue might have dropped our dst.
844 	 */
845 	if (!sk && !ipv6_unicast_destination(skb))
846 		return;
847 
848 #ifdef CONFIG_TCP_MD5SIG
849 	hash_location = tcp_parse_md5sig_option(th);
850 	if (!sk && hash_location) {
851 		/*
852 		 * The active side is lost. Try to find the listening socket
853 		 * through the source port, and then find the MD5 key through
854 		 * the listening socket. We do not lose security here: the
855 		 * incoming packet is checked against the MD5 hash of the key
856 		 * we found, and no RST is generated if the hash doesn't match.
857 		 */
858 		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
859 					   &tcp_hashinfo, &ipv6h->saddr,
860 					   th->source, &ipv6h->daddr,
861 					   ntohs(th->source), tcp_v6_iif(skb));
862 		if (!sk1)
863 			return;
864 
865 		rcu_read_lock();
866 		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
867 		if (!key)
868 			goto release_sk1;
869 
870 		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
871 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
872 			goto release_sk1;
873 	} else {
874 		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
875 	}
876 #endif
877 
878 	if (th->ack)
879 		seq = ntohl(th->ack_seq);
880 	else
881 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
882 			  (th->doff << 2);
883 
884 	oif = sk ? sk->sk_bound_dev_if : 0;
885 	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
886 
887 #ifdef CONFIG_TCP_MD5SIG
888 release_sk1:
889 	if (sk1) {
890 		rcu_read_unlock();
891 		sock_put(sk1);
892 	}
893 #endif
894 }
895 
896 static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
897 			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
898 			    struct tcp_md5sig_key *key, u8 tclass,
899 			    u32 label)
900 {
901 	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
902 			     tclass, label);
903 }
904 
905 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
906 {
907 	struct inet_timewait_sock *tw = inet_twsk(sk);
908 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
909 
910 	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
911 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
912 			tcp_time_stamp + tcptw->tw_ts_offset,
913 			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
914 			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
915 
916 	inet_twsk_put(tw);
917 }
918 
919 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
920 				  struct request_sock *req)
921 {
922 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
923 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
924 	 */
925 	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
926 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
927 			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
928 			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
929 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
930 			0, 0);
931 }
932 
933 
934 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
935 {
936 	const struct tcphdr *th = tcp_hdr(skb);
937 	struct request_sock *req;
938 	struct sock *nsk;
939 
940 	/* Find possible connection requests. */
941 	req = inet6_csk_search_req(sk, th->source,
942 				   &ipv6_hdr(skb)->saddr,
943 				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
944 	if (req) {
945 		nsk = tcp_check_req(sk, skb, req, false);
946 		if (!nsk || nsk == sk)
947 			reqsk_put(req);
948 		return nsk;
949 	}
950 	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
951 					 &ipv6_hdr(skb)->saddr, th->source,
952 					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
953 					 tcp_v6_iif(skb));
954 
955 	if (nsk) {
956 		if (nsk->sk_state != TCP_TIME_WAIT) {
957 			bh_lock_sock(nsk);
958 			return nsk;
959 		}
960 		inet_twsk_put(inet_twsk(nsk));
961 		return NULL;
962 	}
963 
964 #ifdef CONFIG_SYN_COOKIES
965 	if (!th->syn)
966 		sk = cookie_v6_check(sk, skb);
967 #endif
968 	return sk;
969 }
970 
971 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
972 {
973 	if (skb->protocol == htons(ETH_P_IP))
974 		return tcp_v4_conn_request(sk, skb);
975 
976 	if (!ipv6_unicast_destination(skb))
977 		goto drop;
978 
979 	return tcp_conn_request(&tcp6_request_sock_ops,
980 				&tcp_request_sock_ipv6_ops, sk, skb);
981 
982 drop:
983 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
984 	return 0; /* don't send reset */
985 }
986 
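/* Create the child socket once the handshake completes.  The ETH_P_IP
 * branch handles v4-mapped peers by delegating to tcp_v4_syn_recv_sock()
 * and re-pointing icsk_af_ops at the mapped variants; the native path
 * clones the listener via tcp_create_openreq_child(), fills in the
 * addresses from the request and inherits the bound port, the listener's
 * IPv6 options and any matching MD5 key.
 */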
987 static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
988 					 struct request_sock *req,
989 					 struct dst_entry *dst)
990 {
991 	struct inet_request_sock *ireq;
992 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
993 	struct tcp6_sock *newtcp6sk;
994 	struct inet_sock *newinet;
995 	struct tcp_sock *newtp;
996 	struct sock *newsk;
997 #ifdef CONFIG_TCP_MD5SIG
998 	struct tcp_md5sig_key *key;
999 #endif
1000 	struct flowi6 fl6;
1001 
1002 	if (skb->protocol == htons(ETH_P_IP)) {
1003 		/*
1004 		 *	v6 mapped
1005 		 */
1006 
1007 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1008 
1009 		if (!newsk)
1010 			return NULL;
1011 
1012 		newtcp6sk = (struct tcp6_sock *)newsk;
1013 		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1014 
1015 		newinet = inet_sk(newsk);
1016 		newnp = inet6_sk(newsk);
1017 		newtp = tcp_sk(newsk);
1018 
1019 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1020 
1021 		newnp->saddr = newsk->sk_v6_rcv_saddr;
1022 
1023 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1024 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1025 #ifdef CONFIG_TCP_MD5SIG
1026 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1027 #endif
1028 
1029 		newnp->ipv6_ac_list = NULL;
1030 		newnp->ipv6_fl_list = NULL;
1031 		newnp->pktoptions  = NULL;
1032 		newnp->opt	   = NULL;
1033 		newnp->mcast_oif   = tcp_v6_iif(skb);
1034 		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1035 		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1036 		if (np->repflow)
1037 			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1038 
1039 		/*
1040 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1041 		 * here, tcp_create_openreq_child now does this for us, see the comment in
1042 		 * that function for the gory details. -acme
1043 		 */
1044 
1045 		/* This is a tricky place. Until this moment the IPv4 TCP code
1046 		   worked with the IPv6 icsk.icsk_af_ops.
1047 		   Sync it now.
1048 		 */
1049 		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1050 
1051 		return newsk;
1052 	}
1053 
1054 	ireq = inet_rsk(req);
1055 
1056 	if (sk_acceptq_is_full(sk))
1057 		goto out_overflow;
1058 
1059 	if (!dst) {
1060 		dst = inet6_csk_route_req(sk, &fl6, req);
1061 		if (!dst)
1062 			goto out;
1063 	}
1064 
1065 	newsk = tcp_create_openreq_child(sk, req, skb);
1066 	if (!newsk)
1067 		goto out_nonewsk;
1068 
1069 	/*
1070 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1071 	 * count here, tcp_create_openreq_child now does this for us, see the
1072 	 * comment in that function for the gory details. -acme
1073 	 */
1074 
1075 	newsk->sk_gso_type = SKB_GSO_TCPV6;
1076 	__ip6_dst_store(newsk, dst, NULL, NULL);
1077 	inet6_sk_rx_dst_set(newsk, skb);
1078 
1079 	newtcp6sk = (struct tcp6_sock *)newsk;
1080 	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1081 
1082 	newtp = tcp_sk(newsk);
1083 	newinet = inet_sk(newsk);
1084 	newnp = inet6_sk(newsk);
1085 
1086 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1087 
1088 	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1089 	newnp->saddr = ireq->ir_v6_loc_addr;
1090 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1091 	newsk->sk_bound_dev_if = ireq->ir_iif;
1092 
1093 	/* Now IPv6 options...
1094 
1095 	   First: no IPv4 options.
1096 	 */
1097 	newinet->inet_opt = NULL;
1098 	newnp->ipv6_ac_list = NULL;
1099 	newnp->ipv6_fl_list = NULL;
1100 
1101 	/* Clone RX bits */
1102 	newnp->rxopt.all = np->rxopt.all;
1103 
1104 	/* Clone pktoptions received with SYN */
1105 	newnp->pktoptions = NULL;
1106 	if (ireq->pktopts) {
1107 		newnp->pktoptions = skb_clone(ireq->pktopts,
1108 					      sk_gfp_atomic(sk, GFP_ATOMIC));
1109 		consume_skb(ireq->pktopts);
1110 		ireq->pktopts = NULL;
1111 		if (newnp->pktoptions)
1112 			skb_set_owner_r(newnp->pktoptions, newsk);
1113 	}
1114 	newnp->opt	  = NULL;
1115 	newnp->mcast_oif  = tcp_v6_iif(skb);
1116 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1117 	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1118 	if (np->repflow)
1119 		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1120 
1121 	/* Clone native IPv6 options from the listening socket (if any).
1122 
1123 	   Yes, keeping a reference count would be much more clever,
1124 	   but we do one more thing here: reattach optmem
1125 	   to newsk.
1126 	 */
1127 	if (np->opt)
1128 		newnp->opt = ipv6_dup_options(newsk, np->opt);
1129 
1130 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1131 	if (newnp->opt)
1132 		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1133 						     newnp->opt->opt_flen);
1134 
1135 	tcp_ca_openreq_child(newsk, dst);
1136 
1137 	tcp_sync_mss(newsk, dst_mtu(dst));
1138 	newtp->advmss = dst_metric_advmss(dst);
1139 	if (tcp_sk(sk)->rx_opt.user_mss &&
1140 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1141 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1142 
1143 	tcp_initialize_rcv_mss(newsk);
1144 
1145 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1146 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1147 
1148 #ifdef CONFIG_TCP_MD5SIG
1149 	/* Copy over the MD5 key from the original socket */
1150 	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1151 	if (key) {
1152 		/* We're using one, so create a matching key
1153 		 * on the newsk structure. If we fail to get
1154 		 * memory, then we end up not copying the key
1155 		 * across. Shucks.
1156 		 */
1157 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1158 			       AF_INET6, key->key, key->keylen,
1159 			       sk_gfp_atomic(sk, GFP_ATOMIC));
1160 	}
1161 #endif
1162 
1163 	if (__inet_inherit_port(sk, newsk) < 0) {
1164 		inet_csk_prepare_forced_close(newsk);
1165 		tcp_done(newsk);
1166 		goto out;
1167 	}
1168 	__inet_hash(newsk, NULL);
1169 
1170 	return newsk;
1171 
1172 out_overflow:
1173 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1174 out_nonewsk:
1175 	dst_release(dst);
1176 out:
1177 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1178 	return NULL;
1179 }
1180 
1181 /* The socket must have its spinlock held when we get
1182  * here.
1183  *
1184  * We have a potential double-lock case here, so even when
1185  * doing backlog processing we use the BH locking scheme.
1186  * This is because we cannot sleep with the original spinlock
1187  * held.
1188  */
1189 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1190 {
1191 	struct ipv6_pinfo *np = inet6_sk(sk);
1192 	struct tcp_sock *tp;
1193 	struct sk_buff *opt_skb = NULL;
1194 
1195 	/* Imagine: the socket is IPv6. An IPv4 packet arrives,
1196 	   goes to the IPv4 receive handler and is backlogged.
1197 	   From the backlog it always goes here. Kerboom...
1198 	   Fortunately, tcp_rcv_established and rcv_established
1199 	   handle them correctly, but that is not the case with
1200 	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1201 	 */
1202 
1203 	if (skb->protocol == htons(ETH_P_IP))
1204 		return tcp_v4_do_rcv(sk, skb);
1205 
1206 	if (sk_filter(sk, skb))
1207 		goto discard;
1208 
1209 	/*
1210 	 *	socket locking is here for SMP purposes as backlog rcv
1211 	 *	is currently called with bh processing disabled.
1212 	 */
1213 
1214 	/* Do Stevens' IPV6_PKTOPTIONS.
1215 
1216 	   Yes, guys, it is the only place in our code where we
1217 	   can do this without affecting IPv4.
1218 	   The rest of the code is protocol independent,
1219 	   and I do not like the idea of uglifying IPv4.
1220 
1221 	   Actually, the whole idea behind IPV6_PKTOPTIONS
1222 	   does not look very well thought out. For now we latch
1223 	   the options received in the last packet enqueued
1224 	   by TCP. Feel free to propose a better solution.
1225 					       --ANK (980728)
1226 	 */
1227 	if (np->rxopt.all)
1228 		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
1229 
1230 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1231 		struct dst_entry *dst = sk->sk_rx_dst;
1232 
1233 		sock_rps_save_rxhash(sk, skb);
1234 		sk_mark_napi_id(sk, skb);
1235 		if (dst) {
1236 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1237 			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1238 				dst_release(dst);
1239 				sk->sk_rx_dst = NULL;
1240 			}
1241 		}
1242 
1243 		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1244 		if (opt_skb)
1245 			goto ipv6_pktoptions;
1246 		return 0;
1247 	}
1248 
1249 	if (tcp_checksum_complete(skb))
1250 		goto csum_err;
1251 
1252 	if (sk->sk_state == TCP_LISTEN) {
1253 		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1254 		if (!nsk)
1255 			goto discard;
1256 
1257 		/*
1258 		 * Queue it on the new socket if the new socket is active,
1259 		 * otherwise we just short-circuit this and continue with
1260 		 * the new socket.
1261 		 */
1262 		if (nsk != sk) {
1263 			sock_rps_save_rxhash(nsk, skb);
1264 			sk_mark_napi_id(sk, skb);
1265 			if (tcp_child_process(sk, nsk, skb))
1266 				goto reset;
1267 			if (opt_skb)
1268 				__kfree_skb(opt_skb);
1269 			return 0;
1270 		}
1271 	} else
1272 		sock_rps_save_rxhash(sk, skb);
1273 
1274 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1275 		goto reset;
1276 	if (opt_skb)
1277 		goto ipv6_pktoptions;
1278 	return 0;
1279 
1280 reset:
1281 	tcp_v6_send_reset(sk, skb);
1282 discard:
1283 	if (opt_skb)
1284 		__kfree_skb(opt_skb);
1285 	kfree_skb(skb);
1286 	return 0;
1287 csum_err:
1288 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1289 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1290 	goto discard;
1291 
1292 
1293 ipv6_pktoptions:
1294 	/* What is this, you ask?
1295 
1296 	   1. The skb was enqueued by TCP.
1297 	   2. The skb was added to the tail of the read queue, not out of order.
1298 	   3. The socket is not in a passive state.
1299 	   4. Finally, it really contains options the user wants to receive.
1300 	 */
1301 	tp = tcp_sk(sk);
1302 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1303 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1304 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1305 			np->mcast_oif = tcp_v6_iif(opt_skb);
1306 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1307 			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1308 		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1309 			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1310 		if (np->repflow)
1311 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1312 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1313 			skb_set_owner_r(opt_skb, sk);
1314 			opt_skb = xchg(&np->pktoptions, opt_skb);
1315 		} else {
1316 			__kfree_skb(opt_skb);
1317 			opt_skb = xchg(&np->pktoptions, NULL);
1318 		}
1319 	}
1320 
1321 	kfree_skb(opt_skb);
1322 	return 0;
1323 }
1324 
1325 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1326 			   const struct tcphdr *th)
1327 {
1328 	/* This is tricky: we move IP6CB to its correct location inside
1329 	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1330 	 * _decode_session6() uses IP6CB().
1331 	 * barrier() makes sure the compiler won't play aliasing games.
1332 	 */
1333 	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1334 		sizeof(struct inet6_skb_parm));
1335 	barrier();
1336 
1337 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1338 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1339 				    skb->len - th->doff*4);
1340 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1341 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1342 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1343 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1344 	TCP_SKB_CB(skb)->sacked = 0;
1345 }
1346 
1347 static void tcp_v6_restore_cb(struct sk_buff *skb)
1348 {
1349 	/* We need to move header back to the beginning if xfrm6_policy_check()
1350 	 * and tcp_v6_fill_cb() are going to be called again.
1351 	 */
1352 	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1353 		sizeof(struct inet6_skb_parm));
1354 }
1355 
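/* Main receive entry point for TCPv6.  Validates header length and
 * checksum, looks up the owning socket, runs policy/MD5/filter checks and
 * then either processes the segment directly, prequeues it, or backlogs
 * it when the socket is owned by user context.  TIME_WAIT segments take
 * the do_time_wait path below.
 */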
1356 static int tcp_v6_rcv(struct sk_buff *skb)
1357 {
1358 	const struct tcphdr *th;
1359 	const struct ipv6hdr *hdr;
1360 	struct sock *sk;
1361 	int ret;
1362 	struct net *net = dev_net(skb->dev);
1363 
1364 	if (skb->pkt_type != PACKET_HOST)
1365 		goto discard_it;
1366 
1367 	/*
1368 	 *	Count it even if it's bad.
1369 	 */
1370 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1371 
1372 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1373 		goto discard_it;
1374 
1375 	th = tcp_hdr(skb);
1376 
1377 	if (th->doff < sizeof(struct tcphdr)/4)
1378 		goto bad_packet;
1379 	if (!pskb_may_pull(skb, th->doff*4))
1380 		goto discard_it;
1381 
1382 	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1383 		goto csum_error;
1384 
1385 	th = tcp_hdr(skb);
1386 	hdr = ipv6_hdr(skb);
1387 
1388 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
1389 				inet6_iif(skb));
1390 	if (!sk)
1391 		goto no_tcp_socket;
1392 
1393 process:
1394 	if (sk->sk_state == TCP_TIME_WAIT)
1395 		goto do_time_wait;
1396 
1397 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1398 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1399 		goto discard_and_relse;
1400 	}
1401 
1402 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1403 		goto discard_and_relse;
1404 
1405 	tcp_v6_fill_cb(skb, hdr, th);
1406 
1407 #ifdef CONFIG_TCP_MD5SIG
1408 	if (tcp_v6_inbound_md5_hash(sk, skb))
1409 		goto discard_and_relse;
1410 #endif
1411 
1412 	if (sk_filter(sk, skb))
1413 		goto discard_and_relse;
1414 
1415 	sk_incoming_cpu_update(sk);
1416 	skb->dev = NULL;
1417 
1418 	bh_lock_sock_nested(sk);
1419 	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1420 	ret = 0;
1421 	if (!sock_owned_by_user(sk)) {
1422 		if (!tcp_prequeue(sk, skb))
1423 			ret = tcp_v6_do_rcv(sk, skb);
1424 	} else if (unlikely(sk_add_backlog(sk, skb,
1425 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1426 		bh_unlock_sock(sk);
1427 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1428 		goto discard_and_relse;
1429 	}
1430 	bh_unlock_sock(sk);
1431 
1432 	sock_put(sk);
1433 	return ret ? -1 : 0;
1434 
1435 no_tcp_socket:
1436 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1437 		goto discard_it;
1438 
1439 	tcp_v6_fill_cb(skb, hdr, th);
1440 
1441 	if (tcp_checksum_complete(skb)) {
1442 csum_error:
1443 		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1444 bad_packet:
1445 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1446 	} else {
1447 		tcp_v6_send_reset(NULL, skb);
1448 	}
1449 
1450 discard_it:
1451 	kfree_skb(skb);
1452 	return 0;
1453 
1454 discard_and_relse:
1455 	sock_put(sk);
1456 	goto discard_it;
1457 
1458 do_time_wait:
1459 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1460 		inet_twsk_put(inet_twsk(sk));
1461 		goto discard_it;
1462 	}
1463 
1464 	tcp_v6_fill_cb(skb, hdr, th);
1465 
1466 	if (tcp_checksum_complete(skb)) {
1467 		inet_twsk_put(inet_twsk(sk));
1468 		goto csum_error;
1469 	}
1470 
1471 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1472 	case TCP_TW_SYN:
1473 	{
1474 		struct sock *sk2;
1475 
1476 		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1477 					    &ipv6_hdr(skb)->saddr, th->source,
1478 					    &ipv6_hdr(skb)->daddr,
1479 					    ntohs(th->dest), tcp_v6_iif(skb));
1480 		if (sk2) {
1481 			struct inet_timewait_sock *tw = inet_twsk(sk);
1482 			inet_twsk_deschedule_put(tw);
1483 			sk = sk2;
1484 			tcp_v6_restore_cb(skb);
1485 			goto process;
1486 		}
1487 		/* Fall through to ACK */
1488 	}
1489 	case TCP_TW_ACK:
1490 		tcp_v6_timewait_ack(sk, skb);
1491 		break;
1492 	case TCP_TW_RST:
1493 		tcp_v6_restore_cb(skb);
1494 		goto no_tcp_socket;
1495 	case TCP_TW_SUCCESS:
1496 		;
1497 	}
1498 	goto discard_it;
1499 }
1500 
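/* Early demux: while the packet is still in the driver RX path, try to
 * find an established socket for it and attach both the socket and its
 * cached (cookie-validated) dst, so the normal input path can skip the
 * socket and route lookups.
 */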
1501 static void tcp_v6_early_demux(struct sk_buff *skb)
1502 {
1503 	const struct ipv6hdr *hdr;
1504 	const struct tcphdr *th;
1505 	struct sock *sk;
1506 
1507 	if (skb->pkt_type != PACKET_HOST)
1508 		return;
1509 
1510 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1511 		return;
1512 
1513 	hdr = ipv6_hdr(skb);
1514 	th = tcp_hdr(skb);
1515 
1516 	if (th->doff < sizeof(struct tcphdr) / 4)
1517 		return;
1518 
1519 	/* Note: we use inet6_iif() here, not tcp_v6_iif() */
1520 	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1521 					&hdr->saddr, th->source,
1522 					&hdr->daddr, ntohs(th->dest),
1523 					inet6_iif(skb));
1524 	if (sk) {
1525 		skb->sk = sk;
1526 		skb->destructor = sock_edemux;
1527 		if (sk_fullsock(sk)) {
1528 			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1529 
1530 			if (dst)
1531 				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1532 			if (dst &&
1533 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1534 				skb_dst_set_noref(skb, dst);
1535 		}
1536 	}
1537 }
1538 
1539 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1540 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1541 	.twsk_unique	= tcp_twsk_unique,
1542 	.twsk_destructor = tcp_twsk_destructor,
1543 };
1544 
1545 static const struct inet_connection_sock_af_ops ipv6_specific = {
1546 	.queue_xmit	   = inet6_csk_xmit,
1547 	.send_check	   = tcp_v6_send_check,
1548 	.rebuild_header	   = inet6_sk_rebuild_header,
1549 	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1550 	.conn_request	   = tcp_v6_conn_request,
1551 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1552 	.net_header_len	   = sizeof(struct ipv6hdr),
1553 	.net_frag_header_len = sizeof(struct frag_hdr),
1554 	.setsockopt	   = ipv6_setsockopt,
1555 	.getsockopt	   = ipv6_getsockopt,
1556 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1557 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1558 	.bind_conflict	   = inet6_csk_bind_conflict,
1559 #ifdef CONFIG_COMPAT
1560 	.compat_setsockopt = compat_ipv6_setsockopt,
1561 	.compat_getsockopt = compat_ipv6_getsockopt,
1562 #endif
1563 	.mtu_reduced	   = tcp_v6_mtu_reduced,
1564 };
1565 
1566 #ifdef CONFIG_TCP_MD5SIG
1567 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1568 	.md5_lookup	=	tcp_v6_md5_lookup,
1569 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1570 	.md5_parse	=	tcp_v6_parse_md5_keys,
1571 };
1572 #endif
1573 
1574 /*
1575  *	TCP over IPv4 via INET6 API
1576  */
1577 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1578 	.queue_xmit	   = ip_queue_xmit,
1579 	.send_check	   = tcp_v4_send_check,
1580 	.rebuild_header	   = inet_sk_rebuild_header,
1581 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1582 	.conn_request	   = tcp_v6_conn_request,
1583 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1584 	.net_header_len	   = sizeof(struct iphdr),
1585 	.setsockopt	   = ipv6_setsockopt,
1586 	.getsockopt	   = ipv6_getsockopt,
1587 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1588 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1589 	.bind_conflict	   = inet6_csk_bind_conflict,
1590 #ifdef CONFIG_COMPAT
1591 	.compat_setsockopt = compat_ipv6_setsockopt,
1592 	.compat_getsockopt = compat_ipv6_getsockopt,
1593 #endif
1594 	.mtu_reduced	   = tcp_v4_mtu_reduced,
1595 };
1596 
1597 #ifdef CONFIG_TCP_MD5SIG
1598 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1599 	.md5_lookup	=	tcp_v4_md5_lookup,
1600 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1601 	.md5_parse	=	tcp_v6_parse_md5_keys,
1602 };
1603 #endif
1604 
1605 /* NOTE: A lot of things are set to zero explicitly by the call to
1606  *       sk_alloc(), so they need not be done here.
1607  */
1608 static int tcp_v6_init_sock(struct sock *sk)
1609 {
1610 	struct inet_connection_sock *icsk = inet_csk(sk);
1611 
1612 	tcp_init_sock(sk);
1613 
1614 	icsk->icsk_af_ops = &ipv6_specific;
1615 
1616 #ifdef CONFIG_TCP_MD5SIG
1617 	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1618 #endif
1619 
1620 	return 0;
1621 }
1622 
1623 static void tcp_v6_destroy_sock(struct sock *sk)
1624 {
1625 	tcp_v4_destroy_sock(sk);
1626 	inet6_destroy_sock(sk);
1627 }
1628 
1629 #ifdef CONFIG_PROC_FS
1630 /* Proc filesystem TCPv6 sock list dumping. */
1631 static void get_openreq6(struct seq_file *seq,
1632 			 struct request_sock *req, int i, kuid_t uid)
1633 {
1634 	long ttd = req->rsk_timer.expires - jiffies;
1635 	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1636 	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1637 
1638 	if (ttd < 0)
1639 		ttd = 0;
1640 
1641 	seq_printf(seq,
1642 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1643 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1644 		   i,
1645 		   src->s6_addr32[0], src->s6_addr32[1],
1646 		   src->s6_addr32[2], src->s6_addr32[3],
1647 		   inet_rsk(req)->ir_num,
1648 		   dest->s6_addr32[0], dest->s6_addr32[1],
1649 		   dest->s6_addr32[2], dest->s6_addr32[3],
1650 		   ntohs(inet_rsk(req)->ir_rmt_port),
1651 		   TCP_SYN_RECV,
1652 		   0, 0, /* could print option size, but that is af dependent. */
1653 		   1,   /* timers active (only the expire timer) */
1654 		   jiffies_to_clock_t(ttd),
1655 		   req->num_timeout,
1656 		   from_kuid_munged(seq_user_ns(seq), uid),
1657 		   0,  /* non standard timer */
1658 		   0, /* open_requests have no inode */
1659 		   0, req);
1660 }
1661 
1662 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1663 {
1664 	const struct in6_addr *dest, *src;
1665 	__u16 destp, srcp;
1666 	int timer_active;
1667 	unsigned long timer_expires;
1668 	const struct inet_sock *inet = inet_sk(sp);
1669 	const struct tcp_sock *tp = tcp_sk(sp);
1670 	const struct inet_connection_sock *icsk = inet_csk(sp);
1671 	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
1672 
1673 	dest  = &sp->sk_v6_daddr;
1674 	src   = &sp->sk_v6_rcv_saddr;
1675 	destp = ntohs(inet->inet_dport);
1676 	srcp  = ntohs(inet->inet_sport);
1677 
1678 	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1679 		timer_active	= 1;
1680 		timer_expires	= icsk->icsk_timeout;
1681 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1682 		timer_active	= 4;
1683 		timer_expires	= icsk->icsk_timeout;
1684 	} else if (timer_pending(&sp->sk_timer)) {
1685 		timer_active	= 2;
1686 		timer_expires	= sp->sk_timer.expires;
1687 	} else {
1688 		timer_active	= 0;
1689 		timer_expires = jiffies;
1690 	}
1691 
1692 	seq_printf(seq,
1693 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1694 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1695 		   i,
1696 		   src->s6_addr32[0], src->s6_addr32[1],
1697 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1698 		   dest->s6_addr32[0], dest->s6_addr32[1],
1699 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1700 		   sp->sk_state,
1701 		   tp->write_seq-tp->snd_una,
1702 		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1703 		   timer_active,
1704 		   jiffies_delta_to_clock_t(timer_expires - jiffies),
1705 		   icsk->icsk_retransmits,
1706 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1707 		   icsk->icsk_probes_out,
1708 		   sock_i_ino(sp),
1709 		   atomic_read(&sp->sk_refcnt), sp,
1710 		   jiffies_to_clock_t(icsk->icsk_rto),
1711 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1712 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1713 		   tp->snd_cwnd,
1714 		   sp->sk_state == TCP_LISTEN ?
1715 			(fastopenq ? fastopenq->max_qlen : 0) :
1716 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1717 		   );
1718 }
1719 
1720 static void get_timewait6_sock(struct seq_file *seq,
1721 			       struct inet_timewait_sock *tw, int i)
1722 {
1723 	long delta = tw->tw_timer.expires - jiffies;
1724 	const struct in6_addr *dest, *src;
1725 	__u16 destp, srcp;
1726 
1727 	dest = &tw->tw_v6_daddr;
1728 	src  = &tw->tw_v6_rcv_saddr;
1729 	destp = ntohs(tw->tw_dport);
1730 	srcp  = ntohs(tw->tw_sport);
1731 
1732 	seq_printf(seq,
1733 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1734 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1735 		   i,
1736 		   src->s6_addr32[0], src->s6_addr32[1],
1737 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1738 		   dest->s6_addr32[0], dest->s6_addr32[1],
1739 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1740 		   tw->tw_substate, 0, 0,
1741 		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1742 		   atomic_read(&tw->tw_refcnt), tw);
1743 }
1744 
1745 static int tcp6_seq_show(struct seq_file *seq, void *v)
1746 {
1747 	struct tcp_iter_state *st;
1748 	struct sock *sk = v;
1749 
1750 	if (v == SEQ_START_TOKEN) {
1751 		seq_puts(seq,
1752 			 "  sl  "
1753 			 "local_address                         "
1754 			 "remote_address                        "
1755 			 "st tx_queue rx_queue tr tm->when retrnsmt"
1756 			 "   uid  timeout inode\n");
1757 		goto out;
1758 	}
1759 	st = seq->private;
1760 
1761 	switch (st->state) {
1762 	case TCP_SEQ_STATE_LISTENING:
1763 	case TCP_SEQ_STATE_ESTABLISHED:
1764 		if (sk->sk_state == TCP_TIME_WAIT)
1765 			get_timewait6_sock(seq, v, st->num);
1766 		else
1767 			get_tcp6_sock(seq, v, st->num);
1768 		break;
1769 	case TCP_SEQ_STATE_OPENREQ:
1770 		get_openreq6(seq, v, st->num, st->uid);
1771 		break;
1772 	}
1773 out:
1774 	return 0;
1775 }
1776 
1777 static const struct file_operations tcp6_afinfo_seq_fops = {
1778 	.owner   = THIS_MODULE,
1779 	.open    = tcp_seq_open,
1780 	.read    = seq_read,
1781 	.llseek  = seq_lseek,
1782 	.release = seq_release_net
1783 };
1784 
1785 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1786 	.name		= "tcp6",
1787 	.family		= AF_INET6,
1788 	.seq_fops	= &tcp6_afinfo_seq_fops,
1789 	.seq_ops	= {
1790 		.show		= tcp6_seq_show,
1791 	},
1792 };
1793 
1794 int __net_init tcp6_proc_init(struct net *net)
1795 {
1796 	return tcp_proc_register(net, &tcp6_seq_afinfo);
1797 }
1798 
1799 void tcp6_proc_exit(struct net *net)
1800 {
1801 	tcp_proc_unregister(net, &tcp6_seq_afinfo);
1802 }
1803 #endif
1804 
1805 static void tcp_v6_clear_sk(struct sock *sk, int size)
1806 {
1807 	struct inet_sock *inet = inet_sk(sk);
1808 
1809 	/* we do not want to clear the pinet6 field, because of RCU lookups */
1810 	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1811 
1812 	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1813 	memset(&inet->pinet6 + 1, 0, size);
1814 }
1815 
1816 struct proto tcpv6_prot = {
1817 	.name			= "TCPv6",
1818 	.owner			= THIS_MODULE,
1819 	.close			= tcp_close,
1820 	.connect		= tcp_v6_connect,
1821 	.disconnect		= tcp_disconnect,
1822 	.accept			= inet_csk_accept,
1823 	.ioctl			= tcp_ioctl,
1824 	.init			= tcp_v6_init_sock,
1825 	.destroy		= tcp_v6_destroy_sock,
1826 	.shutdown		= tcp_shutdown,
1827 	.setsockopt		= tcp_setsockopt,
1828 	.getsockopt		= tcp_getsockopt,
1829 	.recvmsg		= tcp_recvmsg,
1830 	.sendmsg		= tcp_sendmsg,
1831 	.sendpage		= tcp_sendpage,
1832 	.backlog_rcv		= tcp_v6_do_rcv,
1833 	.release_cb		= tcp_release_cb,
1834 	.hash			= inet_hash,
1835 	.unhash			= inet_unhash,
1836 	.get_port		= inet_csk_get_port,
1837 	.enter_memory_pressure	= tcp_enter_memory_pressure,
1838 	.stream_memory_free	= tcp_stream_memory_free,
1839 	.sockets_allocated	= &tcp_sockets_allocated,
1840 	.memory_allocated	= &tcp_memory_allocated,
1841 	.memory_pressure	= &tcp_memory_pressure,
1842 	.orphan_count		= &tcp_orphan_count,
1843 	.sysctl_mem		= sysctl_tcp_mem,
1844 	.sysctl_wmem		= sysctl_tcp_wmem,
1845 	.sysctl_rmem		= sysctl_tcp_rmem,
1846 	.max_header		= MAX_TCP_HEADER,
1847 	.obj_size		= sizeof(struct tcp6_sock),
1848 	.slab_flags		= SLAB_DESTROY_BY_RCU,
1849 	.twsk_prot		= &tcp6_timewait_sock_ops,
1850 	.rsk_prot		= &tcp6_request_sock_ops,
1851 	.h.hashinfo		= &tcp_hashinfo,
1852 	.no_autobind		= true,
1853 #ifdef CONFIG_COMPAT
1854 	.compat_setsockopt	= compat_tcp_setsockopt,
1855 	.compat_getsockopt	= compat_tcp_getsockopt,
1856 #endif
1857 #ifdef CONFIG_MEMCG_KMEM
1858 	.proto_cgroup		= tcp_proto_cgroup,
1859 #endif
1860 	.clear_sk		= tcp_v6_clear_sk,
1861 };
1862 
1863 static const struct inet6_protocol tcpv6_protocol = {
1864 	.early_demux	=	tcp_v6_early_demux,
1865 	.handler	=	tcp_v6_rcv,
1866 	.err_handler	=	tcp_v6_err,
1867 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1868 };
1869 
1870 static struct inet_protosw tcpv6_protosw = {
1871 	.type		=	SOCK_STREAM,
1872 	.protocol	=	IPPROTO_TCP,
1873 	.prot		=	&tcpv6_prot,
1874 	.ops		=	&inet6_stream_ops,
1875 	.flags		=	INET_PROTOSW_PERMANENT |
1876 				INET_PROTOSW_ICSK,
1877 };
1878 
1879 static int __net_init tcpv6_net_init(struct net *net)
1880 {
1881 	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1882 				    SOCK_RAW, IPPROTO_TCP, net);
1883 }
1884 
1885 static void __net_exit tcpv6_net_exit(struct net *net)
1886 {
1887 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1888 }
1889 
1890 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1891 {
1892 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1893 }
1894 
1895 static struct pernet_operations tcpv6_net_ops = {
1896 	.init	    = tcpv6_net_init,
1897 	.exit	    = tcpv6_net_exit,
1898 	.exit_batch = tcpv6_net_exit_batch,
1899 };
1900 
1901 int __init tcpv6_init(void)
1902 {
1903 	int ret;
1904 
1905 	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1906 	if (ret)
1907 		goto out;
1908 
1909 	/* register inet6 protocol */
1910 	ret = inet6_register_protosw(&tcpv6_protosw);
1911 	if (ret)
1912 		goto out_tcpv6_protocol;
1913 
1914 	ret = register_pernet_subsys(&tcpv6_net_ops);
1915 	if (ret)
1916 		goto out_tcpv6_protosw;
1917 out:
1918 	return ret;
1919 
1920 out_tcpv6_protosw:
1921 	inet6_unregister_protosw(&tcpv6_protosw);
1922 out_tcpv6_protocol:
1923 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1924 	goto out;
1925 }
1926 
1927 void tcpv6_exit(void)
1928 {
1929 	unregister_pernet_subsys(&tcpv6_net_ops);
1930 	inet6_unregister_protosw(&tcpv6_protosw);
1931 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1932 }
1933