xref: /openbmc/linux/net/ipv6/tcp_ipv6.c (revision f3539c12)
1 /*
2  *	TCP over IPv6
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	Based on:
9  *	linux/net/ipv4/tcp.c
10  *	linux/net/ipv4/tcp_input.c
11  *	linux/net/ipv4/tcp_output.c
12  *
13  *	Fixes:
14  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
15  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
16  *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
17  *					a single port at the same time.
18  *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
19  *
20  *	This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25 
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46 
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/busy_poll.h>
65 
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
68 
69 #include <crypto/hash.h>
70 #include <linux/scatterlist.h>
71 
72 static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
73 static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
74 				      struct request_sock *req);
75 
76 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
77 
78 static const struct inet_connection_sock_af_ops ipv6_mapped;
79 static const struct inet_connection_sock_af_ops ipv6_specific;
80 #ifdef CONFIG_TCP_MD5SIG
81 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
83 #else
84 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
85 						   const struct in6_addr *addr)
86 {
87 	return NULL;
88 }
89 #endif
90 
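/* Cache the validated ingress route on the socket, together with the
 * incoming interface index and the rt6 cookie used to revalidate the
 * cached dst for later packets.
 */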
91 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
92 {
93 	struct dst_entry *dst = skb_dst(skb);
94 
95 	if (dst && dst_hold_safe(dst)) {
96 		const struct rt6_info *rt = (const struct rt6_info *)dst;
97 
98 		sk->sk_rx_dst = dst;
99 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
100 		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
101 	}
102 }
103 
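/* Derive the initial sequence number for an outgoing connection from the
 * IPv6 address/port 4-tuple via secure_tcpv6_sequence_number().
 */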
104 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
105 {
106 	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
107 					    ipv6_hdr(skb)->saddr.s6_addr32,
108 					    tcp_hdr(skb)->dest,
109 					    tcp_hdr(skb)->source);
110 }
111 
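/* Active open: validate the destination, handle IPv4-mapped destinations by
 * falling back to tcp_v4_connect(), route the flow, pick a source address,
 * bind a local port via inet6_hash_connect() and send the SYN.
 */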
112 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
113 			  int addr_len)
114 {
115 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
116 	struct inet_sock *inet = inet_sk(sk);
117 	struct inet_connection_sock *icsk = inet_csk(sk);
118 	struct ipv6_pinfo *np = inet6_sk(sk);
119 	struct tcp_sock *tp = tcp_sk(sk);
120 	struct in6_addr *saddr = NULL, *final_p, final;
121 	struct ipv6_txoptions *opt;
122 	struct flowi6 fl6;
123 	struct dst_entry *dst;
124 	int addr_type;
125 	int err;
126 
127 	if (addr_len < SIN6_LEN_RFC2133)
128 		return -EINVAL;
129 
130 	if (usin->sin6_family != AF_INET6)
131 		return -EAFNOSUPPORT;
132 
133 	memset(&fl6, 0, sizeof(fl6));
134 
135 	if (np->sndflow) {
136 		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
137 		IP6_ECN_flow_init(fl6.flowlabel);
138 		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
139 			struct ip6_flowlabel *flowlabel;
140 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
141 			if (!flowlabel)
142 				return -EINVAL;
143 			fl6_sock_release(flowlabel);
144 		}
145 	}
146 
147 	/*
148 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
149 	 */
150 
151 	if (ipv6_addr_any(&usin->sin6_addr))
152 		usin->sin6_addr.s6_addr[15] = 0x1;
153 
154 	addr_type = ipv6_addr_type(&usin->sin6_addr);
155 
156 	if (addr_type & IPV6_ADDR_MULTICAST)
157 		return -ENETUNREACH;
158 
159 	if (addr_type&IPV6_ADDR_LINKLOCAL) {
160 		if (addr_len >= sizeof(struct sockaddr_in6) &&
161 		    usin->sin6_scope_id) {
162 			/* If interface is set while binding, indices
163 			 * must coincide.
164 			 */
165 			if (sk->sk_bound_dev_if &&
166 			    sk->sk_bound_dev_if != usin->sin6_scope_id)
167 				return -EINVAL;
168 
169 			sk->sk_bound_dev_if = usin->sin6_scope_id;
170 		}
171 
172 		/* Connecting to a link-local address requires an interface */
173 		if (!sk->sk_bound_dev_if)
174 			return -EINVAL;
175 	}
176 
177 	if (tp->rx_opt.ts_recent_stamp &&
178 	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
179 		tp->rx_opt.ts_recent = 0;
180 		tp->rx_opt.ts_recent_stamp = 0;
181 		tp->write_seq = 0;
182 	}
183 
184 	sk->sk_v6_daddr = usin->sin6_addr;
185 	np->flow_label = fl6.flowlabel;
186 
187 	/*
188 	 *	TCP over IPv4
189 	 */
190 
191 	if (addr_type == IPV6_ADDR_MAPPED) {
192 		u32 exthdrlen = icsk->icsk_ext_hdr_len;
193 		struct sockaddr_in sin;
194 
195 		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
196 
197 		if (__ipv6_only_sock(sk))
198 			return -ENETUNREACH;
199 
200 		sin.sin_family = AF_INET;
201 		sin.sin_port = usin->sin6_port;
202 		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
203 
204 		icsk->icsk_af_ops = &ipv6_mapped;
205 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
206 #ifdef CONFIG_TCP_MD5SIG
207 		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
208 #endif
209 
210 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
211 
212 		if (err) {
213 			icsk->icsk_ext_hdr_len = exthdrlen;
214 			icsk->icsk_af_ops = &ipv6_specific;
215 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
216 #ifdef CONFIG_TCP_MD5SIG
217 			tp->af_specific = &tcp_sock_ipv6_specific;
218 #endif
219 			goto failure;
220 		}
221 		np->saddr = sk->sk_v6_rcv_saddr;
222 
223 		return err;
224 	}
225 
226 	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
227 		saddr = &sk->sk_v6_rcv_saddr;
228 
229 	fl6.flowi6_proto = IPPROTO_TCP;
230 	fl6.daddr = sk->sk_v6_daddr;
231 	fl6.saddr = saddr ? *saddr : np->saddr;
232 	fl6.flowi6_oif = sk->sk_bound_dev_if;
233 	fl6.flowi6_mark = sk->sk_mark;
234 	fl6.fl6_dport = usin->sin6_port;
235 	fl6.fl6_sport = inet->inet_sport;
236 
237 	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
238 	final_p = fl6_update_dst(&fl6, opt, &final);
239 
240 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
241 
242 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
243 	if (IS_ERR(dst)) {
244 		err = PTR_ERR(dst);
245 		goto failure;
246 	}
247 
248 	if (!saddr) {
249 		saddr = &fl6.saddr;
250 		sk->sk_v6_rcv_saddr = *saddr;
251 	}
252 
253 	/* set the source address */
254 	np->saddr = *saddr;
255 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
256 
257 	sk->sk_gso_type = SKB_GSO_TCPV6;
258 	ip6_dst_store(sk, dst, NULL, NULL);
259 
260 	if (tcp_death_row.sysctl_tw_recycle &&
261 	    !tp->rx_opt.ts_recent_stamp &&
262 	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
263 		tcp_fetch_timewait_stamp(sk, dst);
264 
265 	icsk->icsk_ext_hdr_len = 0;
266 	if (opt)
267 		icsk->icsk_ext_hdr_len = opt->opt_flen +
268 					 opt->opt_nflen;
269 
270 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
271 
272 	inet->inet_dport = usin->sin6_port;
273 
274 	tcp_set_state(sk, TCP_SYN_SENT);
275 	err = inet6_hash_connect(&tcp_death_row, sk);
276 	if (err)
277 		goto late_failure;
278 
279 	sk_set_txhash(sk);
280 
281 	if (!tp->write_seq && likely(!tp->repair))
282 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
283 							     sk->sk_v6_daddr.s6_addr32,
284 							     inet->inet_sport,
285 							     inet->inet_dport);
286 
287 	err = tcp_connect(sk);
288 	if (err)
289 		goto late_failure;
290 
291 	return 0;
292 
293 late_failure:
294 	tcp_set_state(sk, TCP_CLOSE);
295 	__sk_dst_reset(sk);
296 failure:
297 	inet->inet_dport = 0;
298 	sk->sk_route_caps = 0;
299 	return err;
300 }
301 
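/* Path MTU has shrunk: update the cached route with the new MTU and, if the
 * socket's cached MSS is now too large, shrink it and trigger retransmission
 * of segments affected by the smaller MTU.
 */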
302 static void tcp_v6_mtu_reduced(struct sock *sk)
303 {
304 	struct dst_entry *dst;
305 
306 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
307 		return;
308 
309 	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
310 	if (!dst)
311 		return;
312 
313 	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
314 		tcp_sync_mss(sk, dst_mtu(dst));
315 		tcp_simple_retransmit(sk);
316 	}
317 }
318 
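/* ICMPv6 error handler: locate the socket for the offending segment and react
 * to the error - redirects update the route, PKT_TOOBIG triggers PMTU
 * reduction, and other errors are reported to the socket owner.
 */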
319 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
320 		u8 type, u8 code, int offset, __be32 info)
321 {
322 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
323 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
324 	struct net *net = dev_net(skb->dev);
325 	struct request_sock *fastopen;
326 	struct ipv6_pinfo *np;
327 	struct tcp_sock *tp;
328 	__u32 seq, snd_una;
329 	struct sock *sk;
330 	bool fatal;
331 	int err;
332 
333 	sk = __inet6_lookup_established(net, &tcp_hashinfo,
334 					&hdr->daddr, th->dest,
335 					&hdr->saddr, ntohs(th->source),
336 					skb->dev->ifindex);
337 
338 	if (!sk) {
339 		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
340 				  ICMP6_MIB_INERRORS);
341 		return;
342 	}
343 
344 	if (sk->sk_state == TCP_TIME_WAIT) {
345 		inet_twsk_put(inet_twsk(sk));
346 		return;
347 	}
348 	seq = ntohl(th->seq);
349 	fatal = icmpv6_err_convert(type, code, &err);
350 	if (sk->sk_state == TCP_NEW_SYN_RECV)
351 		return tcp_req_err(sk, seq, fatal);
352 
353 	bh_lock_sock(sk);
354 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
355 		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
356 
357 	if (sk->sk_state == TCP_CLOSE)
358 		goto out;
359 
360 	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
361 		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
362 		goto out;
363 	}
364 
365 	tp = tcp_sk(sk);
366 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
367 	fastopen = tp->fastopen_rsk;
368 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
369 	if (sk->sk_state != TCP_LISTEN &&
370 	    !between(seq, snd_una, tp->snd_nxt)) {
371 		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
372 		goto out;
373 	}
374 
375 	np = inet6_sk(sk);
376 
377 	if (type == NDISC_REDIRECT) {
378 		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
379 
380 		if (dst)
381 			dst->ops->redirect(dst, sk, skb);
382 		goto out;
383 	}
384 
385 	if (type == ICMPV6_PKT_TOOBIG) {
386 		/* We are not interested in TCP_LISTEN and open_requests
387 		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
388 		 * they should go through unfragmented).
389 		 */
390 		if (sk->sk_state == TCP_LISTEN)
391 			goto out;
392 
393 		if (!ip6_sk_accept_pmtu(sk))
394 			goto out;
395 
396 		tp->mtu_info = ntohl(info);
397 		if (!sock_owned_by_user(sk))
398 			tcp_v6_mtu_reduced(sk);
399 		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
400 					   &tp->tsq_flags))
401 			sock_hold(sk);
402 		goto out;
403 	}
404 
405 
406 	/* Might be for a request_sock */
407 	switch (sk->sk_state) {
408 	case TCP_SYN_SENT:
409 	case TCP_SYN_RECV:
410 		/* Only in fast or simultaneous open. If a fast open socket
411 		 * is already accepted, it is treated as a connected one below.
412 		 */
413 		if (fastopen && !fastopen->sk)
414 			break;
415 
416 		if (!sock_owned_by_user(sk)) {
417 			sk->sk_err = err;
418 			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
419 
420 			tcp_done(sk);
421 		} else
422 			sk->sk_err_soft = err;
423 		goto out;
424 	}
425 
426 	if (!sock_owned_by_user(sk) && np->recverr) {
427 		sk->sk_err = err;
428 		sk->sk_error_report(sk);
429 	} else
430 		sk->sk_err_soft = err;
431 
432 out:
433 	bh_unlock_sock(sk);
434 	sock_put(sk);
435 }
436 
437 
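/* Build and transmit a SYN-ACK for the given request: route it if no dst was
 * supplied, compute the checksum and send it with the listener's IPv6 options
 * and traffic class.
 */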
438 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
439 			      struct flowi *fl,
440 			      struct request_sock *req,
441 			      struct tcp_fastopen_cookie *foc,
442 			      enum tcp_synack_type synack_type)
443 {
444 	struct inet_request_sock *ireq = inet_rsk(req);
445 	struct ipv6_pinfo *np = inet6_sk(sk);
446 	struct ipv6_txoptions *opt;
447 	struct flowi6 *fl6 = &fl->u.ip6;
448 	struct sk_buff *skb;
449 	int err = -ENOMEM;
450 
451 	/* First, grab a route. */
452 	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
453 					       IPPROTO_TCP)) == NULL)
454 		goto done;
455 
456 	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
457 
458 	if (skb) {
459 		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
460 				    &ireq->ir_v6_rmt_addr);
461 
462 		fl6->daddr = ireq->ir_v6_rmt_addr;
463 		if (np->repflow && ireq->pktopts)
464 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
465 
466 		rcu_read_lock();
467 		opt = ireq->ipv6_opt;
468 		if (!opt)
469 			opt = rcu_dereference(np->opt);
470 		err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
471 		rcu_read_unlock();
472 		err = net_xmit_eval(err);
473 	}
474 
475 done:
476 	return err;
477 }
478 
479 
480 static void tcp_v6_reqsk_destructor(struct request_sock *req)
481 {
482 	kfree(inet_rsk(req)->ipv6_opt);
483 	kfree_skb(inet_rsk(req)->pktopts);
484 }
485 
486 #ifdef CONFIG_TCP_MD5SIG
487 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
488 						   const struct in6_addr *addr)
489 {
490 	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
491 }
492 
493 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
494 						const struct sock *addr_sk)
495 {
496 	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
497 }
498 
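/* setsockopt(TCP_MD5SIG) handler: add or delete the MD5 key for a peer
 * address, storing v4-mapped addresses as AF_INET keys.
 */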
499 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
500 				 int optlen)
501 {
502 	struct tcp_md5sig cmd;
503 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
504 
505 	if (optlen < sizeof(cmd))
506 		return -EINVAL;
507 
508 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
509 		return -EFAULT;
510 
511 	if (sin6->sin6_family != AF_INET6)
512 		return -EINVAL;
513 
514 	if (!cmd.tcpm_keylen) {
515 		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
516 			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
517 					      AF_INET);
518 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
519 				      AF_INET6);
520 	}
521 
522 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
523 		return -EINVAL;
524 
525 	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
526 		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
527 				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
528 
529 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
530 			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
531 }
532 
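/* Feed the IPv6 pseudo-header and the TCP header (with a zeroed checksum
 * field) into the MD5 hash request.
 */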
533 static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
534 				   const struct in6_addr *daddr,
535 				   const struct in6_addr *saddr,
536 				   const struct tcphdr *th, int nbytes)
537 {
538 	struct tcp6_pseudohdr *bp;
539 	struct scatterlist sg;
540 	struct tcphdr *_th;
541 
542 	bp = hp->scratch;
543 	/* 1. TCP pseudo-header (RFC2460) */
544 	bp->saddr = *saddr;
545 	bp->daddr = *daddr;
546 	bp->protocol = cpu_to_be32(IPPROTO_TCP);
547 	bp->len = cpu_to_be32(nbytes);
548 
549 	_th = (struct tcphdr *)(bp + 1);
550 	memcpy(_th, th, sizeof(*th));
551 	_th->check = 0;
552 
553 	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
554 	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
555 				sizeof(*bp) + sizeof(*th));
556 	return crypto_ahash_update(hp->md5_req);
557 }
558 
559 static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
560 			       const struct in6_addr *daddr, struct in6_addr *saddr,
561 			       const struct tcphdr *th)
562 {
563 	struct tcp_md5sig_pool *hp;
564 	struct ahash_request *req;
565 
566 	hp = tcp_get_md5sig_pool();
567 	if (!hp)
568 		goto clear_hash_noput;
569 	req = hp->md5_req;
570 
571 	if (crypto_ahash_init(req))
572 		goto clear_hash;
573 	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
574 		goto clear_hash;
575 	if (tcp_md5_hash_key(hp, key))
576 		goto clear_hash;
577 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
578 	if (crypto_ahash_final(req))
579 		goto clear_hash;
580 
581 	tcp_put_md5sig_pool();
582 	return 0;
583 
584 clear_hash:
585 	tcp_put_md5sig_pool();
586 clear_hash_noput:
587 	memset(md5_hash, 0, 16);
588 	return 1;
589 }
590 
591 static int tcp_v6_md5_hash_skb(char *md5_hash,
592 			       const struct tcp_md5sig_key *key,
593 			       const struct sock *sk,
594 			       const struct sk_buff *skb)
595 {
596 	const struct in6_addr *saddr, *daddr;
597 	struct tcp_md5sig_pool *hp;
598 	struct ahash_request *req;
599 	const struct tcphdr *th = tcp_hdr(skb);
600 
601 	if (sk) { /* valid for establish/request sockets */
602 		saddr = &sk->sk_v6_rcv_saddr;
603 		daddr = &sk->sk_v6_daddr;
604 	} else {
605 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
606 		saddr = &ip6h->saddr;
607 		daddr = &ip6h->daddr;
608 	}
609 
610 	hp = tcp_get_md5sig_pool();
611 	if (!hp)
612 		goto clear_hash_noput;
613 	req = hp->md5_req;
614 
615 	if (crypto_ahash_init(req))
616 		goto clear_hash;
617 
618 	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
619 		goto clear_hash;
620 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
621 		goto clear_hash;
622 	if (tcp_md5_hash_key(hp, key))
623 		goto clear_hash;
624 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
625 	if (crypto_ahash_final(req))
626 		goto clear_hash;
627 
628 	tcp_put_md5sig_pool();
629 	return 0;
630 
631 clear_hash:
632 	tcp_put_md5sig_pool();
633 clear_hash_noput:
634 	memset(md5_hash, 0, 16);
635 	return 1;
636 }
637 
638 #endif
639 
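/* Verify the TCP MD5 signature of an incoming segment against the key
 * configured for the peer address. Returns true if the segment must be
 * dropped (missing, unexpected or mismatching signature).
 */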
640 static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
641 				    const struct sk_buff *skb)
642 {
643 #ifdef CONFIG_TCP_MD5SIG
644 	const __u8 *hash_location = NULL;
645 	struct tcp_md5sig_key *hash_expected;
646 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
647 	const struct tcphdr *th = tcp_hdr(skb);
648 	int genhash;
649 	u8 newhash[16];
650 
651 	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
652 	hash_location = tcp_parse_md5sig_option(th);
653 
654 	/* We've parsed the options - do we have a hash? */
655 	if (!hash_expected && !hash_location)
656 		return false;
657 
658 	if (hash_expected && !hash_location) {
659 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
660 		return true;
661 	}
662 
663 	if (!hash_expected && hash_location) {
664 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
665 		return true;
666 	}
667 
668 	/* check the signature */
669 	genhash = tcp_v6_md5_hash_skb(newhash,
670 				      hash_expected,
671 				      NULL, skb);
672 
673 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
674 		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
675 				     genhash ? "failed" : "mismatch",
676 				     &ip6h->saddr, ntohs(th->source),
677 				     &ip6h->daddr, ntohs(th->dest));
678 		return true;
679 	}
680 #endif
681 	return false;
682 }
683 
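/* Fill the IPv6-specific fields of a freshly allocated request sock from the
 * incoming SYN: remote and local addresses, the interface for link-local
 * peers, and a reference to the SYN skb when packet options were requested.
 */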
684 static void tcp_v6_init_req(struct request_sock *req,
685 			    const struct sock *sk_listener,
686 			    struct sk_buff *skb)
687 {
688 	struct inet_request_sock *ireq = inet_rsk(req);
689 	const struct ipv6_pinfo *np = inet6_sk(sk_listener);
690 
691 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
692 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
693 
694 	/* So that link locals have meaning */
695 	if (!sk_listener->sk_bound_dev_if &&
696 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
697 		ireq->ir_iif = tcp_v6_iif(skb);
698 
699 	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
700 	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
701 	     np->rxopt.bits.rxinfo ||
702 	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
703 	     np->rxopt.bits.rxohlim || np->repflow)) {
704 		atomic_inc(&skb->users);
705 		ireq->pktopts = skb;
706 	}
707 }
708 
709 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
710 					  struct flowi *fl,
711 					  const struct request_sock *req,
712 					  bool *strict)
713 {
714 	if (strict)
715 		*strict = true;
716 	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
717 }
718 
719 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
720 	.family		=	AF_INET6,
721 	.obj_size	=	sizeof(struct tcp6_request_sock),
722 	.rtx_syn_ack	=	tcp_rtx_synack,
723 	.send_ack	=	tcp_v6_reqsk_send_ack,
724 	.destructor	=	tcp_v6_reqsk_destructor,
725 	.send_reset	=	tcp_v6_send_reset,
726 	.syn_ack_timeout =	tcp_syn_ack_timeout,
727 };
728 
729 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
730 	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
731 				sizeof(struct ipv6hdr),
732 #ifdef CONFIG_TCP_MD5SIG
733 	.req_md5_lookup	=	tcp_v6_md5_lookup,
734 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
735 #endif
736 	.init_req	=	tcp_v6_init_req,
737 #ifdef CONFIG_SYN_COOKIES
738 	.cookie_init_seq =	cookie_v6_init_sequence,
739 #endif
740 	.route_req	=	tcp_v6_route_req,
741 	.init_seq	=	tcp_v6_init_sequence,
742 	.send_synack	=	tcp_v6_send_synack,
743 };
744 
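/* Build and send a bare ACK or RST on the per-netns control socket, echoing
 * the addresses of the incoming segment and optionally carrying timestamp
 * and MD5 signature options.
 */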
745 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
746 				 u32 ack, u32 win, u32 tsval, u32 tsecr,
747 				 int oif, struct tcp_md5sig_key *key, int rst,
748 				 u8 tclass, __be32 label)
749 {
750 	const struct tcphdr *th = tcp_hdr(skb);
751 	struct tcphdr *t1;
752 	struct sk_buff *buff;
753 	struct flowi6 fl6;
754 	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
755 	struct sock *ctl_sk = net->ipv6.tcp_sk;
756 	unsigned int tot_len = sizeof(struct tcphdr);
757 	struct dst_entry *dst;
758 	__be32 *topt;
759 
760 	if (tsecr)
761 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
762 #ifdef CONFIG_TCP_MD5SIG
763 	if (key)
764 		tot_len += TCPOLEN_MD5SIG_ALIGNED;
765 #endif
766 
767 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
768 			 GFP_ATOMIC);
769 	if (!buff)
770 		return;
771 
772 	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
773 
774 	t1 = (struct tcphdr *) skb_push(buff, tot_len);
775 	skb_reset_transport_header(buff);
776 
777 	/* Swap the send and the receive. */
778 	memset(t1, 0, sizeof(*t1));
779 	t1->dest = th->source;
780 	t1->source = th->dest;
781 	t1->doff = tot_len / 4;
782 	t1->seq = htonl(seq);
783 	t1->ack_seq = htonl(ack);
784 	t1->ack = !rst || !th->ack;
785 	t1->rst = rst;
786 	t1->window = htons(win);
787 
788 	topt = (__be32 *)(t1 + 1);
789 
790 	if (tsecr) {
791 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
792 				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
793 		*topt++ = htonl(tsval);
794 		*topt++ = htonl(tsecr);
795 	}
796 
797 #ifdef CONFIG_TCP_MD5SIG
798 	if (key) {
799 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
800 				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
801 		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
802 				    &ipv6_hdr(skb)->saddr,
803 				    &ipv6_hdr(skb)->daddr, t1);
804 	}
805 #endif
806 
807 	memset(&fl6, 0, sizeof(fl6));
808 	fl6.daddr = ipv6_hdr(skb)->saddr;
809 	fl6.saddr = ipv6_hdr(skb)->daddr;
810 	fl6.flowlabel = label;
811 
812 	buff->ip_summed = CHECKSUM_PARTIAL;
813 	buff->csum = 0;
814 
815 	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
816 
817 	fl6.flowi6_proto = IPPROTO_TCP;
818 	if (rt6_need_strict(&fl6.daddr) && !oif)
819 		fl6.flowi6_oif = tcp_v6_iif(skb);
820 	else {
821 		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
822 			oif = skb->skb_iif;
823 
824 		fl6.flowi6_oif = oif;
825 	}
826 
827 	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
828 	fl6.fl6_dport = t1->dest;
829 	fl6.fl6_sport = t1->source;
830 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
831 
832 	/* Pass a socket to ip6_dst_lookup whether or not this is for a RST;
833 	 * the underlying function will use it to retrieve the network
834 	 * namespace.
835 	 */
836 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
837 	if (!IS_ERR(dst)) {
838 		skb_dst_set(buff, dst);
839 		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
840 		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
841 		if (rst)
842 			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
843 		return;
844 	}
845 
846 	kfree_skb(buff);
847 }
848 
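/* Send a RST in response to skb unless the segment itself is a RST. When no
 * socket is known, the MD5 key is looked up via a listening socket so that
 * MD5-signed connections are not reset by forged segments.
 */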
849 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
850 {
851 	const struct tcphdr *th = tcp_hdr(skb);
852 	u32 seq = 0, ack_seq = 0;
853 	struct tcp_md5sig_key *key = NULL;
854 #ifdef CONFIG_TCP_MD5SIG
855 	const __u8 *hash_location = NULL;
856 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
857 	unsigned char newhash[16];
858 	int genhash;
859 	struct sock *sk1 = NULL;
860 #endif
861 	int oif;
862 
863 	if (th->rst)
864 		return;
865 
866 	/* If sk is not NULL, it means we did a successful lookup and the
867 	 * incoming route had to be correct. prequeue might have dropped our dst.
868 	 */
869 	if (!sk && !ipv6_unicast_destination(skb))
870 		return;
871 
872 #ifdef CONFIG_TCP_MD5SIG
873 	rcu_read_lock();
874 	hash_location = tcp_parse_md5sig_option(th);
875 	if (sk && sk_fullsock(sk)) {
876 		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
877 	} else if (hash_location) {
878 		/*
879 		 * The active side is lost. Try to find the listening socket through
880 		 * the source port, and then find the MD5 key through the listening
881 		 * socket. We are not losing security here:
882 		 * the incoming packet is checked against the MD5 hash of the found
883 		 * key; no RST is generated if the MD5 hash doesn't match.
884 		 */
885 		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
886 					   &tcp_hashinfo, NULL, 0,
887 					   &ipv6h->saddr,
888 					   th->source, &ipv6h->daddr,
889 					   ntohs(th->source), tcp_v6_iif(skb));
890 		if (!sk1)
891 			goto out;
892 
893 		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
894 		if (!key)
895 			goto out;
896 
897 		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
898 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
899 			goto out;
900 	}
901 #endif
902 
903 	if (th->ack)
904 		seq = ntohl(th->ack_seq);
905 	else
906 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
907 			  (th->doff << 2);
908 
909 	oif = sk ? sk->sk_bound_dev_if : 0;
910 	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
911 
912 #ifdef CONFIG_TCP_MD5SIG
913 out:
914 	rcu_read_unlock();
915 #endif
916 }
917 
918 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
919 			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
920 			    struct tcp_md5sig_key *key, u8 tclass,
921 			    __be32 label)
922 {
923 	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
924 			     tclass, label);
925 }
926 
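/* Answer a segment that hits a TIME_WAIT socket with an ACK carrying the
 * stored sequence numbers, timestamps and flow label, then drop the
 * time-wait socket reference.
 */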
927 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
928 {
929 	struct inet_timewait_sock *tw = inet_twsk(sk);
930 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
931 
932 	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
933 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
934 			tcp_time_stamp + tcptw->tw_ts_offset,
935 			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
936 			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
937 
938 	inet_twsk_put(tw);
939 }
940 
941 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
942 				  struct request_sock *req)
943 {
944 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
945 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
946 	 */
947 	/* RFC 7323 2.3
948 	 * The window field (SEG.WND) of every outgoing segment, with the
949 	 * exception of <SYN> segments, MUST be right-shifted by
950 	 * Rcv.Wind.Shift bits:
951 	 */
952 	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
953 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
954 			tcp_rsk(req)->rcv_nxt,
955 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
956 			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
957 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
958 			0, 0);
959 }
960 
961 
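/* When SYN cookies are enabled, check whether a non-SYN segment hitting a
 * listener is a valid cookie ACK and, if so, create the corresponding socket.
 */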
962 static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
963 {
964 #ifdef CONFIG_SYN_COOKIES
965 	const struct tcphdr *th = tcp_hdr(skb);
966 
967 	if (!th->syn)
968 		sk = cookie_v6_check(sk, skb);
969 #endif
970 	return sk;
971 }
972 
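/* Handle an incoming SYN on a listening socket, delegating v6-mapped (IPv4)
 * SYNs to tcp_v4_conn_request() and dropping non-unicast destinations.
 */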
973 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
974 {
975 	if (skb->protocol == htons(ETH_P_IP))
976 		return tcp_v4_conn_request(sk, skb);
977 
978 	if (!ipv6_unicast_destination(skb))
979 		goto drop;
980 
981 	return tcp_conn_request(&tcp6_request_sock_ops,
982 				&tcp_request_sock_ipv6_ops, sk, skb);
983 
984 drop:
985 	tcp_listendrop(sk);
986 	return 0; /* don't send reset */
987 }
988 
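/* Create the child socket once the 3-way handshake completes: handle the
 * v6-mapped (IPv4) case by delegating to tcp_v4_syn_recv_sock(); otherwise
 * clone the listener, install the route, copy IPv6 options and any MD5 key,
 * and hash the new socket.
 */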
989 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
990 					 struct request_sock *req,
991 					 struct dst_entry *dst,
992 					 struct request_sock *req_unhash,
993 					 bool *own_req)
994 {
995 	struct inet_request_sock *ireq;
996 	struct ipv6_pinfo *newnp;
997 	const struct ipv6_pinfo *np = inet6_sk(sk);
998 	struct ipv6_txoptions *opt;
999 	struct tcp6_sock *newtcp6sk;
1000 	struct inet_sock *newinet;
1001 	struct tcp_sock *newtp;
1002 	struct sock *newsk;
1003 #ifdef CONFIG_TCP_MD5SIG
1004 	struct tcp_md5sig_key *key;
1005 #endif
1006 	struct flowi6 fl6;
1007 
1008 	if (skb->protocol == htons(ETH_P_IP)) {
1009 		/*
1010 		 *	v6 mapped
1011 		 */
1012 
1013 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1014 					     req_unhash, own_req);
1015 
1016 		if (!newsk)
1017 			return NULL;
1018 
1019 		newtcp6sk = (struct tcp6_sock *)newsk;
1020 		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1021 
1022 		newinet = inet_sk(newsk);
1023 		newnp = inet6_sk(newsk);
1024 		newtp = tcp_sk(newsk);
1025 
1026 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1027 
1028 		newnp->saddr = newsk->sk_v6_rcv_saddr;
1029 
1030 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1031 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1032 #ifdef CONFIG_TCP_MD5SIG
1033 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1034 #endif
1035 
1036 		newnp->ipv6_ac_list = NULL;
1037 		newnp->ipv6_fl_list = NULL;
1038 		newnp->pktoptions  = NULL;
1039 		newnp->opt	   = NULL;
1040 		newnp->mcast_oif   = tcp_v6_iif(skb);
1041 		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1042 		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1043 		if (np->repflow)
1044 			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1045 
1046 		/*
1047 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1048 		 * here, tcp_create_openreq_child now does this for us, see the comment in
1049 		 * that function for the gory details. -acme
1050 		 */
1051 
1052 		/* This is a tricky place. Until this moment the IPv4 TCP code
1053 		   worked with IPv6 icsk.icsk_af_ops.
1054 		   Sync it now.
1055 		 */
1056 		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1057 
1058 		return newsk;
1059 	}
1060 
1061 	ireq = inet_rsk(req);
1062 
1063 	if (sk_acceptq_is_full(sk))
1064 		goto out_overflow;
1065 
1066 	if (!dst) {
1067 		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1068 		if (!dst)
1069 			goto out;
1070 	}
1071 
1072 	newsk = tcp_create_openreq_child(sk, req, skb);
1073 	if (!newsk)
1074 		goto out_nonewsk;
1075 
1076 	/*
1077 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1078 	 * count here, tcp_create_openreq_child now does this for us, see the
1079 	 * comment in that function for the gory details. -acme
1080 	 */
1081 
1082 	newsk->sk_gso_type = SKB_GSO_TCPV6;
1083 	ip6_dst_store(newsk, dst, NULL, NULL);
1084 	inet6_sk_rx_dst_set(newsk, skb);
1085 
1086 	newtcp6sk = (struct tcp6_sock *)newsk;
1087 	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1088 
1089 	newtp = tcp_sk(newsk);
1090 	newinet = inet_sk(newsk);
1091 	newnp = inet6_sk(newsk);
1092 
1093 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1094 
1095 	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1096 	newnp->saddr = ireq->ir_v6_loc_addr;
1097 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1098 	newsk->sk_bound_dev_if = ireq->ir_iif;
1099 
1100 	/* Now IPv6 options...
1101 
1102 	   First: no IPv4 options.
1103 	 */
1104 	newinet->inet_opt = NULL;
1105 	newnp->ipv6_ac_list = NULL;
1106 	newnp->ipv6_fl_list = NULL;
1107 
1108 	/* Clone RX bits */
1109 	newnp->rxopt.all = np->rxopt.all;
1110 
1111 	newnp->pktoptions = NULL;
1112 	newnp->opt	  = NULL;
1113 	newnp->mcast_oif  = tcp_v6_iif(skb);
1114 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1115 	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1116 	if (np->repflow)
1117 		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1118 
1119 	/* Clone native IPv6 options from listening socket (if any)
1120 
1121 	   Yes, keeping a reference count would be much more clever,
1122 	   but we do one more thing here: we reattach optmem
1123 	   to newsk.
1124 	 */
1125 	opt = ireq->ipv6_opt;
1126 	if (!opt)
1127 		opt = rcu_dereference(np->opt);
1128 	if (opt) {
1129 		opt = ipv6_dup_options(newsk, opt);
1130 		RCU_INIT_POINTER(newnp->opt, opt);
1131 	}
1132 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1133 	if (opt)
1134 		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1135 						    opt->opt_flen;
1136 
1137 	tcp_ca_openreq_child(newsk, dst);
1138 
1139 	tcp_sync_mss(newsk, dst_mtu(dst));
1140 	newtp->advmss = dst_metric_advmss(dst);
1141 	if (tcp_sk(sk)->rx_opt.user_mss &&
1142 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1143 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1144 
1145 	tcp_initialize_rcv_mss(newsk);
1146 
1147 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1148 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1149 
1150 #ifdef CONFIG_TCP_MD5SIG
1151 	/* Copy over the MD5 key from the original socket */
1152 	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1153 	if (key) {
1154 		/* We're using one, so create a matching key
1155 		 * on the newsk structure. If we fail to get
1156 		 * memory, then we end up not copying the key
1157 		 * across. Shucks.
1158 		 */
1159 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1160 			       AF_INET6, key->key, key->keylen,
1161 			       sk_gfp_mask(sk, GFP_ATOMIC));
1162 	}
1163 #endif
1164 
1165 	if (__inet_inherit_port(sk, newsk) < 0) {
1166 		inet_csk_prepare_forced_close(newsk);
1167 		tcp_done(newsk);
1168 		goto out;
1169 	}
1170 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1171 	if (*own_req) {
1172 		tcp_move_syn(newtp, req);
1173 
1174 		/* Clone pktoptions received with SYN, if we own the req */
1175 		if (ireq->pktopts) {
1176 			newnp->pktoptions = skb_clone(ireq->pktopts,
1177 						      sk_gfp_mask(sk, GFP_ATOMIC));
1178 			consume_skb(ireq->pktopts);
1179 			ireq->pktopts = NULL;
1180 			if (newnp->pktoptions)
1181 				skb_set_owner_r(newnp->pktoptions, newsk);
1182 		}
1183 	}
1184 
1185 	return newsk;
1186 
1187 out_overflow:
1188 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1189 out_nonewsk:
1190 	dst_release(dst);
1191 out:
1192 	tcp_listendrop(sk);
1193 	return NULL;
1194 }
1195 
1196 /* The socket must have its spinlock held when we get
1197  * here, unless it is a TCP_LISTEN socket.
1198  *
1199  * We have a potential double-lock case here, so even when
1200  * doing backlog processing we use the BH locking scheme.
1201  * This is because we cannot sleep with the original spinlock
1202  * held.
1203  */
1204 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1205 {
1206 	struct ipv6_pinfo *np = inet6_sk(sk);
1207 	struct tcp_sock *tp;
1208 	struct sk_buff *opt_skb = NULL;
1209 
1210 	/* Imagine: the socket is IPv6. An IPv4 packet arrives,
1211 	   goes to the IPv4 receive handler and gets backlogged.
1212 	   From the backlog it always ends up here. Kerboom...
1213 	   Fortunately, tcp_rcv_established and rcv_established
1214 	   handle them correctly, but it is not the case with
1215 	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1216 	 */
1217 
1218 	if (skb->protocol == htons(ETH_P_IP))
1219 		return tcp_v4_do_rcv(sk, skb);
1220 
1221 	if (sk_filter(sk, skb))
1222 		goto discard;
1223 
1224 	/*
1225 	 *	socket locking is here for SMP purposes as backlog rcv
1226 	 *	is currently called with bh processing disabled.
1227 	 */
1228 
1229 	/* Do Stevens' IPV6_PKTOPTIONS.
1230 
1231 	   Yes, guys, it is the only place in our code where we
1232 	   may make it without affecting IPv4.
1233 	   The rest of the code is protocol independent,
1234 	   and I do not like the idea of uglifying IPv4.
1235 
1236 	   Actually, the whole idea behind IPV6_PKTOPTIONS
1237 	   does not look very well thought out. For now we latch the
1238 	   options received in the last packet enqueued
1239 	   by tcp. Feel free to propose a better solution.
1240 					       --ANK (980728)
1241 	 */
1242 	if (np->rxopt.all)
1243 		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1244 
1245 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1246 		struct dst_entry *dst = sk->sk_rx_dst;
1247 
1248 		sock_rps_save_rxhash(sk, skb);
1249 		sk_mark_napi_id(sk, skb);
1250 		if (dst) {
1251 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1252 			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1253 				dst_release(dst);
1254 				sk->sk_rx_dst = NULL;
1255 			}
1256 		}
1257 
1258 		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1259 		if (opt_skb)
1260 			goto ipv6_pktoptions;
1261 		return 0;
1262 	}
1263 
1264 	if (tcp_checksum_complete(skb))
1265 		goto csum_err;
1266 
1267 	if (sk->sk_state == TCP_LISTEN) {
1268 		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1269 
1270 		if (!nsk)
1271 			goto discard;
1272 
1273 		if (nsk != sk) {
1274 			sock_rps_save_rxhash(nsk, skb);
1275 			sk_mark_napi_id(nsk, skb);
1276 			if (tcp_child_process(sk, nsk, skb))
1277 				goto reset;
1278 			if (opt_skb)
1279 				__kfree_skb(opt_skb);
1280 			return 0;
1281 		}
1282 	} else
1283 		sock_rps_save_rxhash(sk, skb);
1284 
1285 	if (tcp_rcv_state_process(sk, skb))
1286 		goto reset;
1287 	if (opt_skb)
1288 		goto ipv6_pktoptions;
1289 	return 0;
1290 
1291 reset:
1292 	tcp_v6_send_reset(sk, skb);
1293 discard:
1294 	if (opt_skb)
1295 		__kfree_skb(opt_skb);
1296 	kfree_skb(skb);
1297 	return 0;
1298 csum_err:
1299 	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1300 	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1301 	goto discard;
1302 
1303 
1304 ipv6_pktoptions:
1305 	/* Do you ask what this is?
1306 
1307 	   1. skb was enqueued by tcp.
1308 	   2. skb was added to the tail of the read queue, rather than out of order.
1309 	   3. The socket is not in a passive state.
1310 	   4. Finally, it really contains options which the user wants to receive.
1311 	 */
1312 	tp = tcp_sk(sk);
1313 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1314 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1315 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1316 			np->mcast_oif = tcp_v6_iif(opt_skb);
1317 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1318 			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1319 		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1320 			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1321 		if (np->repflow)
1322 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1323 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1324 			skb_set_owner_r(opt_skb, sk);
1325 			opt_skb = xchg(&np->pktoptions, opt_skb);
1326 		} else {
1327 			__kfree_skb(opt_skb);
1328 			opt_skb = xchg(&np->pktoptions, NULL);
1329 		}
1330 	}
1331 
1332 	kfree_skb(opt_skb);
1333 	return 0;
1334 }
1335 
1336 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1337 			   const struct tcphdr *th)
1338 {
1339 	/* This is tricky: we move IP6CB at its correct location into
1340 	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1341 	 * _decode_session6() uses IP6CB().
1342 	 * barrier() makes sure compiler won't play aliasing games.
1343 	 */
1344 	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1345 		sizeof(struct inet6_skb_parm));
1346 	barrier();
1347 
1348 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1349 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1350 				    skb->len - th->doff*4);
1351 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1352 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1353 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1354 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1355 	TCP_SKB_CB(skb)->sacked = 0;
1356 }
1357 
1358 static void tcp_v6_restore_cb(struct sk_buff *skb)
1359 {
1360 	/* We need to move header back to the beginning if xfrm6_policy_check()
1361 	 * and tcp_v6_fill_cb() are going to be called again.
1362 	 */
1363 	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1364 		sizeof(struct inet6_skb_parm));
1365 }
1366 
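/* Main receive path for IPv6 TCP segments: validate the header and checksum,
 * look up the owning socket (request, time-wait or full), and either process
 * the segment directly, queue it to the backlog, or answer with a RST.
 */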
1367 static int tcp_v6_rcv(struct sk_buff *skb)
1368 {
1369 	const struct tcphdr *th;
1370 	const struct ipv6hdr *hdr;
1371 	bool refcounted;
1372 	struct sock *sk;
1373 	int ret;
1374 	struct net *net = dev_net(skb->dev);
1375 
1376 	if (skb->pkt_type != PACKET_HOST)
1377 		goto discard_it;
1378 
1379 	/*
1380 	 *	Count it even if it's bad.
1381 	 */
1382 	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1383 
1384 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1385 		goto discard_it;
1386 
1387 	th = (const struct tcphdr *)skb->data;
1388 
1389 	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1390 		goto bad_packet;
1391 	if (!pskb_may_pull(skb, th->doff*4))
1392 		goto discard_it;
1393 
1394 	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1395 		goto csum_error;
1396 
1397 	th = (const struct tcphdr *)skb->data;
1398 	hdr = ipv6_hdr(skb);
1399 
1400 lookup:
1401 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1402 				th->source, th->dest, inet6_iif(skb),
1403 				&refcounted);
1404 	if (!sk)
1405 		goto no_tcp_socket;
1406 
1407 process:
1408 	if (sk->sk_state == TCP_TIME_WAIT)
1409 		goto do_time_wait;
1410 
1411 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1412 		struct request_sock *req = inet_reqsk(sk);
1413 		struct sock *nsk;
1414 
1415 		sk = req->rsk_listener;
1416 		tcp_v6_fill_cb(skb, hdr, th);
1417 		if (tcp_v6_inbound_md5_hash(sk, skb)) {
1418 			reqsk_put(req);
1419 			goto discard_it;
1420 		}
1421 		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1422 			inet_csk_reqsk_queue_drop_and_put(sk, req);
1423 			goto lookup;
1424 		}
1425 		sock_hold(sk);
1426 		refcounted = true;
1427 		nsk = tcp_check_req(sk, skb, req, false);
1428 		if (!nsk) {
1429 			reqsk_put(req);
1430 			goto discard_and_relse;
1431 		}
1432 		if (nsk == sk) {
1433 			reqsk_put(req);
1434 			tcp_v6_restore_cb(skb);
1435 		} else if (tcp_child_process(sk, nsk, skb)) {
1436 			tcp_v6_send_reset(nsk, skb);
1437 			goto discard_and_relse;
1438 		} else {
1439 			sock_put(sk);
1440 			return 0;
1441 		}
1442 	}
1443 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1444 		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1445 		goto discard_and_relse;
1446 	}
1447 
1448 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1449 		goto discard_and_relse;
1450 
1451 	tcp_v6_fill_cb(skb, hdr, th);
1452 
1453 	if (tcp_v6_inbound_md5_hash(sk, skb))
1454 		goto discard_and_relse;
1455 
1456 	if (sk_filter(sk, skb))
1457 		goto discard_and_relse;
1458 
1459 	skb->dev = NULL;
1460 
1461 	if (sk->sk_state == TCP_LISTEN) {
1462 		ret = tcp_v6_do_rcv(sk, skb);
1463 		goto put_and_return;
1464 	}
1465 
1466 	sk_incoming_cpu_update(sk);
1467 
1468 	bh_lock_sock_nested(sk);
1469 	tcp_segs_in(tcp_sk(sk), skb);
1470 	ret = 0;
1471 	if (!sock_owned_by_user(sk)) {
1472 		if (!tcp_prequeue(sk, skb))
1473 			ret = tcp_v6_do_rcv(sk, skb);
1474 	} else if (unlikely(sk_add_backlog(sk, skb,
1475 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1476 		bh_unlock_sock(sk);
1477 		__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
1478 		goto discard_and_relse;
1479 	}
1480 	bh_unlock_sock(sk);
1481 
1482 put_and_return:
1483 	if (refcounted)
1484 		sock_put(sk);
1485 	return ret ? -1 : 0;
1486 
1487 no_tcp_socket:
1488 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1489 		goto discard_it;
1490 
1491 	tcp_v6_fill_cb(skb, hdr, th);
1492 
1493 	if (tcp_checksum_complete(skb)) {
1494 csum_error:
1495 		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1496 bad_packet:
1497 		__TCP_INC_STATS(net, TCP_MIB_INERRS);
1498 	} else {
1499 		tcp_v6_send_reset(NULL, skb);
1500 	}
1501 
1502 discard_it:
1503 	kfree_skb(skb);
1504 	return 0;
1505 
1506 discard_and_relse:
1507 	sk_drops_add(sk, skb);
1508 	if (refcounted)
1509 		sock_put(sk);
1510 	goto discard_it;
1511 
1512 do_time_wait:
1513 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1514 		inet_twsk_put(inet_twsk(sk));
1515 		goto discard_it;
1516 	}
1517 
1518 	tcp_v6_fill_cb(skb, hdr, th);
1519 
1520 	if (tcp_checksum_complete(skb)) {
1521 		inet_twsk_put(inet_twsk(sk));
1522 		goto csum_error;
1523 	}
1524 
1525 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1526 	case TCP_TW_SYN:
1527 	{
1528 		struct sock *sk2;
1529 
1530 		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1531 					    skb, __tcp_hdrlen(th),
1532 					    &ipv6_hdr(skb)->saddr, th->source,
1533 					    &ipv6_hdr(skb)->daddr,
1534 					    ntohs(th->dest), tcp_v6_iif(skb));
1535 		if (sk2) {
1536 			struct inet_timewait_sock *tw = inet_twsk(sk);
1537 			inet_twsk_deschedule_put(tw);
1538 			sk = sk2;
1539 			tcp_v6_restore_cb(skb);
1540 			refcounted = false;
1541 			goto process;
1542 		}
1543 		/* Fall through to ACK */
1544 	}
1545 	case TCP_TW_ACK:
1546 		tcp_v6_timewait_ack(sk, skb);
1547 		break;
1548 	case TCP_TW_RST:
1549 		tcp_v6_restore_cb(skb);
1550 		tcp_v6_send_reset(sk, skb);
1551 		inet_twsk_deschedule_put(inet_twsk(sk));
1552 		goto discard_it;
1553 	case TCP_TW_SUCCESS:
1554 		;
1555 	}
1556 	goto discard_it;
1557 }
1558 
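/* Early demux: look up an established socket for the incoming segment before
 * routing, so its cached dst can be attached to the skb and the later socket
 * lookup in tcp_v6_rcv() can be short-circuited.
 */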
1559 static void tcp_v6_early_demux(struct sk_buff *skb)
1560 {
1561 	const struct ipv6hdr *hdr;
1562 	const struct tcphdr *th;
1563 	struct sock *sk;
1564 
1565 	if (skb->pkt_type != PACKET_HOST)
1566 		return;
1567 
1568 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1569 		return;
1570 
1571 	hdr = ipv6_hdr(skb);
1572 	th = tcp_hdr(skb);
1573 
1574 	if (th->doff < sizeof(struct tcphdr) / 4)
1575 		return;
1576 
1577 	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
1578 	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1579 					&hdr->saddr, th->source,
1580 					&hdr->daddr, ntohs(th->dest),
1581 					inet6_iif(skb));
1582 	if (sk) {
1583 		skb->sk = sk;
1584 		skb->destructor = sock_edemux;
1585 		if (sk_fullsock(sk)) {
1586 			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1587 
1588 			if (dst)
1589 				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1590 			if (dst &&
1591 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1592 				skb_dst_set_noref(skb, dst);
1593 		}
1594 	}
1595 }
1596 
1597 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1598 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1599 	.twsk_unique	= tcp_twsk_unique,
1600 	.twsk_destructor = tcp_twsk_destructor,
1601 };
1602 
1603 static const struct inet_connection_sock_af_ops ipv6_specific = {
1604 	.queue_xmit	   = inet6_csk_xmit,
1605 	.send_check	   = tcp_v6_send_check,
1606 	.rebuild_header	   = inet6_sk_rebuild_header,
1607 	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1608 	.conn_request	   = tcp_v6_conn_request,
1609 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1610 	.net_header_len	   = sizeof(struct ipv6hdr),
1611 	.net_frag_header_len = sizeof(struct frag_hdr),
1612 	.setsockopt	   = ipv6_setsockopt,
1613 	.getsockopt	   = ipv6_getsockopt,
1614 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1615 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1616 	.bind_conflict	   = inet6_csk_bind_conflict,
1617 #ifdef CONFIG_COMPAT
1618 	.compat_setsockopt = compat_ipv6_setsockopt,
1619 	.compat_getsockopt = compat_ipv6_getsockopt,
1620 #endif
1621 	.mtu_reduced	   = tcp_v6_mtu_reduced,
1622 };
1623 
1624 #ifdef CONFIG_TCP_MD5SIG
1625 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1626 	.md5_lookup	=	tcp_v6_md5_lookup,
1627 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1628 	.md5_parse	=	tcp_v6_parse_md5_keys,
1629 };
1630 #endif
1631 
1632 /*
1633  *	TCP over IPv4 via INET6 API
1634  */
1635 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1636 	.queue_xmit	   = ip_queue_xmit,
1637 	.send_check	   = tcp_v4_send_check,
1638 	.rebuild_header	   = inet_sk_rebuild_header,
1639 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1640 	.conn_request	   = tcp_v6_conn_request,
1641 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1642 	.net_header_len	   = sizeof(struct iphdr),
1643 	.setsockopt	   = ipv6_setsockopt,
1644 	.getsockopt	   = ipv6_getsockopt,
1645 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1646 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1647 	.bind_conflict	   = inet6_csk_bind_conflict,
1648 #ifdef CONFIG_COMPAT
1649 	.compat_setsockopt = compat_ipv6_setsockopt,
1650 	.compat_getsockopt = compat_ipv6_getsockopt,
1651 #endif
1652 	.mtu_reduced	   = tcp_v4_mtu_reduced,
1653 };
1654 
1655 #ifdef CONFIG_TCP_MD5SIG
1656 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1657 	.md5_lookup	=	tcp_v4_md5_lookup,
1658 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1659 	.md5_parse	=	tcp_v6_parse_md5_keys,
1660 };
1661 #endif
1662 
1663 /* NOTE: A lot of things are set to zero explicitly by the call to
1664  *       sk_alloc(), so they need not be done here.
1665  */
1666 static int tcp_v6_init_sock(struct sock *sk)
1667 {
1668 	struct inet_connection_sock *icsk = inet_csk(sk);
1669 
1670 	tcp_init_sock(sk);
1671 
1672 	icsk->icsk_af_ops = &ipv6_specific;
1673 
1674 #ifdef CONFIG_TCP_MD5SIG
1675 	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1676 #endif
1677 
1678 	return 0;
1679 }
1680 
1681 static void tcp_v6_destroy_sock(struct sock *sk)
1682 {
1683 	tcp_v4_destroy_sock(sk);
1684 	inet6_destroy_sock(sk);
1685 }
1686 
1687 #ifdef CONFIG_PROC_FS
1688 /* Proc filesystem TCPv6 sock list dumping. */
1689 static void get_openreq6(struct seq_file *seq,
1690 			 const struct request_sock *req, int i)
1691 {
1692 	long ttd = req->rsk_timer.expires - jiffies;
1693 	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1694 	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1695 
1696 	if (ttd < 0)
1697 		ttd = 0;
1698 
1699 	seq_printf(seq,
1700 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1701 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1702 		   i,
1703 		   src->s6_addr32[0], src->s6_addr32[1],
1704 		   src->s6_addr32[2], src->s6_addr32[3],
1705 		   inet_rsk(req)->ir_num,
1706 		   dest->s6_addr32[0], dest->s6_addr32[1],
1707 		   dest->s6_addr32[2], dest->s6_addr32[3],
1708 		   ntohs(inet_rsk(req)->ir_rmt_port),
1709 		   TCP_SYN_RECV,
1710 		   0, 0, /* could print option size, but that is af dependent. */
1711 		   1,   /* timers active (only the expire timer) */
1712 		   jiffies_to_clock_t(ttd),
1713 		   req->num_timeout,
1714 		   from_kuid_munged(seq_user_ns(seq),
1715 				    sock_i_uid(req->rsk_listener)),
1716 		   0,  /* non standard timer */
1717 		   0, /* open_requests have no inode */
1718 		   0, req);
1719 }
1720 
1721 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1722 {
1723 	const struct in6_addr *dest, *src;
1724 	__u16 destp, srcp;
1725 	int timer_active;
1726 	unsigned long timer_expires;
1727 	const struct inet_sock *inet = inet_sk(sp);
1728 	const struct tcp_sock *tp = tcp_sk(sp);
1729 	const struct inet_connection_sock *icsk = inet_csk(sp);
1730 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1731 	int rx_queue;
1732 	int state;
1733 
1734 	dest  = &sp->sk_v6_daddr;
1735 	src   = &sp->sk_v6_rcv_saddr;
1736 	destp = ntohs(inet->inet_dport);
1737 	srcp  = ntohs(inet->inet_sport);
1738 
1739 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1740 	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
1741 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1742 		timer_active	= 1;
1743 		timer_expires	= icsk->icsk_timeout;
1744 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1745 		timer_active	= 4;
1746 		timer_expires	= icsk->icsk_timeout;
1747 	} else if (timer_pending(&sp->sk_timer)) {
1748 		timer_active	= 2;
1749 		timer_expires	= sp->sk_timer.expires;
1750 	} else {
1751 		timer_active	= 0;
1752 		timer_expires = jiffies;
1753 	}
1754 
1755 	state = sk_state_load(sp);
1756 	if (state == TCP_LISTEN)
1757 		rx_queue = sp->sk_ack_backlog;
1758 	else
1759 		/* Because we don't lock the socket,
1760 		 * we might find a transient negative value.
1761 		 */
1762 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1763 
1764 	seq_printf(seq,
1765 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1766 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1767 		   i,
1768 		   src->s6_addr32[0], src->s6_addr32[1],
1769 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1770 		   dest->s6_addr32[0], dest->s6_addr32[1],
1771 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1772 		   state,
1773 		   tp->write_seq - tp->snd_una,
1774 		   rx_queue,
1775 		   timer_active,
1776 		   jiffies_delta_to_clock_t(timer_expires - jiffies),
1777 		   icsk->icsk_retransmits,
1778 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1779 		   icsk->icsk_probes_out,
1780 		   sock_i_ino(sp),
1781 		   atomic_read(&sp->sk_refcnt), sp,
1782 		   jiffies_to_clock_t(icsk->icsk_rto),
1783 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1784 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1785 		   tp->snd_cwnd,
1786 		   state == TCP_LISTEN ?
1787 			fastopenq->max_qlen :
1788 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1789 		   );
1790 }
1791 
1792 static void get_timewait6_sock(struct seq_file *seq,
1793 			       struct inet_timewait_sock *tw, int i)
1794 {
1795 	long delta = tw->tw_timer.expires - jiffies;
1796 	const struct in6_addr *dest, *src;
1797 	__u16 destp, srcp;
1798 
1799 	dest = &tw->tw_v6_daddr;
1800 	src  = &tw->tw_v6_rcv_saddr;
1801 	destp = ntohs(tw->tw_dport);
1802 	srcp  = ntohs(tw->tw_sport);
1803 
1804 	seq_printf(seq,
1805 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1806 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1807 		   i,
1808 		   src->s6_addr32[0], src->s6_addr32[1],
1809 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1810 		   dest->s6_addr32[0], dest->s6_addr32[1],
1811 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1812 		   tw->tw_substate, 0, 0,
1813 		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1814 		   atomic_read(&tw->tw_refcnt), tw);
1815 }
1816 
1817 static int tcp6_seq_show(struct seq_file *seq, void *v)
1818 {
1819 	struct tcp_iter_state *st;
1820 	struct sock *sk = v;
1821 
1822 	if (v == SEQ_START_TOKEN) {
1823 		seq_puts(seq,
1824 			 "  sl  "
1825 			 "local_address                         "
1826 			 "remote_address                        "
1827 			 "st tx_queue rx_queue tr tm->when retrnsmt"
1828 			 "   uid  timeout inode\n");
1829 		goto out;
1830 	}
1831 	st = seq->private;
1832 
1833 	if (sk->sk_state == TCP_TIME_WAIT)
1834 		get_timewait6_sock(seq, v, st->num);
1835 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
1836 		get_openreq6(seq, v, st->num);
1837 	else
1838 		get_tcp6_sock(seq, v, st->num);
1839 out:
1840 	return 0;
1841 }
1842 
1843 static const struct file_operations tcp6_afinfo_seq_fops = {
1844 	.owner   = THIS_MODULE,
1845 	.open    = tcp_seq_open,
1846 	.read    = seq_read,
1847 	.llseek  = seq_lseek,
1848 	.release = seq_release_net
1849 };
1850 
1851 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1852 	.name		= "tcp6",
1853 	.family		= AF_INET6,
1854 	.seq_fops	= &tcp6_afinfo_seq_fops,
1855 	.seq_ops	= {
1856 		.show		= tcp6_seq_show,
1857 	},
1858 };
1859 
1860 int __net_init tcp6_proc_init(struct net *net)
1861 {
1862 	return tcp_proc_register(net, &tcp6_seq_afinfo);
1863 }
1864 
1865 void tcp6_proc_exit(struct net *net)
1866 {
1867 	tcp_proc_unregister(net, &tcp6_seq_afinfo);
1868 }
1869 #endif
1870 
1871 static void tcp_v6_clear_sk(struct sock *sk, int size)
1872 {
1873 	struct inet_sock *inet = inet_sk(sk);
1874 
1875 	/* we do not want to clear the pinet6 field, because of RCU lookups */
1876 	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1877 
1878 	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1879 	memset(&inet->pinet6 + 1, 0, size);
1880 }
1881 
1882 struct proto tcpv6_prot = {
1883 	.name			= "TCPv6",
1884 	.owner			= THIS_MODULE,
1885 	.close			= tcp_close,
1886 	.connect		= tcp_v6_connect,
1887 	.disconnect		= tcp_disconnect,
1888 	.accept			= inet_csk_accept,
1889 	.ioctl			= tcp_ioctl,
1890 	.init			= tcp_v6_init_sock,
1891 	.destroy		= tcp_v6_destroy_sock,
1892 	.shutdown		= tcp_shutdown,
1893 	.setsockopt		= tcp_setsockopt,
1894 	.getsockopt		= tcp_getsockopt,
1895 	.recvmsg		= tcp_recvmsg,
1896 	.sendmsg		= tcp_sendmsg,
1897 	.sendpage		= tcp_sendpage,
1898 	.backlog_rcv		= tcp_v6_do_rcv,
1899 	.release_cb		= tcp_release_cb,
1900 	.hash			= inet6_hash,
1901 	.unhash			= inet_unhash,
1902 	.get_port		= inet_csk_get_port,
1903 	.enter_memory_pressure	= tcp_enter_memory_pressure,
1904 	.stream_memory_free	= tcp_stream_memory_free,
1905 	.sockets_allocated	= &tcp_sockets_allocated,
1906 	.memory_allocated	= &tcp_memory_allocated,
1907 	.memory_pressure	= &tcp_memory_pressure,
1908 	.orphan_count		= &tcp_orphan_count,
1909 	.sysctl_mem		= sysctl_tcp_mem,
1910 	.sysctl_wmem		= sysctl_tcp_wmem,
1911 	.sysctl_rmem		= sysctl_tcp_rmem,
1912 	.max_header		= MAX_TCP_HEADER,
1913 	.obj_size		= sizeof(struct tcp6_sock),
1914 	.slab_flags		= SLAB_DESTROY_BY_RCU,
1915 	.twsk_prot		= &tcp6_timewait_sock_ops,
1916 	.rsk_prot		= &tcp6_request_sock_ops,
1917 	.h.hashinfo		= &tcp_hashinfo,
1918 	.no_autobind		= true,
1919 #ifdef CONFIG_COMPAT
1920 	.compat_setsockopt	= compat_tcp_setsockopt,
1921 	.compat_getsockopt	= compat_tcp_getsockopt,
1922 #endif
1923 	.clear_sk		= tcp_v6_clear_sk,
1924 	.diag_destroy		= tcp_abort,
1925 };
1926 
1927 static const struct inet6_protocol tcpv6_protocol = {
1928 	.early_demux	=	tcp_v6_early_demux,
1929 	.handler	=	tcp_v6_rcv,
1930 	.err_handler	=	tcp_v6_err,
1931 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1932 };
1933 
1934 static struct inet_protosw tcpv6_protosw = {
1935 	.type		=	SOCK_STREAM,
1936 	.protocol	=	IPPROTO_TCP,
1937 	.prot		=	&tcpv6_prot,
1938 	.ops		=	&inet6_stream_ops,
1939 	.flags		=	INET_PROTOSW_PERMANENT |
1940 				INET_PROTOSW_ICSK,
1941 };
1942 
1943 static int __net_init tcpv6_net_init(struct net *net)
1944 {
1945 	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1946 				    SOCK_RAW, IPPROTO_TCP, net);
1947 }
1948 
1949 static void __net_exit tcpv6_net_exit(struct net *net)
1950 {
1951 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1952 }
1953 
1954 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1955 {
1956 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1957 }
1958 
1959 static struct pernet_operations tcpv6_net_ops = {
1960 	.init	    = tcpv6_net_init,
1961 	.exit	    = tcpv6_net_exit,
1962 	.exit_batch = tcpv6_net_exit_batch,
1963 };
1964 
1965 int __init tcpv6_init(void)
1966 {
1967 	int ret;
1968 
1969 	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1970 	if (ret)
1971 		goto out;
1972 
1973 	/* register inet6 protocol */
1974 	ret = inet6_register_protosw(&tcpv6_protosw);
1975 	if (ret)
1976 		goto out_tcpv6_protocol;
1977 
1978 	ret = register_pernet_subsys(&tcpv6_net_ops);
1979 	if (ret)
1980 		goto out_tcpv6_protosw;
1981 out:
1982 	return ret;
1983 
1984 out_tcpv6_protosw:
1985 	inet6_unregister_protosw(&tcpv6_protosw);
1986 out_tcpv6_protocol:
1987 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1988 	goto out;
1989 }
1990 
1991 void tcpv6_exit(void)
1992 {
1993 	unregister_pernet_subsys(&tcpv6_net_ops);
1994 	inet6_unregister_protosw(&tcpv6_protosw);
1995 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1996 }
1997