/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support the IPV6_V6ONLY socket option,
 *	Alexey Kuznetsov		which allows both IPv4 and IPv6 sockets
 *					to bind a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}
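/* Initial sequence numbers follow the RFC 6528 scheme: a clock component
 * plus a keyed hash of the connection 4-tuple, roughly
 *
 *	ISN = M(t) + F(saddr, daddr, sport, dport, secret)
 *
 * secure_tcpv6_sequence_number() implements this (see net/core/secure_seq.c),
 * making ISNs hard for off-path attackers to guess while keeping them
 * monotonic for a given 4-tuple.
 */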
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
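
/* For reference, the user-space view of the paths above (an illustrative
 * sketch only; error handling omitted and "fd" is just a local name):
 * connect() on an AF_INET6 stream socket lands in tcp_v6_connect(), and a
 * v4-mapped destination such as ::ffff:192.0.2.1 takes the
 * IPV6_ADDR_MAPPED branch into tcp_v4_connect(), unless IPV6_V6ONLY is
 * set on the socket, in which case it fails with -ENETUNREACH.
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 sa = { .sin6_family = AF_INET6,
 *				   .sin6_port = htons(80) };
 *
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &sa.sin6_addr);
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 */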

static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
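
/* Illustrative user-space sketch of the setsockopt handled above (error
 * handling omitted; "fd" and the key are placeholders; struct tcp_md5sig
 * fields per <linux/tcp.h>):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *peer = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	peer->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &peer->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * As the code above shows, a zero tcpm_keylen deletes the key for that
 * peer, and a v4-mapped tcpm_addr installs an AF_INET key instead.
 */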

static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
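
/* The pseudo-header hashed above mirrors the RFC 2460 section 8.1 layout,
 * with the length and protocol fields carried as 32-bit words:
 *
 *	+---------------------------------------+
 *	|            source address             |  16 bytes
 *	+---------------------------------------+
 *	|          destination address          |  16 bytes
 *	+---------------------------------------+
 *	|       upper-layer packet length       |   4 bytes
 *	+---------------------------------------+
 *	|    zero (3 bytes)    |  next header   |   4 bytes
 *	+---------------------------------------+
 */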

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};

static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup_flow even when it is for an RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped
	 * our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here: the
		 * incoming packet is checked with the md5 hash using the
		 * found key; no RST is generated if the md5 hash doesn't
		 * match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, NULL, 0,
					   &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}

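/* SYN cookies defer connection state: when the listener's SYN queue
 * overflowed, the SYN,ACK carried an encoded cookie instead of a queued
 * request, so validation happens on the ACK that completes the handshake
 * (a segment without SYN). cookie_v6_check() then rebuilds the request
 * from the cookie, or returns sk unchanged when cookies are not in play.
 */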
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		 * worked with IPv6 icsk.icsk_af_ops.
		 * Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...
	 *
	 * First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).
	 *
	 * Yes, keeping a reference count would be much more clever, but we
	 * do one more thing here: reattach optmem to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions)
				skb_set_owner_r(newnp->pktoptions, newsk);
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	 * goes to the IPv4 receive handler and is backlogged.
	 * From the backlog it always goes here. Kerboom...
	 * Fortunately, tcp_rcv_established and rcv_established
	 * handle them correctly, but it is not the case with
	 * tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.
	 *
	 * Yes, guys, it is the only place in our code where we
	 * may make it not affect IPv4.
	 * The rest of the code is protocol independent,
	 * and I do not like the idea of uglifying IPv4.
	 *
	 * Actually, all the idea behind IPV6_PKTOPTIONS
	 * looks not very well thought out. For now we latch
	 * options received in the last packet enqueued
	 * by tcp. Feel free to propose a better solution.
	 *					--ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?
	 *
	 * 1. skb was enqueued by tcp.
	 * 2. skb is added to tail of read queue, rather than out of order.
	 * 3. socket is not in passive state.
	 * 4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb),
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

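/* Early demux runs from the IPv6 input path before the routing decision:
 * if the segment matches an established socket, the dst cached on that
 * socket by inet6_sk_rx_dst_set() can be reused and the per-packet route
 * lookup skipped.
 */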
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
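
/* An illustrative /proc/net/tcp6 entry as emitted above (wrapped here for
 * readability; real entries are a single line, and the inode and pointer
 * values below are placeholders). Addresses are the four s6_addr32 words
 * printed back to back in hex, ports are hex, and state 0A is TCP_LISTEN,
 * so this shows a socket listening on [::]:22:
 *
 *    0: 00000000000000000000000000000000:0016
 *       00000000000000000000000000000000:0000
 *       0A 00000000:00000000 00:00000000 00000000 0 0 18000 1 <ptr> ...
 */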

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.clear_sk		= tcp_v6_clear_sk,
	.diag_destroy		= tcp_abort,
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}