xref: /openbmc/linux/net/ipv6/tcp_ipv6.c (revision 80c1834fc86c2bbacb54a8fc3c04a8b0066b0996)
1 /*
2  *	TCP over IPv6
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	Based on:
9  *	linux/net/ipv4/tcp.c
10  *	linux/net/ipv4/tcp_input.c
11  *	linux/net/ipv4/tcp_output.c
12  *
13  *	Fixes:
14  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
15  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
16  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
17  *					a single port at the same time.
18  *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
19  *
20  *	This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
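
/*
 * A minimal userspace sketch of the IPV6_V6ONLY option mentioned in the
 * fixes above (illustrative only; not part of this file). With the option
 * clear, one IPv6 listener also serves IPv4 clients via v4-mapped
 * addresses; with it set, a separate IPv4 socket may bind the same port:
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
 */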
25 
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46 
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63 #include <net/inet_common.h>
64 #include <net/secure_seq.h>
65 #include <net/tcp_memcontrol.h>
66 
67 #include <asm/uaccess.h>
68 
69 #include <linux/proc_fs.h>
70 #include <linux/seq_file.h>
71 
72 #include <linux/crypto.h>
73 #include <linux/scatterlist.h>
74 
75 static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
76 static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
77 				      struct request_sock *req);
78 
79 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
80 static void	__tcp_v6_send_check(struct sk_buff *skb,
81 				    const struct in6_addr *saddr,
82 				    const struct in6_addr *daddr);
83 
84 static const struct inet_connection_sock_af_ops ipv6_mapped;
85 static const struct inet_connection_sock_af_ops ipv6_specific;
86 #ifdef CONFIG_TCP_MD5SIG
87 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
88 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
89 #else
90 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
91 						   const struct in6_addr *addr)
92 {
93 	return NULL;
94 }
95 #endif
96 
97 static void tcp_v6_hash(struct sock *sk)
98 {
99 	if (sk->sk_state != TCP_CLOSE) {
100 		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
101 			tcp_prot.hash(sk);
102 			return;
103 		}
104 		local_bh_disable();
105 		__inet6_hash(sk, NULL);
106 		local_bh_enable();
107 	}
108 }
109 
110 static __inline__ __sum16 tcp_v6_check(int len,
111 				   const struct in6_addr *saddr,
112 				   const struct in6_addr *daddr,
113 				   __wsum base)
114 {
115 	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
116 }
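
/*
 * csum_ipv6_magic() folds the RFC 2460 section 8.1 pseudo-header (source
 * address, destination address, upper-layer length, next header) into the
 * running sum. A portable sketch of the same computation, illustrative
 * only; the real helpers are arch-optimized:
 *
 *	u64 sum = (__force u64)base;
 *	int i;
 *	for (i = 0; i < 4; i++)
 *		sum += (__force u32)saddr->s6_addr32[i] +
 *		       (__force u32)daddr->s6_addr32[i];
 *	sum += (__force u32)htonl(len) + (__force u32)htonl(IPPROTO_TCP);
 *	while (sum >> 32)
 *		sum = (sum & 0xffffffffULL) + (sum >> 32);
 *	return csum_fold((__force __wsum)sum);
 *
 * The while loop folds carries back into the low 32 bits; csum_fold()
 * then folds 32 bits down to 16 and complements the result.
 */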
117 
118 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
119 {
120 	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
121 					    ipv6_hdr(skb)->saddr.s6_addr32,
122 					    tcp_hdr(skb)->dest,
123 					    tcp_hdr(skb)->source);
124 }
125 
126 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
127 			  int addr_len)
128 {
129 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
130 	struct inet_sock *inet = inet_sk(sk);
131 	struct inet_connection_sock *icsk = inet_csk(sk);
132 	struct ipv6_pinfo *np = inet6_sk(sk);
133 	struct tcp_sock *tp = tcp_sk(sk);
134 	struct in6_addr *saddr = NULL, *final_p, final;
135 	struct rt6_info *rt;
136 	struct flowi6 fl6;
137 	struct dst_entry *dst;
138 	int addr_type;
139 	int err;
140 
141 	if (addr_len < SIN6_LEN_RFC2133)
142 		return -EINVAL;
143 
144 	if (usin->sin6_family != AF_INET6)
145 		return -EAFNOSUPPORT;
146 
147 	memset(&fl6, 0, sizeof(fl6));
148 
149 	if (np->sndflow) {
150 		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
151 		IP6_ECN_flow_init(fl6.flowlabel);
152 		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
153 			struct ip6_flowlabel *flowlabel;
154 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
155 			if (flowlabel == NULL)
156 				return -EINVAL;
157 			usin->sin6_addr = flowlabel->dst;
158 			fl6_sock_release(flowlabel);
159 		}
160 	}
161 
162 	/*
163 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
164 	 */
165 
166 	if (ipv6_addr_any(&usin->sin6_addr))
167 		usin->sin6_addr.s6_addr[15] = 0x1;
168 
169 	addr_type = ipv6_addr_type(&usin->sin6_addr);
170 
171 	if (addr_type & IPV6_ADDR_MULTICAST)
172 		return -ENETUNREACH;
173 
174 	if (addr_type & IPV6_ADDR_LINKLOCAL) {
175 		if (addr_len >= sizeof(struct sockaddr_in6) &&
176 		    usin->sin6_scope_id) {
177 			/* If interface is set while binding, indices
178 			 * must coincide.
179 			 */
180 			if (sk->sk_bound_dev_if &&
181 			    sk->sk_bound_dev_if != usin->sin6_scope_id)
182 				return -EINVAL;
183 
184 			sk->sk_bound_dev_if = usin->sin6_scope_id;
185 		}
186 
187 		/* Connect to link-local address requires an interface */
188 		if (!sk->sk_bound_dev_if)
189 			return -EINVAL;
190 	}
191 
192 	if (tp->rx_opt.ts_recent_stamp &&
193 	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
194 		tp->rx_opt.ts_recent = 0;
195 		tp->rx_opt.ts_recent_stamp = 0;
196 		tp->write_seq = 0;
197 	}
198 
199 	np->daddr = usin->sin6_addr;
200 	np->flow_label = fl6.flowlabel;
201 
202 	/*
203 	 *	TCP over IPv4
204 	 */
205 
206 	if (addr_type == IPV6_ADDR_MAPPED) {
207 		u32 exthdrlen = icsk->icsk_ext_hdr_len;
208 		struct sockaddr_in sin;
209 
210 		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
211 
212 		if (__ipv6_only_sock(sk))
213 			return -ENETUNREACH;
214 
215 		sin.sin_family = AF_INET;
216 		sin.sin_port = usin->sin6_port;
217 		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
218 
219 		icsk->icsk_af_ops = &ipv6_mapped;
220 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
221 #ifdef CONFIG_TCP_MD5SIG
222 		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
223 #endif
224 
225 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
226 
227 		if (err) {
228 			icsk->icsk_ext_hdr_len = exthdrlen;
229 			icsk->icsk_af_ops = &ipv6_specific;
230 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
231 #ifdef CONFIG_TCP_MD5SIG
232 			tp->af_specific = &tcp_sock_ipv6_specific;
233 #endif
234 			goto failure;
235 		} else {
236 			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
237 			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
238 					       &np->rcv_saddr);
239 		}
240 
241 		return err;
242 	}
243 
244 	if (!ipv6_addr_any(&np->rcv_saddr))
245 		saddr = &np->rcv_saddr;
246 
247 	fl6.flowi6_proto = IPPROTO_TCP;
248 	fl6.daddr = np->daddr;
249 	fl6.saddr = saddr ? *saddr : np->saddr;
250 	fl6.flowi6_oif = sk->sk_bound_dev_if;
251 	fl6.flowi6_mark = sk->sk_mark;
252 	fl6.fl6_dport = usin->sin6_port;
253 	fl6.fl6_sport = inet->inet_sport;
254 
255 	final_p = fl6_update_dst(&fl6, np->opt, &final);
256 
257 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
258 
259 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
260 	if (IS_ERR(dst)) {
261 		err = PTR_ERR(dst);
262 		goto failure;
263 	}
264 
265 	if (saddr == NULL) {
266 		saddr = &fl6.saddr;
267 		np->rcv_saddr = *saddr;
268 	}
269 
270 	/* set the source address */
271 	np->saddr = *saddr;
272 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
273 
274 	sk->sk_gso_type = SKB_GSO_TCPV6;
275 	__ip6_dst_store(sk, dst, NULL, NULL);
276 
277 	rt = (struct rt6_info *) dst;
278 	if (tcp_death_row.sysctl_tw_recycle &&
279 	    !tp->rx_opt.ts_recent_stamp &&
280 	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
281 		struct inet_peer *peer = rt6_get_peer(rt);
282 		/*
283 		 * VJ's idea. We save last timestamp seen from
284 		 * the destination in peer table, when entering state
285 		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
286 		 * when trying new connection.
287 		 */
288 		if (peer) {
289 			inet_peer_refcheck(peer);
290 			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
291 				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
292 				tp->rx_opt.ts_recent = peer->tcp_ts;
293 			}
294 		}
295 	}
296 
297 	icsk->icsk_ext_hdr_len = 0;
298 	if (np->opt)
299 		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
300 					  np->opt->opt_nflen);
301 
302 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
303 
304 	inet->inet_dport = usin->sin6_port;
305 
306 	tcp_set_state(sk, TCP_SYN_SENT);
307 	err = inet6_hash_connect(&tcp_death_row, sk);
308 	if (err)
309 		goto late_failure;
310 
311 	if (!tp->write_seq)
312 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
313 							     np->daddr.s6_addr32,
314 							     inet->inet_sport,
315 							     inet->inet_dport);
316 
317 	err = tcp_connect(sk);
318 	if (err)
319 		goto late_failure;
320 
321 	return 0;
322 
323 late_failure:
324 	tcp_set_state(sk, TCP_CLOSE);
325 	__sk_dst_reset(sk);
326 failure:
327 	inet->inet_dport = 0;
328 	sk->sk_route_caps = 0;
329 	return err;
330 }
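
/*
 * Userspace view of the connect path above (illustrative sketch, not part
 * of this file). Note the sin6_scope_id requirement for link-local peers,
 * enforced by the IPV6_ADDR_LINKLOCAL branch in tcp_v6_connect(); the
 * address and interface name are placeholders:
 *
 *	struct sockaddr_in6 dst = {
 *		.sin6_family = AF_INET6,
 *		.sin6_port   = htons(80),
 *	};
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *
 *	inet_pton(AF_INET6, "fe80::1", &dst.sin6_addr);
 *	dst.sin6_scope_id = if_nametoindex("eth0");
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 */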
331 
332 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
333 		u8 type, u8 code, int offset, __be32 info)
334 {
335 	const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
336 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
337 	struct ipv6_pinfo *np;
338 	struct sock *sk;
339 	int err;
340 	struct tcp_sock *tp;
341 	__u32 seq;
342 	struct net *net = dev_net(skb->dev);
343 
344 	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
345 			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
346 
347 	if (sk == NULL) {
348 		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
349 				   ICMP6_MIB_INERRORS);
350 		return;
351 	}
352 
353 	if (sk->sk_state == TCP_TIME_WAIT) {
354 		inet_twsk_put(inet_twsk(sk));
355 		return;
356 	}
357 
358 	bh_lock_sock(sk);
359 	if (sock_owned_by_user(sk))
360 		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
361 
362 	if (sk->sk_state == TCP_CLOSE)
363 		goto out;
364 
365 	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
366 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
367 		goto out;
368 	}
369 
370 	tp = tcp_sk(sk);
371 	seq = ntohl(th->seq);
372 	if (sk->sk_state != TCP_LISTEN &&
373 	    !between(seq, tp->snd_una, tp->snd_nxt)) {
374 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
375 		goto out;
376 	}
377 
378 	np = inet6_sk(sk);
379 
380 	if (type == ICMPV6_PKT_TOOBIG) {
381 		struct dst_entry *dst;
382 
383 		if (sock_owned_by_user(sk))
384 			goto out;
385 		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
386 			goto out;
387 
388 		/* icmp should have updated the destination cache entry */
389 		dst = __sk_dst_check(sk, np->dst_cookie);
390 
391 		if (dst == NULL) {
392 			struct inet_sock *inet = inet_sk(sk);
393 			struct flowi6 fl6;
394 
395 			/* BUGGG_FUTURE: Again, it is not clear how
396 			   to handle the rthdr case. Ignore this complexity
397 			   for now.
398 			 */
399 			memset(&fl6, 0, sizeof(fl6));
400 			fl6.flowi6_proto = IPPROTO_TCP;
401 			fl6.daddr = np->daddr;
402 			fl6.saddr = np->saddr;
403 			fl6.flowi6_oif = sk->sk_bound_dev_if;
404 			fl6.flowi6_mark = sk->sk_mark;
405 			fl6.fl6_dport = inet->inet_dport;
406 			fl6.fl6_sport = inet->inet_sport;
407 			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
408 
409 			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
410 			if (IS_ERR(dst)) {
411 				sk->sk_err_soft = -PTR_ERR(dst);
412 				goto out;
413 			}
414 
415 		} else
416 			dst_hold(dst);
417 
418 		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
419 			tcp_sync_mss(sk, dst_mtu(dst));
420 			tcp_simple_retransmit(sk);
421 		} /* else let the usual retransmit timer handle it */
422 		dst_release(dst);
423 		goto out;
424 	}
425 
426 	icmpv6_err_convert(type, code, &err);
427 
428 	/* Might be for a request_sock */
429 	switch (sk->sk_state) {
430 		struct request_sock *req, **prev;
431 	case TCP_LISTEN:
432 		if (sock_owned_by_user(sk))
433 			goto out;
434 
435 		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
436 					   &hdr->saddr, inet6_iif(skb));
437 		if (!req)
438 			goto out;
439 
440 		/* ICMPs are not backlogged, hence we cannot get
441 		 * an established socket here.
442 		 */
443 		WARN_ON(req->sk != NULL);
444 
445 		if (seq != tcp_rsk(req)->snt_isn) {
446 			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
447 			goto out;
448 		}
449 
450 		inet_csk_reqsk_queue_drop(sk, req, prev);
451 		goto out;
452 
453 	case TCP_SYN_SENT:
454 	case TCP_SYN_RECV:  /* Cannot happen.
455 			       It can, if SYNs are crossed. --ANK */
456 		if (!sock_owned_by_user(sk)) {
457 			sk->sk_err = err;
458 			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
459 
460 			tcp_done(sk);
461 		} else
462 			sk->sk_err_soft = err;
463 		goto out;
464 	}
465 
466 	if (!sock_owned_by_user(sk) && np->recverr) {
467 		sk->sk_err = err;
468 		sk->sk_error_report(sk);
469 	} else
470 		sk->sk_err_soft = err;
471 
472 out:
473 	bh_unlock_sock(sk);
474 	sock_put(sk);
475 }
476 
477 
478 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
479 			      struct request_values *rvp,
480 			      u16 queue_mapping)
481 {
482 	struct inet6_request_sock *treq = inet6_rsk(req);
483 	struct ipv6_pinfo *np = inet6_sk(sk);
484 	struct sk_buff *skb;
485 	struct ipv6_txoptions *opt = NULL;
486 	struct in6_addr *final_p, final;
487 	struct flowi6 fl6;
488 	struct dst_entry *dst;
489 	int err;
490 
491 	memset(&fl6, 0, sizeof(fl6));
492 	fl6.flowi6_proto = IPPROTO_TCP;
493 	fl6.daddr = treq->rmt_addr;
494 	fl6.saddr = treq->loc_addr;
495 	fl6.flowlabel = 0;
496 	fl6.flowi6_oif = treq->iif;
497 	fl6.flowi6_mark = sk->sk_mark;
498 	fl6.fl6_dport = inet_rsk(req)->rmt_port;
499 	fl6.fl6_sport = inet_rsk(req)->loc_port;
500 	security_req_classify_flow(req, flowi6_to_flowi(&fl6));
501 
502 	opt = np->opt;
503 	final_p = fl6_update_dst(&fl6, opt, &final);
504 
505 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
506 	if (IS_ERR(dst)) {
507 		err = PTR_ERR(dst);
508 		dst = NULL;
509 		goto done;
510 	}
511 	skb = tcp_make_synack(sk, dst, req, rvp);
512 	err = -ENOMEM;
513 	if (skb) {
514 		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
515 
516 		fl6.daddr = treq->rmt_addr;
517 		skb_set_queue_mapping(skb, queue_mapping);
518 		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
519 		err = net_xmit_eval(err);
520 	}
521 
522 done:
523 	if (opt && opt != np->opt)
524 		sock_kfree_s(sk, opt, opt->tot_len);
525 	dst_release(dst);
526 	return err;
527 }
528 
529 static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
530 			     struct request_values *rvp)
531 {
532 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
533 	return tcp_v6_send_synack(sk, req, rvp, 0);
534 }
535 
536 static void tcp_v6_reqsk_destructor(struct request_sock *req)
537 {
538 	kfree_skb(inet6_rsk(req)->pktopts);
539 }
540 
541 #ifdef CONFIG_TCP_MD5SIG
542 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
543 						   const struct in6_addr *addr)
544 {
545 	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
546 }
547 
548 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
549 						struct sock *addr_sk)
550 {
551 	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
552 }
553 
554 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
555 						      struct request_sock *req)
556 {
557 	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
558 }
559 
560 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
561 				  int optlen)
562 {
563 	struct tcp_md5sig cmd;
564 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
565 
566 	if (optlen < sizeof(cmd))
567 		return -EINVAL;
568 
569 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
570 		return -EFAULT;
571 
572 	if (sin6->sin6_family != AF_INET6)
573 		return -EINVAL;
574 
575 	if (!cmd.tcpm_keylen) {
576 		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
577 			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
578 					      AF_INET);
579 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
580 				      AF_INET6);
581 	}
582 
583 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
584 		return -EINVAL;
585 
586 	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
587 		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
588 				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
589 
590 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
591 			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
592 }
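
/*
 * The option parsed above is installed from userspace roughly as follows
 * (illustrative sketch; address and key are placeholders). A v4-mapped
 * address in tcpm_addr ends up in the AF_INET key table, matching the
 * ipv6_addr_v4mapped() branches in tcp_v6_parse_md5_keys():
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::2", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */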
593 
594 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
595 					const struct in6_addr *daddr,
596 					const struct in6_addr *saddr, int nbytes)
597 {
598 	struct tcp6_pseudohdr *bp;
599 	struct scatterlist sg;
600 
601 	bp = &hp->md5_blk.ip6;
602 	/* 1. TCP pseudo-header (RFC2460) */
603 	bp->saddr = *saddr;
604 	bp->daddr = *daddr;
605 	bp->protocol = cpu_to_be32(IPPROTO_TCP);
606 	bp->len = cpu_to_be32(nbytes);
607 
608 	sg_init_one(&sg, bp, sizeof(*bp));
609 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
610 }
611 
612 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
613 			       const struct in6_addr *daddr, struct in6_addr *saddr,
614 			       const struct tcphdr *th)
615 {
616 	struct tcp_md5sig_pool *hp;
617 	struct hash_desc *desc;
618 
619 	hp = tcp_get_md5sig_pool();
620 	if (!hp)
621 		goto clear_hash_noput;
622 	desc = &hp->md5_desc;
623 
624 	if (crypto_hash_init(desc))
625 		goto clear_hash;
626 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
627 		goto clear_hash;
628 	if (tcp_md5_hash_header(hp, th))
629 		goto clear_hash;
630 	if (tcp_md5_hash_key(hp, key))
631 		goto clear_hash;
632 	if (crypto_hash_final(desc, md5_hash))
633 		goto clear_hash;
634 
635 	tcp_put_md5sig_pool();
636 	return 0;
637 
638 clear_hash:
639 	tcp_put_md5sig_pool();
640 clear_hash_noput:
641 	memset(md5_hash, 0, 16);
642 	return 1;
643 }
644 
645 static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
646 			       const struct sock *sk,
647 			       const struct request_sock *req,
648 			       const struct sk_buff *skb)
649 {
650 	const struct in6_addr *saddr, *daddr;
651 	struct tcp_md5sig_pool *hp;
652 	struct hash_desc *desc;
653 	const struct tcphdr *th = tcp_hdr(skb);
654 
655 	if (sk) {
656 		saddr = &inet6_sk(sk)->saddr;
657 		daddr = &inet6_sk(sk)->daddr;
658 	} else if (req) {
659 		saddr = &inet6_rsk(req)->loc_addr;
660 		daddr = &inet6_rsk(req)->rmt_addr;
661 	} else {
662 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
663 		saddr = &ip6h->saddr;
664 		daddr = &ip6h->daddr;
665 	}
666 
667 	hp = tcp_get_md5sig_pool();
668 	if (!hp)
669 		goto clear_hash_noput;
670 	desc = &hp->md5_desc;
671 
672 	if (crypto_hash_init(desc))
673 		goto clear_hash;
674 
675 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
676 		goto clear_hash;
677 	if (tcp_md5_hash_header(hp, th))
678 		goto clear_hash;
679 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
680 		goto clear_hash;
681 	if (tcp_md5_hash_key(hp, key))
682 		goto clear_hash;
683 	if (crypto_hash_final(desc, md5_hash))
684 		goto clear_hash;
685 
686 	tcp_put_md5sig_pool();
687 	return 0;
688 
689 clear_hash:
690 	tcp_put_md5sig_pool();
691 clear_hash_noput:
692 	memset(md5_hash, 0, 16);
693 	return 1;
694 }
695 
696 static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
697 {
698 	const __u8 *hash_location = NULL;
699 	struct tcp_md5sig_key *hash_expected;
700 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
701 	const struct tcphdr *th = tcp_hdr(skb);
702 	int genhash;
703 	u8 newhash[16];
704 
705 	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
706 	hash_location = tcp_parse_md5sig_option(th);
707 
708 	/* We've parsed the options - do we have a hash? */
709 	if (!hash_expected && !hash_location)
710 		return 0;
711 
712 	if (hash_expected && !hash_location) {
713 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
714 		return 1;
715 	}
716 
717 	if (!hash_expected && hash_location) {
718 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
719 		return 1;
720 	}
721 
722 	/* check the signature */
723 	genhash = tcp_v6_md5_hash_skb(newhash,
724 				      hash_expected,
725 				      NULL, NULL, skb);
726 
727 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
728 		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
729 				     genhash ? "failed" : "mismatch",
730 				     &ip6h->saddr, ntohs(th->source),
731 				     &ip6h->daddr, ntohs(th->dest));
732 		return 1;
733 	}
734 	return 0;
735 }
736 #endif
737 
738 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
739 	.family		=	AF_INET6,
740 	.obj_size	=	sizeof(struct tcp6_request_sock),
741 	.rtx_syn_ack	=	tcp_v6_rtx_synack,
742 	.send_ack	=	tcp_v6_reqsk_send_ack,
743 	.destructor	=	tcp_v6_reqsk_destructor,
744 	.send_reset	=	tcp_v6_send_reset,
745 	.syn_ack_timeout = 	tcp_syn_ack_timeout,
746 };
747 
748 #ifdef CONFIG_TCP_MD5SIG
749 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
750 	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
751 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
752 };
753 #endif
754 
755 static void __tcp_v6_send_check(struct sk_buff *skb,
756 				const struct in6_addr *saddr, const struct in6_addr *daddr)
757 {
758 	struct tcphdr *th = tcp_hdr(skb);
759 
760 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
761 		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
762 		skb->csum_start = skb_transport_header(skb) - skb->head;
763 		skb->csum_offset = offsetof(struct tcphdr, check);
764 	} else {
765 		th->check = tcp_v6_check(skb->len, saddr, daddr,
766 					 csum_partial(th, th->doff << 2,
767 						      skb->csum));
768 	}
769 }
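
/*
 * With CHECKSUM_PARTIAL only the pseudo-header sum is stored in th->check;
 * csum_start/csum_offset tell the NIC (or skb_checksum_help() as the
 * software fallback) where to finish the job. A sketch of what that
 * completion step effectively does, assuming the generic helpers:
 *
 *	int off  = skb_checksum_start_offset(skb);
 *	__wsum w = csum_partial(skb->data + off, skb->len - off, 0);
 *
 *	*(__sum16 *)(skb->data + off + skb->csum_offset) = csum_fold(w);
 */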
770 
771 static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
772 {
773 	struct ipv6_pinfo *np = inet6_sk(sk);
774 
775 	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
776 }
777 
778 static int tcp_v6_gso_send_check(struct sk_buff *skb)
779 {
780 	const struct ipv6hdr *ipv6h;
781 	struct tcphdr *th;
782 
783 	if (!pskb_may_pull(skb, sizeof(*th)))
784 		return -EINVAL;
785 
786 	ipv6h = ipv6_hdr(skb);
787 	th = tcp_hdr(skb);
788 
789 	th->check = 0;
790 	skb->ip_summed = CHECKSUM_PARTIAL;
791 	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
792 	return 0;
793 }
794 
795 static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
796 					 struct sk_buff *skb)
797 {
798 	const struct ipv6hdr *iph = skb_gro_network_header(skb);
799 
800 	switch (skb->ip_summed) {
801 	case CHECKSUM_COMPLETE:
802 		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
803 				  skb->csum)) {
804 			skb->ip_summed = CHECKSUM_UNNECESSARY;
805 			break;
806 		}
807 
808 		/* fall through */
809 	case CHECKSUM_NONE:
810 		NAPI_GRO_CB(skb)->flush = 1;
811 		return NULL;
812 	}
813 
814 	return tcp_gro_receive(head, skb);
815 }
816 
817 static int tcp6_gro_complete(struct sk_buff *skb)
818 {
819 	const struct ipv6hdr *iph = ipv6_hdr(skb);
820 	struct tcphdr *th = tcp_hdr(skb);
821 
822 	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
823 				  &iph->saddr, &iph->daddr, 0);
824 	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
825 
826 	return tcp_gro_complete(skb);
827 }
828 
829 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
830 				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
831 {
832 	const struct tcphdr *th = tcp_hdr(skb);
833 	struct tcphdr *t1;
834 	struct sk_buff *buff;
835 	struct flowi6 fl6;
836 	struct net *net = dev_net(skb_dst(skb)->dev);
837 	struct sock *ctl_sk = net->ipv6.tcp_sk;
838 	unsigned int tot_len = sizeof(struct tcphdr);
839 	struct dst_entry *dst;
840 	__be32 *topt;
841 
842 	if (ts)
843 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
844 #ifdef CONFIG_TCP_MD5SIG
845 	if (key)
846 		tot_len += TCPOLEN_MD5SIG_ALIGNED;
847 #endif
848 
849 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
850 			 GFP_ATOMIC);
851 	if (buff == NULL)
852 		return;
853 
854 	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
855 
856 	t1 = (struct tcphdr *) skb_push(buff, tot_len);
857 	skb_reset_transport_header(buff);
858 
859 	/* Swap the send and the receive. */
860 	memset(t1, 0, sizeof(*t1));
861 	t1->dest = th->source;
862 	t1->source = th->dest;
863 	t1->doff = tot_len / 4;
864 	t1->seq = htonl(seq);
865 	t1->ack_seq = htonl(ack);
866 	t1->ack = !rst || !th->ack;
867 	t1->rst = rst;
868 	t1->window = htons(win);
869 
870 	topt = (__be32 *)(t1 + 1);
871 
872 	if (ts) {
873 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
874 				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
875 		*topt++ = htonl(tcp_time_stamp);
876 		*topt++ = htonl(ts);
877 	}
878 
879 #ifdef CONFIG_TCP_MD5SIG
880 	if (key) {
881 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
882 				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
883 		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
884 				    &ipv6_hdr(skb)->saddr,
885 				    &ipv6_hdr(skb)->daddr, t1);
886 	}
887 #endif
888 
889 	memset(&fl6, 0, sizeof(fl6));
890 	fl6.daddr = ipv6_hdr(skb)->saddr;
891 	fl6.saddr = ipv6_hdr(skb)->daddr;
892 
893 	buff->ip_summed = CHECKSUM_PARTIAL;
894 	buff->csum = 0;
895 
896 	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
897 
898 	fl6.flowi6_proto = IPPROTO_TCP;
899 	fl6.flowi6_oif = inet6_iif(skb);
900 	fl6.fl6_dport = t1->dest;
901 	fl6.fl6_sport = t1->source;
902 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
903 
904 	/* Pass a socket to ip6_dst_lookup() even if it is for an RST;
905 	 * the underlying function uses it only to retrieve the
906 	 * network namespace.
907 	 */
908 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
909 	if (!IS_ERR(dst)) {
910 		skb_dst_set(buff, dst);
911 		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
912 		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
913 		if (rst)
914 			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
915 		return;
916 	}
917 
918 	kfree_skb(buff);
919 }
920 
921 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
922 {
923 	const struct tcphdr *th = tcp_hdr(skb);
924 	u32 seq = 0, ack_seq = 0;
925 	struct tcp_md5sig_key *key = NULL;
926 #ifdef CONFIG_TCP_MD5SIG
927 	const __u8 *hash_location = NULL;
928 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
929 	unsigned char newhash[16];
930 	int genhash;
931 	struct sock *sk1 = NULL;
932 #endif
933 
934 	if (th->rst)
935 		return;
936 
937 	if (!ipv6_unicast_destination(skb))
938 		return;
939 
940 #ifdef CONFIG_TCP_MD5SIG
941 	hash_location = tcp_parse_md5sig_option(th);
942 	if (!sk && hash_location) {
943 		/*
944 		 * The active side is lost. Try to find the listening socket
945 		 * via the source port, then find the md5 key through it.
946 		 * We do not lose security here: the incoming packet is
947 		 * verified against the md5 hash of the key we find, and
948 		 * no RST is generated if the hash does not match.
949 		 */
950 		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
951 					   &tcp_hashinfo, &ipv6h->daddr,
952 					   ntohs(th->source), inet6_iif(skb));
953 		if (!sk1)
954 			return;
955 
956 		rcu_read_lock();
957 		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
958 		if (!key)
959 			goto release_sk1;
960 
961 		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
962 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
963 			goto release_sk1;
964 	} else {
965 		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
966 	}
967 #endif
968 
969 	if (th->ack)
970 		seq = ntohl(th->ack_seq);
971 	else
972 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
973 			  (th->doff << 2);
974 
975 	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
976 
977 #ifdef CONFIG_TCP_MD5SIG
978 release_sk1:
979 	if (sk1) {
980 		rcu_read_unlock();
981 		sock_put(sk1);
982 	}
983 #endif
984 }
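
/*
 * The seq/ack choice above follows RFC 793. A worked example, assuming an
 * incoming segment with seq = 1000, 20 bytes of payload and no ACK bit:
 * the RST is sent with seq = 0, ack_seq = 1020 and the ACK bit set (see
 * t1->ack = !rst || !th->ack in tcp_v6_send_response()). Had the segment
 * instead carried ack_seq = 5000 with the ACK bit set, the RST would use
 * seq = 5000 and clear the ACK bit.
 */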
985 
986 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
987 			    struct tcp_md5sig_key *key, u8 tclass)
988 {
989 	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
990 }
991 
992 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
993 {
994 	struct inet_timewait_sock *tw = inet_twsk(sk);
995 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
996 
997 	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
998 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
999 			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
1000 			tw->tw_tclass);
1001 
1002 	inet_twsk_put(tw);
1003 }
1004 
1005 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1006 				  struct request_sock *req)
1007 {
1008 	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1009 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
1010 }
1011 
1012 
1013 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1014 {
1015 	struct request_sock *req, **prev;
1016 	const struct tcphdr *th = tcp_hdr(skb);
1017 	struct sock *nsk;
1018 
1019 	/* Find possible connection requests. */
1020 	req = inet6_csk_search_req(sk, &prev, th->source,
1021 				   &ipv6_hdr(skb)->saddr,
1022 				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1023 	if (req)
1024 		return tcp_check_req(sk, skb, req, prev);
1025 
1026 	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1027 			&ipv6_hdr(skb)->saddr, th->source,
1028 			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1029 
1030 	if (nsk) {
1031 		if (nsk->sk_state != TCP_TIME_WAIT) {
1032 			bh_lock_sock(nsk);
1033 			return nsk;
1034 		}
1035 		inet_twsk_put(inet_twsk(nsk));
1036 		return NULL;
1037 	}
1038 
1039 #ifdef CONFIG_SYN_COOKIES
1040 	if (!th->syn)
1041 		sk = cookie_v6_check(sk, skb);
1042 #endif
1043 	return sk;
1044 }
1045 
1046 /* FIXME: this is substantially similar to the ipv4 code.
1047  * Can some kind of merge be done? -- erics
1048  */
1049 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1050 {
1051 	struct tcp_extend_values tmp_ext;
1052 	struct tcp_options_received tmp_opt;
1053 	const u8 *hash_location;
1054 	struct request_sock *req;
1055 	struct inet6_request_sock *treq;
1056 	struct ipv6_pinfo *np = inet6_sk(sk);
1057 	struct tcp_sock *tp = tcp_sk(sk);
1058 	__u32 isn = TCP_SKB_CB(skb)->when;
1059 	struct dst_entry *dst = NULL;
1060 	bool want_cookie = false;
1061 
1062 	if (skb->protocol == htons(ETH_P_IP))
1063 		return tcp_v4_conn_request(sk, skb);
1064 
1065 	if (!ipv6_unicast_destination(skb))
1066 		goto drop;
1067 
1068 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1069 		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
1070 		if (!want_cookie)
1071 			goto drop;
1072 	}
1073 
1074 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1075 		goto drop;
1076 
1077 	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1078 	if (req == NULL)
1079 		goto drop;
1080 
1081 #ifdef CONFIG_TCP_MD5SIG
1082 	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1083 #endif
1084 
1085 	tcp_clear_options(&tmp_opt);
1086 	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1087 	tmp_opt.user_mss = tp->rx_opt.user_mss;
1088 	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1089 
1090 	if (tmp_opt.cookie_plus > 0 &&
1091 	    tmp_opt.saw_tstamp &&
1092 	    !tp->rx_opt.cookie_out_never &&
1093 	    (sysctl_tcp_cookie_size > 0 ||
1094 	     (tp->cookie_values != NULL &&
1095 	      tp->cookie_values->cookie_desired > 0))) {
1096 		u8 *c;
1097 		u32 *d;
1098 		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1099 		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1100 
1101 		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1102 			goto drop_and_free;
1103 
1104 		/* Secret recipe starts with IP addresses */
1105 		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1106 		*mess++ ^= *d++;
1107 		*mess++ ^= *d++;
1108 		*mess++ ^= *d++;
1109 		*mess++ ^= *d++;
1110 		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1111 		*mess++ ^= *d++;
1112 		*mess++ ^= *d++;
1113 		*mess++ ^= *d++;
1114 		*mess++ ^= *d++;
1115 
1116 		/* plus variable length Initiator Cookie */
1117 		c = (u8 *)mess;
1118 		while (l-- > 0)
1119 			*c++ ^= *hash_location++;
1120 
1121 		want_cookie = false;	/* not our kind of cookie */
1122 		tmp_ext.cookie_out_never = 0; /* false */
1123 		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1124 	} else if (!tp->rx_opt.cookie_in_always) {
1125 		/* redundant indications, but ensure initialization. */
1126 		tmp_ext.cookie_out_never = 1; /* true */
1127 		tmp_ext.cookie_plus = 0;
1128 	} else {
1129 		goto drop_and_free;
1130 	}
1131 	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1132 
1133 	if (want_cookie && !tmp_opt.saw_tstamp)
1134 		tcp_clear_options(&tmp_opt);
1135 
1136 	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1137 	tcp_openreq_init(req, &tmp_opt, skb);
1138 
1139 	treq = inet6_rsk(req);
1140 	treq->rmt_addr = ipv6_hdr(skb)->saddr;
1141 	treq->loc_addr = ipv6_hdr(skb)->daddr;
1142 	if (!want_cookie || tmp_opt.tstamp_ok)
1143 		TCP_ECN_create_request(req, skb);
1144 
1145 	treq->iif = sk->sk_bound_dev_if;
1146 
1147 	/* So that link locals have meaning */
1148 	if (!sk->sk_bound_dev_if &&
1149 	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1150 		treq->iif = inet6_iif(skb);
1151 
1152 	if (!isn) {
1153 		struct inet_peer *peer = NULL;
1154 
1155 		if (ipv6_opt_accepted(sk, skb) ||
1156 		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1157 		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1158 			atomic_inc(&skb->users);
1159 			treq->pktopts = skb;
1160 		}
1161 
1162 		if (want_cookie) {
1163 			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1164 			req->cookie_ts = tmp_opt.tstamp_ok;
1165 			goto have_isn;
1166 		}
1167 
1168 		/* VJ's idea. We save last timestamp seen
1169 		 * from the destination in peer table, when entering
1170 		 * state TIME-WAIT, and check against it before
1171 		 * accepting new connection request.
1172 		 *
1173 		 * If "isn" is not zero, this request hit alive
1174 		 * timewait bucket, so that all the necessary checks
1175 		 * are made in the function processing timewait state.
1176 		 */
1177 		if (tmp_opt.saw_tstamp &&
1178 		    tcp_death_row.sysctl_tw_recycle &&
1179 		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
1180 		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
1181 		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
1182 				    &treq->rmt_addr)) {
1183 			inet_peer_refcheck(peer);
1184 			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1185 			    (s32)(peer->tcp_ts - req->ts_recent) >
1186 							TCP_PAWS_WINDOW) {
1187 				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1188 				goto drop_and_release;
1189 			}
1190 		}
1191 		/* Kill the following clause, if you dislike this approach. */
1192 		else if (!sysctl_tcp_syncookies &&
1193 			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1194 			  (sysctl_max_syn_backlog >> 2)) &&
1195 			 (!peer || !peer->tcp_ts_stamp) &&
1196 			 (!dst || !dst_metric(dst, RTAX_RTT))) {
1197 			/* Without syncookies, the last quarter of the
1198 			 * backlog is filled with destinations proven
1199 			 * to be alive.
1200 			 * It means that we keep communicating with
1201 			 * destinations that were already known at
1202 			 * the moment of the synflood.
1203 			 */
1204 			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1205 				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1206 			goto drop_and_release;
1207 		}
1208 
1209 		isn = tcp_v6_init_sequence(skb);
1210 	}
1211 have_isn:
1212 	tcp_rsk(req)->snt_isn = isn;
1213 	tcp_rsk(req)->snt_synack = tcp_time_stamp;
1214 
1215 	if (security_inet_conn_request(sk, skb, req))
1216 		goto drop_and_release;
1217 
1218 	if (tcp_v6_send_synack(sk, req,
1219 			       (struct request_values *)&tmp_ext,
1220 			       skb_get_queue_mapping(skb)) ||
1221 	    want_cookie)
1222 		goto drop_and_free;
1223 
1224 	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1225 	return 0;
1226 
1227 drop_and_release:
1228 	dst_release(dst);
1229 drop_and_free:
1230 	reqsk_free(req);
1231 drop:
1232 	return 0; /* don't send reset */
1233 }
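
/*
 * When want_cookie is set above, no request_sock state survives the SYN:
 * the ISN from cookie_v6_init_sequence() encodes a keyed hash of the
 * addresses, ports and a coarse timestamp, plus an index into a small MSS
 * table, so the final ACK alone suffices to rebuild the connection in
 * cookie_v6_check().
 */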
1234 
1235 static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1236 					  struct request_sock *req,
1237 					  struct dst_entry *dst)
1238 {
1239 	struct inet6_request_sock *treq;
1240 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1241 	struct tcp6_sock *newtcp6sk;
1242 	struct inet_sock *newinet;
1243 	struct tcp_sock *newtp;
1244 	struct sock *newsk;
1245 	struct ipv6_txoptions *opt;
1246 #ifdef CONFIG_TCP_MD5SIG
1247 	struct tcp_md5sig_key *key;
1248 #endif
1249 
1250 	if (skb->protocol == htons(ETH_P_IP)) {
1251 		/*
1252 		 *	v6 mapped
1253 		 */
1254 
1255 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1256 
1257 		if (newsk == NULL)
1258 			return NULL;
1259 
1260 		newtcp6sk = (struct tcp6_sock *)newsk;
1261 		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1262 
1263 		newinet = inet_sk(newsk);
1264 		newnp = inet6_sk(newsk);
1265 		newtp = tcp_sk(newsk);
1266 
1267 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1268 
1269 		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1270 
1271 		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1272 
1273 		newnp->rcv_saddr = newnp->saddr;
1274 
1275 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1276 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1277 #ifdef CONFIG_TCP_MD5SIG
1278 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1279 #endif
1280 
1281 		newnp->ipv6_ac_list = NULL;
1282 		newnp->ipv6_fl_list = NULL;
1283 		newnp->pktoptions  = NULL;
1284 		newnp->opt	   = NULL;
1285 		newnp->mcast_oif   = inet6_iif(skb);
1286 		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1287 		newnp->rcv_tclass  = ipv6_tclass(ipv6_hdr(skb));
1288 
1289 		/*
1290 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1291 		 * here, tcp_create_openreq_child now does this for us, see the comment in
1292 		 * that function for the gory details. -acme
1293 		 */
1294 
1295 		/* This is a tricky place. Until this moment the IPv4 tcp
1296 		   code worked with the IPv6 icsk.icsk_af_ops.
1297 		   Sync it now.
1298 		 */
1299 		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1300 
1301 		return newsk;
1302 	}
1303 
1304 	treq = inet6_rsk(req);
1305 	opt = np->opt;
1306 
1307 	if (sk_acceptq_is_full(sk))
1308 		goto out_overflow;
1309 
1310 	if (!dst) {
1311 		dst = inet6_csk_route_req(sk, req);
1312 		if (!dst)
1313 			goto out;
1314 	}
1315 
1316 	newsk = tcp_create_openreq_child(sk, req, skb);
1317 	if (newsk == NULL)
1318 		goto out_nonewsk;
1319 
1320 	/*
1321 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1322 	 * count here, tcp_create_openreq_child now does this for us, see the
1323 	 * comment in that function for the gory details. -acme
1324 	 */
1325 
1326 	newsk->sk_gso_type = SKB_GSO_TCPV6;
1327 	__ip6_dst_store(newsk, dst, NULL, NULL);
1328 
1329 	newtcp6sk = (struct tcp6_sock *)newsk;
1330 	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1331 
1332 	newtp = tcp_sk(newsk);
1333 	newinet = inet_sk(newsk);
1334 	newnp = inet6_sk(newsk);
1335 
1336 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1337 
1338 	newnp->daddr = treq->rmt_addr;
1339 	newnp->saddr = treq->loc_addr;
1340 	newnp->rcv_saddr = treq->loc_addr;
1341 	newsk->sk_bound_dev_if = treq->iif;
1342 
1343 	/* Now IPv6 options...
1344 
1345 	   First: no IPv4 options.
1346 	 */
1347 	newinet->inet_opt = NULL;
1348 	newnp->ipv6_ac_list = NULL;
1349 	newnp->ipv6_fl_list = NULL;
1350 
1351 	/* Clone RX bits */
1352 	newnp->rxopt.all = np->rxopt.all;
1353 
1354 	/* Clone pktoptions received with SYN */
1355 	newnp->pktoptions = NULL;
1356 	if (treq->pktopts != NULL) {
1357 		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1358 		consume_skb(treq->pktopts);
1359 		treq->pktopts = NULL;
1360 		if (newnp->pktoptions)
1361 			skb_set_owner_r(newnp->pktoptions, newsk);
1362 	}
1363 	newnp->opt	  = NULL;
1364 	newnp->mcast_oif  = inet6_iif(skb);
1365 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1366 	newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1367 
1368 	/* Clone native IPv6 options from the listening socket (if any).
1369 
1370 	   Yes, keeping a reference count would be much more clever,
1371 	   but we do one more thing here: reattach the optmem
1372 	   to newsk.
1373 	 */
1374 	if (opt) {
1375 		newnp->opt = ipv6_dup_options(newsk, opt);
1376 		if (opt != np->opt)
1377 			sock_kfree_s(sk, opt, opt->tot_len);
1378 	}
1379 
1380 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1381 	if (newnp->opt)
1382 		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1383 						     newnp->opt->opt_flen);
1384 
1385 	tcp_mtup_init(newsk);
1386 	tcp_sync_mss(newsk, dst_mtu(dst));
1387 	newtp->advmss = dst_metric_advmss(dst);
1388 	if (tcp_sk(sk)->rx_opt.user_mss &&
1389 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1390 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1391 
1392 	tcp_initialize_rcv_mss(newsk);
1393 	if (tcp_rsk(req)->snt_synack)
1394 		tcp_valid_rtt_meas(newsk,
1395 		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
1396 	newtp->total_retrans = req->retrans;
1397 
1398 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1399 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1400 
1401 #ifdef CONFIG_TCP_MD5SIG
1402 	/* Copy over the MD5 key from the original socket */
1403 	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1404 		/* We're using one, so create a matching key
1405 		 * on the newsk structure. If we fail to get
1406 		 * memory, then we end up not copying the key
1407 		 * across. Shucks.
1408 		 */
1409 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
1410 			       AF_INET6, key->key, key->keylen, GFP_ATOMIC);
1411 	}
1412 #endif
1413 
1414 	if (__inet_inherit_port(sk, newsk) < 0) {
1415 		sock_put(newsk);
1416 		goto out;
1417 	}
1418 	__inet6_hash(newsk, NULL);
1419 
1420 	return newsk;
1421 
1422 out_overflow:
1423 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1424 out_nonewsk:
1425 	if (opt && opt != np->opt)
1426 		sock_kfree_s(sk, opt, opt->tot_len);
1427 	dst_release(dst);
1428 out:
1429 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1430 	return NULL;
1431 }
1432 
1433 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1434 {
1435 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1436 		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1437 				  &ipv6_hdr(skb)->daddr, skb->csum)) {
1438 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1439 			return 0;
1440 		}
1441 	}
1442 
1443 	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1444 					      &ipv6_hdr(skb)->saddr,
1445 					      &ipv6_hdr(skb)->daddr, 0));
1446 
1447 	if (skb->len <= 76) {
1448 		return __skb_checksum_complete(skb);
1449 	}
1450 	return 0;
1451 }
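
/*
 * The 76-byte cutoff above is a cost heuristic: short segments (e.g. a
 * pure ACK) are verified immediately via __skb_checksum_complete(), while
 * for larger ones skb->csum is primed with the complemented pseudo-header
 * sum so the checksum can be validated later, during the copy to
 * userspace.
 */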
1452 
1453 /* The socket must have its spinlock held when we get
1454  * here.
1455  *
1456  * We have a potential double-lock case here, so even when
1457  * doing backlog processing we use the BH locking scheme.
1458  * This is because we cannot sleep with the original spinlock
1459  * held.
1460  */
1461 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1462 {
1463 	struct ipv6_pinfo *np = inet6_sk(sk);
1464 	struct tcp_sock *tp;
1465 	struct sk_buff *opt_skb = NULL;
1466 
1467 	/* Imagine: a socket is IPv6. An IPv4 packet arrives,
1468 	   goes to the IPv4 receive handler and is backlogged.
1469 	   From the backlog it always comes here. Kerboom...
1470 	   Fortunately, tcp_rcv_established and rcv_established
1471 	   handle them correctly, but that is not the case with
1472 	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1473 	 */
1474 
1475 	if (skb->protocol == htons(ETH_P_IP))
1476 		return tcp_v4_do_rcv(sk, skb);
1477 
1478 #ifdef CONFIG_TCP_MD5SIG
1479 	if (tcp_v6_inbound_md5_hash(sk, skb))
1480 		goto discard;
1481 #endif
1482 
1483 	if (sk_filter(sk, skb))
1484 		goto discard;
1485 
1486 	/*
1487 	 *	socket locking is here for SMP purposes as backlog rcv
1488 	 *	is currently called with bh processing disabled.
1489 	 */
1490 
1491 	/* Do Stevens' IPV6_PKTOPTIONS.
1492 
1493 	   Yes, guys, it is the only place in our code where we
1494 	   can do this without affecting IPv4.
1495 	   The rest of the code is protocol independent,
1496 	   and I do not like the idea of uglifying IPv4.
1497 
1498 	   Actually, the whole idea behind IPV6_PKTOPTIONS
1499 	   looks not very well thought out. For now we latch the
1500 	   options received in the last packet enqueued by tcp;
1501 	   see the sketch after this function. Feel free to
1502 	   propose a better solution.	       --ANK (980728)
1503 	 */
1504 	if (np->rxopt.all)
1505 		opt_skb = skb_clone(skb, GFP_ATOMIC);
1506 
1507 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1508 		sock_rps_save_rxhash(sk, skb);
1509 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1510 			goto reset;
1511 		if (opt_skb)
1512 			goto ipv6_pktoptions;
1513 		return 0;
1514 	}
1515 
1516 	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1517 		goto csum_err;
1518 
1519 	if (sk->sk_state == TCP_LISTEN) {
1520 		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1521 		if (!nsk)
1522 			goto discard;
1523 
1524 		/*
1525 		 * Queue it on the new socket if the new socket is active,
1526 		 * otherwise we just short-circuit this and continue with
1527 		 * the new socket.
1528 		 */
1529 		if (nsk != sk) {
1530 			sock_rps_save_rxhash(nsk, skb);
1531 			if (tcp_child_process(sk, nsk, skb))
1532 				goto reset;
1533 			if (opt_skb)
1534 				__kfree_skb(opt_skb);
1535 			return 0;
1536 		}
1537 	} else
1538 		sock_rps_save_rxhash(sk, skb);
1539 
1540 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1541 		goto reset;
1542 	if (opt_skb)
1543 		goto ipv6_pktoptions;
1544 	return 0;
1545 
1546 reset:
1547 	tcp_v6_send_reset(sk, skb);
1548 discard:
1549 	if (opt_skb)
1550 		__kfree_skb(opt_skb);
1551 	kfree_skb(skb);
1552 	return 0;
1553 csum_err:
1554 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1555 	goto discard;
1556 
1557 
1558 ipv6_pktoptions:
1559 	/* You may ask: what is this?
1560 
1561 	   1. skb was enqueued by tcp.
1562 	   2. skb was added to the tail of the read queue, not out of order.
1563 	   3. The socket is not in a passive state.
1564 	   4. Finally, it really contains options the user wants to receive.
1565 	 */
1566 	tp = tcp_sk(sk);
1567 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1568 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1569 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1570 			np->mcast_oif = inet6_iif(opt_skb);
1571 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1572 			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1573 		if (np->rxopt.bits.rxtclass)
1574 			np->rcv_tclass = ipv6_tclass(ipv6_hdr(opt_skb));
1575 		if (ipv6_opt_accepted(sk, opt_skb)) {
1576 			skb_set_owner_r(opt_skb, sk);
1577 			opt_skb = xchg(&np->pktoptions, opt_skb);
1578 		} else {
1579 			__kfree_skb(opt_skb);
1580 			opt_skb = xchg(&np->pktoptions, NULL);
1581 		}
1582 	}
1583 
1584 	kfree_skb(opt_skb);
1585 	return 0;
1586 }
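
/*
 * The sketch referenced from the IPV6_PKTOPTIONS comment above: the
 * options latched into np->pktoptions surface through the RFC 2292 API
 * (illustrative userspace sketch, not part of this file):
 *
 *	unsigned char cbuf[256];
 *	socklen_t len = sizeof(cbuf);
 *
 *	getsockopt(fd, IPPROTO_IPV6, IPV6_2292PKTOPTIONS, cbuf, &len);
 *
 * cbuf then holds cmsghdr-formatted records, such as IPV6_PKTINFO, for
 * the last segment enqueued by tcp.
 */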
1587 
1588 static int tcp_v6_rcv(struct sk_buff *skb)
1589 {
1590 	const struct tcphdr *th;
1591 	const struct ipv6hdr *hdr;
1592 	struct sock *sk;
1593 	int ret;
1594 	struct net *net = dev_net(skb->dev);
1595 
1596 	if (skb->pkt_type != PACKET_HOST)
1597 		goto discard_it;
1598 
1599 	/*
1600 	 *	Count it even if it's bad.
1601 	 */
1602 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1603 
1604 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1605 		goto discard_it;
1606 
1607 	th = tcp_hdr(skb);
1608 
1609 	if (th->doff < sizeof(struct tcphdr)/4)
1610 		goto bad_packet;
1611 	if (!pskb_may_pull(skb, th->doff*4))
1612 		goto discard_it;
1613 
1614 	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1615 		goto bad_packet;
1616 
1617 	th = tcp_hdr(skb);
1618 	hdr = ipv6_hdr(skb);
1619 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1620 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1621 				    skb->len - th->doff*4);
1622 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1623 	TCP_SKB_CB(skb)->when = 0;
1624 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1625 	TCP_SKB_CB(skb)->sacked = 0;
1626 
1627 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1628 	if (!sk)
1629 		goto no_tcp_socket;
1630 
1631 process:
1632 	if (sk->sk_state == TCP_TIME_WAIT)
1633 		goto do_time_wait;
1634 
1635 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1636 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1637 		goto discard_and_relse;
1638 	}
1639 
1640 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1641 		goto discard_and_relse;
1642 
1643 	if (sk_filter(sk, skb))
1644 		goto discard_and_relse;
1645 
1646 	skb->dev = NULL;
1647 
1648 	bh_lock_sock_nested(sk);
1649 	ret = 0;
1650 	if (!sock_owned_by_user(sk)) {
1651 #ifdef CONFIG_NET_DMA
1652 		struct tcp_sock *tp = tcp_sk(sk);
1653 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1654 			tp->ucopy.dma_chan = net_dma_find_channel();
1655 		if (tp->ucopy.dma_chan)
1656 			ret = tcp_v6_do_rcv(sk, skb);
1657 		else
1658 #endif
1659 		{
1660 			if (!tcp_prequeue(sk, skb))
1661 				ret = tcp_v6_do_rcv(sk, skb);
1662 		}
1663 	} else if (unlikely(sk_add_backlog(sk, skb,
1664 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1665 		bh_unlock_sock(sk);
1666 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1667 		goto discard_and_relse;
1668 	}
1669 	bh_unlock_sock(sk);
1670 
1671 	sock_put(sk);
1672 	return ret ? -1 : 0;
1673 
1674 no_tcp_socket:
1675 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1676 		goto discard_it;
1677 
1678 	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1679 bad_packet:
1680 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1681 	} else {
1682 		tcp_v6_send_reset(NULL, skb);
1683 	}
1684 
1685 discard_it:
1686 
1687 	/*
1688 	 *	Discard frame
1689 	 */
1690 
1691 	kfree_skb(skb);
1692 	return 0;
1693 
1694 discard_and_relse:
1695 	sock_put(sk);
1696 	goto discard_it;
1697 
1698 do_time_wait:
1699 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1700 		inet_twsk_put(inet_twsk(sk));
1701 		goto discard_it;
1702 	}
1703 
1704 	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1705 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1706 		inet_twsk_put(inet_twsk(sk));
1707 		goto discard_it;
1708 	}
1709 
1710 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1711 	case TCP_TW_SYN:
1712 	{
1713 		struct sock *sk2;
1714 
1715 		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1716 					    &ipv6_hdr(skb)->daddr,
1717 					    ntohs(th->dest), inet6_iif(skb));
1718 		if (sk2 != NULL) {
1719 			struct inet_timewait_sock *tw = inet_twsk(sk);
1720 			inet_twsk_deschedule(tw, &tcp_death_row);
1721 			inet_twsk_put(tw);
1722 			sk = sk2;
1723 			goto process;
1724 		}
1725 		/* Fall through to ACK */
1726 	}
1727 	case TCP_TW_ACK:
1728 		tcp_v6_timewait_ack(sk, skb);
1729 		break;
1730 	case TCP_TW_RST:
1731 		goto no_tcp_socket;
1732 	case TCP_TW_SUCCESS:;
1733 	}
1734 	goto discard_it;
1735 }
1736 
1737 static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1738 {
1739 	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1740 	struct ipv6_pinfo *np = inet6_sk(sk);
1741 	struct inet_peer *peer;
1742 
1743 	if (!rt ||
1744 	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1745 		peer = inet_getpeer_v6(&np->daddr, 1);
1746 		*release_it = true;
1747 	} else {
1748 		if (!rt->rt6i_peer)
1749 			rt6_bind_peer(rt, 1);
1750 		peer = rt->rt6i_peer;
1751 		*release_it = false;
1752 	}
1753 
1754 	return peer;
1755 }
1756 
1757 static void *tcp_v6_tw_get_peer(struct sock *sk)
1758 {
1759 	const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1760 	const struct inet_timewait_sock *tw = inet_twsk(sk);
1761 
1762 	if (tw->tw_family == AF_INET)
1763 		return tcp_v4_tw_get_peer(sk);
1764 
1765 	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1766 }
1767 
1768 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1769 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1770 	.twsk_unique	= tcp_twsk_unique,
1771 	.twsk_destructor= tcp_twsk_destructor,
1772 	.twsk_getpeer	= tcp_v6_tw_get_peer,
1773 };
1774 
1775 static const struct inet_connection_sock_af_ops ipv6_specific = {
1776 	.queue_xmit	   = inet6_csk_xmit,
1777 	.send_check	   = tcp_v6_send_check,
1778 	.rebuild_header	   = inet6_sk_rebuild_header,
1779 	.conn_request	   = tcp_v6_conn_request,
1780 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1781 	.get_peer	   = tcp_v6_get_peer,
1782 	.net_header_len	   = sizeof(struct ipv6hdr),
1783 	.net_frag_header_len = sizeof(struct frag_hdr),
1784 	.setsockopt	   = ipv6_setsockopt,
1785 	.getsockopt	   = ipv6_getsockopt,
1786 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1787 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1788 	.bind_conflict	   = inet6_csk_bind_conflict,
1789 #ifdef CONFIG_COMPAT
1790 	.compat_setsockopt = compat_ipv6_setsockopt,
1791 	.compat_getsockopt = compat_ipv6_getsockopt,
1792 #endif
1793 };
1794 
1795 #ifdef CONFIG_TCP_MD5SIG
1796 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1797 	.md5_lookup	=	tcp_v6_md5_lookup,
1798 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1799 	.md5_parse	=	tcp_v6_parse_md5_keys,
1800 };
1801 #endif
1802 
1803 /*
1804  *	TCP over IPv4 via INET6 API
1805  */
1806 
1807 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1808 	.queue_xmit	   = ip_queue_xmit,
1809 	.send_check	   = tcp_v4_send_check,
1810 	.rebuild_header	   = inet_sk_rebuild_header,
1811 	.conn_request	   = tcp_v6_conn_request,
1812 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1813 	.get_peer	   = tcp_v4_get_peer,
1814 	.net_header_len	   = sizeof(struct iphdr),
1815 	.setsockopt	   = ipv6_setsockopt,
1816 	.getsockopt	   = ipv6_getsockopt,
1817 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1818 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1819 	.bind_conflict	   = inet6_csk_bind_conflict,
1820 #ifdef CONFIG_COMPAT
1821 	.compat_setsockopt = compat_ipv6_setsockopt,
1822 	.compat_getsockopt = compat_ipv6_getsockopt,
1823 #endif
1824 };
1825 
1826 #ifdef CONFIG_TCP_MD5SIG
1827 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1828 	.md5_lookup	=	tcp_v4_md5_lookup,
1829 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1830 	.md5_parse	=	tcp_v6_parse_md5_keys,
1831 };
1832 #endif
1833 
1834 /* NOTE: A lot of things set to zero explicitly by call to
1835  *       sk_alloc() so need not be done here.
1836  */
1837 static int tcp_v6_init_sock(struct sock *sk)
1838 {
1839 	struct inet_connection_sock *icsk = inet_csk(sk);
1840 
1841 	tcp_init_sock(sk);
1842 
1843 	icsk->icsk_af_ops = &ipv6_specific;
1844 
1845 #ifdef CONFIG_TCP_MD5SIG
1846 	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1847 #endif
1848 
1849 	return 0;
1850 }
1851 
1852 static void tcp_v6_destroy_sock(struct sock *sk)
1853 {
1854 	tcp_v4_destroy_sock(sk);
1855 	inet6_destroy_sock(sk);
1856 }
1857 
1858 #ifdef CONFIG_PROC_FS
1859 /* Proc filesystem TCPv6 sock list dumping. */
1860 static void get_openreq6(struct seq_file *seq,
1861 			 const struct sock *sk, struct request_sock *req, int i, int uid)
1862 {
1863 	int ttd = req->expires - jiffies;
1864 	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1865 	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1866 
1867 	if (ttd < 0)
1868 		ttd = 0;
1869 
1870 	seq_printf(seq,
1871 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1872 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1873 		   i,
1874 		   src->s6_addr32[0], src->s6_addr32[1],
1875 		   src->s6_addr32[2], src->s6_addr32[3],
1876 		   ntohs(inet_rsk(req)->loc_port),
1877 		   dest->s6_addr32[0], dest->s6_addr32[1],
1878 		   dest->s6_addr32[2], dest->s6_addr32[3],
1879 		   ntohs(inet_rsk(req)->rmt_port),
1880 		   TCP_SYN_RECV,
1881 		   0, 0, /* could print option size, but that is af dependent. */
1882 		   1,   /* timers active (only the expire timer) */
1883 		   jiffies_to_clock_t(ttd),
1884 		   req->retrans,
1885 		   uid,
1886 		   0,  /* non standard timer */
1887 		   0, /* open_requests have no inode */
1888 		   0, req);
1889 }
1890 
1891 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1892 {
1893 	const struct in6_addr *dest, *src;
1894 	__u16 destp, srcp;
1895 	int timer_active;
1896 	unsigned long timer_expires;
1897 	const struct inet_sock *inet = inet_sk(sp);
1898 	const struct tcp_sock *tp = tcp_sk(sp);
1899 	const struct inet_connection_sock *icsk = inet_csk(sp);
1900 	const struct ipv6_pinfo *np = inet6_sk(sp);
1901 
1902 	dest  = &np->daddr;
1903 	src   = &np->rcv_saddr;
1904 	destp = ntohs(inet->inet_dport);
1905 	srcp  = ntohs(inet->inet_sport);
1906 
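	/*
	 * timer_active feeds the "tr" column of /proc/net/tcp6:
	 * 1 = retransmit timer, 2 = keepalive (sk_timer), 4 = zero-window
	 * probe, 0 = nothing pending.  Time-wait sockets report 3 from
	 * get_timewait6_sock() below.
	 */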
1907 	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1908 		timer_active	= 1;
1909 		timer_expires	= icsk->icsk_timeout;
1910 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1911 		timer_active	= 4;
1912 		timer_expires	= icsk->icsk_timeout;
1913 	} else if (timer_pending(&sp->sk_timer)) {
1914 		timer_active	= 2;
1915 		timer_expires	= sp->sk_timer.expires;
1916 	} else {
1917 		timer_active	= 0;
1918 		timer_expires = jiffies;
1919 	}
1920 
1921 	seq_printf(seq,
1922 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1923 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
1924 		   i,
1925 		   src->s6_addr32[0], src->s6_addr32[1],
1926 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1927 		   dest->s6_addr32[0], dest->s6_addr32[1],
1928 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1929 		   sp->sk_state,
1930 		   tp->write_seq - tp->snd_una,
1931 		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1932 		   timer_active,
1933 		   jiffies_to_clock_t(timer_expires - jiffies),
1934 		   icsk->icsk_retransmits,
1935 		   sock_i_uid(sp),
1936 		   icsk->icsk_probes_out,
1937 		   sock_i_ino(sp),
1938 		   atomic_read(&sp->sk_refcnt), sp,
1939 		   jiffies_to_clock_t(icsk->icsk_rto),
1940 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1941 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1942 		   tp->snd_cwnd,
1943 		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1944 		   );
1945 }
1946 
1947 static void get_timewait6_sock(struct seq_file *seq,
1948 			       struct inet_timewait_sock *tw, int i)
1949 {
1950 	const struct in6_addr *dest, *src;
1951 	__u16 destp, srcp;
1952 	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1953 	int ttd = tw->tw_ttd - jiffies;
1954 
1955 	if (ttd < 0)
1956 		ttd = 0;
1957 
1958 	dest = &tw6->tw_v6_daddr;
1959 	src  = &tw6->tw_v6_rcv_saddr;
1960 	destp = ntohs(tw->tw_dport);
1961 	srcp  = ntohs(tw->tw_sport);
1962 
1963 	seq_printf(seq,
1964 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1965 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1966 		   i,
1967 		   src->s6_addr32[0], src->s6_addr32[1],
1968 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1969 		   dest->s6_addr32[0], dest->s6_addr32[1],
1970 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1971 		   tw->tw_substate, 0, 0,
1972 		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
1973 		   atomic_read(&tw->tw_refcnt), tw);
1974 }
1975 
1976 static int tcp6_seq_show(struct seq_file *seq, void *v)
1977 {
1978 	struct tcp_iter_state *st;
1979 
1980 	if (v == SEQ_START_TOKEN) {
1981 		seq_puts(seq,
1982 			 "  sl  "
1983 			 "local_address                         "
1984 			 "remote_address                        "
1985 			 "st tx_queue rx_queue tr tm->when retrnsmt"
1986 			 "   uid  timeout inode\n");
1987 		goto out;
1988 	}
1989 	st = seq->private;
1990 
1991 	switch (st->state) {
1992 	case TCP_SEQ_STATE_LISTENING:
1993 	case TCP_SEQ_STATE_ESTABLISHED:
1994 		get_tcp6_sock(seq, v, st->num);
1995 		break;
1996 	case TCP_SEQ_STATE_OPENREQ:
1997 		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1998 		break;
1999 	case TCP_SEQ_STATE_TIME_WAIT:
2000 		get_timewait6_sock(seq, v, st->num);
2001 		break;
2002 	}
2003 out:
2004 	return 0;
2005 }
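/*
 * Each row printed by the helpers above lines up with the header emitted
 * here.  An illustrative established-socket entry (values are made up and
 * the line is wrapped for readability; field order and widths come from
 * the format strings above):
 *
 *	0: 00000000000000000000000001000000:0016 00000000000000000000000001000000:D2A0
 *	   01 00000000:00000000 00:00000000 00000000  1000        0 12345 2 ffff880012345678 ...
 *
 * Addresses are the four 32-bit words of the IPv6 address printed in hex,
 * followed by the port, also in hex.
 */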
2006 
2007 static const struct file_operations tcp6_afinfo_seq_fops = {
2008 	.owner   = THIS_MODULE,
2009 	.open    = tcp_seq_open,
2010 	.read    = seq_read,
2011 	.llseek  = seq_lseek,
2012 	.release = seq_release_net
2013 };
2014 
2015 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2016 	.name		= "tcp6",
2017 	.family		= AF_INET6,
2018 	.seq_fops	= &tcp6_afinfo_seq_fops,
2019 	.seq_ops	= {
2020 		.show		= tcp6_seq_show,
2021 	},
2022 };
2023 
2024 int __net_init tcp6_proc_init(struct net *net)
2025 {
2026 	return tcp_proc_register(net, &tcp6_seq_afinfo);
2027 }
2028 
2029 void tcp6_proc_exit(struct net *net)
2030 {
2031 	tcp_proc_unregister(net, &tcp6_seq_afinfo);
2032 }
2033 #endif
2034 
2035 struct proto tcpv6_prot = {
2036 	.name			= "TCPv6",
2037 	.owner			= THIS_MODULE,
2038 	.close			= tcp_close,
2039 	.connect		= tcp_v6_connect,
2040 	.disconnect		= tcp_disconnect,
2041 	.accept			= inet_csk_accept,
2042 	.ioctl			= tcp_ioctl,
2043 	.init			= tcp_v6_init_sock,
2044 	.destroy		= tcp_v6_destroy_sock,
2045 	.shutdown		= tcp_shutdown,
2046 	.setsockopt		= tcp_setsockopt,
2047 	.getsockopt		= tcp_getsockopt,
2048 	.recvmsg		= tcp_recvmsg,
2049 	.sendmsg		= tcp_sendmsg,
2050 	.sendpage		= tcp_sendpage,
2051 	.backlog_rcv		= tcp_v6_do_rcv,
2052 	.hash			= tcp_v6_hash,
2053 	.unhash			= inet_unhash,
2054 	.get_port		= inet_csk_get_port,
2055 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2056 	.sockets_allocated	= &tcp_sockets_allocated,
2057 	.memory_allocated	= &tcp_memory_allocated,
2058 	.memory_pressure	= &tcp_memory_pressure,
2059 	.orphan_count		= &tcp_orphan_count,
2060 	.sysctl_wmem		= sysctl_tcp_wmem,
2061 	.sysctl_rmem		= sysctl_tcp_rmem,
2062 	.max_header		= MAX_TCP_HEADER,
2063 	.obj_size		= sizeof(struct tcp6_sock),
2064 	.slab_flags		= SLAB_DESTROY_BY_RCU,
2065 	.twsk_prot		= &tcp6_timewait_sock_ops,
2066 	.rsk_prot		= &tcp6_request_sock_ops,
2067 	.h.hashinfo		= &tcp_hashinfo,
2068 	.no_autobind		= true,
2069 #ifdef CONFIG_COMPAT
2070 	.compat_setsockopt	= compat_tcp_setsockopt,
2071 	.compat_getsockopt	= compat_tcp_getsockopt,
2072 #endif
2073 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
2074 	.proto_cgroup		= tcp_proto_cgroup,
2075 #endif
2076 };
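/*
 * tcpv6_prot is what every AF_INET6 stream socket ends up bound to once
 * tcpv6_protosw below has been registered (see tcpv6_init()).  A minimal
 * userspace sketch of the hooks it exercises -- illustrative only, not
 * kernel code:
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);	// .init    -> tcp_v6_init_sock()
 *	connect(fd, (struct sockaddr *)&sin6, sizeof(sin6));
 *							// .connect -> tcp_v6_connect()
 *	close(fd);					// .close   -> tcp_close()
 */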
2077 
2078 static const struct inet6_protocol tcpv6_protocol = {
2079 	.handler	=	tcp_v6_rcv,
2080 	.err_handler	=	tcp_v6_err,
2081 	.gso_send_check	=	tcp_v6_gso_send_check,
2082 	.gso_segment	=	tcp_tso_segment,
2083 	.gro_receive	=	tcp6_gro_receive,
2084 	.gro_complete	=	tcp6_gro_complete,
2085 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2086 };
2087 
2088 static struct inet_protosw tcpv6_protosw = {
2089 	.type		=	SOCK_STREAM,
2090 	.protocol	=	IPPROTO_TCP,
2091 	.prot		=	&tcpv6_prot,
2092 	.ops		=	&inet6_stream_ops,
2093 	.no_check	=	0,
2094 	.flags		=	INET_PROTOSW_PERMANENT |
2095 				INET_PROTOSW_ICSK,
2096 };
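/*
 * tcpv6_protosw describes this protocol to the inet6 socket layer:
 * inet6_register_protosw() in tcpv6_init() adds it to the AF_INET6
 * switch so SOCK_STREAM/IPPROTO_TCP sockets are created with
 * &tcpv6_prot and &inet6_stream_ops.
 */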
2097 
2098 static int __net_init tcpv6_net_init(struct net *net)
2099 {
2100 	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2101 				    SOCK_RAW, IPPROTO_TCP, net);
2102 }
2103 
2104 static void __net_exit tcpv6_net_exit(struct net *net)
2105 {
2106 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2107 }
2108 
2109 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2110 {
2111 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2112 }
2113 
2114 static struct pernet_operations tcpv6_net_ops = {
2115 	.init	    = tcpv6_net_init,
2116 	.exit	    = tcpv6_net_exit,
2117 	.exit_batch = tcpv6_net_exit_batch,
2118 };
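/*
 * The per-namespace hooks above create and destroy net->ipv6.tcp_sk, the
 * kernel-internal control socket this file uses when it has to transmit
 * without a full socket (e.g. the RST/ACK replies sent via
 * tcp_v6_send_reset() and tcp_v6_reqsk_send_ack()), and purge any
 * remaining IPv6 time-wait sockets when a namespace is torn down.
 */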
2119 
2120 int __init tcpv6_init(void)
2121 {
2122 	int ret;
2123 
2124 	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2125 	if (ret)
2126 		goto out;
2127 
2128 	/* register inet6 protocol */
2129 	ret = inet6_register_protosw(&tcpv6_protosw);
2130 	if (ret)
2131 		goto out_tcpv6_protocol;
2132 
2133 	ret = register_pernet_subsys(&tcpv6_net_ops);
2134 	if (ret)
2135 		goto out_tcpv6_protosw;
2136 out:
2137 	return ret;
2138 
2139 out_tcpv6_protosw:
2140 	inet6_unregister_protosw(&tcpv6_protosw);
2141 out_tcpv6_protocol:
2142 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2143 	goto out;
2144 }
2145 
2146 void tcpv6_exit(void)
2147 {
2148 	unregister_pernet_subsys(&tcpv6_net_ops);
2149 	inet6_unregister_protosw(&tcpv6_protosw);
2150 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2151 }
2152