xref: /openbmc/linux/net/ipv6/tcp_ipv6.c (revision 81aded24675ebda5de8a68843250ad15584ac38a)
1 /*
2  *	TCP over IPv6
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	Based on:
9  *	linux/net/ipv4/tcp.c
10  *	linux/net/ipv4/tcp_input.c
11  *	linux/net/ipv4/tcp_output.c
12  *
13  *	Fixes:
14  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
15  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
16  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
17  *					a single port at the same time.
18  *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
19  *
20  *	This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25 
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46 
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63 #include <net/inet_common.h>
64 #include <net/secure_seq.h>
65 #include <net/tcp_memcontrol.h>
66 
67 #include <asm/uaccess.h>
68 
69 #include <linux/proc_fs.h>
70 #include <linux/seq_file.h>
71 
72 #include <linux/crypto.h>
73 #include <linux/scatterlist.h>
74 
75 static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
76 static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
77 				      struct request_sock *req);
78 
79 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
80 static void	__tcp_v6_send_check(struct sk_buff *skb,
81 				    const struct in6_addr *saddr,
82 				    const struct in6_addr *daddr);
83 
84 static const struct inet_connection_sock_af_ops ipv6_mapped;
85 static const struct inet_connection_sock_af_ops ipv6_specific;
86 #ifdef CONFIG_TCP_MD5SIG
87 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
88 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
89 #else
90 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
91 						   const struct in6_addr *addr)
92 {
93 	return NULL;
94 }
95 #endif
96 
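/* Hash the socket into the TCP lookup tables.  Sockets using the
 * v4-mapped ops are hashed through the IPv4 path (tcp_prot.hash);
 * native IPv6 sockets go into the IPv6 hash tables with bottom
 * halves disabled.
 */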
97 static void tcp_v6_hash(struct sock *sk)
98 {
99 	if (sk->sk_state != TCP_CLOSE) {
100 		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
101 			tcp_prot.hash(sk);
102 			return;
103 		}
104 		local_bh_disable();
105 		__inet6_hash(sk, NULL);
106 		local_bh_enable();
107 	}
108 }
109 
110 static __inline__ __sum16 tcp_v6_check(int len,
111 				   const struct in6_addr *saddr,
112 				   const struct in6_addr *daddr,
113 				   __wsum base)
114 {
115 	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
116 }
117 
118 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
119 {
120 	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
121 					    ipv6_hdr(skb)->saddr.s6_addr32,
122 					    tcp_hdr(skb)->dest,
123 					    tcp_hdr(skb)->source);
124 }
125 
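/* Connect an IPv6 TCP socket to the address given in uaddr.  Handles
 * flow labels, scope ids for link-local destinations, and hands
 * v4-mapped destinations off to tcp_v4_connect().
 */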
126 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
127 			  int addr_len)
128 {
129 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
130 	struct inet_sock *inet = inet_sk(sk);
131 	struct inet_connection_sock *icsk = inet_csk(sk);
132 	struct ipv6_pinfo *np = inet6_sk(sk);
133 	struct tcp_sock *tp = tcp_sk(sk);
134 	struct in6_addr *saddr = NULL, *final_p, final;
135 	struct rt6_info *rt;
136 	struct flowi6 fl6;
137 	struct dst_entry *dst;
138 	int addr_type;
139 	int err;
140 
141 	if (addr_len < SIN6_LEN_RFC2133)
142 		return -EINVAL;
143 
144 	if (usin->sin6_family != AF_INET6)
145 		return -EAFNOSUPPORT;
146 
147 	memset(&fl6, 0, sizeof(fl6));
148 
149 	if (np->sndflow) {
150 		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
151 		IP6_ECN_flow_init(fl6.flowlabel);
152 		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
153 			struct ip6_flowlabel *flowlabel;
154 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
155 			if (flowlabel == NULL)
156 				return -EINVAL;
157 			usin->sin6_addr = flowlabel->dst;
158 			fl6_sock_release(flowlabel);
159 		}
160 	}
161 
162 	/*
163 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
164 	 */
165 
166 	if (ipv6_addr_any(&usin->sin6_addr))
167 		usin->sin6_addr.s6_addr[15] = 0x1;
168 
169 	addr_type = ipv6_addr_type(&usin->sin6_addr);
170 
171 	if (addr_type & IPV6_ADDR_MULTICAST)
172 		return -ENETUNREACH;
173 
174 	if (addr_type&IPV6_ADDR_LINKLOCAL) {
175 		if (addr_len >= sizeof(struct sockaddr_in6) &&
176 		    usin->sin6_scope_id) {
177 			/* If interface is set while binding, indices
178 			 * must coincide.
179 			 */
180 			if (sk->sk_bound_dev_if &&
181 			    sk->sk_bound_dev_if != usin->sin6_scope_id)
182 				return -EINVAL;
183 
184 			sk->sk_bound_dev_if = usin->sin6_scope_id;
185 		}
186 
187 		/* Connecting to a link-local address requires an interface */
188 		if (!sk->sk_bound_dev_if)
189 			return -EINVAL;
190 	}
191 
192 	if (tp->rx_opt.ts_recent_stamp &&
193 	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
194 		tp->rx_opt.ts_recent = 0;
195 		tp->rx_opt.ts_recent_stamp = 0;
196 		tp->write_seq = 0;
197 	}
198 
199 	np->daddr = usin->sin6_addr;
200 	np->flow_label = fl6.flowlabel;
201 
202 	/*
203 	 *	TCP over IPv4
204 	 */
205 
206 	if (addr_type == IPV6_ADDR_MAPPED) {
207 		u32 exthdrlen = icsk->icsk_ext_hdr_len;
208 		struct sockaddr_in sin;
209 
210 		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
211 
212 		if (__ipv6_only_sock(sk))
213 			return -ENETUNREACH;
214 
215 		sin.sin_family = AF_INET;
216 		sin.sin_port = usin->sin6_port;
217 		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
218 
219 		icsk->icsk_af_ops = &ipv6_mapped;
220 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
221 #ifdef CONFIG_TCP_MD5SIG
222 		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
223 #endif
224 
225 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
226 
227 		if (err) {
228 			icsk->icsk_ext_hdr_len = exthdrlen;
229 			icsk->icsk_af_ops = &ipv6_specific;
230 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
231 #ifdef CONFIG_TCP_MD5SIG
232 			tp->af_specific = &tcp_sock_ipv6_specific;
233 #endif
234 			goto failure;
235 		} else {
236 			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
237 			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
238 					       &np->rcv_saddr);
239 		}
240 
241 		return err;
242 	}
243 
244 	if (!ipv6_addr_any(&np->rcv_saddr))
245 		saddr = &np->rcv_saddr;
246 
247 	fl6.flowi6_proto = IPPROTO_TCP;
248 	fl6.daddr = np->daddr;
249 	fl6.saddr = saddr ? *saddr : np->saddr;
250 	fl6.flowi6_oif = sk->sk_bound_dev_if;
251 	fl6.flowi6_mark = sk->sk_mark;
252 	fl6.fl6_dport = usin->sin6_port;
253 	fl6.fl6_sport = inet->inet_sport;
254 
255 	final_p = fl6_update_dst(&fl6, np->opt, &final);
256 
257 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
258 
259 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
260 	if (IS_ERR(dst)) {
261 		err = PTR_ERR(dst);
262 		goto failure;
263 	}
264 
265 	if (saddr == NULL) {
266 		saddr = &fl6.saddr;
267 		np->rcv_saddr = *saddr;
268 	}
269 
270 	/* set the source address */
271 	np->saddr = *saddr;
272 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
273 
274 	sk->sk_gso_type = SKB_GSO_TCPV6;
275 	__ip6_dst_store(sk, dst, NULL, NULL);
276 
277 	rt = (struct rt6_info *) dst;
278 	if (tcp_death_row.sysctl_tw_recycle &&
279 	    !tp->rx_opt.ts_recent_stamp &&
280 	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
281 		struct inet_peer *peer = rt6_get_peer(rt);
282 		/*
283 		 * VJ's idea. We save the last timestamp seen from
284 		 * the destination in the peer table when entering
285 		 * TIME-WAIT state and initialize rx_opt.ts_recent from it
286 		 * when trying a new connection.
287 		 */
288 		if (peer) {
289 			inet_peer_refcheck(peer);
290 			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
291 				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
292 				tp->rx_opt.ts_recent = peer->tcp_ts;
293 			}
294 		}
295 	}
296 
297 	icsk->icsk_ext_hdr_len = 0;
298 	if (np->opt)
299 		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
300 					  np->opt->opt_nflen);
301 
302 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
303 
304 	inet->inet_dport = usin->sin6_port;
305 
306 	tcp_set_state(sk, TCP_SYN_SENT);
307 	err = inet6_hash_connect(&tcp_death_row, sk);
308 	if (err)
309 		goto late_failure;
310 
311 	if (!tp->write_seq)
312 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
313 							     np->daddr.s6_addr32,
314 							     inet->inet_sport,
315 							     inet->inet_dport);
316 
317 	err = tcp_connect(sk);
318 	if (err)
319 		goto late_failure;
320 
321 	return 0;
322 
323 late_failure:
324 	tcp_set_state(sk, TCP_CLOSE);
325 	__sk_dst_reset(sk);
326 failure:
327 	inet->inet_dport = 0;
328 	sk->sk_route_caps = 0;
329 	return err;
330 }
331 
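/* ICMPv6 error handler for TCP: look up the affected socket, handle
 * path MTU discovery for ICMPV6_PKT_TOOBIG, and report other errors
 * to the socket or to a matching pending request sock.
 */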
332 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
333 		u8 type, u8 code, int offset, __be32 info)
334 {
335 	const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
336 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
337 	struct ipv6_pinfo *np;
338 	struct sock *sk;
339 	int err;
340 	struct tcp_sock *tp;
341 	__u32 seq;
342 	struct net *net = dev_net(skb->dev);
343 
344 	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
345 			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
346 
347 	if (sk == NULL) {
348 		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
349 				   ICMP6_MIB_INERRORS);
350 		return;
351 	}
352 
353 	if (sk->sk_state == TCP_TIME_WAIT) {
354 		inet_twsk_put(inet_twsk(sk));
355 		return;
356 	}
357 
358 	bh_lock_sock(sk);
359 	if (sock_owned_by_user(sk))
360 		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
361 
362 	if (sk->sk_state == TCP_CLOSE)
363 		goto out;
364 
365 	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
366 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
367 		goto out;
368 	}
369 
370 	tp = tcp_sk(sk);
371 	seq = ntohl(th->seq);
372 	if (sk->sk_state != TCP_LISTEN &&
373 	    !between(seq, tp->snd_una, tp->snd_nxt)) {
374 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
375 		goto out;
376 	}
377 
378 	np = inet6_sk(sk);
379 
380 	if (type == ICMPV6_PKT_TOOBIG) {
381 		struct dst_entry *dst;
382 
383 		if (sock_owned_by_user(sk))
384 			goto out;
385 		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
386 			goto out;
387 
388 		/* icmp should have updated the destination cache entry */
389 		dst = __sk_dst_check(sk, np->dst_cookie);
390 
391 		if (dst == NULL) {
392 			struct inet_sock *inet = inet_sk(sk);
393 			struct flowi6 fl6;
394 
395 			/* BUGGG_FUTURE: Again, it is not clear how
396 			   to handle rthdr case. Ignore this complexity
397 			   for now.
398 			 */
399 			memset(&fl6, 0, sizeof(fl6));
400 			fl6.flowi6_proto = IPPROTO_TCP;
401 			fl6.daddr = np->daddr;
402 			fl6.saddr = np->saddr;
403 			fl6.flowi6_oif = sk->sk_bound_dev_if;
404 			fl6.flowi6_mark = sk->sk_mark;
405 			fl6.fl6_dport = inet->inet_dport;
406 			fl6.fl6_sport = inet->inet_sport;
407 			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
408 
409 			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
410 			if (IS_ERR(dst)) {
411 				sk->sk_err_soft = -PTR_ERR(dst);
412 				goto out;
413 			}
414 
415 		} else
416 			dst_hold(dst);
417 
418 		dst->ops->update_pmtu(dst, ntohl(info));
419 
420 		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
421 			tcp_sync_mss(sk, dst_mtu(dst));
422 			tcp_simple_retransmit(sk);
423 		} /* else let the usual retransmit timer handle it */
424 		dst_release(dst);
425 		goto out;
426 	}
427 
428 	icmpv6_err_convert(type, code, &err);
429 
430 	/* Might be for a request_sock */
431 	switch (sk->sk_state) {
432 		struct request_sock *req, **prev;
433 	case TCP_LISTEN:
434 		if (sock_owned_by_user(sk))
435 			goto out;
436 
437 		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
438 					   &hdr->saddr, inet6_iif(skb));
439 		if (!req)
440 			goto out;
441 
442 		/* ICMPs are not backlogged, hence we cannot get
443 		 * an established socket here.
444 		 */
445 		WARN_ON(req->sk != NULL);
446 
447 		if (seq != tcp_rsk(req)->snt_isn) {
448 			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
449 			goto out;
450 		}
451 
452 		inet_csk_reqsk_queue_drop(sk, req, prev);
453 		goto out;
454 
455 	case TCP_SYN_SENT:
456 	case TCP_SYN_RECV:  /* Cannot happen.
457 			       It can, if SYNs are crossed. --ANK */
458 		if (!sock_owned_by_user(sk)) {
459 			sk->sk_err = err;
460 			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
461 
462 			tcp_done(sk);
463 		} else
464 			sk->sk_err_soft = err;
465 		goto out;
466 	}
467 
468 	if (!sock_owned_by_user(sk) && np->recverr) {
469 		sk->sk_err = err;
470 		sk->sk_error_report(sk);
471 	} else
472 		sk->sk_err_soft = err;
473 
474 out:
475 	bh_unlock_sock(sk);
476 	sock_put(sk);
477 }
478 
479 
480 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
481 			      struct request_values *rvp,
482 			      u16 queue_mapping)
483 {
484 	struct inet6_request_sock *treq = inet6_rsk(req);
485 	struct ipv6_pinfo *np = inet6_sk(sk);
486 	struct sk_buff * skb;
487 	struct ipv6_txoptions *opt = NULL;
488 	struct in6_addr * final_p, final;
489 	struct flowi6 fl6;
490 	struct dst_entry *dst;
491 	int err;
492 
493 	memset(&fl6, 0, sizeof(fl6));
494 	fl6.flowi6_proto = IPPROTO_TCP;
495 	fl6.daddr = treq->rmt_addr;
496 	fl6.saddr = treq->loc_addr;
497 	fl6.flowlabel = 0;
498 	fl6.flowi6_oif = treq->iif;
499 	fl6.flowi6_mark = sk->sk_mark;
500 	fl6.fl6_dport = inet_rsk(req)->rmt_port;
501 	fl6.fl6_sport = inet_rsk(req)->loc_port;
502 	security_req_classify_flow(req, flowi6_to_flowi(&fl6));
503 
504 	opt = np->opt;
505 	final_p = fl6_update_dst(&fl6, opt, &final);
506 
507 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
508 	if (IS_ERR(dst)) {
509 		err = PTR_ERR(dst);
510 		dst = NULL;
511 		goto done;
512 	}
513 	skb = tcp_make_synack(sk, dst, req, rvp);
514 	err = -ENOMEM;
515 	if (skb) {
516 		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
517 
518 		fl6.daddr = treq->rmt_addr;
519 		skb_set_queue_mapping(skb, queue_mapping);
520 		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
521 		err = net_xmit_eval(err);
522 	}
523 
524 done:
525 	if (opt && opt != np->opt)
526 		sock_kfree_s(sk, opt, opt->tot_len);
527 	return err;
528 }
529 
530 static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
531 			     struct request_values *rvp)
532 {
533 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
534 	return tcp_v6_send_synack(sk, req, rvp, 0);
535 }
536 
537 static void tcp_v6_reqsk_destructor(struct request_sock *req)
538 {
539 	kfree_skb(inet6_rsk(req)->pktopts);
540 }
541 
542 #ifdef CONFIG_TCP_MD5SIG
543 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
544 						   const struct in6_addr *addr)
545 {
546 	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
547 }
548 
549 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
550 						struct sock *addr_sk)
551 {
552 	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
553 }
554 
555 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
556 						      struct request_sock *req)
557 {
558 	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
559 }
560 
561 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
562 				  int optlen)
563 {
564 	struct tcp_md5sig cmd;
565 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
566 
567 	if (optlen < sizeof(cmd))
568 		return -EINVAL;
569 
570 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
571 		return -EFAULT;
572 
573 	if (sin6->sin6_family != AF_INET6)
574 		return -EINVAL;
575 
576 	if (!cmd.tcpm_keylen) {
577 		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
578 			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
579 					      AF_INET);
580 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
581 				      AF_INET6);
582 	}
583 
584 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
585 		return -EINVAL;
586 
587 	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
588 		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
589 				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
590 
591 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
592 			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
593 }
594 
595 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
596 					const struct in6_addr *daddr,
597 					const struct in6_addr *saddr, int nbytes)
598 {
599 	struct tcp6_pseudohdr *bp;
600 	struct scatterlist sg;
601 
602 	bp = &hp->md5_blk.ip6;
603 	/* 1. TCP pseudo-header (RFC2460) */
604 	bp->saddr = *saddr;
605 	bp->daddr = *daddr;
606 	bp->protocol = cpu_to_be32(IPPROTO_TCP);
607 	bp->len = cpu_to_be32(nbytes);
608 
609 	sg_init_one(&sg, bp, sizeof(*bp));
610 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
611 }
612 
613 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
614 			       const struct in6_addr *daddr, struct in6_addr *saddr,
615 			       const struct tcphdr *th)
616 {
617 	struct tcp_md5sig_pool *hp;
618 	struct hash_desc *desc;
619 
620 	hp = tcp_get_md5sig_pool();
621 	if (!hp)
622 		goto clear_hash_noput;
623 	desc = &hp->md5_desc;
624 
625 	if (crypto_hash_init(desc))
626 		goto clear_hash;
627 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
628 		goto clear_hash;
629 	if (tcp_md5_hash_header(hp, th))
630 		goto clear_hash;
631 	if (tcp_md5_hash_key(hp, key))
632 		goto clear_hash;
633 	if (crypto_hash_final(desc, md5_hash))
634 		goto clear_hash;
635 
636 	tcp_put_md5sig_pool();
637 	return 0;
638 
639 clear_hash:
640 	tcp_put_md5sig_pool();
641 clear_hash_noput:
642 	memset(md5_hash, 0, 16);
643 	return 1;
644 }
645 
646 static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
647 			       const struct sock *sk,
648 			       const struct request_sock *req,
649 			       const struct sk_buff *skb)
650 {
651 	const struct in6_addr *saddr, *daddr;
652 	struct tcp_md5sig_pool *hp;
653 	struct hash_desc *desc;
654 	const struct tcphdr *th = tcp_hdr(skb);
655 
656 	if (sk) {
657 		saddr = &inet6_sk(sk)->saddr;
658 		daddr = &inet6_sk(sk)->daddr;
659 	} else if (req) {
660 		saddr = &inet6_rsk(req)->loc_addr;
661 		daddr = &inet6_rsk(req)->rmt_addr;
662 	} else {
663 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
664 		saddr = &ip6h->saddr;
665 		daddr = &ip6h->daddr;
666 	}
667 
668 	hp = tcp_get_md5sig_pool();
669 	if (!hp)
670 		goto clear_hash_noput;
671 	desc = &hp->md5_desc;
672 
673 	if (crypto_hash_init(desc))
674 		goto clear_hash;
675 
676 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
677 		goto clear_hash;
678 	if (tcp_md5_hash_header(hp, th))
679 		goto clear_hash;
680 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
681 		goto clear_hash;
682 	if (tcp_md5_hash_key(hp, key))
683 		goto clear_hash;
684 	if (crypto_hash_final(desc, md5_hash))
685 		goto clear_hash;
686 
687 	tcp_put_md5sig_pool();
688 	return 0;
689 
690 clear_hash:
691 	tcp_put_md5sig_pool();
692 clear_hash_noput:
693 	memset(md5_hash, 0, 16);
694 	return 1;
695 }
696 
697 static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
698 {
699 	const __u8 *hash_location = NULL;
700 	struct tcp_md5sig_key *hash_expected;
701 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
702 	const struct tcphdr *th = tcp_hdr(skb);
703 	int genhash;
704 	u8 newhash[16];
705 
706 	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
707 	hash_location = tcp_parse_md5sig_option(th);
708 
709 	/* We've parsed the options - do we have a hash? */
710 	if (!hash_expected && !hash_location)
711 		return 0;
712 
713 	if (hash_expected && !hash_location) {
714 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
715 		return 1;
716 	}
717 
718 	if (!hash_expected && hash_location) {
719 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
720 		return 1;
721 	}
722 
723 	/* check the signature */
724 	genhash = tcp_v6_md5_hash_skb(newhash,
725 				      hash_expected,
726 				      NULL, NULL, skb);
727 
728 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
729 		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
730 				     genhash ? "failed" : "mismatch",
731 				     &ip6h->saddr, ntohs(th->source),
732 				     &ip6h->daddr, ntohs(th->dest));
733 		return 1;
734 	}
735 	return 0;
736 }
737 #endif
738 
739 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
740 	.family		=	AF_INET6,
741 	.obj_size	=	sizeof(struct tcp6_request_sock),
742 	.rtx_syn_ack	=	tcp_v6_rtx_synack,
743 	.send_ack	=	tcp_v6_reqsk_send_ack,
744 	.destructor	=	tcp_v6_reqsk_destructor,
745 	.send_reset	=	tcp_v6_send_reset,
746 	.syn_ack_timeout = 	tcp_syn_ack_timeout,
747 };
748 
749 #ifdef CONFIG_TCP_MD5SIG
750 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
751 	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
752 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
753 };
754 #endif
755 
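/* Fill in the TCP checksum of an outgoing segment.  With checksum
 * offload (CHECKSUM_PARTIAL) only the IPv6 pseudo-header sum is
 * stored and the device finishes the job; otherwise the complete
 * checksum over header and payload is computed here.
 */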
756 static void __tcp_v6_send_check(struct sk_buff *skb,
757 				const struct in6_addr *saddr, const struct in6_addr *daddr)
758 {
759 	struct tcphdr *th = tcp_hdr(skb);
760 
761 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
762 		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
763 		skb->csum_start = skb_transport_header(skb) - skb->head;
764 		skb->csum_offset = offsetof(struct tcphdr, check);
765 	} else {
766 		th->check = tcp_v6_check(skb->len, saddr, daddr,
767 					 csum_partial(th, th->doff << 2,
768 						      skb->csum));
769 	}
770 }
771 
772 static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
773 {
774 	struct ipv6_pinfo *np = inet6_sk(sk);
775 
776 	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
777 }
778 
779 static int tcp_v6_gso_send_check(struct sk_buff *skb)
780 {
781 	const struct ipv6hdr *ipv6h;
782 	struct tcphdr *th;
783 
784 	if (!pskb_may_pull(skb, sizeof(*th)))
785 		return -EINVAL;
786 
787 	ipv6h = ipv6_hdr(skb);
788 	th = tcp_hdr(skb);
789 
790 	th->check = 0;
791 	skb->ip_summed = CHECKSUM_PARTIAL;
792 	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
793 	return 0;
794 }
795 
796 static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
797 					 struct sk_buff *skb)
798 {
799 	const struct ipv6hdr *iph = skb_gro_network_header(skb);
800 
801 	switch (skb->ip_summed) {
802 	case CHECKSUM_COMPLETE:
803 		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
804 				  skb->csum)) {
805 			skb->ip_summed = CHECKSUM_UNNECESSARY;
806 			break;
807 		}
808 
809 		/* fall through */
810 	case CHECKSUM_NONE:
811 		NAPI_GRO_CB(skb)->flush = 1;
812 		return NULL;
813 	}
814 
815 	return tcp_gro_receive(head, skb);
816 }
817 
818 static int tcp6_gro_complete(struct sk_buff *skb)
819 {
820 	const struct ipv6hdr *iph = ipv6_hdr(skb);
821 	struct tcphdr *th = tcp_hdr(skb);
822 
823 	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
824 				  &iph->saddr, &iph->daddr, 0);
825 	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
826 
827 	return tcp_gro_complete(skb);
828 }
829 
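/* Build and send a bare ACK or RST in reply to skb on the per-netns
 * TCP control socket, optionally carrying timestamp and MD5
 * signature options.  Used where no full socket context is
 * available (resets, timewait and request sock ACKs).
 */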
830 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
831 				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
832 {
833 	const struct tcphdr *th = tcp_hdr(skb);
834 	struct tcphdr *t1;
835 	struct sk_buff *buff;
836 	struct flowi6 fl6;
837 	struct net *net = dev_net(skb_dst(skb)->dev);
838 	struct sock *ctl_sk = net->ipv6.tcp_sk;
839 	unsigned int tot_len = sizeof(struct tcphdr);
840 	struct dst_entry *dst;
841 	__be32 *topt;
842 
843 	if (ts)
844 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
845 #ifdef CONFIG_TCP_MD5SIG
846 	if (key)
847 		tot_len += TCPOLEN_MD5SIG_ALIGNED;
848 #endif
849 
850 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
851 			 GFP_ATOMIC);
852 	if (buff == NULL)
853 		return;
854 
855 	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
856 
857 	t1 = (struct tcphdr *) skb_push(buff, tot_len);
858 	skb_reset_transport_header(buff);
859 
860 	/* Swap the send and the receive. */
861 	memset(t1, 0, sizeof(*t1));
862 	t1->dest = th->source;
863 	t1->source = th->dest;
864 	t1->doff = tot_len / 4;
865 	t1->seq = htonl(seq);
866 	t1->ack_seq = htonl(ack);
867 	t1->ack = !rst || !th->ack;
868 	t1->rst = rst;
869 	t1->window = htons(win);
870 
871 	topt = (__be32 *)(t1 + 1);
872 
873 	if (ts) {
874 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
875 				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
876 		*topt++ = htonl(tcp_time_stamp);
877 		*topt++ = htonl(ts);
878 	}
879 
880 #ifdef CONFIG_TCP_MD5SIG
881 	if (key) {
882 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
883 				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
884 		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
885 				    &ipv6_hdr(skb)->saddr,
886 				    &ipv6_hdr(skb)->daddr, t1);
887 	}
888 #endif
889 
890 	memset(&fl6, 0, sizeof(fl6));
891 	fl6.daddr = ipv6_hdr(skb)->saddr;
892 	fl6.saddr = ipv6_hdr(skb)->daddr;
893 
894 	buff->ip_summed = CHECKSUM_PARTIAL;
895 	buff->csum = 0;
896 
897 	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
898 
899 	fl6.flowi6_proto = IPPROTO_TCP;
900 	fl6.flowi6_oif = inet6_iif(skb);
901 	fl6.fl6_dport = t1->dest;
902 	fl6.fl6_sport = t1->source;
903 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
904 
905 	/* Pass a socket to ip6_dst_lookup even if it is for a RST.
906 	 * The underlying function will use it to retrieve the network
907 	 * namespace.
908 	 */
909 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
910 	if (!IS_ERR(dst)) {
911 		skb_dst_set(buff, dst);
912 		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
913 		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
914 		if (rst)
915 			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
916 		return;
917 	}
918 
919 	kfree_skb(buff);
920 }
921 
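/* Send a RST in reply to skb.  Never reply to a RST.  With TCP MD5
 * enabled the RST is signed with the matching key; if the segment
 * carried an MD5 option but no valid key and hash are found, no RST
 * is generated at all.
 */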
922 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
923 {
924 	const struct tcphdr *th = tcp_hdr(skb);
925 	u32 seq = 0, ack_seq = 0;
926 	struct tcp_md5sig_key *key = NULL;
927 #ifdef CONFIG_TCP_MD5SIG
928 	const __u8 *hash_location = NULL;
929 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
930 	unsigned char newhash[16];
931 	int genhash;
932 	struct sock *sk1 = NULL;
933 #endif
934 
935 	if (th->rst)
936 		return;
937 
938 	if (!ipv6_unicast_destination(skb))
939 		return;
940 
941 #ifdef CONFIG_TCP_MD5SIG
942 	hash_location = tcp_parse_md5sig_option(th);
943 	if (!sk && hash_location) {
944 		/*
945 		 * The active side is lost. Try to find the listening socket
946 		 * through the source port, and then find the md5 key through
947 		 * the listening socket. We do not lose any security here:
948 		 * the incoming packet is checked against the md5 hash of the
949 		 * found key, and no RST is generated if the hash doesn't match.
950 		 */
951 		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
952 					   &tcp_hashinfo, &ipv6h->daddr,
953 					   ntohs(th->source), inet6_iif(skb));
954 		if (!sk1)
955 			return;
956 
957 		rcu_read_lock();
958 		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
959 		if (!key)
960 			goto release_sk1;
961 
962 		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
963 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
964 			goto release_sk1;
965 	} else {
966 		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
967 	}
968 #endif
969 
970 	if (th->ack)
971 		seq = ntohl(th->ack_seq);
972 	else
973 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
974 			  (th->doff << 2);
975 
976 	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
977 
978 #ifdef CONFIG_TCP_MD5SIG
979 release_sk1:
980 	if (sk1) {
981 		rcu_read_unlock();
982 		sock_put(sk1);
983 	}
984 #endif
985 }
986 
987 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
988 			    struct tcp_md5sig_key *key, u8 tclass)
989 {
990 	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
991 }
992 
993 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
994 {
995 	struct inet_timewait_sock *tw = inet_twsk(sk);
996 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
997 
998 	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
999 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1000 			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
1001 			tw->tw_tclass);
1002 
1003 	inet_twsk_put(tw);
1004 }
1005 
1006 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1007 				  struct request_sock *req)
1008 {
1009 	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1010 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
1011 }
1012 
1013 
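/* Handle a segment arriving on a listening socket: look for a
 * matching pending request sock or an already established socket,
 * falling back to SYN cookie validation.  Returns the socket to
 * continue processing on, or NULL to discard the segment.
 */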
1014 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
1015 {
1016 	struct request_sock *req, **prev;
1017 	const struct tcphdr *th = tcp_hdr(skb);
1018 	struct sock *nsk;
1019 
1020 	/* Find possible connection requests. */
1021 	req = inet6_csk_search_req(sk, &prev, th->source,
1022 				   &ipv6_hdr(skb)->saddr,
1023 				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1024 	if (req)
1025 		return tcp_check_req(sk, skb, req, prev);
1026 
1027 	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1028 			&ipv6_hdr(skb)->saddr, th->source,
1029 			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1030 
1031 	if (nsk) {
1032 		if (nsk->sk_state != TCP_TIME_WAIT) {
1033 			bh_lock_sock(nsk);
1034 			return nsk;
1035 		}
1036 		inet_twsk_put(inet_twsk(nsk));
1037 		return NULL;
1038 	}
1039 
1040 #ifdef CONFIG_SYN_COOKIES
1041 	if (!th->syn)
1042 		sk = cookie_v6_check(sk, skb);
1043 #endif
1044 	return sk;
1045 }
1046 
1047 /* FIXME: this is substantially similar to the ipv4 code.
1048  * Can some kind of merge be done? -- erics
1049  */
1050 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1051 {
1052 	struct tcp_extend_values tmp_ext;
1053 	struct tcp_options_received tmp_opt;
1054 	const u8 *hash_location;
1055 	struct request_sock *req;
1056 	struct inet6_request_sock *treq;
1057 	struct ipv6_pinfo *np = inet6_sk(sk);
1058 	struct tcp_sock *tp = tcp_sk(sk);
1059 	__u32 isn = TCP_SKB_CB(skb)->when;
1060 	struct dst_entry *dst = NULL;
1061 	bool want_cookie = false;
1062 
1063 	if (skb->protocol == htons(ETH_P_IP))
1064 		return tcp_v4_conn_request(sk, skb);
1065 
1066 	if (!ipv6_unicast_destination(skb))
1067 		goto drop;
1068 
1069 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1070 		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
1071 		if (!want_cookie)
1072 			goto drop;
1073 	}
1074 
1075 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1076 		goto drop;
1077 
1078 	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1079 	if (req == NULL)
1080 		goto drop;
1081 
1082 #ifdef CONFIG_TCP_MD5SIG
1083 	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1084 #endif
1085 
1086 	tcp_clear_options(&tmp_opt);
1087 	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1088 	tmp_opt.user_mss = tp->rx_opt.user_mss;
1089 	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1090 
1091 	if (tmp_opt.cookie_plus > 0 &&
1092 	    tmp_opt.saw_tstamp &&
1093 	    !tp->rx_opt.cookie_out_never &&
1094 	    (sysctl_tcp_cookie_size > 0 ||
1095 	     (tp->cookie_values != NULL &&
1096 	      tp->cookie_values->cookie_desired > 0))) {
1097 		u8 *c;
1098 		u32 *d;
1099 		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1100 		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1101 
1102 		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1103 			goto drop_and_free;
1104 
1105 		/* Secret recipe starts with IP addresses */
1106 		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1107 		*mess++ ^= *d++;
1108 		*mess++ ^= *d++;
1109 		*mess++ ^= *d++;
1110 		*mess++ ^= *d++;
1111 		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1112 		*mess++ ^= *d++;
1113 		*mess++ ^= *d++;
1114 		*mess++ ^= *d++;
1115 		*mess++ ^= *d++;
1116 
1117 		/* plus variable length Initiator Cookie */
1118 		c = (u8 *)mess;
1119 		while (l-- > 0)
1120 			*c++ ^= *hash_location++;
1121 
1122 		want_cookie = false;	/* not our kind of cookie */
1123 		tmp_ext.cookie_out_never = 0; /* false */
1124 		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1125 	} else if (!tp->rx_opt.cookie_in_always) {
1126 		/* redundant indications, but ensure initialization. */
1127 		tmp_ext.cookie_out_never = 1; /* true */
1128 		tmp_ext.cookie_plus = 0;
1129 	} else {
1130 		goto drop_and_free;
1131 	}
1132 	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1133 
1134 	if (want_cookie && !tmp_opt.saw_tstamp)
1135 		tcp_clear_options(&tmp_opt);
1136 
1137 	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1138 	tcp_openreq_init(req, &tmp_opt, skb);
1139 
1140 	treq = inet6_rsk(req);
1141 	treq->rmt_addr = ipv6_hdr(skb)->saddr;
1142 	treq->loc_addr = ipv6_hdr(skb)->daddr;
1143 	if (!want_cookie || tmp_opt.tstamp_ok)
1144 		TCP_ECN_create_request(req, skb);
1145 
1146 	treq->iif = sk->sk_bound_dev_if;
1147 
1148 	/* So that link locals have meaning */
1149 	if (!sk->sk_bound_dev_if &&
1150 	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1151 		treq->iif = inet6_iif(skb);
1152 
1153 	if (!isn) {
1154 		struct inet_peer *peer = NULL;
1155 
1156 		if (ipv6_opt_accepted(sk, skb) ||
1157 		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1158 		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1159 			atomic_inc(&skb->users);
1160 			treq->pktopts = skb;
1161 		}
1162 
1163 		if (want_cookie) {
1164 			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1165 			req->cookie_ts = tmp_opt.tstamp_ok;
1166 			goto have_isn;
1167 		}
1168 
1169 		/* VJ's idea. We save the last timestamp seen
1170 		 * from the destination in the peer table when entering
1171 		 * TIME-WAIT state, and check against it before
1172 		 * accepting a new connection request.
1173 		 *
1174 		 * If "isn" is not zero, this request hit an alive
1175 		 * timewait bucket, so all the necessary checks
1176 		 * are made in the function processing the timewait state.
1177 		 */
1178 		if (tmp_opt.saw_tstamp &&
1179 		    tcp_death_row.sysctl_tw_recycle &&
1180 		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
1181 		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
1182 		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
1183 				    &treq->rmt_addr)) {
1184 			inet_peer_refcheck(peer);
1185 			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1186 			    (s32)(peer->tcp_ts - req->ts_recent) >
1187 							TCP_PAWS_WINDOW) {
1188 				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1189 				goto drop_and_release;
1190 			}
1191 		}
1192 		/* Kill the following clause, if you dislike this way. */
1193 		else if (!sysctl_tcp_syncookies &&
1194 			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1195 			  (sysctl_max_syn_backlog >> 2)) &&
1196 			 (!peer || !peer->tcp_ts_stamp) &&
1197 			 (!dst || !dst_metric(dst, RTAX_RTT))) {
1198 			/* Without syncookies the last quarter of the
1199 			 * backlog is filled only with destinations
1200 			 * proven to be alive.
1201 			 * It means that we continue to communicate with
1202 			 * destinations already remembered at the moment
1203 			 * of the synflood.
1204 			 */
1205 			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1206 				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1207 			goto drop_and_release;
1208 		}
1209 
1210 		isn = tcp_v6_init_sequence(skb);
1211 	}
1212 have_isn:
1213 	tcp_rsk(req)->snt_isn = isn;
1214 	tcp_rsk(req)->snt_synack = tcp_time_stamp;
1215 
1216 	security_inet_conn_request(sk, skb, req);
1217 
1218 	if (tcp_v6_send_synack(sk, req,
1219 			       (struct request_values *)&tmp_ext,
1220 			       skb_get_queue_mapping(skb)) ||
1221 	    want_cookie)
1222 		goto drop_and_free;
1223 
1224 	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1225 	return 0;
1226 
1227 drop_and_release:
1228 	dst_release(dst);
1229 drop_and_free:
1230 	reqsk_free(req);
1231 drop:
1232 	return 0; /* don't send reset */
1233 }
1234 
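/* Create the child socket for a completed connection request.  For
 * requests that arrived over IPv4 (v4-mapped listener) the IPv4 code
 * creates the socket, which is then dressed up as an IPv6 socket.
 */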
1235 static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1236 					  struct request_sock *req,
1237 					  struct dst_entry *dst)
1238 {
1239 	struct inet6_request_sock *treq;
1240 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1241 	struct tcp6_sock *newtcp6sk;
1242 	struct inet_sock *newinet;
1243 	struct tcp_sock *newtp;
1244 	struct sock *newsk;
1245 	struct ipv6_txoptions *opt;
1246 #ifdef CONFIG_TCP_MD5SIG
1247 	struct tcp_md5sig_key *key;
1248 #endif
1249 
1250 	if (skb->protocol == htons(ETH_P_IP)) {
1251 		/*
1252 		 *	v6 mapped
1253 		 */
1254 
1255 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1256 
1257 		if (newsk == NULL)
1258 			return NULL;
1259 
1260 		newtcp6sk = (struct tcp6_sock *)newsk;
1261 		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1262 
1263 		newinet = inet_sk(newsk);
1264 		newnp = inet6_sk(newsk);
1265 		newtp = tcp_sk(newsk);
1266 
1267 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1268 
1269 		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1270 
1271 		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1272 
1273 		newnp->rcv_saddr = newnp->saddr;
1274 
1275 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1276 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1277 #ifdef CONFIG_TCP_MD5SIG
1278 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1279 #endif
1280 
1281 		newnp->ipv6_ac_list = NULL;
1282 		newnp->ipv6_fl_list = NULL;
1283 		newnp->pktoptions  = NULL;
1284 		newnp->opt	   = NULL;
1285 		newnp->mcast_oif   = inet6_iif(skb);
1286 		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1287 		newnp->rcv_tclass  = ipv6_tclass(ipv6_hdr(skb));
1288 
1289 		/*
1290 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1291 		 * here, tcp_create_openreq_child now does this for us, see the comment in
1292 		 * that function for the gory details. -acme
1293 		 */
1294 
1295 		/* This is a tricky place. Until this moment the IPv4 tcp code
1296 		   worked with the IPv6 icsk.icsk_af_ops.
1297 		   Sync it now.
1298 		 */
1299 		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1300 
1301 		return newsk;
1302 	}
1303 
1304 	treq = inet6_rsk(req);
1305 	opt = np->opt;
1306 
1307 	if (sk_acceptq_is_full(sk))
1308 		goto out_overflow;
1309 
1310 	if (!dst) {
1311 		dst = inet6_csk_route_req(sk, req);
1312 		if (!dst)
1313 			goto out;
1314 	}
1315 
1316 	newsk = tcp_create_openreq_child(sk, req, skb);
1317 	if (newsk == NULL)
1318 		goto out_nonewsk;
1319 
1320 	/*
1321 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1322 	 * count here, tcp_create_openreq_child now does this for us, see the
1323 	 * comment in that function for the gory details. -acme
1324 	 */
1325 
1326 	newsk->sk_gso_type = SKB_GSO_TCPV6;
1327 	__ip6_dst_store(newsk, dst, NULL, NULL);
1328 
1329 	newtcp6sk = (struct tcp6_sock *)newsk;
1330 	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1331 
1332 	newtp = tcp_sk(newsk);
1333 	newinet = inet_sk(newsk);
1334 	newnp = inet6_sk(newsk);
1335 
1336 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1337 
1338 	newnp->daddr = treq->rmt_addr;
1339 	newnp->saddr = treq->loc_addr;
1340 	newnp->rcv_saddr = treq->loc_addr;
1341 	newsk->sk_bound_dev_if = treq->iif;
1342 
1343 	/* Now IPv6 options...
1344 
1345 	   First: no IPv4 options.
1346 	 */
1347 	newinet->inet_opt = NULL;
1348 	newnp->ipv6_ac_list = NULL;
1349 	newnp->ipv6_fl_list = NULL;
1350 
1351 	/* Clone RX bits */
1352 	newnp->rxopt.all = np->rxopt.all;
1353 
1354 	/* Clone pktoptions received with SYN */
1355 	newnp->pktoptions = NULL;
1356 	if (treq->pktopts != NULL) {
1357 		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1358 		consume_skb(treq->pktopts);
1359 		treq->pktopts = NULL;
1360 		if (newnp->pktoptions)
1361 			skb_set_owner_r(newnp->pktoptions, newsk);
1362 	}
1363 	newnp->opt	  = NULL;
1364 	newnp->mcast_oif  = inet6_iif(skb);
1365 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1366 	newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1367 
1368 	/* Clone native IPv6 options from listening socket (if any)
1369 
1370 	   Yes, keeping a reference count would be much more clever,
1371 	   but we do one more thing here: reattach optmem
1372 	   to newsk.
1373 	 */
1374 	if (opt) {
1375 		newnp->opt = ipv6_dup_options(newsk, opt);
1376 		if (opt != np->opt)
1377 			sock_kfree_s(sk, opt, opt->tot_len);
1378 	}
1379 
1380 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1381 	if (newnp->opt)
1382 		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1383 						     newnp->opt->opt_flen);
1384 
1385 	tcp_mtup_init(newsk);
1386 	tcp_sync_mss(newsk, dst_mtu(dst));
1387 	newtp->advmss = dst_metric_advmss(dst);
1388 	if (tcp_sk(sk)->rx_opt.user_mss &&
1389 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1390 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1391 
1392 	tcp_initialize_rcv_mss(newsk);
1393 	if (tcp_rsk(req)->snt_synack)
1394 		tcp_valid_rtt_meas(newsk,
1395 		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
1396 	newtp->total_retrans = req->retrans;
1397 
1398 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1399 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1400 
1401 #ifdef CONFIG_TCP_MD5SIG
1402 	/* Copy over the MD5 key from the original socket */
1403 	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1404 		/* We're using one, so create a matching key
1405 		 * on the newsk structure. If we fail to get
1406 		 * memory, then we end up not copying the key
1407 		 * across. Shucks.
1408 		 */
1409 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
1410 			       AF_INET6, key->key, key->keylen, GFP_ATOMIC);
1411 	}
1412 #endif
1413 
1414 	if (__inet_inherit_port(sk, newsk) < 0) {
1415 		sock_put(newsk);
1416 		goto out;
1417 	}
1418 	__inet6_hash(newsk, NULL);
1419 
1420 	return newsk;
1421 
1422 out_overflow:
1423 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1424 out_nonewsk:
1425 	if (opt && opt != np->opt)
1426 		sock_kfree_s(sk, opt, opt->tot_len);
1427 	dst_release(dst);
1428 out:
1429 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1430 	return NULL;
1431 }
1432 
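/* Set up checksum verification for an incoming segment: accept
 * hardware-verified checksums, otherwise seed skb->csum with the
 * pseudo-header sum, verifying short packets immediately and
 * deferring full verification of longer ones.
 */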
1433 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1434 {
1435 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1436 		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1437 				  &ipv6_hdr(skb)->daddr, skb->csum)) {
1438 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1439 			return 0;
1440 		}
1441 	}
1442 
1443 	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1444 					      &ipv6_hdr(skb)->saddr,
1445 					      &ipv6_hdr(skb)->daddr, 0));
1446 
1447 	if (skb->len <= 76) {
1448 		return __skb_checksum_complete(skb);
1449 	}
1450 	return 0;
1451 }
1452 
1453 /* The socket must have its spinlock held when we get
1454  * here.
1455  *
1456  * We have a potential double-lock case here, so even when
1457  * doing backlog processing we use the BH locking scheme.
1458  * This is because we cannot sleep with the original spinlock
1459  * held.
1460  */
1461 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1462 {
1463 	struct ipv6_pinfo *np = inet6_sk(sk);
1464 	struct tcp_sock *tp;
1465 	struct sk_buff *opt_skb = NULL;
1466 
1467 	/* Imagine: the socket is IPv6. An IPv4 packet arrives,
1468 	   goes to the IPv4 receive handler and is backlogged.
1469 	   From the backlog it always goes here. Kerboom...
1470 	   Fortunately, tcp_rcv_established and rcv_established
1471 	   handle them correctly, but it is not the case with
1472 	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1473 	 */
1474 
1475 	if (skb->protocol == htons(ETH_P_IP))
1476 		return tcp_v4_do_rcv(sk, skb);
1477 
1478 #ifdef CONFIG_TCP_MD5SIG
1479 	if (tcp_v6_inbound_md5_hash(sk, skb))
1480 		goto discard;
1481 #endif
1482 
1483 	if (sk_filter(sk, skb))
1484 		goto discard;
1485 
1486 	/*
1487 	 *	socket locking is here for SMP purposes as backlog rcv
1488 	 *	is currently called with bh processing disabled.
1489 	 */
1490 
1491 	/* Do Stevens' IPV6_PKTOPTIONS.
1492 
1493 	   Yes, guys, it is the only place in our code where we
1494 	   can do this without affecting IPv4.
1495 	   The rest of the code is protocol independent,
1496 	   and I do not like the idea of uglifying IPv4.
1497 
1498 	   Actually, the whole idea behind IPV6_PKTOPTIONS
1499 	   looks not very well thought out. For now we latch the
1500 	   options received in the last packet enqueued
1501 	   by tcp. Feel free to propose a better solution.
1502 					       --ANK (980728)
1503 	 */
1504 	if (np->rxopt.all)
1505 		opt_skb = skb_clone(skb, GFP_ATOMIC);
1506 
1507 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1508 		sock_rps_save_rxhash(sk, skb);
1509 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1510 			goto reset;
1511 		if (opt_skb)
1512 			goto ipv6_pktoptions;
1513 		return 0;
1514 	}
1515 
1516 	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1517 		goto csum_err;
1518 
1519 	if (sk->sk_state == TCP_LISTEN) {
1520 		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1521 		if (!nsk)
1522 			goto discard;
1523 
1524 		/*
1525 		 * Queue it on the new socket if the new socket is active,
1526 		 * otherwise we just short-circuit this and continue with
1527 		 * the new socket.
1528 		 */
1529 		if (nsk != sk) {
1530 			sock_rps_save_rxhash(nsk, skb);
1531 			if (tcp_child_process(sk, nsk, skb))
1532 				goto reset;
1533 			if (opt_skb)
1534 				__kfree_skb(opt_skb);
1535 			return 0;
1536 		}
1537 	} else
1538 		sock_rps_save_rxhash(sk, skb);
1539 
1540 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1541 		goto reset;
1542 	if (opt_skb)
1543 		goto ipv6_pktoptions;
1544 	return 0;
1545 
1546 reset:
1547 	tcp_v6_send_reset(sk, skb);
1548 discard:
1549 	if (opt_skb)
1550 		__kfree_skb(opt_skb);
1551 	kfree_skb(skb);
1552 	return 0;
1553 csum_err:
1554 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1555 	goto discard;
1556 
1557 
1558 ipv6_pktoptions:
1559 	/* You may ask: what is this for?
1560 
1561 	   1. The skb was enqueued by tcp.
1562 	   2. The skb was added to the tail of the read queue, not out of order.
1563 	   3. The socket is not in a passive state.
1564 	   4. Finally, it really contains options the user wants to receive.
1565 	 */
1566 	tp = tcp_sk(sk);
1567 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1568 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1569 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1570 			np->mcast_oif = inet6_iif(opt_skb);
1571 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1572 			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1573 		if (np->rxopt.bits.rxtclass)
1574 			np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1575 		if (ipv6_opt_accepted(sk, opt_skb)) {
1576 			skb_set_owner_r(opt_skb, sk);
1577 			opt_skb = xchg(&np->pktoptions, opt_skb);
1578 		} else {
1579 			__kfree_skb(opt_skb);
1580 			opt_skb = xchg(&np->pktoptions, NULL);
1581 		}
1582 	}
1583 
1584 	kfree_skb(opt_skb);
1585 	return 0;
1586 }
1587 
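/* Main receive entry point for TCP over IPv6: validate the header
 * and checksum, look up the owning socket and either process the
 * segment directly, prequeue it, or add it to the socket backlog.
 * TIME-WAIT and unmatched segments are handled at the bottom.
 */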
1588 static int tcp_v6_rcv(struct sk_buff *skb)
1589 {
1590 	const struct tcphdr *th;
1591 	const struct ipv6hdr *hdr;
1592 	struct sock *sk;
1593 	int ret;
1594 	struct net *net = dev_net(skb->dev);
1595 
1596 	if (skb->pkt_type != PACKET_HOST)
1597 		goto discard_it;
1598 
1599 	/*
1600 	 *	Count it even if it's bad.
1601 	 */
1602 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1603 
1604 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1605 		goto discard_it;
1606 
1607 	th = tcp_hdr(skb);
1608 
1609 	if (th->doff < sizeof(struct tcphdr)/4)
1610 		goto bad_packet;
1611 	if (!pskb_may_pull(skb, th->doff*4))
1612 		goto discard_it;
1613 
1614 	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1615 		goto bad_packet;
1616 
1617 	th = tcp_hdr(skb);
1618 	hdr = ipv6_hdr(skb);
1619 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1620 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1621 				    skb->len - th->doff*4);
1622 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1623 	TCP_SKB_CB(skb)->when = 0;
1624 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1625 	TCP_SKB_CB(skb)->sacked = 0;
1626 
1627 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1628 	if (!sk)
1629 		goto no_tcp_socket;
1630 
1631 process:
1632 	if (sk->sk_state == TCP_TIME_WAIT)
1633 		goto do_time_wait;
1634 
1635 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1636 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1637 		goto discard_and_relse;
1638 	}
1639 
1640 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1641 		goto discard_and_relse;
1642 
1643 	if (sk_filter(sk, skb))
1644 		goto discard_and_relse;
1645 
1646 	skb->dev = NULL;
1647 
1648 	bh_lock_sock_nested(sk);
1649 	ret = 0;
1650 	if (!sock_owned_by_user(sk)) {
1651 #ifdef CONFIG_NET_DMA
1652 		struct tcp_sock *tp = tcp_sk(sk);
1653 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1654 			tp->ucopy.dma_chan = net_dma_find_channel();
1655 		if (tp->ucopy.dma_chan)
1656 			ret = tcp_v6_do_rcv(sk, skb);
1657 		else
1658 #endif
1659 		{
1660 			if (!tcp_prequeue(sk, skb))
1661 				ret = tcp_v6_do_rcv(sk, skb);
1662 		}
1663 	} else if (unlikely(sk_add_backlog(sk, skb,
1664 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1665 		bh_unlock_sock(sk);
1666 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1667 		goto discard_and_relse;
1668 	}
1669 	bh_unlock_sock(sk);
1670 
1671 	sock_put(sk);
1672 	return ret ? -1 : 0;
1673 
1674 no_tcp_socket:
1675 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1676 		goto discard_it;
1677 
1678 	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1679 bad_packet:
1680 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1681 	} else {
1682 		tcp_v6_send_reset(NULL, skb);
1683 	}
1684 
1685 discard_it:
1686 
1687 	/*
1688 	 *	Discard frame
1689 	 */
1690 
1691 	kfree_skb(skb);
1692 	return 0;
1693 
1694 discard_and_relse:
1695 	sock_put(sk);
1696 	goto discard_it;
1697 
1698 do_time_wait:
1699 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1700 		inet_twsk_put(inet_twsk(sk));
1701 		goto discard_it;
1702 	}
1703 
1704 	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1705 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1706 		inet_twsk_put(inet_twsk(sk));
1707 		goto discard_it;
1708 	}
1709 
1710 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1711 	case TCP_TW_SYN:
1712 	{
1713 		struct sock *sk2;
1714 
1715 		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1716 					    &ipv6_hdr(skb)->daddr,
1717 					    ntohs(th->dest), inet6_iif(skb));
1718 		if (sk2 != NULL) {
1719 			struct inet_timewait_sock *tw = inet_twsk(sk);
1720 			inet_twsk_deschedule(tw, &tcp_death_row);
1721 			inet_twsk_put(tw);
1722 			sk = sk2;
1723 			goto process;
1724 		}
1725 		/* Fall through to ACK */
1726 	}
1727 	case TCP_TW_ACK:
1728 		tcp_v6_timewait_ack(sk, skb);
1729 		break;
1730 	case TCP_TW_RST:
1731 		goto no_tcp_socket;
1732 	case TCP_TW_SUCCESS:;
1733 	}
1734 	goto discard_it;
1735 }
1736 
1737 static struct inet_peer *tcp_v6_get_peer(struct sock *sk)
1738 {
1739 	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1740 	struct ipv6_pinfo *np = inet6_sk(sk);
1741 
1742 	/* If we don't have a valid cached route, or we're doing IP
1743 	 * options which make the IPv6 header destination address
1744 	 * different from our peer's, do not bother with this.
1745 	 */
1746 	if (!rt || !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr))
1747 		return NULL;
1748 	return rt6_get_peer_create(rt);
1749 }
1750 
1751 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1752 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1753 	.twsk_unique	= tcp_twsk_unique,
1754 	.twsk_destructor= tcp_twsk_destructor,
1755 };
1756 
1757 static const struct inet_connection_sock_af_ops ipv6_specific = {
1758 	.queue_xmit	   = inet6_csk_xmit,
1759 	.send_check	   = tcp_v6_send_check,
1760 	.rebuild_header	   = inet6_sk_rebuild_header,
1761 	.conn_request	   = tcp_v6_conn_request,
1762 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1763 	.get_peer	   = tcp_v6_get_peer,
1764 	.net_header_len	   = sizeof(struct ipv6hdr),
1765 	.net_frag_header_len = sizeof(struct frag_hdr),
1766 	.setsockopt	   = ipv6_setsockopt,
1767 	.getsockopt	   = ipv6_getsockopt,
1768 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1769 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1770 	.bind_conflict	   = inet6_csk_bind_conflict,
1771 #ifdef CONFIG_COMPAT
1772 	.compat_setsockopt = compat_ipv6_setsockopt,
1773 	.compat_getsockopt = compat_ipv6_getsockopt,
1774 #endif
1775 };
1776 
1777 #ifdef CONFIG_TCP_MD5SIG
1778 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1779 	.md5_lookup	=	tcp_v6_md5_lookup,
1780 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1781 	.md5_parse	=	tcp_v6_parse_md5_keys,
1782 };
1783 #endif
1784 
1785 /*
1786  *	TCP over IPv4 via INET6 API
1787  */
1788 
1789 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1790 	.queue_xmit	   = ip_queue_xmit,
1791 	.send_check	   = tcp_v4_send_check,
1792 	.rebuild_header	   = inet_sk_rebuild_header,
1793 	.conn_request	   = tcp_v6_conn_request,
1794 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1795 	.get_peer	   = tcp_v4_get_peer,
1796 	.net_header_len	   = sizeof(struct iphdr),
1797 	.setsockopt	   = ipv6_setsockopt,
1798 	.getsockopt	   = ipv6_getsockopt,
1799 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1800 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1801 	.bind_conflict	   = inet6_csk_bind_conflict,
1802 #ifdef CONFIG_COMPAT
1803 	.compat_setsockopt = compat_ipv6_setsockopt,
1804 	.compat_getsockopt = compat_ipv6_getsockopt,
1805 #endif
1806 };
1807 
1808 #ifdef CONFIG_TCP_MD5SIG
1809 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1810 	.md5_lookup	=	tcp_v4_md5_lookup,
1811 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1812 	.md5_parse	=	tcp_v6_parse_md5_keys,
1813 };
1814 #endif
1815 
1816 /* NOTE: A lot of things are set to zero explicitly by the call to
1817  *       sk_alloc(), so they need not be done here.
1818  */
1819 static int tcp_v6_init_sock(struct sock *sk)
1820 {
1821 	struct inet_connection_sock *icsk = inet_csk(sk);
1822 
1823 	tcp_init_sock(sk);
1824 
1825 	icsk->icsk_af_ops = &ipv6_specific;
1826 
1827 #ifdef CONFIG_TCP_MD5SIG
1828 	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1829 #endif
1830 
1831 	return 0;
1832 }
1833 
1834 static void tcp_v6_destroy_sock(struct sock *sk)
1835 {
1836 	tcp_v4_destroy_sock(sk);
1837 	inet6_destroy_sock(sk);
1838 }
1839 
1840 #ifdef CONFIG_PROC_FS
1841 /* Proc filesystem TCPv6 sock list dumping. */
1842 static void get_openreq6(struct seq_file *seq,
1843 			 const struct sock *sk, struct request_sock *req, int i, int uid)
1844 {
1845 	int ttd = req->expires - jiffies;
1846 	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1847 	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1848 
1849 	if (ttd < 0)
1850 		ttd = 0;
1851 
1852 	seq_printf(seq,
1853 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1854 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1855 		   i,
1856 		   src->s6_addr32[0], src->s6_addr32[1],
1857 		   src->s6_addr32[2], src->s6_addr32[3],
1858 		   ntohs(inet_rsk(req)->loc_port),
1859 		   dest->s6_addr32[0], dest->s6_addr32[1],
1860 		   dest->s6_addr32[2], dest->s6_addr32[3],
1861 		   ntohs(inet_rsk(req)->rmt_port),
1862 		   TCP_SYN_RECV,
1863 		   0,0, /* could print option size, but that is af dependent. */
1864 		   1,   /* timers active (only the expire timer) */
1865 		   jiffies_to_clock_t(ttd),
1866 		   req->retrans,
1867 		   uid,
1868 		   0,  /* non standard timer */
1869 		   0, /* open_requests have no inode */
1870 		   0, req);
1871 }
1872 
1873 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1874 {
1875 	const struct in6_addr *dest, *src;
1876 	__u16 destp, srcp;
1877 	int timer_active;
1878 	unsigned long timer_expires;
1879 	const struct inet_sock *inet = inet_sk(sp);
1880 	const struct tcp_sock *tp = tcp_sk(sp);
1881 	const struct inet_connection_sock *icsk = inet_csk(sp);
1882 	const struct ipv6_pinfo *np = inet6_sk(sp);
1883 
1884 	dest  = &np->daddr;
1885 	src   = &np->rcv_saddr;
1886 	destp = ntohs(inet->inet_dport);
1887 	srcp  = ntohs(inet->inet_sport);
1888 
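	/*
	 * timer_active uses the same encoding as IPv4's /proc/net/tcp:
	 * 1 = retransmit timer, 2 = keepalive/SYN timer (sk_timer),
	 * 3 = TIME_WAIT (printed by get_timewait6_sock), 4 = zero window
	 * probe timer, 0 = no timer pending.
	 */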
1889 	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1890 		timer_active	= 1;
1891 		timer_expires	= icsk->icsk_timeout;
1892 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1893 		timer_active	= 4;
1894 		timer_expires	= icsk->icsk_timeout;
1895 	} else if (timer_pending(&sp->sk_timer)) {
1896 		timer_active	= 2;
1897 		timer_expires	= sp->sk_timer.expires;
1898 	} else {
1899 		timer_active	= 0;
1900 		timer_expires	= jiffies;
1901 	}
1902 
1903 	seq_printf(seq,
1904 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1905 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
1906 		   i,
1907 		   src->s6_addr32[0], src->s6_addr32[1],
1908 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1909 		   dest->s6_addr32[0], dest->s6_addr32[1],
1910 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1911 		   sp->sk_state,
1912 		   tp->write_seq - tp->snd_una,
1913 		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1914 		   timer_active,
1915 		   jiffies_to_clock_t(timer_expires - jiffies),
1916 		   icsk->icsk_retransmits,
1917 		   sock_i_uid(sp),
1918 		   icsk->icsk_probes_out,
1919 		   sock_i_ino(sp),
1920 		   atomic_read(&sp->sk_refcnt), sp,
1921 		   jiffies_to_clock_t(icsk->icsk_rto),
1922 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1923 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1924 		   tp->snd_cwnd,
1925 		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1926 		   );
1927 }
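/*
 * The addresses above are emitted as four 32-bit words read in CPU byte
 * order, not as RFC-style IPv6 text, so the output is endian-dependent: on
 * a little-endian machine ::1 appears as
 * 00000000000000000000000001000000.  Userspace parsers must undo this
 * encoding word by word.
 */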
1928 
1929 static void get_timewait6_sock(struct seq_file *seq,
1930 			       struct inet_timewait_sock *tw, int i)
1931 {
1932 	const struct in6_addr *dest, *src;
1933 	__u16 destp, srcp;
1934 	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1935 	int ttd = tw->tw_ttd - jiffies;
1936 
1937 	if (ttd < 0)
1938 		ttd = 0;
1939 
1940 	dest = &tw6->tw_v6_daddr;
1941 	src  = &tw6->tw_v6_rcv_saddr;
1942 	destp = ntohs(tw->tw_dport);
1943 	srcp  = ntohs(tw->tw_sport);
1944 
1945 	seq_printf(seq,
1946 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1947 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1948 		   i,
1949 		   src->s6_addr32[0], src->s6_addr32[1],
1950 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1951 		   dest->s6_addr32[0], dest->s6_addr32[1],
1952 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1953 		   tw->tw_substate, 0, 0,
1954 		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
1955 		   atomic_read(&tw->tw_refcnt), tw);
1956 }
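/*
 * tw_substate is printed rather than a fixed TCP_TIME_WAIT because
 * FIN_WAIT2 sockets whose owner has already closed are also represented
 * by timewait socks; the substate distinguishes the two cases.
 */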
1957 
1958 static int tcp6_seq_show(struct seq_file *seq, void *v)
1959 {
1960 	struct tcp_iter_state *st;
1961 
1962 	if (v == SEQ_START_TOKEN) {
1963 		seq_puts(seq,
1964 			 "  sl  "
1965 			 "local_address                         "
1966 			 "remote_address                        "
1967 			 "st tx_queue rx_queue tr tm->when retrnsmt"
1968 			 "   uid  timeout inode\n");
1969 		goto out;
1970 	}
1971 	st = seq->private;
1972 
1973 	switch (st->state) {
1974 	case TCP_SEQ_STATE_LISTENING:
1975 	case TCP_SEQ_STATE_ESTABLISHED:
1976 		get_tcp6_sock(seq, v, st->num);
1977 		break;
1978 	case TCP_SEQ_STATE_OPENREQ:
1979 		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1980 		break;
1981 	case TCP_SEQ_STATE_TIME_WAIT:
1982 		get_timewait6_sock(seq, v, st->num);
1983 		break;
1984 	}
1985 out:
1986 	return 0;
1987 }
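/*
 * The shared TCP seq_file iterator hands this callback either
 * SEQ_START_TOKEN (print the header), a listening/established sock, an
 * open request hanging off a listener, or a timewait sock; st->state says
 * which, so passing 'v' straight to the matching helper is safe.  The
 * result is what userspace sees on a 'cat /proc/net/tcp6'.
 */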
1988 
1989 static const struct file_operations tcp6_afinfo_seq_fops = {
1990 	.owner   = THIS_MODULE,
1991 	.open    = tcp_seq_open,
1992 	.read    = seq_read,
1993 	.llseek  = seq_lseek,
1994 	.release = seq_release_net
1995 };
1996 
1997 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1998 	.name		= "tcp6",
1999 	.family		= AF_INET6,
2000 	.seq_fops	= &tcp6_afinfo_seq_fops,
2001 	.seq_ops	= {
2002 		.show		= tcp6_seq_show,
2003 	},
2004 };
2005 
2006 int __net_init tcp6_proc_init(struct net *net)
2007 {
2008 	return tcp_proc_register(net, &tcp6_seq_afinfo);
2009 }
2010 
2011 void tcp6_proc_exit(struct net *net)
2012 {
2013 	tcp_proc_unregister(net, &tcp6_seq_afinfo);
2014 }
2015 #endif
2016 
2017 struct proto tcpv6_prot = {
2018 	.name			= "TCPv6",
2019 	.owner			= THIS_MODULE,
2020 	.close			= tcp_close,
2021 	.connect		= tcp_v6_connect,
2022 	.disconnect		= tcp_disconnect,
2023 	.accept			= inet_csk_accept,
2024 	.ioctl			= tcp_ioctl,
2025 	.init			= tcp_v6_init_sock,
2026 	.destroy		= tcp_v6_destroy_sock,
2027 	.shutdown		= tcp_shutdown,
2028 	.setsockopt		= tcp_setsockopt,
2029 	.getsockopt		= tcp_getsockopt,
2030 	.recvmsg		= tcp_recvmsg,
2031 	.sendmsg		= tcp_sendmsg,
2032 	.sendpage		= tcp_sendpage,
2033 	.backlog_rcv		= tcp_v6_do_rcv,
2034 	.hash			= tcp_v6_hash,
2035 	.unhash			= inet_unhash,
2036 	.get_port		= inet_csk_get_port,
2037 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2038 	.sockets_allocated	= &tcp_sockets_allocated,
2039 	.memory_allocated	= &tcp_memory_allocated,
2040 	.memory_pressure	= &tcp_memory_pressure,
2041 	.orphan_count		= &tcp_orphan_count,
2042 	.sysctl_wmem		= sysctl_tcp_wmem,
2043 	.sysctl_rmem		= sysctl_tcp_rmem,
2044 	.max_header		= MAX_TCP_HEADER,
2045 	.obj_size		= sizeof(struct tcp6_sock),
2046 	.slab_flags		= SLAB_DESTROY_BY_RCU,
2047 	.twsk_prot		= &tcp6_timewait_sock_ops,
2048 	.rsk_prot		= &tcp6_request_sock_ops,
2049 	.h.hashinfo		= &tcp_hashinfo,
2050 	.no_autobind		= true,
2051 #ifdef CONFIG_COMPAT
2052 	.compat_setsockopt	= compat_tcp_setsockopt,
2053 	.compat_getsockopt	= compat_tcp_getsockopt,
2054 #endif
2055 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
2056 	.proto_cgroup		= tcp_proto_cgroup,
2057 #endif
2058 };
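/*
 * Nearly every method in tcpv6_prot is the common TCP implementation
 * shared with IPv4; only .init/.destroy/.hash/.backlog_rcv are IPv6-aware,
 * and .obj_size/.twsk_prot/.rsk_prot size the slab caches for the IPv6
 * variants of the socket structures.
 */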
2059 
2060 static const struct inet6_protocol tcpv6_protocol = {
2061 	.handler	=	tcp_v6_rcv,
2062 	.err_handler	=	tcp_v6_err,
2063 	.gso_send_check	=	tcp_v6_gso_send_check,
2064 	.gso_segment	=	tcp_tso_segment,
2065 	.gro_receive	=	tcp6_gro_receive,
2066 	.gro_complete	=	tcp6_gro_complete,
2067 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2068 };
2069 
2070 static struct inet_protosw tcpv6_protosw = {
2071 	.type		=	SOCK_STREAM,
2072 	.protocol	=	IPPROTO_TCP,
2073 	.prot		=	&tcpv6_prot,
2074 	.ops		=	&inet6_stream_ops,
2075 	.no_check	=	0,
2076 	.flags		=	INET_PROTOSW_PERMANENT |
2077 				INET_PROTOSW_ICSK,
2078 };
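/*
 * Registering this protosw is what makes, for example,
 *
 *	socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP);
 *
 * resolve to tcpv6_prot plus inet6_stream_ops.  INET_PROTOSW_PERMANENT
 * marks the entry permanent so the core refuses to unregister it, and
 * INET_PROTOSW_ICSK flags the socket as connection-oriented.
 */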
2079 
2080 static int __net_init tcpv6_net_init(struct net *net)
2081 {
2082 	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2083 				    SOCK_RAW, IPPROTO_TCP, net);
2084 }
2085 
2086 static void __net_exit tcpv6_net_exit(struct net *net)
2087 {
2088 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2089 }
2090 
2091 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2092 {
2093 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2094 }
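/*
 * Purging timewait sockets is done in ->exit_batch rather than ->exit so
 * that one sweep of the global tcp_hashinfo covers every namespace in the
 * dying batch, instead of rescanning the whole established hash once per
 * namespace.
 */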
2095 
2096 static struct pernet_operations tcpv6_net_ops = {
2097 	.init	    = tcpv6_net_init,
2098 	.exit	    = tcpv6_net_exit,
2099 	.exit_batch = tcpv6_net_exit_batch,
2100 };
2101 
2102 int __init tcpv6_init(void)
2103 {
2104 	int ret;
2105 
2106 	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2107 	if (ret)
2108 		goto out;
2109 
2110 	/* register inet6 protocol */
2111 	ret = inet6_register_protosw(&tcpv6_protosw);
2112 	if (ret)
2113 		goto out_tcpv6_protocol;
2114 
2115 	ret = register_pernet_subsys(&tcpv6_net_ops);
2116 	if (ret)
2117 		goto out_tcpv6_protosw;
2118 out:
2119 	return ret;
2120 
2121 out_tcpv6_protosw:
2122 	inet6_unregister_protosw(&tcpv6_protosw);
2123 out_tcpv6_protocol:
2124 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2125 	goto out;
2126 }
2127 
2128 void tcpv6_exit(void)
2129 {
2130 	unregister_pernet_subsys(&tcpv6_net_ops);
2131 	inet6_unregister_protosw(&tcpv6_protosw);
2132 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2133 }
2134