xref: /openbmc/linux/net/ipv6/tcp_ipv6.c (revision df2634f43f5106947f3735a0b61a6527a4b278cd)
1 /*
2  *	TCP over IPv6
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	Based on:
9  *	linux/net/ipv4/tcp.c
10  *	linux/net/ipv4/tcp_input.c
11  *	linux/net/ipv4/tcp_output.c
12  *
13  *	Fixes:
14  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
15  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
16  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
17  *					a single port at the same time.
18  *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
19  *
20  *	This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25 
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46 
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63 #include <net/inet_common.h>
64 
65 #include <asm/uaccess.h>
66 
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
69 
70 #include <linux/crypto.h>
71 #include <linux/scatterlist.h>
72 
73 static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
74 static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
75 				      struct request_sock *req);
76 
77 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78 static void	__tcp_v6_send_check(struct sk_buff *skb,
79 				    struct in6_addr *saddr,
80 				    struct in6_addr *daddr);
81 
82 static const struct inet_connection_sock_af_ops ipv6_mapped;
83 static const struct inet_connection_sock_af_ops ipv6_specific;
84 #ifdef CONFIG_TCP_MD5SIG
85 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
86 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
87 #else
88 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
89 						   struct in6_addr *addr)
90 {
91 	return NULL;
92 }
93 #endif
94 
95 static void tcp_v6_hash(struct sock *sk)
96 {
97 	if (sk->sk_state != TCP_CLOSE) {
98 		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
99 			tcp_prot.hash(sk);
100 			return;
101 		}
102 		local_bh_disable();
103 		__inet6_hash(sk, NULL);
104 		local_bh_enable();
105 	}
106 }
107 
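/*
 * Compute the TCP checksum over the IPv6 pseudo-header (RFC 2460,
 * section 8.1): source and destination addresses, upper-layer length
 * and IPPROTO_TCP, folded together with @base, the partial sum over
 * the TCP header and payload.
 */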
108 static __inline__ __sum16 tcp_v6_check(int len,
109 				   struct in6_addr *saddr,
110 				   struct in6_addr *daddr,
111 				   __wsum base)
112 {
113 	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
114 }
115 
116 static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
117 {
118 	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
119 					    ipv6_hdr(skb)->saddr.s6_addr32,
120 					    tcp_hdr(skb)->dest,
121 					    tcp_hdr(skb)->source);
122 }
123 
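/*
 * Active open: validate the destination, resolve any flow label,
 * route the flow and choose a source address, then move to SYN_SENT
 * and emit the SYN via tcp_connect().  A v4-mapped destination is
 * redirected to tcp_v4_connect() below.  As a rough userspace sketch
 * (error handling omitted, fd an AF_INET6 stream socket):
 *
 *	struct sockaddr_in6 sa = { .sin6_family = AF_INET6,
 *				   .sin6_port = htons(80) };
 *
 *	inet_pton(AF_INET6, "::ffff:192.0.2.10", &sa.sin6_addr);
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * takes the IPV6_ADDR_MAPPED branch (unless IPV6_V6ONLY is set).
 */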
124 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
125 			  int addr_len)
126 {
127 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
128 	struct inet_sock *inet = inet_sk(sk);
129 	struct inet_connection_sock *icsk = inet_csk(sk);
130 	struct ipv6_pinfo *np = inet6_sk(sk);
131 	struct tcp_sock *tp = tcp_sk(sk);
132 	struct in6_addr *saddr = NULL, *final_p, final;
133 	struct rt6_info *rt;
134 	struct flowi fl;
135 	struct dst_entry *dst;
136 	int addr_type;
137 	int err;
138 
139 	if (addr_len < SIN6_LEN_RFC2133)
140 		return -EINVAL;
141 
142 	if (usin->sin6_family != AF_INET6)
143 		return -EAFNOSUPPORT;
144 
145 	memset(&fl, 0, sizeof(fl));
146 
147 	if (np->sndflow) {
148 		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
149 		IP6_ECN_flow_init(fl.fl6_flowlabel);
150 		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
151 			struct ip6_flowlabel *flowlabel;
152 			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
153 			if (flowlabel == NULL)
154 				return -EINVAL;
155 			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
156 			fl6_sock_release(flowlabel);
157 		}
158 	}
159 
160 	/*
161 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
162 	 */
163 
164 	if (ipv6_addr_any(&usin->sin6_addr))
165 		usin->sin6_addr.s6_addr[15] = 0x1;
166 
167 	addr_type = ipv6_addr_type(&usin->sin6_addr);
168 
169 	if (addr_type & IPV6_ADDR_MULTICAST)
170 		return -ENETUNREACH;
171 
172 	if (addr_type & IPV6_ADDR_LINKLOCAL) {
173 		if (addr_len >= sizeof(struct sockaddr_in6) &&
174 		    usin->sin6_scope_id) {
175 			/* If interface is set while binding, indices
176 			 * must coincide.
177 			 */
178 			if (sk->sk_bound_dev_if &&
179 			    sk->sk_bound_dev_if != usin->sin6_scope_id)
180 				return -EINVAL;
181 
182 			sk->sk_bound_dev_if = usin->sin6_scope_id;
183 		}
184 
185 		/* Connecting to a link-local address requires an interface */
186 		if (!sk->sk_bound_dev_if)
187 			return -EINVAL;
188 	}
189 
190 	if (tp->rx_opt.ts_recent_stamp &&
191 	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
192 		tp->rx_opt.ts_recent = 0;
193 		tp->rx_opt.ts_recent_stamp = 0;
194 		tp->write_seq = 0;
195 	}
196 
197 	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
198 	np->flow_label = fl.fl6_flowlabel;
199 
200 	/*
201 	 *	TCP over IPv4
202 	 */
203 
204 	if (addr_type == IPV6_ADDR_MAPPED) {
205 		u32 exthdrlen = icsk->icsk_ext_hdr_len;
206 		struct sockaddr_in sin;
207 
208 		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
209 
210 		if (__ipv6_only_sock(sk))
211 			return -ENETUNREACH;
212 
213 		sin.sin_family = AF_INET;
214 		sin.sin_port = usin->sin6_port;
215 		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
216 
217 		icsk->icsk_af_ops = &ipv6_mapped;
218 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
219 #ifdef CONFIG_TCP_MD5SIG
220 		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
221 #endif
222 
223 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
224 
225 		if (err) {
226 			icsk->icsk_ext_hdr_len = exthdrlen;
227 			icsk->icsk_af_ops = &ipv6_specific;
228 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
229 #ifdef CONFIG_TCP_MD5SIG
230 			tp->af_specific = &tcp_sock_ipv6_specific;
231 #endif
232 			goto failure;
233 		} else {
234 			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
235 			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
236 					       &np->rcv_saddr);
237 		}
238 
239 		return err;
240 	}
241 
242 	if (!ipv6_addr_any(&np->rcv_saddr))
243 		saddr = &np->rcv_saddr;
244 
245 	fl.proto = IPPROTO_TCP;
246 	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
247 	ipv6_addr_copy(&fl.fl6_src,
248 		       (saddr ? saddr : &np->saddr));
249 	fl.oif = sk->sk_bound_dev_if;
250 	fl.mark = sk->sk_mark;
251 	fl.fl_ip_dport = usin->sin6_port;
252 	fl.fl_ip_sport = inet->inet_sport;
253 
254 	final_p = fl6_update_dst(&fl, np->opt, &final);
255 
256 	security_sk_classify_flow(sk, &fl);
257 
258 	err = ip6_dst_lookup(sk, &dst, &fl);
259 	if (err)
260 		goto failure;
261 	if (final_p)
262 		ipv6_addr_copy(&fl.fl6_dst, final_p);
263 
264 	err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
265 	if (err < 0) {
266 		if (err == -EREMOTE)
267 			err = ip6_dst_blackhole(sk, &dst, &fl);
268 		if (err < 0)
269 			goto failure;
270 	}
271 
272 	if (saddr == NULL) {
273 		saddr = &fl.fl6_src;
274 		ipv6_addr_copy(&np->rcv_saddr, saddr);
275 	}
276 
277 	/* set the source address */
278 	ipv6_addr_copy(&np->saddr, saddr);
279 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
280 
281 	sk->sk_gso_type = SKB_GSO_TCPV6;
282 	__ip6_dst_store(sk, dst, NULL, NULL);
283 
284 	rt = (struct rt6_info *) dst;
285 	if (tcp_death_row.sysctl_tw_recycle &&
286 	    !tp->rx_opt.ts_recent_stamp &&
287 	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
288 		struct inet_peer *peer = rt6_get_peer(rt);
289 		/*
290 		 * VJ's idea. We save the last timestamp seen from
291 		 * the destination in the peer table when entering
292 		 * TIME-WAIT state, and initialize rx_opt.ts_recent
293 		 * from it when trying a new connection.
294 		 */
295 		if (peer) {
296 			inet_peer_refcheck(peer);
297 			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
298 				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
299 				tp->rx_opt.ts_recent = peer->tcp_ts;
300 			}
301 		}
302 	}
303 
304 	icsk->icsk_ext_hdr_len = 0;
305 	if (np->opt)
306 		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
307 					  np->opt->opt_nflen);
308 
309 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
310 
311 	inet->inet_dport = usin->sin6_port;
312 
313 	tcp_set_state(sk, TCP_SYN_SENT);
314 	err = inet6_hash_connect(&tcp_death_row, sk);
315 	if (err)
316 		goto late_failure;
317 
318 	if (!tp->write_seq)
319 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
320 							     np->daddr.s6_addr32,
321 							     inet->inet_sport,
322 							     inet->inet_dport);
323 
324 	err = tcp_connect(sk);
325 	if (err)
326 		goto late_failure;
327 
328 	return 0;
329 
330 late_failure:
331 	tcp_set_state(sk, TCP_CLOSE);
332 	__sk_dst_reset(sk);
333 failure:
334 	inet->inet_dport = 0;
335 	sk->sk_route_caps = 0;
336 	return err;
337 }
338 
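/*
 * ICMPv6 error handler.  Look up the socket the offending segment
 * belonged to; for PKT_TOOBIG adjust the cached path MTU and
 * retransmit, otherwise translate the ICMPv6 type/code into an errno
 * and report it (or park it in sk_err_soft while the socket is owned
 * by user context).
 */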
339 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
340 		u8 type, u8 code, int offset, __be32 info)
341 {
342 	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
343 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
344 	struct ipv6_pinfo *np;
345 	struct sock *sk;
346 	int err;
347 	struct tcp_sock *tp;
348 	__u32 seq;
349 	struct net *net = dev_net(skb->dev);
350 
351 	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
352 			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
353 
354 	if (sk == NULL) {
355 		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
356 				   ICMP6_MIB_INERRORS);
357 		return;
358 	}
359 
360 	if (sk->sk_state == TCP_TIME_WAIT) {
361 		inet_twsk_put(inet_twsk(sk));
362 		return;
363 	}
364 
365 	bh_lock_sock(sk);
366 	if (sock_owned_by_user(sk))
367 		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
368 
369 	if (sk->sk_state == TCP_CLOSE)
370 		goto out;
371 
372 	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
373 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
374 		goto out;
375 	}
376 
377 	tp = tcp_sk(sk);
378 	seq = ntohl(th->seq);
379 	if (sk->sk_state != TCP_LISTEN &&
380 	    !between(seq, tp->snd_una, tp->snd_nxt)) {
381 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
382 		goto out;
383 	}
384 
385 	np = inet6_sk(sk);
386 
387 	if (type == ICMPV6_PKT_TOOBIG) {
388 		struct dst_entry *dst = NULL;
389 
390 		if (sock_owned_by_user(sk))
391 			goto out;
392 		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
393 			goto out;
394 
395 		/* icmp should have updated the destination cache entry */
396 		dst = __sk_dst_check(sk, np->dst_cookie);
397 
398 		if (dst == NULL) {
399 			struct inet_sock *inet = inet_sk(sk);
400 			struct flowi fl;
401 
402 			/* BUGGG_FUTURE: Again, it is not clear how
403 			   to handle the rthdr case. Ignore this
404 			   complexity for now.
405 			 */
406 			memset(&fl, 0, sizeof(fl));
407 			fl.proto = IPPROTO_TCP;
408 			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
409 			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
410 			fl.oif = sk->sk_bound_dev_if;
411 			fl.mark = sk->sk_mark;
412 			fl.fl_ip_dport = inet->inet_dport;
413 			fl.fl_ip_sport = inet->inet_sport;
414 			security_skb_classify_flow(skb, &fl);
415 
416 			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
417 				sk->sk_err_soft = -err;
418 				goto out;
419 			}
420 
421 			if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
422 				sk->sk_err_soft = -err;
423 				goto out;
424 			}
425 
426 		} else
427 			dst_hold(dst);
428 
429 		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
430 			tcp_sync_mss(sk, dst_mtu(dst));
431 			tcp_simple_retransmit(sk);
432 		} /* else let the usual retransmit timer handle it */
433 		dst_release(dst);
434 		goto out;
435 	}
436 
437 	icmpv6_err_convert(type, code, &err);
438 
439 	/* Might be for a request_sock */
440 	switch (sk->sk_state) {
441 		struct request_sock *req, **prev;
442 	case TCP_LISTEN:
443 		if (sock_owned_by_user(sk))
444 			goto out;
445 
446 		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
447 					   &hdr->saddr, inet6_iif(skb));
448 		if (!req)
449 			goto out;
450 
451 		/* ICMPs are not backlogged, hence we cannot get
452 		 * an established socket here.
453 		 */
454 		WARN_ON(req->sk != NULL);
455 
456 		if (seq != tcp_rsk(req)->snt_isn) {
457 			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
458 			goto out;
459 		}
460 
461 		inet_csk_reqsk_queue_drop(sk, req, prev);
462 		goto out;
463 
464 	case TCP_SYN_SENT:
465 	case TCP_SYN_RECV:  /* Cannot happen.
466 			       It can, if SYNs are crossed. --ANK */
467 		if (!sock_owned_by_user(sk)) {
468 			sk->sk_err = err;
469 			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
470 
471 			tcp_done(sk);
472 		} else
473 			sk->sk_err_soft = err;
474 		goto out;
475 	}
476 
477 	if (!sock_owned_by_user(sk) && np->recverr) {
478 		sk->sk_err = err;
479 		sk->sk_error_report(sk);
480 	} else
481 		sk->sk_err_soft = err;
482 
483 out:
484 	bh_unlock_sock(sk);
485 	sock_put(sk);
486 }
487 
488 
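/*
 * Build and transmit a SYN|ACK for a pending connection request:
 * route back to the peer recorded in the request sock, have
 * tcp_make_synack() construct the segment, checksum it against the
 * IPv6 pseudo-header and push it out with ip6_xmit().
 */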
489 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
490 			      struct request_values *rvp)
491 {
492 	struct inet6_request_sock *treq = inet6_rsk(req);
493 	struct ipv6_pinfo *np = inet6_sk(sk);
494 	struct sk_buff *skb;
495 	struct ipv6_txoptions *opt = NULL;
496 	struct in6_addr *final_p, final;
497 	struct flowi fl;
498 	struct dst_entry *dst;
499 	int err = -1;
500 
501 	memset(&fl, 0, sizeof(fl));
502 	fl.proto = IPPROTO_TCP;
503 	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
504 	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
505 	fl.fl6_flowlabel = 0;
506 	fl.oif = treq->iif;
507 	fl.mark = sk->sk_mark;
508 	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
509 	fl.fl_ip_sport = inet_rsk(req)->loc_port;
510 	security_req_classify_flow(req, &fl);
511 
512 	opt = np->opt;
513 	final_p = fl6_update_dst(&fl, opt, &final);
514 
515 	err = ip6_dst_lookup(sk, &dst, &fl);
516 	if (err)
517 		goto done;
518 	if (final_p)
519 		ipv6_addr_copy(&fl.fl6_dst, final_p);
520 	if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
521 		goto done;
522 
523 	skb = tcp_make_synack(sk, dst, req, rvp);
524 	if (skb) {
525 		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
526 
527 		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
528 		err = ip6_xmit(sk, skb, &fl, opt);
529 		err = net_xmit_eval(err);
530 	}
531 
532 done:
533 	if (opt && opt != np->opt)
534 		sock_kfree_s(sk, opt, opt->tot_len);
535 	dst_release(dst);
536 	return err;
537 }
538 
539 static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
540 			     struct request_values *rvp)
541 {
542 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
543 	return tcp_v6_send_synack(sk, req, rvp);
544 }
545 
546 static inline void syn_flood_warning(struct sk_buff *skb)
547 {
548 #ifdef CONFIG_SYN_COOKIES
549 	if (sysctl_tcp_syncookies)
550 		printk(KERN_INFO
551 		       "TCPv6: Possible SYN flooding on port %d. "
552 		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
553 	else
554 #endif
555 		printk(KERN_INFO
556 		       "TCPv6: Possible SYN flooding on port %d. "
557 		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
558 }
559 
560 static void tcp_v6_reqsk_destructor(struct request_sock *req)
561 {
562 	kfree_skb(inet6_rsk(req)->pktopts);
563 }
564 
565 #ifdef CONFIG_TCP_MD5SIG
566 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
567 						   struct in6_addr *addr)
568 {
569 	struct tcp_sock *tp = tcp_sk(sk);
570 	int i;
571 
572 	BUG_ON(tp == NULL);
573 
574 	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
575 		return NULL;
576 
577 	for (i = 0; i < tp->md5sig_info->entries6; i++) {
578 		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
579 			return &tp->md5sig_info->keys6[i].base;
580 	}
581 	return NULL;
582 }
583 
584 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
585 						struct sock *addr_sk)
586 {
587 	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
588 }
589 
590 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
591 						      struct request_sock *req)
592 {
593 	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
594 }
595 
596 static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
597 			     char *newkey, u8 newkeylen)
598 {
599 	/* Add key to the list */
600 	struct tcp_md5sig_key *key;
601 	struct tcp_sock *tp = tcp_sk(sk);
602 	struct tcp6_md5sig_key *keys;
603 
604 	key = tcp_v6_md5_do_lookup(sk, peer);
605 	if (key) {
606 		/* modify existing entry - just update that one */
607 		kfree(key->key);
608 		key->key = newkey;
609 		key->keylen = newkeylen;
610 	} else {
611 		/* reallocate new list if current one is full. */
612 		if (!tp->md5sig_info) {
613 			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
614 			if (!tp->md5sig_info) {
615 				kfree(newkey);
616 				return -ENOMEM;
617 			}
618 			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
619 		}
620 		if (tcp_alloc_md5sig_pool(sk) == NULL) {
621 			kfree(newkey);
622 			return -ENOMEM;
623 		}
624 		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
625 			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
626 				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
627 
628 			if (!keys) {
629 				tcp_free_md5sig_pool();
630 				kfree(newkey);
631 				return -ENOMEM;
632 			}
633 
634 			if (tp->md5sig_info->entries6)
635 				memmove(keys, tp->md5sig_info->keys6,
636 					(sizeof (tp->md5sig_info->keys6[0]) *
637 					 tp->md5sig_info->entries6));
638 
639 			kfree(tp->md5sig_info->keys6);
640 			tp->md5sig_info->keys6 = keys;
641 			tp->md5sig_info->alloced6++;
642 		}
643 
644 		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
645 			       peer);
646 		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
647 		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
648 
649 		tp->md5sig_info->entries6++;
650 	}
651 	return 0;
652 }
653 
654 static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
655 			       u8 *newkey, __u8 newkeylen)
656 {
657 	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
658 				 newkey, newkeylen);
659 }
660 
661 static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
662 {
663 	struct tcp_sock *tp = tcp_sk(sk);
664 	int i;
665 
666 	for (i = 0; i < tp->md5sig_info->entries6; i++) {
667 		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
668 			/* Free the key */
669 			kfree(tp->md5sig_info->keys6[i].base.key);
670 			tp->md5sig_info->entries6--;
671 
672 			if (tp->md5sig_info->entries6 == 0) {
673 				kfree(tp->md5sig_info->keys6);
674 				tp->md5sig_info->keys6 = NULL;
675 				tp->md5sig_info->alloced6 = 0;
676 			} else {
677 				/* shrink the database */
678 				if (tp->md5sig_info->entries6 != i)
679 					memmove(&tp->md5sig_info->keys6[i],
680 						&tp->md5sig_info->keys6[i+1],
681 						(tp->md5sig_info->entries6 - i)
682 						* sizeof (tp->md5sig_info->keys6[0]));
683 			}
684 			tcp_free_md5sig_pool();
685 			return 0;
686 		}
687 	}
688 	return -ENOENT;
689 }
690 
691 static void tcp_v6_clear_md5_list(struct sock *sk)
692 {
693 	struct tcp_sock *tp = tcp_sk(sk);
694 	int i;
695 
696 	if (tp->md5sig_info->entries6) {
697 		for (i = 0; i < tp->md5sig_info->entries6; i++)
698 			kfree(tp->md5sig_info->keys6[i].base.key);
699 		tp->md5sig_info->entries6 = 0;
700 		tcp_free_md5sig_pool();
701 	}
702 
703 	kfree(tp->md5sig_info->keys6);
704 	tp->md5sig_info->keys6 = NULL;
705 	tp->md5sig_info->alloced6 = 0;
706 
707 	if (tp->md5sig_info->entries4) {
708 		for (i = 0; i < tp->md5sig_info->entries4; i++)
709 			kfree(tp->md5sig_info->keys4[i].base.key);
710 		tp->md5sig_info->entries4 = 0;
711 		tcp_free_md5sig_pool();
712 	}
713 
714 	kfree(tp->md5sig_info->keys4);
715 	tp->md5sig_info->keys4 = NULL;
716 	tp->md5sig_info->alloced4 = 0;
717 }
718 
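/*
 * Parse a TCP_MD5SIG setsockopt() request.  As a rough userspace
 * sketch (error handling omitted), installing a key for a peer looks
 * like:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A request with tcpm_keylen == 0 deletes the peer's key; v4-mapped
 * peers are handed to the IPv4 add/del helpers.
 */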
719 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
720 				 int optlen)
721 {
722 	struct tcp_md5sig cmd;
723 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
724 	u8 *newkey;
725 
726 	if (optlen < sizeof(cmd))
727 		return -EINVAL;
728 
729 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
730 		return -EFAULT;
731 
732 	if (sin6->sin6_family != AF_INET6)
733 		return -EINVAL;
734 
735 	if (!cmd.tcpm_keylen) {
736 		if (!tcp_sk(sk)->md5sig_info)
737 			return -ENOENT;
738 		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
739 			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
740 		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
741 	}
742 
743 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
744 		return -EINVAL;
745 
746 	if (!tcp_sk(sk)->md5sig_info) {
747 		struct tcp_sock *tp = tcp_sk(sk);
748 		struct tcp_md5sig_info *p;
749 
750 		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
751 		if (!p)
752 			return -ENOMEM;
753 
754 		tp->md5sig_info = p;
755 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
756 	}
757 
758 	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
759 	if (!newkey)
760 		return -ENOMEM;
761 	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
762 		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
763 					 newkey, cmd.tcpm_keylen);
764 	}
765 	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
766 }
767 
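/*
 * RFC 2385 signature helpers.  The MD5 digest covers, in order: the
 * IPv6 pseudo-header, the TCP header with its checksum field zeroed,
 * the segment payload (in the skb variant) and finally the key
 * itself.
 */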
768 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
769 					struct in6_addr *daddr,
770 					struct in6_addr *saddr, int nbytes)
771 {
772 	struct tcp6_pseudohdr *bp;
773 	struct scatterlist sg;
774 
775 	bp = &hp->md5_blk.ip6;
776 	/* 1. TCP pseudo-header (RFC2460) */
777 	ipv6_addr_copy(&bp->saddr, saddr);
778 	ipv6_addr_copy(&bp->daddr, daddr);
779 	bp->protocol = cpu_to_be32(IPPROTO_TCP);
780 	bp->len = cpu_to_be32(nbytes);
781 
782 	sg_init_one(&sg, bp, sizeof(*bp));
783 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
784 }
785 
786 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
787 			       struct in6_addr *daddr, struct in6_addr *saddr,
788 			       struct tcphdr *th)
789 {
790 	struct tcp_md5sig_pool *hp;
791 	struct hash_desc *desc;
792 
793 	hp = tcp_get_md5sig_pool();
794 	if (!hp)
795 		goto clear_hash_noput;
796 	desc = &hp->md5_desc;
797 
798 	if (crypto_hash_init(desc))
799 		goto clear_hash;
800 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
801 		goto clear_hash;
802 	if (tcp_md5_hash_header(hp, th))
803 		goto clear_hash;
804 	if (tcp_md5_hash_key(hp, key))
805 		goto clear_hash;
806 	if (crypto_hash_final(desc, md5_hash))
807 		goto clear_hash;
808 
809 	tcp_put_md5sig_pool();
810 	return 0;
811 
812 clear_hash:
813 	tcp_put_md5sig_pool();
814 clear_hash_noput:
815 	memset(md5_hash, 0, 16);
816 	return 1;
817 }
818 
819 static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
820 			       struct sock *sk, struct request_sock *req,
821 			       struct sk_buff *skb)
822 {
823 	struct in6_addr *saddr, *daddr;
824 	struct tcp_md5sig_pool *hp;
825 	struct hash_desc *desc;
826 	struct tcphdr *th = tcp_hdr(skb);
827 
828 	if (sk) {
829 		saddr = &inet6_sk(sk)->saddr;
830 		daddr = &inet6_sk(sk)->daddr;
831 	} else if (req) {
832 		saddr = &inet6_rsk(req)->loc_addr;
833 		daddr = &inet6_rsk(req)->rmt_addr;
834 	} else {
835 		struct ipv6hdr *ip6h = ipv6_hdr(skb);
836 		saddr = &ip6h->saddr;
837 		daddr = &ip6h->daddr;
838 	}
839 
840 	hp = tcp_get_md5sig_pool();
841 	if (!hp)
842 		goto clear_hash_noput;
843 	desc = &hp->md5_desc;
844 
845 	if (crypto_hash_init(desc))
846 		goto clear_hash;
847 
848 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
849 		goto clear_hash;
850 	if (tcp_md5_hash_header(hp, th))
851 		goto clear_hash;
852 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
853 		goto clear_hash;
854 	if (tcp_md5_hash_key(hp, key))
855 		goto clear_hash;
856 	if (crypto_hash_final(desc, md5_hash))
857 		goto clear_hash;
858 
859 	tcp_put_md5sig_pool();
860 	return 0;
861 
862 clear_hash:
863 	tcp_put_md5sig_pool();
864 clear_hash_noput:
865 	memset(md5_hash, 0, 16);
866 	return 1;
867 }
868 
869 static int tcp_v6_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
870 {
871 	__u8 *hash_location = NULL;
872 	struct tcp_md5sig_key *hash_expected;
873 	struct ipv6hdr *ip6h = ipv6_hdr(skb);
874 	struct tcphdr *th = tcp_hdr(skb);
875 	int genhash;
876 	u8 newhash[16];
877 
878 	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
879 	hash_location = tcp_parse_md5sig_option(th);
880 
881 	/* We've parsed the options - do we have a hash? */
882 	if (!hash_expected && !hash_location)
883 		return 0;
884 
885 	if (hash_expected && !hash_location) {
886 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
887 		return 1;
888 	}
889 
890 	if (!hash_expected && hash_location) {
891 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
892 		return 1;
893 	}
894 
895 	/* check the signature */
896 	genhash = tcp_v6_md5_hash_skb(newhash,
897 				      hash_expected,
898 				      NULL, NULL, skb);
899 
900 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
901 		if (net_ratelimit()) {
902 			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
903 			       genhash ? "failed" : "mismatch",
904 			       &ip6h->saddr, ntohs(th->source),
905 			       &ip6h->daddr, ntohs(th->dest));
906 		}
907 		return 1;
908 	}
909 	return 0;
910 }
911 #endif
912 
913 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
914 	.family		=	AF_INET6,
915 	.obj_size	=	sizeof(struct tcp6_request_sock),
916 	.rtx_syn_ack	=	tcp_v6_rtx_synack,
917 	.send_ack	=	tcp_v6_reqsk_send_ack,
918 	.destructor	=	tcp_v6_reqsk_destructor,
919 	.send_reset	=	tcp_v6_send_reset,
920 	.syn_ack_timeout = 	tcp_syn_ack_timeout,
921 };
922 
923 #ifdef CONFIG_TCP_MD5SIG
924 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
925 	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
926 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
927 };
928 #endif
929 
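/*
 * Fill in th->check.  Under CHECKSUM_PARTIAL only the pseudo-header
 * sum is stored, and csum_start/csum_offset tell the device (or
 * skb_checksum_help) where to fold in the rest; otherwise the full
 * checksum is computed in software right here.
 */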
930 static void __tcp_v6_send_check(struct sk_buff *skb,
931 				struct in6_addr *saddr, struct in6_addr *daddr)
932 {
933 	struct tcphdr *th = tcp_hdr(skb);
934 
935 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
936 		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
937 		skb->csum_start = skb_transport_header(skb) - skb->head;
938 		skb->csum_offset = offsetof(struct tcphdr, check);
939 	} else {
940 		th->check = tcp_v6_check(skb->len, saddr, daddr,
941 					 csum_partial(th, th->doff << 2,
942 						      skb->csum));
943 	}
944 }
945 
946 static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
947 {
948 	struct ipv6_pinfo *np = inet6_sk(sk);
949 
950 	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
951 }
952 
953 static int tcp_v6_gso_send_check(struct sk_buff *skb)
954 {
955 	struct ipv6hdr *ipv6h;
956 	struct tcphdr *th;
957 
958 	if (!pskb_may_pull(skb, sizeof(*th)))
959 		return -EINVAL;
960 
961 	ipv6h = ipv6_hdr(skb);
962 	th = tcp_hdr(skb);
963 
964 	th->check = 0;
965 	skb->ip_summed = CHECKSUM_PARTIAL;
966 	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
967 	return 0;
968 }
969 
970 static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
971 					 struct sk_buff *skb)
972 {
973 	struct ipv6hdr *iph = skb_gro_network_header(skb);
974 
975 	switch (skb->ip_summed) {
976 	case CHECKSUM_COMPLETE:
977 		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
978 				  skb->csum)) {
979 			skb->ip_summed = CHECKSUM_UNNECESSARY;
980 			break;
981 		}
982 
983 		/* fall through */
984 	case CHECKSUM_NONE:
985 		NAPI_GRO_CB(skb)->flush = 1;
986 		return NULL;
987 	}
988 
989 	return tcp_gro_receive(head, skb);
990 }
991 
992 static int tcp6_gro_complete(struct sk_buff *skb)
993 {
994 	struct ipv6hdr *iph = ipv6_hdr(skb);
995 	struct tcphdr *th = tcp_hdr(skb);
996 
997 	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
998 				  &iph->saddr, &iph->daddr, 0);
999 	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1000 
1001 	return tcp_gro_complete(skb);
1002 }
1003 
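/*
 * Send a bare ACK or RST derived from a received segment: allocate a
 * fresh skb, swap the addresses and ports, optionally append
 * timestamp and MD5 options, and transmit on the per-namespace
 * control socket without requiring any established state.
 */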
1004 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
1005 				 u32 ts, struct tcp_md5sig_key *key, int rst)
1006 {
1007 	struct tcphdr *th = tcp_hdr(skb), *t1;
1008 	struct sk_buff *buff;
1009 	struct flowi fl;
1010 	struct net *net = dev_net(skb_dst(skb)->dev);
1011 	struct sock *ctl_sk = net->ipv6.tcp_sk;
1012 	unsigned int tot_len = sizeof(struct tcphdr);
1013 	struct dst_entry *dst;
1014 	__be32 *topt;
1015 
1016 	if (ts)
1017 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
1018 #ifdef CONFIG_TCP_MD5SIG
1019 	if (key)
1020 		tot_len += TCPOLEN_MD5SIG_ALIGNED;
1021 #endif
1022 
1023 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1024 			 GFP_ATOMIC);
1025 	if (buff == NULL)
1026 		return;
1027 
1028 	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1029 
1030 	t1 = (struct tcphdr *) skb_push(buff, tot_len);
1031 	skb_reset_transport_header(buff);
1032 
1033 	/* Swap the send and the receive. */
1034 	memset(t1, 0, sizeof(*t1));
1035 	t1->dest = th->source;
1036 	t1->source = th->dest;
1037 	t1->doff = tot_len / 4;
1038 	t1->seq = htonl(seq);
1039 	t1->ack_seq = htonl(ack);
1040 	t1->ack = !rst || !th->ack;
1041 	t1->rst = rst;
1042 	t1->window = htons(win);
1043 
1044 	topt = (__be32 *)(t1 + 1);
1045 
1046 	if (ts) {
1047 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1048 				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1049 		*topt++ = htonl(tcp_time_stamp);
1050 		*topt++ = htonl(ts);
1051 	}
1052 
1053 #ifdef CONFIG_TCP_MD5SIG
1054 	if (key) {
1055 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1056 				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1057 		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
1058 				    &ipv6_hdr(skb)->saddr,
1059 				    &ipv6_hdr(skb)->daddr, t1);
1060 	}
1061 #endif
1062 
1063 	memset(&fl, 0, sizeof(fl));
1064 	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1065 	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1066 
1067 	buff->ip_summed = CHECKSUM_PARTIAL;
1068 	buff->csum = 0;
1069 
1070 	__tcp_v6_send_check(buff, &fl.fl6_src, &fl.fl6_dst);
1071 
1072 	fl.proto = IPPROTO_TCP;
1073 	fl.oif = inet6_iif(skb);
1074 	fl.fl_ip_dport = t1->dest;
1075 	fl.fl_ip_sport = t1->source;
1076 	security_skb_classify_flow(skb, &fl);
1077 
1078 	/* Pass a socket to ip6_dst_lookup even if it is for an RST;
1079 	 * the underlying function will use it to retrieve the network
1080 	 * namespace.
1081 	 */
1082 	if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
1083 		if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
1084 			skb_dst_set(buff, dst);
1085 			ip6_xmit(ctl_sk, buff, &fl, NULL);
1086 			TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
1087 			if (rst)
1088 				TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
1089 			return;
1090 		}
1091 	}
1092 
1093 	kfree_skb(buff);
1094 }
1095 
1096 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1097 {
1098 	struct tcphdr *th = tcp_hdr(skb);
1099 	u32 seq = 0, ack_seq = 0;
1100 	struct tcp_md5sig_key *key = NULL;
1101 
1102 	if (th->rst)
1103 		return;
1104 
1105 	if (!ipv6_unicast_destination(skb))
1106 		return;
1107 
1108 #ifdef CONFIG_TCP_MD5SIG
1109 	if (sk)
1110 		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
1111 #endif
1112 
1113 	if (th->ack)
1114 		seq = ntohl(th->ack_seq);
1115 	else
1116 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
1117 			  (th->doff << 2);
1118 
1119 	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
1120 }
1121 
1122 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
1123 			    struct tcp_md5sig_key *key)
1124 {
1125 	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
1126 }
1127 
1128 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1129 {
1130 	struct inet_timewait_sock *tw = inet_twsk(sk);
1131 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1132 
1133 	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1134 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1135 			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));
1136 
1137 	inet_twsk_put(tw);
1138 }
1139 
1140 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1141 				  struct request_sock *req)
1142 {
1143 	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
1144 			req->rcv_wnd, req->ts_recent, tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
1145 }
1146 
1147 
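/*
 * For a segment arriving on a listening socket, find what it belongs
 * to: a pending request sock (finish the handshake via
 * tcp_check_req), an already established child, a timewait bucket,
 * or, failing all of those, a possible syncookie ACK.
 */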
1148 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
1149 {
1150 	struct request_sock *req, **prev;
1151 	const struct tcphdr *th = tcp_hdr(skb);
1152 	struct sock *nsk;
1153 
1154 	/* Find possible connection requests. */
1155 	req = inet6_csk_search_req(sk, &prev, th->source,
1156 				   &ipv6_hdr(skb)->saddr,
1157 				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1158 	if (req)
1159 		return tcp_check_req(sk, skb, req, prev);
1160 
1161 	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1162 			&ipv6_hdr(skb)->saddr, th->source,
1163 			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1164 
1165 	if (nsk) {
1166 		if (nsk->sk_state != TCP_TIME_WAIT) {
1167 			bh_lock_sock(nsk);
1168 			return nsk;
1169 		}
1170 		inet_twsk_put(inet_twsk(nsk));
1171 		return NULL;
1172 	}
1173 
1174 #ifdef CONFIG_SYN_COOKIES
1175 	if (!th->syn)
1176 		sk = cookie_v6_check(sk, skb);
1177 #endif
1178 	return sk;
1179 }
1180 
1181 /* FIXME: this is substantially similar to the ipv4 code.
1182  * Can some kind of merge be done? -- erics
1183  */
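/*
 * Handle an incoming SYN: drop or fall back to syncookies when the
 * request queue is full, record the peer's addresses and options in
 * a request sock, pick the initial sequence number (applying VJ's
 * timestamp check under tw_recycle) and answer with a SYN|ACK.
 */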
1184 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1185 {
1186 	struct tcp_extend_values tmp_ext;
1187 	struct tcp_options_received tmp_opt;
1188 	u8 *hash_location;
1189 	struct request_sock *req;
1190 	struct inet6_request_sock *treq;
1191 	struct ipv6_pinfo *np = inet6_sk(sk);
1192 	struct tcp_sock *tp = tcp_sk(sk);
1193 	__u32 isn = TCP_SKB_CB(skb)->when;
1194 	struct dst_entry *dst = NULL;
1195 #ifdef CONFIG_SYN_COOKIES
1196 	int want_cookie = 0;
1197 #else
1198 #define want_cookie 0
1199 #endif
1200 
1201 	if (skb->protocol == htons(ETH_P_IP))
1202 		return tcp_v4_conn_request(sk, skb);
1203 
1204 	if (!ipv6_unicast_destination(skb))
1205 		goto drop;
1206 
1207 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1208 		if (net_ratelimit())
1209 			syn_flood_warning(skb);
1210 #ifdef CONFIG_SYN_COOKIES
1211 		if (sysctl_tcp_syncookies)
1212 			want_cookie = 1;
1213 		else
1214 #endif
1215 		goto drop;
1216 	}
1217 
1218 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1219 		goto drop;
1220 
1221 	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1222 	if (req == NULL)
1223 		goto drop;
1224 
1225 #ifdef CONFIG_TCP_MD5SIG
1226 	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1227 #endif
1228 
1229 	tcp_clear_options(&tmp_opt);
1230 	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1231 	tmp_opt.user_mss = tp->rx_opt.user_mss;
1232 	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1233 
1234 	if (tmp_opt.cookie_plus > 0 &&
1235 	    tmp_opt.saw_tstamp &&
1236 	    !tp->rx_opt.cookie_out_never &&
1237 	    (sysctl_tcp_cookie_size > 0 ||
1238 	     (tp->cookie_values != NULL &&
1239 	      tp->cookie_values->cookie_desired > 0))) {
1240 		u8 *c;
1241 		u32 *d;
1242 		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1243 		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1244 
1245 		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1246 			goto drop_and_free;
1247 
1248 		/* Secret recipe starts with IP addresses */
1249 		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1250 		*mess++ ^= *d++;
1251 		*mess++ ^= *d++;
1252 		*mess++ ^= *d++;
1253 		*mess++ ^= *d++;
1254 		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1255 		*mess++ ^= *d++;
1256 		*mess++ ^= *d++;
1257 		*mess++ ^= *d++;
1258 		*mess++ ^= *d++;
1259 
1260 		/* plus variable length Initiator Cookie */
1261 		c = (u8 *)mess;
1262 		while (l-- > 0)
1263 			*c++ ^= *hash_location++;
1264 
1265 #ifdef CONFIG_SYN_COOKIES
1266 		want_cookie = 0;	/* not our kind of cookie */
1267 #endif
1268 		tmp_ext.cookie_out_never = 0; /* false */
1269 		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1270 	} else if (!tp->rx_opt.cookie_in_always) {
1271 		/* redundant indications, but ensure initialization. */
1272 		tmp_ext.cookie_out_never = 1; /* true */
1273 		tmp_ext.cookie_plus = 0;
1274 	} else {
1275 		goto drop_and_free;
1276 	}
1277 	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1278 
1279 	if (want_cookie && !tmp_opt.saw_tstamp)
1280 		tcp_clear_options(&tmp_opt);
1281 
1282 	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1283 	tcp_openreq_init(req, &tmp_opt, skb);
1284 
1285 	treq = inet6_rsk(req);
1286 	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1287 	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
1288 	if (!want_cookie || tmp_opt.tstamp_ok)
1289 		TCP_ECN_create_request(req, tcp_hdr(skb));
1290 
1291 	if (!isn) {
1292 		struct inet_peer *peer = NULL;
1293 
1294 		if (ipv6_opt_accepted(sk, skb) ||
1295 		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1296 		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1297 			atomic_inc(&skb->users);
1298 			treq->pktopts = skb;
1299 		}
1300 		treq->iif = sk->sk_bound_dev_if;
1301 
1302 		/* So that link locals have meaning */
1303 		if (!sk->sk_bound_dev_if &&
1304 		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1305 			treq->iif = inet6_iif(skb);
1306 
1307 		if (want_cookie) {
1308 			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1309 			req->cookie_ts = tmp_opt.tstamp_ok;
1310 			goto have_isn;
1311 		}
1312 
1313 		/* VJ's idea. We save the last timestamp seen
1314 		 * from the destination in the peer table when entering
1315 		 * TIME-WAIT state, and check against it before
1316 		 * accepting a new connection request.
1317 		 *
1318 		 * If "isn" is not zero, this request hit an alive
1319 		 * timewait bucket, so all the necessary checks
1320 		 * are made in the function processing timewait state.
1321 		 */
1322 		if (tmp_opt.saw_tstamp &&
1323 		    tcp_death_row.sysctl_tw_recycle &&
1324 		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
1325 		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
1326 		    ipv6_addr_equal((struct in6_addr *)peer->daddr.a6,
1327 				    &treq->rmt_addr)) {
1328 			inet_peer_refcheck(peer);
1329 			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1330 			    (s32)(peer->tcp_ts - req->ts_recent) >
1331 							TCP_PAWS_WINDOW) {
1332 				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1333 				goto drop_and_release;
1334 			}
1335 		}
1336 		/* Kill the following clause, if you dislike this way. */
1337 		else if (!sysctl_tcp_syncookies &&
1338 			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1339 			  (sysctl_max_syn_backlog >> 2)) &&
1340 			 (!peer || !peer->tcp_ts_stamp) &&
1341 			 (!dst || !dst_metric(dst, RTAX_RTT))) {
1342 			/* Without syncookies, the last quarter of the
1343 			 * backlog is kept for destinations proven
1344 			 * to be alive.
1345 			 * This means we keep communicating with
1346 			 * destinations already known at the moment
1347 			 * the synflood started.
1348 			 */
1349 			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1350 				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1351 			goto drop_and_release;
1352 		}
1353 
1354 		isn = tcp_v6_init_sequence(skb);
1355 	}
1356 have_isn:
1357 	tcp_rsk(req)->snt_isn = isn;
1358 
1359 	security_inet_conn_request(sk, skb, req);
1360 
1361 	if (tcp_v6_send_synack(sk, req,
1362 			       (struct request_values *)&tmp_ext) ||
1363 	    want_cookie)
1364 		goto drop_and_free;
1365 
1366 	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1367 	return 0;
1368 
1369 drop_and_release:
1370 	dst_release(dst);
1371 drop_and_free:
1372 	reqsk_free(req);
1373 drop:
1374 	return 0; /* don't send reset */
1375 }
1376 
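/*
 * Create the child socket once the three-way handshake completes.
 * The v4-mapped case delegates to tcp_v4_syn_recv_sock() and patches
 * the child over to the mapped operations; the native case copies
 * addresses, IPv6 options and (optionally) the MD5 key from the
 * listener, then hashes the new socket.
 */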
1377 static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1378 					  struct request_sock *req,
1379 					  struct dst_entry *dst)
1380 {
1381 	struct inet6_request_sock *treq;
1382 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1383 	struct tcp6_sock *newtcp6sk;
1384 	struct inet_sock *newinet;
1385 	struct tcp_sock *newtp;
1386 	struct sock *newsk;
1387 	struct ipv6_txoptions *opt;
1388 #ifdef CONFIG_TCP_MD5SIG
1389 	struct tcp_md5sig_key *key;
1390 #endif
1391 
1392 	if (skb->protocol == htons(ETH_P_IP)) {
1393 		/*
1394 		 *	v6 mapped
1395 		 */
1396 
1397 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1398 
1399 		if (newsk == NULL)
1400 			return NULL;
1401 
1402 		newtcp6sk = (struct tcp6_sock *)newsk;
1403 		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1404 
1405 		newinet = inet_sk(newsk);
1406 		newnp = inet6_sk(newsk);
1407 		newtp = tcp_sk(newsk);
1408 
1409 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1410 
1411 		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1412 
1413 		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1414 
1415 		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1416 
1417 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1418 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1419 #ifdef CONFIG_TCP_MD5SIG
1420 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1421 #endif
1422 
1423 		newnp->pktoptions  = NULL;
1424 		newnp->opt	   = NULL;
1425 		newnp->mcast_oif   = inet6_iif(skb);
1426 		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1427 
1428 		/*
1429 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1430 		 * here, tcp_create_openreq_child now does this for us, see the comment in
1431 		 * that function for the gory details. -acme
1432 		 */
1433 
1434 		/* This is a tricky place. Until this moment the IPv4
1435 		   tcp code worked with the IPv6 icsk.icsk_af_ops.
1436 		   Sync it now.
1437 		 */
1438 		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1439 
1440 		return newsk;
1441 	}
1442 
1443 	treq = inet6_rsk(req);
1444 	opt = np->opt;
1445 
1446 	if (sk_acceptq_is_full(sk))
1447 		goto out_overflow;
1448 
1449 	if (!dst) {
1450 		dst = inet6_csk_route_req(sk, req);
1451 		if (!dst)
1452 			goto out;
1453 	}
1454 
1455 	newsk = tcp_create_openreq_child(sk, req, skb);
1456 	if (newsk == NULL)
1457 		goto out_nonewsk;
1458 
1459 	/*
1460 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1461 	 * count here, tcp_create_openreq_child now does this for us, see the
1462 	 * comment in that function for the gory details. -acme
1463 	 */
1464 
1465 	newsk->sk_gso_type = SKB_GSO_TCPV6;
1466 	__ip6_dst_store(newsk, dst, NULL, NULL);
1467 
1468 	newtcp6sk = (struct tcp6_sock *)newsk;
1469 	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1470 
1471 	newtp = tcp_sk(newsk);
1472 	newinet = inet_sk(newsk);
1473 	newnp = inet6_sk(newsk);
1474 
1475 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1476 
1477 	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1478 	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1479 	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1480 	newsk->sk_bound_dev_if = treq->iif;
1481 
1482 	/* Now IPv6 options...
1483 
1484 	   First: no IPv4 options.
1485 	 */
1486 	newinet->opt = NULL;
1487 	newnp->ipv6_fl_list = NULL;
1488 
1489 	/* Clone RX bits */
1490 	newnp->rxopt.all = np->rxopt.all;
1491 
1492 	/* Clone pktoptions received with SYN */
1493 	newnp->pktoptions = NULL;
1494 	if (treq->pktopts != NULL) {
1495 		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1496 		kfree_skb(treq->pktopts);
1497 		treq->pktopts = NULL;
1498 		if (newnp->pktoptions)
1499 			skb_set_owner_r(newnp->pktoptions, newsk);
1500 	}
1501 	newnp->opt	  = NULL;
1502 	newnp->mcast_oif  = inet6_iif(skb);
1503 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1504 
1505 	/* Clone native IPv6 options from the listening socket (if any).
1506 
1507 	   Yes, keeping a reference count would be much more clever,
1508 	   but we do one more thing here: reattach optmem
1509 	   to newsk.
1510 	 */
1511 	if (opt) {
1512 		newnp->opt = ipv6_dup_options(newsk, opt);
1513 		if (opt != np->opt)
1514 			sock_kfree_s(sk, opt, opt->tot_len);
1515 	}
1516 
1517 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1518 	if (newnp->opt)
1519 		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1520 						     newnp->opt->opt_flen);
1521 
1522 	tcp_mtup_init(newsk);
1523 	tcp_sync_mss(newsk, dst_mtu(dst));
1524 	newtp->advmss = dst_metric_advmss(dst);
1525 	tcp_initialize_rcv_mss(newsk);
1526 
1527 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1528 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1529 
1530 #ifdef CONFIG_TCP_MD5SIG
1531 	/* Copy over the MD5 key from the original socket */
1532 	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1533 		/* We're using one, so create a matching key
1534 		 * on the newsk structure. If we fail to get
1535 		 * memory, then we end up not copying the key
1536 		 * across. Shucks.
1537 		 */
1538 		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1539 		if (newkey != NULL)
1540 			tcp_v6_md5_do_add(newsk, &newnp->daddr,
1541 					  newkey, key->keylen);
1542 	}
1543 #endif
1544 
1545 	if (__inet_inherit_port(sk, newsk) < 0) {
1546 		sock_put(newsk);
1547 		goto out;
1548 	}
1549 	__inet6_hash(newsk, NULL);
1550 
1551 	return newsk;
1552 
1553 out_overflow:
1554 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1555 out_nonewsk:
1556 	if (opt && opt != np->opt)
1557 		sock_kfree_s(sk, opt, opt->tot_len);
1558 	dst_release(dst);
1559 out:
1560 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1561 	return NULL;
1562 }
1563 
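/*
 * Validate the checksum of an incoming segment.  CHECKSUM_COMPLETE
 * packets are verified against the pseudo-header immediately; for
 * the rest, short packets (<= 76 bytes) are checked here, while
 * longer ones keep the pseudo-header seed in skb->csum and are
 * completed later, e.g. while copying to userspace.
 */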
1564 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1565 {
1566 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1567 		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1568 				  &ipv6_hdr(skb)->daddr, skb->csum)) {
1569 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1570 			return 0;
1571 		}
1572 	}
1573 
1574 	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1575 					      &ipv6_hdr(skb)->saddr,
1576 					      &ipv6_hdr(skb)->daddr, 0));
1577 
1578 	if (skb->len <= 76) {
1579 		return __skb_checksum_complete(skb);
1580 	}
1581 	return 0;
1582 }
1583 
1584 /* The socket must have its spinlock held when we get
1585  * here.
1586  *
1587  * We have a potential double-lock case here, so even when
1588  * doing backlog processing we use the BH locking scheme.
1589  * This is because we cannot sleep with the original spinlock
1590  * held.
1591  */
1592 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1593 {
1594 	struct ipv6_pinfo *np = inet6_sk(sk);
1595 	struct tcp_sock *tp;
1596 	struct sk_buff *opt_skb = NULL;
1597 
1598 	/* Imagine: the socket is IPv6. An IPv4 packet arrives,
1599 	   goes to the IPv4 receive handler and is backlogged.
1600 	   From the backlog it always goes here. Kerboom...
1601 	   Fortunately, tcp_rcv_established and rcv_established
1602 	   handle them correctly, but it is not the case with
1603 	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1604 	 */
1605 
1606 	if (skb->protocol == htons(ETH_P_IP))
1607 		return tcp_v4_do_rcv(sk, skb);
1608 
1609 #ifdef CONFIG_TCP_MD5SIG
1610 	if (tcp_v6_inbound_md5_hash(sk, skb))
1611 		goto discard;
1612 #endif
1613 
1614 	if (sk_filter(sk, skb))
1615 		goto discard;
1616 
1617 	/*
1618 	 *	socket locking is here for SMP purposes as backlog rcv
1619 	 *	is currently called with bh processing disabled.
1620 	 */
1621 
1622 	/* Do Stevens' IPV6_PKTOPTIONS.
1623 
1624 	   Yes, guys, it is the only place in our code where we
1625 	   may do it without affecting IPv4.
1626 	   The rest of the code is protocol independent,
1627 	   and I do not like the idea of uglifying IPv4.
1628 
1629 	   Actually, the whole idea behind IPV6_PKTOPTIONS
1630 	   does not look very well thought out. For now we latch
1631 	   the options received in the last packet enqueued
1632 	   by tcp. Feel free to propose a better solution.
1633 					       --ANK (980728)
1634 	 */
1635 	if (np->rxopt.all)
1636 		opt_skb = skb_clone(skb, GFP_ATOMIC);
1637 
1638 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1639 		TCP_CHECK_TIMER(sk);
1640 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1641 			goto reset;
1642 		TCP_CHECK_TIMER(sk);
1643 		if (opt_skb)
1644 			goto ipv6_pktoptions;
1645 		return 0;
1646 	}
1647 
1648 	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1649 		goto csum_err;
1650 
1651 	if (sk->sk_state == TCP_LISTEN) {
1652 		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1653 		if (!nsk)
1654 			goto discard;
1655 
1656 		/*
1657 		 * Queue it on the new socket if the new socket is active,
1658 		 * otherwise we just shortcircuit this and continue with
1659 		 * the new socket..
1660 		 */
1661 		if (nsk != sk) {
1662 			if (tcp_child_process(sk, nsk, skb))
1663 				goto reset;
1664 			if (opt_skb)
1665 				__kfree_skb(opt_skb);
1666 			return 0;
1667 		}
1668 	}
1669 
1670 	TCP_CHECK_TIMER(sk);
1671 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1672 		goto reset;
1673 	TCP_CHECK_TIMER(sk);
1674 	if (opt_skb)
1675 		goto ipv6_pktoptions;
1676 	return 0;
1677 
1678 reset:
1679 	tcp_v6_send_reset(sk, skb);
1680 discard:
1681 	if (opt_skb)
1682 		__kfree_skb(opt_skb);
1683 	kfree_skb(skb);
1684 	return 0;
1685 csum_err:
1686 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1687 	goto discard;
1688 
1689 
1690 ipv6_pktoptions:
1691 	/* You may ask, what is this?
1692 
1693 	   1. skb was enqueued by tcp.
1694 	   2. skb is added to the tail of the read queue, rather than out of order.
1695 	   3. The socket is not in a passive state.
1696 	   4. Finally, it really contains options that the user wants to receive.
1697 	 */
1698 	tp = tcp_sk(sk);
1699 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1700 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1701 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1702 			np->mcast_oif = inet6_iif(opt_skb);
1703 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1704 			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1705 		if (ipv6_opt_accepted(sk, opt_skb)) {
1706 			skb_set_owner_r(opt_skb, sk);
1707 			opt_skb = xchg(&np->pktoptions, opt_skb);
1708 		} else {
1709 			__kfree_skb(opt_skb);
1710 			opt_skb = xchg(&np->pktoptions, NULL);
1711 		}
1712 	}
1713 
1714 	kfree_skb(opt_skb);
1715 	return 0;
1716 }
1717 
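/*
 * Main receive path, called from the IPv6 stack in softirq context.
 * Validate and checksum the header, fill in the TCP control block,
 * look the socket up in the established/listening tables, and either
 * process the segment directly, prequeue it, or park it on the
 * backlog while the socket is owned by user context.
 */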
1718 static int tcp_v6_rcv(struct sk_buff *skb)
1719 {
1720 	struct tcphdr *th;
1721 	struct ipv6hdr *hdr;
1722 	struct sock *sk;
1723 	int ret;
1724 	struct net *net = dev_net(skb->dev);
1725 
1726 	if (skb->pkt_type != PACKET_HOST)
1727 		goto discard_it;
1728 
1729 	/*
1730 	 *	Count it even if it's bad.
1731 	 */
1732 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1733 
1734 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1735 		goto discard_it;
1736 
1737 	th = tcp_hdr(skb);
1738 
1739 	if (th->doff < sizeof(struct tcphdr)/4)
1740 		goto bad_packet;
1741 	if (!pskb_may_pull(skb, th->doff*4))
1742 		goto discard_it;
1743 
1744 	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1745 		goto bad_packet;
1746 
1747 	th = tcp_hdr(skb);
1748 	hdr = ipv6_hdr(skb);
1749 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1750 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1751 				    skb->len - th->doff*4);
1752 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1753 	TCP_SKB_CB(skb)->when = 0;
1754 	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
1755 	TCP_SKB_CB(skb)->sacked = 0;
1756 
1757 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1758 	if (!sk)
1759 		goto no_tcp_socket;
1760 
1761 process:
1762 	if (sk->sk_state == TCP_TIME_WAIT)
1763 		goto do_time_wait;
1764 
1765 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1766 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1767 		goto discard_and_relse;
1768 	}
1769 
1770 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1771 		goto discard_and_relse;
1772 
1773 	if (sk_filter(sk, skb))
1774 		goto discard_and_relse;
1775 
1776 	skb->dev = NULL;
1777 
1778 	bh_lock_sock_nested(sk);
1779 	ret = 0;
1780 	if (!sock_owned_by_user(sk)) {
1781 #ifdef CONFIG_NET_DMA
1782 		struct tcp_sock *tp = tcp_sk(sk);
1783 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1784 			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1785 		if (tp->ucopy.dma_chan)
1786 			ret = tcp_v6_do_rcv(sk, skb);
1787 		else
1788 #endif
1789 		{
1790 			if (!tcp_prequeue(sk, skb))
1791 				ret = tcp_v6_do_rcv(sk, skb);
1792 		}
1793 	} else if (unlikely(sk_add_backlog(sk, skb))) {
1794 		bh_unlock_sock(sk);
1795 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1796 		goto discard_and_relse;
1797 	}
1798 	bh_unlock_sock(sk);
1799 
1800 	sock_put(sk);
1801 	return ret ? -1 : 0;
1802 
1803 no_tcp_socket:
1804 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1805 		goto discard_it;
1806 
1807 	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1808 bad_packet:
1809 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1810 	} else {
1811 		tcp_v6_send_reset(NULL, skb);
1812 	}
1813 
1814 discard_it:
1815 
1816 	/*
1817 	 *	Discard frame
1818 	 */
1819 
1820 	kfree_skb(skb);
1821 	return 0;
1822 
1823 discard_and_relse:
1824 	sock_put(sk);
1825 	goto discard_it;
1826 
1827 do_time_wait:
1828 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1829 		inet_twsk_put(inet_twsk(sk));
1830 		goto discard_it;
1831 	}
1832 
1833 	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1834 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1835 		inet_twsk_put(inet_twsk(sk));
1836 		goto discard_it;
1837 	}
1838 
1839 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1840 	case TCP_TW_SYN:
1841 	{
1842 		struct sock *sk2;
1843 
1844 		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1845 					    &ipv6_hdr(skb)->daddr,
1846 					    ntohs(th->dest), inet6_iif(skb));
1847 		if (sk2 != NULL) {
1848 			struct inet_timewait_sock *tw = inet_twsk(sk);
1849 			inet_twsk_deschedule(tw, &tcp_death_row);
1850 			inet_twsk_put(tw);
1851 			sk = sk2;
1852 			goto process;
1853 		}
1854 		/* Fall through to ACK */
1855 	}
1856 	case TCP_TW_ACK:
1857 		tcp_v6_timewait_ack(sk, skb);
1858 		break;
1859 	case TCP_TW_RST:
1860 		goto no_tcp_socket;
1861 	case TCP_TW_SUCCESS:;
1862 	}
1863 	goto discard_it;
1864 }
1865 
1866 static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1867 {
1868 	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1869 	struct ipv6_pinfo *np = inet6_sk(sk);
1870 	struct inet_peer *peer;
1871 
1872 	if (!rt ||
1873 	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1874 		peer = inet_getpeer_v6(&np->daddr, 1);
1875 		*release_it = true;
1876 	} else {
1877 		if (!rt->rt6i_peer)
1878 			rt6_bind_peer(rt, 1);
1879 		peer = rt->rt6i_peer;
1880 		*release_it = false;
1881 	}
1882 
1883 	return peer;
1884 }
1885 
1886 static void *tcp_v6_tw_get_peer(struct sock *sk)
1887 {
1888 	struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1889 	struct inet_timewait_sock *tw = inet_twsk(sk);
1890 
1891 	if (tw->tw_family == AF_INET)
1892 		return tcp_v4_tw_get_peer(sk);
1893 
1894 	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1895 }
1896 
1897 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1898 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1899 	.twsk_unique	= tcp_twsk_unique,
1900 	.twsk_destructor= tcp_twsk_destructor,
1901 	.twsk_getpeer	= tcp_v6_tw_get_peer,
1902 };
1903 
1904 static const struct inet_connection_sock_af_ops ipv6_specific = {
1905 	.queue_xmit	   = inet6_csk_xmit,
1906 	.send_check	   = tcp_v6_send_check,
1907 	.rebuild_header	   = inet6_sk_rebuild_header,
1908 	.conn_request	   = tcp_v6_conn_request,
1909 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1910 	.get_peer	   = tcp_v6_get_peer,
1911 	.net_header_len	   = sizeof(struct ipv6hdr),
1912 	.setsockopt	   = ipv6_setsockopt,
1913 	.getsockopt	   = ipv6_getsockopt,
1914 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1915 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1916 	.bind_conflict	   = inet6_csk_bind_conflict,
1917 #ifdef CONFIG_COMPAT
1918 	.compat_setsockopt = compat_ipv6_setsockopt,
1919 	.compat_getsockopt = compat_ipv6_getsockopt,
1920 #endif
1921 };
1922 
1923 #ifdef CONFIG_TCP_MD5SIG
1924 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1925 	.md5_lookup	=	tcp_v6_md5_lookup,
1926 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1927 	.md5_add	=	tcp_v6_md5_add_func,
1928 	.md5_parse	=	tcp_v6_parse_md5_keys,
1929 };
1930 #endif
1931 
1932 /*
1933  *	TCP over IPv4 via INET6 API
1934  */
1935 
1936 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1937 	.queue_xmit	   = ip_queue_xmit,
1938 	.send_check	   = tcp_v4_send_check,
1939 	.rebuild_header	   = inet_sk_rebuild_header,
1940 	.conn_request	   = tcp_v6_conn_request,
1941 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1942 	.get_peer	   = tcp_v4_get_peer,
1943 	.net_header_len	   = sizeof(struct iphdr),
1944 	.setsockopt	   = ipv6_setsockopt,
1945 	.getsockopt	   = ipv6_getsockopt,
1946 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1947 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1948 	.bind_conflict	   = inet6_csk_bind_conflict,
1949 #ifdef CONFIG_COMPAT
1950 	.compat_setsockopt = compat_ipv6_setsockopt,
1951 	.compat_getsockopt = compat_ipv6_getsockopt,
1952 #endif
1953 };
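/* Illustrative sketch, not part of this file: a userspace AF_INET6
 * socket reaches the mapped ops above by connecting to a V4-mapped
 * address (the address below is a hypothetical documentation address):
 *
 *	struct sockaddr_in6 sin6 = { .sin6_family = AF_INET6,
 *				     .sin6_port = htons(80) };
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &sin6.sin6_addr);
 *	connect(fd, (struct sockaddr *)&sin6, sizeof(sin6));
 *
 * tcp_v6_connect() detects the mapped destination and switches
 * icsk_af_ops over to ipv6_mapped, so the flow is carried as IPv4 on
 * the wire while the socket keeps its IPv6 sockaddr semantics.
 */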
1954 
1955 #ifdef CONFIG_TCP_MD5SIG
1956 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1957 	.md5_lookup	=	tcp_v4_md5_lookup,
1958 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1959 	.md5_add	=	tcp_v6_md5_add_func,
1960 	.md5_parse	=	tcp_v6_parse_md5_keys,
1961 };
1962 #endif
1963 
1964 /* NOTE: sk_alloc() already zeroes much of the socket explicitly,
1965  *       so many fields need not be initialized here.
1966  */
1967 static int tcp_v6_init_sock(struct sock *sk)
1968 {
1969 	struct inet_connection_sock *icsk = inet_csk(sk);
1970 	struct tcp_sock *tp = tcp_sk(sk);
1971 
1972 	skb_queue_head_init(&tp->out_of_order_queue);
1973 	tcp_init_xmit_timers(sk);
1974 	tcp_prequeue_init(tp);
1975 
1976 	icsk->icsk_rto = TCP_TIMEOUT_INIT;
1977 	tp->mdev = TCP_TIMEOUT_INIT;
1978 
1979 	/* So many TCP implementations out there (incorrectly) count the
1980 	 * initial SYN frame in their delayed-ACK and congestion control
1981 	 * algorithms that we must have the following bandaid to talk
1982 	 * efficiently to them.  -DaveM
1983 	 */
1984 	tp->snd_cwnd = 2;
1985 
1986 	/* See draft-stevens-tcpca-spec-01 for discussion of the
1987 	 * initialization of these values.
1988 	 */
1989 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1990 	tp->snd_cwnd_clamp = ~0;
1991 	tp->mss_cache = TCP_MSS_DEFAULT;
1992 
1993 	tp->reordering = sysctl_tcp_reordering;
1994 
1995 	sk->sk_state = TCP_CLOSE;
1996 
1997 	icsk->icsk_af_ops = &ipv6_specific;
1998 	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1999 	icsk->icsk_sync_mss = tcp_sync_mss;
2000 	sk->sk_write_space = sk_stream_write_space;
2001 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2002 
2003 #ifdef CONFIG_TCP_MD5SIG
2004 	tp->af_specific = &tcp_sock_ipv6_specific;
2005 #endif
2006 
2007 	/* TCP Cookie Transactions */
2008 	if (sysctl_tcp_cookie_size > 0) {
2009 		/* Default, cookies without s_data_payload. */
2010 		tp->cookie_values =
2011 			kzalloc(sizeof(*tp->cookie_values),
2012 				sk->sk_allocation);
2013 		if (tp->cookie_values != NULL)
2014 			kref_init(&tp->cookie_values->kref);
2015 	}
2016 	/* Presumed zeroed, in order of appearance:
2017 	 *	cookie_in_always, cookie_out_never,
2018 	 *	s_data_constant, s_data_in, s_data_out
2019 	 */
2020 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
2021 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
2022 
2023 	local_bh_disable();
2024 	percpu_counter_inc(&tcp_sockets_allocated);
2025 	local_bh_enable();
2026 
2027 	return 0;
2028 }
2029 
2030 static void tcp_v6_destroy_sock(struct sock *sk)
2031 {
2032 #ifdef CONFIG_TCP_MD5SIG
2033 	/* Clean up the MD5 key list */
2034 	if (tcp_sk(sk)->md5sig_info)
2035 		tcp_v6_clear_md5_list(sk);
2036 #endif
2037 	tcp_v4_destroy_sock(sk);
2038 	inet6_destroy_sock(sk);
2039 }
2040 
2041 #ifdef CONFIG_PROC_FS
2042 /* Proc filesystem TCPv6 sock list dumping. */
2043 static void get_openreq6(struct seq_file *seq,
2044 			 struct sock *sk, struct request_sock *req, int i, int uid)
2045 {
2046 	int ttd = req->expires - jiffies;
2047 	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
2048 	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
2049 
2050 	if (ttd < 0)
2051 		ttd = 0;
2052 
2053 	seq_printf(seq,
2054 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2055 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2056 		   i,
2057 		   src->s6_addr32[0], src->s6_addr32[1],
2058 		   src->s6_addr32[2], src->s6_addr32[3],
2059 		   ntohs(inet_rsk(req)->loc_port),
2060 		   dest->s6_addr32[0], dest->s6_addr32[1],
2061 		   dest->s6_addr32[2], dest->s6_addr32[3],
2062 		   ntohs(inet_rsk(req)->rmt_port),
2063 		   TCP_SYN_RECV,
2064 		   0, 0, /* could print option size, but that is af dependent. */
2065 		   1,   /* timers active (only the expire timer) */
2066 		   jiffies_to_clock_t(ttd),
2067 		   req->retrans,
2068 		   uid,
2069 		   0,  /* non standard timer */
2070 		   0, /* open_requests have no inode */
2071 		   0, req);
2072 }
2073 
2074 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2075 {
2076 	struct in6_addr *dest, *src;
2077 	__u16 destp, srcp;
2078 	int timer_active;
2079 	unsigned long timer_expires;
2080 	struct inet_sock *inet = inet_sk(sp);
2081 	struct tcp_sock *tp = tcp_sk(sp);
2082 	const struct inet_connection_sock *icsk = inet_csk(sp);
2083 	struct ipv6_pinfo *np = inet6_sk(sp);
2084 
2085 	dest  = &np->daddr;
2086 	src   = &np->rcv_saddr;
2087 	destp = ntohs(inet->inet_dport);
2088 	srcp  = ntohs(inet->inet_sport);
2089 
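	/* Encode which timer is running for the tm->when column:
	 * 1 = retransmit, 4 = zero-window probe, 2 = keepalive
	 * (sk_timer), 0 = none.  Value 3 is reserved for TIME_WAIT
	 * sockets, see get_timewait6_sock().
	 */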
2090 	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2091 		timer_active	= 1;
2092 		timer_expires	= icsk->icsk_timeout;
2093 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2094 		timer_active	= 4;
2095 		timer_expires	= icsk->icsk_timeout;
2096 	} else if (timer_pending(&sp->sk_timer)) {
2097 		timer_active	= 2;
2098 		timer_expires	= sp->sk_timer.expires;
2099 	} else {
2100 		timer_active	= 0;
2101 		timer_expires = jiffies;
2102 	}
2103 
2104 	seq_printf(seq,
2105 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2106 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
2107 		   i,
2108 		   src->s6_addr32[0], src->s6_addr32[1],
2109 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2110 		   dest->s6_addr32[0], dest->s6_addr32[1],
2111 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2112 		   sp->sk_state,
2113 		   tp->write_seq - tp->snd_una,
2114 		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
2115 		   timer_active,
2116 		   jiffies_to_clock_t(timer_expires - jiffies),
2117 		   icsk->icsk_retransmits,
2118 		   sock_i_uid(sp),
2119 		   icsk->icsk_probes_out,
2120 		   sock_i_ino(sp),
2121 		   atomic_read(&sp->sk_refcnt), sp,
2122 		   jiffies_to_clock_t(icsk->icsk_rto),
2123 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
2124 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2125 		   tp->snd_cwnd,
2126 		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
2127 		   );
2128 }
2129 
2130 static void get_timewait6_sock(struct seq_file *seq,
2131 			       struct inet_timewait_sock *tw, int i)
2132 {
2133 	struct in6_addr *dest, *src;
2134 	__u16 destp, srcp;
2135 	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
2136 	int ttd = tw->tw_ttd - jiffies;
2137 
2138 	if (ttd < 0)
2139 		ttd = 0;
2140 
2141 	dest = &tw6->tw_v6_daddr;
2142 	src  = &tw6->tw_v6_rcv_saddr;
2143 	destp = ntohs(tw->tw_dport);
2144 	srcp  = ntohs(tw->tw_sport);
2145 
2146 	seq_printf(seq,
2147 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2148 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2149 		   i,
2150 		   src->s6_addr32[0], src->s6_addr32[1],
2151 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2152 		   dest->s6_addr32[0], dest->s6_addr32[1],
2153 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2154 		   tw->tw_substate, 0, 0,
2155 		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2156 		   atomic_read(&tw->tw_refcnt), tw);
2157 }
2158 
2159 static int tcp6_seq_show(struct seq_file *seq, void *v)
2160 {
2161 	struct tcp_iter_state *st;
2162 
2163 	if (v == SEQ_START_TOKEN) {
2164 		seq_puts(seq,
2165 			 "  sl  "
2166 			 "local_address                         "
2167 			 "remote_address                        "
2168 			 "st tx_queue rx_queue tr tm->when retrnsmt"
2169 			 "   uid  timeout inode\n");
2170 		goto out;
2171 	}
2172 	st = seq->private;
2173 
2174 	switch (st->state) {
2175 	case TCP_SEQ_STATE_LISTENING:
2176 	case TCP_SEQ_STATE_ESTABLISHED:
2177 		get_tcp6_sock(seq, v, st->num);
2178 		break;
2179 	case TCP_SEQ_STATE_OPENREQ:
2180 		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2181 		break;
2182 	case TCP_SEQ_STATE_TIME_WAIT:
2183 		get_timewait6_sock(seq, v, st->num);
2184 		break;
2185 	}
2186 out:
2187 	return 0;
2188 }
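/* Illustrative sketch, not part of this file: the seq_file output
 * above appears as /proc/net/tcp6 and can be consumed line by line
 * from userspace, e.g.
 *
 *	FILE *f = fopen("/proc/net/tcp6", "r");
 *	char line[512];
 *
 *	while (f && fgets(line, sizeof(line), f))
 *		... parse the columns written by get_tcp6_sock() ...
 *
 * which is how tools in the netstat(8) family obtain their data.
 */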
2189 
2190 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2191 	.name		= "tcp6",
2192 	.family		= AF_INET6,
2193 	.seq_fops	= {
2194 		.owner		= THIS_MODULE,
2195 	},
2196 	.seq_ops	= {
2197 		.show		= tcp6_seq_show,
2198 	},
2199 };
2200 
2201 int __net_init tcp6_proc_init(struct net *net)
2202 {
2203 	return tcp_proc_register(net, &tcp6_seq_afinfo);
2204 }
2205 
2206 void tcp6_proc_exit(struct net *net)
2207 {
2208 	tcp_proc_unregister(net, &tcp6_seq_afinfo);
2209 }
2210 #endif
2211 
2212 struct proto tcpv6_prot = {
2213 	.name			= "TCPv6",
2214 	.owner			= THIS_MODULE,
2215 	.close			= tcp_close,
2216 	.connect		= tcp_v6_connect,
2217 	.disconnect		= tcp_disconnect,
2218 	.accept			= inet_csk_accept,
2219 	.ioctl			= tcp_ioctl,
2220 	.init			= tcp_v6_init_sock,
2221 	.destroy		= tcp_v6_destroy_sock,
2222 	.shutdown		= tcp_shutdown,
2223 	.setsockopt		= tcp_setsockopt,
2224 	.getsockopt		= tcp_getsockopt,
2225 	.recvmsg		= tcp_recvmsg,
2226 	.sendmsg		= tcp_sendmsg,
2227 	.sendpage		= tcp_sendpage,
2228 	.backlog_rcv		= tcp_v6_do_rcv,
2229 	.hash			= tcp_v6_hash,
2230 	.unhash			= inet_unhash,
2231 	.get_port		= inet_csk_get_port,
2232 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2233 	.sockets_allocated	= &tcp_sockets_allocated,
2234 	.memory_allocated	= &tcp_memory_allocated,
2235 	.memory_pressure	= &tcp_memory_pressure,
2236 	.orphan_count		= &tcp_orphan_count,
2237 	.sysctl_mem		= sysctl_tcp_mem,
2238 	.sysctl_wmem		= sysctl_tcp_wmem,
2239 	.sysctl_rmem		= sysctl_tcp_rmem,
2240 	.max_header		= MAX_TCP_HEADER,
2241 	.obj_size		= sizeof(struct tcp6_sock),
2242 	.slab_flags		= SLAB_DESTROY_BY_RCU,
2243 	.twsk_prot		= &tcp6_timewait_sock_ops,
2244 	.rsk_prot		= &tcp6_request_sock_ops,
2245 	.h.hashinfo		= &tcp_hashinfo,
2246 	.no_autobind		= true,
2247 #ifdef CONFIG_COMPAT
2248 	.compat_setsockopt	= compat_tcp_setsockopt,
2249 	.compat_getsockopt	= compat_tcp_getsockopt,
2250 #endif
2251 };
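/* tcpv6_prot is made reachable from socket(2) via tcpv6_protosw below;
 * the slab caches behind obj_size, twsk_prot and rsk_prot are created
 * by proto_register() during inet6 initialization.
 */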
2252 
2253 static const struct inet6_protocol tcpv6_protocol = {
2254 	.handler	=	tcp_v6_rcv,
2255 	.err_handler	=	tcp_v6_err,
2256 	.gso_send_check	=	tcp_v6_gso_send_check,
2257 	.gso_segment	=	tcp_tso_segment,
2258 	.gro_receive	=	tcp6_gro_receive,
2259 	.gro_complete	=	tcp6_gro_complete,
2260 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2261 };
2262 
2263 static struct inet_protosw tcpv6_protosw = {
2264 	.type		=	SOCK_STREAM,
2265 	.protocol	=	IPPROTO_TCP,
2266 	.prot		=	&tcpv6_prot,
2267 	.ops		=	&inet6_stream_ops,
2268 	.no_check	=	0,
2269 	.flags		=	INET_PROTOSW_PERMANENT |
2270 				INET_PROTOSW_ICSK,
2271 };
2272 
2273 static int __net_init tcpv6_net_init(struct net *net)
2274 {
2275 	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2276 				    SOCK_RAW, IPPROTO_TCP, net);
2277 }
2278 
2279 static void __net_exit tcpv6_net_exit(struct net *net)
2280 {
2281 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2282 }
2283 
2284 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2285 {
2286 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2287 }
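/* Purging in exit_batch rather than exit lets a single walk of the
 * established hash table reap the TIME_WAIT sockets of every dying
 * namespace in the batch, instead of one full walk per namespace.
 */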
2288 
2289 static struct pernet_operations tcpv6_net_ops = {
2290 	.init	    = tcpv6_net_init,
2291 	.exit	    = tcpv6_net_exit,
2292 	.exit_batch = tcpv6_net_exit_batch,
2293 };
2294 
2295 int __init tcpv6_init(void)
2296 {
2297 	int ret;
2298 
2299 	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2300 	if (ret)
2301 		goto out;
2302 
2303 	/* register inet6 protocol */
2304 	ret = inet6_register_protosw(&tcpv6_protosw);
2305 	if (ret)
2306 		goto out_tcpv6_protocol;
2307 
2308 	ret = register_pernet_subsys(&tcpv6_net_ops);
2309 	if (ret)
2310 		goto out_tcpv6_protosw;
2311 out:
2312 	return ret;
2313 
2314 out_tcpv6_protosw:
2315 	inet6_unregister_protosw(&tcpv6_protosw);
2316 out_tcpv6_protocol:
2317 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2318 	goto out;
2319 }
2320 
2321 void tcpv6_exit(void)
2322 {
2323 	unregister_pernet_subsys(&tcpv6_net_ops);
2324 	inet6_unregister_protosw(&tcpv6_protosw);
2325 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2326 }
2327