xref: /openbmc/linux/net/dccp/ipv6.c (revision d78c317f)
/*
 *	DCCP over IPv6
 *	Linux INET6 implementation
 *
 *	Based on net/dccp6/ipv6.c
 *
 *	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/xfrm.h>

#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/inet6_connection_sock.h>
#include <net/inet6_hashtables.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>

#include "dccp.h"
#include "ipv6.h"
#include "feat.h"

/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */

static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;

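/*
 * Hash an open socket into the DCCP lookup tables: v4-mapped sockets
 * (icsk_af_ops == dccp_ipv6_mapped) take the IPv4 inet_hash() path,
 * native IPv6 sockets use __inet6_hash() with BHs disabled.
 */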
static void dccp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != DCCP_CLOSED) {
		if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
			inet_hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

/* add pseudo-header to DCCP checksum stored in skb->csum */
static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
					  const struct in6_addr *saddr,
					  const struct in6_addr *daddr)
{
	return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
}

static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_hdr *dh = dccp_hdr(skb);

	dccp_csum_outgoing(skb);
	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
}

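/*
 * Compute an initial sequence number keyed on the received packet's
 * IPv6 address/port 4-tuple (destination first, so that the local
 * endpoint is the packet's destination).
 */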
static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					     ipv6_hdr(skb)->saddr.s6_addr32,
					     dccp_hdr(skb)->dccph_dport,
					     dccp_hdr(skb)->dccph_sport);
}

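/*
 * ICMPv6 error handler, invoked for ICMPv6 messages that quote a DCCP
 * packet we sent: look up the owning socket, handle Packet Too Big by
 * re-routing and calling dccp_sync_mss(), and report other errors to
 * the socket (or drop the pending request_sock on a listener).
 */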
static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
	struct dccp_sock *dp;
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	__u64 seq;
	struct net *net = dev_net(skb->dev);

	if (skb->len < offset + sizeof(*dh) ||
	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	sk = inet6_lookup(net, &dccp_hashinfo,
			&hdr->daddr, dh->dccph_dport,
			&hdr->saddr, dh->dccph_sport, inet6_iif(skb));

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	dp = dccp_sk(sk);
	seq = dccp_hdr_seq(dh);
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);
		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how to
			   handle the rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_DCCP;
			fl6.daddr = np->daddr;
			fl6.saddr = np->saddr;
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}
		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			dccp_sync_mss(sk, dst_mtu(dst));
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case DCCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
					   &hdr->daddr, &hdr->saddr,
					   inet6_iif(skb));
		if (req == NULL)
			goto out;

		/*
		 * ICMPs are not backlogged, hence we cannot get an established
		 * socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != dccp_rsk(req)->dreq_iss) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case DCCP_REQUESTING:
	case DCCP_RESPOND:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			/*
			 * Wake people up to see the error
			 * (see connect in sock.c)
			 */
			sk->sk_error_report(sk);
			dccp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

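/*
 * Build and transmit a DCCP-Response for a pending connection request:
 * route towards the peer recorded in the request_sock, finish the
 * checksum over the IPv6 pseudo-header and hand the skb to ip6_xmit().
 */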
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
				 struct request_values *rv_unused)
{
	struct inet6_request_sock *ireq6 = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p, final;
	struct flowi6 fl6;
	int err = -1;
	struct dst_entry *dst;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.daddr = ireq6->rmt_addr;
	fl6.saddr = ireq6->loc_addr;
	fl6.flowlabel = 0;
	fl6.flowi6_oif = ireq6->iif;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;

	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}

	skb = dccp_make_response(sk, dst, req);
	if (skb != NULL) {
		struct dccp_hdr *dh = dccp_hdr(skb);

		dh->dccph_checksum = dccp_v6_csum_finish(skb,
							 &ireq6->loc_addr,
							 &ireq6->rmt_addr);
		fl6.daddr = ireq6->rmt_addr;
		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	if (opt != NULL && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
	if (inet6_rsk(req)->pktopts != NULL)
		kfree_skb(inet6_rsk(req)->pktopts);
}

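/*
 * Send a Reset in reply to @rxskb on the per-net control socket, e.g.
 * when no matching connection exists. Never replies to a Reset and
 * only answers unicast destinations.
 */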
static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
	const struct ipv6hdr *rxip6h;
	struct sk_buff *skb;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(rxskb)->dev);
	struct sock *ctl_sk = net->dccp.v6_ctl_sk;
	struct dst_entry *dst;

	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
		return;

	if (!ipv6_unicast_destination(rxskb))
		return;

	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
	if (skb == NULL)
		return;

	rxip6h = ipv6_hdr(rxskb);
	dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
							    &rxip6h->daddr);

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = rxip6h->saddr;
	fl6.saddr = rxip6h->daddr;

	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.flowi6_oif = inet6_iif(rxskb);
	fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
	fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
	security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));

	/* sk = NULL, but it is safe for now. RST socket required. */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(skb, dst);
		ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(skb);
}

static struct request_sock_ops dccp6_request_sock_ops = {
	.family		= AF_INET6,
	.obj_size	= sizeof(struct dccp6_request_sock),
	.rtx_syn_ack	= dccp_v6_send_response,
	.send_ack	= dccp_reqsk_send_ack,
	.destructor	= dccp_v6_reqsk_destructor,
	.send_reset	= dccp_v6_ctl_send_reset,
};

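/*
 * On a listening socket, match an incoming packet against pending
 * connection requests and already established children: a pending
 * request is handed to dccp_check_req(), an established child is
 * returned locked (or NULL for a TIME_WAIT entry), and the listening
 * socket itself is returned when nothing matches.
 */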
static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet6_csk_search_req(sk, &prev,
							dh->dccph_sport,
							&iph->saddr,
							&iph->daddr,
							inet6_iif(skb));
	if (req != NULL)
		return dccp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
					 &iph->saddr, dh->dccph_sport,
					 &iph->daddr, ntohs(dh->dccph_dport),
					 inet6_iif(skb));
	if (nsk != NULL) {
		if (nsk->sk_state != DCCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

	return sk;
}

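/*
 * Handle an incoming DCCP-Request on a listening socket: validate the
 * service code, allocate and initialise a request_sock, parse options,
 * record the addressing information and answer with a DCCP-Response.
 */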
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req;
	struct dccp_request_sock *dreq;
	struct inet6_request_sock *ireq6;
	struct ipv6_pinfo *np = inet6_sk(sk);
	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		return 0;	/* discard, don't send a reset here */

	if (dccp_bad_service_code(sk, service)) {
		dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
		goto drop;
	}
	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
	if (req == NULL)
		goto drop;

	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
		goto drop_and_free;

	dreq = dccp_rsk(req);
	if (dccp_parse_options(sk, dreq, skb))
		goto drop_and_free;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	ireq6 = inet6_rsk(req);
	ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
	ireq6->loc_addr = ipv6_hdr(skb)->daddr;

	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		ireq6->pktopts = skb;
	}
	ireq6->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq6->iif = inet6_iif(skb);

	/*
	 * Step 3: Process LISTEN state
	 *
	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *
	 *   In fact we defer setting S.GSR, S.SWL, S.SWH to
	 *   dccp_create_openreq_child.
	 */
	dreq->dreq_isr	   = dcb->dccpd_seq;
	dreq->dreq_iss	   = dccp_v6_init_sequence(skb);
	dreq->dreq_service = service;

	if (dccp_v6_send_response(sk, req, NULL))
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
	return -1;
}

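/*
 * Create the child socket once the handshake completes. For v4-mapped
 * traffic this delegates to dccp_v4_request_recv_sock() and then fixes
 * up the IPv6 state of the new socket.
 */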
static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req,
					      struct dst_entry *dst)
{
	struct inet6_request_sock *ireq6 = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct inet_sock *newinet;
	struct dccp6_sock *newdp6;
	struct sock *newsk;
	struct ipv6_txoptions *opt;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
		if (newsk == NULL)
			return NULL;

		newdp6 = (struct dccp6_sock *)newsk;
		newinet = inet_sk(newsk);
		newinet->pinet6 = &newdp6->inet6;
		newnp = inet6_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newnp->rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, dccp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment the IPv4 code
		   worked with the IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p, final;
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_DCCP;
		fl6.daddr = ireq6->rmt_addr;
		final_p = fl6_update_dst(&fl6, opt, &final);
		fl6.saddr = ireq6->loc_addr;
		fl6.flowi6_oif = sk->sk_bound_dev_if;
		fl6.fl6_dport = inet_rsk(req)->rmt_port;
		fl6.fl6_sport = inet_rsk(req)->loc_port;
		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

		dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
		if (IS_ERR(dst))
			goto out;
	}

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, dccp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	__ip6_dst_store(newsk, dst, NULL, NULL);
	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
						      NETIF_F_TSO);
	newdp6 = (struct dccp6_sock *)newsk;
	newinet = inet_sk(newsk);
	newinet->pinet6 = &newdp6->inet6;
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newnp->daddr = ireq6->rmt_addr;
	newnp->saddr = ireq6->loc_addr;
	newnp->rcv_saddr = ireq6->loc_addr;
	newsk->sk_bound_dev_if = ireq6->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq6->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
		kfree_skb(ireq6->pktopts);
		ireq6->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/*
	 * Clone native IPv6 options from the listening socket (if any).
	 *
	 * Yes, keeping a reference count would be much more clever, but we
	 * do one more thing here: reattach the optmem to newsk.
	 */
	if (opt != NULL) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt != NULL)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	dccp_sync_mss(newsk, dst_mtu(dst));

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	if (opt != NULL && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;

	/* Imagine: the socket is IPv6 and an IPv4 packet arrives,
	   goes to the IPv4 receive handler and is backlogged.
	   From the backlog it always ends up here. Kerboom...
	   Fortunately, dccp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   dccp_v6_hnd_req and dccp_v6_ctl_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv is currently
	 * called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may make it not affect IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   the options received in the last packet, enqueued
	   by tcp. Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
	/*
	 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
	 *        (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
	 */
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb) {
			/* XXX This is where we would goto ipv6_pktoptions. */
			__kfree_skb(opt_skb);
		}
		return 0;
	}

	/*
	 *  Step 3: Process LISTEN state
	 *     If S.state == LISTEN,
	 *	 If P.type == Request or P contains a valid Init Cookie option,
	 *	      (* Must scan the packet's options to check for Init
	 *		 Cookies.  Only Init Cookies are processed here,
	 *		 however; other options are processed in Step 8.  This
	 *		 scan need only be performed if the endpoint uses Init
	 *		 Cookies *)
	 *	      (* Generate a new socket and switch to that socket *)
	 *	      Set S := new socket for this port pair
	 *	      S.state = RESPOND
	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
	 *	      Initialize S.GAR := S.ISS
	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
	 *	      Continue with S.state == RESPOND
	 *	      (* A Response packet will be generated in Step 11 *)
	 *	 Otherwise,
	 *	      Generate Reset(No Connection) unless P.type == Reset
	 *	      Drop packet and return
	 *
	 * NOTE: the check for the packet types is done in
	 *	 dccp_rcv_state_process
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		struct sock *nsk = dccp_v6_hnd_req(sk, skb);

		if (nsk == NULL)
			goto discard;
		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just short-circuit this and continue with
		 * the new socket.
		 */
		if (nsk != sk) {
			if (dccp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb != NULL)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb) {
		/* XXX This is where we would goto ipv6_pktoptions. */
		__kfree_skb(opt_skb);
	}
	return 0;

reset:
	dccp_v6_ctl_send_reset(sk, skb);
discard:
	if (opt_skb != NULL)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
}

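/*
 * Main receive entry point, registered as the IPPROTO_DCCP handler via
 * inet6_add_protocol(): validate the header and checksum, look up the
 * owning socket and hand the packet over with sk_receive_skb(), or
 * answer with a Reset when no connection exists.
 */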
static int dccp_v6_rcv(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;
	struct sock *sk;
	int min_cov;

	/* Step 1: Check header basics */

	if (dccp_invalid_packet(skb))
		goto discard_it;

	/* Step 1: If header checksum is incorrect, drop packet and return. */
	if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr)) {
		DCCP_WARN("dropped packet with invalid checksum\n");
		goto discard_it;
	}

	dh = dccp_hdr(skb);

	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

	if (dccp_packet_without_ack(skb))
		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
	else
		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

	/* Step 2:
	 *	Look up flow ID in table and get corresponding socket */
	sk = __inet6_lookup_skb(&dccp_hashinfo, skb,
				dh->dccph_sport, dh->dccph_dport);
	/*
	 * Step 2:
	 *	If no socket ...
	 */
	if (sk == NULL) {
		dccp_pr_debug("failed to look up flow ID in table and "
			      "get corresponding socket\n");
		goto no_dccp_socket;
	}

	/*
	 * Step 2:
	 *	... or S.state == TIMEWAIT,
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk->sk_state == DCCP_TIME_WAIT) {
		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
		inet_twsk_put(inet_twsk(sk));
		goto no_dccp_socket;
	}

	/*
	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
	 *	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
	 */
	min_cov = dccp_sk(sk)->dccps_pcrlen;
	if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
			      dh->dccph_cscov, min_cov);
		/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	return sk_receive_skb(sk, skb, 1) ? -1 : 0;

no_dccp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;
	/*
	 * Step 2:
	 *	If no socket ...
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (dh->dccph_type != DCCP_PKT_RESET) {
		DCCP_SKB_CB(skb)->dccpd_reset_code =
					DCCP_RESET_CODE_NO_CONNECTION;
		dccp_v6_ctl_send_reset(sk, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;
}

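/*
 * Client-side connect(): handle flow labels, link-local scope ids and
 * v4-mapped destinations (falling back to dccp_v4_connect()), route
 * the flow, pick the initial sequence number and start the handshake
 * with dccp_connect().
 */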
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			   int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			usin->sin6_addr = flowlabel->dst;
			fl6_sock_release(flowlabel);
		}
	}
	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	np->daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 * DCCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &dccp_ipv6_mapped;
		sk->sk_backlog_rcv = dccp_v4_do_rcv;

		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
			sk->sk_backlog_rcv = dccp_v6_do_rcv;
			goto failure;
		}
		ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
		ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr);

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.daddr = np->daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		np->rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt != NULL)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	inet->inet_dport = usin->sin6_port;

	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet6_hash_connect(&dccp_death_row, sk);
	if (err)
		goto late_failure;

	dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
						      np->daddr.s6_addr32,
						      inet->inet_sport,
						      inet->inet_dport);
	err = dccp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	dccp_set_state(sk, DCCP_CLOSED);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = dccp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = dccp_v6_conn_request,
	.syn_recv_sock	   = dccp_v6_request_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/*
 *	DCCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = dccp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = dccp_v6_conn_request,
	.syn_recv_sock	   = dccp_v6_request_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int dccp_v6_init_sock(struct sock *sk)
{
	static __u8 dccp_v6_ctl_sock_initialized;
	int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);

	if (err == 0) {
		if (unlikely(!dccp_v6_ctl_sock_initialized))
			dccp_v6_ctl_sock_initialized = 1;
		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
	}

	return err;
}

static void dccp_v6_destroy_sock(struct sock *sk)
{
	dccp_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

static struct timewait_sock_ops dccp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
};

static struct proto dccp_v6_prot = {
	.name		   = "DCCPv6",
	.owner		   = THIS_MODULE,
	.close		   = dccp_close,
	.connect	   = dccp_v6_connect,
	.disconnect	   = dccp_disconnect,
	.ioctl		   = dccp_ioctl,
	.init		   = dccp_v6_init_sock,
	.setsockopt	   = dccp_setsockopt,
	.getsockopt	   = dccp_getsockopt,
	.sendmsg	   = dccp_sendmsg,
	.recvmsg	   = dccp_recvmsg,
	.backlog_rcv	   = dccp_v6_do_rcv,
	.hash		   = dccp_v6_hash,
	.unhash		   = inet_unhash,
	.accept		   = inet_csk_accept,
	.get_port	   = inet_csk_get_port,
	.shutdown	   = dccp_shutdown,
	.destroy	   = dccp_v6_destroy_sock,
	.orphan_count	   = &dccp_orphan_count,
	.max_header	   = MAX_DCCP_HEADER,
	.obj_size	   = sizeof(struct dccp6_sock),
	.slab_flags	   = SLAB_DESTROY_BY_RCU,
	.rsk_prot	   = &dccp6_request_sock_ops,
	.twsk_prot	   = &dccp6_timewait_sock_ops,
	.h.hashinfo	   = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_dccp_setsockopt,
	.compat_getsockopt = compat_dccp_getsockopt,
#endif
};

static const struct inet6_protocol dccp_v6_protocol = {
	.handler	= dccp_v6_rcv,
	.err_handler	= dccp_v6_err,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static const struct proto_ops inet6_dccp_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = inet6_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = inet_accept,
	.getname	   = inet6_getname,
	.poll		   = dccp_poll,
	.ioctl		   = inet6_ioctl,
	.listen		   = inet_dccp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = sock_common_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw dccp_v6_protosw = {
	.type		= SOCK_DCCP,
	.protocol	= IPPROTO_DCCP,
	.prot		= &dccp_v6_prot,
	.ops		= &inet6_dccp_ops,
	.flags		= INET_PROTOSW_ICSK,
};

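/*
 * Per-network-namespace setup and teardown: create (and later destroy)
 * the control socket used by dccp_v6_ctl_send_reset().
 */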
static int __net_init dccp_v6_init_net(struct net *net)
{
	if (dccp_hashinfo.bhash == NULL)
		return -ESOCKTNOSUPPORT;

	return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
				    SOCK_DCCP, IPPROTO_DCCP, net);
}

static void __net_exit dccp_v6_exit_net(struct net *net)
{
	inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
}

static struct pernet_operations dccp_v6_ops = {
	.init   = dccp_v6_init_net,
	.exit   = dccp_v6_exit_net,
};

static int __init dccp_v6_init(void)
{
	int err = proto_register(&dccp_v6_prot, 1);

	if (err != 0)
		goto out;

	err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	if (err != 0)
		goto out_unregister_proto;

	inet6_register_protosw(&dccp_v6_protosw);

	err = register_pernet_subsys(&dccp_v6_ops);
	if (err != 0)
		goto out_destroy_ctl_sock;
out:
	return err;

out_destroy_ctl_sock:
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
out_unregister_proto:
	proto_unregister(&dccp_v6_prot);
	goto out;
}

static void __exit dccp_v6_exit(void)
{
	unregister_pernet_subsys(&dccp_v6_ops);
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
	proto_unregister(&dccp_v6_prot);
}

module_init(dccp_v6_init);
module_exit(dccp_v6_exit);

/*
 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and
 * IPPROTO_DCCP (33) values directly. Also cover the case where the
 * protocol is not specified, i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");