xref: /openbmc/linux/net/dccp/ipv6.c (revision 8730046c)
1 /*
2  *	DCCP over IPv6
3  *	Linux INET6 implementation
4  *
5  *	Based on net/dccp6/ipv6.c
6  *
7  *	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
8  *
9  *	This program is free software; you can redistribute it and/or
10  *      modify it under the terms of the GNU General Public License
11  *      as published by the Free Software Foundation; either version
12  *      2 of the License, or (at your option) any later version.
13  */
14 
15 #include <linux/module.h>
16 #include <linux/random.h>
17 #include <linux/slab.h>
18 #include <linux/xfrm.h>
19 
20 #include <net/addrconf.h>
21 #include <net/inet_common.h>
22 #include <net/inet_hashtables.h>
23 #include <net/inet_sock.h>
24 #include <net/inet6_connection_sock.h>
25 #include <net/inet6_hashtables.h>
26 #include <net/ip6_route.h>
27 #include <net/ipv6.h>
28 #include <net/protocol.h>
29 #include <net/transp_v6.h>
30 #include <net/ip6_checksum.h>
31 #include <net/xfrm.h>
32 #include <net/secure_seq.h>
33 
34 #include "dccp.h"
35 #include "ipv6.h"
36 #include "feat.h"
37 
38 /* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */
39 
40 static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
41 static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
42 
43 /* add pseudo-header to DCCP checksum stored in skb->csum */
44 static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
45 				      const struct in6_addr *saddr,
46 				      const struct in6_addr *daddr)
47 {
48 	return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
49 }
50 
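/* Checksum the outgoing skb and finish it with the IPv6 pseudo-header. */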
51 static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
52 {
53 	struct ipv6_pinfo *np = inet6_sk(sk);
54 	struct dccp_hdr *dh = dccp_hdr(skb);
55 
56 	dccp_csum_outgoing(skb);
57 	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr);
58 }
59 
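/* Pick the initial sequence number for a new connection from the packet's
 * address/port 4-tuple (see secure_dccpv6_sequence_number()).
 */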
60 static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
61 {
62 	return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
63 					     ipv6_hdr(skb)->saddr.s6_addr32,
64 					     dccp_hdr(skb)->dccph_dport,
65 					     dccp_hdr(skb)->dccph_sport);
66 
67 }
68 
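/*
 * ICMPv6 error handler, called via icmpv6_notify(): look up the affected
 * socket and handle redirects, PMTU changes and hard/soft errors.
 */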
69 static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
70 			u8 type, u8 code, int offset, __be32 info)
71 {
72 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
73 	const struct dccp_hdr *dh;
74 	struct dccp_sock *dp;
75 	struct ipv6_pinfo *np;
76 	struct sock *sk;
77 	int err;
78 	__u64 seq;
79 	struct net *net = dev_net(skb->dev);
80 
81 	/* Only need dccph_dport & dccph_sport, which are the first
82 	 * 4 bytes of the DCCP header.
83 	 * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
84 	 */
85 	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
86 	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
87 	dh = (struct dccp_hdr *)(skb->data + offset);
88 
89 	sk = __inet6_lookup_established(net, &dccp_hashinfo,
90 					&hdr->daddr, dh->dccph_dport,
91 					&hdr->saddr, ntohs(dh->dccph_sport),
92 					inet6_iif(skb));
93 
94 	if (!sk) {
95 		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
96 				  ICMP6_MIB_INERRORS);
97 		return;
98 	}
99 
100 	if (sk->sk_state == DCCP_TIME_WAIT) {
101 		inet_twsk_put(inet_twsk(sk));
102 		return;
103 	}
104 	seq = dccp_hdr_seq(dh);
105 	if (sk->sk_state == DCCP_NEW_SYN_RECV)
106 		return dccp_req_err(sk, seq);
107 
108 	bh_lock_sock(sk);
109 	if (sock_owned_by_user(sk))
110 		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
111 
112 	if (sk->sk_state == DCCP_CLOSED)
113 		goto out;
114 
115 	dp = dccp_sk(sk);
116 	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
117 	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
118 		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
119 		goto out;
120 	}
121 
122 	np = inet6_sk(sk);
123 
124 	if (type == NDISC_REDIRECT) {
125 		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
126 
127 		if (dst)
128 			dst->ops->redirect(dst, sk, skb);
129 		goto out;
130 	}
131 
132 	if (type == ICMPV6_PKT_TOOBIG) {
133 		struct dst_entry *dst = NULL;
134 
135 		if (!ip6_sk_accept_pmtu(sk))
136 			goto out;
137 
138 		if (sock_owned_by_user(sk))
139 			goto out;
140 		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
141 			goto out;
142 
143 		dst = inet6_csk_update_pmtu(sk, ntohl(info));
144 		if (!dst)
145 			goto out;
146 
147 		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
148 			dccp_sync_mss(sk, dst_mtu(dst));
149 		goto out;
150 	}
151 
152 	icmpv6_err_convert(type, code, &err);
153 
154 	/* Might be for a request_sock */
155 	switch (sk->sk_state) {
156 	case DCCP_REQUESTING:
157 	case DCCP_RESPOND:  /* Cannot happen.
158 			       It can, if SYNs are crossed. --ANK */
159 		if (!sock_owned_by_user(sk)) {
160 			__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
161 			sk->sk_err = err;
162 			/*
163 			 * Wake people up to see the error
164 			 * (see connect in sock.c)
165 			 */
166 			sk->sk_error_report(sk);
167 			dccp_done(sk);
168 		} else
169 			sk->sk_err_soft = err;
170 		goto out;
171 	}
172 
173 	if (!sock_owned_by_user(sk) && np->recverr) {
174 		sk->sk_err = err;
175 		sk->sk_error_report(sk);
176 	} else
177 		sk->sk_err_soft = err;
178 
179 out:
180 	bh_unlock_sock(sk);
181 	sock_put(sk);
182 }
183 
184 
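/* Build and transmit the Response packet answering the connection request. */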
185 static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req)
186 {
187 	struct inet_request_sock *ireq = inet_rsk(req);
188 	struct ipv6_pinfo *np = inet6_sk(sk);
189 	struct sk_buff *skb;
190 	struct in6_addr *final_p, final;
191 	struct flowi6 fl6;
192 	int err = -1;
193 	struct dst_entry *dst;
194 
195 	memset(&fl6, 0, sizeof(fl6));
196 	fl6.flowi6_proto = IPPROTO_DCCP;
197 	fl6.daddr = ireq->ir_v6_rmt_addr;
198 	fl6.saddr = ireq->ir_v6_loc_addr;
199 	fl6.flowlabel = 0;
200 	fl6.flowi6_oif = ireq->ir_iif;
201 	fl6.fl6_dport = ireq->ir_rmt_port;
202 	fl6.fl6_sport = htons(ireq->ir_num);
203 	security_req_classify_flow(req, flowi6_to_flowi(&fl6));
204 
205 
206 	rcu_read_lock();
207 	final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
208 	rcu_read_unlock();
209 
210 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
211 	if (IS_ERR(dst)) {
212 		err = PTR_ERR(dst);
213 		dst = NULL;
214 		goto done;
215 	}
216 
217 	skb = dccp_make_response(sk, dst, req);
218 	if (skb != NULL) {
219 		struct dccp_hdr *dh = dccp_hdr(skb);
220 		struct ipv6_txoptions *opt;
221 
222 		dh->dccph_checksum = dccp_v6_csum_finish(skb,
223 							 &ireq->ir_v6_loc_addr,
224 							 &ireq->ir_v6_rmt_addr);
225 		fl6.daddr = ireq->ir_v6_rmt_addr;
226 		rcu_read_lock();
227 		opt = ireq->ipv6_opt;
228 		if (!opt)
229 			opt = rcu_dereference(np->opt);
230 		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
231 		rcu_read_unlock();
232 		err = net_xmit_eval(err);
233 	}
234 
235 done:
236 	dst_release(dst);
237 	return err;
238 }
239 
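/* Release per-request state: feature-negotiation list, IPv6 options and pktopts. */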
240 static void dccp_v6_reqsk_destructor(struct request_sock *req)
241 {
242 	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
243 	kfree(inet_rsk(req)->ipv6_opt);
244 	kfree_skb(inet_rsk(req)->pktopts);
245 }
246 
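/* Send a Reset in reply to @rxskb, using the per-net control socket. */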
247 static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
248 {
249 	const struct ipv6hdr *rxip6h;
250 	struct sk_buff *skb;
251 	struct flowi6 fl6;
252 	struct net *net = dev_net(skb_dst(rxskb)->dev);
253 	struct sock *ctl_sk = net->dccp.v6_ctl_sk;
254 	struct dst_entry *dst;
255 
256 	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
257 		return;
258 
259 	if (!ipv6_unicast_destination(rxskb))
260 		return;
261 
262 	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
263 	if (skb == NULL)
264 		return;
265 
266 	rxip6h = ipv6_hdr(rxskb);
267 	dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
268 							    &rxip6h->daddr);
269 
270 	memset(&fl6, 0, sizeof(fl6));
271 	fl6.daddr = rxip6h->saddr;
272 	fl6.saddr = rxip6h->daddr;
273 
274 	fl6.flowi6_proto = IPPROTO_DCCP;
275 	fl6.flowi6_oif = inet6_iif(rxskb);
276 	fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
277 	fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
278 	security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));
279 
280 	/* sk = NULL, but it is safe for now. RST socket required. */
281 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
282 	if (!IS_ERR(dst)) {
283 		skb_dst_set(skb, dst);
284 		ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
285 		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
286 		DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
287 		return;
288 	}
289 
290 	kfree_skb(skb);
291 }
292 
293 static struct request_sock_ops dccp6_request_sock_ops = {
294 	.family		= AF_INET6,
295 	.obj_size	= sizeof(struct dccp6_request_sock),
296 	.rtx_syn_ack	= dccp_v6_send_response,
297 	.send_ack	= dccp_reqsk_send_ack,
298 	.destructor	= dccp_v6_reqsk_destructor,
299 	.send_reset	= dccp_v6_ctl_send_reset,
300 	.syn_ack_timeout = dccp_syn_ack_timeout,
301 };
302 
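/*
 * Process a connection request arriving on a listening socket (RFC 4340
 * Step 3, "Process LISTEN state"): allocate a request_sock and send a Response.
 */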
303 static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
304 {
305 	struct request_sock *req;
306 	struct dccp_request_sock *dreq;
307 	struct inet_request_sock *ireq;
308 	struct ipv6_pinfo *np = inet6_sk(sk);
309 	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
310 	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
311 
312 	if (skb->protocol == htons(ETH_P_IP))
313 		return dccp_v4_conn_request(sk, skb);
314 
315 	if (!ipv6_unicast_destination(skb))
316 		return 0;	/* discard, don't send a reset here */
317 
318 	if (dccp_bad_service_code(sk, service)) {
319 		dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
320 		goto drop;
321 	}
322 	/*
323 	 * There are no SYN attacks on IPv6, yet...
324 	 */
325 	dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
326 	if (inet_csk_reqsk_queue_is_full(sk))
327 		goto drop;
328 
329 	if (sk_acceptq_is_full(sk))
330 		goto drop;
331 
332 	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk, true);
333 	if (req == NULL)
334 		goto drop;
335 
336 	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
337 		goto drop_and_free;
338 
339 	dreq = dccp_rsk(req);
340 	if (dccp_parse_options(sk, dreq, skb))
341 		goto drop_and_free;
342 
343 	if (security_inet_conn_request(sk, skb, req))
344 		goto drop_and_free;
345 
346 	ireq = inet_rsk(req);
347 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
348 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
349 	ireq->ireq_family = AF_INET6;
350 
351 	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
352 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
353 	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
354 		atomic_inc(&skb->users);
355 		ireq->pktopts = skb;
356 	}
357 	ireq->ir_iif = sk->sk_bound_dev_if;
358 
359 	/* So that link locals have meaning */
360 	if (!sk->sk_bound_dev_if &&
361 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
362 		ireq->ir_iif = inet6_iif(skb);
363 
364 	/*
365 	 * Step 3: Process LISTEN state
366 	 *
367 	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
368 	 *
369 	 * Setting S.SWL/S.SWH is deferred to dccp_create_openreq_child().
370 	 */
371 	dreq->dreq_isr	   = dcb->dccpd_seq;
372 	dreq->dreq_gsr     = dreq->dreq_isr;
373 	dreq->dreq_iss	   = dccp_v6_init_sequence(skb);
374 	dreq->dreq_gss     = dreq->dreq_iss;
375 	dreq->dreq_service = service;
376 
377 	if (dccp_v6_send_response(sk, req))
378 		goto drop_and_free;
379 
380 	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
381 	return 0;
382 
383 drop_and_free:
384 	reqsk_free(req);
385 drop:
386 	__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
387 	return -1;
388 }
389 
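/*
 * Create the child socket once the handshake completes. Handles both the
 * IPv4-mapped case (delegating to dccp_v4_request_recv_sock()) and native IPv6.
 */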
390 static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
391 					      struct sk_buff *skb,
392 					      struct request_sock *req,
393 					      struct dst_entry *dst,
394 					      struct request_sock *req_unhash,
395 					      bool *own_req)
396 {
397 	struct inet_request_sock *ireq = inet_rsk(req);
398 	struct ipv6_pinfo *newnp;
399 	const struct ipv6_pinfo *np = inet6_sk(sk);
400 	struct ipv6_txoptions *opt;
401 	struct inet_sock *newinet;
402 	struct dccp6_sock *newdp6;
403 	struct sock *newsk;
404 
405 	if (skb->protocol == htons(ETH_P_IP)) {
406 		/*
407 		 *	v6 mapped
408 		 */
409 		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst,
410 						  req_unhash, own_req);
411 		if (newsk == NULL)
412 			return NULL;
413 
414 		newdp6 = (struct dccp6_sock *)newsk;
415 		newinet = inet_sk(newsk);
416 		newinet->pinet6 = &newdp6->inet6;
417 		newnp = inet6_sk(newsk);
418 
419 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
420 
421 		newnp->saddr = newsk->sk_v6_rcv_saddr;
422 
423 		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
424 		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
425 		newnp->pktoptions  = NULL;
426 		newnp->opt	   = NULL;
427 		newnp->mcast_oif   = inet6_iif(skb);
428 		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
429 
430 		/*
431 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
432 		 * here, dccp_create_openreq_child now does this for us, see the comment in
433 		 * that function for the gory details. -acme
434 		 */
435 
436 		/* This is a tricky place. Until this moment the IPv4 code
437 		   worked with the IPv6 icsk.icsk_af_ops.
438 		   Sync it now.
439 		 */
440 		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
441 
442 		return newsk;
443 	}
444 
445 
446 	if (sk_acceptq_is_full(sk))
447 		goto out_overflow;
448 
449 	if (!dst) {
450 		struct flowi6 fl6;
451 
452 		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_DCCP);
453 		if (!dst)
454 			goto out;
455 	}
456 
457 	newsk = dccp_create_openreq_child(sk, req, skb);
458 	if (newsk == NULL)
459 		goto out_nonewsk;
460 
461 	/*
462 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
463 	 * count here, dccp_create_openreq_child now does this for us, see the
464 	 * comment in that function for the gory details. -acme
465 	 */
466 
467 	ip6_dst_store(newsk, dst, NULL, NULL);
468 	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
469 						      NETIF_F_TSO);
470 	newdp6 = (struct dccp6_sock *)newsk;
471 	newinet = inet_sk(newsk);
472 	newinet->pinet6 = &newdp6->inet6;
473 	newnp = inet6_sk(newsk);
474 
475 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
476 
477 	newsk->sk_v6_daddr	= ireq->ir_v6_rmt_addr;
478 	newnp->saddr		= ireq->ir_v6_loc_addr;
479 	newsk->sk_v6_rcv_saddr	= ireq->ir_v6_loc_addr;
480 	newsk->sk_bound_dev_if	= ireq->ir_iif;
481 
482 	/* Now IPv6 options...
483 
484 	   First: no IPv4 options.
485 	 */
486 	newinet->inet_opt = NULL;
487 
488 	/* Clone RX bits */
489 	newnp->rxopt.all = np->rxopt.all;
490 
491 	newnp->pktoptions = NULL;
492 	newnp->opt	  = NULL;
493 	newnp->mcast_oif  = inet6_iif(skb);
494 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
495 
496 	/*
497 	 * Clone native IPv6 options from listening socket (if any)
498 	 *
499 	 * Yes, keeping a reference count would be much more clever, but we do
500 	 * one more thing here: reattach optmem to newsk.
501 	 */
502 	opt = ireq->ipv6_opt;
503 	if (!opt)
504 		opt = rcu_dereference(np->opt);
505 	if (opt) {
506 		opt = ipv6_dup_options(newsk, opt);
507 		RCU_INIT_POINTER(newnp->opt, opt);
508 	}
509 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
510 	if (opt)
511 		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
512 						    opt->opt_flen;
513 
514 	dccp_sync_mss(newsk, dst_mtu(dst));
515 
516 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
517 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
518 
519 	if (__inet_inherit_port(sk, newsk) < 0) {
520 		inet_csk_prepare_forced_close(newsk);
521 		dccp_done(newsk);
522 		goto out;
523 	}
524 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
525 	/* Clone pktoptions received with SYN, if we own the req */
526 	if (*own_req && ireq->pktopts) {
527 		newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
528 		consume_skb(ireq->pktopts);
529 		ireq->pktopts = NULL;
530 		if (newnp->pktoptions)
531 			skb_set_owner_r(newnp->pktoptions, newsk);
532 	}
533 
534 	return newsk;
535 
536 out_overflow:
537 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
538 out_nonewsk:
539 	dst_release(dst);
540 out:
541 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
542 	return NULL;
543 }
544 
545 /* The socket must have its spinlock held when we get
546  * here.
547  *
548  * We have a potential double-lock case here, so even when
549  * doing backlog processing we use the BH locking scheme.
550  * This is because we cannot sleep with the original spinlock
551  * held.
552  */
553 static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
554 {
555 	struct ipv6_pinfo *np = inet6_sk(sk);
556 	struct sk_buff *opt_skb = NULL;
557 
558 	/* Imagine: the socket is IPv6. An IPv4 packet arrives,
559 	   goes to the IPv4 receive handler and is backlogged.
560 	   From the backlog it always ends up here. Kerboom...
561 	   Fortunately, dccp_rcv_established and rcv_established
562 	   handle them correctly, but that is not the case with
563 	   dccp_v6_hnd_req and dccp_v6_ctl_send_reset().   --ANK
564 	 */
565 
566 	if (skb->protocol == htons(ETH_P_IP))
567 		return dccp_v4_do_rcv(sk, skb);
568 
569 	if (sk_filter(sk, skb))
570 		goto discard;
571 
572 	/*
573 	 * socket locking is here for SMP purposes as backlog rcv is currently
574 	 * called with bh processing disabled.
575 	 */
576 
577 	/* Do Stevens' IPV6_PKTOPTIONS.
578 
579 	   Yes, guys, this is the only place in our code where we
580 	   can make it not affect IPv4.
581 	   The rest of the code is protocol independent,
582 	   and I do not like the idea of uglifying IPv4.
583 
584 	   Actually, the whole idea behind IPV6_PKTOPTIONS
585 	   looks not very well thought out. For now we latch the
586 	   options received in the last packet, as enqueued
587 	   by tcp. Feel free to propose a better solution.
588 					       --ANK (980728)
589 	 */
590 	if (np->rxopt.all)
591 	/*
592 	 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
593 	 *        (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
594 	 */
595 		opt_skb = skb_clone(skb, GFP_ATOMIC);
596 
597 	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
598 		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
599 			goto reset;
600 		if (opt_skb) {
601 			/* XXX This is where we would goto ipv6_pktoptions. */
602 			__kfree_skb(opt_skb);
603 		}
604 		return 0;
605 	}
606 
607 	/*
608 	 *  Step 3: Process LISTEN state
609 	 *     If S.state == LISTEN,
610 	 *	 If P.type == Request or P contains a valid Init Cookie option,
611 	 *	      (* Must scan the packet's options to check for Init
612 	 *		 Cookies.  Only Init Cookies are processed here,
613 	 *		 however; other options are processed in Step 8.  This
614 	 *		 scan need only be performed if the endpoint uses Init
615 	 *		 Cookies *)
616 	 *	      (* Generate a new socket and switch to that socket *)
617 	 *	      Set S := new socket for this port pair
618 	 *	      S.state = RESPOND
619 	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
620 	 *	      Initialize S.GAR := S.ISS
621 	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
622 	 *	      Continue with S.state == RESPOND
623 	 *	      (* A Response packet will be generated in Step 11 *)
624 	 *	 Otherwise,
625 	 *	      Generate Reset(No Connection) unless P.type == Reset
626 	 *	      Drop packet and return
627 	 *
628 	 * NOTE: the check for the packet types is done in
629 	 *	 dccp_rcv_state_process
630 	 */
631 
632 	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
633 		goto reset;
634 	if (opt_skb) {
635 		/* XXX This is where we would goto ipv6_pktoptions. */
636 		__kfree_skb(opt_skb);
637 	}
638 	return 0;
639 
640 reset:
641 	dccp_v6_ctl_send_reset(sk, skb);
642 discard:
643 	if (opt_skb != NULL)
644 		__kfree_skb(opt_skb);
645 	kfree_skb(skb);
646 	return 0;
647 }
648 
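/* Protocol input handler registered for IPPROTO_DCCP (see dccp_v6_protocol). */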
649 static int dccp_v6_rcv(struct sk_buff *skb)
650 {
651 	const struct dccp_hdr *dh;
652 	bool refcounted;
653 	struct sock *sk;
654 	int min_cov;
655 
656 	/* Step 1: Check header basics */
657 
658 	if (dccp_invalid_packet(skb))
659 		goto discard_it;
660 
661 	/* Step 1: If header checksum is incorrect, drop packet and return. */
662 	if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
663 				     &ipv6_hdr(skb)->daddr)) {
664 		DCCP_WARN("dropped packet with invalid checksum\n");
665 		goto discard_it;
666 	}
667 
668 	dh = dccp_hdr(skb);
669 
670 	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
671 	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
672 
673 	if (dccp_packet_without_ack(skb))
674 		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
675 	else
676 		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
677 
678 lookup:
679 	sk = __inet6_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh),
680 			        dh->dccph_sport, dh->dccph_dport,
681 				inet6_iif(skb), &refcounted);
682 	if (!sk) {
683 		dccp_pr_debug("failed to look up flow ID in table and "
684 			      "get corresponding socket\n");
685 		goto no_dccp_socket;
686 	}
687 
688 	/*
689 	 * Step 2:
690 	 *	... or S.state == TIMEWAIT,
691 	 *		Generate Reset(No Connection) unless P.type == Reset
692 	 *		Drop packet and return
693 	 */
694 	if (sk->sk_state == DCCP_TIME_WAIT) {
695 		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
696 		inet_twsk_put(inet_twsk(sk));
697 		goto no_dccp_socket;
698 	}
699 
700 	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
701 		struct request_sock *req = inet_reqsk(sk);
702 		struct sock *nsk;
703 
704 		sk = req->rsk_listener;
705 		if (unlikely(sk->sk_state != DCCP_LISTEN)) {
706 			inet_csk_reqsk_queue_drop_and_put(sk, req);
707 			goto lookup;
708 		}
709 		sock_hold(sk);
710 		refcounted = true;
711 		nsk = dccp_check_req(sk, skb, req);
712 		if (!nsk) {
713 			reqsk_put(req);
714 			goto discard_and_relse;
715 		}
716 		if (nsk == sk) {
717 			reqsk_put(req);
718 		} else if (dccp_child_process(sk, nsk, skb)) {
719 			dccp_v6_ctl_send_reset(sk, skb);
720 			goto discard_and_relse;
721 		} else {
722 			sock_put(sk);
723 			return 0;
724 		}
725 	}
726 	/*
727 	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
728 	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
729 	 *	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
730 	 */
731 	min_cov = dccp_sk(sk)->dccps_pcrlen;
732 	if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
733 		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
734 			      dh->dccph_cscov, min_cov);
735 		/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
736 		goto discard_and_relse;
737 	}
738 
739 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
740 		goto discard_and_relse;
741 
742 	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4,
743 				refcounted) ? -1 : 0;
744 
745 no_dccp_socket:
746 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
747 		goto discard_it;
748 	/*
749 	 * Step 2:
750 	 *	If no socket ...
751 	 *		Generate Reset(No Connection) unless P.type == Reset
752 	 *		Drop packet and return
753 	 */
754 	if (dh->dccph_type != DCCP_PKT_RESET) {
755 		DCCP_SKB_CB(skb)->dccpd_reset_code =
756 					DCCP_RESET_CODE_NO_CONNECTION;
757 		dccp_v6_ctl_send_reset(sk, skb);
758 	}
759 
760 discard_it:
761 	kfree_skb(skb);
762 	return 0;
763 
764 discard_and_relse:
765 	if (refcounted)
766 		sock_put(sk);
767 	goto discard_it;
768 }
769 
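/* Active open: route the flow, bind a local port and send the initial Request. */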
770 static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
771 			   int addr_len)
772 {
773 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
774 	struct inet_connection_sock *icsk = inet_csk(sk);
775 	struct inet_sock *inet = inet_sk(sk);
776 	struct ipv6_pinfo *np = inet6_sk(sk);
777 	struct dccp_sock *dp = dccp_sk(sk);
778 	struct in6_addr *saddr = NULL, *final_p, final;
779 	struct ipv6_txoptions *opt;
780 	struct flowi6 fl6;
781 	struct dst_entry *dst;
782 	int addr_type;
783 	int err;
784 
785 	dp->dccps_role = DCCP_ROLE_CLIENT;
786 
787 	if (addr_len < SIN6_LEN_RFC2133)
788 		return -EINVAL;
789 
790 	if (usin->sin6_family != AF_INET6)
791 		return -EAFNOSUPPORT;
792 
793 	memset(&fl6, 0, sizeof(fl6));
794 
795 	if (np->sndflow) {
796 		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
797 		IP6_ECN_flow_init(fl6.flowlabel);
798 		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
799 			struct ip6_flowlabel *flowlabel;
800 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
801 			if (flowlabel == NULL)
802 				return -EINVAL;
803 			fl6_sock_release(flowlabel);
804 		}
805 	}
806 	/*
807 	 * connect() to INADDR_ANY means loopback (BSD'ism).
808 	 */
809 	if (ipv6_addr_any(&usin->sin6_addr))
810 		usin->sin6_addr.s6_addr[15] = 1;
811 
812 	addr_type = ipv6_addr_type(&usin->sin6_addr);
813 
814 	if (addr_type & IPV6_ADDR_MULTICAST)
815 		return -ENETUNREACH;
816 
817 	if (addr_type & IPV6_ADDR_LINKLOCAL) {
818 		if (addr_len >= sizeof(struct sockaddr_in6) &&
819 		    usin->sin6_scope_id) {
820 			/* If interface is set while binding, indices
821 			 * must coincide.
822 			 */
823 			if (sk->sk_bound_dev_if &&
824 			    sk->sk_bound_dev_if != usin->sin6_scope_id)
825 				return -EINVAL;
826 
827 			sk->sk_bound_dev_if = usin->sin6_scope_id;
828 		}
829 
830 		/* Connecting to a link-local address requires an interface */
831 		if (!sk->sk_bound_dev_if)
832 			return -EINVAL;
833 	}
834 
835 	sk->sk_v6_daddr = usin->sin6_addr;
836 	np->flow_label = fl6.flowlabel;
837 
838 	/*
839 	 * DCCP over IPv4
840 	 */
841 	if (addr_type == IPV6_ADDR_MAPPED) {
842 		u32 exthdrlen = icsk->icsk_ext_hdr_len;
843 		struct sockaddr_in sin;
844 
845 		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
846 
847 		if (__ipv6_only_sock(sk))
848 			return -ENETUNREACH;
849 
850 		sin.sin_family = AF_INET;
851 		sin.sin_port = usin->sin6_port;
852 		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
853 
854 		icsk->icsk_af_ops = &dccp_ipv6_mapped;
855 		sk->sk_backlog_rcv = dccp_v4_do_rcv;
856 
857 		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
858 		if (err) {
859 			icsk->icsk_ext_hdr_len = exthdrlen;
860 			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
861 			sk->sk_backlog_rcv = dccp_v6_do_rcv;
862 			goto failure;
863 		}
864 		np->saddr = sk->sk_v6_rcv_saddr;
865 		return err;
866 	}
867 
868 	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
869 		saddr = &sk->sk_v6_rcv_saddr;
870 
871 	fl6.flowi6_proto = IPPROTO_DCCP;
872 	fl6.daddr = sk->sk_v6_daddr;
873 	fl6.saddr = saddr ? *saddr : np->saddr;
874 	fl6.flowi6_oif = sk->sk_bound_dev_if;
875 	fl6.fl6_dport = usin->sin6_port;
876 	fl6.fl6_sport = inet->inet_sport;
877 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
878 
879 	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
880 	final_p = fl6_update_dst(&fl6, opt, &final);
881 
882 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
883 	if (IS_ERR(dst)) {
884 		err = PTR_ERR(dst);
885 		goto failure;
886 	}
887 
888 	if (saddr == NULL) {
889 		saddr = &fl6.saddr;
890 		sk->sk_v6_rcv_saddr = *saddr;
891 	}
892 
893 	/* set the source address */
894 	np->saddr = *saddr;
895 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
896 
897 	ip6_dst_store(sk, dst, NULL, NULL);
898 
899 	icsk->icsk_ext_hdr_len = 0;
900 	if (opt)
901 		icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
902 
903 	inet->inet_dport = usin->sin6_port;
904 
905 	dccp_set_state(sk, DCCP_REQUESTING);
906 	err = inet6_hash_connect(&dccp_death_row, sk);
907 	if (err)
908 		goto late_failure;
909 
910 	dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
911 						      sk->sk_v6_daddr.s6_addr32,
912 						      inet->inet_sport,
913 						      inet->inet_dport);
914 	err = dccp_connect(sk);
915 	if (err)
916 		goto late_failure;
917 
918 	return 0;
919 
920 late_failure:
921 	dccp_set_state(sk, DCCP_CLOSED);
922 	__sk_dst_reset(sk);
923 failure:
924 	inet->inet_dport = 0;
925 	sk->sk_route_caps = 0;
926 	return err;
927 }
928 
929 static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
930 	.queue_xmit	   = inet6_csk_xmit,
931 	.send_check	   = dccp_v6_send_check,
932 	.rebuild_header	   = inet6_sk_rebuild_header,
933 	.conn_request	   = dccp_v6_conn_request,
934 	.syn_recv_sock	   = dccp_v6_request_recv_sock,
935 	.net_header_len	   = sizeof(struct ipv6hdr),
936 	.setsockopt	   = ipv6_setsockopt,
937 	.getsockopt	   = ipv6_getsockopt,
938 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
939 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
940 	.bind_conflict	   = inet6_csk_bind_conflict,
941 #ifdef CONFIG_COMPAT
942 	.compat_setsockopt = compat_ipv6_setsockopt,
943 	.compat_getsockopt = compat_ipv6_getsockopt,
944 #endif
945 };
946 
947 /*
948  *	DCCP over IPv4 via INET6 API
949  */
950 static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
951 	.queue_xmit	   = ip_queue_xmit,
952 	.send_check	   = dccp_v4_send_check,
953 	.rebuild_header	   = inet_sk_rebuild_header,
954 	.conn_request	   = dccp_v6_conn_request,
955 	.syn_recv_sock	   = dccp_v6_request_recv_sock,
956 	.net_header_len	   = sizeof(struct iphdr),
957 	.setsockopt	   = ipv6_setsockopt,
958 	.getsockopt	   = ipv6_getsockopt,
959 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
960 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
961 	.bind_conflict	   = inet6_csk_bind_conflict,
962 #ifdef CONFIG_COMPAT
963 	.compat_setsockopt = compat_ipv6_setsockopt,
964 	.compat_getsockopt = compat_ipv6_getsockopt,
965 #endif
966 };
967 
968 /* NOTE: A lot of things are set to zero explicitly by the call to
969  *       sk_alloc(), so they need not be done here.
970  */
971 static int dccp_v6_init_sock(struct sock *sk)
972 {
973 	static __u8 dccp_v6_ctl_sock_initialized;
974 	int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);
975 
976 	if (err == 0) {
977 		if (unlikely(!dccp_v6_ctl_sock_initialized))
978 			dccp_v6_ctl_sock_initialized = 1;
979 		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
980 	}
981 
982 	return err;
983 }
984 
985 static void dccp_v6_destroy_sock(struct sock *sk)
986 {
987 	dccp_destroy_sock(sk);
988 	inet6_destroy_sock(sk);
989 }
990 
991 static struct timewait_sock_ops dccp6_timewait_sock_ops = {
992 	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
993 };
994 
995 static struct proto dccp_v6_prot = {
996 	.name		   = "DCCPv6",
997 	.owner		   = THIS_MODULE,
998 	.close		   = dccp_close,
999 	.connect	   = dccp_v6_connect,
1000 	.disconnect	   = dccp_disconnect,
1001 	.ioctl		   = dccp_ioctl,
1002 	.init		   = dccp_v6_init_sock,
1003 	.setsockopt	   = dccp_setsockopt,
1004 	.getsockopt	   = dccp_getsockopt,
1005 	.sendmsg	   = dccp_sendmsg,
1006 	.recvmsg	   = dccp_recvmsg,
1007 	.backlog_rcv	   = dccp_v6_do_rcv,
1008 	.hash		   = inet6_hash,
1009 	.unhash		   = inet_unhash,
1010 	.accept		   = inet_csk_accept,
1011 	.get_port	   = inet_csk_get_port,
1012 	.shutdown	   = dccp_shutdown,
1013 	.destroy	   = dccp_v6_destroy_sock,
1014 	.orphan_count	   = &dccp_orphan_count,
1015 	.max_header	   = MAX_DCCP_HEADER,
1016 	.obj_size	   = sizeof(struct dccp6_sock),
1017 	.slab_flags	   = SLAB_DESTROY_BY_RCU,
1018 	.rsk_prot	   = &dccp6_request_sock_ops,
1019 	.twsk_prot	   = &dccp6_timewait_sock_ops,
1020 	.h.hashinfo	   = &dccp_hashinfo,
1021 #ifdef CONFIG_COMPAT
1022 	.compat_setsockopt = compat_dccp_setsockopt,
1023 	.compat_getsockopt = compat_dccp_getsockopt,
1024 #endif
1025 };
1026 
1027 static const struct inet6_protocol dccp_v6_protocol = {
1028 	.handler	= dccp_v6_rcv,
1029 	.err_handler	= dccp_v6_err,
1030 	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1031 };
1032 
1033 static const struct proto_ops inet6_dccp_ops = {
1034 	.family		   = PF_INET6,
1035 	.owner		   = THIS_MODULE,
1036 	.release	   = inet6_release,
1037 	.bind		   = inet6_bind,
1038 	.connect	   = inet_stream_connect,
1039 	.socketpair	   = sock_no_socketpair,
1040 	.accept		   = inet_accept,
1041 	.getname	   = inet6_getname,
1042 	.poll		   = dccp_poll,
1043 	.ioctl		   = inet6_ioctl,
1044 	.listen		   = inet_dccp_listen,
1045 	.shutdown	   = inet_shutdown,
1046 	.setsockopt	   = sock_common_setsockopt,
1047 	.getsockopt	   = sock_common_getsockopt,
1048 	.sendmsg	   = inet_sendmsg,
1049 	.recvmsg	   = sock_common_recvmsg,
1050 	.mmap		   = sock_no_mmap,
1051 	.sendpage	   = sock_no_sendpage,
1052 #ifdef CONFIG_COMPAT
1053 	.compat_setsockopt = compat_sock_common_setsockopt,
1054 	.compat_getsockopt = compat_sock_common_getsockopt,
1055 #endif
1056 };
1057 
1058 static struct inet_protosw dccp_v6_protosw = {
1059 	.type		= SOCK_DCCP,
1060 	.protocol	= IPPROTO_DCCP,
1061 	.prot		= &dccp_v6_prot,
1062 	.ops		= &inet6_dccp_ops,
1063 	.flags		= INET_PROTOSW_ICSK,
1064 };
1065 
1066 static int __net_init dccp_v6_init_net(struct net *net)
1067 {
1068 	if (dccp_hashinfo.bhash == NULL)
1069 		return -ESOCKTNOSUPPORT;
1070 
1071 	return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
1072 				    SOCK_DCCP, IPPROTO_DCCP, net);
1073 }
1074 
1075 static void __net_exit dccp_v6_exit_net(struct net *net)
1076 {
1077 	inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
1078 }
1079 
1080 static struct pernet_operations dccp_v6_ops = {
1081 	.init   = dccp_v6_init_net,
1082 	.exit   = dccp_v6_exit_net,
1083 };
1084 
1085 static int __init dccp_v6_init(void)
1086 {
1087 	int err = proto_register(&dccp_v6_prot, 1);
1088 
1089 	if (err != 0)
1090 		goto out;
1091 
1092 	err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1093 	if (err != 0)
1094 		goto out_unregister_proto;
1095 
1096 	inet6_register_protosw(&dccp_v6_protosw);
1097 
1098 	err = register_pernet_subsys(&dccp_v6_ops);
1099 	if (err != 0)
1100 		goto out_destroy_ctl_sock;
1101 out:
1102 	return err;
1103 
1104 out_destroy_ctl_sock:
1105 	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1106 	inet6_unregister_protosw(&dccp_v6_protosw);
1107 out_unregister_proto:
1108 	proto_unregister(&dccp_v6_prot);
1109 	goto out;
1110 }
1111 
1112 static void __exit dccp_v6_exit(void)
1113 {
1114 	unregister_pernet_subsys(&dccp_v6_ops);
1115 	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1116 	inet6_unregister_protosw(&dccp_v6_protosw);
1117 	proto_unregister(&dccp_v6_prot);
1118 }
1119 
1120 module_init(dccp_v6_init);
1121 module_exit(dccp_v6_exit);
1122 
1123 /*
1124  * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
1125  * values directly. Also cover the case where the protocol is not specified,
1126  * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
1127  */
1128 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
1129 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
1130 MODULE_LICENSE("GPL");
1131 MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1132 MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");
1133