/*
 *  DCCP over IPv6
 *  Linux INET6 implementation
 *
 *  Based on net/dccp6/ipv6.c
 *
 *  Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/xfrm.h>

#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/inet6_connection_sock.h>
#include <net/inet6_hashtables.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>

#include "dccp.h"
#include "ipv6.h"
#include "feat.h"

/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */

static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;

/* add pseudo-header to DCCP checksum stored in skb->csum */
static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
					  const struct in6_addr *saddr,
					  const struct in6_addr *daddr)
{
	return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
}

static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_hdr *dh = dccp_hdr(skb);

	dccp_csum_outgoing(skb);
	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr);
}

static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					     ipv6_hdr(skb)->saddr.s6_addr32,
					     dccp_hdr(skb)->dccph_dport,
					     dccp_hdr(skb)->dccph_sport);
}

static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
	struct dccp_sock *dp;
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	__u64 seq;
	struct net *net = dev_net(skb->dev);

	if (skb->len < offset + sizeof(*dh) ||
	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	sk = __inet6_lookup_established(net, &dccp_hashinfo,
					&hdr->daddr, dh->dccph_dport,
					&hdr->saddr, ntohs(dh->dccph_sport),
					inet6_iif(skb));

	if (!sk) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = dccp_hdr_seq(dh);
	if (sk->sk_state == DCCP_NEW_SYN_RECV)
		return dccp_req_err(sk, seq);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	dp = dccp_sk(sk);
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
			goto out;

		dst = inet6_csk_update_pmtu(sk, ntohl(info));
		if (!dst)
			goto out;

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
			dccp_sync_mss(sk, dst_mtu(dst));
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case DCCP_REQUESTING:
	case DCCP_RESPOND:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			/*
			 * Wake people up to see the error
			 * (see connect in sock.c)
			 */
			sk->sk_error_report(sk);
			dccp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct in6_addr *final_p, final;
	struct flowi6 fl6;
	int err = -1;
	struct dst_entry *dst;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.daddr = ireq->ir_v6_rmt_addr;
	fl6.saddr = ireq->ir_v6_loc_addr;
	fl6.flowlabel = 0;
	fl6.flowi6_oif = ireq->ir_iif;
	fl6.fl6_dport = ireq->ir_rmt_port;
	fl6.fl6_sport = htons(ireq->ir_num);
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}

	skb = dccp_make_response(sk, dst, req);
	if (skb != NULL) {
		struct dccp_hdr *dh = dccp_hdr(skb);

		dh->dccph_checksum = dccp_v6_csum_finish(skb,
							 &ireq->ir_v6_loc_addr,
							 &ireq->ir_v6_rmt_addr);
		fl6.daddr = ireq->ir_v6_rmt_addr;
		err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	dst_release(dst);
	return err;
}

static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
	kfree_skb(inet_rsk(req)->pktopts);
}

static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
	const struct ipv6hdr *rxip6h;
	struct sk_buff *skb;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(rxskb)->dev);
	struct sock *ctl_sk = net->dccp.v6_ctl_sk;
	struct dst_entry *dst;

	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
		return;

	if (!ipv6_unicast_destination(rxskb))
		return;

	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
	if (skb == NULL)
		return;

	rxip6h = ipv6_hdr(rxskb);
	dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
							    &rxip6h->daddr);

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = rxip6h->saddr;
	fl6.saddr = rxip6h->daddr;

	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.flowi6_oif = inet6_iif(rxskb);
	fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
	fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
	security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));

	/* sk = NULL, but it is safe for now. RST socket required. */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(skb, dst);
		ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(skb);
}

static struct request_sock_ops dccp6_request_sock_ops = {
	.family		= AF_INET6,
	.obj_size	= sizeof(struct dccp6_request_sock),
	.rtx_syn_ack	= dccp_v6_send_response,
	.send_ack	= dccp_reqsk_send_ack,
	.destructor	= dccp_v6_reqsk_destructor,
	.send_reset	= dccp_v6_ctl_send_reset,
	.syn_ack_timeout = dccp_syn_ack_timeout,
};

static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct request_sock *req;
	struct sock *nsk;

	req = inet6_csk_search_req(sk, dh->dccph_sport, &iph->saddr,
				   &iph->daddr, inet6_iif(skb));
	if (req) {
		nsk = dccp_check_req(sk, skb, req);
		reqsk_put(req);
		return nsk;
	}
	nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
					 &iph->saddr, dh->dccph_sport,
					 &iph->daddr, ntohs(dh->dccph_dport),
					 inet6_iif(skb));
	if (nsk != NULL) {
		if (nsk->sk_state != DCCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

	return sk;
}

static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req;
	struct dccp_request_sock *dreq;
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		return 0;	/* discard, don't send a reset here */

	if (dccp_bad_service_code(sk, service)) {
		dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
		goto drop;
	}
	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk);
	if (req == NULL)
		goto drop;

	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
		goto drop_and_free;

	dreq = dccp_rsk(req);
	if (dccp_parse_options(sk, dreq, skb))
		goto drop_and_free;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	ireq = inet_rsk(req);
	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
	ireq->ireq_family = AF_INET6;

	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
	ireq->ir_iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = inet6_iif(skb);

	/*
	 * Step 3: Process LISTEN state
	 *
	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *
	 * Setting S.SWL/S.SWH is deferred to dccp_create_openreq_child().
	 */
	dreq->dreq_isr	   = dcb->dccpd_seq;
	dreq->dreq_gsr	   = dreq->dreq_isr;
	dreq->dreq_iss	   = dccp_v6_init_sequence(skb);
	dreq->dreq_gss	   = dreq->dreq_iss;
	dreq->dreq_service = service;

	if (dccp_v6_send_response(sk, req))
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
	return -1;
}

static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req,
					      struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct inet_sock *newinet;
	struct dccp6_sock *newdp6;
	struct sock *newsk;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
		if (newsk == NULL)
			return NULL;

		newdp6 = (struct dccp6_sock *)newsk;
		newinet = inet_sk(newsk);
		newinet->pinet6 = &newdp6->inet6;
		newnp = inet6_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
		newnp->pktoptions = NULL;
		newnp->opt	  = NULL;
		newnp->mcast_oif  = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, dccp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p, final;
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_DCCP;
		fl6.daddr = ireq->ir_v6_rmt_addr;
		final_p = fl6_update_dst(&fl6, np->opt, &final);
		fl6.saddr = ireq->ir_v6_loc_addr;
		fl6.flowi6_oif = sk->sk_bound_dev_if;
		fl6.fl6_dport = ireq->ir_rmt_port;
		fl6.fl6_sport = htons(ireq->ir_num);
		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

		dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
		if (IS_ERR(dst))
			goto out;
	}

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, dccp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	__ip6_dst_store(newsk, dst, NULL, NULL);
	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
						      NETIF_F_TSO);
	newdp6 = (struct dccp6_sock *)newsk;
	newinet = inet_sk(newsk);
	newinet->pinet6 = &newdp6->inet6;
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr	= ireq->ir_v6_rmt_addr;
	newnp->saddr		= ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr	= ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if	= ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/*
	 * Clone native IPv6 options from listening socket (if any)
	 *
	 * Yes, keeping reference count would be much more clever, but we do
	 * one more thing here: reattach optmem to newsk.
	 */
	if (np->opt != NULL)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt != NULL)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	dccp_sync_mss(newsk, dst_mtu(dst));

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		dccp_done(newsk);
		goto out;
	}
	__inet_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, dccp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv is currently
	 * called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		/*
		 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
		 * (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
		 */
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb) {
			/* XXX This is where we would goto ipv6_pktoptions. */
			__kfree_skb(opt_skb);
		}
		return 0;
	}

	/*
	 *  Step 3: Process LISTEN state
	 *     If S.state == LISTEN,
	 *	 If P.type == Request or P contains a valid Init Cookie option,
	 *	      (* Must scan the packet's options to check for Init
	 *		 Cookies.  Only Init Cookies are processed here,
	 *		 however; other options are processed in Step 8.  This
	 *		 scan need only be performed if the endpoint uses Init
	 *		 Cookies *)
	 *	      (* Generate a new socket and switch to that socket *)
	 *	      Set S := new socket for this port pair
	 *	      S.state = RESPOND
	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
	 *	      Initialize S.GAR := S.ISS
	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
	 *	      Continue with S.state == RESPOND
	 *	      (* A Response packet will be generated in Step 11 *)
	 *	 Otherwise,
	 *	      Generate Reset(No Connection) unless P.type == Reset
	 *	      Drop packet and return
	 *
	 * NOTE: the check for the packet types is done in
	 *	 dccp_rcv_state_process
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		struct sock *nsk = dccp_v6_hnd_req(sk, skb);

		if (nsk == NULL)
			goto discard;
		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (dccp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb != NULL)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb) {
		/* XXX This is where we would goto ipv6_pktoptions.
		 */
		__kfree_skb(opt_skb);
	}
	return 0;

reset:
	dccp_v6_ctl_send_reset(sk, skb);
discard:
	if (opt_skb != NULL)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
}

static int dccp_v6_rcv(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;
	struct sock *sk;
	int min_cov;

	/* Step 1: Check header basics */

	if (dccp_invalid_packet(skb))
		goto discard_it;

	/* Step 1: If header checksum is incorrect, drop packet and return. */
	if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr)) {
		DCCP_WARN("dropped packet with invalid checksum\n");
		goto discard_it;
	}

	dh = dccp_hdr(skb);

	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

	if (dccp_packet_without_ack(skb))
		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
	else
		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

	/* Step 2:
	 *	Look up flow ID in table and get corresponding socket */
	sk = __inet6_lookup_skb(&dccp_hashinfo, skb,
				dh->dccph_sport, dh->dccph_dport,
				inet6_iif(skb));
	/*
	 * Step 2:
	 *	If no socket ...
	 */
	if (sk == NULL) {
		dccp_pr_debug("failed to look up flow ID in table and "
			      "get corresponding socket\n");
		goto no_dccp_socket;
	}

	/*
	 * Step 2:
	 *	... or S.state == TIMEWAIT,
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk->sk_state == DCCP_TIME_WAIT) {
		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
		inet_twsk_put(inet_twsk(sk));
		goto no_dccp_socket;
	}

	/*
	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
	 *	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
	 */
	min_cov = dccp_sk(sk)->dccps_pcrlen;
	if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
			      dh->dccph_cscov, min_cov);
		/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	return sk_receive_skb(sk, skb, 1) ? -1 : 0;

no_dccp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;
	/*
	 * Step 2:
	 *	If no socket ...
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (dh->dccph_type != DCCP_PKT_RESET) {
		DCCP_SKB_CB(skb)->dccpd_reset_code =
					DCCP_RESET_CODE_NO_CONNECTION;
		dccp_v6_ctl_send_reset(sk, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;
}

static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			   int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}
	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 * DCCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &dccp_ipv6_mapped;
		sk->sk_backlog_rcv = dccp_v4_do_rcv;

		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
			sk->sk_backlog_rcv = dccp_v6_do_rcv;
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;
		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt != NULL)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	inet->inet_dport = usin->sin6_port;

	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet6_hash_connect(&dccp_death_row, sk);
	if (err)
		goto late_failure;

	dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
						      sk->sk_v6_daddr.s6_addr32,
						      inet->inet_sport,
						      inet->inet_dport);
	err = dccp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	dccp_set_state(sk, DCCP_CLOSED);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = dccp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = dccp_v6_conn_request,
	.syn_recv_sock	   = dccp_v6_request_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/*
 *	DCCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = dccp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = dccp_v6_conn_request,
	.syn_recv_sock	   = dccp_v6_request_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/* NOTE: A lot of things set to zero explicitly by call to
 *	 sk_alloc() so need not be done here.
 */
static int dccp_v6_init_sock(struct sock *sk)
{
	static __u8 dccp_v6_ctl_sock_initialized;
	int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);

	if (err == 0) {
		if (unlikely(!dccp_v6_ctl_sock_initialized))
			dccp_v6_ctl_sock_initialized = 1;
		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
	}

	return err;
}

static void dccp_v6_destroy_sock(struct sock *sk)
{
	dccp_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

static struct timewait_sock_ops dccp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
};

static struct proto dccp_v6_prot = {
	.name		   = "DCCPv6",
	.owner		   = THIS_MODULE,
	.close		   = dccp_close,
	.connect	   = dccp_v6_connect,
	.disconnect	   = dccp_disconnect,
	.ioctl		   = dccp_ioctl,
	.init		   = dccp_v6_init_sock,
	.setsockopt	   = dccp_setsockopt,
	.getsockopt	   = dccp_getsockopt,
	.sendmsg	   = dccp_sendmsg,
	.recvmsg	   = dccp_recvmsg,
	.backlog_rcv	   = dccp_v6_do_rcv,
	.hash		   = inet_hash,
	.unhash		   = inet_unhash,
	.accept		   = inet_csk_accept,
	.get_port	   = inet_csk_get_port,
	.shutdown	   = dccp_shutdown,
	.destroy	   = dccp_v6_destroy_sock,
	.orphan_count	   = &dccp_orphan_count,
	.max_header	   = MAX_DCCP_HEADER,
	.obj_size	   = sizeof(struct dccp6_sock),
	.slab_flags	   = SLAB_DESTROY_BY_RCU,
	.rsk_prot	   = &dccp6_request_sock_ops,
	.twsk_prot	   = &dccp6_timewait_sock_ops,
	.h.hashinfo	   = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_dccp_setsockopt,
	.compat_getsockopt = compat_dccp_getsockopt,
#endif
};

static const struct inet6_protocol dccp_v6_protocol = {
	.handler	= dccp_v6_rcv,
	.err_handler	= dccp_v6_err,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static const struct proto_ops inet6_dccp_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = inet6_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = inet_accept,
	.getname	   = inet6_getname,
	.poll		   = dccp_poll,
	.ioctl		   = inet6_ioctl,
	.listen		   = inet_dccp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = sock_common_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw dccp_v6_protosw = {
	.type		= SOCK_DCCP,
	.protocol	= IPPROTO_DCCP,
	.prot		= &dccp_v6_prot,
	.ops		= &inet6_dccp_ops,
	.flags		= INET_PROTOSW_ICSK,
};

static int __net_init dccp_v6_init_net(struct net *net)
{
	if (dccp_hashinfo.bhash == NULL)
		return -ESOCKTNOSUPPORT;

	return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
				    SOCK_DCCP, IPPROTO_DCCP, net);
}

static void __net_exit dccp_v6_exit_net(struct net *net)
{
	inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
}

static struct pernet_operations dccp_v6_ops = {
	.init   = dccp_v6_init_net,
	.exit   = dccp_v6_exit_net,
};

static int __init dccp_v6_init(void)
{
	int err = proto_register(&dccp_v6_prot, 1);

	if (err != 0)
		goto out;

	err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	if (err != 0)
		goto out_unregister_proto;

	inet6_register_protosw(&dccp_v6_protosw);

	err = register_pernet_subsys(&dccp_v6_ops);
	if (err != 0)
		goto out_destroy_ctl_sock;
out:
	return err;

out_destroy_ctl_sock:
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
out_unregister_proto:
	proto_unregister(&dccp_v6_prot);
	goto out;
}

static void __exit dccp_v6_exit(void)
{
	unregister_pernet_subsys(&dccp_v6_ops);
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
	proto_unregister(&dccp_v6_prot);
}

module_init(dccp_v6_init);
module_exit(dccp_v6_exit);

/*
 * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
 * values directly here. Also cover the case where the protocol is not
 * specified, i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");