/*
 *	DCCP over IPv6
 *	Linux INET6 implementation
 *
 *	Based on net/dccp6/ipv6.c
 *
 *	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/xfrm.h>

#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/inet6_connection_sock.h>
#include <net/inet6_hashtables.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include "dccp.h"
#include "ipv6.h"
#include "feat.h"

/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */

static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;

static void dccp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != DCCP_CLOSED) {
		if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
			inet_hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk);
		local_bh_enable();
	}
}

/* add pseudo-header to DCCP checksum stored in skb->csum */
static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
					  struct in6_addr *saddr,
					  struct in6_addr *daddr)
{
	return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
}

static inline void dccp_v6_send_check(struct sock *sk, int unused_value,
				      struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_hdr *dh = dccp_hdr(skb);

	dccp_csum_outgoing(skb);
	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
}

static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
						  __be16 sport, __be16 dport)
{
	return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
}

static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					     ipv6_hdr(skb)->saddr.s6_addr32,
					     dccp_hdr(skb)->dccph_dport,
					     dccp_hdr(skb)->dccph_sport);
}

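/*
 * ICMPv6 error handler, registered as the err_handler of dccp_v6_protocol
 * below: looks up the socket owning the offending segment, validates the
 * quoted sequence number, and either adjusts the path MTU (PKT_TOOBIG) or
 * reports the error to the socket/request.
 */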
static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			int type, int code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
	struct dccp_sock *dp;
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	__u64 seq;
	struct net *net = dev_net(skb->dev);

	if (skb->len < offset + sizeof(*dh) ||
	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	sk = inet6_lookup(net, &dccp_hashinfo,
			  &hdr->daddr, dh->dccph_dport,
			  &hdr->saddr, dh->dccph_sport, inet6_iif(skb));

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	dp = dccp_sk(sk);
	seq = dccp_hdr_seq(dh);
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);
		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle the rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_DCCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;
			security_sk_classify_flow(sk, &fl);

			err = ip6_dst_lookup(sk, &dst, &fl);
			if (err) {
				sk->sk_err_soft = -err;
				goto out;
			}

			err = xfrm_lookup(net, &dst, &fl, sk, 0);
			if (err < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}
		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			dccp_sync_mss(sk, dst_mtu(dst));
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case DCCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
					   &hdr->daddr, &hdr->saddr,
					   inet6_iif(skb));
		if (req == NULL)
			goto out;

		/*
		 * ICMPs are not backlogged, hence we cannot get an established
		 * socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != dccp_rsk(req)->dreq_iss) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case DCCP_REQUESTING:
	case DCCP_RESPOND:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			/*
			 * Wake people up to see the error
			 * (see connect in sock.c)
			 */
			sk->sk_error_report(sk);
			dccp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

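/*
 * Build and send a DCCP-Response for a pending connection request: route
 * the flow (honouring a routing header in np->opt, if any), checksum over
 * the IPv6 pseudo-header and transmit via ip6_xmit().
 */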
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
{
	struct inet6_request_sock *ireq6 = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p = NULL, final;
	struct flowi fl;
	int err = -1;
	struct dst_entry *dst;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_DCCP;
	ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = ireq6->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, &fl);

	opt = np->opt;

	if (opt != NULL && opt->srcrt != NULL) {
		const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;

		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto done;

	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0);
	if (err < 0)
		goto done;

	skb = dccp_make_response(sk, dst, req);
	if (skb != NULL) {
		struct dccp_hdr *dh = dccp_hdr(skb);

		dh->dccph_checksum = dccp_v6_csum_finish(skb,
							 &ireq6->loc_addr,
							 &ireq6->rmt_addr);
		ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		err = net_xmit_eval(err);
	}

done:
	if (opt != NULL && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
	if (inet6_rsk(req)->pktopts != NULL)
		kfree_skb(inet6_rsk(req)->pktopts);
}

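/*
 * Send a Reset for a packet that has no usable socket. The reply goes out
 * through the per-net control socket (dccp.v6_ctl_sk), so no state is
 * created, and a Reset is never sent in response to another Reset.
 */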
static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
	struct ipv6hdr *rxip6h;
	struct sk_buff *skb;
	struct flowi fl;
	struct net *net = dev_net(rxskb->dst->dev);
	struct sock *ctl_sk = net->dccp.v6_ctl_sk;

	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
		return;

	if (!ipv6_unicast_destination(rxskb))
		return;

	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
	if (skb == NULL)
		return;

	rxip6h = ipv6_hdr(rxskb);
	dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
							    &rxip6h->daddr);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &rxip6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &rxip6h->daddr);

	fl.proto = IPPROTO_DCCP;
	fl.oif = inet6_iif(rxskb);
	fl.fl_ip_dport = dccp_hdr(skb)->dccph_dport;
	fl.fl_ip_sport = dccp_hdr(skb)->dccph_sport;
	security_skb_classify_flow(rxskb, &fl);

	/* sk = NULL, but it is safe for now. RST socket required. */
	if (!ip6_dst_lookup(ctl_sk, &skb->dst, &fl)) {
		if (xfrm_lookup(net, &skb->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(ctl_sk, skb, &fl, NULL, 0);
			DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
			DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(skb);
}

static struct request_sock_ops dccp6_request_sock_ops = {
	.family		= AF_INET6,
	.obj_size	= sizeof(struct dccp6_request_sock),
	.rtx_syn_ack	= dccp_v6_send_response,
	.send_ack	= dccp_reqsk_send_ack,
	.destructor	= dccp_v6_reqsk_destructor,
	.send_reset	= dccp_v6_ctl_send_reset,
};

static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet6_csk_search_req(sk, &prev,
							dh->dccph_sport,
							&iph->saddr,
							&iph->daddr,
							inet6_iif(skb));
	if (req != NULL)
		return dccp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
					 &iph->saddr, dh->dccph_sport,
					 &iph->daddr, ntohs(dh->dccph_dport),
					 inet6_iif(skb));
	if (nsk != NULL) {
		if (nsk->sk_state != DCCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

	return sk;
}

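/*
 * Process a Request arriving on a listening socket: allocate and set up a
 * request_sock, parse options, record the peer addresses and answer with
 * dccp_v6_send_response(). IPv4-mapped traffic is handed to the v4 code.
 */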
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req;
	struct dccp_request_sock *dreq;
	struct inet6_request_sock *ireq6;
	struct ipv6_pinfo *np = inet6_sk(sk);
	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		return 0;	/* discard, don't send a reset here */

	if (dccp_bad_service_code(sk, service)) {
		dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
		goto drop;
	}
	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
	if (req == NULL)
		goto drop;

	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
		goto drop_and_free;

	dreq = dccp_rsk(req);
	if (dccp_parse_options(sk, dreq, skb))
		goto drop_and_free;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	ireq6 = inet6_rsk(req);
	ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);

	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		ireq6->pktopts = skb;
	}
	ireq6->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq6->iif = inet6_iif(skb);

	/*
	 * Step 3: Process LISTEN state
	 *
	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *
	 * In fact we defer setting S.GSR, S.SWL, S.SWH to
	 * dccp_create_openreq_child.
	 */
	dreq->dreq_isr	   = dcb->dccpd_seq;
	dreq->dreq_iss	   = dccp_v6_init_sequence(skb);
	dreq->dreq_service = service;

	if (dccp_v6_send_response(sk, req))
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
	return -1;
}

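/*
 * Create the child socket once the handshake succeeds. The v6-mapped case
 * is delegated to dccp_v4_request_recv_sock(); for native IPv6 the route
 * is looked up (if not cached), the listener's options are inherited and
 * the new socket is inserted into the established hash.
 */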
static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req,
					      struct dst_entry *dst)
{
	struct inet6_request_sock *ireq6 = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct inet_sock *newinet;
	struct dccp_sock *newdp;
	struct dccp6_sock *newdp6;
	struct sock *newsk;
	struct ipv6_txoptions *opt;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
		if (newsk == NULL)
			return NULL;

		newdp6 = (struct dccp6_sock *)newsk;
		newdp = dccp_sk(newsk);
		newinet = inet_sk(newsk);
		newinet->pinet6 = &newdp6->inet6;
		newnp = inet6_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
		newnp->pktoptions = NULL;
		newnp->opt	  = NULL;
		newnp->mcast_oif  = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, dccp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_DCCP;
		ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
		if (opt != NULL && opt->srcrt != NULL) {
			const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;

			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_rsk(req)->loc_port;
		security_sk_classify_flow(sk, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, dccp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	__ip6_dst_store(newsk, dst, NULL, NULL);
	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
						      NETIF_F_TSO);
	newdp6 = (struct dccp6_sock *)newsk;
	newinet = inet_sk(newsk);
	newinet->pinet6 = &newdp6->inet6;
	newdp = dccp_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
	newsk->sk_bound_dev_if = ireq6->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq6->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
		kfree_skb(ireq6->pktopts);
		ireq6->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/*
	 * Clone native IPv6 options from listening socket (if any)
	 *
	 * Yes, keeping reference count would be much more clever, but we do
	 * one more thing here: reattach optmem to newsk.
	 */
	if (opt != NULL) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt != NULL)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	dccp_sync_mss(newsk, dst_mtu(dst));

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

	__inet6_hash(newsk);
	__inet_inherit_port(sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	if (opt != NULL && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, dccp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv is currently
	 * called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		/*
		 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
		 * (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
		 */
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == DCCP_OPEN) {	/* Fast path */
		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb) {
			/* XXX This is where we would goto ipv6_pktoptions. */
			__kfree_skb(opt_skb);
		}
		return 0;
	}

	/*
	 *  Step 3: Process LISTEN state
	 *     If S.state == LISTEN,
	 *	 If P.type == Request or P contains a valid Init Cookie option,
	 *	      (* Must scan the packet's options to check for Init
	 *		 Cookies.  Only Init Cookies are processed here,
	 *		 however; other options are processed in Step 8.  This
	 *		 scan need only be performed if the endpoint uses Init
	 *		 Cookies *)
	 *	      (* Generate a new socket and switch to that socket *)
	 *	      Set S := new socket for this port pair
	 *	      S.state = RESPOND
	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
	 *	      Initialize S.GAR := S.ISS
	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
	 *	      Continue with S.state == RESPOND
	 *	      (* A Response packet will be generated in Step 11 *)
	 *	 Otherwise,
	 *	      Generate Reset(No Connection) unless P.type == Reset
	 *	      Drop packet and return
	 *
	 * NOTE: the check for the packet types is done in
	 *	 dccp_rcv_state_process
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		struct sock *nsk = dccp_v6_hnd_req(sk, skb);

		if (nsk == NULL)
			goto discard;
		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just short-circuit this and continue with
		 * the new socket.
		 */
		if (nsk != sk) {
			if (dccp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb != NULL)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb) {
		/* XXX This is where we would goto ipv6_pktoptions. */
		__kfree_skb(opt_skb);
	}
	return 0;

reset:
	dccp_v6_ctl_send_reset(sk, skb);
discard:
	if (opt_skb != NULL)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
}

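/*
 * Main receive routine, registered below as the inet6 protocol handler
 * for IPPROTO_DCCP: checks header sanity and checksum, looks up the
 * owning socket (Steps 1 and 2 of the RFC 4340 packet processing) and
 * delivers the packet via sk_receive_skb().
 */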
static int dccp_v6_rcv(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;
	struct sock *sk;
	int min_cov;

	/*  Step 1: Check header basics */

	if (dccp_invalid_packet(skb))
		goto discard_it;

	/* Step 1: If header checksum is incorrect, drop packet and return. */
	if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr)) {
		DCCP_WARN("dropped packet with invalid checksum\n");
		goto discard_it;
	}

	dh = dccp_hdr(skb);

	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

	if (dccp_packet_without_ack(skb))
		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
	else
		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

	/* Step 2:
	 *	Look up flow ID in table and get corresponding socket */
	sk = __inet6_lookup_skb(&dccp_hashinfo, skb,
				dh->dccph_sport, dh->dccph_dport);
	/*
	 * Step 2:
	 *	If no socket ...
	 */
	if (sk == NULL) {
		dccp_pr_debug("failed to look up flow ID in table and "
			      "get corresponding socket\n");
		goto no_dccp_socket;
	}

	/*
	 * Step 2:
	 *	... or S.state == TIMEWAIT,
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk->sk_state == DCCP_TIME_WAIT) {
		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
		inet_twsk_put(inet_twsk(sk));
		goto no_dccp_socket;
	}

	/*
	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
	 *	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
	 */
	min_cov = dccp_sk(sk)->dccps_pcrlen;
	if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
			      dh->dccph_cscov, min_cov);
		/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	return sk_receive_skb(sk, skb, 1) ? -1 : 0;

no_dccp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;
	/*
	 * Step 2:
	 *	If no socket ...
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (dh->dccph_type != DCCP_PKT_RESET) {
		DCCP_SKB_CB(skb)->dccpd_reset_code =
					DCCP_RESET_CODE_NO_CONNECTION;
		dccp_v6_ctl_send_reset(sk, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;
}

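/*
 * Active open for a DCCPv6 socket: resolves flow label and scope id,
 * falls back to the v6-mapped (IPv4) path for mapped addresses, routes
 * the flow, chooses the initial sequence number and starts the handshake
 * via dccp_connect().
 */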
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			   int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}
	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 * DCCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &dccp_ipv6_mapped;
		sk->sk_backlog_rcv = dccp_v4_do_rcv;

		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
			sk->sk_backlog_rcv = dccp_v6_do_rcv;
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_DCCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;
	security_sk_classify_flow(sk, &fl);

	if (np->opt != NULL && np->opt->srcrt != NULL) {
		const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;

		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;

	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
	if (err < 0) {
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt != NULL)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	inet->dport = usin->sin6_port;

	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet6_hash_connect(&dccp_death_row, sk);
	if (err)
		goto late_failure;

	dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
						      np->daddr.s6_addr32,
						      inet->sport, inet->dport);
	err = dccp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	dccp_set_state(sk, DCCP_CLOSED);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

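/*
 *	DCCP over native IPv6
 */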
static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = dccp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = dccp_v6_conn_request,
	.syn_recv_sock	   = dccp_v6_request_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/*
 *	DCCP over IPv4 via INET6 API
 */
static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = dccp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = dccp_v6_conn_request,
	.syn_recv_sock	   = dccp_v6_request_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int dccp_v6_init_sock(struct sock *sk)
{
	static __u8 dccp_v6_ctl_sock_initialized;
	int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);

	if (err == 0) {
		if (unlikely(!dccp_v6_ctl_sock_initialized))
			dccp_v6_ctl_sock_initialized = 1;
		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
	}

	return err;
}

static void dccp_v6_destroy_sock(struct sock *sk)
{
	dccp_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

static struct timewait_sock_ops dccp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
};

static struct proto dccp_v6_prot = {
	.name		   = "DCCPv6",
	.owner		   = THIS_MODULE,
	.close		   = dccp_close,
	.connect	   = dccp_v6_connect,
	.disconnect	   = dccp_disconnect,
	.ioctl		   = dccp_ioctl,
	.init		   = dccp_v6_init_sock,
	.setsockopt	   = dccp_setsockopt,
	.getsockopt	   = dccp_getsockopt,
	.sendmsg	   = dccp_sendmsg,
	.recvmsg	   = dccp_recvmsg,
	.backlog_rcv	   = dccp_v6_do_rcv,
	.hash		   = dccp_v6_hash,
	.unhash		   = inet_unhash,
	.accept		   = inet_csk_accept,
	.get_port	   = inet_csk_get_port,
	.shutdown	   = dccp_shutdown,
	.destroy	   = dccp_v6_destroy_sock,
	.orphan_count	   = &dccp_orphan_count,
	.max_header	   = MAX_DCCP_HEADER,
	.obj_size	   = sizeof(struct dccp6_sock),
	.slab_flags	   = SLAB_DESTROY_BY_RCU,
	.rsk_prot	   = &dccp6_request_sock_ops,
	.twsk_prot	   = &dccp6_timewait_sock_ops,
	.h.hashinfo	   = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_dccp_setsockopt,
	.compat_getsockopt = compat_dccp_getsockopt,
#endif
};

static struct inet6_protocol dccp_v6_protocol = {
	.handler	= dccp_v6_rcv,
	.err_handler	= dccp_v6_err,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static struct proto_ops inet6_dccp_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = inet6_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = inet_accept,
	.getname	   = inet6_getname,
	.poll		   = dccp_poll,
	.ioctl		   = inet6_ioctl,
	.listen		   = inet_dccp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = sock_common_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw dccp_v6_protosw = {
	.type		= SOCK_DCCP,
	.protocol	= IPPROTO_DCCP,
	.prot		= &dccp_v6_prot,
	.ops		= &inet6_dccp_ops,
	.capability	= -1,
	.flags		= INET_PROTOSW_ICSK,
};

static int dccp_v6_init_net(struct net *net)
{
	int err;

	err = inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
				   SOCK_DCCP, IPPROTO_DCCP, net);
	return err;
}

static void dccp_v6_exit_net(struct net *net)
{
	inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
}

static struct pernet_operations dccp_v6_ops = {
	.init	= dccp_v6_init_net,
	.exit	= dccp_v6_exit_net,
};

static int __init dccp_v6_init(void)
{
	int err = proto_register(&dccp_v6_prot, 1);

	if (err != 0)
		goto out;

	err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	if (err != 0)
		goto out_unregister_proto;

	inet6_register_protosw(&dccp_v6_protosw);

	err = register_pernet_subsys(&dccp_v6_ops);
	if (err != 0)
		goto out_destroy_ctl_sock;
out:
	return err;

out_destroy_ctl_sock:
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
out_unregister_proto:
	proto_unregister(&dccp_v6_prot);
	goto out;
}

static void __exit dccp_v6_exit(void)
{
	unregister_pernet_subsys(&dccp_v6_ops);
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
	proto_unregister(&dccp_v6_prot);
}

module_init(dccp_v6_init);
module_exit(dccp_v6_exit);

/*
 * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
 * values directly. Also cover the case where the protocol is not specified,
 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Control Protocol");