/*
 *	Extension Header handling for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Andi Kleen		<ak@muc.de>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/* Changes:
 *	yoshfuji		: ensure not to overrun while parsing
 *				  tlv options.
 *	Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
 *	YOSHIFUJI Hideaki @USAGI  Register inbound extension header
 *				  handlers as inet6_protocol{}.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/icmpv6.h>

#include <net/dst.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
#include <net/xfrm.h>
#endif

#include <asm/uaccess.h>

int ipv6_find_tlv(struct sk_buff *skb, int offset, int type)
{
	const unsigned char *nh = skb_network_header(skb);
	int packet_len = skb->tail - skb->network_header;
	struct ipv6_opt_hdr *hdr;
	int len;

	if (offset + 2 > packet_len)
		goto bad;
	hdr = (struct ipv6_opt_hdr *)(nh + offset);
	len = ((hdr->hdrlen + 1) << 3);

	if (offset + len > packet_len)
		goto bad;

	offset += 2;
	len -= 2;

	while (len > 0) {
		int opttype = nh[offset];
		int optlen;

		if (opttype == type)
			return offset;

		switch (opttype) {
		case IPV6_TLV_PAD0:
			optlen = 1;
			break;
		default:
			optlen = nh[offset + 1] + 2;
			if (optlen > len)
				goto bad;
			break;
		}
		offset += optlen;
		len -= optlen;
	}
	/* not found */
bad:
	return -1;
}
EXPORT_SYMBOL_GPL(ipv6_find_tlv);

/*
 *	Parsing tlv encoded headers.
 *
 *	Parsing function "func" returns 1 if parsing succeeded
 *	and 0 if it failed.
 *	It MUST NOT touch the transport header (skb->h).
 */

struct tlvtype_proc {
	int	type;
	int	(*func)(struct sk_buff *skb, int offset);
};

/*********************
  Generic functions
 *********************/

/*
 *	An unknown option has been detected; decide what to do based on the
 *	two high-order bits of the option type (RFC 2460, section 4.2):
 *	00 - skip, 01 - discard, 10 - discard and send ICMP Parameter Problem,
 *	11 - discard and send ICMP Parameter Problem unless the destination
 *	     is a multicast address.
 */

static int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
{
	switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
	case 0: /* ignore */
		return 1;

	case 1: /* drop packet */
		break;

	case 3: /* Send ICMP if not a multicast address and drop packet */
		/* Actually, this check is redundant; icmpv6_send
		   will recheck in any case.
		 */
		if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
			break;
		/* fall through */
	case 2: /* send ICMP PARM PROB regardless and drop packet */
		icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
		return 0;
	}

	kfree_skb(skb);
	return 0;
}

/* Parse tlv encoded option header (hop-by-hop or destination) */

static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb)
{
	struct tlvtype_proc *curr;
	const unsigned char *nh = skb_network_header(skb);
	int off = skb_network_header_len(skb);
	int len = (skb_transport_header(skb)[1] + 1) << 3;

	if (skb_transport_offset(skb) + len > skb_headlen(skb))
		goto bad;

	off += 2;
	len -= 2;

	while (len > 0) {
		int optlen = nh[off + 1] + 2;

		switch (nh[off]) {
		case IPV6_TLV_PAD0:
			optlen = 1;
			break;

		case IPV6_TLV_PADN:
			break;

		default: /* Other TLV code so scan list */
			if (optlen > len)
				goto bad;
			for (curr = procs; curr->type >= 0; curr++) {
				if (curr->type == nh[off]) {
					/* type specific length/alignment
					   checks will be performed in the
					   func(). */
					if (curr->func(skb, off) == 0)
						return 0;
					break;
				}
			}
			if (curr->type < 0) {
				if (ip6_tlvopt_unknown(skb, off) == 0)
					return 0;
			}
			break;
		}
		off += optlen;
		len -= optlen;
	}
	if (len == 0)
		return 1;
bad:
	kfree_skb(skb);
	return 0;
}

/*****************************
  Destination options header.
 *****************************/

#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
static int ipv6_dest_hao(struct sk_buff *skb, int optoff)
{
	struct ipv6_destopt_hao *hao;
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct in6_addr tmp_addr;
	int ret;

	if (opt->dsthao) {
		LIMIT_NETDEBUG(KERN_DEBUG "hao duplicated\n");
		goto discard;
	}
	opt->dsthao = opt->dst1;
	opt->dst1 = 0;

	hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff);

	if (hao->length != 16) {
		LIMIT_NETDEBUG(
			KERN_DEBUG "hao invalid option length = %d\n", hao->length);
		goto discard;
	}

	if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) {
		LIMIT_NETDEBUG(
			KERN_DEBUG "hao is not a unicast addr: " NIP6_FMT "\n", NIP6(hao->addr));
		goto discard;
	}

	ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr,
			       (xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS);
	if (unlikely(ret < 0))
		goto discard;

	if (skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto discard;

		/* update all variables used below to point into the copied skb */
		hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) +
						  optoff);
		ipv6h = ipv6_hdr(skb);
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;

	ipv6_addr_copy(&tmp_addr, &ipv6h->saddr);
	ipv6_addr_copy(&ipv6h->saddr, &hao->addr);
	ipv6_addr_copy(&hao->addr, &tmp_addr);

	if (skb->tstamp.tv64 == 0)
		__net_timestamp(skb);

	return 1;

 discard:
	kfree_skb(skb);
	return 0;
}
#endif

static struct tlvtype_proc tlvprocdestopt_lst[] = {
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
	{
		.type	= IPV6_TLV_HAO,
		.func	= ipv6_dest_hao,
	},
#endif
	{-1,			NULL}
};
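/*
 *	Inbound handler for the Destination Options header: make sure the
 *	whole header is in the linear area, parse its TLV options and, on
 *	success, advance the transport header past it.  Returns 1 when the
 *	packet should be processed further and -1 after the skb has been
 *	dropped.
 */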
static int ipv6_destopt_rcv(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
	__u16 dstbuf;
#endif
	struct dst_entry *dst;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INHDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	opt->lastopt = opt->dst1 = skb_network_header_len(skb);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
	dstbuf = opt->dst1;
#endif

	dst = dst_clone(skb->dst);
	if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) {
		dst_release(dst);
		skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
		opt = IP6CB(skb);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
		opt->nhoff = dstbuf;
#else
		opt->nhoff = opt->dst1;
#endif
		return 1;
	}

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
	dst_release(dst);
	return -1;
}

/********************************
  Routing header.
 ********************************/

static int ipv6_rthdr_rcv(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct in6_addr *addr = NULL;
	struct in6_addr daddr;
	struct inet6_dev *idev;
	int n, i;
	struct ipv6_rt_hdr *hdr;
	struct rt0_hdr *rthdr;
	int accept_source_route = dev_net(skb->dev)->ipv6.devconf_all->accept_source_route;

	idev = in6_dev_get(skb->dev);
	if (idev) {
		if (accept_source_route > idev->cnf.accept_source_route)
			accept_source_route = idev->cnf.accept_source_route;
		in6_dev_put(idev);
	}

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INHDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
	    skb->pkt_type != PACKET_HOST) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INADDRERRORS);
		kfree_skb(skb);
		return -1;
	}

looped_back:
	if (hdr->segments_left == 0) {
		switch (hdr->type) {
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
		case IPV6_SRCRT_TYPE_2:
			/* Silently discard type 2 header unless it was
			 * processed by this node.
			 */
			if (!addr) {
				IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
						 IPSTATS_MIB_INADDRERRORS);
				kfree_skb(skb);
				return -1;
			}
			break;
#endif
		default:
			break;
		}

		opt->lastopt = opt->srcrt = skb_network_header_len(skb);
		skb->transport_header += (hdr->hdrlen + 1) << 3;
		opt->dst0 = opt->dst1;
		opt->dst1 = 0;
		opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
		return 1;
	}

	switch (hdr->type) {
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
	case IPV6_SRCRT_TYPE_2:
		if (accept_source_route < 0)
			goto unknown_rh;
		/* Silently discard invalid RTH type 2 */
		if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_INHDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		break;
#endif
	default:
		goto unknown_rh;
	}

	/*
	 *	This is the routing header forwarding algorithm from
	 *	RFC 2460, page 16.
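	 *
	 *	i.e. decrement segments_left, pick address n - segments_left
	 *	from the header, swap it with the destination address and
	 *	re-run the route lookup on the rewritten packet.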
	 */

	n = hdr->hdrlen >> 1;

	if (hdr->segments_left > n) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((&hdr->segments_left) -
				   skb_network_header(skb)));
		return -1;
	}

	/* We are about to mangle packet header. Be careful!
	   Do not damage packets queued somewhere.
	 */
	if (skb_cloned(skb)) {
		/* the copy is a forwarded packet */
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_OUTDISCARDS);
			kfree_skb(skb);
			return -1;
		}
		hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;

	i = n - --hdr->segments_left;

	rthdr = (struct rt0_hdr *) hdr;
	addr = rthdr->addr;
	addr += i - 1;

	switch (hdr->type) {
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
	case IPV6_SRCRT_TYPE_2:
		if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
				     (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
				     IPPROTO_ROUTING) < 0) {
			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_INADDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		if (!ipv6_chk_home_addr(dev_net(skb->dst->dev), addr)) {
			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_INADDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		break;
#endif
	default:
		break;
	}

	if (ipv6_addr_is_multicast(addr)) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INADDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	ipv6_addr_copy(&daddr, addr);
	ipv6_addr_copy(addr, &ipv6_hdr(skb)->daddr);
	ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &daddr);

	dst_release(xchg(&skb->dst, NULL));
	ip6_route_input(skb);
	if (skb->dst->error) {
		skb_push(skb, skb->data - skb_network_header(skb));
		dst_input(skb);
		return -1;
	}

	if (skb->dst->dev->flags & IFF_LOOPBACK) {
		if (ipv6_hdr(skb)->hop_limit <= 1) {
			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
				    0, skb->dev);
			kfree_skb(skb);
			return -1;
		}
		ipv6_hdr(skb)->hop_limit--;
		goto looped_back;
	}

	skb_push(skb, skb->data - skb_network_header(skb));
	dst_input(skb);
	return -1;

unknown_rh:
	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
			  (&hdr->type) - skb_network_header(skb));
	return -1;
}

static struct inet6_protocol rthdr_protocol = {
	.handler	=	ipv6_rthdr_rcv,
	.flags		=	INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR,
};

static struct inet6_protocol destopt_protocol = {
	.handler	=	ipv6_destopt_rcv,
	.flags		=	INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR,
};

static struct inet6_protocol nodata_protocol = {
	.handler	=	dst_discard,
	.flags		=	INET6_PROTO_NOPOLICY,
};
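/*
 *	Register the inbound extension header handlers (Routing header,
 *	Destination Options and No Next Header) with the IPv6 protocol
 *	demultiplexer; on failure the registrations done so far are undone.
 */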
int __init ipv6_exthdrs_init(void)
{
	int ret;

	ret = inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING);
	if (ret)
		goto out;

	ret = inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
	if (ret)
		goto out_rthdr;

	ret = inet6_add_protocol(&nodata_protocol, IPPROTO_NONE);
	if (ret)
		goto out_destopt;

out:
	return ret;
out_destopt:
	inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
out_rthdr:
	inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
	goto out;
}

void ipv6_exthdrs_exit(void)
{
	inet6_del_protocol(&nodata_protocol, IPPROTO_NONE);
	inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
	inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
}

/**********************************
  Hop-by-hop options.
 **********************************/

/*
 * Note: we cannot rely on skb->dst before we assign it in ip6_route_input().
 */
static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
{
	return skb->dst ? ip6_dst_idev(skb->dst) : __in6_dev_get(skb->dev);
}

/* Router Alert as of RFC 2711 */

static int ipv6_hop_ra(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);

	if (nh[optoff + 1] == 2) {
		IP6CB(skb)->ra = optoff;
		return 1;
	}
	LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n",
		       nh[optoff + 1]);
	kfree_skb(skb);
	return 0;
}

/* Jumbo payload */

static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);
	u32 pkt_len;

	if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
		LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
			       nh[optoff + 1]);
		IP6_INC_STATS_BH(ipv6_skb_idev(skb),
				 IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
	if (pkt_len <= IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff + 2);
		return 0;
	}
	if (ipv6_hdr(skb)->payload_len) {
		IP6_INC_STATS_BH(ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
		return 0;
	}

	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
		IP6_INC_STATS_BH(ipv6_skb_idev(skb), IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	}

	if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
		goto drop;

	return 1;

drop:
	kfree_skb(skb);
	return 0;
}

static struct tlvtype_proc tlvprochopopt_lst[] = {
	{
		.type	= IPV6_TLV_ROUTERALERT,
		.func	= ipv6_hop_ra,
	},
	{
		.type	= IPV6_TLV_JUMBO,
		.func	= ipv6_hop_jumbo,
	},
	{ -1, }
};

int ipv6_parse_hopopts(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);

	/*
	 * skb_network_header(skb) is equal to skb->data, and
	 * skb_network_header_len(skb) is always equal to
	 * sizeof(struct ipv6hdr) by definition of
	 * hop-by-hop options.
	 */
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
	    !pskb_may_pull(skb, (sizeof(struct ipv6hdr) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		kfree_skb(skb);
		return -1;
	}

	opt->hop = sizeof(struct ipv6hdr);
	if (ip6_parse_tlv(tlvprochopopt_lst, skb)) {
		skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
		opt = IP6CB(skb);
		opt->nhoff = sizeof(struct ipv6hdr);
		return 1;
	}
	return -1;
}

/*
 *	Creating outbound headers.
 *
 *	"build" functions work when skb is filled from head to tail (datagram)
 *	"push" functions work when headers are added from tail to head (tcp)
 *
 *	In both cases we assume that the caller has reserved enough room
 *	for the headers.
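 *
 *	ipv6_push_nfrag_opts() pushes the extension headers that belong to the
 *	unfragmentable part of the packet (hop-by-hop, destination options
 *	preceding the routing header, and the routing header itself), while
 *	ipv6_push_frag_opts() pushes the destination options that belong to
 *	the fragmentable part.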
 */

static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
			    struct ipv6_rt_hdr *opt,
			    struct in6_addr **addr_p)
{
	struct rt0_hdr *phdr, *ihdr;
	int hops;

	ihdr = (struct rt0_hdr *) opt;

	phdr = (struct rt0_hdr *) skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
	memcpy(phdr, ihdr, sizeof(struct rt0_hdr));

	hops = ihdr->rt_hdr.hdrlen >> 1;

	if (hops > 1)
		memcpy(phdr->addr, ihdr->addr + 1,
		       (hops - 1) * sizeof(struct in6_addr));

	ipv6_addr_copy(phdr->addr + (hops - 1), *addr_p);
	*addr_p = ihdr->addr;

	phdr->rt_hdr.nexthdr = *proto;
	*proto = NEXTHDR_ROUTING;
}

static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt)
{
	struct ipv6_opt_hdr *h = (struct ipv6_opt_hdr *)skb_push(skb, ipv6_optlen(opt));

	memcpy(h, opt, ipv6_optlen(opt));
	h->nexthdr = *proto;
	*proto = type;
}

void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
			  u8 *proto,
			  struct in6_addr **daddr)
{
	if (opt->srcrt) {
		ipv6_push_rthdr(skb, proto, opt->srcrt, daddr);
		/*
		 * IPV6_RTHDRDSTOPTS is ignored
		 * unless IPV6_RTHDR is set (RFC3542).
		 */
		if (opt->dst0opt)
			ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt);
	}
	if (opt->hopopt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
}

EXPORT_SYMBOL(ipv6_push_nfrag_opts);

void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
{
	if (opt->dst1opt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt);
}

struct ipv6_txoptions *
ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
{
	struct ipv6_txoptions *opt2;

	opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
	if (opt2) {
		long dif = (char *)opt2 - (char *)opt;
		memcpy(opt2, opt, opt->tot_len);
		if (opt2->hopopt)
			*((char **)&opt2->hopopt) += dif;
		if (opt2->dst0opt)
			*((char **)&opt2->dst0opt) += dif;
		if (opt2->dst1opt)
			*((char **)&opt2->dst1opt) += dif;
		if (opt2->srcrt)
			*((char **)&opt2->srcrt) += dif;
	}
	return opt2;
}

EXPORT_SYMBOL_GPL(ipv6_dup_options);

static int ipv6_renew_option(void *ohdr,
			     struct ipv6_opt_hdr __user *newopt, int newoptlen,
			     int inherit,
			     struct ipv6_opt_hdr **hdr,
			     char **p)
{
	if (inherit) {
		if (ohdr) {
			memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
			*hdr = (struct ipv6_opt_hdr *)*p;
			*p += CMSG_ALIGN(ipv6_optlen(*(struct ipv6_opt_hdr **)hdr));
		}
	} else {
		if (newopt) {
			if (copy_from_user(*p, newopt, newoptlen))
				return -EFAULT;
			*hdr = (struct ipv6_opt_hdr *)*p;
			if (ipv6_optlen(*(struct ipv6_opt_hdr **)hdr) > newoptlen)
				return -EINVAL;
			*p += CMSG_ALIGN(newoptlen);
		}
	}
	return 0;
}
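/*
 *	Build a new ipv6_txoptions block in which the header selected by
 *	newtype is replaced by the user supplied option (or dropped when no
 *	new option is supplied) and all other headers are inherited from opt.
 *	Returns the new block, NULL when no options remain, or an ERR_PTR()
 *	on failure.
 */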
struct ipv6_txoptions *
ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
		   int newtype,
		   struct ipv6_opt_hdr __user *newopt, int newoptlen)
{
	int tot_len = 0;
	char *p;
	struct ipv6_txoptions *opt2;
	int err;

	if (opt) {
		if (newtype != IPV6_HOPOPTS && opt->hopopt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
		if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
		if (newtype != IPV6_RTHDR && opt->srcrt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
		if (newtype != IPV6_DSTOPTS && opt->dst1opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
	}

	if (newopt && newoptlen)
		tot_len += CMSG_ALIGN(newoptlen);

	if (!tot_len)
		return NULL;

	tot_len += sizeof(*opt2);
	opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC);
	if (!opt2)
		return ERR_PTR(-ENOBUFS);

	memset(opt2, 0, tot_len);

	opt2->tot_len = tot_len;
	p = (char *)(opt2 + 1);

	err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
				newtype != IPV6_HOPOPTS,
				&opt2->hopopt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
				newtype != IPV6_RTHDRDSTOPTS,
				&opt2->dst0opt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
				newtype != IPV6_RTHDR,
				(struct ipv6_opt_hdr **)&opt2->srcrt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
				newtype != IPV6_DSTOPTS,
				&opt2->dst1opt, &p);
	if (err)
		goto out;

	opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
			  (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
			  (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0);
	opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);

	return opt2;
out:
	sock_kfree_s(sk, opt2, opt2->tot_len);
	return ERR_PTR(err);
}

struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
					  struct ipv6_txoptions *opt)
{
	/*
	 * ignore the dest before srcrt unless srcrt is being included.
	 * --yoshfuji
	 */
	if (opt && opt->dst0opt && !opt->srcrt) {
		if (opt_space != opt) {
			memcpy(opt_space, opt, sizeof(*opt_space));
			opt = opt_space;
		}
		opt->opt_nflen -= ipv6_optlen(opt->dst0opt);
		opt->dst0opt = NULL;
	}

	return opt;
}