/*
 * IPv6 over IPv4 tunnel device - Simple Internet Transition (SIT)
 * Linux INET6 implementation
 *
 * Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Changes:
 * Roger Venning <r.venning@telstra.com>:	6to4 support
 * Nate Thompson <nate@thebog.net>:		6to4 support
 * Fred Templin <fred.l.templin@boeing.com>:	isatap support
 * Sascha Hlusiak <mail@saschahlusiak.de>:	stateless autoconf for isatap
 */

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <asm/uaccess.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/icmp.h>
#include <net/ipip.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
   This version of net/ipv6/sit.c is a clone of net/ipv4/ip_gre.c

   For comments look at net/ipv4/ip_gre.c --ANK
 */

#define HASH_SIZE  16
#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)

static void ipip6_fb_tunnel_init(struct net_device *dev);
static void ipip6_tunnel_init(struct net_device *dev);
static void ipip6_tunnel_setup(struct net_device *dev);

static int sit_net_id;
struct sit_net {
	struct ip_tunnel *tunnels_r_l[HASH_SIZE];
	struct ip_tunnel *tunnels_r[HASH_SIZE];
	struct ip_tunnel *tunnels_l[HASH_SIZE];
	struct ip_tunnel *tunnels_wc[1];
	struct ip_tunnel **tunnels[4];

	struct net_device *fb_tunnel_dev;
};

static DEFINE_RWLOCK(ipip6_lock);

static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net,
		struct net_device *dev, __be32 remote, __be32 local)
{
	unsigned h0 = HASH(remote);
	unsigned h1 = HASH(local);
	struct ip_tunnel *t;
	struct sit_net *sitn = net_generic(net, sit_net_id);

	for (t = sitn->tunnels_r_l[h0^h1]; t; t = t->next) {
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}
	for (t = sitn->tunnels_r[h0]; t; t = t->next) {
		if (remote == t->parms.iph.daddr &&
		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}
	for (t = sitn->tunnels_l[h1]; t; t = t->next) {
		if (local == t->parms.iph.saddr &&
		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}
	t = sitn->tunnels_wc[0];
	if ((t != NULL) && (t->dev->flags & IFF_UP))
		return t;
	return NULL;
}
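/*
 * Note on the hash tables above: __ipip6_bucket() below picks one of the
 * four tables in struct sit_net via a small "prio" index.  Bit 1 of prio
 * is set when the tunnel has a remote endpoint (iph.daddr) and bit 0 when
 * it has a local one (iph.saddr), so tunnels[0] holds the single wildcard
 * tunnel, tunnels[1] the local-only tunnels, tunnels[2] the remote-only
 * ones and tunnels[3] the fully specified ones, the last hashed on
 * HASH(remote) ^ HASH(local) exactly as ipip6_tunnel_lookup() expects.
 */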
static struct ip_tunnel **__ipip6_bucket(struct sit_net *sitn,
		struct ip_tunnel_parm *parms)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	unsigned h = 0;
	int prio = 0;

	if (remote) {
		prio |= 2;
		h ^= HASH(remote);
	}
	if (local) {
		prio |= 1;
		h ^= HASH(local);
	}
	return &sitn->tunnels[prio][h];
}

static inline struct ip_tunnel **ipip6_bucket(struct sit_net *sitn,
		struct ip_tunnel *t)
{
	return __ipip6_bucket(sitn, &t->parms);
}

static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
{
	struct ip_tunnel **tp;

	for (tp = ipip6_bucket(sitn, t); *tp; tp = &(*tp)->next) {
		if (t == *tp) {
			write_lock_bh(&ipip6_lock);
			*tp = t->next;
			write_unlock_bh(&ipip6_lock);
			break;
		}
	}
}

static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
{
	struct ip_tunnel **tp = ipip6_bucket(sitn, t);

	t->next = *tp;
	write_lock_bh(&ipip6_lock);
	*tp = t;
	write_unlock_bh(&ipip6_lock);
}

static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
		struct ip_tunnel_parm *parms, int create)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	struct ip_tunnel *t, **tp, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct sit_net *sitn = net_generic(net, sit_net_id);

	for (tp = __ipip6_bucket(sitn, parms); (t = *tp) != NULL; tp = &t->next) {
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    parms->link == t->parms.link) {
			if (create)
				return NULL;
			else
				return t;
		}
	}
	if (!create)
		goto failed;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		sprintf(name, "sit%%d");

	dev = alloc_netdev(sizeof(*t), name, ipip6_tunnel_setup);
	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	if (strchr(name, '%')) {
		if (dev_alloc_name(dev, name) < 0)
			goto failed_free;
	}

	nt = netdev_priv(dev);

	nt->parms = *parms;
	ipip6_tunnel_init(dev);

	if (parms->i_flags & SIT_ISATAP)
		dev->priv_flags |= IFF_ISATAP;

	if (register_netdevice(dev) < 0)
		goto failed_free;

	dev_hold(dev);

	ipip6_tunnel_link(sitn, nt);
	return nt;

failed_free:
	free_netdev(dev);
failed:
	return NULL;
}

static void ipip6_tunnel_rs_timer(unsigned long data)
{
	struct ip_tunnel_prl_entry *p = (struct ip_tunnel_prl_entry *) data;
	struct inet6_dev *ifp;
	struct inet6_ifaddr *addr;

	spin_lock(&p->lock);
	ifp = __in6_dev_get(p->tunnel->dev);

	read_lock_bh(&ifp->lock);
	for (addr = ifp->addr_list; addr; addr = addr->if_next) {
		struct in6_addr rtr;

		if (!(ipv6_addr_type(&addr->addr) & IPV6_ADDR_LINKLOCAL))
			continue;

		/* Send RS to guessed linklocal address of router
		 *
		 * Better: send to ff02::2 encapsulated in unicast directly
		 * to router-v4 instead of guessing the v6 address.
		 *
		 * Cisco/Windows seem to not set the u/l bit correctly,
		 * so we won't guess right.
		 */
		ipv6_addr_set(&rtr, htonl(0xFE800000), 0, 0, 0);
		if (!__ipv6_isatap_ifid(rtr.s6_addr + 8,
					p->addr)) {
			ndisc_send_rs(p->tunnel->dev, &addr->addr, &rtr);
		}
	}
	read_unlock_bh(&ifp->lock);

	mod_timer(&p->rs_timer, jiffies + HZ * p->rs_delay);
	spin_unlock(&p->lock);

	return;
}

static struct ip_tunnel_prl_entry *
__ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr)
{
	struct ip_tunnel_prl_entry *p = (struct ip_tunnel_prl_entry *)NULL;

	for (p = t->prl; p; p = p->next)
		if (p->addr == addr)
			break;
	return p;
}

static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
				struct ip_tunnel_prl __user *a)
{
	struct ip_tunnel_prl kprl, *kp;
	struct ip_tunnel_prl_entry *prl;
	unsigned int cmax, c = 0, ca, len;
	int ret = 0;

	if (copy_from_user(&kprl, a, sizeof(kprl)))
		return -EFAULT;
	cmax = kprl.datalen / sizeof(kprl);
	if (cmax > 1 && kprl.addr != htonl(INADDR_ANY))
		cmax = 1;

	/* For simple GET or for root users,
	 * we try harder to allocate.
	 */
	kp = (cmax <= 1 || capable(CAP_NET_ADMIN)) ?
		kcalloc(cmax, sizeof(*kp), GFP_KERNEL) :
		NULL;

	read_lock(&ipip6_lock);

	ca = t->prl_count < cmax ? t->prl_count : cmax;

	if (!kp) {
		/* We don't try hard to allocate much memory for
		 * non-root users.
		 * For root users, retry allocating enough memory for
		 * the answer.
		 */
		kp = kcalloc(ca, sizeof(*kp), GFP_ATOMIC);
		if (!kp) {
			ret = -ENOMEM;
			goto out;
		}
	}

	c = 0;
	for (prl = t->prl; prl; prl = prl->next) {
		if (c >= cmax)
			break;
		if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr)
			continue;
		kp[c].addr = prl->addr;
		kp[c].flags = prl->flags;
		kp[c].rs_delay = prl->rs_delay;
		c++;
		if (kprl.addr != htonl(INADDR_ANY))
			break;
	}
out:
	read_unlock(&ipip6_lock);

	len = sizeof(*kp) * c;
	ret = 0;
	if ((len && copy_to_user(a + 1, kp, len)) || put_user(len, &a->datalen))
		ret = -EFAULT;

	kfree(kp);

	return ret;
}

static int
ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
{
	struct ip_tunnel_prl_entry *p;
	int err = 0;

	if (a->addr == htonl(INADDR_ANY))
		return -EINVAL;

	write_lock(&ipip6_lock);

	for (p = t->prl; p; p = p->next) {
		if (p->addr == a->addr) {
			if (chg)
				goto update;
			err = -EEXIST;
			goto out;
		}
	}

	if (chg) {
		err = -ENXIO;
		goto out;
	}

	p = kzalloc(sizeof(struct ip_tunnel_prl_entry), GFP_KERNEL);
	if (!p) {
		err = -ENOBUFS;
		goto out;
	}

	p->next = t->prl;
	p->tunnel = t;
	t->prl = p;
	t->prl_count++;

	spin_lock_init(&p->lock);
	setup_timer(&p->rs_timer, ipip6_tunnel_rs_timer, (unsigned long) p);
update:
	p->addr = a->addr;
	p->flags = a->flags;
	p->rs_delay = a->rs_delay;
	if (p->rs_delay == 0)
		p->rs_delay = IPTUNNEL_RS_DEFAULT_DELAY;
	spin_lock(&p->lock);
	del_timer(&p->rs_timer);
	if (p->flags & PRL_DEFAULT)
		mod_timer(&p->rs_timer, jiffies + 1);
	spin_unlock(&p->lock);
out:
	write_unlock(&ipip6_lock);
	return err;
}

static int
ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
{
	struct ip_tunnel_prl_entry *x, **p;
	int err = 0;

	write_lock(&ipip6_lock);

	if (a && a->addr != htonl(INADDR_ANY)) {
		for (p = &t->prl; *p; p = &(*p)->next) {
			if ((*p)->addr == a->addr) {
				x = *p;
				*p = x->next;
				spin_lock(&x->lock);
				del_timer(&x->rs_timer);
				spin_unlock(&x->lock);
				kfree(x);
				t->prl_count--;
				goto out;
			}
		}
		err = -ENXIO;
	} else {
		while (t->prl) {
			x = t->prl;
			t->prl = t->prl->next;
			spin_lock(&x->lock);
			del_timer(&x->rs_timer);
			spin_unlock(&x->lock);
			kfree(x);
			t->prl_count--;
		}
	}
out:
	write_unlock(&ipip6_lock);
	return err;
}

static int
isatap_chksrc(struct sk_buff *skb, struct iphdr *iph, struct ip_tunnel *t)
{
	struct ip_tunnel_prl_entry *p;
	int ok = 1;

	read_lock(&ipip6_lock);
	p = __ipip6_tunnel_locate_prl(t, iph->saddr);
	if (p) {
		if (p->flags & PRL_DEFAULT)
			skb->ndisc_nodetype = NDISC_NODETYPE_DEFAULT;
		else
			skb->ndisc_nodetype = NDISC_NODETYPE_NODEFAULT;
	} else {
		struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr;
		if (ipv6_addr_is_isatap(addr6) &&
		    (addr6->s6_addr32[3] == iph->saddr) &&
		    ipv6_chk_prefix(addr6, t->dev))
			skb->ndisc_nodetype = NDISC_NODETYPE_HOST;
		else
			ok = 0;
	}
	read_unlock(&ipip6_lock);
	return ok;
}

static void ipip6_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct sit_net *sitn = net_generic(net, sit_net_id);

	if (dev == sitn->fb_tunnel_dev) {
		write_lock_bh(&ipip6_lock);
		sitn->tunnels_wc[0] = NULL;
		write_unlock_bh(&ipip6_lock);
		dev_put(dev);
	} else {
		ipip6_tunnel_unlink(sitn, netdev_priv(dev));
		ipip6_tunnel_del_prl(netdev_priv(dev), NULL);
		dev_put(dev);
	}
}


static int ipip6_err(struct sk_buff *skb, u32 info)
{

	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means, that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.
	 */
	struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	int err;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return 0;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return 0;
		case ICMP_FRAG_NEEDED:
			/* Soft state for pmtu is maintained by IP core. */
			return 0;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution.
			   --ANK
			 */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return 0;
		break;
	}

	err = -ENOENT;

	read_lock(&ipip6_lock);
	t = ipip6_tunnel_lookup(dev_net(skb->dev),
				skb->dev,
				iph->daddr,
				iph->saddr);
	if (t == NULL || t->parms.iph.daddr == 0)
		goto out;

	err = 0;
	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
out:
	read_unlock(&ipip6_lock);
	return err;
}

static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
{
	if (INET_ECN_is_ce(iph->tos))
		IP6_ECN_set_ce(ipv6_hdr(skb));
}

static int ipip6_rcv(struct sk_buff *skb)
{
	struct iphdr *iph;
	struct ip_tunnel *tunnel;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto out;

	iph = ip_hdr(skb);

	read_lock(&ipip6_lock);
	tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
				     iph->saddr, iph->daddr);
	if (tunnel != NULL) {
		secpath_reset(skb);
		skb->mac_header = skb->network_header;
		skb_reset_network_header(skb);
		IPCB(skb)->flags = 0;
		skb->protocol = htons(ETH_P_IPV6);
		skb->pkt_type = PACKET_HOST;

		if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
		    !isatap_chksrc(skb, iph, tunnel)) {
			tunnel->dev->stats.rx_errors++;
			read_unlock(&ipip6_lock);
			kfree_skb(skb);
			return 0;
		}
		tunnel->dev->stats.rx_packets++;
		tunnel->dev->stats.rx_bytes += skb->len;
		skb->dev = tunnel->dev;
		skb_dst_drop(skb);
		nf_reset(skb);
		ipip6_ecn_decapsulate(iph, skb);
		netif_rx(skb);
		read_unlock(&ipip6_lock);
		return 0;
	}

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
	read_unlock(&ipip6_lock);
out:
	kfree_skb(skb);
	return 0;
}

/* Returns the embedded IPv4 address if the IPv6 address
   comes from 6to4 (RFC 3056) addr space */

static inline __be32 try_6to4(struct in6_addr *v6dst)
{
	__be32 dst = 0;

	if (v6dst->s6_addr16[0] == htons(0x2002)) {
		/* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */
		memcpy(&dst, &v6dst->s6_addr16[1], 4);
	}
	return dst;
}
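/*
 * Worked example (illustrative addresses only): for the 6to4 destination
 * 2002:c000:0204::1, s6_addr16[0] is 0x2002 and the next 32 bits are
 * 0xc0000204, so try_6to4() returns 192.0.2.4 as the IPv4 tunnel endpoint,
 * following the RFC 3056 2002:V4ADDR::/48 mapping.
 */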
/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */

static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net_device_stats *stats = &tunnel->dev->stats;
	struct iphdr *tiph = &tunnel->parms.iph;
	struct ipv6hdr *iph6 = ipv6_hdr(skb);
	u8 tos = tunnel->parms.iph.tos;
	struct rtable *rt;		/* Route to the other host */
	struct net_device *tdev;	/* Device to other host */
	struct iphdr *iph;		/* Our new IP header */
	unsigned int max_headroom;	/* The extra header space needed */
	__be32 dst = tiph->daddr;
	int mtu;
	struct in6_addr *addr6;
	int addr_type;

	if (tunnel->recursion++) {
		stats->collisions++;
		goto tx_error;
	}

	if (skb->protocol != htons(ETH_P_IPV6))
		goto tx_error;

	/* ISATAP (RFC4214) - must come before 6to4 */
	if (dev->priv_flags & IFF_ISATAP) {
		struct neighbour *neigh = NULL;

		if (skb_dst(skb))
			neigh = skb_dst(skb)->neighbour;

		if (neigh == NULL) {
			if (net_ratelimit())
				printk(KERN_DEBUG "sit: nexthop == NULL\n");
			goto tx_error;
		}

		addr6 = (struct in6_addr *)&neigh->primary_key;
		addr_type = ipv6_addr_type(addr6);

		if ((addr_type & IPV6_ADDR_UNICAST) &&
		    ipv6_addr_is_isatap(addr6))
			dst = addr6->s6_addr32[3];
		else
			goto tx_error;
	}

	if (!dst)
		dst = try_6to4(&iph6->daddr);

	if (!dst) {
		struct neighbour *neigh = NULL;

		if (skb_dst(skb))
			neigh = skb_dst(skb)->neighbour;

		if (neigh == NULL) {
			if (net_ratelimit())
				printk(KERN_DEBUG "sit: nexthop == NULL\n");
			goto tx_error;
		}

		addr6 = (struct in6_addr *)&neigh->primary_key;
		addr_type = ipv6_addr_type(addr6);

		if (addr_type == IPV6_ADDR_ANY) {
			addr6 = &ipv6_hdr(skb)->daddr;
			addr_type = ipv6_addr_type(addr6);
		}

		if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
			goto tx_error_icmp;

		dst = addr6->s6_addr32[3];
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = dst,
						.saddr = tiph->saddr,
						.tos = RT_TOS(tos) } },
				    .oif = tunnel->parms.link,
				    .proto = IPPROTO_IPV6 };
		if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
			stats->tx_carrier_errors++;
			goto tx_error_icmp;
		}
	}
	if (rt->rt_type != RTN_UNICAST) {
		ip_rt_put(rt);
		stats->tx_carrier_errors++;
		goto tx_error_icmp;
	}
	tdev = rt->u.dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		stats->collisions++;
		goto tx_error;
	}

	if (tiph->frag_off)
		mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (mtu < 68) {
		stats->collisions++;
		ip_rt_put(rt);
		goto tx_error;
	}
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	if (tunnel->parms.iph.daddr && skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);

	if (skb->len > mtu) {
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
		ip_rt_put(rt);
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			stats->tx_dropped++;
			dev_kfree_skb(skb);
			tunnel->recursion--;
			return NETDEV_TX_OK;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		iph6 = ipv6_hdr(skb);
	}

	skb->transport_header = skb->network_header;
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags = 0;
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->u.dst);

	/*
	 *	Push down and install the IPIP header.
	 */

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr)>>2;
	if (mtu > IPV6_MIN_MTU)
		iph->frag_off = tiph->frag_off;
	else
		iph->frag_off = 0;

	iph->protocol = IPPROTO_IPV6;
	iph->tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;

	if ((iph->ttl = tiph->ttl) == 0)
		iph->ttl = iph6->hop_limit;

	nf_reset(skb);

	IPTUNNEL_XMIT();
	tunnel->recursion--;
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	stats->tx_errors++;
	dev_kfree_skb(skb);
	tunnel->recursion--;
	return NETDEV_TX_OK;
}

static void ipip6_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	struct iphdr *iph;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	if (iph->daddr) {
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.saddr = iph->saddr,
						.tos = RT_TOS(iph->tos) } },
				    .oif = tunnel->parms.link,
				    .proto = IPPROTO_IPV6 };
		struct rtable *rt;
		if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
			tdev = rt->u.dst.dev;
			ip_rt_put(rt);
		}
		dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
		dev->mtu = tdev->mtu - sizeof(struct iphdr);
		if (dev->mtu < IPV6_MIN_MTU)
			dev->mtu = IPV6_MIN_MTU;
	}
	dev->iflink = tunnel->parms.link;
}
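/*
 * The ioctl handler below is where userspace tunnel configuration lands.
 * As an illustration (addresses are examples only), iproute2's
 * "ip tunnel add sit1 mode sit remote 203.0.113.1 local 198.51.100.2 ttl 64"
 * typically fills in a struct ip_tunnel_parm and issues SIOCADDTUNNEL
 * against the fallback "sit0" device; SIOCCHGTUNNEL and SIOCDELTUNNEL work
 * the same way, while the SIOCADD/CHG/DEL/GETPRL calls manage the ISATAP
 * potential-router list on a specific (non-fallback) tunnel device.
 */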
static int
ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel_prl prl;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct sit_net *sitn = net_generic(net, sit_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == sitn->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipip6_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPV6 ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		t = ipip6_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != sitn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
				    (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
					err = -EINVAL;
					break;
				}
				t = netdev_priv(dev);
				ipip6_tunnel_unlink(sitn, t);
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipip6_tunnel_link(sitn, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					ipip6_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == sitn->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipip6_tunnel_locate(net, &p, 0)) == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(sitn->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	case SIOCGETPRL:
		err = -EINVAL;
		if (dev == sitn->fb_tunnel_dev)
			goto done;
		err = -ENOENT;
		if (!(t = netdev_priv(dev)))
			goto done;
		err = ipip6_tunnel_get_prl(t, ifr->ifr_ifru.ifru_data);
		break;

	case SIOCADDPRL:
	case SIOCDELPRL:
	case SIOCCHGPRL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;
		err = -EINVAL;
		if (dev == sitn->fb_tunnel_dev)
			goto done;
		err = -EFAULT;
		if (copy_from_user(&prl, ifr->ifr_ifru.ifru_data, sizeof(prl)))
			goto done;
		err = -ENOENT;
		if (!(t = netdev_priv(dev)))
			goto done;

		switch (cmd) {
		case SIOCDELPRL:
			err = ipip6_tunnel_del_prl(t, &prl);
			break;
		case SIOCADDPRL:
		case SIOCCHGPRL:
			err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL);
			break;
		}
		netdev_state_change(dev);
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}

static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops ipip6_netdev_ops = {
	.ndo_uninit	= ipip6_tunnel_uninit,
	.ndo_start_xmit	= ipip6_tunnel_xmit,
	.ndo_do_ioctl	= ipip6_tunnel_ioctl,
	.ndo_change_mtu	= ipip6_tunnel_change_mtu,
};

static void ipip6_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ipip6_netdev_ops;
	dev->destructor = free_netdev;

	dev->type = ARPHRD_SIT;
	dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
	dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr);
	dev->flags = IFF_NOARP;
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	dev->iflink = 0;
	dev->addr_len = 4;
	dev->features |= NETIF_F_NETNS_LOCAL;
}

static void ipip6_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	ipip6_tunnel_bind_dev(dev);
}

static void ipip6_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	struct net *net = dev_net(dev);
	struct sit_net *sitn = net_generic(net, sit_net_id);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version = 4;
	iph->protocol = IPPROTO_IPV6;
	iph->ihl = 5;
	iph->ttl = 64;

	dev_hold(dev);
	sitn->tunnels_wc[0] = tunnel;
}

static struct xfrm_tunnel sit_handler = {
	.handler	= ipip6_rcv,
	.err_handler	= ipip6_err,
	.priority	= 1,
};

static void sit_destroy_tunnels(struct sit_net *sitn)
{
	int prio;

	for (prio = 1; prio < 4; prio++) {
		int h;
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip_tunnel *t;
			while ((t = sitn->tunnels[prio][h]) != NULL)
				unregister_netdevice(t->dev);
		}
	}
}

static int sit_init_net(struct net *net)
{
	int err;
	struct sit_net *sitn;

	err = -ENOMEM;
	sitn = kzalloc(sizeof(struct sit_net), GFP_KERNEL);
	if (sitn == NULL)
		goto err_alloc;

	err = net_assign_generic(net, sit_net_id, sitn);
	if (err < 0)
		goto err_assign;

	sitn->tunnels[0] = sitn->tunnels_wc;
	sitn->tunnels[1] = sitn->tunnels_l;
	sitn->tunnels[2] = sitn->tunnels_r;
	sitn->tunnels[3] = sitn->tunnels_r_l;

	sitn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "sit0",
					   ipip6_tunnel_setup);
	if (!sitn->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(sitn->fb_tunnel_dev, net);

	ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);

	if ((err = register_netdev(sitn->fb_tunnel_dev)))
		goto err_reg_dev;

	return 0;

err_reg_dev:
	dev_put(sitn->fb_tunnel_dev);
	free_netdev(sitn->fb_tunnel_dev);
err_alloc_dev:
	/* nothing */
err_assign:
	kfree(sitn);
err_alloc:
	return err;
}

static void sit_exit_net(struct net *net)
{
	struct sit_net *sitn;

	sitn = net_generic(net, sit_net_id);
	rtnl_lock();
	sit_destroy_tunnels(sitn);
	unregister_netdevice(sitn->fb_tunnel_dev);
	rtnl_unlock();
	kfree(sitn);
}

static struct pernet_operations sit_net_ops = {
	.init = sit_init_net,
	.exit = sit_exit_net,
};

static void __exit sit_cleanup(void)
{
	xfrm4_tunnel_deregister(&sit_handler, AF_INET6);

	unregister_pernet_gen_device(sit_net_id, &sit_net_ops);
}

static int __init sit_init(void)
{
	int err;

	printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n");

	if (xfrm4_tunnel_register(&sit_handler, AF_INET6) < 0) {
		printk(KERN_INFO "sit init: Can't add protocol\n");
		return -EAGAIN;
	}

	err = register_pernet_gen_device(&sit_net_id, &sit_net_ops);
	if (err < 0)
		xfrm4_tunnel_deregister(&sit_handler, AF_INET6);

	return err;
}

module_init(sit_init);
module_exit(sit_cleanup);
MODULE_LICENSE("GPL");
MODULE_ALIAS("sit0");