/*
 *	IPv6 over IPv4 tunnel device - Simple Internet Transition (SIT)
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	$Id: sit.c,v 1.53 2001/09/25 05:09:53 davem Exp $
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 * Roger Venning <r.venning@telstra.com>:	6to4 support
 * Nate Thompson <nate@thebog.net>:		6to4 support
 */

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <asm/uaccess.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/icmp.h>
#include <net/ipip.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/dsfield.h>

/*
   This version of net/ipv6/sit.c is cloned of net/ipv4/ip_gre.c

   For comments look at net/ipv4/ip_gre.c --ANK
 */

#define HASH_SIZE  16
#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)

static int ipip6_fb_tunnel_init(struct net_device *dev);
static int ipip6_tunnel_init(struct net_device *dev);
static void ipip6_tunnel_setup(struct net_device *dev);

static struct net_device *ipip6_fb_tunnel_dev;

static struct ip_tunnel *tunnels_r_l[HASH_SIZE];
static struct ip_tunnel *tunnels_r[HASH_SIZE];
static struct ip_tunnel *tunnels_l[HASH_SIZE];
static struct ip_tunnel *tunnels_wc[1];
static struct ip_tunnel **tunnels[4] = { tunnels_wc, tunnels_l, tunnels_r, tunnels_r_l };

static DEFINE_RWLOCK(ipip6_lock);

static struct ip_tunnel * ipip6_tunnel_lookup(__be32 remote, __be32 local)
{
	unsigned h0 = HASH(remote);
	unsigned h1 = HASH(local);
	struct ip_tunnel *t;

	for (t = tunnels_r_l[h0^h1]; t; t = t->next) {
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
			return t;
	}
	for (t = tunnels_r[h0]; t; t = t->next) {
		if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
			return t;
	}
	for (t = tunnels_l[h1]; t; t = t->next) {
		if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
			return t;
	}
	if ((t = tunnels_wc[0]) != NULL && (t->dev->flags&IFF_UP))
		return t;
	return NULL;
}

static struct ip_tunnel **__ipip6_bucket(struct ip_tunnel_parm *parms)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	unsigned h = 0;
	int prio = 0;

	if (remote) {
		prio |= 2;
		h ^= HASH(remote);
	}
	if (local) {
		prio |= 1;
		h ^= HASH(local);
	}
	return &tunnels[prio][h];
}

static inline struct ip_tunnel **ipip6_bucket(struct ip_tunnel *t)
{
	return __ipip6_bucket(&t->parms);
}
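/*
 * Tunnels live on four hash tables, selected by which endpoint addresses
 * are configured: tunnels_r_l (remote and local set), tunnels_r (remote
 * only), tunnels_l (local only) and tunnels_wc (the wildcard fallback
 * device, sit0).  ipip6_tunnel_lookup() walks them from most to least
 * specific under read_lock(&ipip6_lock); ipip6_tunnel_link()/_unlink()
 * take the write lock only around the single pointer update, since all
 * configuration changes already happen under the RTNL.
 */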
static void ipip6_tunnel_unlink(struct ip_tunnel *t)
{
	struct ip_tunnel **tp;

	for (tp = ipip6_bucket(t); *tp; tp = &(*tp)->next) {
		if (t == *tp) {
			write_lock_bh(&ipip6_lock);
			*tp = t->next;
			write_unlock_bh(&ipip6_lock);
			break;
		}
	}
}

static void ipip6_tunnel_link(struct ip_tunnel *t)
{
	struct ip_tunnel **tp = ipip6_bucket(t);

	t->next = *tp;
	write_lock_bh(&ipip6_lock);
	*tp = t;
	write_unlock_bh(&ipip6_lock);
}

static struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int create)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	struct ip_tunnel *t, **tp, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];

	for (tp = __ipip6_bucket(parms); (t = *tp) != NULL; tp = &t->next) {
		if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
			return t;
	}
	if (!create)
		goto failed;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else {
		int i;
		for (i=1; i<100; i++) {
			sprintf(name, "sit%d", i);
			if (__dev_get_by_name(name) == NULL)
				break;
		}
		if (i==100)
			goto failed;
	}

	dev = alloc_netdev(sizeof(*t), name, ipip6_tunnel_setup);
	if (dev == NULL)
		return NULL;

	nt = netdev_priv(dev);
	dev->init = ipip6_tunnel_init;
	nt->parms = *parms;

	if (register_netdevice(dev) < 0) {
		free_netdev(dev);
		goto failed;
	}

	dev_hold(dev);

	ipip6_tunnel_link(nt);
	return nt;

failed:
	return NULL;
}

static void ipip6_tunnel_uninit(struct net_device *dev)
{
	if (dev == ipip6_fb_tunnel_dev) {
		write_lock_bh(&ipip6_lock);
		tunnels_wc[0] = NULL;
		write_unlock_bh(&ipip6_lock);
		dev_put(dev);
	} else {
		ipip6_tunnel_unlink(netdev_priv(dev));
		dev_put(dev);
	}
}


static int ipip6_err(struct sk_buff *skb, u32 info)
{
#ifndef I_WISH_WORLD_WERE_PERFECT

/* It is not :-( All the routers (except for Linux) return only
   8 bytes of packet payload. It means, that precise relaying of
   ICMP in the real Internet is absolutely infeasible.
 */
	struct iphdr *iph = (struct iphdr*)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	int err;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return 0;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return 0;
		case ICMP_FRAG_NEEDED:
			/* Soft state for pmtu is maintained by IP core. */
			return 0;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return 0;
		break;
	}

	err = -ENOENT;

	read_lock(&ipip6_lock);
	t = ipip6_tunnel_lookup(iph->daddr, iph->saddr);
	if (t == NULL || t->parms.iph.daddr == 0)
		goto out;

	err = 0;
	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (jiffies - t->err_time < IPTUNNEL_ERR_TIMEO)
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
out:
	read_unlock(&ipip6_lock);
	return err;
#else
	struct iphdr *iph = (struct iphdr*)dp;
	int hlen = iph->ihl<<2;
	struct ipv6hdr *iph6;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	int rel_type = 0;
	int rel_code = 0;
	int rel_info = 0;
	struct sk_buff *skb2;
	struct rt6_info *rt6i;

	if (len < hlen + sizeof(struct ipv6hdr))
		return;
	iph6 = (struct ipv6hdr*)(dp + hlen);

	switch (type) {
	default:
		return;
	case ICMP_PARAMETERPROB:
		if (icmp_hdr(skb)->un.gateway < hlen)
			return;

		/* So... This guy found something strange INSIDE encapsulated
		   packet. Well, he is fool, but what can we do ?
		 */
		rel_type = ICMPV6_PARAMPROB;
		rel_info = icmp_hdr(skb)->un.gateway - hlen;
		break;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		case ICMP_FRAG_NEEDED:
			/* Too complicated case ... */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe, it is just ether pollution. --ANK
			 */
			rel_type = ICMPV6_DEST_UNREACH;
			rel_code = ICMPV6_ADDR_UNREACH;
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		rel_type = ICMPV6_TIME_EXCEED;
		rel_code = ICMPV6_EXC_HOPLIMIT;
		break;
	}

	/* Prepare fake skb to feed it to icmpv6_send */
	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (skb2 == NULL)
		return 0;
	dst_release(skb2->dst);
	skb2->dst = NULL;
	skb_pull(skb2, skb->data - (u8*)iph6);
	skb_reset_network_header(skb2);

	/* Try to guess incoming interface */
	rt6i = rt6_lookup(&iph6->saddr, NULL, NULL, 0);
	if (rt6i && rt6i->rt6i_dev) {
		skb2->dev = rt6i->rt6i_dev;

		rt6i = rt6_lookup(&iph6->daddr, &iph6->saddr, NULL, 0);

		if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) {
			struct ip_tunnel *t = netdev_priv(rt6i->rt6i_dev);
			if (rel_type == ICMPV6_TIME_EXCEED && t->parms.iph.ttl) {
				rel_type = ICMPV6_DEST_UNREACH;
				rel_code = ICMPV6_ADDR_UNREACH;
			}
			icmpv6_send(skb2, rel_type, rel_code, rel_info, skb2->dev);
		}
	}
	kfree_skb(skb2);
	return 0;
#endif
}

static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
{
	if (INET_ECN_is_ce(iph->tos))
		IP6_ECN_set_ce(ipv6_hdr(skb));
}
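/*
 * Receive path.  ipip6_rcv() is invoked by the tunnel handler with
 * skb->data already at the inner IPv6 header, while ip_hdr() still
 * refers to the outer IPv4 header, so the outer source/destination are
 * used to find the owning tunnel.  On a match the inner IPv6 header
 * becomes the network header, the ECN CE mark is propagated from the
 * outer header, and the packet is re-injected via netif_rx() as if it
 * had arrived on the tunnel device.  Without a match an ICMP
 * port-unreachable is returned and the packet is dropped.
 */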
static int ipip6_rcv(struct sk_buff *skb)
{
	struct iphdr *iph;
	struct ip_tunnel *tunnel;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto out;

	iph = ip_hdr(skb);

	read_lock(&ipip6_lock);
	if ((tunnel = ipip6_tunnel_lookup(iph->saddr, iph->daddr)) != NULL) {
		secpath_reset(skb);
		skb->mac_header = skb->network_header;
		skb_reset_network_header(skb);
		IPCB(skb)->flags = 0;
		skb->protocol = htons(ETH_P_IPV6);
		skb->pkt_type = PACKET_HOST;
		tunnel->stat.rx_packets++;
		tunnel->stat.rx_bytes += skb->len;
		skb->dev = tunnel->dev;
		dst_release(skb->dst);
		skb->dst = NULL;
		nf_reset(skb);
		ipip6_ecn_decapsulate(iph, skb);
		netif_rx(skb);
		read_unlock(&ipip6_lock);
		return 0;
	}

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
	kfree_skb(skb);
	read_unlock(&ipip6_lock);
out:
	return 0;
}

/* Returns the embedded IPv4 address if the IPv6 address
   comes from 6to4 (RFC 3056) addr space */

static inline __be32 try_6to4(struct in6_addr *v6dst)
{
	__be32 dst = 0;

	if (v6dst->s6_addr16[0] == htons(0x2002)) {
		/* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */
		memcpy(&dst, &v6dst->s6_addr16[1], 4);
	}
	return dst;
}
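/*
 * Example of the 6to4 mapping used above: a destination such as
 * 2002:c000:0204::1 carries the IPv4 address 192.0.2.4 (0xc0000204) in
 * bits 16..47 of the prefix, so try_6to4() returns 192.0.2.4 and the
 * packet is tunnelled directly to that host.
 */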
/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */

static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net_device_stats *stats = &tunnel->stat;
	struct iphdr *tiph = &tunnel->parms.iph;
	struct ipv6hdr *iph6 = ipv6_hdr(skb);
	u8 tos = tunnel->parms.iph.tos;
	struct rtable *rt;			/* Route to the other host */
	struct net_device *tdev;		/* Device to other host */
	struct iphdr *iph;			/* Our new IP header */
	int max_headroom;			/* The extra header space needed */
	__be32 dst = tiph->daddr;
	int mtu;
	struct in6_addr *addr6;
	int addr_type;

	if (tunnel->recursion++) {
		tunnel->stat.collisions++;
		goto tx_error;
	}

	if (skb->protocol != htons(ETH_P_IPV6))
		goto tx_error;

	if (!dst)
		dst = try_6to4(&iph6->daddr);

	if (!dst) {
		struct neighbour *neigh = NULL;

		if (skb->dst)
			neigh = skb->dst->neighbour;

		if (neigh == NULL) {
			if (net_ratelimit())
				printk(KERN_DEBUG "sit: nexthop == NULL\n");
			goto tx_error;
		}

		addr6 = (struct in6_addr*)&neigh->primary_key;
		addr_type = ipv6_addr_type(addr6);

		if (addr_type == IPV6_ADDR_ANY) {
			addr6 = &ipv6_hdr(skb)->daddr;
			addr_type = ipv6_addr_type(addr6);
		}

		if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
			goto tx_error_icmp;

		dst = addr6->s6_addr32[3];
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = dst,
						.saddr = tiph->saddr,
						.tos = RT_TOS(tos) } },
				    .oif = tunnel->parms.link,
				    .proto = IPPROTO_IPV6 };
		if (ip_route_output_key(&rt, &fl)) {
			tunnel->stat.tx_carrier_errors++;
			goto tx_error_icmp;
		}
	}
	if (rt->rt_type != RTN_UNICAST) {
		ip_rt_put(rt);
		tunnel->stat.tx_carrier_errors++;
		goto tx_error_icmp;
	}
	tdev = rt->u.dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		tunnel->stat.collisions++;
		goto tx_error;
	}

	if (tiph->frag_off)
		mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
	else
		mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;

	if (mtu < 68) {
		tunnel->stat.collisions++;
		ip_rt_put(rt);
		goto tx_error;
	}
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	if (tunnel->parms.iph.daddr && skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);

	if (skb->len > mtu) {
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
		ip_rt_put(rt);
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
			tunnel->err_count--;
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr);

	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			stats->tx_dropped++;
			dev_kfree_skb(skb);
			tunnel->recursion--;
			return 0;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		iph6 = ipv6_hdr(skb);
	}

	skb->transport_header = skb->network_header;
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags = 0;
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/*
	 *	Push down and install the IPIP header.
	 */

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr)>>2;
	if (mtu > IPV6_MIN_MTU)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;

	iph->protocol = IPPROTO_IPV6;
	iph->tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;

	if ((iph->ttl = tiph->ttl) == 0)
		iph->ttl = iph6->hop_limit;

	nf_reset(skb);

	IPTUNNEL_XMIT();
	tunnel->recursion--;
	return 0;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	stats->tx_errors++;
	dev_kfree_skb(skb);
	tunnel->recursion--;
	return 0;
}
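/*
 * Tunnel configuration is driven through the classic ip_tunnel_parm
 * ioctls: SIOCGETTUNNEL reads back the parameters, SIOCADDTUNNEL and
 * SIOCCHGTUNNEL create or update a tunnel (CAP_NET_ADMIN required), and
 * SIOCDELTUNNEL removes one.  iproute2 issues these via commands such as
 * "ip tunnel add sit1 mode sit remote 203.0.113.1 local 198.51.100.2"
 * (addresses here are illustrative).
 */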
static int
ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ipip6_fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipip6_tunnel_locate(&p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPV6 ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		t = ipip6_tunnel_locate(&p, cmd == SIOCADDTUNNEL);

		if (dev != ipip6_fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
				    (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
					err = -EINVAL;
					break;
				}
				t = netdev_priv(dev);
				ipip6_tunnel_unlink(t);
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipip6_tunnel_link(t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ipip6_fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipip6_tunnel_locate(&p, 0)) == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ipip6_fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}

static struct net_device_stats *ipip6_tunnel_get_stats(struct net_device *dev)
{
	return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
}

static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static void ipip6_tunnel_setup(struct net_device *dev)
{
	SET_MODULE_OWNER(dev);
	dev->uninit		= ipip6_tunnel_uninit;
	dev->destructor		= free_netdev;
	dev->hard_start_xmit	= ipip6_tunnel_xmit;
	dev->get_stats		= ipip6_tunnel_get_stats;
	dev->do_ioctl		= ipip6_tunnel_ioctl;
	dev->change_mtu		= ipip6_tunnel_change_mtu;

	dev->type		= ARPHRD_SIT;
	dev->hard_header_len	= LL_MAX_HEADER + sizeof(struct iphdr);
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr);
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;
}

static int ipip6_tunnel_init(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	struct iphdr *iph;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	if (iph->daddr) {
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.saddr = iph->saddr,
						.tos = RT_TOS(iph->tos) } },
				    .oif = tunnel->parms.link,
				    .proto = IPPROTO_IPV6 };
		struct rtable *rt;
		if (!ip_route_output_key(&rt, &fl)) {
			tdev = rt->u.dst.dev;
			ip_rt_put(rt);
		}
		dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(tunnel->parms.link);

	if (tdev) {
		dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
		dev->mtu = tdev->mtu - sizeof(struct iphdr);
		if (dev->mtu < IPV6_MIN_MTU)
			dev->mtu = IPV6_MIN_MTU;
	}
	dev->iflink = tunnel->parms.link;

	return 0;
}
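/*
 * sit0, the fallback device, is the wildcard tunnel: it has no fixed
 * endpoints, catches any sit packet not claimed by a more specific
 * tunnel, and is also the device through which new tunnels are created
 * via SIOCADDTUNNEL.
 */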
static int __init ipip6_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version = 4;
	iph->protocol = IPPROTO_IPV6;
	iph->ihl = 5;
	iph->ttl = 64;

	dev_hold(dev);
	tunnels_wc[0] = tunnel;
	return 0;
}

static struct xfrm_tunnel sit_handler = {
	.handler	= ipip6_rcv,
	.err_handler	= ipip6_err,
	.priority	= 1,
};

static void __exit sit_destroy_tunnels(void)
{
	int prio;

	for (prio = 1; prio < 4; prio++) {
		int h;
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip_tunnel *t;
			while ((t = tunnels[prio][h]) != NULL)
				unregister_netdevice(t->dev);
		}
	}
}

static void __exit sit_cleanup(void)
{
	xfrm4_tunnel_deregister(&sit_handler, AF_INET6);

	rtnl_lock();
	sit_destroy_tunnels();
	unregister_netdevice(ipip6_fb_tunnel_dev);
	rtnl_unlock();
}

static int __init sit_init(void)
{
	int err;

	printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n");

	if (xfrm4_tunnel_register(&sit_handler, AF_INET6) < 0) {
		printk(KERN_INFO "sit init: Can't add protocol\n");
		return -EAGAIN;
	}

	ipip6_fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "sit0",
					   ipip6_tunnel_setup);
	if (!ipip6_fb_tunnel_dev) {
		err = -ENOMEM;
		goto err1;
	}

	ipip6_fb_tunnel_dev->init = ipip6_fb_tunnel_init;

	if ((err = register_netdev(ipip6_fb_tunnel_dev)))
		goto err2;

 out:
	return err;
 err2:
	free_netdev(ipip6_fb_tunnel_dev);
 err1:
	xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
	goto out;
}

module_init(sit_init);
module_exit(sit_cleanup);
MODULE_LICENSE("GPL");
MODULE_ALIAS("sit0");