/*
 * IPv6 virtual tunneling interface
 *
 * Copyright (C) 2013 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Based on:
 * net/ipv6/ip6_tunnel.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define HASH_SIZE_SHIFT  5
#define HASH_SIZE (1 << HASH_SIZE_SHIFT)

static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);

	return hash_32(hash, HASH_SIZE_SHIFT);
}

static int vti6_dev_init(struct net_device *dev);
static void vti6_dev_setup(struct net_device *dev);
static struct rtnl_link_ops vti6_link_ops __read_mostly;

static int vti6_net_id __read_mostly;
struct vti6_net {
	/* the vti6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
};

#define for_each_vti6_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))

/**
 * vti6_tnl_lookup - fetch tunnel matching the end-point addresses
 *   @net: network namespace
 *   @remote: the address of the tunnel exit-point
 *   @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/
static struct ip6_tnl *
vti6_tnl_lookup(struct net *net, const struct in6_addr *remote,
		const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t;
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);

	for_each_vti6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}
	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}
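/*
 * Note on the list layout (see vti6_init_net()): ip6n->tnls[0] points at
 * tnls_wc, which only ever holds the per-namespace fallback device, while
 * ip6n->tnls[1] points at the hash table tnls_r_l keyed by
 * HASH(remote, local).  vti6_tnl_bucket() below selects the list head from
 * that layout, and vti6_tnl_lookup() above walks the hashed list first and
 * falls back to the wildcard entry.
 */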
/**
 * vti6_tnl_bucket - get head of list matching given tunnel parameters
 *   @ip6n: the private data of the vti6 network namespace
 *   @p: parameters containing tunnel end-points
 *
 * Description:
 *   vti6_tnl_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/
static struct ip6_tnl __rcu **
vti6_tnl_bucket(struct vti6_net *ip6n, const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote, local);
	}
	return &ip6n->tnls[prio][h];
}

static void
vti6_tnl_link(struct vti6_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = vti6_tnl_bucket(ip6n, &t->parms);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

static void
vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	for (tp = vti6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static void vti6_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
	free_netdev(dev);
}

static int vti6_tnl_create2(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
	int err;

	err = vti6_dev_init(dev);
	if (err < 0)
		goto out;

	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(t->parms.name, dev->name);
	dev->rtnl_link_ops = &vti6_link_ops;

	dev_hold(dev);
	vti6_tnl_link(ip6n, t);

	return 0;

out:
	return err;
}

static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err;

	if (p->name[0])
		strlcpy(name, p->name, IFNAMSIZ);
	else
		sprintf(name, "ip6_vti%%d");

	dev = alloc_netdev(sizeof(*t), name, vti6_dev_setup);
	if (dev == NULL)
		goto failed;

	dev_net_set(dev, net);

	t = netdev_priv(dev);
	t->parms = *p;
	t->net = dev_net(dev);

	err = vti6_tnl_create2(dev);
	if (err < 0)
		goto failed_free;

	return t;

failed_free:
	vti6_dev_free(dev);
failed:
	return NULL;
}
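/*
 * Note: vti6_tnl_create2() is the common tail of device creation.  It is
 * reached both from the ioctl path (vti6_locate() -> vti6_tnl_create())
 * and from the netlink path (vti6_newlink()); in either case the device
 * ends up registered, linked into the per-namespace lists and holding a
 * reference via dev_hold().
 */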
/**
 * vti6_locate - find or create tunnel matching given parameters
 *   @net: network namespace
 *   @p: tunnel parameters
 *   @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *   vti6_locate() first tries to locate an existing tunnel
 *   based on @p. If this is unsuccessful, but @create is set, a new
 *   tunnel device is created and registered for use.
 *
 * Return:
 *   matching tunnel or NULL
 **/
static struct ip6_tnl *vti6_locate(struct net *net, struct __ip6_tnl_parm *p,
				   int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);

	for (tp = vti6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr))
			return t;
	}
	if (!create)
		return NULL;
	return vti6_tnl_create(net, p);
}

/**
 * vti6_dev_uninit - tunnel device uninitializer
 *   @dev: the device to be destroyed
 *
 * Description:
 *   vti6_dev_uninit() removes tunnel from its list
 **/
static void vti6_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);

	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		vti6_tnl_unlink(ip6n, t);
	dev_put(dev);
}

static int vti6_rcv(struct sk_buff *skb)
{
	struct ip6_tnl *t;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);

	rcu_read_lock();
	if ((t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
				 &ipv6h->daddr)) != NULL) {
		if (t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) {
			rcu_read_unlock();
			goto discard;
		}

		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
			rcu_read_unlock();
			return 0;
		}

		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
			t->dev->stats.rx_dropped++;
			rcu_read_unlock();
			goto discard;
		}

		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
		skb->mark = be32_to_cpu(t->parms.i_key);

		rcu_read_unlock();

		return xfrm6_rcv(skb);
	}
	rcu_read_unlock();
	return -EINVAL;
discard:
	kfree_skb(skb);
	return 0;
}

static int vti6_rcv_cb(struct sk_buff *skb, int err)
{
	unsigned short family;
	struct net_device *dev;
	struct pcpu_sw_netstats *tstats;
	struct xfrm_state *x;
	struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6;

	if (!t)
		return 1;

	dev = t->dev;

	if (err) {
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;

		return 0;
	}

	x = xfrm_input_state(skb);
	family = x->inner_mode->afinfo->family;

	if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
		return -EPERM;

	skb_scrub_packet(skb, !net_eq(t->net, dev_net(skb->dev)));
	skb->dev = dev;

	tstats = this_cpu_ptr(dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	return 0;
}
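/*
 * Receive path note: vti6_rcv() only validates the tunnel, tags the skb
 * with the tunnel pointer and the i_key-derived mark, and hands the packet
 * to xfrm6_rcv().  The per-device statistics and the final inner policy
 * check happen later in vti6_rcv_cb(), once the transforms have been
 * applied.
 */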
/**
 * vti6_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   true if conflict,
 *   false otherwise
 **/
static inline bool
vti6_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

static bool vti6_state_check(const struct xfrm_state *x,
			     const struct in6_addr *dst,
			     const struct in6_addr *src)
{
	xfrm_address_t *daddr = (xfrm_address_t *)dst;
	xfrm_address_t *saddr = (xfrm_address_t *)src;

	/* if there is no transform then this tunnel is not functional.
	 * Or if the xfrm is not mode tunnel.
	 */
	if (!x || x->props.mode != XFRM_MODE_TUNNEL ||
	    x->props.family != AF_INET6)
		return false;

	if (ipv6_addr_any(dst))
		return xfrm_addr_equal(saddr, &x->props.saddr, AF_INET6);

	if (!xfrm_state_addr_check(x, daddr, saddr, AF_INET6))
		return false;

	return true;
}

/**
 * vti6_xmit - send a packet
 *   @skb: the outgoing socket buffer
 *   @dev: the outgoing tunnel device
 *   @fl: the flow information for the xfrm_lookup
 **/
static int
vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *tdev;
	int err = -1;

	if (!dst)
		goto tx_err_link_failure;

	dst_hold(dst);
	dst = xfrm_lookup(t->net, dst, fl, NULL, 0);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto tx_err_link_failure;
	}

	if (!vti6_state_check(dst->xfrm, &t->parms.raddr, &t->parms.laddr))
		goto tx_err_link_failure;

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     t->parms.name);
		goto tx_err_dst_release;
	}

	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
	skb_dst_set(skb, dst);
	skb->dev = skb_dst(skb)->dev;

	err = dst_output(skb);
	if (net_xmit_eval(err) == 0) {
		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += skb->len;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
	} else {
		stats->tx_errors++;
		stats->tx_aborted_errors++;
	}

	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}
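/*
 * Key/mark convention: the tunnel keys double as the skb mark used for the
 * xfrm lookups.  vti6_rcv() sets skb->mark from i_key before xfrm6_rcv(),
 * and vti6_tnl_xmit() below sets it from o_key before
 * xfrm_decode_session()/xfrm_lookup().  IPsec policies and states intended
 * for a vti6 device therefore typically have to be installed with a
 * matching mark.
 */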
static netdev_tx_t
vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	struct ipv6hdr *ipv6h;
	struct flowi fl;
	int ret;

	memset(&fl, 0, sizeof(fl));
	skb->mark = be32_to_cpu(t->parms.o_key);

	switch (skb->protocol) {
	case htons(ETH_P_IPV6):
		ipv6h = ipv6_hdr(skb);

		if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
		    !ip6_tnl_xmit_ctl(t) || vti6_addr_conflict(t, ipv6h))
			goto tx_err;

		xfrm_decode_session(skb, &fl, AF_INET6);
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
		break;
	case htons(ETH_P_IP):
		xfrm_decode_session(skb, &fl, AF_INET);
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		break;
	default:
		goto tx_err;
	}

	ret = vti6_xmit(skb, dev, &fl);
	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	__be32 spi;
	__u32 mark;
	struct xfrm_state *x;
	struct ip6_tnl *t;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
	struct ip_comp_hdr *ipch;
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	int protocol = iph->nexthdr;

	t = vti6_tnl_lookup(dev_net(skb->dev), &iph->daddr, &iph->saddr);
	if (!t)
		return -1;

	mark = be32_to_cpu(t->parms.o_key);

	switch (protocol) {
	case IPPROTO_ESP:
		esph = (struct ip_esp_hdr *)(skb->data + offset);
		spi = esph->spi;
		break;
	case IPPROTO_AH:
		ah = (struct ip_auth_hdr *)(skb->data + offset);
		spi = ah->spi;
		break;
	case IPPROTO_COMP:
		ipch = (struct ip_comp_hdr *)(skb->data + offset);
		spi = htonl(ntohs(ipch->cpi));
		break;
	default:
		return 0;
	}

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
			      spi, protocol, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0);
	else
		ip6_update_pmtu(skb, net, info, 0, 0);
	xfrm_state_put(x);

	return 0;
}
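/*
 * Error-handler note: vti6_err() only reacts to ICMPV6_PKT_TOOBIG and
 * NDISC_REDIRECT.  It looks up the affected state by the SPI carried in
 * the ESP/AH/IPComp header and the o_key-derived mark (the same mark used
 * on transmit), and then updates the PMTU or redirect information; all
 * other ICMPv6 errors are ignored here.
 */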
static void vti6_link_config(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;

	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));

	p->flags &= ~(IP6_TNL_F_CAP_XMIT | IP6_TNL_F_CAP_RCV |
		      IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags & IP6_TNL_F_CAP_XMIT && p->flags & IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	dev->iflink = p->link;
}

/**
 * vti6_tnl_change - update the tunnel parameters
 *   @t: tunnel to be changed
 *   @p: tunnel configuration parameters
 *
 * Description:
 *   vti6_tnl_change() updates the tunnel parameters
 **/
static int
vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.link = p->link;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	t->parms.proto = p->proto;
	ip6_tnl_dst_reset(t);
	vti6_link_config(t);
	return 0;
}

static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	struct net *net = dev_net(t->dev);
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
	int err;

	vti6_tnl_unlink(ip6n, t);
	synchronize_net();
	err = vti6_tnl_change(t, p);
	vti6_tnl_link(ip6n, t);
	netdev_state_change(t->dev);
	return err;
}

static void
vti6_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm2 *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->link = u->link;
	p->i_key = u->i_key;
	p->o_key = u->o_key;
	p->proto = u->proto;

	memcpy(p->name, u->name, sizeof(u->name));
}

static void
vti6_parm_to_user(struct ip6_tnl_parm2 *u, const struct __ip6_tnl_parm *p)
{
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->link = p->link;
	u->i_key = p->i_key;
	u->o_key = p->o_key;
	u->proto = p->proto;

	memcpy(u->name, p->name, sizeof(u->name));
}

/**
 * vti6_ioctl - configure vti6 tunnels from userspace
 *   @dev: virtual device associated with tunnel
 *   @ifr: parameters passed from userspace
 *   @cmd: command to be performed
 *
 * Description:
 *   vti6_ioctl() is used for managing vti6 tunnels
 *   from userspace.
 *
 *   The possible commands are the following:
 *     %SIOCGETTUNNEL: get tunnel parameters for device
 *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
 *     %SIOCCHGTUNNEL: change tunnel parameters to those given
 *     %SIOCDELTUNNEL: delete tunnel
 *
 *   The fallback device "ip6_vti0", created during module
 *   initialization, can be used for creating other tunnel devices.
 *
 * Return:
 *   0 on success,
 *   %-EFAULT if unable to copy data to or from userspace,
 *   %-EPERM if current process hasn't %CAP_NET_ADMIN set,
 *   %-EINVAL if passed tunnel parameters are invalid,
 *   %-EEXIST if changing a tunnel's parameters would cause a conflict,
 *   %-ENODEV if attempting to change or delete a nonexisting device
 **/
static int
vti6_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm2 p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = NULL;
	struct net *net = dev_net(dev);
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ip6n->fb_tnl_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			vti6_parm_from_user(&p1, &p);
			t = vti6_locate(net, &p1, 0);
		} else {
			memset(&p, 0, sizeof(p));
		}
		if (t == NULL)
			t = netdev_priv(dev);
		vti6_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			break;
		err = -EINVAL;
		if (p.proto != IPPROTO_IPV6 && p.proto != 0)
			break;
		vti6_parm_from_user(&p1, &p);
		t = vti6_locate(net, &p1, cmd == SIOCADDTUNNEL);
		if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else
				t = netdev_priv(dev);

			err = vti6_update(t, &p1);
		}
		if (t) {
			err = 0;
			vti6_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;

		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;

		if (dev == ip6n->fb_tnl_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				break;
			err = -ENOENT;
			vti6_parm_from_user(&p1, &p);
			t = vti6_locate(net, &p1, 0);
			if (t == NULL)
				break;
			err = -EPERM;
			if (t->dev == ip6n->fb_tnl_dev)
				break;
			dev = t->dev;
		}
		err = 0;
		unregister_netdevice(dev);
		break;
	default:
		err = -EINVAL;
	}
	return err;
}
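/*
 * A minimal userspace sketch of the ioctl path above (illustrative only;
 * the device name, addresses and key values are made up).  The request is
 * issued against the fallback device "ip6_vti0" with a struct
 * ip6_tnl_parm2 describing the new tunnel, which is what vti6_ioctl()
 * expects in ifr_data:
 *
 *	struct ip6_tnl_parm2 p = { .proto = IPPROTO_IPV6 };
 *	struct ifreq ifr = { .ifr_ifru.ifru_data = (void *)&p };
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	strcpy(p.name, "vti6tun0");
 *	inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
 *	inet_pton(AF_INET6, "2001:db8::2", &p.raddr);
 *	p.i_key = htonl(10);
 *	p.o_key = htonl(10);
 *	strcpy(ifr.ifr_name, "ip6_vti0");
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);
 */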
/**
 * vti6_change_mtu - change mtu manually for tunnel device
 *   @dev: virtual device associated with tunnel
 *   @new_mtu: the new mtu
 *
 * Return:
 *   0 on success,
 *   %-EINVAL if mtu too small
 **/
static int vti6_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < IPV6_MIN_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops vti6_netdev_ops = {
	.ndo_uninit	 = vti6_dev_uninit,
	.ndo_start_xmit  = vti6_tnl_xmit,
	.ndo_do_ioctl	 = vti6_ioctl,
	.ndo_change_mtu  = vti6_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
};

/**
 * vti6_dev_setup - setup virtual tunnel device
 *   @dev: virtual device associated with tunnel
 *
 * Description:
 *   Initialize function pointers and device parameters
 **/
static void vti6_dev_setup(struct net_device *dev)
{
	dev->netdev_ops = &vti6_netdev_ops;
	dev->destructor = vti6_dev_free;

	dev->type = ARPHRD_TUNNEL6;
	dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
	dev->mtu = ETH_DATA_LEN;
	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}

/**
 * vti6_dev_init_gen - general initializer for all tunnel devices
 *   @dev: virtual device associated with tunnel
 **/
static inline int vti6_dev_init_gen(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	t->dev = dev;
	t->net = dev_net(dev);
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;
	return 0;
}

/**
 * vti6_dev_init - initializer for all non fallback tunnel devices
 *   @dev: virtual device associated with tunnel
 **/
static int vti6_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int err = vti6_dev_init_gen(dev);

	if (err)
		return err;
	vti6_link_config(t);
	return 0;
}

/**
 * vti6_fb_tnl_dev_init - initializer for fallback tunnel device
 *   @dev: fallback device
 *
 * Return: 0
 **/
static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
	int err = vti6_dev_init_gen(dev);

	if (err)
		return err;

	t->parms.proto = IPPROTO_IPV6;
	dev_hold(dev);

	vti6_link_config(t);

	rcu_assign_pointer(ip6n->tnls_wc[0], t);
	return 0;
}

static int vti6_validate(struct nlattr *tb[], struct nlattr *data[])
{
	return 0;
}

static void vti6_netlink_parms(struct nlattr *data[],
			       struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_VTI_LINK])
		parms->link = nla_get_u32(data[IFLA_VTI_LINK]);

	if (data[IFLA_VTI_LOCAL])
		nla_memcpy(&parms->laddr, data[IFLA_VTI_LOCAL],
			   sizeof(struct in6_addr));

	if (data[IFLA_VTI_REMOTE])
		nla_memcpy(&parms->raddr, data[IFLA_VTI_REMOTE],
			   sizeof(struct in6_addr));

	if (data[IFLA_VTI_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);

	if (data[IFLA_VTI_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);
}
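/*
 * The netlink attributes parsed above feed the rtnl_link_ops further down,
 * so a vti6 device can also be created without the ioctl interface.  With
 * a reasonably recent iproute2 this would look roughly like (illustrative
 * addresses and key values):
 *
 *	ip -6 link add vti6tun0 type vti6 \
 *		local 2001:db8::1 remote 2001:db8::2 ikey 10 okey 10
 */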
static int vti6_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	struct net *net = dev_net(dev);
	struct ip6_tnl *nt;

	nt = netdev_priv(dev);
	vti6_netlink_parms(data, &nt->parms);

	nt->parms.proto = IPPROTO_IPV6;

	if (vti6_locate(net, &nt->parms, 0))
		return -EEXIST;

	return vti6_tnl_create2(dev);
}

static int vti6_changelink(struct net_device *dev, struct nlattr *tb[],
			   struct nlattr *data[])
{
	struct ip6_tnl *t;
	struct __ip6_tnl_parm p;
	struct net *net = dev_net(dev);
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);

	if (dev == ip6n->fb_tnl_dev)
		return -EINVAL;

	vti6_netlink_parms(data, &p);

	t = vti6_locate(net, &p, 0);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else
		t = netdev_priv(dev);

	return vti6_update(t, &p);
}

static size_t vti6_get_size(const struct net_device *dev)
{
	return
		/* IFLA_VTI_LINK */
		nla_total_size(4) +
		/* IFLA_VTI_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_VTI_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_VTI_IKEY */
		nla_total_size(4) +
		/* IFLA_VTI_OKEY */
		nla_total_size(4) +
		0;
}

static int vti6_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	struct __ip6_tnl_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_VTI_LINK, parm->link) ||
	    nla_put(skb, IFLA_VTI_LOCAL, sizeof(struct in6_addr),
		    &parm->laddr) ||
	    nla_put(skb, IFLA_VTI_REMOTE, sizeof(struct in6_addr),
		    &parm->raddr) ||
	    nla_put_be32(skb, IFLA_VTI_IKEY, parm->i_key) ||
	    nla_put_be32(skb, IFLA_VTI_OKEY, parm->o_key))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
	[IFLA_VTI_LINK]   = { .type = NLA_U32 },
	[IFLA_VTI_LOCAL]  = { .len = sizeof(struct in6_addr) },
	[IFLA_VTI_REMOTE] = { .len = sizeof(struct in6_addr) },
	[IFLA_VTI_IKEY]   = { .type = NLA_U32 },
	[IFLA_VTI_OKEY]   = { .type = NLA_U32 },
};

static struct rtnl_link_ops vti6_link_ops __read_mostly = {
	.kind		= "vti6",
	.maxtype	= IFLA_VTI_MAX,
	.policy		= vti6_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= vti6_dev_setup,
	.validate	= vti6_validate,
	.newlink	= vti6_newlink,
	.changelink	= vti6_changelink,
	.get_size	= vti6_get_size,
	.fill_info	= vti6_fill_info,
};

static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n)
{
	int h;
	struct ip6_tnl *t;
	LIST_HEAD(list);

	for (h = 0; h < HASH_SIZE; h++) {
		t = rtnl_dereference(ip6n->tnls_r_l[h]);
		while (t != NULL) {
			unregister_netdevice_queue(t->dev, &list);
			t = rtnl_dereference(t->next);
		}
	}

	t = rtnl_dereference(ip6n->tnls_wc[0]);
	unregister_netdevice_queue(t->dev, &list);
	unregister_netdevice_many(&list);
}
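/*
 * Per-namespace setup: every network namespace gets its own vti6_net via
 * net_generic(), including a private fallback device "ip6_vti0" and its
 * own tunnel hash table.  The teardown path above batches all device
 * removals into a single unregister_netdevice_many() call under RTNL.
 */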
static int __net_init vti6_init_net(struct net *net)
{
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
	struct ip6_tnl *t = NULL;
	int err;

	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6_vti0",
					vti6_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;
	dev_net_set(ip6n->fb_tnl_dev, net);

	err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	t = netdev_priv(ip6n->fb_tnl_dev);

	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
	return 0;

err_register:
	vti6_dev_free(ip6n->fb_tnl_dev);
err_alloc_dev:
	return err;
}

static void __net_exit vti6_exit_net(struct net *net)
{
	struct vti6_net *ip6n = net_generic(net, vti6_net_id);

	rtnl_lock();
	vti6_destroy_tunnels(ip6n);
	rtnl_unlock();
}

static struct pernet_operations vti6_net_ops = {
	.init = vti6_init_net,
	.exit = vti6_exit_net,
	.id   = &vti6_net_id,
	.size = sizeof(struct vti6_net),
};

static struct xfrm6_protocol vti_esp6_protocol __read_mostly = {
	.handler	= vti6_rcv,
	.cb_handler	= vti6_rcv_cb,
	.err_handler	= vti6_err,
	.priority	= 100,
};

static struct xfrm6_protocol vti_ah6_protocol __read_mostly = {
	.handler	= vti6_rcv,
	.cb_handler	= vti6_rcv_cb,
	.err_handler	= vti6_err,
	.priority	= 100,
};

static struct xfrm6_protocol vti_ipcomp6_protocol __read_mostly = {
	.handler	= vti6_rcv,
	.cb_handler	= vti6_rcv_cb,
	.err_handler	= vti6_err,
	.priority	= 100,
};

/**
 * vti6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/
static int __init vti6_tunnel_init(void)
{
	int err;

	err = register_pernet_device(&vti6_net_ops);
	if (err < 0)
		goto out_pernet;

	err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP);
	if (err < 0) {
		pr_err("%s: can't register vti6 protocol\n", __func__);

		goto out;
	}

	err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH);
	if (err < 0) {
		xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
		pr_err("%s: can't register vti6 protocol\n", __func__);

		goto out;
	}

	err = xfrm6_protocol_register(&vti_ipcomp6_protocol, IPPROTO_COMP);
	if (err < 0) {
		xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
		xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
		pr_err("%s: can't register vti6 protocol\n", __func__);

		goto out;
	}

	err = rtnl_link_register(&vti6_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return 0;

rtnl_link_failed:
	xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP);
	xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
	xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
out:
	unregister_pernet_device(&vti6_net_ops);
out_pernet:
	return err;
}
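/*
 * Module init registers the pernet ops first and then the ESP, AH and
 * IPComp xfrm6 handlers; the cleanup path below undoes the same steps in
 * reverse order (rtnl link ops, the three protocol handlers, then the
 * pernet ops).
 */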
/**
 * vti6_tunnel_cleanup - free resources and unregister protocol
 **/
static void __exit vti6_tunnel_cleanup(void)
{
	rtnl_link_unregister(&vti6_link_ops);
	if (xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP))
		pr_info("%s: can't deregister protocol\n", __func__);
	if (xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH))
		pr_info("%s: can't deregister protocol\n", __func__);
	if (xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP))
		pr_info("%s: can't deregister protocol\n", __func__);

	unregister_pernet_device(&vti6_net_ops);
}

module_init(vti6_tunnel_init);
module_exit(vti6_tunnel_cleanup);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("vti6");
MODULE_ALIAS_NETDEV("ip6_vti0");
MODULE_AUTHOR("Steffen Klassert");
MODULE_DESCRIPTION("IPv6 virtual tunnel interface");