/*
 * IPv6 tunneling device
 * Linux INET6 implementation
 *
 * Authors:
 * Ville Nuorvala		<vnuorval@tcs.hut.fi>
 * Yasuyuki Kozakai		<kozakai@linux-ipv6.org>
 *
 * Based on:
 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
 *
 * RFC 2473
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/etherdevice.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ip6tnl");
MODULE_ALIAS_NETDEV("ip6tnl0");

#define HASH_SIZE_SHIFT  5
#define HASH_SIZE (1 << HASH_SIZE_SHIFT)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);

	return hash_32(hash, HASH_SIZE_SHIFT);
}

static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
static struct rtnl_link_ops ip6_link_ops __read_mostly;

static int ip6_tnl_net_id __read_mostly;
struct ip6_tnl_net {
	/* the IPv6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
};

static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
	struct pcpu_sw_netstats tmp, sum = { 0 };
	int i;

	for_each_possible_cpu(i) {
		unsigned int start;
		const struct pcpu_sw_netstats *tstats =
						   per_cpu_ptr(dev->tstats, i);

		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			tmp.rx_packets = tstats->rx_packets;
			tmp.rx_bytes = tstats->rx_bytes;
			tmp.tx_packets = tstats->tx_packets;
			tmp.tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

		sum.rx_packets += tmp.rx_packets;
		sum.rx_bytes += tmp.rx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.tx_bytes += tmp.tx_bytes;
	}
	dev->stats.rx_packets = sum.rx_packets;
	dev->stats.rx_bytes = sum.rx_bytes;
	dev->stats.tx_packets = sum.tx_packets;
	dev->stats.tx_bytes = sum.tx_bytes;
	return &dev->stats;
}

/**
 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 *   @remote: the address of the tunnel exit-point
 *   @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/

#define for_each_ip6_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))

static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct in6_addr any;

	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	memset(&any, 0, sizeof(any));
	hash = HASH(&any, local);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	hash = HASH(remote, &any);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}

/**
 * ip6_tnl_bucket - get head of list matching given tunnel parameters
 *   @p: parameters containing tunnel end-points
 *
 * Description:
 *   ip6_tnl_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/

static struct ip6_tnl __rcu **
ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote, local);
	}
	return &ip6n->tnls[prio][h];
}

/**
 * ip6_tnl_link - add tunnel to hash table
 *   @t: tunnel to be added
 **/

static void
ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

/**
 * ip6_tnl_unlink - remove tunnel from hash table
 *   @t: tunnel to be removed
 **/

static void
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static void ip6_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
	free_percpu(dev->tstats);
	free_netdev(dev);
}

static int ip6_tnl_create2(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	t = netdev_priv(dev);

	dev->rtnl_link_ops = &ip6_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(t->parms.name, dev->name);

	dev_hold(dev);
	ip6_tnl_link(ip6n, t);
	return 0;

out:
	return err;
}

/**
 * ip6_tnl_create - create a new tunnel
 *   @p: tunnel parameters
 *   @pt: pointer to new tunnel
 *
 * Description:
 *   Create tunnel matching given parameters.
 *
 * Return:
 *   created tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err = -ENOMEM;

	if (p->name[0])
		strlcpy(name, p->name, IFNAMSIZ);
	else
		sprintf(name, "ip6tnl%%d");

	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6_tnl_dev_setup);
	if (!dev)
		goto failed;

	dev_net_set(dev, net);

	t = netdev_priv(dev);
	t->parms = *p;
	t->net = dev_net(dev);
	err = ip6_tnl_create2(dev);
	if (err < 0)
		goto failed_free;

	return t;

failed_free:
	ip6_dev_free(dev);
failed:
	return ERR_PTR(err);
}

/**
 * ip6_tnl_locate - find or create tunnel matching given parameters
 *   @p: tunnel parameters
 *   @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *   ip6_tnl_locate() first tries to locate an existing tunnel
 *   based on @p. If this is unsuccessful, but @create is set a new
 *   tunnel device is created and registered for use.
 *
 * Return:
 *   matching tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_locate(struct net *net,
		struct __ip6_tnl_parm *p, int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	for (tp = ip6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr)) {
			if (create)
				return ERR_PTR(-EEXIST);

			return t;
		}
	}
	if (!create)
		return ERR_PTR(-ENODEV);
	return ip6_tnl_create(net, p);
}

/**
 * ip6_tnl_dev_uninit - tunnel device uninitializer
 *   @dev: the device to be destroyed
 *
 * Description:
 *   ip6_tnl_dev_uninit() removes tunnel from its list
 **/

static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		ip6_tnl_unlink(ip6n, t);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}

/**
 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
 *   @skb: received socket buffer
 *
 * Return:
 *   0 if none was found,
 *   else index to encapsulation limit
 **/

__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
	__u8 nexthdr = ipv6h->nexthdr;
	__u16 off = sizeof(*ipv6h);

	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
		__u16 optlen = 0;
		struct ipv6_opt_hdr *hdr;

		if (raw + off + sizeof(*hdr) > skb->data &&
		    !pskb_may_pull(skb, raw - skb->data + off + sizeof(*hdr)))
			break;

		hdr = (struct ipv6_opt_hdr *)(raw + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag_hdr = (struct frag_hdr *)hdr;

			if (frag_hdr->frag_off)
				break;
			optlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			optlen = (hdr->hdrlen + 2) << 2;
		} else {
			optlen = ipv6_optlen(hdr);
		}
		if (nexthdr == NEXTHDR_DEST) {
			__u16 i = off + 2;

			while (1) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				/* No more room for encapsulation limit */
				if (i + sizeof(*tel) > off + optlen)
					break;

				tel = (struct ipv6_tlv_tnl_enc_lim *)&raw[i];
				/* return index of option if found and valid */
				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
				    tel->length == 1)
					return i;
				/* else jump to next option */
				if (tel->type)
					i += tel->length + 2;
				else
					i++;
			}
		}
		nexthdr = hdr->nexthdr;
		off += optlen;
	}
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);

/**
 * ip6_tnl_err - tunnel error handler
 *
 * Description:
 *   ip6_tnl_err() should handle errors in the tunnel according
 *   to the specifications in RFC 2473.
 **/

static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
	struct ip6_tnl *t;
	int rel_msg = 0;
	u8 rel_type = ICMPV6_DEST_UNREACH;
	u8 rel_code = ICMPV6_ADDR_UNREACH;
	u8 tproto;
	__u32 rel_info = 0;
	__u16 len;
	int err = -ENOENT;

	/* If the packet doesn't contain the original IPv6 header we are
	   in trouble since we might need the source address for further
	   processing of the error. */

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
	if (!t)
		goto out;

	tproto = ACCESS_ONCE(t->parms.proto);
	if (tproto != ipproto && tproto != 0)
		goto out;

	err = 0;

	switch (*type) {
		__u32 teli;
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 mtu;
	case ICMPV6_DEST_UNREACH:
		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
				    t->parms.name);
		rel_msg = 1;
		break;
	case ICMPV6_TIME_EXCEED:
		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					    t->parms.name);
			rel_msg = 1;
		}
		break;
	case ICMPV6_PARAMPROB:
		teli = 0;
		if ((*code) == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == *info - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						    t->parms.name);
				rel_msg = 1;
			}
		} else {
			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					    t->parms.name);
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		mtu = *info - offset;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		t->dev->mtu = mtu;

		len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
		if (len > mtu) {
			rel_type = ICMPV6_PKT_TOOBIG;
			rel_code = 0;
			rel_info = mtu;
			rel_msg = 1;
		}
		break;
	}

	*type = rel_type;
	*code = rel_code;
	*info = rel_info;
	*msg = rel_msg;

out:
	rcu_read_unlock();
	return err;
}

static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	int rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	__u32 rel_info = ntohl(info);
	int err;
	struct sk_buff *skb2;
	const struct iphdr *eiph;
	struct rtable *rt;
	struct flowi4 fl4;

	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg == 0)
		return 0;

	switch (rel_type) {
	case ICMPV6_DEST_UNREACH:
		if (rel_code != ICMPV6_ADDR_UNREACH)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_HOST_UNREACH;
		break;
	case ICMPV6_PKT_TOOBIG:
		if (rel_code != 0)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_FRAG_NEEDED;
		break;
	case NDISC_REDIRECT:
		rel_type = ICMP_REDIRECT;
		rel_code = ICMP_REDIR_HOST;
	default:
		return 0;
	}

	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
		return 0;

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return 0;

	skb_dst_drop(skb2);

	skb_pull(skb2, offset);
	skb_reset_network_header(skb2);
	eiph = ip_hdr(skb2);

	/* Try to guess incoming interface */
	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
				   eiph->saddr, 0,
				   0, 0,
				   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
	if (IS_ERR(rt))
		goto out;

	skb2->dev = rt->dst.dev;

	/* route "incoming" packet */
	if (rt->rt_flags & RTCF_LOCAL) {
		ip_rt_put(rt);
		rt = NULL;
		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
					   eiph->daddr, eiph->saddr,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(eiph->tos), 0);
		if (IS_ERR(rt) ||
		    rt->dst.dev->type != ARPHRD_TUNNEL) {
			if (!IS_ERR(rt))
				ip_rt_put(rt);
			goto out;
		}
		skb_dst_set(skb2, &rt->dst);
	} else {
		ip_rt_put(rt);
		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
				   skb2->dev) ||
		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
			goto out;
	}

	/* change mtu on this route */
	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
		if (rel_info > dst_mtu(skb_dst(skb2)))
			goto out;

		skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info);
	}
	if (rel_type == ICMP_REDIRECT)
		skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);

	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));

out:
	kfree_skb(skb2);
	return 0;
}

static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	int rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	__u32 rel_info = ntohl(info);
	int err;

	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
		struct rt6_info *rt;
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		if (!skb2)
			return 0;

		skb_dst_drop(skb2);
		skb_pull(skb2, offset);
		skb_reset_network_header(skb2);

		/* Try to guess incoming interface */
		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
				NULL, 0, 0);

		if (rt && rt->dst.dev)
			skb2->dev = rt->dst.dev;

		icmpv6_send(skb2, rel_type, rel_code, rel_info);

		ip6_rt_put(rt);

		kfree_skb(skb2);
	}

	return 0;
}

static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;

	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);

	return IP6_ECN_decapsulate(ipv6h, skb);
}

static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));

	return IP6_ECN_decapsulate(ipv6h, skb);
}

__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
		      const struct in6_addr *laddr,
		      const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ltype = ipv6_addr_type(laddr);
	int rtype = ipv6_addr_type(raddr);
	__u32 flags = 0;

	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
		flags = IP6_TNL_F_CAP_PER_PACKET;
	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
		if (ltype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_XMIT;
		if (rtype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_RCV;
	}
	return flags;
}
EXPORT_SYMBOL(ip6_tnl_get_cap);

/* called with rcu_read_lock() */
int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
		    const struct in6_addr *laddr,
		    const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
		struct net_device *ldev = NULL;

		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if ((ipv6_addr_is_multicast(laddr) ||
		     likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
		    likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
			ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);

static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
			 const struct tnl_ptk_info *tpi,
			 struct metadata_dst *tun_dst,
			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						     const struct ipv6hdr *ipv6h,
						     struct sk_buff *skb),
			 bool log_ecn_err)
{
	struct pcpu_sw_netstats *tstats;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int err;

	if ((!(tpi->flags & TUNNEL_CSUM) &&
	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
	    ((tpi->flags & TUNNEL_CSUM) &&
	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
		if (!(tpi->flags & TUNNEL_SEQ) ||
		    (tunnel->i_seqno &&
		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb->protocol = tpi->proto;

	/* Warning: All skb pointers will be invalidated! */
	if (tunnel->dev->type == ARPHRD_ETHER) {
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			tunnel->dev->stats.rx_length_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}

		ipv6h = ipv6_hdr(skb);
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	skb_reset_network_header(skb);
	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));

	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);

	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
	if (unlikely(err)) {
		if (log_ecn_err)
			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
					     &ipv6h->saddr,
					     ipv6_get_dsfield(ipv6h));
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}

int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
		const struct tnl_ptk_info *tpi,
		struct metadata_dst *tun_dst,
		bool log_ecn_err)
{
	return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate,
			     log_ecn_err);
}
EXPORT_SYMBOL(ip6_tnl_rcv);

static const struct tnl_ptk_info tpi_v6 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IPV6),
};

static const struct tnl_ptk_info tpi_v4 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IP),
};

static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
		      const struct tnl_ptk_info *tpi,
		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						  const struct ipv6hdr *ipv6h,
						  struct sk_buff *skb))
{
	struct ip6_tnl *t;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int ret = -1;

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);

	if (t) {
		u8 tproto = ACCESS_ONCE(t->parms.proto);

		if (tproto != ipproto && tproto != 0)
			goto drop;
		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;
		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
			goto drop;
		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
			goto drop;
		ret = __ip6_tnl_rcv(t, skb, tpi, NULL, dscp_ecn_decapsulate,
				    log_ecn_error);
	}

	rcu_read_unlock();

	return ret;

drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}

static int ip4ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
			  ip4ip6_dscp_ecn_decapsulate);
}

static int ip6ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
			  ip6ip6_dscp_ecn_decapsulate);
}

struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];
};

static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
	opt->dst_opt[3] = 1;
	opt->dst_opt[4] = encap_limit;
	opt->dst_opt[5] = IPV6_TLV_PADN;
	opt->dst_opt[6] = 1;

	opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
	opt->ops.opt_nflen = 8;
}

/**
 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/

static inline bool
ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
		     const struct in6_addr *laddr,
		     const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
		struct net_device *ldev = NULL;

		rcu_read_lock();
		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
			pr_warn("%s xmit: Local address not yet configured!\n",
				p->name);
		else if (!ipv6_addr_is_multicast(raddr) &&
			 unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
				p->name);
		else
			ret = 1;
		rcu_read_unlock();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);

/**
 * ip6_tnl_xmit - encapsulate packet and send
 *   @skb: the outgoing socket buffer
 *   @dev: the outgoing tunnel device
 *   @dsfield: dscp code for outer header
 *   @fl6: flow of tunneled packet
 *   @encap_limit: encapsulation limit
 *   @pmtu: Path MTU is stored if packet is too big
 *   @proto: next header value
 *
 * Description:
 *   Build new header and do some sanity checks on the packet before sending
 *   it.
 *
 * Return:
 *   0 on success
 *   -1 fail
 *   %-EMSGSIZE message too big. return mtu in this case.
 **/

int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
		 __u8 proto)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct net_device_stats *stats = &t->dev->stats;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct ipv6_tel_txoption opt;
	struct dst_entry *dst = NULL, *ndst = NULL;
	struct net_device *tdev;
	int mtu;
	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
	unsigned int max_headroom = psh_hlen;
	int err = -1;

	/* NBMA tunnel */
	if (ipv6_addr_any(&t->parms.raddr)) {
		struct in6_addr *addr6;
		struct neighbour *neigh;
		int addr_type;

		if (!skb_dst(skb))
			goto tx_err_link_failure;

		neigh = dst_neigh_lookup(skb_dst(skb),
					 &ipv6_hdr(skb)->daddr);
		if (!neigh)
			goto tx_err_link_failure;

		addr6 = (struct in6_addr *)&neigh->primary_key;
		addr_type = ipv6_addr_type(addr6);

		if (addr_type == IPV6_ADDR_ANY)
			addr6 = &ipv6_hdr(skb)->daddr;

		memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
		neigh_release(neigh);
	} else if (!fl6->flowi6_mark)
		dst = dst_cache_get(&t->dst_cache);

	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
		goto tx_err_link_failure;

	if (!dst) {
		dst = ip6_route_output(net, NULL, fl6);

		if (dst->error)
			goto tx_err_link_failure;
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			dst = NULL;
			goto tx_err_link_failure;
		}
		ndst = dst;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     t->parms.name);
		goto tx_err_dst_release;
	}
	mtu = dst_mtu(dst) - psh_hlen;
	if (encap_limit >= 0) {
		max_headroom += 8;
		mtu -= 8;
	}
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
	if (skb->len > mtu && !skb_is_gso(skb)) {
		*pmtu = mtu;
		err = -EMSGSIZE;
		goto tx_err_dst_release;
	}

	if (t->err_count > 0) {
		if (time_before(jiffies,
				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
			t->err_count--;

			dst_link_failure(skb);
		} else {
			t->err_count = 0;
		}
	}

	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb;

		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb)
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	if (!fl6->flowi6_mark && ndst)
		dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
	skb_dst_set(skb, dst);

	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
	}

	/* Calculate max headroom for all the headers and adjust
	 * needed_headroom if necessary.
	 */
	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
			+ dst->header_len + t->hlen;
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	err = ip6_tnl_encap(skb, t, &proto, fl6);
	if (err)
		return err;

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	ipv6h = ipv6_hdr(skb);
	ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = proto;
	ipv6h->saddr = fl6->saddr;
	ipv6h->daddr = fl6->daddr;
	ip6tunnel_xmit(NULL, skb, dev);
	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}
EXPORT_SYMBOL(ip6_tnl_xmit);

static inline int
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	const struct iphdr *iph = ip_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	tproto = ACCESS_ONCE(t->parms.proto);
	if (tproto != IPPROTO_IPIP && tproto != 0)
		return -1;

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;

	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));

	dsfield = ipv4_get_dsfield(iph);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
					  & IPV6_TCLASS_MASK;
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6.flowi6_mark = skb->mark;

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	skb_set_inner_ipproto(skb, IPPROTO_IPIP);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   IPPROTO_IPIP);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}

	return 0;
}

static inline int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	__u16 offset;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	tproto = ACCESS_ONCE(t->parms.proto);
	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
	    ip6_tnl_addr_conflict(t, ipv6h))
		return -1;

	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
	if (offset > 0) {
		struct ipv6_tlv_tnl_enc_lim *tel;

		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
		if (tel->encap_limit == 0) {
			icmpv6_send(skb, ICMPV6_PARAMPROB,
				    ICMPV6_HDR_FIELD, offset + 2);
			return -1;
		}
		encap_limit = tel->encap_limit - 1;
	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;

	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));

	dsfield = ipv6_get_dsfield(ipv6h);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
		fl6.flowlabel |= ip6_flowlabel(ipv6h);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6.flowi6_mark = skb->mark;

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	skb_set_inner_ipproto(skb, IPPROTO_IPV6);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   IPPROTO_IPV6);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}

static netdev_tx_t
ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip4ip6_tnl_xmit(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6ip6_tnl_xmit(skb, dev);
		break;
	default:
		goto tx_err;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ip6_tnl_link_config(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	int t_hlen;

	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;

	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, strict);

		if (!rt)
			return;

		if (rt->dst.dev) {
			dev->hard_header_len = rt->dst.dev->hard_header_len +
				t_hlen;

			dev->mtu = rt->dst.dev->mtu - t_hlen;
			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
				dev->mtu -= 8;

			if (dev->mtu < IPV6_MIN_MTU)
				dev->mtu = IPV6_MIN_MTU;
		}
		ip6_rt_put(rt);
	}
}

/**
 * ip6_tnl_change - update the tunnel parameters
 *   @t: tunnel to be changed
 *   @p: tunnel configuration parameters
 *
 * Description:
 *   ip6_tnl_change() updates the tunnel parameters
 **/

static int
ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	dst_cache_reset(&t->dst_cache);
	ip6_tnl_link_config(t);
	return 0;
}

static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	ip6_tnl_unlink(ip6n, t);
	synchronize_net();
	err = ip6_tnl_change(t, p);
	ip6_tnl_link(ip6n, t);
	netdev_state_change(t->dev);
	return err;
}

static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	/* for default tnl0 device allow to change only the proto */
	t->parms.proto = p->proto;
	netdev_state_change(t->dev);
	return 0;
}

static void
ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->proto = u->proto;
	memcpy(p->name, u->name, sizeof(u->name));
}

static void
ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
{
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->proto = p->proto;
	memcpy(u->name, p->name, sizeof(u->name));
}

/**
 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
 *   @dev: virtual device associated with tunnel
 *   @ifr: parameters passed from userspace
 *   @cmd: command to be performed
 *
 * Description:
 *   ip6_tnl_ioctl() is used for managing IPv6 tunnels
 *   from userspace.
 *
 * The possible commands are the following:
 *   %SIOCGETTUNNEL: get tunnel parameters for device
 *   %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
 *   %SIOCCHGTUNNEL: change tunnel parameters to those given
 *   %SIOCDELTUNNEL: delete tunnel
 *
 *   The fallback device "ip6tnl0", created during module
 *   initialization, can be used for creating other tunnel devices.
 *
 * Return:
 *   0 on success,
 *   %-EFAULT if unable to copy data to or from userspace,
 *   %-EPERM if current process hasn't %CAP_NET_ADMIN set
 *   %-EINVAL if passed tunnel parameters are invalid,
 *   %-EEXIST if changing a tunnel's parameters would cause a conflict
 *   %-ENODEV if attempting to change or delete a nonexisting device
 **/

static int
ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	memset(&p1, 0, sizeof(p1));

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ip6n->fb_tnl_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				t = netdev_priv(dev);
		} else {
			memset(&p, 0, sizeof(p));
		}
		ip6_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
			err = -EFAULT;
		}
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			break;
		err = -EINVAL;
		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
		    p.proto != 0)
			break;
		ip6_tnl_parm_from_user(&p1, &p);
		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
		if (cmd == SIOCCHGTUNNEL) {
			if (!IS_ERR(t)) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else
				t = netdev_priv(dev);
			if (dev == ip6n->fb_tnl_dev)
				err = ip6_tnl0_update(t, &p1);
			else
				err = ip6_tnl_update(t, &p1);
		}
		if (!IS_ERR(t)) {
			err = 0;
			ip6_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;

		} else {
			err = PTR_ERR(t);
		}
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;

		if (dev == ip6n->fb_tnl_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				break;
			err = -ENOENT;
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				break;
			err = -EPERM;
			if (t->dev == ip6n->fb_tnl_dev)
				break;
			dev = t->dev;
		}
		err = 0;
		unregister_netdevice(dev);
		break;
	default:
		err = -EINVAL;
	}
	return err;
}

/**
 * ip6_tnl_change_mtu - change mtu manually for tunnel device
 *   @dev: virtual device associated with tunnel
 *   @new_mtu: the new mtu
 *
 * Return:
 *   0 on success,
 *   %-EINVAL if mtu too small
 **/

int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip6_tnl *tnl = netdev_priv(dev);

	if (tnl->parms.proto == IPPROTO_IPIP) {
		if (new_mtu < 68)
			return -EINVAL;
	} else {
		if (new_mtu < IPV6_MIN_MTU)
			return -EINVAL;
	}
	if (new_mtu > 0xFFF8 - dev->hard_header_len)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_change_mtu);

int ip6_tnl_get_iflink(const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	return t->parms.link;
}
EXPORT_SYMBOL(ip6_tnl_get_iflink);

int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	return !cmpxchg((const struct ip6_tnl_encap_ops **)
			&ip6tun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip6_tnl_encap_add_ops);

int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
		       &ip6tun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(ip6_tnl_encap_del_ops);

int ip6_tnl_encap_setup(struct ip6_tnl *t,
			struct ip_tunnel_encap *ipencap)
{
	int hlen;

	memset(&t->encap, 0, sizeof(t->encap));

	hlen = ip6_encap_hlen(ipencap);
	if (hlen < 0)
		return hlen;

	t->encap.type = ipencap->type;
	t->encap.sport = ipencap->sport;
	t->encap.dport = ipencap->dport;
	t->encap.flags = ipencap->flags;

	t->encap_hlen = hlen;
	t->hlen = t->encap_hlen + t->tun_hlen;

	return 0;
}
EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);

static const struct net_device_ops ip6_tnl_netdev_ops = {
	.ndo_init	= ip6_tnl_dev_init,
	.ndo_uninit	= ip6_tnl_dev_uninit,
	.ndo_start_xmit = ip6_tnl_start_xmit,
	.ndo_do_ioctl	= ip6_tnl_ioctl,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_stats	= ip6_get_stats,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};

#define IPXIPX_FEATURES (NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_GSO_SOFTWARE |	\
			 NETIF_F_HW_CSUM)

/**
 * ip6_tnl_dev_setup - setup virtual tunnel device
 *   @dev: virtual device associated with tunnel
 *
 * Description:
 *   Initialize function pointers and device parameters
 **/

static void ip6_tnl_dev_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6_tnl_netdev_ops;
	dev->destructor = ip6_dev_free;

	dev->type = ARPHRD_TUNNEL6;
	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	dev->features |= NETIF_F_LLTX;
	netif_keep_dst(dev);

	dev->features		|= IPXIPX_FEATURES;
	dev->hw_features	|= IPXIPX_FEATURES;

	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}


/**
 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int ret;
	int t_hlen;

	t->dev = dev;
	t->net = dev_net(dev);
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
	if (ret)
		goto free_stats;

	ret = gro_cells_init(&t->gro_cells, dev);
	if (ret)
		goto destroy_dst;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	dev->type = ARPHRD_TUNNEL6;
	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;

	return 0;

destroy_dst:
	dst_cache_destroy(&t->dst_cache);
free_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;

	return ret;
}

/**
 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static int ip6_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int err = ip6_tnl_dev_init_gen(dev);

	if (err)
		return err;
	ip6_tnl_link_config(t);
	return 0;
}

/**
 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
 *   @dev: fallback device
 *
 * Return: 0
 **/

static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	t->parms.proto = IPPROTO_IPV6;
	dev_hold(dev);

	rcu_assign_pointer(ip6n->tnls_wc[0], t);
	return 0;
}

static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
{
	u8 proto;

	if (!data || !data[IFLA_IPTUN_PROTO])
		return 0;

	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
	if (proto != IPPROTO_IPV6 &&
	    proto != IPPROTO_IPIP &&
	    proto != 0)
		return -EINVAL;

	return 0;
}

static void ip6_tnl_netlink_parms(struct nlattr *data[],
				  struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_IPTUN_LINK])
		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);

	if (data[IFLA_IPTUN_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);

	if (data[IFLA_IPTUN_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);

	if (data[IFLA_IPTUN_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);

	if (data[IFLA_IPTUN_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);

	if (data[IFLA_IPTUN_FLOWINFO])
		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);

	if (data[IFLA_IPTUN_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);

	if (data[IFLA_IPTUN_PROTO])
		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
}

static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
					struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
	}

	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
	}

	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
	}

	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
	}

	return ret;
}

static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[])
{
	struct net *net = dev_net(dev);
	struct ip6_tnl *nt, *t;
	struct ip_tunnel_encap ipencap;

	nt = netdev_priv(dev);

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return err;
	}

	ip6_tnl_netlink_parms(data, &nt->parms);

	t = ip6_tnl_locate(net, &nt->parms, 0);
	if (!IS_ERR(t))
		return -EEXIST;

	return ip6_tnl_create2(dev);
}

static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
			      struct nlattr *data[])
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm p;
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ip6n->fb_tnl_dev)
		return -EINVAL;

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}
	ip6_tnl_netlink_parms(data, &p);

	t = ip6_tnl_locate(net, &p, 0);
	if (!IS_ERR(t)) {
		if (t->dev != dev)
			return -EEXIST;
	} else
		t = netdev_priv(dev);

	return ip6_tnl_update(t, &p);
}

static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev != ip6n->fb_tnl_dev)
		unregister_netdevice_queue(dev, head);
}

static size_t ip6_tnl_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_IPTUN_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_IPTUN_FLAGS */
		nla_total_size(4) +
		/* IFLA_IPTUN_PROTO */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		0;
}

static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	struct __ip6_tnl_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
			tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
			 tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
			 tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
			tunnel->encap.flags))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

struct net *ip6_tnl_get_link_net(const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	return tunnel->net;
}
EXPORT_SYMBOL(ip6_tnl_get_link_net);

static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
};

static struct rtnl_link_ops ip6_link_ops __read_mostly = {
	.kind		= "ip6tnl",
	.maxtype	= IFLA_IPTUN_MAX,
	.policy		= ip6_tnl_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6_tnl_dev_setup,
	.validate	= ip6_tnl_validate,
	.newlink	= ip6_tnl_newlink,
	.changelink	= ip6_tnl_changelink,
	.dellink	= ip6_tnl_dellink,
	.get_size	= ip6_tnl_get_size,
	.fill_info	= ip6_tnl_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};

static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
	.handler	= ip4ip6_rcv,
	.err_handler	= ip4ip6_err,
	.priority	= 1,
};

static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
	.handler	= ip6ip6_rcv,
	.err_handler	= ip6ip6_err,
	.priority	= 1,
};

static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct net_device *dev, *aux;
	int h;
	struct ip6_tnl *t;
	LIST_HEAD(list);

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6_link_ops)
			unregister_netdevice_queue(dev, &list);

	for (h = 0; h < HASH_SIZE; h++) {
		t = rtnl_dereference(ip6n->tnls_r_l[h]);
		while (t) {
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, &list);
			t = rtnl_dereference(t->next);
		}
	}

	unregister_netdevice_many(&list);
}

static int __net_init ip6_tnl_init_net(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *t = NULL;
	int err;

	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;
	dev_net_set(ip6n->fb_tnl_dev, net);
	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;

	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	t = netdev_priv(ip6n->fb_tnl_dev);

	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
	return 0;

err_register:
	ip6_dev_free(ip6n->fb_tnl_dev);
err_alloc_dev:
	return err;
}

static void __net_exit ip6_tnl_exit_net(struct net *net)
{
	rtnl_lock();
	ip6_tnl_destroy_tunnels(net);
	rtnl_unlock();
}

static struct pernet_operations ip6_tnl_net_ops = {
	.init = ip6_tnl_init_net,
	.exit = ip6_tnl_exit_net,
	.id   = &ip6_tnl_net_id,
	.size = sizeof(struct ip6_tnl_net),
};

/**
 * ip6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/

static int __init ip6_tunnel_init(void)
{
	int err;

	err = register_pernet_device(&ip6_tnl_net_ops);
	if (err < 0)
		goto out_pernet;

	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
	if (err < 0) {
		pr_err("%s: can't register ip4ip6\n", __func__);
		goto out_ip4ip6;
	}

	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
	if (err < 0) {
		pr_err("%s: can't register ip6ip6\n", __func__);
		goto out_ip6ip6;
	}
	err = rtnl_link_register(&ip6_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return 0;

rtnl_link_failed:
	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
out_ip6ip6:
	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
out_ip4ip6:
	unregister_pernet_device(&ip6_tnl_net_ops);
out_pernet:
	return err;
}

/**
 * ip6_tunnel_cleanup - free resources and unregister protocol
 **/

static void __exit ip6_tunnel_cleanup(void)
{
	rtnl_link_unregister(&ip6_link_ops);
	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
		pr_info("%s: can't deregister ip4ip6\n", __func__);

	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
		pr_info("%s: can't deregister ip6ip6\n", __func__);

	unregister_pernet_device(&ip6_tnl_net_ops);
}

module_init(ip6_tunnel_init);
module_exit(ip6_tunnel_cleanup);
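
/*
 * Illustrative userspace usage (a sketch, not part of the module itself):
 * once this module is loaded it registers the "ip6tnl" rtnl link type and
 * the fallback device "ip6tnl0", so a tunnel can be created with iproute2.
 * The device name and addresses below are placeholders chosen for the
 * example only:
 *
 *   ip link add name tun1 type ip6tnl mode ip6ip6 \
 *          local 2001:db8::1 remote 2001:db8::2
 *   ip link set tun1 up
 *
 * "mode ipip6" selects IPv4-in-IPv6 encapsulation and "mode any" accepts
 * either inner protocol, matching the IPPROTO_IPIP/IPPROTO_IPV6/0 values
 * handled by ip6_tnl_validate() and ip6_tnl_ioctl() above.
 */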