/*
 *	IPv6 tunneling device
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
 *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
 *
 *	Based on:
 *	linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
 *
 *	RFC 2473
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/etherdevice.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst_metadata.h>

MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ip6tnl");
MODULE_ALIAS_NETDEV("ip6tnl0");

#define IP6_TUNNEL_HASH_SIZE_SHIFT  5
#define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);

	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
}

static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
static struct rtnl_link_ops ip6_link_ops __read_mostly;

static unsigned int ip6_tnl_net_id __read_mostly;
struct ip6_tnl_net {
	/* the IPv6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
	struct ip6_tnl __rcu *collect_md_tun;
};

static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
	struct pcpu_sw_netstats tmp, sum = { 0 };
	int i;

	for_each_possible_cpu(i) {
		unsigned int start;
		const struct pcpu_sw_netstats *tstats =
						per_cpu_ptr(dev->tstats, i);

		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			tmp.rx_packets = tstats->rx_packets;
			tmp.rx_bytes = tstats->rx_bytes;
			tmp.tx_packets = tstats->tx_packets;
			tmp.tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

		sum.rx_packets += tmp.rx_packets;
		sum.rx_bytes += tmp.rx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.tx_bytes += tmp.tx_bytes;
	}
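	/* Fold the per-CPU totals into the device-wide counters. */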
	dev->stats.rx_packets = sum.rx_packets;
	dev->stats.rx_bytes = sum.rx_bytes;
	dev->stats.tx_packets = sum.tx_packets;
	dev->stats.tx_bytes = sum.tx_bytes;
	return &dev->stats;
}

/**
 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 *   @net: network namespace in which to look up the tunnel
 *   @remote: the address of the tunnel exit-point
 *   @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/

#define for_each_ip6_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))

static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, const struct in6_addr *remote,
	       const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct in6_addr any;

	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	memset(&any, 0, sizeof(any));
	hash = HASH(&any, local);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_any(&t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	hash = HASH(remote, &any);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
		    ipv6_addr_any(&t->parms.laddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	t = rcu_dereference(ip6n->collect_md_tun);
	if (t)
		return t;

	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}

/**
 * ip6_tnl_bucket - get head of list matching given tunnel parameters
 *   @ip6n: the per-namespace tunnel state
 *   @p: parameters containing tunnel end-points
 *
 * Description:
 *   ip6_tnl_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/

static struct ip6_tnl __rcu **
ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote, local);
	}
	return &ip6n->tnls[prio][h];
}

/**
 * ip6_tnl_link - add tunnel to hash table
 *   @ip6n: the per-namespace tunnel state
 *   @t: tunnel to be added
 **/

static void
ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, t);
	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

/**
 * ip6_tnl_unlink - remove tunnel from hash table
 *   @ip6n: the per-namespace tunnel state
 *   @t: tunnel to be removed
 **/

static void
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, NULL);

	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static void ip6_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
	free_percpu(dev->tstats);
}

static int ip6_tnl_create2(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	t = netdev_priv(dev);

	dev->rtnl_link_ops = &ip6_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(t->parms.name, dev->name);

	dev_hold(dev);
	ip6_tnl_link(ip6n, t);
	return 0;

out:
	return err;
}

/**
 * ip6_tnl_create - create a new tunnel
 *   @net: network namespace in which to create the tunnel
 *   @p: tunnel parameters
 *
 * Description:
 *   Create tunnel matching given parameters.
 *
 * Return:
 *   created tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err = -ENOMEM;

	if (p->name[0])
		strlcpy(name, p->name, IFNAMSIZ);
	else
		sprintf(name, "ip6tnl%%d");

	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6_tnl_dev_setup);
	if (!dev)
		goto failed;

	dev_net_set(dev, net);

	t = netdev_priv(dev);
	t->parms = *p;
	t->net = dev_net(dev);
	err = ip6_tnl_create2(dev);
	if (err < 0)
		goto failed_free;

	return t;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}

/**
 * ip6_tnl_locate - find or create tunnel matching given parameters
 *   @net: network namespace in which to search
 *   @p: tunnel parameters
 *   @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *   ip6_tnl_locate() first tries to locate an existing tunnel
 *   based on @p. If this is unsuccessful, but @create is set a new
 *   tunnel device is created and registered for use.
 *
 * Return:
 *   matching tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_locate(struct net *net,
		struct __ip6_tnl_parm *p, int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	for (tp = ip6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr)) {
			if (create)
				return ERR_PTR(-EEXIST);

			return t;
		}
	}
	if (!create)
		return ERR_PTR(-ENODEV);
	return ip6_tnl_create(net, p);
}

/**
 * ip6_tnl_dev_uninit - tunnel device uninitializer
 *   @dev: the device to be destroyed
 *
 * Description:
 *   ip6_tnl_dev_uninit() removes tunnel from its list
 **/

static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		ip6_tnl_unlink(ip6n, t);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}

/**
 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
 *   @skb: received socket buffer
 *   @raw: pointer to the outer IPv6 header within @skb
 *
 * Return:
 *   0 if none was found,
 *   else index to encapsulation limit
 **/

__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
	unsigned int nhoff = raw - skb->data;
	unsigned int off = nhoff + sizeof(*ipv6h);
	u8 next, nexthdr = ipv6h->nexthdr;

	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
		struct ipv6_opt_hdr *hdr;
		u16 optlen;

		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
			break;

		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
			if (frag_hdr->frag_off)
				break;
			optlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			optlen = (hdr->hdrlen + 2) << 2;
		} else {
			optlen = ipv6_optlen(hdr);
		}
		/* cache hdr->nexthdr, since pskb_may_pull() might
		 * invalidate hdr
		 */
		next = hdr->nexthdr;
		if (nexthdr == NEXTHDR_DEST) {
			u16 i = 2;

			/* Remember : hdr is no longer valid at this point. */
			if (!pskb_may_pull(skb, off + optlen))
				break;

			while (1) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				/* No more room for encapsulation limit */
				if (i + sizeof(*tel) > optlen)
					break;

				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
				/* return index of option if found and valid */
				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
				    tel->length == 1)
					return i + off - nhoff;
				/* else jump to next option */
				if (tel->type)
					i += tel->length + 2;
				else
					i++;
			}
		}
		nexthdr = next;
		off += optlen;
	}
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
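/* The option located above is the RFC 2473 tunnel encapsulation limit,
 * a TLV carried in a destination options header:
 *
 *	+--------+--------+--------+
 *	|  0x04  | len: 1 | limit  |
 *	+--------+--------+--------+
 */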
/**
 * ip6_tnl_err - tunnel error handler
 *
 * Description:
 *   ip6_tnl_err() should handle errors in the tunnel according
 *   to the specifications in RFC 2473.
 **/

static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
	struct ip6_tnl *t;
	int rel_msg = 0;
	u8 rel_type = ICMPV6_DEST_UNREACH;
	u8 rel_code = ICMPV6_ADDR_UNREACH;
	u8 tproto;
	__u32 rel_info = 0;
	__u16 len;
	int err = -ENOENT;

	/* If the packet doesn't contain the original IPv6 header we are
	   in trouble since we might need the source address for further
	   processing of the error. */

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
	if (!t)
		goto out;

	tproto = ACCESS_ONCE(t->parms.proto);
	if (tproto != ipproto && tproto != 0)
		goto out;

	err = 0;

	switch (*type) {
		__u32 teli;
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 mtu;
	case ICMPV6_DEST_UNREACH:
		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
				    t->parms.name);
		rel_msg = 1;
		break;
	case ICMPV6_TIME_EXCEED:
		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					    t->parms.name);
			rel_msg = 1;
		}
		break;
	case ICMPV6_PARAMPROB:
		teli = 0;
		if ((*code) == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == *info - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						    t->parms.name);
				rel_msg = 1;
			}
		} else {
			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					    t->parms.name);
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		mtu = *info - offset;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		t->dev->mtu = mtu;

		len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
		if (len > mtu) {
			rel_type = ICMPV6_PKT_TOOBIG;
			rel_code = 0;
			rel_info = mtu;
			rel_msg = 1;
		}
		break;
	}

	*type = rel_type;
	*code = rel_code;
	*info = rel_info;
	*msg = rel_msg;

out:
	rcu_read_unlock();
	return err;
}

static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	int rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	__u32 rel_info = ntohl(info);
	int err;
	struct sk_buff *skb2;
	const struct iphdr *eiph;
	struct rtable *rt;
	struct flowi4 fl4;

	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg == 0)
		return 0;

	switch (rel_type) {
	case ICMPV6_DEST_UNREACH:
		if (rel_code != ICMPV6_ADDR_UNREACH)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_HOST_UNREACH;
		break;
	case ICMPV6_PKT_TOOBIG:
		if (rel_code != 0)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_FRAG_NEEDED;
		break;
	case NDISC_REDIRECT:
		rel_type = ICMP_REDIRECT;
		rel_code = ICMP_REDIR_HOST;
	default:
		return 0;
	}

	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
		return 0;

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return 0;

	skb_dst_drop(skb2);

	skb_pull(skb2, offset);
	skb_reset_network_header(skb2);
	eiph = ip_hdr(skb2);
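	/* The ICMPv6 error arrived on the tunnel's outer IPv6 path; eiph
	 * now points at the embedded IPv4 header of the original packet,
	 * which is re-routed below so that the relayed ICMPv4 error can
	 * reach the original IPv4 sender.
	 */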
	/* Try to guess incoming interface */
	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
				   eiph->saddr, 0,
				   0, 0,
				   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
	if (IS_ERR(rt))
		goto out;

	skb2->dev = rt->dst.dev;

	/* route "incoming" packet */
	if (rt->rt_flags & RTCF_LOCAL) {
		ip_rt_put(rt);
		rt = NULL;
		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
					   eiph->daddr, eiph->saddr,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(eiph->tos), 0);
		if (IS_ERR(rt) ||
		    rt->dst.dev->type != ARPHRD_TUNNEL) {
			if (!IS_ERR(rt))
				ip_rt_put(rt);
			goto out;
		}
		skb_dst_set(skb2, &rt->dst);
	} else {
		ip_rt_put(rt);
		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
				   skb2->dev) ||
		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
			goto out;
	}

	/* change mtu on this route */
	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
		if (rel_info > dst_mtu(skb_dst(skb2)))
			goto out;

		skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info);
	}
	if (rel_type == ICMP_REDIRECT)
		skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);

	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));

out:
	kfree_skb(skb2);
	return 0;
}

static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	int rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	__u32 rel_info = ntohl(info);
	int err;

	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
		struct rt6_info *rt;
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		if (!skb2)
			return 0;

		skb_dst_drop(skb2);
		skb_pull(skb2, offset);
		skb_reset_network_header(skb2);

		/* Try to guess incoming interface */
		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
				NULL, 0, 0);

		if (rt && rt->dst.dev)
			skb2->dev = rt->dst.dev;

		icmpv6_send(skb2, rel_type, rel_code, rel_info);

		ip6_rt_put(rt);

		kfree_skb(skb2);
	}

	return 0;
}

static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;

	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);

	return IP6_ECN_decapsulate(ipv6h, skb);
}

static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));

	return IP6_ECN_decapsulate(ipv6h, skb);
}

__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
		      const struct in6_addr *laddr,
		      const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ltype = ipv6_addr_type(laddr);
	int rtype = ipv6_addr_type(raddr);
	__u32 flags = 0;

	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
		flags = IP6_TNL_F_CAP_PER_PACKET;
	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
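		/* Both endpoints are specific, non-loopback addresses;
		 * link-local endpoints additionally require a bound
		 * interface (p->link) to be usable.
		 */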
		if (ltype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_XMIT;
		if (rtype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_RCV;
	}
	return flags;
}
EXPORT_SYMBOL(ip6_tnl_get_cap);

/* called with rcu_read_lock() */
int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
		    const struct in6_addr *laddr,
		    const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
		struct net_device *ldev = NULL;

		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if ((ipv6_addr_is_multicast(laddr) ||
		     likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
		    likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
			ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);

static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
			 const struct tnl_ptk_info *tpi,
			 struct metadata_dst *tun_dst,
			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						const struct ipv6hdr *ipv6h,
						struct sk_buff *skb),
			 bool log_ecn_err)
{
	struct pcpu_sw_netstats *tstats;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int err;

	if ((!(tpi->flags & TUNNEL_CSUM) &&
	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
	    ((tpi->flags & TUNNEL_CSUM) &&
	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
		if (!(tpi->flags & TUNNEL_SEQ) ||
		    (tunnel->i_seqno &&
		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb->protocol = tpi->proto;

	/* Warning: All skb pointers will be invalidated! */
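	/* For Ethernet-type tunnels the inner frame carries its own MAC
	 * header: pull it and let eth_type_trans() set skb->protocol.
	 */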
	if (tunnel->dev->type == ARPHRD_ETHER) {
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			tunnel->dev->stats.rx_length_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}

		ipv6h = ipv6_hdr(skb);
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	skb_reset_network_header(skb);
	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));

	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);

	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
	if (unlikely(err)) {
		if (log_ecn_err)
			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
					     &ipv6h->saddr,
					     ipv6_get_dsfield(ipv6h));
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tun_dst)
		skb_dst_set(skb, (struct dst_entry *)tun_dst);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}

int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
		const struct tnl_ptk_info *tpi,
		struct metadata_dst *tun_dst,
		bool log_ecn_err)
{
	return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate,
			     log_ecn_err);
}
EXPORT_SYMBOL(ip6_tnl_rcv);

static const struct tnl_ptk_info tpi_v6 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IPV6),
};

static const struct tnl_ptk_info tpi_v4 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IP),
};
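/* Common receive path for ip4ip6 and ip6ip6: look up the tunnel that
 * matches the outer addresses, enforce protocol and policy checks, then
 * strip the outer header and hand the packet to __ip6_tnl_rcv().
 */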
static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
		      const struct tnl_ptk_info *tpi,
		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						  const struct ipv6hdr *ipv6h,
						  struct sk_buff *skb))
{
	struct ip6_tnl *t;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	int ret = -1;

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);

	if (t) {
		u8 tproto = ACCESS_ONCE(t->parms.proto);

		if (tproto != ipproto && tproto != 0)
			goto drop;
		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;
		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
			goto drop;
		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
			goto drop;
		if (t->parms.collect_md) {
			tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
			if (!tun_dst)
				return 0;
		}
		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
				    log_ecn_error);
	}

	rcu_read_unlock();

	return ret;

drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}

static int ip4ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
			  ip4ip6_dscp_ecn_decapsulate);
}

static int ip6ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
			  ip6ip6_dscp_ecn_decapsulate);
}

struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];
};

static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
	opt->dst_opt[3] = 1;
	opt->dst_opt[4] = encap_limit;
	opt->dst_opt[5] = IPV6_TLV_PADN;
	opt->dst_opt[6] = 1;

	opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt;
	opt->ops.opt_nflen = 8;
}

/**
 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/

static inline bool
ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
		     const struct in6_addr *laddr,
		     const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
		struct net_device *ldev = NULL;

		rcu_read_lock();
		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
			pr_warn("%s xmit: Local address not yet configured!\n",
				p->name);
		else if (!ipv6_addr_is_multicast(raddr) &&
			 unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
				p->name);
		else
			ret = 1;
		rcu_read_unlock();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);

/**
 * ip6_tnl_xmit - encapsulate packet and send
 *   @skb: the outgoing socket buffer
 *   @dev: the outgoing tunnel device
 *   @dsfield: dscp code for outer header
 *   @fl6: flow of tunneled packet
 *   @encap_limit: encapsulation limit
 *   @pmtu: Path MTU is stored if packet is too big
 *   @proto: next header value
 *
 * Description:
 *   Build new header and do some sanity checks on the packet before sending
 *   it.
 *
 * Return:
 *   0 on success
 *   -1 fail
 *   %-EMSGSIZE message too big. return mtu in this case.
 **/

int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
		 __u8 proto)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct net_device_stats *stats = &t->dev->stats;
	struct ipv6hdr *ipv6h;
	struct ipv6_tel_txoption opt;
	struct dst_entry *dst = NULL, *ndst = NULL;
	struct net_device *tdev;
	int mtu;
	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
	unsigned int max_headroom = psh_hlen;
	bool use_cache = false;
	u8 hop_limit;
	int err = -1;

	if (t->parms.collect_md) {
		hop_limit = skb_tunnel_info(skb)->key.ttl;
		goto route_lookup;
	} else {
		hop_limit = t->parms.hop_limit;
	}

	/* NBMA tunnel */
	if (ipv6_addr_any(&t->parms.raddr)) {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			struct in6_addr *addr6;
			struct neighbour *neigh;
			int addr_type;

			if (!skb_dst(skb))
				goto tx_err_link_failure;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (!neigh)
				goto tx_err_link_failure;

			addr6 = (struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY)
				addr6 = &ipv6_hdr(skb)->daddr;

			memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
			neigh_release(neigh);
		}
	} else if (!(t->parms.flags &
		     (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
		/* enable the cache only if the routing decision does
		 * not depend on the current inner header value
		 */
		use_cache = true;
	}

	if (use_cache)
		dst = dst_cache_get(&t->dst_cache);

	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
		goto tx_err_link_failure;

	if (!dst) {
route_lookup:
		/* add dsfield to flowlabel for route lookup */
		fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);

		dst = ip6_route_output(net, NULL, fl6);

		if (dst->error)
			goto tx_err_link_failure;
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			dst = NULL;
			goto tx_err_link_failure;
		}
		if (t->parms.collect_md &&
		    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
				       &fl6->daddr, 0, &fl6->saddr))
			goto tx_err_link_failure;
		ndst = dst;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     t->parms.name);
		goto tx_err_dst_release;
	}
	mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
	if (encap_limit >= 0) {
		max_headroom += 8;
		mtu -= 8;
	}
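	/* The tunnel encapsulation limit option costs a further 8 bytes
	 * (one destination options header), accounted for just above;
	 * never advertise an MTU below the IPv6 minimum of 1280.
	 */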
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	if (skb_dst(skb) && !t->parms.collect_md)
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
	if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) {
		*pmtu = mtu;
		err = -EMSGSIZE;
		goto tx_err_dst_release;
	}

	if (t->err_count > 0) {
		if (time_before(jiffies,
				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
			t->err_count--;

			dst_link_failure(skb);
		} else {
			t->err_count = 0;
		}
	}

	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb;

		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb)
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	if (t->parms.collect_md) {
		if (t->encap.type != TUNNEL_ENCAP_NONE)
			goto tx_err_dst_release;
	} else {
		if (use_cache && ndst)
			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
	}
	skb_dst_set(skb, dst);

	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		ipv6_push_frag_opts(skb, &opt.ops, &proto);
	}

	/* Calculate max headroom for all the headers and adjust
	 * needed_headroom if necessary.
	 */
	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
			+ dst->header_len + t->hlen;
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	err = ip6_tnl_encap(skb, t, &proto, fl6);
	if (err)
		return err;

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	ipv6h = ipv6_hdr(skb);
	ip6_flow_hdr(ipv6h, dsfield,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
	ipv6h->hop_limit = hop_limit;
	ipv6h->nexthdr = proto;
	ipv6h->saddr = fl6->saddr;
	ipv6h->daddr = fl6->daddr;
	ip6tunnel_xmit(NULL, skb, dev);
	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}
EXPORT_SYMBOL(ip6_tnl_xmit);

static inline int
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	const struct iphdr *iph = ip_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	tproto = ACCESS_ONCE(t->parms.proto);
	if (tproto != IPPROTO_IPIP && tproto != 0)
		return -1;

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPIP;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		dsfield = ip6_tclass(key->label);
	} else {
		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
			encap_limit = t->parms.encap_limit;
		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPIP;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
			dsfield = ipv4_get_dsfield(iph);
		else
			dsfield = ip6_tclass(t->parms.flowinfo);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
		else
			fl6.flowi6_mark = t->parms.fwmark;
	}

	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));

	skb_set_inner_ipproto(skb, IPPROTO_IPIP);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   IPPROTO_IPIP);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}

	return 0;
}

static inline int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	__u16 offset;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	tproto = ACCESS_ONCE(t->parms.proto);
	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
	    ip6_tnl_addr_conflict(t, ipv6h))
		return -1;

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPV6;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		dsfield = ip6_tclass(key->label);
	} else {
		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
		/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
		ipv6h = ipv6_hdr(skb);
		if (offset > 0) {
			struct ipv6_tlv_tnl_enc_lim *tel;

			tel = (void *)&skb_network_header(skb)[offset];
			if (tel->encap_limit == 0) {
				icmpv6_send(skb, ICMPV6_PARAMPROB,
					    ICMPV6_HDR_FIELD, offset + 2);
				return -1;
			}
			encap_limit = tel->encap_limit - 1;
		} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
			encap_limit = t->parms.encap_limit;
		}

		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPV6;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
			dsfield = ipv6_get_dsfield(ipv6h);
		else
			dsfield = ip6_tclass(t->parms.flowinfo);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
			fl6.flowlabel |= ip6_flowlabel(ipv6h);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
		else
			fl6.flowi6_mark = t->parms.fwmark;
	}

	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));

	skb_set_inner_ipproto(skb, IPPROTO_IPV6);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   IPPROTO_IPV6);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}
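/* ndo_start_xmit handler: dispatch on the inner protocol and account
 * anything we cannot tunnel as a TX error before dropping it.
 */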
static netdev_tx_t
ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip4ip6_tnl_xmit(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6ip6_tnl_xmit(skb, dev);
		break;
	default:
		goto tx_err;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ip6_tnl_link_config(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	int t_hlen;

	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;

	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, strict);

		if (!rt)
			return;

		if (rt->dst.dev) {
			dev->hard_header_len = rt->dst.dev->hard_header_len +
				t_hlen;

			dev->mtu = rt->dst.dev->mtu - t_hlen;
			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
				dev->mtu -= 8;

			if (dev->mtu < IPV6_MIN_MTU)
				dev->mtu = IPV6_MIN_MTU;
		}
		ip6_rt_put(rt);
	}
}

/**
 * ip6_tnl_change - update the tunnel parameters
 *   @t: tunnel to be changed
 *   @p: tunnel configuration parameters
 *
 * Description:
 *   ip6_tnl_change() updates the tunnel parameters
 **/

static int
ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	t->parms.fwmark = p->fwmark;
	dst_cache_reset(&t->dst_cache);
	ip6_tnl_link_config(t);
	return 0;
}

static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	ip6_tnl_unlink(ip6n, t);
	synchronize_net();
	err = ip6_tnl_change(t, p);
	ip6_tnl_link(ip6n, t);
	netdev_state_change(t->dev);
	return err;
}
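/**
 * ip6_tnl0_update - update the fallback tunnel device
 *   @t: the fallback tunnel (ip6tnl0)
 *   @p: new parameters; only @p->proto is honoured
 **/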
static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	/* for default tnl0 device allow to change only the proto */
	t->parms.proto = p->proto;
	netdev_state_change(t->dev);
	return 0;
}

static void
ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->proto = u->proto;
	memcpy(p->name, u->name, sizeof(u->name));
}

static void
ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
{
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->proto = p->proto;
	memcpy(u->name, p->name, sizeof(u->name));
}

/**
 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
 *   @dev: virtual device associated with tunnel
 *   @ifr: parameters passed from userspace
 *   @cmd: command to be performed
 *
 * Description:
 *   ip6_tnl_ioctl() is used for managing IPv6 tunnels
 *   from userspace.
 *
 *   The possible commands are the following:
 *     %SIOCGETTUNNEL: get tunnel parameters for device
 *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
 *     %SIOCCHGTUNNEL: change tunnel parameters to those given
 *     %SIOCDELTUNNEL: delete tunnel
 *
 *   The fallback device "ip6tnl0", created during module
 *   initialization, can be used for creating other tunnel devices.
 *
 * Return:
 *   0 on success,
 *   %-EFAULT if unable to copy data to or from userspace,
 *   %-EPERM if current process hasn't %CAP_NET_ADMIN set,
 *   %-EINVAL if passed tunnel parameters are invalid,
 *   %-EEXIST if changing a tunnel's parameters would cause a conflict,
 *   %-ENODEV if attempting to change or delete a nonexistent device
 **/

static int
ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	memset(&p1, 0, sizeof(p1));

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ip6n->fb_tnl_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				t = netdev_priv(dev);
		} else {
			memset(&p, 0, sizeof(p));
		}
		ip6_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
			err = -EFAULT;
		}
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			break;
		err = -EINVAL;
		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
		    p.proto != 0)
			break;
		ip6_tnl_parm_from_user(&p1, &p);
		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
		if (cmd == SIOCCHGTUNNEL) {
			if (!IS_ERR(t)) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else
				t = netdev_priv(dev);
			if (dev == ip6n->fb_tnl_dev)
				err = ip6_tnl0_update(t, &p1);
			else
				err = ip6_tnl_update(t, &p1);
		}
		if (!IS_ERR(t)) {
			err = 0;
			ip6_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;

		} else {
			err = PTR_ERR(t);
		}
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;

		if (dev == ip6n->fb_tnl_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				break;
			err = -ENOENT;
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				break;
			err = -EPERM;
			if (t->dev == ip6n->fb_tnl_dev)
				break;
			dev = t->dev;
		}
		err = 0;
		unregister_netdevice(dev);
		break;
	default:
		err = -EINVAL;
	}
	return err;
}

/**
 * ip6_tnl_change_mtu - change mtu manually for tunnel device
 *   @dev: virtual device associated with tunnel
 *   @new_mtu: the new mtu
 *
 * Return:
 *   0 on success,
 *   %-EINVAL if mtu too small
 **/

int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip6_tnl *tnl = netdev_priv(dev);

	if (tnl->parms.proto == IPPROTO_IPIP) {
		if (new_mtu < ETH_MIN_MTU)
			return -EINVAL;
	} else {
		if (new_mtu < IPV6_MIN_MTU)
			return -EINVAL;
	}
	if (new_mtu > 0xFFF8 - dev->hard_header_len)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_change_mtu);

int ip6_tnl_get_iflink(const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	return t->parms.link;
}
EXPORT_SYMBOL(ip6_tnl_get_iflink);

int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	return !cmpxchg((const struct ip6_tnl_encap_ops **)
			&ip6tun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip6_tnl_encap_add_ops);

int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
		       &ip6tun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;
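	/* Make sure no reader still holds a reference to the ops we just
	 * cleared before the caller is allowed to free them.
	 */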
	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(ip6_tnl_encap_del_ops);

int ip6_tnl_encap_setup(struct ip6_tnl *t,
			struct ip_tunnel_encap *ipencap)
{
	int hlen;

	memset(&t->encap, 0, sizeof(t->encap));

	hlen = ip6_encap_hlen(ipencap);
	if (hlen < 0)
		return hlen;

	t->encap.type = ipencap->type;
	t->encap.sport = ipencap->sport;
	t->encap.dport = ipencap->dport;
	t->encap.flags = ipencap->flags;

	t->encap_hlen = hlen;
	t->hlen = t->encap_hlen + t->tun_hlen;

	return 0;
}
EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);

static const struct net_device_ops ip6_tnl_netdev_ops = {
	.ndo_init	= ip6_tnl_dev_init,
	.ndo_uninit	= ip6_tnl_dev_uninit,
	.ndo_start_xmit = ip6_tnl_start_xmit,
	.ndo_do_ioctl	= ip6_tnl_ioctl,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_stats	= ip6_get_stats,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};

#define IPXIPX_FEATURES (NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_GSO_SOFTWARE |	\
			 NETIF_F_HW_CSUM)

/**
 * ip6_tnl_dev_setup - setup virtual tunnel device
 *   @dev: virtual device associated with tunnel
 *
 * Description:
 *   Initialize function pointers and device parameters
 **/

static void ip6_tnl_dev_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6_tnl_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6_dev_free;

	dev->type = ARPHRD_TUNNEL6;
	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	dev->features |= NETIF_F_LLTX;
	netif_keep_dst(dev);

	dev->features		|= IPXIPX_FEATURES;
	dev->hw_features	|= IPXIPX_FEATURES;

	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}

/**
 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int ret;
	int t_hlen;

	t->dev = dev;
	t->net = dev_net(dev);
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
	if (ret)
		goto free_stats;

	ret = gro_cells_init(&t->gro_cells, dev);
	if (ret)
		goto destroy_dst;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	dev->type = ARPHRD_TUNNEL6;
	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = 0xFFF8 - dev->hard_header_len;

	return 0;

destroy_dst:
	dst_cache_destroy(&t->dst_cache);
free_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;

	return ret;
}

/**
 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static int ip6_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int err = ip6_tnl_dev_init_gen(dev);
	if (err)
		return err;
	ip6_tnl_link_config(t);
	if (t->parms.collect_md) {
		dev->features |= NETIF_F_NETNS_LOCAL;
		netif_keep_dst(dev);
	}
	return 0;
}

/**
 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
 *   @dev: fallback device
 *
 * Return: 0
 **/

static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	t->parms.proto = IPPROTO_IPV6;
	dev_hold(dev);

	rcu_assign_pointer(ip6n->tnls_wc[0], t);
	return 0;
}

static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
{
	u8 proto;

	if (!data || !data[IFLA_IPTUN_PROTO])
		return 0;

	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
	if (proto != IPPROTO_IPV6 &&
	    proto != IPPROTO_IPIP &&
	    proto != 0)
		return -EINVAL;

	return 0;
}

static void ip6_tnl_netlink_parms(struct nlattr *data[],
				  struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_IPTUN_LINK])
		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);

	if (data[IFLA_IPTUN_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);

	if (data[IFLA_IPTUN_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);

	if (data[IFLA_IPTUN_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);

	if (data[IFLA_IPTUN_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);

	if (data[IFLA_IPTUN_FLOWINFO])
		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);

	if (data[IFLA_IPTUN_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);

	if (data[IFLA_IPTUN_PROTO])
		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);

	if (data[IFLA_IPTUN_COLLECT_METADATA])
		parms->collect_md = true;

	if (data[IFLA_IPTUN_FWMARK])
		parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
}

static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
					struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
	}

	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
	}

	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
	}

	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
	}

	return ret;
}

static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[])
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *nt, *t;
	struct ip_tunnel_encap ipencap;

	nt = netdev_priv(dev);

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return err;
	}

	ip6_tnl_netlink_parms(data, &nt->parms);
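	/* Only one external-mode (collect_md) tunnel may exist per netns;
	 * classic tunnels must not duplicate an existing address pair.
	 */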
	if (nt->parms.collect_md) {
		if (rtnl_dereference(ip6n->collect_md_tun))
			return -EEXIST;
	} else {
		t = ip6_tnl_locate(net, &nt->parms, 0);
		if (!IS_ERR(t))
			return -EEXIST;
	}

	return ip6_tnl_create2(dev);
}

static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
			      struct nlattr *data[])
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm p;
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ip6n->fb_tnl_dev)
		return -EINVAL;

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}
	ip6_tnl_netlink_parms(data, &p);
	if (p.collect_md)
		return -EINVAL;

	t = ip6_tnl_locate(net, &p, 0);
	if (!IS_ERR(t)) {
		if (t->dev != dev)
			return -EEXIST;
	} else
		t = netdev_priv(dev);

	return ip6_tnl_update(t, &p);
}

static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev != ip6n->fb_tnl_dev)
		unregister_netdevice_queue(dev, head);
}

static size_t ip6_tnl_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_IPTUN_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_IPTUN_FLAGS */
		nla_total_size(4) +
		/* IFLA_IPTUN_PROTO */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_IPTUN_FWMARK */
		nla_total_size(4) +
		0;
}

static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	struct __ip6_tnl_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
		goto nla_put_failure;

	if (parm->collect_md)
		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
			goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
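/* Example (from userspace, assuming an iproute2 that supports ip6tnl):
 *
 *	ip link add name tun0 type ip6tnl mode ip6ip6 \
 *		local 2001:db8::1 remote 2001:db8::2
 *
 * is delivered as an RTM_NEWLINK request whose IFLA_IPTUN_* attributes
 * are validated against ip6_tnl_policy below and parsed by
 * ip6_tnl_netlink_parms() above.
 */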
struct net *ip6_tnl_get_link_net(const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	return tunnel->net;
}
EXPORT_SYMBOL(ip6_tnl_get_link_net);

static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_IPTUN_FWMARK]		= { .type = NLA_U32 },
};

static struct rtnl_link_ops ip6_link_ops __read_mostly = {
	.kind		= "ip6tnl",
	.maxtype	= IFLA_IPTUN_MAX,
	.policy		= ip6_tnl_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6_tnl_dev_setup,
	.validate	= ip6_tnl_validate,
	.newlink	= ip6_tnl_newlink,
	.changelink	= ip6_tnl_changelink,
	.dellink	= ip6_tnl_dellink,
	.get_size	= ip6_tnl_get_size,
	.fill_info	= ip6_tnl_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};

static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
	.handler	= ip4ip6_rcv,
	.err_handler	= ip4ip6_err,
	.priority	= 1,
};

static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
	.handler	= ip6ip6_rcv,
	.err_handler	= ip6ip6_err,
	.priority	= 1,
};

static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct net_device *dev, *aux;
	int h;
	struct ip6_tnl *t;
	LIST_HEAD(list);

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6_link_ops)
			unregister_netdevice_queue(dev, &list);

	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
		t = rtnl_dereference(ip6n->tnls_r_l[h]);
		while (t) {
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, &list);
			t = rtnl_dereference(t->next);
		}
	}

	unregister_netdevice_many(&list);
}

static int __net_init ip6_tnl_init_net(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *t = NULL;
	int err;

	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;
	dev_net_set(ip6n->fb_tnl_dev, net);
	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;

	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	t = netdev_priv(ip6n->fb_tnl_dev);

	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
	return 0;

err_register:
	free_netdev(ip6n->fb_tnl_dev);
err_alloc_dev:
	return err;
}

static void __net_exit ip6_tnl_exit_net(struct net *net)
{
	rtnl_lock();
	ip6_tnl_destroy_tunnels(net);
	rtnl_unlock();
}

static struct pernet_operations ip6_tnl_net_ops = {
	.init = ip6_tnl_init_net,
	.exit = ip6_tnl_exit_net,
	.id   = &ip6_tnl_net_id,
	.size = sizeof(struct ip6_tnl_net),
};

/**
 * ip6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/

static int __init ip6_tunnel_init(void)
{
	int err;

	err = register_pernet_device(&ip6_tnl_net_ops);
	if (err < 0)
		goto out_pernet;

	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
	if (err < 0) {
		pr_err("%s: can't register ip4ip6\n", __func__);
		goto out_ip4ip6;
	}

	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
	if (err < 0) {
		pr_err("%s: can't register ip6ip6\n", __func__);
		goto out_ip6ip6;
	}
	err = rtnl_link_register(&ip6_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return 0;

rtnl_link_failed:
	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
out_ip6ip6:
	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
out_ip4ip6:
	unregister_pernet_device(&ip6_tnl_net_ops);
out_pernet:
	return err;
}

/**
 * ip6_tunnel_cleanup - free resources and unregister protocol
 **/

static void __exit ip6_tunnel_cleanup(void)
{
	rtnl_link_unregister(&ip6_link_ops);
	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
		pr_info("%s: can't deregister ip4ip6\n", __func__);

	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
		pr_info("%s: can't deregister ip6ip6\n", __func__);

	unregister_pernet_device(&ip6_tnl_net_ops);
}

module_init(ip6_tunnel_init);
module_exit(ip6_tunnel_cleanup);