/*
 *	IPv6 tunneling device
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
 *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
 *
 *	Based on:
 *	linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
 *
 *	RFC 2473
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/etherdevice.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst_metadata.h>

MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ip6tnl");
MODULE_ALIAS_NETDEV("ip6tnl0");

#define IP6_TUNNEL_HASH_SIZE_SHIFT  5
#define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);

	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
}

static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
static struct rtnl_link_ops ip6_link_ops __read_mostly;

static unsigned int ip6_tnl_net_id __read_mostly;
struct ip6_tnl_net {
	/* the IPv6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
	struct ip6_tnl __rcu *collect_md_tun;
};

static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
	struct pcpu_sw_netstats tmp, sum = { 0 };
	int i;

	for_each_possible_cpu(i) {
		unsigned int start;
		const struct pcpu_sw_netstats *tstats =
						per_cpu_ptr(dev->tstats, i);

		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			tmp.rx_packets = tstats->rx_packets;
			tmp.rx_bytes = tstats->rx_bytes;
			tmp.tx_packets = tstats->tx_packets;
			tmp.tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

		sum.rx_packets += tmp.rx_packets;
		sum.rx_bytes   += tmp.rx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.tx_bytes   += tmp.tx_bytes;
	}
	dev->stats.rx_packets = sum.rx_packets;
	dev->stats.rx_bytes   = sum.rx_bytes;
	dev->stats.tx_packets = sum.tx_packets;
	dev->stats.tx_bytes   = sum.tx_bytes;
	return &dev->stats;
}

/**
 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 *   @net: network namespace in which the tunnels live
 *   @remote: the address of the tunnel exit-point
 *   @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/

#define for_each_ip6_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))

static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct in6_addr any;

	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	memset(&any, 0, sizeof(any));
	hash = HASH(&any, local);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_any(&t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	hash = HASH(remote, &any);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
		    ipv6_addr_any(&t->parms.laddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	}

	t = rcu_dereference(ip6n->collect_md_tun);
	if (t && t->dev->flags & IFF_UP)
		return t;

	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}

/**
 * ip6_tnl_bucket - get head of list matching given tunnel parameters
 *   @ip6n: the per-net tunnel table
 *   @p: parameters containing tunnel end-points
 *
 * Description:
 *   ip6_tnl_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/

static struct ip6_tnl __rcu **
ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote, local);
	}
	return &ip6n->tnls[prio][h];
}

/**
 * ip6_tnl_link - add tunnel to hash table
 *   @ip6n: the per-net tunnel table
 *   @t: tunnel to be added
 **/

static void
ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, t);
	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

/**
 * ip6_tnl_unlink - remove tunnel from hash table
 *   @ip6n: the per-net tunnel table
 *   @t: tunnel to be removed
 **/

static void
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, NULL);

	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static void ip6_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
	free_percpu(dev->tstats);
}

static int ip6_tnl_create2(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	dev->rtnl_link_ops = &ip6_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(t->parms.name, dev->name);

	dev_hold(dev);
	ip6_tnl_link(ip6n, t);
	return 0;

out:
	return err;
}

/**
 * ip6_tnl_create - create a new tunnel
 *   @net: network namespace to create the tunnel in
 *   @p: tunnel parameters
 *
 * Description:
 *   Create tunnel matching given parameters.
 *
 * Return:
 *   created tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err = -ENOMEM;

	if (p->name[0])
		strlcpy(name, p->name, IFNAMSIZ);
	else
		sprintf(name, "ip6tnl%%d");

	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6_tnl_dev_setup);
	if (!dev)
		goto failed;

	dev_net_set(dev, net);

	t = netdev_priv(dev);
	t->parms = *p;
	t->net = dev_net(dev);
	err = ip6_tnl_create2(dev);
	if (err < 0)
		goto failed_free;

	return t;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}

/**
 * ip6_tnl_locate - find or create tunnel matching given parameters
 *   @net: network namespace of the tunnel
 *   @p: tunnel parameters
 *   @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *   ip6_tnl_locate() first tries to locate an existing tunnel
 *   based on @p. If this is unsuccessful, but @create is set, a new
 *   tunnel device is created and registered for use.
 *
 * Return:
 *   matching tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_locate(struct net *net,
		struct __ip6_tnl_parm *p, int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	for (tp = ip6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr)) {
			if (create)
				return ERR_PTR(-EEXIST);

			return t;
		}
	}
	if (!create)
		return ERR_PTR(-ENODEV);
	return ip6_tnl_create(net, p);
}

/**
 * ip6_tnl_dev_uninit - tunnel device uninitializer
 *   @dev: the device to be destroyed
 *
 * Description:
 *   ip6_tnl_dev_uninit() removes tunnel from its list
 **/

static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		ip6_tnl_unlink(ip6n, t);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}

/**
 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
 *   @skb: received socket buffer
 *   @raw: pointer to the outermost IPv6 header within @skb
 *
 * Return:
 *   0 if none was found,
 *   else index to encapsulation limit
 **/

__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
	unsigned int nhoff = raw - skb->data;
	unsigned int off = nhoff + sizeof(*ipv6h);
	u8 next, nexthdr = ipv6h->nexthdr;

	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
		struct ipv6_opt_hdr *hdr;
		u16 optlen;

		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
			break;

		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag_hdr = (struct frag_hdr *)hdr;

			if (frag_hdr->frag_off)
				break;
			optlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			optlen = (hdr->hdrlen + 2) << 2;
		} else {
			optlen = ipv6_optlen(hdr);
		}
		/* cache hdr->nexthdr, since pskb_may_pull() might
		 * invalidate hdr
		 */
		next = hdr->nexthdr;
		if (nexthdr == NEXTHDR_DEST) {
			u16 i = 2;

			/* Remember : hdr is no longer valid at this point. */
			if (!pskb_may_pull(skb, off + optlen))
				break;

			while (1) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				/* No more room for encapsulation limit */
				if (i + sizeof(*tel) > optlen)
					break;

				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
				/* return index of option if found and valid */
				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
				    tel->length == 1)
					return i + off - nhoff;
				/* else jump to the next option */
				if (tel->type)
					i += tel->length + 2;
				else
					i++;
			}
		}
		nexthdr = next;
		off += optlen;
	}
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
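
/*
 * Worked example (illustrative, not part of the original source): for a
 * tunneled packet laid out as
 *
 *	IPv6 header (40 bytes) | dest opts header | TLV options ...
 *
 * where the first TLV in the destination options header is the tunnel
 * encapsulation limit option (type 4, length 1), the walk above finds it
 * at i = 2 inside the options header at off = nhoff + 40, so the function
 * returns 2 + 40 = 42: the option's offset relative to @raw. Callers such
 * as ip6ip6_tnl_xmit() use that offset to read tel->encap_limit, and to
 * point an ICMPV6_HDR_FIELD parameter problem at the limit byte itself
 * (offset + 2).
 */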

/**
 * ip6_tnl_err - tunnel error handler
 *
 * Description:
 *   ip6_tnl_err() should handle errors in the tunnel according
 *   to the specifications in RFC 2473.
 **/

static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	u8 rel_type = ICMPV6_DEST_UNREACH;
	u8 rel_code = ICMPV6_ADDR_UNREACH;
	__u32 rel_info = 0;
	struct ip6_tnl *t;
	int err = -ENOENT;
	int rel_msg = 0;
	u8 tproto;
	__u16 len;

	/* If the packet doesn't contain the original IPv6 header we are
	 * in trouble since we might need the source address for further
	 * processing of the error.
	 */
	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
	if (!t)
		goto out;

	tproto = READ_ONCE(t->parms.proto);
	if (tproto != ipproto && tproto != 0)
		goto out;

	err = 0;

	switch (*type) {
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 mtu, teli;
	case ICMPV6_DEST_UNREACH:
		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
				    t->parms.name);
		rel_msg = 1;
		break;
	case ICMPV6_TIME_EXCEED:
		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					    t->parms.name);
			rel_msg = 1;
		}
		break;
	case ICMPV6_PARAMPROB:
		teli = 0;
		if ((*code) == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == *info - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						    t->parms.name);
				rel_msg = 1;
			}
		} else {
			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					    t->parms.name);
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		ip6_update_pmtu(skb, net, htonl(*info), 0, 0,
				sock_net_uid(net, NULL));
		mtu = *info - offset;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
		if (len > mtu) {
			rel_type = ICMPV6_PKT_TOOBIG;
			rel_code = 0;
			rel_info = mtu;
			rel_msg = 1;
		}
		break;
	case NDISC_REDIRECT:
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
		break;
	}

	*type = rel_type;
	*code = rel_code;
	*info = rel_info;
	*msg = rel_msg;

out:
	rcu_read_unlock();
	return err;
}

static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	__u32 rel_info = ntohl(info);
	const struct iphdr *eiph;
	struct sk_buff *skb2;
	int err, rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	struct rtable *rt;
	struct flowi4 fl4;

	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg == 0)
		return 0;

	switch (rel_type) {
	case ICMPV6_DEST_UNREACH:
		if (rel_code != ICMPV6_ADDR_UNREACH)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_HOST_UNREACH;
		break;
	case ICMPV6_PKT_TOOBIG:
		if (rel_code != 0)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_FRAG_NEEDED;
		break;
	default:
		return 0;
	}

	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
		return 0;

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return 0;

	skb_dst_drop(skb2);

	skb_pull(skb2, offset);
	skb_reset_network_header(skb2);
	eiph = ip_hdr(skb2);

	/* Try to guess incoming interface */
	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr,
				   0, 0, 0, IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
	if (IS_ERR(rt))
		goto out;

	skb2->dev = rt->dst.dev;
	ip_rt_put(rt);

	/* route "incoming" packet */
	if (rt->rt_flags & RTCF_LOCAL) {
		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
					   eiph->daddr, eiph->saddr, 0, 0,
					   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
		if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) {
			if (!IS_ERR(rt))
				ip_rt_put(rt);
			goto out;
		}
		skb_dst_set(skb2, &rt->dst);
	} else {
		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
				   skb2->dev) ||
		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
			goto out;
	}

	/* change mtu on this route */
	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
		if (rel_info > dst_mtu(skb_dst(skb2)))
			goto out;

		skb_dst_update_pmtu(skb2, rel_info);
	}

	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));

out:
	kfree_skb(skb2);
	return 0;
}

static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	__u32 rel_info = ntohl(info);
	int err, rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;

	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
		struct rt6_info *rt;
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		if (!skb2)
			return 0;

		skb_dst_drop(skb2);
		skb_pull(skb2, offset);
		skb_reset_network_header(skb2);

		/* Try to guess incoming interface */
		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
				NULL, 0, skb2, 0);

		if (rt && rt->dst.dev)
			skb2->dev = rt->dst.dev;

		icmpv6_send(skb2, rel_type, rel_code, rel_info);

		ip6_rt_put(rt);

		kfree_skb(skb2);
	}

	return 0;
}

static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;

	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);

	return IP6_ECN_decapsulate(ipv6h, skb);
}

static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));

	return IP6_ECN_decapsulate(ipv6h, skb);
}

__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
		      const struct in6_addr *laddr,
		      const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ltype = ipv6_addr_type(laddr);
	int rtype = ipv6_addr_type(raddr);
	__u32 flags = 0;

	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
		flags = IP6_TNL_F_CAP_PER_PACKET;
	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
		if (ltype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_XMIT;
		if (rtype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_RCV;
	}
	return flags;
}
EXPORT_SYMBOL(ip6_tnl_get_cap);

/* called with rcu_read_lock() */
int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
		    const struct in6_addr *laddr,
		    const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
		struct net_device *ldev = NULL;

		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if ((ipv6_addr_is_multicast(laddr) ||
		     likely(ipv6_chk_addr_and_flags(net, laddr, ldev, false,
						    0, IFA_F_TENTATIVE))) &&
		    ((p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) ||
		     likely(!ipv6_chk_addr_and_flags(net, raddr, ldev, true,
						     0, IFA_F_TENTATIVE))))
			ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);

static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
			 const struct tnl_ptk_info *tpi,
			 struct metadata_dst *tun_dst,
			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						     const struct ipv6hdr *ipv6h,
						     struct sk_buff *skb),
			 bool log_ecn_err)
{
	struct pcpu_sw_netstats *tstats;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int err;

	if ((!(tpi->flags & TUNNEL_CSUM) &&
	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
	    ((tpi->flags & TUNNEL_CSUM) &&
	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
		if (!(tpi->flags & TUNNEL_SEQ) ||
		    (tunnel->i_seqno &&
		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb->protocol = tpi->proto;

	/* Warning: All skb pointers will be invalidated! */
	if (tunnel->dev->type == ARPHRD_ETHER) {
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			tunnel->dev->stats.rx_length_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}

		ipv6h = ipv6_hdr(skb);
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	skb_reset_network_header(skb);
	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));

	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);

	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
	if (unlikely(err)) {
		if (log_ecn_err)
			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
					     &ipv6h->saddr,
					     ipv6_get_dsfield(ipv6h));
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tun_dst)
		skb_dst_set(skb, (struct dst_entry *)tun_dst);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	if (tun_dst)
		dst_release((struct dst_entry *)tun_dst);
	kfree_skb(skb);
	return 0;
}

int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
		const struct tnl_ptk_info *tpi,
		struct metadata_dst *tun_dst,
		bool log_ecn_err)
{
	return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
			     log_ecn_err);
}
EXPORT_SYMBOL(ip6_tnl_rcv);

static const struct tnl_ptk_info tpi_v6 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IPV6),
};

static const struct tnl_ptk_info tpi_v4 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IP),
};

static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
		      const struct tnl_ptk_info *tpi,
		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						  const struct ipv6hdr *ipv6h,
						  struct sk_buff *skb))
{
	struct ip6_tnl *t;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	int ret = -1;

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);

	if (t) {
		u8 tproto = READ_ONCE(t->parms.proto);

		if (tproto != ipproto && tproto != 0)
			goto drop;
		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;
		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
			goto drop;
		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
			goto drop;
		if (t->parms.collect_md) {
			tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
			if (!tun_dst)
				goto drop;
		}
		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
				    log_ecn_error);
	}

	rcu_read_unlock();

	return ret;

drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}

static int ip4ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
			  ip4ip6_dscp_ecn_decapsulate);
}

static int ip6ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
			  ip6ip6_dscp_ecn_decapsulate);
}

struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];
};

static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
	opt->dst_opt[3] = 1;
	opt->dst_opt[4] = encap_limit;
	opt->dst_opt[5] = IPV6_TLV_PADN;
	opt->dst_opt[6] = 1;

	opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt;
	opt->ops.opt_nflen = 8;
}
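
/*
 * Resulting destination options header, per RFC 2473 (a sketch added for
 * illustration; byte values follow from the assignments above, the rest
 * is zeroed by the memset):
 *
 *	dst_opt[0]	next header	(filled in when the option is pushed)
 *	dst_opt[1]	hdr ext len	0 (header is one 8-octet unit)
 *	dst_opt[2]	option type	4 (IPV6_TLV_TNL_ENCAP_LIMIT)
 *	dst_opt[3]	option length	1
 *	dst_opt[4]	encap limit	remaining nesting depth
 *	dst_opt[5]	option type	1 (IPV6_TLV_PADN)
 *	dst_opt[6]	option length	1
 *	dst_opt[7]	padding		0
 */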

/**
 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/

static inline bool
ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
		     const struct in6_addr *laddr,
		     const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if (t->parms.collect_md)
		return 1;

	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
		struct net_device *ldev = NULL;

		rcu_read_lock();
		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false,
						      0, IFA_F_TENTATIVE)))
			pr_warn("%s xmit: Local address not yet configured!\n",
				p->name);
		else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
			 !ipv6_addr_is_multicast(raddr) &&
			 unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev,
							  true, 0, IFA_F_TENTATIVE)))
			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
				p->name);
		else
			ret = 1;
		rcu_read_unlock();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);

/**
 * ip6_tnl_xmit - encapsulate packet and send
 *   @skb: the outgoing socket buffer
 *   @dev: the outgoing tunnel device
 *   @dsfield: dscp code for outer header
 *   @fl6: flow of tunneled packet
 *   @encap_limit: encapsulation limit
 *   @pmtu: Path MTU is stored if packet is too big
 *   @proto: next header value
 *
 * Description:
 *   Build new header and do some sanity checks on the packet before sending
 *   it.
 *
 * Return:
 *   0 on success
 *   -1 fail
 *   %-EMSGSIZE message too big. return mtu in this case.
 **/

int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
		 __u8 proto)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct net_device_stats *stats = &t->dev->stats;
	struct ipv6hdr *ipv6h;
	struct ipv6_tel_txoption opt;
	struct dst_entry *dst = NULL, *ndst = NULL;
	struct net_device *tdev;
	int mtu;
	unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
	unsigned int max_headroom = psh_hlen;
	bool use_cache = false;
	u8 hop_limit;
	int err = -1;

	if (t->parms.collect_md) {
		hop_limit = skb_tunnel_info(skb)->key.ttl;
		goto route_lookup;
	} else {
		hop_limit = t->parms.hop_limit;
	}

	/* NBMA tunnel */
	if (ipv6_addr_any(&t->parms.raddr)) {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			struct in6_addr *addr6;
			struct neighbour *neigh;
			int addr_type;

			if (!skb_dst(skb))
				goto tx_err_link_failure;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (!neigh)
				goto tx_err_link_failure;

			addr6 = (struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY)
				addr6 = &ipv6_hdr(skb)->daddr;

			memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
			neigh_release(neigh);
		}
	} else if (t->parms.proto != 0 && !(t->parms.flags &
					    (IP6_TNL_F_USE_ORIG_TCLASS |
					     IP6_TNL_F_USE_ORIG_FWMARK))) {
		/* enable the cache only if neither the outer protocol nor the
		 * routing decision depends on the current inner header value
		 */
		use_cache = true;
	}

	if (use_cache)
		dst = dst_cache_get(&t->dst_cache);

	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
		goto tx_err_link_failure;

	if (!dst) {
route_lookup:
		/* add dsfield to flowlabel for route lookup */
		fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);

		dst = ip6_route_output(net, NULL, fl6);

		if (dst->error)
			goto tx_err_link_failure;
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			dst = NULL;
			goto tx_err_link_failure;
		}
		if (t->parms.collect_md &&
		    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
				       &fl6->daddr, 0, &fl6->saddr))
			goto tx_err_link_failure;
		ndst = dst;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     t->parms.name);
		goto tx_err_dst_release;
	}
	mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
	if (encap_limit >= 0) {
		max_headroom += 8;
		mtu -= 8;
	}
	if (skb->protocol == htons(ETH_P_IPV6)) {
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
	} else if (mtu < 576) {
		mtu = 576;
	}

	skb_dst_update_pmtu(skb, mtu);
	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
		*pmtu = mtu;
		err = -EMSGSIZE;
		goto tx_err_dst_release;
	}

	if (t->err_count > 0) {
		if (time_before(jiffies,
				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
			t->err_count--;

			dst_link_failure(skb);
		} else {
			t->err_count = 0;
		}
	}

	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb;

		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb)
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	if (t->parms.collect_md) {
		if (t->encap.type != TUNNEL_ENCAP_NONE)
			goto tx_err_dst_release;
	} else {
		if (use_cache && ndst)
			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
	}
	skb_dst_set(skb, dst);

	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		ipv6_push_frag_opts(skb, &opt.ops, &proto);
	}
	hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);

	/* Calculate max headroom for all the headers and adjust
	 * needed_headroom if necessary.
	 */
	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
			+ dst->header_len + t->hlen;
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	err = ip6_tnl_encap(skb, t, &proto, fl6);
	if (err)
		return err;

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	ipv6h = ipv6_hdr(skb);
	ip6_flow_hdr(ipv6h, dsfield,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
	ipv6h->hop_limit = hop_limit;
	ipv6h->nexthdr = proto;
	ipv6h->saddr = fl6->saddr;
	ipv6h->daddr = fl6->daddr;
	ip6tunnel_xmit(NULL, skb, dev);
	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}
EXPORT_SYMBOL(ip6_tnl_xmit);
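
/*
 * Illustrative tunnel MTU arithmetic (an added sketch, not part of the
 * original source): for a plain ip6ip6 tunnel over a 1500-byte link,
 * eth_hlen and tun_hlen are 0 and psh_hlen is the 40-byte IPv6 header,
 * so the inner packet may carry at most 1500 - 40 = 1460 bytes. If an
 * encapsulation limit option is sent, its 8-byte destination options
 * header lowers that to 1452. A larger non-GSO packet fails with
 * -EMSGSIZE and the computed value is reported back through @pmtu.
 */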

static inline int
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	const struct iphdr *iph = ip_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	tproto = READ_ONCE(t->parms.proto);
	if (tproto != IPPROTO_IPIP && tproto != 0)
		return -1;

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPIP;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		dsfield = key->tos;
	} else {
		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
			encap_limit = t->parms.encap_limit;

		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPIP;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
			dsfield = ipv4_get_dsfield(iph);
		else
			dsfield = ip6_tclass(t->parms.flowinfo);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
		else
			fl6.flowi6_mark = t->parms.fwmark;
	}

	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));

	skb_set_inner_ipproto(skb, IPPROTO_IPIP);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   IPPROTO_IPIP);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}

	return 0;
}

static inline int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	__u16 offset;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	tproto = READ_ONCE(t->parms.proto);
	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
	    ip6_tnl_addr_conflict(t, ipv6h))
		return -1;

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPV6;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		dsfield = key->tos;
	} else {
		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
		/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
		ipv6h = ipv6_hdr(skb);
		if (offset > 0) {
			struct ipv6_tlv_tnl_enc_lim *tel;

			tel = (void *)&skb_network_header(skb)[offset];
			if (tel->encap_limit == 0) {
				icmpv6_send(skb, ICMPV6_PARAMPROB,
					    ICMPV6_HDR_FIELD, offset + 2);
				return -1;
			}
			encap_limit = tel->encap_limit - 1;
		} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
			encap_limit = t->parms.encap_limit;
		}

		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_IPV6;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
			dsfield = ipv6_get_dsfield(ipv6h);
		else
			dsfield = ip6_tclass(t->parms.flowinfo);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
			fl6.flowlabel |= ip6_flowlabel(ipv6h);
		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
		else
			fl6.flowi6_mark = t->parms.fwmark;
	}

	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));

	skb_set_inner_ipproto(skb, IPPROTO_IPV6);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   IPPROTO_IPV6);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}

static netdev_tx_t
ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip4ip6_tnl_xmit(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6ip6_tnl_xmit(skb, dev);
		break;
	default:
		goto tx_err;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ip6_tnl_link_config(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	int t_hlen;

	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;

	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, NULL, strict);

		if (!rt)
			return;

		if (rt->dst.dev) {
			dev->hard_header_len = rt->dst.dev->hard_header_len +
				t_hlen;

			dev->mtu = rt->dst.dev->mtu - t_hlen;
			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
				dev->mtu -= 8;

			if (dev->mtu < IPV6_MIN_MTU)
				dev->mtu = IPV6_MIN_MTU;
		}
		ip6_rt_put(rt);
	}
}

/**
 * ip6_tnl_change - update the tunnel parameters
 *   @t: tunnel to be changed
 *   @p: tunnel configuration parameters
 *
 * Description:
 *   ip6_tnl_change() updates the tunnel parameters
 **/

static int
ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	t->parms.fwmark = p->fwmark;
	dst_cache_reset(&t->dst_cache);
	ip6_tnl_link_config(t);
	return 0;
}

static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	ip6_tnl_unlink(ip6n, t);
	synchronize_net();
	err = ip6_tnl_change(t, p);
	ip6_tnl_link(ip6n, t);
	netdev_state_change(t->dev);
	return err;
}

static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	/* for default tnl0 device allow to change only the proto */
	t->parms.proto = p->proto;
	netdev_state_change(t->dev);
	return 0;
}

static void
ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->proto = u->proto;
	memcpy(p->name, u->name, sizeof(u->name));
}

static void
ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
{
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->proto = p->proto;
	memcpy(u->name, p->name, sizeof(u->name));
}

/**
 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
 *   @dev: virtual device associated with tunnel
 *   @ifr: parameters passed from userspace
 *   @cmd: command to be performed
 *
 * Description:
 *   ip6_tnl_ioctl() is used for managing IPv6 tunnels
 *   from userspace.
 *
 *   The possible commands are the following:
 *   %SIOCGETTUNNEL: get tunnel parameters for device
 *   %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
 *   %SIOCCHGTUNNEL: change tunnel parameters to those given
 *   %SIOCDELTUNNEL: delete tunnel
 *
 *   The fallback device "ip6tnl0", created during module
 *   initialization, can be used for creating other tunnel devices.
 *
 * Return:
 *   0 on success,
 *   %-EFAULT if unable to copy data to or from userspace,
 *   %-EPERM if current process lacks %CAP_NET_ADMIN,
 *   %-EINVAL if passed tunnel parameters are invalid,
 *   %-EEXIST if changing a tunnel's parameters would cause a conflict
 *   %-ENODEV if attempting to change or delete a nonexistent device
 **/

static int
ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	memset(&p1, 0, sizeof(p1));

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ip6n->fb_tnl_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				t = netdev_priv(dev);
		} else {
			memset(&p, 0, sizeof(p));
		}
		ip6_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			break;
		err = -EINVAL;
		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
		    p.proto != 0)
			break;
		ip6_tnl_parm_from_user(&p1, &p);
		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
		if (cmd == SIOCCHGTUNNEL) {
			if (!IS_ERR(t)) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else
				t = netdev_priv(dev);
			if (dev == ip6n->fb_tnl_dev)
				err = ip6_tnl0_update(t, &p1);
			else
				err = ip6_tnl_update(t, &p1);
		}
		if (!IS_ERR(t)) {
			err = 0;
			ip6_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;

		} else {
			err = PTR_ERR(t);
		}
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;

		if (dev == ip6n->fb_tnl_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				break;
			err = -ENOENT;
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				break;
			err = -EPERM;
			if (t->dev == ip6n->fb_tnl_dev)
				break;
			dev = t->dev;
		}
		err = 0;
		unregister_netdevice(dev);
		break;
	default:
		err = -EINVAL;
	}
	return err;
}
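
/*
 * Minimal user-space sketch of the ioctl interface above (illustrative
 * only; error handling omitted, the addresses are placeholders, and the
 * caller needs CAP_NET_ADMIN plus <linux/ip6_tunnel.h> for the parm
 * structure). Issued against the fallback device, it creates a tunnel:
 *
 *	struct ip6_tnl_parm p = { .proto = IPPROTO_IPV6 };
 *	struct ifreq ifr = { 0 };
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	strcpy(p.name, "mytun");
 *	inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
 *	inet_pton(AF_INET6, "2001:db8::2", &p.raddr);
 *	strcpy(ifr.ifr_name, "ip6tnl0");
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);
 */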

/**
 * ip6_tnl_change_mtu - change mtu manually for tunnel device
 *   @dev: virtual device associated with tunnel
 *   @new_mtu: the new mtu
 *
 * Return:
 *   0 on success,
 *   %-EINVAL if mtu too small
 **/

int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip6_tnl *tnl = netdev_priv(dev);

	if (tnl->parms.proto == IPPROTO_IPV6) {
		if (new_mtu < IPV6_MIN_MTU)
			return -EINVAL;
	} else {
		if (new_mtu < ETH_MIN_MTU)
			return -EINVAL;
	}
	if (new_mtu > 0xFFF8 - dev->hard_header_len)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_change_mtu);

int ip6_tnl_get_iflink(const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	return t->parms.link;
}
EXPORT_SYMBOL(ip6_tnl_get_iflink);

int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	return !cmpxchg((const struct ip6_tnl_encap_ops **)
			&ip6tun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip6_tnl_encap_add_ops);

int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
		       &ip6tun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(ip6_tnl_encap_del_ops);

int ip6_tnl_encap_setup(struct ip6_tnl *t,
			struct ip_tunnel_encap *ipencap)
{
	int hlen;

	memset(&t->encap, 0, sizeof(t->encap));

	hlen = ip6_encap_hlen(ipencap);
	if (hlen < 0)
		return hlen;

	t->encap.type = ipencap->type;
	t->encap.sport = ipencap->sport;
	t->encap.dport = ipencap->dport;
	t->encap.flags = ipencap->flags;

	t->encap_hlen = hlen;
	t->hlen = t->encap_hlen + t->tun_hlen;

	return 0;
}
EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);

static const struct net_device_ops ip6_tnl_netdev_ops = {
	.ndo_init	= ip6_tnl_dev_init,
	.ndo_uninit	= ip6_tnl_dev_uninit,
	.ndo_start_xmit = ip6_tnl_start_xmit,
	.ndo_do_ioctl	= ip6_tnl_ioctl,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_stats	= ip6_get_stats,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};

#define IPXIPX_FEATURES (NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_GSO_SOFTWARE |	\
			 NETIF_F_HW_CSUM)

/**
 * ip6_tnl_dev_setup - setup virtual tunnel device
 *   @dev: virtual device associated with tunnel
 *
 * Description:
 *   Initialize function pointers and device parameters
 **/

static void ip6_tnl_dev_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6_tnl_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6_dev_free;

	dev->type = ARPHRD_TUNNEL6;
	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	dev->features |= NETIF_F_LLTX;
	netif_keep_dst(dev);

	dev->features		|= IPXIPX_FEATURES;
	dev->hw_features	|= IPXIPX_FEATURES;

	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}


/**
 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int ret;
	int t_hlen;

	t->dev = dev;
	t->net = dev_net(dev);
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
	if (ret)
		goto free_stats;

	ret = gro_cells_init(&t->gro_cells, dev);
	if (ret)
		goto destroy_dst;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	dev->type = ARPHRD_TUNNEL6;
	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = 0xFFF8 - dev->hard_header_len;

	return 0;

destroy_dst:
	dst_cache_destroy(&t->dst_cache);
free_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;

	return ret;
}

/**
 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static int ip6_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int err = ip6_tnl_dev_init_gen(dev);

	if (err)
		return err;
	ip6_tnl_link_config(t);
	if (t->parms.collect_md) {
		dev->features |= NETIF_F_NETNS_LOCAL;
		netif_keep_dst(dev);
	}
	return 0;
}

/**
 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
 *   @dev: fallback device
 *
 * Return: 0
 **/

static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	t->parms.proto = IPPROTO_IPV6;
	dev_hold(dev);

	rcu_assign_pointer(ip6n->tnls_wc[0], t);
	return 0;
}

static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	u8 proto;

	if (!data || !data[IFLA_IPTUN_PROTO])
		return 0;

	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
	if (proto != IPPROTO_IPV6 &&
	    proto != IPPROTO_IPIP &&
	    proto != 0)
		return -EINVAL;

	return 0;
}

static void ip6_tnl_netlink_parms(struct nlattr *data[],
				  struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_IPTUN_LINK])
		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);

	if (data[IFLA_IPTUN_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);

	if (data[IFLA_IPTUN_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);

	if (data[IFLA_IPTUN_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);

	if (data[IFLA_IPTUN_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);

	if (data[IFLA_IPTUN_FLOWINFO])
		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);

	if (data[IFLA_IPTUN_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);

	if (data[IFLA_IPTUN_PROTO])
		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);

	if (data[IFLA_IPTUN_COLLECT_METADATA])
		parms->collect_md = true;

	if (data[IFLA_IPTUN_FWMARK])
		parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
}

static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
					struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
	}

	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
	}

	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
	}

	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
	}

	return ret;
}

static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip_tunnel_encap ipencap;
	struct ip6_tnl *nt, *t;
	int err;

	nt = netdev_priv(dev);

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		err = ip6_tnl_encap_setup(nt, &ipencap);
		if (err < 0)
			return err;
	}

	ip6_tnl_netlink_parms(data, &nt->parms);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ip6n->collect_md_tun))
			return -EEXIST;
	} else {
		t = ip6_tnl_locate(net, &nt->parms, 0);
		if (!IS_ERR(t))
			return -EEXIST;
	}

	err = ip6_tnl_create2(dev);
	if (!err && tb[IFLA_MTU])
		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));

	return err;
}

static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
			      struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm p;
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ip6n->fb_tnl_dev)
		return -EINVAL;

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}
	ip6_tnl_netlink_parms(data, &p);
	if (p.collect_md)
		return -EINVAL;

	t = ip6_tnl_locate(net, &p, 0);
	if (!IS_ERR(t)) {
		if (t->dev != dev)
			return -EEXIST;
	} else
		t = netdev_priv(dev);

	return ip6_tnl_update(t, &p);
}

static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev != ip6n->fb_tnl_dev)
		unregister_netdevice_queue(dev, head);
}

static size_t ip6_tnl_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_IPTUN_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_IPTUN_FLAGS */
		nla_total_size(4) +
		/* IFLA_IPTUN_PROTO */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_IPTUN_FWMARK */
		nla_total_size(4) +
		0;
}

static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	struct __ip6_tnl_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
		goto nla_put_failure;

	if (parm->collect_md)
		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
			goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

struct net *ip6_tnl_get_link_net(const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	return tunnel->net;
}
EXPORT_SYMBOL(ip6_tnl_get_link_net);

static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_IPTUN_FWMARK]		= { .type = NLA_U32 },
};

static struct rtnl_link_ops ip6_link_ops __read_mostly = {
	.kind		= "ip6tnl",
	.maxtype	= IFLA_IPTUN_MAX,
	.policy		= ip6_tnl_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6_tnl_dev_setup,
	.validate	= ip6_tnl_validate,
	.newlink	= ip6_tnl_newlink,
	.changelink	= ip6_tnl_changelink,
	.dellink	= ip6_tnl_dellink,
	.get_size	= ip6_tnl_get_size,
	.fill_info	= ip6_tnl_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};
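
/*
 * The rtnl_link_ops above back the iproute2 "ip6tnl" link type; a typical
 * invocation looks like this (illustrative, addresses are placeholders):
 *
 *	ip link add name tun1 type ip6tnl mode ip6ip6 \
 *		local 2001:db8::1 remote 2001:db8::2 encaplimit 4
 *
 * or, for external (collect_md) metadata mode:
 *
 *	ip link add name tun2 type ip6tnl external
 */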
static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
	.handler	= ip6ip6_rcv,
	.err_handler	= ip6ip6_err,
	.priority	= 1,
};

static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head *list)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct net_device *dev, *aux;
	int h;
	struct ip6_tnl *t;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6_link_ops)
			unregister_netdevice_queue(dev, list);

	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
		t = rtnl_dereference(ip6n->tnls_r_l[h]);
		while (t) {
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, list);
			t = rtnl_dereference(t->next);
		}
	}
}

static int __net_init ip6_tnl_init_net(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *t = NULL;
	int err;

	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	if (!net_has_fallback_tunnels(net))
		return 0;
	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;
	dev_net_set(ip6n->fb_tnl_dev, net);
	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;

	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	t = netdev_priv(ip6n->fb_tnl_dev);

	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
	return 0;

err_register:
	free_netdev(ip6n->fb_tnl_dev);
err_alloc_dev:
	return err;
}

static void __net_exit ip6_tnl_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		ip6_tnl_destroy_tunnels(net, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations ip6_tnl_net_ops = {
	.init = ip6_tnl_init_net,
	.exit_batch = ip6_tnl_exit_batch_net,
	.id   = &ip6_tnl_net_id,
	.size = sizeof(struct ip6_tnl_net),
};

/**
 * ip6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/

static int __init ip6_tunnel_init(void)
{
	int err;

	if (!ipv6_mod_enabled())
		return -EOPNOTSUPP;

	err = register_pernet_device(&ip6_tnl_net_ops);
	if (err < 0)
		goto out_pernet;

	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
	if (err < 0) {
		pr_err("%s: can't register ip4ip6\n", __func__);
		goto out_ip4ip6;
	}

	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
	if (err < 0) {
		pr_err("%s: can't register ip6ip6\n", __func__);
		goto out_ip6ip6;
	}
	err = rtnl_link_register(&ip6_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return 0;

rtnl_link_failed:
	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
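	/* Error unwinding mirrors the registration order above: each label
	 * undoes one completed step, from the most recent back to the first.
	 */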
out_ip6ip6:
	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
out_ip4ip6:
	unregister_pernet_device(&ip6_tnl_net_ops);
out_pernet:
	return err;
}

/**
 * ip6_tunnel_cleanup - free resources and unregister protocol
 **/

static void __exit ip6_tunnel_cleanup(void)
{
	rtnl_link_unregister(&ip6_link_ops);
	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
		pr_info("%s: can't deregister ip4ip6\n", __func__);

	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
		pr_info("%s: can't deregister ip6ip6\n", __func__);

	unregister_pernet_device(&ip6_tnl_net_ops);
}

module_init(ip6_tunnel_init);
module_exit(ip6_tunnel_cleanup);
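
/* Usage sketch (illustrative only; assumes the iproute2 tools are
 * available): once this module is loaded, the rtnl_link_ops registered
 * above can be exercised from userspace with commands along the lines of
 *
 *	modprobe ip6_tunnel
 *	ip link add name tun1 type ip6tnl mode ip6ip6 \
 *		local 2001:db8::1 remote 2001:db8::2
 *	ip link set tun1 up
 *
 * The "ip6tnl" link kind is resolved to this module, and the netlink
 * request is validated against ip6_tnl_policy before being handled by
 * ip6_tnl_newlink().
 */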