/*
 *	GRE over IPv6 protocol decoder.
 *
 *	Authors: Dmitry Kozlov (xeb@mail.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <linux/if_tunnel.h>
#include <linux/ip6_tunnel.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/addrconf.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ip6_tunnel.h>
#include <net/gre.h>
#include <net/erspan.h>
#include <net/dst_metadata.h>


static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

#define IP6_GRE_HASH_SIZE_SHIFT  5
#define IP6_GRE_HASH_SIZE (1 << IP6_GRE_HASH_SIZE_SHIFT)

static unsigned int ip6gre_net_id __read_mostly;
struct ip6gre_net {
	struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];

	struct ip6_tnl __rcu *collect_md_tun;
	struct ip6_tnl __rcu *collect_md_tun_erspan;
	struct net_device *fb_tunnel_dev;
};

static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly;
static int ip6gre_tunnel_init(struct net_device *dev);
static void ip6gre_tunnel_setup(struct net_device *dev);
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);

/* Tunnel hash table */

/*
   4 hash tables:

   3: (remote,local)
   2: (remote,*)
   1: (*,local)
   0: (*,*)

   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless tunnel,
   will match the fallback tunnel.
 */
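
/* HASH_KEY() folds the 32-bit GRE key into IP6_GRE_HASH_SIZE buckets;
 * HASH_ADDR() does the same for an IPv6 address via ipv6_addr_hash().
 */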
#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(IP6_GRE_HASH_SIZE - 1))
static u32 HASH_ADDR(const struct in6_addr *addr)
{
	u32 hash = ipv6_addr_hash(addr);

	return hash_32(hash, IP6_GRE_HASH_SIZE_SHIFT);
}

#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]

/* Given src, dst and key, find appropriate for input tunnel. */

static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
		const struct in6_addr *remote, const struct in6_addr *local,
		__be32 key, __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned int h0 = HASH_ADDR(remote);
	unsigned int h1 = HASH_KEY(key);
	struct ip6_tnl *t, *cand = NULL;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int dev_type = (gre_proto == htons(ETH_P_TEB) ||
			gre_proto == htons(ETH_P_ERSPAN) ||
			gre_proto == htons(ETH_P_ERSPAN2)) ?
		       ARPHRD_ETHER : ARPHRD_IP6GRE;
	int score, cand_score = 4;

	for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
		if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
		     (!ipv6_addr_equal(local, &t->parms.raddr) ||
		      !ipv6_addr_is_multicast(local))) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	if (cand)
		return cand;

	if (gre_proto == htons(ETH_P_ERSPAN) ||
	    gre_proto == htons(ETH_P_ERSPAN2))
		t = rcu_dereference(ign->collect_md_tun_erspan);
	else
		t = rcu_dereference(ign->collect_md_tun);

	if (t && t->dev->flags & IFF_UP)
		return t;

	dev = ign->fb_tunnel_dev;
	if (dev && dev->flags & IFF_UP)
		return netdev_priv(dev);

	return NULL;
}
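
/* Pick the hash bucket for a tunnel: prio 0 is (*,*), prio 1 (*,local),
 * prio 2 (remote,*) and prio 3 (remote,local), hashed on the input key
 * and, when a unicast remote address is set, on that address as well.
 */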
static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
		const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = HASH_KEY(p->i_key);
	int prio = 0;

	if (!ipv6_addr_any(local))
		prio |= 1;
	if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
		prio |= 2;
		h ^= HASH_ADDR(remote);
	}

	return &ign->tunnels[prio][h];
}

static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun, t);
}

static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun_erspan, t);
}

static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun, NULL);
}

static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
				       struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
}

static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
		const struct ip6_tnl *t)
{
	return __ip6gre_bucket(ign, &t->parms);
}

static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	for (tp = ip6gre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
					  const struct __ip6_tnl_parm *parms,
					  int type)
{
	const struct in6_addr *remote = &parms->raddr;
	const struct in6_addr *local = &parms->laddr;
	__be32 key = parms->i_key;
	int link = parms->link;
	struct ip6_tnl *t;
	struct ip6_tnl __rcu **tp;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	for (tp = __ip6gre_bucket(ign, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next)
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    key == t->parms.i_key &&
		    link == t->parms.link &&
		    type == t->dev->type)
			break;

	return t;
}

static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
		const struct __ip6_tnl_parm *parms, int create)
{
	struct ip6_tnl *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
	if (t && create)
		return NULL;
	if (t || !create)
		return t;

	if (parms->name[0]) {
		if (!dev_valid_name(parms->name))
			return NULL;
		strlcpy(name, parms->name, IFNAMSIZ);
	} else {
		strcpy(name, "ip6gre%d");
	}
	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6gre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ip6gre_link_ops;

	nt->dev = dev;
	nt->net = dev_net(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	ip6gre_tnl_link_config(nt, 1);

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & TUNNEL_SEQ))
		dev->features |= NETIF_F_LLTX;

	dev_hold(dev);
	ip6gre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}

static void ip6erspan_tunnel_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);

	ip6erspan_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}

static void ip6gre_tunnel_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);

	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}


static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *ipv6h;
	struct tnl_ptk_info tpi;
	struct ip6_tnl *t;

	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IPV6),
			     offset) < 0)
		return;

	ipv6h = (const struct ipv6hdr *)skb->data;
	t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
				 tpi.key, tpi.proto);
	if (!t)
		return;

	switch (type) {
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 teli;
	case ICMPV6_DEST_UNREACH:
		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
				    t->parms.name);
		if (code != ICMPV6_PORT_UNREACH)
			break;
		return;
	case ICMPV6_TIME_EXCEED:
		if (code == ICMPV6_EXC_HOPLIMIT) {
			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					    t->parms.name);
			break;
		}
		return;
	case ICMPV6_PARAMPROB:
		teli = 0;
		if (code == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == be32_to_cpu(info) - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						    t->parms.name);
			}
		} else {
			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					    t->parms.name);
		}
		return;
	case ICMPV6_PKT_TOOBIG:
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
		return;
	case NDISC_REDIRECT:
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
		return;
	}

	if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}

static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
	const struct ipv6hdr *ipv6h;
	struct ip6_tnl *tunnel;

	ipv6h = ipv6_hdr(skb);
	tunnel = ip6gre_tunnel_lookup(skb->dev,
				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
				      tpi->proto);
	if (tunnel) {
		if (tunnel->parms.collect_md) {
			struct metadata_dst *tun_dst;
			__be64 tun_id;
			__be16 flags;

			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;

			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		} else {
			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
		}

		return PACKET_RCVD;
	}

	return PACKET_REJECT;
}
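
/* ERSPAN receive path: the ERSPAN session ID from the base header is used
 * as the lookup key, and for collect_md tunnels the version-specific
 * metadata is copied into the tunnel dst before handing off to ip6_tnl_rcv().
 */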
static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
			 struct tnl_ptk_info *tpi)
{
	struct erspan_base_hdr *ershdr;
	struct erspan_metadata *pkt_md;
	const struct ipv6hdr *ipv6h;
	struct erspan_md2 *md2;
	struct ip6_tnl *tunnel;
	u8 ver;

	if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
		return PACKET_REJECT;

	ipv6h = ipv6_hdr(skb);
	ershdr = (struct erspan_base_hdr *)skb->data;
	ver = ershdr->ver;
	tpi->key = cpu_to_be32(get_session_id(ershdr));

	tunnel = ip6gre_tunnel_lookup(skb->dev,
				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
				      tpi->proto);
	if (tunnel) {
		int len = erspan_hdr_len(ver);

		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		ershdr = (struct erspan_base_hdr *)skb->data;
		pkt_md = (struct erspan_metadata *)(ershdr + 1);

		if (__iptunnel_pull_header(skb, len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			return PACKET_REJECT;

		if (tunnel->parms.collect_md) {
			struct metadata_dst *tun_dst;
			struct ip_tunnel_info *info;
			struct erspan_metadata *md;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
						  sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			info = &tun_dst->u.tun_info;
			md = ip_tunnel_info_opts(info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);

			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);

		} else {
			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
		}

		return PACKET_RCVD;
	}

	return PACKET_REJECT;
}

static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
	if (hdr_len < 0)
		goto drop;

	if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (ip6erspan_rcv(skb, hdr_len, &tpi) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
		return 0;

out:
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb,
					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
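
/* The prepare_ip6gre_xmit_*() helpers fill in the flow (fl6), the DS field
 * and the encapsulation limit for a locally configured (non collect_md)
 * tunnel, copying TOS/flow label/fwmark from the inner packet when the
 * corresponding IP6_TNL_F_USE_ORIG_* flags are set.
 */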
static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
				     struct net_device *dev,
				     struct flowi6 *fl6, __u8 *dsfield,
				     int *encap_limit)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct ip6_tnl *t = netdev_priv(dev);

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		*encap_limit = t->parms.encap_limit;

	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		*dsfield = ipv4_get_dsfield(iph);
	else
		*dsfield = ip6_tclass(t->parms.flowinfo);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6->flowi6_mark = skb->mark;
	else
		fl6->flowi6_mark = t->parms.fwmark;

	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
}

static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
				    struct net_device *dev,
				    struct flowi6 *fl6, __u8 *dsfield,
				    int *encap_limit)
{
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct ip6_tnl *t = netdev_priv(dev);
	__u16 offset;

	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
	/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */

	if (offset > 0) {
		struct ipv6_tlv_tnl_enc_lim *tel;

		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
		if (tel->encap_limit == 0) {
			icmpv6_send(skb, ICMPV6_PARAMPROB,
				    ICMPV6_HDR_FIELD, offset + 2);
			return -1;
		}
		*encap_limit = tel->encap_limit - 1;
	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
		*encap_limit = t->parms.encap_limit;
	}

	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		*dsfield = ipv6_get_dsfield(ipv6h);
	else
		*dsfield = ip6_tclass(t->parms.flowinfo);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
		fl6->flowlabel |= ip6_flowlabel(ipv6h);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6->flowi6_mark = skb->mark;
	else
		fl6->flowi6_mark = t->parms.fwmark;

	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	return 0;
}
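
/* __gre6_xmit() pushes the GRE header and hands the skb to ip6_tnl_xmit().
 * For collect_md tunnels the destination, flow label and GRE flags are taken
 * from the per-packet tunnel metadata instead of the tunnel's parameters.
 */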
static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
			       struct net_device *dev, __u8 dsfield,
			       struct flowi6 *fl6, int encap_limit,
			       __u32 *pmtu, __be16 proto)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	__be16 protocol;

	if (dev->type == ARPHRD_ETHER)
		IPCB(skb)->flags = 0;

	if (dev->header_ops && dev->type == ARPHRD_IP6GRE)
		fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr;
	else
		fl6->daddr = tunnel->parms.raddr;

	if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
		return -ENOMEM;

	/* Push GRE header. */
	protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;

	if (tunnel->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;
		__be16 flags;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info ||
			     !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -EINVAL;

		key = &tun_info->key;
		memset(fl6, 0, sizeof(*fl6));
		fl6->flowi6_proto = IPPROTO_GRE;
		fl6->daddr = key->u.ipv6.dst;
		fl6->flowlabel = key->label;
		fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);

		dsfield = key->tos;
		flags = key->tun_flags &
			(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
		tunnel->tun_hlen = gre_calc_hlen(flags);

		gre_build_header(skb, tunnel->tun_hlen,
				 flags, protocol,
				 tunnel_id_to_key32(tun_info->key.tun_id),
				 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
						      : 0);

	} else {
		if (tunnel->parms.o_flags & TUNNEL_SEQ)
			tunnel->o_seqno++;

		gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
				 protocol, tunnel->parms.o_key,
				 htonl(tunnel->o_seqno));
	}

	return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
			    NEXTHDR_GRE);
}

static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield = 0;
	__u32 mtu;
	int err;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	if (!t->parms.collect_md)
		prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
					 &dsfield, &encap_limit);

	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
	if (err)
		return -1;

	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			  skb->protocol);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}

	return 0;
}

static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield = 0;
	__u32 mtu;
	int err;

	if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
		return -1;

	if (!t->parms.collect_md &&
	    prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
		return -1;

	if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
		return -1;

	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
			  &mtu, skb->protocol);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}

/**
 * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/

static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
	const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u32 mtu;
	int err;

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;

	if (!t->parms.collect_md)
		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));

	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
	if (err)
		return err;

	err = __gre6_xmit(skb, dev, 0, &fl6, encap_limit, &mtu, skb->protocol);

	return err;
}

static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
	struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;

	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
		goto tx_err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip6gre_xmit_ipv4(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6gre_xmit_ipv6(skb, dev);
		break;
	default:
		ret = ip6gre_xmit_other(skb, dev);
		break;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
					 struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct dst_entry *dst = skb_dst(skb);
	struct net_device_stats *stats;
	bool truncate = false;
	int encap_limit = -1;
	__u8 dsfield = false;
	struct flowi6 fl6;
	int err = -EINVAL;
	__u32 mtu;
	int nhoff;
	int thoff;

	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
		goto tx_err;

	if (gre_handle_offloads(skb, false))
		goto tx_err;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	thoff = skb_transport_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
		truncate = true;

	if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
		goto tx_err;

	t->parms.o_flags &= ~TUNNEL_KEY;
	IPCB(skb)->flags = 0;

	/* For collect_md mode, derive fl6 from the tunnel key,
	 * for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}.
	 */
	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;
		struct erspan_metadata *md;
		__be32 tun_id;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info ||
			     !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -EINVAL;

		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_GRE;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

		dsfield = key->tos;
		if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
			goto tx_err;
		md = ip_tunnel_info_opts(tun_info);
		if (!md)
			goto tx_err;

		tun_id = tunnel_id_to_key32(key->tun_id);
		if (md->version == 1) {
			erspan_build_header(skb,
					    ntohl(tun_id),
					    ntohl(md->u.index), truncate,
					    false);
		} else if (md->version == 2) {
			erspan_build_header_v2(skb,
					       ntohl(tun_id),
					       md->u.md2.dir,
					       get_hwid(&md->u.md2),
					       truncate, false);
		} else {
			goto tx_err;
		}
	} else {
		struct ipv6hdr *ipv6h = ipv6_hdr(skb);

		switch (skb->protocol) {
		case htons(ETH_P_IP):
			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
			prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
						 &dsfield, &encap_limit);
			break;
		case htons(ETH_P_IPV6):
			if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
				goto tx_err;
			if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
						     &dsfield, &encap_limit))
				goto tx_err;
			break;
		default:
			memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
			break;
		}

		if (t->parms.erspan_ver == 1)
			erspan_build_header(skb, ntohl(t->parms.o_key),
					    t->parms.index,
					    truncate, false);
		else if (t->parms.erspan_ver == 2)
			erspan_build_header_v2(skb, ntohl(t->parms.o_key),
					       t->parms.dir,
					       t->parms.hwid,
					       truncate, false);
		else
			goto tx_err;

		fl6.daddr = t->parms.raddr;
	}

	/* Push GRE header. */
	gre_build_header(skb, 8, TUNNEL_SEQ,
			 htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++));

	/* TooBig packet may have updated dst->dev's mtu */
	if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
		dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   NEXTHDR_GRE);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set.
		 */
		if (err == -EMSGSIZE) {
			if (skb->protocol == htons(ETH_P_IP))
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_FRAG_NEEDED, htonl(mtu));
			else
				icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		}

		goto tx_err;
	}
	return NETDEV_TX_OK;

tx_err:
	stats = &t->dev->stats;
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;

	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
		memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
	}

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;
	fl6->flowi6_proto = IPPROTO_GRE;

	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT &&
	    p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;
}

static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
					 int t_hlen)
{
	const struct __ip6_tnl_parm *p = &t->parms;
	struct net_device *dev = t->dev;

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, NULL, strict);

		if (!rt)
			return;

		if (rt->dst.dev) {
			dev->needed_headroom = rt->dst.dev->hard_header_len +
					       t_hlen;

			if (set_mtu) {
				dev->mtu = rt->dst.dev->mtu - t_hlen;
				if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
					dev->mtu -= 8;
				if (dev->type == ARPHRD_ETHER)
					dev->mtu -= ETH_HLEN;

				if (dev->mtu < IPV6_MIN_MTU)
					dev->mtu = IPV6_MIN_MTU;
			}
		}
		ip6_rt_put(rt);
	}
}

static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
{
	int t_hlen;

	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
	tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
	return t_hlen;
}

static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	ip6gre_tnl_link_config_common(t);
	ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
}

static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
				     const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	t->parms.i_flags = p->i_flags;
	t->parms.o_flags = p->o_flags;
	t->parms.fwmark = p->fwmark;
	dst_cache_reset(&t->dst_cache);
}

static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
			     int set_mtu)
{
	ip6gre_tnl_copy_tnl_parm(t, p);
	ip6gre_tnl_link_config(t, set_mtu);
	return 0;
}

static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
				      const struct ip6_tnl_parm2 *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->i_key = u->i_key;
	p->o_key = u->o_key;
	p->i_flags = gre_flags_to_tnl_flags(u->i_flags);
	p->o_flags = gre_flags_to_tnl_flags(u->o_flags);
	memcpy(p->name, u->name, sizeof(u->name));
}

static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
				    const struct __ip6_tnl_parm *p)
{
	u->proto = IPPROTO_GRE;
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->i_key = p->i_key;
	u->o_key = p->o_key;
	u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
	u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
	memcpy(u->name, p->name, sizeof(u->name));
}

static int ip6gre_tunnel_ioctl(struct net_device *dev,
	struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm2 p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	memset(&p1, 0, sizeof(p1));

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (!t)
				t = netdev_priv(dev);
		}
		memset(&p, 0, sizeof(p));
		ip6gre_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
			goto done;

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		ip6gre_tnl_parm_from_user(&p1, &p);
		t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				t = netdev_priv(dev);

				ip6gre_tunnel_unlink(ign, t);
				synchronize_net();
				ip6gre_tnl_change(t, &p1, 1);
				ip6gre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;

			memset(&p, 0, sizeof(p));
			ip6gre_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (!t)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}

static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
			 unsigned short type, const void *daddr,
			 const void *saddr, unsigned int len)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h;
	__be16 *p;

	ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
	ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
						  t->fl.u.ip6.flowlabel,
						  true, &t->fl.u.ip6));
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = NEXTHDR_GRE;
	ipv6h->saddr = t->parms.laddr;
	ipv6h->daddr = t->parms.raddr;

	p = (__be16 *)(ipv6h + 1);
	p[0] = t->parms.o_flags;
	p[1] = htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
	if (daddr)
		memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
	if (!ipv6_addr_any(&ipv6h->daddr))
		return t->hlen;

	return -t->hlen;
}

static const struct header_ops ip6gre_header_ops = {
	.create = ip6gre_header,
};

static const struct net_device_ops ip6gre_netdev_ops = {
	.ndo_init = ip6gre_tunnel_init,
	.ndo_uninit = ip6gre_tunnel_uninit,
	.ndo_start_xmit = ip6gre_tunnel_xmit,
	.ndo_do_ioctl = ip6gre_tunnel_ioctl,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};

static void ip6gre_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
	free_percpu(dev->tstats);
}

static void ip6gre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6gre_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->type = ARPHRD_IP6GRE;

	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	netif_keep_dst(dev);
	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}

#define GRE6_FEATURES (NETIF_F_SG |		\
		       NETIF_F_FRAGLIST |	\
		       NETIF_F_HIGHDMA |	\
		       NETIF_F_HW_CSUM)

static void ip6gre_tnl_init_features(struct net_device *dev)
{
	struct ip6_tnl *nt = netdev_priv(dev);

	dev->features |= GRE6_FEATURES;
	dev->hw_features |= GRE6_FEATURES;

	if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
		    nt->encap.type == TUNNEL_ENCAP_NONE) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}

static int ip6gre_tunnel_init_common(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int ret;
	int t_hlen;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (ret)
		goto cleanup_alloc_pcpu_stats;

	ret = gro_cells_init(&tunnel->gro_cells, dev);
	if (ret)
		goto cleanup_dst_cache_init;

	t_hlen = ip6gre_calc_hlen(tunnel);
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (dev->type == ARPHRD_ETHER)
		dev->mtu -= ETH_HLEN;
	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;

	if (tunnel->parms.collect_md) {
		dev->features |= NETIF_F_NETNS_LOCAL;
		netif_keep_dst(dev);
	}
	ip6gre_tnl_init_features(dev);

	return 0;

cleanup_dst_cache_init:
	dst_cache_destroy(&tunnel->dst_cache);
cleanup_alloc_pcpu_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;
	return ret;
}

static int ip6gre_tunnel_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int ret;

	ret = ip6gre_tunnel_init_common(dev);
	if (ret)
		return ret;

	tunnel = netdev_priv(dev);

	if (tunnel->parms.collect_md)
		return 0;

	memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));

	if (ipv6_addr_any(&tunnel->parms.raddr))
		dev->header_ops = &ip6gre_header_ops;

	return 0;
}

static void ip6gre_fb_tunnel_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	tunnel->hlen = sizeof(struct ipv6hdr) + 4;

	dev_hold(dev);
}

static struct inet6_protocol ip6gre_protocol __read_mostly = {
	.handler = gre_rcv,
	.err_handler = ip6gre_err,
	.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct net_device *dev, *aux;
	int prio;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6gre_link_ops ||
		    dev->rtnl_link_ops == &ip6gre_tap_ops ||
		    dev->rtnl_link_ops == &ip6erspan_tap_ops)
			unregister_netdevice_queue(dev, head);

	for (prio = 0; prio < 4; prio++) {
		int h;
		for (h = 0; h < IP6_GRE_HASH_SIZE; h++) {
			struct ip6_tnl *t;

			t = rtnl_dereference(ign->tunnels[prio][h]);

			while (t) {
				/* If dev is in the same netns, it has already
				 * been added to the list by the previous loop.
				 */
				if (!net_eq(dev_net(t->dev), net))
					unregister_netdevice_queue(t->dev,
								   head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}

static int __net_init ip6gre_init_net(struct net *net)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int err;

	if (!net_has_fallback_tunnels(net))
		return 0;
	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
					  NET_NAME_UNKNOWN,
					  ip6gre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ign->fb_tunnel_dev, net);
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;


	ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;

	err = register_netdev(ign->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;

	rcu_assign_pointer(ign->tunnels_wc[0],
			   netdev_priv(ign->fb_tunnel_dev));
	return 0;

err_reg_dev:
	free_netdev(ign->fb_tunnel_dev);
err_alloc_dev:
	return err;
}

static void __net_exit ip6gre_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		ip6gre_destroy_tunnels(net, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations ip6gre_net_ops = {
	.init = ip6gre_init_net,
	.exit_batch = ip6gre_exit_batch_net,
	.id = &ip6gre_net_id,
	.size = sizeof(struct ip6gre_net),
};

static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				  struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	return 0;
}

static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	struct in6_addr daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		daddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
		if (ipv6_addr_any(&daddr))
			return -EINVAL;
	}

out:
	return ip6gre_tunnel_validate(tb, data, extack);
}

static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
				  struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret, ver = 0;

	if (!data)
		return 0;

	ret = ip6gre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	/* ERSPAN should only have GRE sequence and key flag */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN Session ID only has 10 bits. Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_ERSPAN_VER]) {
		ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
		if (ver != 1 && ver != 2)
			return -EINVAL;
	}

	if (ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			u32 index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);

			if (index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			u16 dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);

			if (dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}

		if (data[IFLA_GRE_ERSPAN_HWID]) {
			u16 hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);

			if (hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}

static void ip6gre_netlink_parms(struct nlattr *data[],
				 struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(
				nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(
				nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);

	if (data[IFLA_GRE_FLOWINFO])
		parms->flowinfo = nla_get_be32(data[IFLA_GRE_FLOWINFO]);

	if (data[IFLA_GRE_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);

	if (data[IFLA_GRE_FWMARK])
		parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	if (data[IFLA_GRE_COLLECT_METADATA])
		parms->collect_md = true;

	parms->erspan_ver = 1;
	if (data[IFLA_GRE_ERSPAN_VER])
		parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

	if (parms->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX])
			parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
	} else if (parms->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR])
			parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
		if (data[IFLA_GRE_ERSPAN_HWID])
			parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
	}
}

static int ip6gre_tap_init(struct net_device *dev)
{
	int ret;

	ret = ip6gre_tunnel_init_common(dev);
	if (ret)
		return ret;

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	return 0;
}

static const struct net_device_ops ip6gre_tap_netdev_ops = {
	.ndo_init = ip6gre_tap_init,
	.ndo_uninit = ip6gre_tunnel_uninit,
	.ndo_start_xmit = ip6gre_tunnel_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};
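
/* ERSPAN uses a fixed 8-byte GRE header (sequence number + key) followed by
 * the version-specific ERSPAN header, so the overall header length differs
 * from the plain GRE case computed in ip6gre_calc_hlen().
 */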
static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
{
	int t_hlen;

	tunnel->tun_hlen = 8;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->parms.erspan_ver);

	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
	tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
	return t_hlen;
}

static int ip6erspan_tap_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int t_hlen;
	int ret;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (ret)
		goto cleanup_alloc_pcpu_stats;

	ret = gro_cells_init(&tunnel->gro_cells, dev);
	if (ret)
		goto cleanup_dst_cache_init;

	t_hlen = ip6erspan_calc_hlen(tunnel);
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (dev->type == ARPHRD_ETHER)
		dev->mtu -= ETH_HLEN;
	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip6erspan_tnl_link_config(tunnel, 1);

	return 0;

cleanup_dst_cache_init:
	dst_cache_destroy(&tunnel->dst_cache);
cleanup_alloc_pcpu_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;
	return ret;
}

static const struct net_device_ops ip6erspan_netdev_ops = {
	.ndo_init = ip6erspan_tap_init,
	.ndo_uninit = ip6erspan_tunnel_uninit,
	.ndo_start_xmit = ip6erspan_tunnel_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};

static void ip6gre_tap_setup(struct net_device *dev)
{

	ether_setup(dev);

	dev->max_mtu = 0;
	dev->netdev_ops = &ip6gre_tap_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->features |= NETIF_F_NETNS_LOCAL;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);
}

bool is_ip6gretap_dev(const struct net_device *dev)
{
	return dev->netdev_ops == &ip6gre_tap_netdev_ops;
}
EXPORT_SYMBOL_GPL(is_ip6gretap_dev);

static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
				       struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
				 struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt;
	struct ip_tunnel_encap ipencap;
	int err;

	nt = netdev_priv(dev);

	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return err;
	}

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	nt->dev = dev;
	nt->net = dev_net(dev);

	err = register_netdevice(dev);
	if (err)
		goto out;

	if (tb[IFLA_MTU])
		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));

	dev_hold(dev);

out:
	return err;
}

static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign;
	int err;

	ip6gre_netlink_parms(data, &nt->parms);
	ign = net_generic(net, ip6gre_net_id);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ign->collect_md_tun))
			return -EEXIST;
	} else {
		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
			return -EEXIST;
	}

	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
	if (!err) {
		ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
		ip6gre_tunnel_link_md(ign, nt);
		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
	}
	return err;
}

static struct ip6_tnl *
ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[], struct __ip6_tnl_parm *p_p,
			 struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t, *nt = netdev_priv(dev);
	struct net *net = nt->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ign->fb_tunnel_dev)
		return ERR_PTR(-EINVAL);

	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return ERR_PTR(err);
	}

	ip6gre_netlink_parms(data, p_p);

	t = ip6gre_tunnel_locate(net, p_p, 0);

	if (t) {
		if (t->dev != dev)
			return ERR_PTR(-EEXIST);
	} else {
		t = nt;
	}

	return t;
}

static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
	struct __ip6_tnl_parm p;
	struct ip6_tnl *t;

	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
	if (IS_ERR(t))
		return PTR_ERR(t);

	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
	ip6gre_tunnel_link_md(ign, t);
	ip6gre_tunnel_link(ign, t);
	return 0;
}

static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	if (dev != ign->fb_tunnel_dev)
		unregister_netdevice_queue(dev, head);
}

static size_t ip6gre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_GRE_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_GRE_FLAGS */
		nla_total_size(4) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		0;
}

static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
	    nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
	    nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
	    nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
	    nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark) ||
	    nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (p->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
		goto nla_put_failure;

	if (p->erspan_ver == 1) {
		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
			goto nla_put_failure;
	} else if (p->erspan_ver == 2) {
		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
			goto nla_put_failure;
		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK] = { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS] = { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
	[IFLA_GRE_IKEY] = { .type = NLA_U32 },
	[IFLA_GRE_OKEY] = { .type = NLA_U32 },
	[IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
	[IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
	[IFLA_GRE_TTL] = { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
	[IFLA_GRE_FLOWINFO] = { .type = NLA_U32 },
	[IFLA_GRE_FLAGS] = { .type = NLA_U32 },
	[IFLA_GRE_ENCAP_TYPE] = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 },
static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
        [IFLA_GRE_LINK] = { .type = NLA_U32 },
        [IFLA_GRE_IFLAGS] = { .type = NLA_U16 },
        [IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
        [IFLA_GRE_IKEY] = { .type = NLA_U32 },
        [IFLA_GRE_OKEY] = { .type = NLA_U32 },
        [IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
        [IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
        [IFLA_GRE_TTL] = { .type = NLA_U8 },
        [IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
        [IFLA_GRE_FLOWINFO] = { .type = NLA_U32 },
        [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
        [IFLA_GRE_ENCAP_TYPE] = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
        [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
        [IFLA_GRE_FWMARK] = { .type = NLA_U32 },
        [IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
        [IFLA_GRE_ERSPAN_VER] = { .type = NLA_U8 },
        [IFLA_GRE_ERSPAN_DIR] = { .type = NLA_U8 },
        [IFLA_GRE_ERSPAN_HWID] = { .type = NLA_U16 },
};

static void ip6erspan_tap_setup(struct net_device *dev)
{
        ether_setup(dev);

        dev->netdev_ops = &ip6erspan_netdev_ops;
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6gre_dev_free;

        dev->features |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        netif_keep_dst(dev);
}

static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
                             struct nlattr *tb[], struct nlattr *data[],
                             struct netlink_ext_ack *extack)
{
        struct ip6_tnl *nt = netdev_priv(dev);
        struct net *net = dev_net(dev);
        struct ip6gre_net *ign;
        int err;

        ip6gre_netlink_parms(data, &nt->parms);
        ign = net_generic(net, ip6gre_net_id);

        if (nt->parms.collect_md) {
                if (rtnl_dereference(ign->collect_md_tun_erspan))
                        return -EEXIST;
        } else {
                if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
                        return -EEXIST;
        }

        err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
        if (!err) {
                ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
                ip6erspan_tunnel_link_md(ign, nt);
                ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
        }
        return err;
}

static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
        ip6gre_tnl_link_config_common(t);
        ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
}

static int ip6erspan_tnl_change(struct ip6_tnl *t,
                                const struct __ip6_tnl_parm *p, int set_mtu)
{
        ip6gre_tnl_copy_tnl_parm(t, p);
        ip6erspan_tnl_link_config(t, set_mtu);
        return 0;
}

static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
                                struct nlattr *data[],
                                struct netlink_ext_ack *extack)
{
        struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
        struct __ip6_tnl_parm p;
        struct ip6_tnl *t;

        t = ip6gre_changelink_common(dev, tb, data, &p, extack);
        if (IS_ERR(t))
                return PTR_ERR(t);

        ip6gre_tunnel_unlink_md(ign, t);
        ip6gre_tunnel_unlink(ign, t);
        ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
        ip6erspan_tunnel_link_md(ign, t);
        ip6gre_tunnel_link(ign, t);
        return 0;
}
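/*
 * Three rtnl_link_ops share most of their netlink plumbing: plain ip6gre
 * (layer-3, ARPHRD_IP6GRE), ip6gretap (Ethernet over GRE) and ip6erspan.
 * They differ in their setup/validate/newlink/changelink callbacks, and
 * only the ip6gre variant overrides dellink so the per-netns fallback
 * device cannot be unregistered; the policy, get_size and fill_info
 * handlers are common to all three.
 */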
static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
        .kind = "ip6gre",
        .maxtype = IFLA_GRE_MAX,
        .policy = ip6gre_policy,
        .priv_size = sizeof(struct ip6_tnl),
        .setup = ip6gre_tunnel_setup,
        .validate = ip6gre_tunnel_validate,
        .newlink = ip6gre_newlink,
        .changelink = ip6gre_changelink,
        .dellink = ip6gre_dellink,
        .get_size = ip6gre_get_size,
        .fill_info = ip6gre_fill_info,
        .get_link_net = ip6_tnl_get_link_net,
};

static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
        .kind = "ip6gretap",
        .maxtype = IFLA_GRE_MAX,
        .policy = ip6gre_policy,
        .priv_size = sizeof(struct ip6_tnl),
        .setup = ip6gre_tap_setup,
        .validate = ip6gre_tap_validate,
        .newlink = ip6gre_newlink,
        .changelink = ip6gre_changelink,
        .get_size = ip6gre_get_size,
        .fill_info = ip6gre_fill_info,
        .get_link_net = ip6_tnl_get_link_net,
};

static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
        .kind = "ip6erspan",
        .maxtype = IFLA_GRE_MAX,
        .policy = ip6gre_policy,
        .priv_size = sizeof(struct ip6_tnl),
        .setup = ip6erspan_tap_setup,
        .validate = ip6erspan_tap_validate,
        .newlink = ip6erspan_newlink,
        .changelink = ip6erspan_changelink,
        .get_size = ip6gre_get_size,
        .fill_info = ip6gre_fill_info,
        .get_link_net = ip6_tnl_get_link_net,
};

/*
 *      And now the modules code and kernel interface.
 */

static int __init ip6gre_init(void)
{
        int err;

        pr_info("GRE over IPv6 tunneling driver\n");

        err = register_pernet_device(&ip6gre_net_ops);
        if (err < 0)
                return err;

        err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
        if (err < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                goto add_proto_failed;
        }

        err = rtnl_link_register(&ip6gre_link_ops);
        if (err < 0)
                goto rtnl_link_failed;

        err = rtnl_link_register(&ip6gre_tap_ops);
        if (err < 0)
                goto tap_ops_failed;

        err = rtnl_link_register(&ip6erspan_tap_ops);
        if (err < 0)
                goto erspan_link_failed;

out:
        return err;

erspan_link_failed:
        rtnl_link_unregister(&ip6gre_tap_ops);
tap_ops_failed:
        rtnl_link_unregister(&ip6gre_link_ops);
rtnl_link_failed:
        inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
add_proto_failed:
        unregister_pernet_device(&ip6gre_net_ops);
        goto out;
}

static void __exit ip6gre_fini(void)
{
        rtnl_link_unregister(&ip6gre_tap_ops);
        rtnl_link_unregister(&ip6gre_link_ops);
        rtnl_link_unregister(&ip6erspan_tap_ops);
        inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
        unregister_pernet_device(&ip6gre_net_ops);
}

module_init(ip6gre_init);
module_exit(ip6gre_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
MODULE_ALIAS_RTNL_LINK("ip6gre");
MODULE_ALIAS_RTNL_LINK("ip6gretap");
MODULE_ALIAS_RTNL_LINK("ip6erspan");
MODULE_ALIAS_NETDEV("ip6gre0");
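/*
 * Illustrative user-space usage: once the module is loaded (or auto-loaded
 * via the rtnl link aliases above), tunnels of each type can be created
 * with iproute2, for example:
 *
 *   ip link add gre1 type ip6gre local 2001:db8::1 remote 2001:db8::2 key 10
 *   ip link add tap1 type ip6gretap local 2001:db8::1 remote 2001:db8::2
 *   ip link add er1  type ip6erspan local 2001:db8::1 remote 2001:db8::2 \
 *                    key 10 erspan_ver 1 erspan 123
 *
 * The addresses and option spellings are illustrative; exact iproute2
 * syntax may vary between versions.
 */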