/*
 *	GRE over IPv6 protocol decoder.
 *
 *	Authors: Dmitry Kozlov (xeb@mail.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <linux/if_tunnel.h>
#include <linux/ip6_tunnel.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/addrconf.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ip6_tunnel.h>
#include <net/gre.h>
#include <net/erspan.h>
#include <net/dst_metadata.h>


static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

#define IP6_GRE_HASH_SIZE_SHIFT  5
#define IP6_GRE_HASH_SIZE (1 << IP6_GRE_HASH_SIZE_SHIFT)

static unsigned int ip6gre_net_id __read_mostly;
struct ip6gre_net {
	struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];

	struct ip6_tnl __rcu *collect_md_tun;
	struct ip6_tnl __rcu *collect_md_tun_erspan;
	struct net_device *fb_tunnel_dev;
};

static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly;
static int ip6gre_tunnel_init(struct net_device *dev);
static void ip6gre_tunnel_setup(struct net_device *dev);
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);

/* Tunnel hash table */

/*
   4 hash tables:

   3: (remote,local)
   2: (remote,*)
   1: (*,local)
   0: (*,*)

   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless tunnel,
   will match the fallback tunnel.
 */

#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(IP6_GRE_HASH_SIZE - 1))
static u32 HASH_ADDR(const struct in6_addr *addr)
{
	u32 hash = ipv6_addr_hash(addr);

	return hash_32(hash, IP6_GRE_HASH_SIZE_SHIFT);
}

#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]

/* Given src, dst and key, find the appropriate tunnel for the incoming packet. */
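/* The lookup walks the hash chains from most to least specific
 * ((remote,local), (remote,*), (*,local), (*,*)).  Within a chain an
 * exact match on link and device type returns immediately; otherwise
 * the closest candidate (lowest score) is remembered.  If nothing
 * matches, a collect_md tunnel or the fallback device is used.
 */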
static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
		const struct in6_addr *remote, const struct in6_addr *local,
		__be32 key, __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned int h0 = HASH_ADDR(remote);
	unsigned int h1 = HASH_KEY(key);
	struct ip6_tnl *t, *cand = NULL;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int dev_type = (gre_proto == htons(ETH_P_TEB) ||
			gre_proto == htons(ETH_P_ERSPAN) ||
			gre_proto == htons(ETH_P_ERSPAN2)) ?
		       ARPHRD_ETHER : ARPHRD_IP6GRE;
	int score, cand_score = 4;

	for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
		if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
		     (!ipv6_addr_equal(local, &t->parms.raddr) ||
		      !ipv6_addr_is_multicast(local))) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	if (cand)
		return cand;

	if (gre_proto == htons(ETH_P_ERSPAN) ||
	    gre_proto == htons(ETH_P_ERSPAN2))
		t = rcu_dereference(ign->collect_md_tun_erspan);
	else
		t = rcu_dereference(ign->collect_md_tun);

	if (t && t->dev->flags & IFF_UP)
		return t;

	dev = ign->fb_tunnel_dev;
	if (dev && dev->flags & IFF_UP)
		return netdev_priv(dev);

	return NULL;
}

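/* Pick the hash chain for a tunnel based on how specific its endpoints
 * are: bit 0 of prio is set when a local address is configured, bit 1
 * when a (unicast) remote address is configured, matching the table
 * layout described above.
 */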
static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
		const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = HASH_KEY(p->i_key);
	int prio = 0;

	if (!ipv6_addr_any(local))
		prio |= 1;
	if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
		prio |= 2;
		h ^= HASH_ADDR(remote);
	}

	return &ign->tunnels[prio][h];
}

static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun, t);
}

static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun_erspan, t);
}

static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun, NULL);
}

static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
				       struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
}

static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
		const struct ip6_tnl *t)
{
	return __ip6gre_bucket(ign, &t->parms);
}

static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	for (tp = ip6gre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
					  const struct __ip6_tnl_parm *parms,
					  int type)
{
	const struct in6_addr *remote = &parms->raddr;
	const struct in6_addr *local = &parms->laddr;
	__be32 key = parms->i_key;
	int link = parms->link;
	struct ip6_tnl *t;
	struct ip6_tnl __rcu **tp;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	for (tp = __ip6gre_bucket(ign, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next)
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    key == t->parms.i_key &&
		    link == t->parms.link &&
		    type == t->dev->type)
			break;

	return t;
}

static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
		const struct __ip6_tnl_parm *parms, int create)
{
	struct ip6_tnl *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
	if (t && create)
		return NULL;
	if (t || !create)
		return t;

	if (parms->name[0]) {
		if (!dev_valid_name(parms->name))
			return NULL;
		strlcpy(name, parms->name, IFNAMSIZ);
	} else {
		strcpy(name, "ip6gre%d");
	}
	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6gre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ip6gre_link_ops;

	nt->dev = dev;
	nt->net = dev_net(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	ip6gre_tnl_link_config(nt, 1);

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & TUNNEL_SEQ))
		dev->features |= NETIF_F_LLTX;

	dev_hold(dev);
	ip6gre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}

static void ip6erspan_tunnel_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);

	ip6erspan_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}

static void ip6gre_tunnel_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);

	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}

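/* ICMPv6 error handler: find the tunnel that sent the offending packet
 * and react according to the error type (log unreachable/hop-limit/
 * parameter problems, update the path MTU on ICMPV6_PKT_TOOBIG, follow
 * redirects), then update the tunnel's error count and timestamp.
 */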
static int ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *ipv6h;
	struct tnl_ptk_info tpi;
	struct ip6_tnl *t;

	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IPV6),
			     offset) < 0)
		return -EINVAL;

	ipv6h = (const struct ipv6hdr *)skb->data;
	t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
				 tpi.key, tpi.proto);
	if (!t)
		return -ENOENT;

	switch (type) {
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 teli;
	case ICMPV6_DEST_UNREACH:
		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
				    t->parms.name);
		if (code != ICMPV6_PORT_UNREACH)
			break;
		return 0;
	case ICMPV6_TIME_EXCEED:
		if (code == ICMPV6_EXC_HOPLIMIT) {
			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					    t->parms.name);
			break;
		}
		return 0;
	case ICMPV6_PARAMPROB:
		teli = 0;
		if (code == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == be32_to_cpu(info) - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						    t->parms.name);
			}
		} else {
			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					    t->parms.name);
		}
		return 0;
	case ICMPV6_PKT_TOOBIG:
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
		return 0;
	case NDISC_REDIRECT:
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
		return 0;
	}

	if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

	return 0;
}

static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
	const struct ipv6hdr *ipv6h;
	struct ip6_tnl *tunnel;

	ipv6h = ipv6_hdr(skb);
	tunnel = ip6gre_tunnel_lookup(skb->dev,
				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
				      tpi->proto);
	if (tunnel) {
		if (tunnel->parms.collect_md) {
			struct metadata_dst *tun_dst;
			__be64 tun_id;
			__be16 flags;

			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;

			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		} else {
			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
		}

		return PACKET_RCVD;
	}

	return PACKET_REJECT;
}

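/* ERSPAN receive path: validate and strip the ERSPAN header (v1 or v2),
 * then hand the inner Ethernet frame to the generic ip6 tunnel receive
 * helper.  In collect_md mode the ERSPAN metadata is copied into the
 * tunnel metadata dst for consumers of the collect_md device.
 */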
static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
			 struct tnl_ptk_info *tpi)
{
	struct erspan_base_hdr *ershdr;
	struct erspan_metadata *pkt_md;
	const struct ipv6hdr *ipv6h;
	struct erspan_md2 *md2;
	struct ip6_tnl *tunnel;
	u8 ver;

	ipv6h = ipv6_hdr(skb);
	ershdr = (struct erspan_base_hdr *)skb->data;
	ver = ershdr->ver;

	tunnel = ip6gre_tunnel_lookup(skb->dev,
				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
				      tpi->proto);
	if (tunnel) {
		int len = erspan_hdr_len(ver);

		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		ershdr = (struct erspan_base_hdr *)skb->data;
		pkt_md = (struct erspan_metadata *)(ershdr + 1);

		if (__iptunnel_pull_header(skb, len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			return PACKET_REJECT;

		if (tunnel->parms.collect_md) {
			struct metadata_dst *tun_dst;
			struct ip_tunnel_info *info;
			struct erspan_metadata *md;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
						  sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			info = &tun_dst->u.tun_info;
			md = ip_tunnel_info_opts(info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);

			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);

		} else {
			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
		}

		return PACKET_RCVD;
	}

	return PACKET_REJECT;
}

static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
	if (hdr_len < 0)
		goto drop;

	if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (ip6erspan_rcv(skb, hdr_len, &tpi) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
		return 0;

out:
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb,
					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

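/* The prepare_ip6gre_xmit_{ipv4,ipv6}() helpers fill in the flow (fl6),
 * DS field and encapsulation limit for native (non collect_md) transmits,
 * honouring the IP6_TNL_F_USE_ORIG_* flags of the tunnel.
 */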
static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
				     struct net_device *dev,
				     struct flowi6 *fl6, __u8 *dsfield,
				     int *encap_limit)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct ip6_tnl *t = netdev_priv(dev);

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		*encap_limit = t->parms.encap_limit;

	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		*dsfield = ipv4_get_dsfield(iph);
	else
		*dsfield = ip6_tclass(t->parms.flowinfo);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6->flowi6_mark = skb->mark;
	else
		fl6->flowi6_mark = t->parms.fwmark;

	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
}

static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
				    struct net_device *dev,
				    struct flowi6 *fl6, __u8 *dsfield,
				    int *encap_limit)
{
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct ip6_tnl *t = netdev_priv(dev);
	__u16 offset;

	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
	/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head,
	 * so reload the header pointer before using it below.
	 */
	ipv6h = ipv6_hdr(skb);

	if (offset > 0) {
		struct ipv6_tlv_tnl_enc_lim *tel;

		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
		if (tel->encap_limit == 0) {
			icmpv6_send(skb, ICMPV6_PARAMPROB,
				    ICMPV6_HDR_FIELD, offset + 2);
			return -1;
		}
		*encap_limit = tel->encap_limit - 1;
	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
		*encap_limit = t->parms.encap_limit;
	}

	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		*dsfield = ipv6_get_dsfield(ipv6h);
	else
		*dsfield = ip6_tclass(t->parms.flowinfo);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
		fl6->flowlabel |= ip6_flowlabel(ipv6h);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6->flowi6_mark = skb->mark;
	else
		fl6->flowi6_mark = t->parms.fwmark;

	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	return 0;
}

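/* Common GRE transmit path for both native and collect_md tunnels: pick
 * the destination, push the GRE header (with key/csum/seq as configured)
 * and hand the packet to ip6_tnl_xmit() with NEXTHDR_GRE.
 */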
static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
			       struct net_device *dev, __u8 dsfield,
			       struct flowi6 *fl6, int encap_limit,
			       __u32 *pmtu, __be16 proto)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	__be16 protocol;

	if (dev->type == ARPHRD_ETHER)
		IPCB(skb)->flags = 0;

	if (dev->header_ops && dev->type == ARPHRD_IP6GRE)
		fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr;
	else
		fl6->daddr = tunnel->parms.raddr;

	if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
		return -ENOMEM;

	/* Push GRE header. */
	protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;

	if (tunnel->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;
		__be16 flags;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info ||
			     !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -EINVAL;

		key = &tun_info->key;
		memset(fl6, 0, sizeof(*fl6));
		fl6->flowi6_proto = IPPROTO_GRE;
		fl6->daddr = key->u.ipv6.dst;
		fl6->flowlabel = key->label;
		fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);

		dsfield = key->tos;
		flags = key->tun_flags &
			(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
		tunnel->tun_hlen = gre_calc_hlen(flags);

		gre_build_header(skb, tunnel->tun_hlen,
				 flags, protocol,
				 tunnel_id_to_key32(tun_info->key.tun_id),
				 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
						      : 0);

	} else {
		if (tunnel->parms.o_flags & TUNNEL_SEQ)
			tunnel->o_seqno++;

		gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
				 protocol, tunnel->parms.o_key,
				 htonl(tunnel->o_seqno));
	}

	return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
			    NEXTHDR_GRE);
}

static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield = 0;
	__u32 mtu;
	int err;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	if (!t->parms.collect_md)
		prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
					 &dsfield, &encap_limit);

	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
	if (err)
		return -1;

	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			  skb->protocol);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}

	return 0;
}

static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield = 0;
	__u32 mtu;
	int err;

	if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
		return -1;

	if (!t->parms.collect_md &&
	    prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
		return -1;

	if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
		return -1;

	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
			  &mtu, skb->protocol);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}

/**
 * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/

static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
	const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u32 mtu;
	int err;

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;

	if (!t->parms.collect_md)
		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));

	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
	if (err)
		return err;

	err = __gre6_xmit(skb, dev, 0, &fl6, encap_limit, &mtu, skb->protocol);

	return err;
}

static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
	struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
		goto tx_err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip6gre_xmit_ipv4(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6gre_xmit_ipv6(skb, dev);
		break;
	default:
		ret = ip6gre_xmit_other(skb, dev);
		break;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

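/* ERSPAN transmit: frames larger than the device MTU are trimmed and
 * flagged as truncated in the ERSPAN header rather than fragmented.
 * The GRE header always carries a sequence number (TUNNEL_SEQ) and no
 * GRE key; the configured key is carried as the ERSPAN session ID.
 */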
static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
					 struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct dst_entry *dst = skb_dst(skb);
	struct net_device_stats *stats;
	bool truncate = false;
	int encap_limit = -1;
	__u8 dsfield = 0;
	struct flowi6 fl6;
	int err = -EINVAL;
	__be16 proto;
	__u32 mtu;
	int nhoff;
	int thoff;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
		goto tx_err;

	if (gre_handle_offloads(skb, false))
		goto tx_err;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	thoff = skb_transport_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
		truncate = true;

	if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
		goto tx_err;

	t->parms.o_flags &= ~TUNNEL_KEY;
	IPCB(skb)->flags = 0;

	/* For collect_md mode, derive fl6 from the tunnel key,
	 * for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}.
	 */
	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;
		struct erspan_metadata *md;
		__be32 tun_id;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info ||
			     !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -EINVAL;

		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_GRE;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

		dsfield = key->tos;
		if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
			goto tx_err;
		md = ip_tunnel_info_opts(tun_info);
		if (!md)
			goto tx_err;

		tun_id = tunnel_id_to_key32(key->tun_id);
		if (md->version == 1) {
			erspan_build_header(skb,
					    ntohl(tun_id),
					    ntohl(md->u.index), truncate,
					    false);
		} else if (md->version == 2) {
			erspan_build_header_v2(skb,
					       ntohl(tun_id),
					       md->u.md2.dir,
					       get_hwid(&md->u.md2),
					       truncate, false);
		} else {
			goto tx_err;
		}
	} else {
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
			prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
						 &dsfield, &encap_limit);
			break;
		case htons(ETH_P_IPV6):
			if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
				goto tx_err;
			if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
						     &dsfield, &encap_limit))
				goto tx_err;
			break;
		default:
			memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
			break;
		}

		if (t->parms.erspan_ver == 1)
			erspan_build_header(skb, ntohl(t->parms.o_key),
					    t->parms.index,
					    truncate, false);
		else if (t->parms.erspan_ver == 2)
			erspan_build_header_v2(skb, ntohl(t->parms.o_key),
					       t->parms.dir,
					       t->parms.hwid,
					       truncate, false);
		else
			goto tx_err;

		fl6.daddr = t->parms.raddr;
	}

	/* Push GRE header. */
	proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
					   : htons(ETH_P_ERSPAN2);
	gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));

	/* TooBig packet may have updated dst->dev's mtu */
	if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
		dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   NEXTHDR_GRE);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE) {
			if (skb->protocol == htons(ETH_P_IP))
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_FRAG_NEEDED, htonl(mtu));
			else
				icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		}

		goto tx_err;
	}
	return NETDEV_TX_OK;

tx_err:
	stats = &t->dev->stats;
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

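/* Refresh device state derived from the tunnel parameters: device
 * addresses (for non-Ethernet tunnels), the cached flowi6 template used
 * for routing, and the point-to-point flag.
 */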
static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;

	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
		memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
	}

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;
	fl6->flowi6_proto = IPPROTO_GRE;

	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT &&
	    p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;
}

static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
					 int t_hlen)
{
	const struct __ip6_tnl_parm *p = &t->parms;
	struct net_device *dev = t->dev;

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, NULL, strict);

		if (!rt)
			return;

		if (rt->dst.dev) {
			dev->needed_headroom = rt->dst.dev->hard_header_len +
					       t_hlen;

			if (set_mtu) {
				dev->mtu = rt->dst.dev->mtu - t_hlen;
				if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
					dev->mtu -= 8;
				if (dev->type == ARPHRD_ETHER)
					dev->mtu -= ETH_HLEN;

				if (dev->mtu < IPV6_MIN_MTU)
					dev->mtu = IPV6_MIN_MTU;
			}
		}
		ip6_rt_put(rt);
	}
}

static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
{
	int t_hlen;

	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
	tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
	return t_hlen;
}

static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	ip6gre_tnl_link_config_common(t);
	ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
}

static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
				     const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	t->parms.i_flags = p->i_flags;
	t->parms.o_flags = p->o_flags;
	t->parms.fwmark = p->fwmark;
	t->parms.erspan_ver = p->erspan_ver;
	t->parms.index = p->index;
	t->parms.dir = p->dir;
	t->parms.hwid = p->hwid;
	dst_cache_reset(&t->dst_cache);
}

static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
			     int set_mtu)
{
	ip6gre_tnl_copy_tnl_parm(t, p);
	ip6gre_tnl_link_config(t, set_mtu);
	return 0;
}

static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
				      const struct ip6_tnl_parm2 *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->i_key = u->i_key;
	p->o_key = u->o_key;
	p->i_flags = gre_flags_to_tnl_flags(u->i_flags);
	p->o_flags = gre_flags_to_tnl_flags(u->o_flags);
	memcpy(p->name, u->name, sizeof(u->name));
}

static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
				    const struct __ip6_tnl_parm *p)
{
	u->proto = IPPROTO_GRE;
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->i_key = p->i_key;
	u->o_key = p->o_key;
	u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
	u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
	memcpy(u->name, p->name, sizeof(u->name));
}

static int ip6gre_tunnel_ioctl(struct net_device *dev,
	struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm2 p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	memset(&p1, 0, sizeof(p1));

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (!t)
				t = netdev_priv(dev);
		}
		memset(&p, 0, sizeof(p));
		ip6gre_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
			goto done;

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		ip6gre_tnl_parm_from_user(&p1, &p);
		t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				t = netdev_priv(dev);

				ip6gre_tunnel_unlink(ign, t);
				synchronize_net();
				ip6gre_tnl_change(t, &p1, 1);
				ip6gre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;

			memset(&p, 0, sizeof(p));
			ip6gre_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (!t)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}

static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
			 unsigned short type, const void *daddr,
			 const void *saddr, unsigned int len)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h;
	__be16 *p;

	ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
	ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
						  t->fl.u.ip6.flowlabel,
						  true, &t->fl.u.ip6));
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = NEXTHDR_GRE;
	ipv6h->saddr = t->parms.laddr;
	ipv6h->daddr = t->parms.raddr;

	p = (__be16 *)(ipv6h + 1);
	p[0] = t->parms.o_flags;
	p[1] = htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
	if (daddr)
		memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
	if (!ipv6_addr_any(&ipv6h->daddr))
		return t->hlen;

	return -t->hlen;
}

static const struct header_ops ip6gre_header_ops = {
	.create	= ip6gre_header,
};

static const struct net_device_ops ip6gre_netdev_ops = {
	.ndo_init		= ip6gre_tunnel_init,
	.ndo_uninit		= ip6gre_tunnel_uninit,
	.ndo_start_xmit		= ip6gre_tunnel_xmit,
	.ndo_do_ioctl		= ip6gre_tunnel_ioctl,
	.ndo_change_mtu		= ip6_tnl_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip6_tnl_get_iflink,
};

static void ip6gre_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
	free_percpu(dev->tstats);
}

static void ip6gre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6gre_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->type = ARPHRD_IP6GRE;

	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	netif_keep_dst(dev);
	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}

#define GRE6_FEATURES (NETIF_F_SG |		\
		       NETIF_F_FRAGLIST |	\
		       NETIF_F_HIGHDMA |	\
		       NETIF_F_HW_CSUM)

static void ip6gre_tnl_init_features(struct net_device *dev)
{
	struct ip6_tnl *nt = netdev_priv(dev);

	dev->features		|= GRE6_FEATURES;
	dev->hw_features	|= GRE6_FEATURES;

	if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
		    nt->encap.type == TUNNEL_ENCAP_NONE) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}

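/* Shared netdev init for ip6gre and ip6gretap devices: allocate per-CPU
 * stats, the dst cache and GRO cells, then derive the initial MTU from
 * the header overhead that was just computed.
 */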
static int ip6gre_tunnel_init_common(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int ret;
	int t_hlen;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (ret)
		goto cleanup_alloc_pcpu_stats;

	ret = gro_cells_init(&tunnel->gro_cells, dev);
	if (ret)
		goto cleanup_dst_cache_init;

	t_hlen = ip6gre_calc_hlen(tunnel);
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (dev->type == ARPHRD_ETHER)
		dev->mtu -= ETH_HLEN;
	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;

	if (tunnel->parms.collect_md) {
		dev->features |= NETIF_F_NETNS_LOCAL;
		netif_keep_dst(dev);
	}
	ip6gre_tnl_init_features(dev);

	return 0;

cleanup_dst_cache_init:
	dst_cache_destroy(&tunnel->dst_cache);
cleanup_alloc_pcpu_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;
	return ret;
}

static int ip6gre_tunnel_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int ret;

	ret = ip6gre_tunnel_init_common(dev);
	if (ret)
		return ret;

	tunnel = netdev_priv(dev);

	if (tunnel->parms.collect_md)
		return 0;

	memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));

	if (ipv6_addr_any(&tunnel->parms.raddr))
		dev->header_ops = &ip6gre_header_ops;

	return 0;
}

static void ip6gre_fb_tunnel_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	tunnel->hlen		= sizeof(struct ipv6hdr) + 4;

	dev_hold(dev);
}

static struct inet6_protocol ip6gre_protocol __read_mostly = {
	.handler     = gre_rcv,
	.err_handler = ip6gre_err,
	.flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct net_device *dev, *aux;
	int prio;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6gre_link_ops ||
		    dev->rtnl_link_ops == &ip6gre_tap_ops ||
		    dev->rtnl_link_ops == &ip6erspan_tap_ops)
			unregister_netdevice_queue(dev, head);

	for (prio = 0; prio < 4; prio++) {
		int h;
		for (h = 0; h < IP6_GRE_HASH_SIZE; h++) {
			struct ip6_tnl *t;

			t = rtnl_dereference(ign->tunnels[prio][h]);

			while (t) {
				/* If dev is in the same netns, it has already
				 * been added to the list by the previous loop.
				 */
				if (!net_eq(dev_net(t->dev), net))
					unregister_netdevice_queue(t->dev,
								   head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}

static int __net_init ip6gre_init_net(struct net *net)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int err;

	if (!net_has_fallback_tunnels(net))
		return 0;
	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
					  NET_NAME_UNKNOWN,
					  ip6gre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ign->fb_tunnel_dev, net);
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;

	ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;

	err = register_netdev(ign->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;

	rcu_assign_pointer(ign->tunnels_wc[0],
			   netdev_priv(ign->fb_tunnel_dev));
	return 0;

err_reg_dev:
	free_netdev(ign->fb_tunnel_dev);
err_alloc_dev:
	return err;
}

static void __net_exit ip6gre_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		ip6gre_destroy_tunnels(net, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations ip6gre_net_ops = {
	.init = ip6gre_init_net,
	.exit_batch = ip6gre_exit_batch_net,
	.id   = &ip6gre_net_id,
	.size = sizeof(struct ip6gre_net),
};

static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				  struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	return 0;
}

static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	struct in6_addr daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		daddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
		if (ipv6_addr_any(&daddr))
			return -EINVAL;
	}

out:
	return ip6gre_tunnel_validate(tb, data, extack);
}

static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
				  struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret, ver = 0;

	if (!data)
		return 0;

	ret = ip6gre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	/* ERSPAN should only have GRE sequence and key flag */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN session ID is only 10 bits.  Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_ERSPAN_VER]) {
		ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
		if (ver != 1 && ver != 2)
			return -EINVAL;
	}

	if (ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			u32 index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);

			if (index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			u16 dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);

			if (dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}

		if (data[IFLA_GRE_ERSPAN_HWID]) {
			u16 hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);

			if (hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}

static void ip6gre_netlink_parms(struct nlattr *data[],
				 struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(
				nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(
				nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);

	if (data[IFLA_GRE_FLOWINFO])
		parms->flowinfo = nla_get_be32(data[IFLA_GRE_FLOWINFO]);

	if (data[IFLA_GRE_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);

	if (data[IFLA_GRE_FWMARK])
		parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	if (data[IFLA_GRE_COLLECT_METADATA])
		parms->collect_md = true;

	parms->erspan_ver = 1;
	if (data[IFLA_GRE_ERSPAN_VER])
		parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

	if (parms->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX])
			parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
	} else if (parms->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR])
			parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
		if (data[IFLA_GRE_ERSPAN_HWID])
			parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
	}
}

static int ip6gre_tap_init(struct net_device *dev)
{
	int ret;

	ret = ip6gre_tunnel_init_common(dev);
	if (ret)
		return ret;

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	return 0;
}

static const struct net_device_ops ip6gre_tap_netdev_ops = {
	.ndo_init		= ip6gre_tap_init,
	.ndo_uninit		= ip6gre_tunnel_uninit,
	.ndo_start_xmit		= ip6gre_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip6_tnl_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip6_tnl_get_iflink,
};

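/* ERSPAN always transmits an 8-byte GRE header (base header plus sequence
 * number), so the per-packet overhead is that plus the version-specific
 * ERSPAN header and any configured encapsulation.
 */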
static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
{
	int t_hlen;

	tunnel->tun_hlen = 8;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->parms.erspan_ver);

	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
	tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
	return t_hlen;
}

static int ip6erspan_tap_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int t_hlen;
	int ret;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (ret)
		goto cleanup_alloc_pcpu_stats;

	ret = gro_cells_init(&tunnel->gro_cells, dev);
	if (ret)
		goto cleanup_dst_cache_init;

	t_hlen = ip6erspan_calc_hlen(tunnel);
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (dev->type == ARPHRD_ETHER)
		dev->mtu -= ETH_HLEN;
	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip6erspan_tnl_link_config(tunnel, 1);

	return 0;

cleanup_dst_cache_init:
	dst_cache_destroy(&tunnel->dst_cache);
cleanup_alloc_pcpu_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;
	return ret;
}

static const struct net_device_ops ip6erspan_netdev_ops = {
	.ndo_init		= ip6erspan_tap_init,
	.ndo_uninit		= ip6erspan_tunnel_uninit,
	.ndo_start_xmit		= ip6erspan_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip6_tnl_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip6_tnl_get_iflink,
};

static void ip6gre_tap_setup(struct net_device *dev)
{

	ether_setup(dev);

	dev->max_mtu = 0;
	dev->netdev_ops = &ip6gre_tap_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->features |= NETIF_F_NETNS_LOCAL;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);
}

static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
				       struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
				 struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt;
	struct ip_tunnel_encap ipencap;
	int err;

	nt = netdev_priv(dev);

	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return err;
	}

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	nt->dev = dev;
	nt->net = dev_net(dev);

	err = register_netdevice(dev);
	if (err)
		goto out;

	if (tb[IFLA_MTU])
		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));

	dev_hold(dev);

out:
	return err;
}

static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign;
	int err;

	ip6gre_netlink_parms(data, &nt->parms);
	ign = net_generic(net, ip6gre_net_id);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ign->collect_md_tun))
			return -EEXIST;
	} else {
		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
			return -EEXIST;
	}

	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
	if (!err) {
		ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
		ip6gre_tunnel_link_md(ign, nt);
		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
	}
	return err;
}

static struct ip6_tnl *
ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[], struct __ip6_tnl_parm *p_p,
			 struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t, *nt = netdev_priv(dev);
	struct net *net = nt->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ign->fb_tunnel_dev)
		return ERR_PTR(-EINVAL);

	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return ERR_PTR(err);
	}

	ip6gre_netlink_parms(data, p_p);

	t = ip6gre_tunnel_locate(net, p_p, 0);

	if (t) {
		if (t->dev != dev)
			return ERR_PTR(-EEXIST);
	} else {
		t = nt;
	}

	return t;
}

static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
	struct __ip6_tnl_parm p;

	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
	if (IS_ERR(t))
		return PTR_ERR(t);

	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
	ip6gre_tunnel_link_md(ign, t);
	ip6gre_tunnel_link(ign, t);
	return 0;
}

static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	if (dev != ign->fb_tunnel_dev)
		unregister_netdevice_queue(dev, head);
}

static size_t ip6gre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_GRE_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_GRE_FLAGS */
		nla_total_size(4) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		0;
}

static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
	    nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
	    nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
	    nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
	    nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark) ||
	    nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (p->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
		goto nla_put_failure;

	if (p->erspan_ver == 1) {
		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
			goto nla_put_failure;
	} else if (p->erspan_ver == 2) {
		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
			goto nla_put_failure;
		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_LIMIT]	= { .type = NLA_U8 },
	[IFLA_GRE_FLOWINFO]	= { .type = NLA_U32 },
	[IFLA_GRE_FLAGS]	= { .type = NLA_U32 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
	[IFLA_GRE_FWMARK] = { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER] = { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR] = { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID] = { .type = NLA_U16 },
};

static void ip6erspan_tap_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &ip6erspan_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->features |= NETIF_F_NETNS_LOCAL;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);
}

static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
			     struct nlattr *tb[], struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign;
	int err;

	ip6gre_netlink_parms(data, &nt->parms);
	ign = net_generic(net, ip6gre_net_id);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ign->collect_md_tun_erspan))
			return -EEXIST;
	} else {
		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
			return -EEXIST;
	}

	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
	if (!err) {
		ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
		ip6erspan_tunnel_link_md(ign, nt);
		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
	}
	return err;
}

static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	ip6gre_tnl_link_config_common(t);
	ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
}

static int ip6erspan_tnl_change(struct ip6_tnl *t,
				const struct __ip6_tnl_parm *p, int set_mtu)
{
	ip6gre_tnl_copy_tnl_parm(t, p);
	ip6erspan_tnl_link_config(t, set_mtu);
	return 0;
}

static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
				struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
	struct __ip6_tnl_parm p;
	struct ip6_tnl *t;

	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
	if (IS_ERR(t))
		return PTR_ERR(t);

	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
	ip6erspan_tunnel_link_md(ign, t);
	ip6gre_tunnel_link(ign, t);
	return 0;
}

static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
	.kind		= "ip6gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6gre_tunnel_setup,
	.validate	= ip6gre_tunnel_validate,
	.newlink	= ip6gre_newlink,
	.changelink	= ip6gre_changelink,
	.dellink	= ip6gre_dellink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};

static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
	.kind		= "ip6gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6gre_tap_setup,
	.validate	= ip6gre_tap_validate,
	.newlink	= ip6gre_newlink,
	.changelink	= ip6gre_changelink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};
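/*
 * Illustrative userspace usage only (not part of this file): the three link
 * kinds registered below are normally exercised through iproute2's ip(8),
 * assuming a reasonably recent iproute2 build. Addresses, names and key
 * values here are placeholders.
 *
 *   ip link add dev grev6 type ip6gre \
 *           local 2001:db8::1 remote 2001:db8::2 key 42
 *   ip link add dev tapv6 type ip6gretap \
 *           local 2001:db8::1 remote 2001:db8::2
 *   ip link add dev ersv6 type ip6erspan seq key 10 \
 *           local 2001:db8::1 remote 2001:db8::2 \
 *           erspan_ver 2 erspan_dir egress erspan_hwid 7
 *
 * Each "type" string maps to the .kind field of the corresponding
 * rtnl_link_ops ("ip6gre", "ip6gretap", "ip6erspan"), and the per-kind
 * attributes are validated against ip6gre_policy above.
 */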
static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
	.kind		= "ip6erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6erspan_tap_setup,
	.validate	= ip6erspan_tap_validate,
	.newlink	= ip6erspan_newlink,
	.changelink	= ip6erspan_changelink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};

/*
 *	And now the modules code and kernel interface.
 */

static int __init ip6gre_init(void)
{
	int err;

	pr_info("GRE over IPv6 tunneling driver\n");

	err = register_pernet_device(&ip6gre_net_ops);
	if (err < 0)
		return err;

	err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ip6gre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ip6gre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&ip6erspan_tap_ops);
	if (err < 0)
		goto erspan_link_failed;

out:
	return err;

erspan_link_failed:
	rtnl_link_unregister(&ip6gre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ip6gre_link_ops);
rtnl_link_failed:
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
add_proto_failed:
	unregister_pernet_device(&ip6gre_net_ops);
	goto out;
}

static void __exit ip6gre_fini(void)
{
	rtnl_link_unregister(&ip6gre_tap_ops);
	rtnl_link_unregister(&ip6gre_link_ops);
	rtnl_link_unregister(&ip6erspan_tap_ops);
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
	unregister_pernet_device(&ip6gre_net_ops);
}

module_init(ip6gre_init);
module_exit(ip6gre_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
MODULE_ALIAS_RTNL_LINK("ip6gre");
MODULE_ALIAS_RTNL_LINK("ip6gretap");
MODULE_ALIAS_RTNL_LINK("ip6erspan");
MODULE_ALIAS_NETDEV("ip6gre0");