// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * GRE over IPv6 protocol decoder.
 *
 * Authors: Dmitry Kozlov (xeb@mail.ru)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <linux/if_tunnel.h>
#include <linux/ip6_tunnel.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/addrconf.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ip6_tunnel.h>
#include <net/gre.h>
#include <net/erspan.h>
#include <net/dst_metadata.h>


static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

#define IP6_GRE_HASH_SIZE_SHIFT 5
#define IP6_GRE_HASH_SIZE (1 << IP6_GRE_HASH_SIZE_SHIFT)

static unsigned int ip6gre_net_id __read_mostly;
struct ip6gre_net {
	struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];

	struct ip6_tnl __rcu *collect_md_tun;
	struct ip6_tnl __rcu *collect_md_tun_erspan;
	struct net_device *fb_tunnel_dev;
};

static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly;
static int ip6gre_tunnel_init(struct net_device *dev);
static void ip6gre_tunnel_setup(struct net_device *dev);
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);

/* Tunnel hash table */

/*
   4 hash tables:

   3: (remote,local)
   2: (remote,*)
   1: (*,local)
   0: (*,*)

   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless tunnel,
   will match the fallback tunnel.
 */

#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(IP6_GRE_HASH_SIZE - 1))
static u32 HASH_ADDR(const struct in6_addr *addr)
{
	u32 hash = ipv6_addr_hash(addr);

	return hash_32(hash, IP6_GRE_HASH_SIZE_SHIFT);
}

#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]

/* Given src, dst and key, find the appropriate tunnel for the incoming packet.
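 * A candidate tunnel must be up, carry the same key, and have a device type
 * of either ARPHRD_IP6GRE or the type implied by the GRE protocol
 * (ARPHRD_ETHER for ETH_P_TEB/ERSPAN).  The four tables are walked from most
 * to least specific; an exact link and device-type match is returned
 * immediately, otherwise the closest candidate wins.  Failing that, a
 * collect_md tunnel and finally the fallback device are tried.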
 */

static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
		const struct in6_addr *remote, const struct in6_addr *local,
		__be32 key, __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned int h0 = HASH_ADDR(remote);
	unsigned int h1 = HASH_KEY(key);
	struct ip6_tnl *t, *cand = NULL;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int dev_type = (gre_proto == htons(ETH_P_TEB) ||
			gre_proto == htons(ETH_P_ERSPAN) ||
			gre_proto == htons(ETH_P_ERSPAN2)) ?
		       ARPHRD_ETHER : ARPHRD_IP6GRE;
	int score, cand_score = 4;
	struct net_device *ndev;

	for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
		if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
		     (!ipv6_addr_equal(local, &t->parms.raddr) ||
		      !ipv6_addr_is_multicast(local))) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	if (cand)
		return cand;

	if (gre_proto == htons(ETH_P_ERSPAN) ||
	    gre_proto == htons(ETH_P_ERSPAN2))
		t = rcu_dereference(ign->collect_md_tun_erspan);
	else
		t = rcu_dereference(ign->collect_md_tun);

	if (t && t->dev->flags & IFF_UP)
		return t;

	ndev = READ_ONCE(ign->fb_tunnel_dev);
	if (ndev && ndev->flags & IFF_UP)
		return netdev_priv(ndev);

	return NULL;
}

static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
		const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = HASH_KEY(p->i_key);
	int prio = 0;

	if (!ipv6_addr_any(local))
		prio |= 1;
	if
(!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) { 260 prio |= 2; 261 h ^= HASH_ADDR(remote); 262 } 263 264 return &ign->tunnels[prio][h]; 265 } 266 267 static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t) 268 { 269 if (t->parms.collect_md) 270 rcu_assign_pointer(ign->collect_md_tun, t); 271 } 272 273 static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t) 274 { 275 if (t->parms.collect_md) 276 rcu_assign_pointer(ign->collect_md_tun_erspan, t); 277 } 278 279 static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t) 280 { 281 if (t->parms.collect_md) 282 rcu_assign_pointer(ign->collect_md_tun, NULL); 283 } 284 285 static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign, 286 struct ip6_tnl *t) 287 { 288 if (t->parms.collect_md) 289 rcu_assign_pointer(ign->collect_md_tun_erspan, NULL); 290 } 291 292 static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign, 293 const struct ip6_tnl *t) 294 { 295 return __ip6gre_bucket(ign, &t->parms); 296 } 297 298 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t) 299 { 300 struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t); 301 302 rcu_assign_pointer(t->next, rtnl_dereference(*tp)); 303 rcu_assign_pointer(*tp, t); 304 } 305 306 static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t) 307 { 308 struct ip6_tnl __rcu **tp; 309 struct ip6_tnl *iter; 310 311 for (tp = ip6gre_bucket(ign, t); 312 (iter = rtnl_dereference(*tp)) != NULL; 313 tp = &iter->next) { 314 if (t == iter) { 315 rcu_assign_pointer(*tp, t->next); 316 break; 317 } 318 } 319 } 320 321 static struct ip6_tnl *ip6gre_tunnel_find(struct net *net, 322 const struct __ip6_tnl_parm *parms, 323 int type) 324 { 325 const struct in6_addr *remote = &parms->raddr; 326 const struct in6_addr *local = &parms->laddr; 327 __be32 key = parms->i_key; 328 int link = parms->link; 329 struct ip6_tnl *t; 330 struct ip6_tnl __rcu **tp; 331 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 332 333 for (tp = __ip6gre_bucket(ign, parms); 334 (t = rtnl_dereference(*tp)) != NULL; 335 tp = &t->next) 336 if (ipv6_addr_equal(local, &t->parms.laddr) && 337 ipv6_addr_equal(remote, &t->parms.raddr) && 338 key == t->parms.i_key && 339 link == t->parms.link && 340 type == t->dev->type) 341 break; 342 343 return t; 344 } 345 346 static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net, 347 const struct __ip6_tnl_parm *parms, int create) 348 { 349 struct ip6_tnl *t, *nt; 350 struct net_device *dev; 351 char name[IFNAMSIZ]; 352 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 353 354 t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE); 355 if (t && create) 356 return NULL; 357 if (t || !create) 358 return t; 359 360 if (parms->name[0]) { 361 if (!dev_valid_name(parms->name)) 362 return NULL; 363 strscpy(name, parms->name, IFNAMSIZ); 364 } else { 365 strcpy(name, "ip6gre%d"); 366 } 367 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, 368 ip6gre_tunnel_setup); 369 if (!dev) 370 return NULL; 371 372 dev_net_set(dev, net); 373 374 nt = netdev_priv(dev); 375 nt->parms = *parms; 376 dev->rtnl_link_ops = &ip6gre_link_ops; 377 378 nt->dev = dev; 379 nt->net = dev_net(dev); 380 381 if (register_netdevice(dev) < 0) 382 goto failed_free; 383 384 ip6gre_tnl_link_config(nt, 1); 385 ip6gre_tunnel_link(ign, nt); 386 return nt; 387 388 failed_free: 389 free_netdev(dev); 390 return NULL; 391 } 392 393 static void ip6erspan_tunnel_uninit(struct net_device *dev) 394 { 395 struct ip6_tnl *t 
= netdev_priv(dev); 396 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); 397 398 ip6erspan_tunnel_unlink_md(ign, t); 399 ip6gre_tunnel_unlink(ign, t); 400 dst_cache_reset(&t->dst_cache); 401 netdev_put(dev, &t->dev_tracker); 402 } 403 404 static void ip6gre_tunnel_uninit(struct net_device *dev) 405 { 406 struct ip6_tnl *t = netdev_priv(dev); 407 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); 408 409 ip6gre_tunnel_unlink_md(ign, t); 410 ip6gre_tunnel_unlink(ign, t); 411 if (ign->fb_tunnel_dev == dev) 412 WRITE_ONCE(ign->fb_tunnel_dev, NULL); 413 dst_cache_reset(&t->dst_cache); 414 netdev_put(dev, &t->dev_tracker); 415 } 416 417 418 static int ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 419 u8 type, u8 code, int offset, __be32 info) 420 { 421 struct net *net = dev_net(skb->dev); 422 const struct ipv6hdr *ipv6h; 423 struct tnl_ptk_info tpi; 424 struct ip6_tnl *t; 425 426 if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IPV6), 427 offset) < 0) 428 return -EINVAL; 429 430 ipv6h = (const struct ipv6hdr *)skb->data; 431 t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr, 432 tpi.key, tpi.proto); 433 if (!t) 434 return -ENOENT; 435 436 switch (type) { 437 case ICMPV6_DEST_UNREACH: 438 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n", 439 t->parms.name); 440 if (code != ICMPV6_PORT_UNREACH) 441 break; 442 return 0; 443 case ICMPV6_TIME_EXCEED: 444 if (code == ICMPV6_EXC_HOPLIMIT) { 445 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", 446 t->parms.name); 447 break; 448 } 449 return 0; 450 case ICMPV6_PARAMPROB: { 451 struct ipv6_tlv_tnl_enc_lim *tel; 452 __u32 teli; 453 454 teli = 0; 455 if (code == ICMPV6_HDR_FIELD) 456 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data); 457 458 if (teli && teli == be32_to_cpu(info) - 2) { 459 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; 460 if (tel->encap_limit == 0) { 461 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", 462 t->parms.name); 463 } 464 } else { 465 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n", 466 t->parms.name); 467 } 468 return 0; 469 } 470 case ICMPV6_PKT_TOOBIG: 471 ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL)); 472 return 0; 473 case NDISC_REDIRECT: 474 ip6_redirect(skb, net, skb->dev->ifindex, 0, 475 sock_net_uid(net, NULL)); 476 return 0; 477 } 478 479 if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO)) 480 t->err_count++; 481 else 482 t->err_count = 1; 483 t->err_time = jiffies; 484 485 return 0; 486 } 487 488 static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) 489 { 490 const struct ipv6hdr *ipv6h; 491 struct ip6_tnl *tunnel; 492 493 ipv6h = ipv6_hdr(skb); 494 tunnel = ip6gre_tunnel_lookup(skb->dev, 495 &ipv6h->saddr, &ipv6h->daddr, tpi->key, 496 tpi->proto); 497 if (tunnel) { 498 if (tunnel->parms.collect_md) { 499 struct metadata_dst *tun_dst; 500 __be64 tun_id; 501 __be16 flags; 502 503 flags = tpi->flags; 504 tun_id = key32_to_tunnel_id(tpi->key); 505 506 tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0); 507 if (!tun_dst) 508 return PACKET_REJECT; 509 510 ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error); 511 } else { 512 ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error); 513 } 514 515 return PACKET_RCVD; 516 } 517 518 return PACKET_REJECT; 519 } 520 521 static int ip6erspan_rcv(struct sk_buff *skb, 522 struct tnl_ptk_info *tpi, 523 int gre_hdr_len) 524 { 525 struct erspan_base_hdr *ershdr; 526 const struct 
ipv6hdr *ipv6h; 527 struct erspan_md2 *md2; 528 struct ip6_tnl *tunnel; 529 u8 ver; 530 531 ipv6h = ipv6_hdr(skb); 532 ershdr = (struct erspan_base_hdr *)skb->data; 533 ver = ershdr->ver; 534 535 tunnel = ip6gre_tunnel_lookup(skb->dev, 536 &ipv6h->saddr, &ipv6h->daddr, tpi->key, 537 tpi->proto); 538 if (tunnel) { 539 int len = erspan_hdr_len(ver); 540 541 if (unlikely(!pskb_may_pull(skb, len))) 542 return PACKET_REJECT; 543 544 if (__iptunnel_pull_header(skb, len, 545 htons(ETH_P_TEB), 546 false, false) < 0) 547 return PACKET_REJECT; 548 549 if (tunnel->parms.collect_md) { 550 struct erspan_metadata *pkt_md, *md; 551 struct metadata_dst *tun_dst; 552 struct ip_tunnel_info *info; 553 unsigned char *gh; 554 __be64 tun_id; 555 __be16 flags; 556 557 tpi->flags |= TUNNEL_KEY; 558 flags = tpi->flags; 559 tun_id = key32_to_tunnel_id(tpi->key); 560 561 tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 562 sizeof(*md)); 563 if (!tun_dst) 564 return PACKET_REJECT; 565 566 /* skb can be uncloned in __iptunnel_pull_header, so 567 * old pkt_md is no longer valid and we need to reset 568 * it 569 */ 570 gh = skb_network_header(skb) + 571 skb_network_header_len(skb); 572 pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len + 573 sizeof(*ershdr)); 574 info = &tun_dst->u.tun_info; 575 md = ip_tunnel_info_opts(info); 576 md->version = ver; 577 md2 = &md->u.md2; 578 memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE : 579 ERSPAN_V2_MDSIZE); 580 info->key.tun_flags |= TUNNEL_ERSPAN_OPT; 581 info->options_len = sizeof(*md); 582 583 ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error); 584 585 } else { 586 ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error); 587 } 588 589 return PACKET_RCVD; 590 } 591 592 return PACKET_REJECT; 593 } 594 595 static int gre_rcv(struct sk_buff *skb) 596 { 597 struct tnl_ptk_info tpi; 598 bool csum_err = false; 599 int hdr_len; 600 601 hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0); 602 if (hdr_len < 0) 603 goto drop; 604 605 if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false)) 606 goto drop; 607 608 if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) || 609 tpi.proto == htons(ETH_P_ERSPAN2))) { 610 if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD) 611 return 0; 612 goto out; 613 } 614 615 if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD) 616 return 0; 617 618 out: 619 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); 620 drop: 621 kfree_skb(skb); 622 return 0; 623 } 624 625 static int gre_handle_offloads(struct sk_buff *skb, bool csum) 626 { 627 return iptunnel_handle_offloads(skb, 628 csum ? 
SKB_GSO_GRE_CSUM : SKB_GSO_GRE); 629 } 630 631 static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb, 632 struct net_device *dev, 633 struct flowi6 *fl6, __u8 *dsfield, 634 int *encap_limit) 635 { 636 const struct iphdr *iph = ip_hdr(skb); 637 struct ip6_tnl *t = netdev_priv(dev); 638 639 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 640 *encap_limit = t->parms.encap_limit; 641 642 memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6)); 643 644 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) 645 *dsfield = ipv4_get_dsfield(iph); 646 else 647 *dsfield = ip6_tclass(t->parms.flowinfo); 648 649 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) 650 fl6->flowi6_mark = skb->mark; 651 else 652 fl6->flowi6_mark = t->parms.fwmark; 653 654 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL); 655 } 656 657 static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb, 658 struct net_device *dev, 659 struct flowi6 *fl6, __u8 *dsfield, 660 int *encap_limit) 661 { 662 struct ipv6hdr *ipv6h; 663 struct ip6_tnl *t = netdev_priv(dev); 664 __u16 offset; 665 666 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); 667 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ 668 ipv6h = ipv6_hdr(skb); 669 670 if (offset > 0) { 671 struct ipv6_tlv_tnl_enc_lim *tel; 672 673 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; 674 if (tel->encap_limit == 0) { 675 icmpv6_ndo_send(skb, ICMPV6_PARAMPROB, 676 ICMPV6_HDR_FIELD, offset + 2); 677 return -1; 678 } 679 *encap_limit = tel->encap_limit - 1; 680 } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) { 681 *encap_limit = t->parms.encap_limit; 682 } 683 684 memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6)); 685 686 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) 687 *dsfield = ipv6_get_dsfield(ipv6h); 688 else 689 *dsfield = ip6_tclass(t->parms.flowinfo); 690 691 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL) 692 fl6->flowlabel |= ip6_flowlabel(ipv6h); 693 694 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) 695 fl6->flowi6_mark = skb->mark; 696 else 697 fl6->flowi6_mark = t->parms.fwmark; 698 699 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL); 700 701 return 0; 702 } 703 704 static int prepare_ip6gre_xmit_other(struct sk_buff *skb, 705 struct net_device *dev, 706 struct flowi6 *fl6, __u8 *dsfield, 707 int *encap_limit) 708 { 709 struct ip6_tnl *t = netdev_priv(dev); 710 711 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 712 *encap_limit = t->parms.encap_limit; 713 714 memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6)); 715 716 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) 717 *dsfield = 0; 718 else 719 *dsfield = ip6_tclass(t->parms.flowinfo); 720 721 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) 722 fl6->flowi6_mark = skb->mark; 723 else 724 fl6->flowi6_mark = t->parms.fwmark; 725 726 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL); 727 728 return 0; 729 } 730 731 static struct ip_tunnel_info *skb_tunnel_info_txcheck(struct sk_buff *skb) 732 { 733 struct ip_tunnel_info *tun_info; 734 735 tun_info = skb_tunnel_info(skb); 736 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX))) 737 return ERR_PTR(-EINVAL); 738 739 return tun_info; 740 } 741 742 static netdev_tx_t __gre6_xmit(struct sk_buff *skb, 743 struct net_device *dev, __u8 dsfield, 744 struct flowi6 *fl6, int encap_limit, 745 __u32 *pmtu, __be16 proto) 746 { 747 struct ip6_tnl *tunnel = netdev_priv(dev); 748 __be16 protocol; 749 __be16 flags; 750 751 if (dev->type == ARPHRD_ETHER) 752 IPCB(skb)->flags = 0; 753 754 if (dev->header_ops && dev->type == ARPHRD_IP6GRE) 755 
fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr; 756 else 757 fl6->daddr = tunnel->parms.raddr; 758 759 /* Push GRE header. */ 760 protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto; 761 762 if (tunnel->parms.collect_md) { 763 struct ip_tunnel_info *tun_info; 764 const struct ip_tunnel_key *key; 765 int tun_hlen; 766 767 tun_info = skb_tunnel_info_txcheck(skb); 768 if (IS_ERR(tun_info) || 769 unlikely(ip_tunnel_info_af(tun_info) != AF_INET6)) 770 return -EINVAL; 771 772 key = &tun_info->key; 773 memset(fl6, 0, sizeof(*fl6)); 774 fl6->flowi6_proto = IPPROTO_GRE; 775 fl6->daddr = key->u.ipv6.dst; 776 fl6->flowlabel = key->label; 777 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL); 778 fl6->fl6_gre_key = tunnel_id_to_key32(key->tun_id); 779 780 dsfield = key->tos; 781 flags = key->tun_flags & 782 (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ); 783 tun_hlen = gre_calc_hlen(flags); 784 785 if (skb_cow_head(skb, dev->needed_headroom ?: tun_hlen + tunnel->encap_hlen)) 786 return -ENOMEM; 787 788 gre_build_header(skb, tun_hlen, 789 flags, protocol, 790 tunnel_id_to_key32(tun_info->key.tun_id), 791 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) 792 : 0); 793 794 } else { 795 if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen)) 796 return -ENOMEM; 797 798 flags = tunnel->parms.o_flags; 799 800 gre_build_header(skb, tunnel->tun_hlen, flags, 801 protocol, tunnel->parms.o_key, 802 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) 803 : 0); 804 } 805 806 return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu, 807 NEXTHDR_GRE); 808 } 809 810 static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev) 811 { 812 struct ip6_tnl *t = netdev_priv(dev); 813 int encap_limit = -1; 814 struct flowi6 fl6; 815 __u8 dsfield = 0; 816 __u32 mtu; 817 int err; 818 819 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 820 821 if (!t->parms.collect_md) 822 prepare_ip6gre_xmit_ipv4(skb, dev, &fl6, 823 &dsfield, &encap_limit); 824 825 err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)); 826 if (err) 827 return -1; 828 829 err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, 830 skb->protocol); 831 if (err != 0) { 832 /* XXX: send ICMP error even if DF is not set. 
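		 * __gre6_xmit() propagates -EMSGSIZE from ip6_tnl_xmit() when
		 * the packet does not fit the tunnel path MTU; report it to the
		 * IPv4 sender as a "fragmentation needed" error carrying that MTU.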
*/ 833 if (err == -EMSGSIZE) 834 icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 835 htonl(mtu)); 836 return -1; 837 } 838 839 return 0; 840 } 841 842 static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) 843 { 844 struct ip6_tnl *t = netdev_priv(dev); 845 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 846 int encap_limit = -1; 847 struct flowi6 fl6; 848 __u8 dsfield = 0; 849 __u32 mtu; 850 int err; 851 852 if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr)) 853 return -1; 854 855 if (!t->parms.collect_md && 856 prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit)) 857 return -1; 858 859 if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM))) 860 return -1; 861 862 err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, 863 &mtu, skb->protocol); 864 if (err != 0) { 865 if (err == -EMSGSIZE) 866 icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 867 return -1; 868 } 869 870 return 0; 871 } 872 873 static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev) 874 { 875 struct ip6_tnl *t = netdev_priv(dev); 876 int encap_limit = -1; 877 struct flowi6 fl6; 878 __u8 dsfield = 0; 879 __u32 mtu; 880 int err; 881 882 if (!t->parms.collect_md && 883 prepare_ip6gre_xmit_other(skb, dev, &fl6, &dsfield, &encap_limit)) 884 return -1; 885 886 err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)); 887 if (err) 888 return err; 889 err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, skb->protocol); 890 891 return err; 892 } 893 894 static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb, 895 struct net_device *dev) 896 { 897 struct ip6_tnl *t = netdev_priv(dev); 898 __be16 payload_protocol; 899 int ret; 900 901 if (!pskb_inet_may_pull(skb)) 902 goto tx_err; 903 904 if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr)) 905 goto tx_err; 906 907 payload_protocol = skb_protocol(skb, true); 908 switch (payload_protocol) { 909 case htons(ETH_P_IP): 910 ret = ip6gre_xmit_ipv4(skb, dev); 911 break; 912 case htons(ETH_P_IPV6): 913 ret = ip6gre_xmit_ipv6(skb, dev); 914 break; 915 default: 916 ret = ip6gre_xmit_other(skb, dev); 917 break; 918 } 919 920 if (ret < 0) 921 goto tx_err; 922 923 return NETDEV_TX_OK; 924 925 tx_err: 926 if (!t->parms.collect_md || !IS_ERR(skb_tunnel_info_txcheck(skb))) 927 DEV_STATS_INC(dev, tx_errors); 928 DEV_STATS_INC(dev, tx_dropped); 929 kfree_skb(skb); 930 return NETDEV_TX_OK; 931 } 932 933 static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, 934 struct net_device *dev) 935 { 936 struct ip_tunnel_info *tun_info = NULL; 937 struct ip6_tnl *t = netdev_priv(dev); 938 struct dst_entry *dst = skb_dst(skb); 939 bool truncate = false; 940 int encap_limit = -1; 941 __u8 dsfield = false; 942 struct flowi6 fl6; 943 int err = -EINVAL; 944 __be16 proto; 945 __u32 mtu; 946 int nhoff; 947 948 if (!pskb_inet_may_pull(skb)) 949 goto tx_err; 950 951 if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr)) 952 goto tx_err; 953 954 if (gre_handle_offloads(skb, false)) 955 goto tx_err; 956 957 if (skb->len > dev->mtu + dev->hard_header_len) { 958 pskb_trim(skb, dev->mtu + dev->hard_header_len); 959 truncate = true; 960 } 961 962 nhoff = skb_network_header(skb) - skb_mac_header(skb); 963 if (skb->protocol == htons(ETH_P_IP) && 964 (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff)) 965 truncate = true; 966 967 if (skb->protocol == htons(ETH_P_IPV6)) { 968 int thoff; 969 970 if (skb_transport_header_was_set(skb)) 971 thoff = skb_transport_header(skb) - skb_mac_header(skb); 972 else 973 thoff = nhoff + 
sizeof(struct ipv6hdr); 974 if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff) 975 truncate = true; 976 } 977 978 if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen)) 979 goto tx_err; 980 981 t->parms.o_flags &= ~TUNNEL_KEY; 982 IPCB(skb)->flags = 0; 983 984 /* For collect_md mode, derive fl6 from the tunnel key, 985 * for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}. 986 */ 987 if (t->parms.collect_md) { 988 const struct ip_tunnel_key *key; 989 struct erspan_metadata *md; 990 __be32 tun_id; 991 992 tun_info = skb_tunnel_info_txcheck(skb); 993 if (IS_ERR(tun_info) || 994 unlikely(ip_tunnel_info_af(tun_info) != AF_INET6)) 995 goto tx_err; 996 997 key = &tun_info->key; 998 memset(&fl6, 0, sizeof(fl6)); 999 fl6.flowi6_proto = IPPROTO_GRE; 1000 fl6.daddr = key->u.ipv6.dst; 1001 fl6.flowlabel = key->label; 1002 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL); 1003 fl6.fl6_gre_key = tunnel_id_to_key32(key->tun_id); 1004 1005 dsfield = key->tos; 1006 if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)) 1007 goto tx_err; 1008 if (tun_info->options_len < sizeof(*md)) 1009 goto tx_err; 1010 md = ip_tunnel_info_opts(tun_info); 1011 1012 tun_id = tunnel_id_to_key32(key->tun_id); 1013 if (md->version == 1) { 1014 erspan_build_header(skb, 1015 ntohl(tun_id), 1016 ntohl(md->u.index), truncate, 1017 false); 1018 } else if (md->version == 2) { 1019 erspan_build_header_v2(skb, 1020 ntohl(tun_id), 1021 md->u.md2.dir, 1022 get_hwid(&md->u.md2), 1023 truncate, false); 1024 } else { 1025 goto tx_err; 1026 } 1027 } else { 1028 switch (skb->protocol) { 1029 case htons(ETH_P_IP): 1030 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 1031 prepare_ip6gre_xmit_ipv4(skb, dev, &fl6, 1032 &dsfield, &encap_limit); 1033 break; 1034 case htons(ETH_P_IPV6): 1035 if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr)) 1036 goto tx_err; 1037 if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, 1038 &dsfield, &encap_limit)) 1039 goto tx_err; 1040 break; 1041 default: 1042 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); 1043 break; 1044 } 1045 1046 if (t->parms.erspan_ver == 1) 1047 erspan_build_header(skb, ntohl(t->parms.o_key), 1048 t->parms.index, 1049 truncate, false); 1050 else if (t->parms.erspan_ver == 2) 1051 erspan_build_header_v2(skb, ntohl(t->parms.o_key), 1052 t->parms.dir, 1053 t->parms.hwid, 1054 truncate, false); 1055 else 1056 goto tx_err; 1057 1058 fl6.daddr = t->parms.raddr; 1059 } 1060 1061 /* Push GRE header. */ 1062 proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN) 1063 : htons(ETH_P_ERSPAN2); 1064 gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(atomic_fetch_inc(&t->o_seqno))); 1065 1066 /* TooBig packet may have updated dst->dev's mtu */ 1067 if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu) 1068 dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false); 1069 1070 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, 1071 NEXTHDR_GRE); 1072 if (err != 0) { 1073 /* XXX: send ICMP error even if DF is not set. 
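		 * The inner frame here may be IPv4 or IPv6, so reflect -EMSGSIZE
		 * as an ICMP "fragmentation needed" or an ICMPv6 "packet too big"
		 * error respectively, carrying the tunnel MTU.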
*/ 1074 if (err == -EMSGSIZE) { 1075 if (skb->protocol == htons(ETH_P_IP)) 1076 icmp_ndo_send(skb, ICMP_DEST_UNREACH, 1077 ICMP_FRAG_NEEDED, htonl(mtu)); 1078 else 1079 icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 1080 } 1081 1082 goto tx_err; 1083 } 1084 return NETDEV_TX_OK; 1085 1086 tx_err: 1087 if (!IS_ERR(tun_info)) 1088 DEV_STATS_INC(dev, tx_errors); 1089 DEV_STATS_INC(dev, tx_dropped); 1090 kfree_skb(skb); 1091 return NETDEV_TX_OK; 1092 } 1093 1094 static void ip6gre_tnl_link_config_common(struct ip6_tnl *t) 1095 { 1096 struct net_device *dev = t->dev; 1097 struct __ip6_tnl_parm *p = &t->parms; 1098 struct flowi6 *fl6 = &t->fl.u.ip6; 1099 1100 if (dev->type != ARPHRD_ETHER) { 1101 __dev_addr_set(dev, &p->laddr, sizeof(struct in6_addr)); 1102 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr)); 1103 } 1104 1105 /* Set up flowi template */ 1106 fl6->saddr = p->laddr; 1107 fl6->daddr = p->raddr; 1108 fl6->flowi6_oif = p->link; 1109 fl6->flowlabel = 0; 1110 fl6->flowi6_proto = IPPROTO_GRE; 1111 fl6->fl6_gre_key = t->parms.o_key; 1112 1113 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS)) 1114 fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo; 1115 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL)) 1116 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo; 1117 1118 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET); 1119 p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr); 1120 1121 if (p->flags&IP6_TNL_F_CAP_XMIT && 1122 p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER) 1123 dev->flags |= IFF_POINTOPOINT; 1124 else 1125 dev->flags &= ~IFF_POINTOPOINT; 1126 } 1127 1128 static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu, 1129 int t_hlen) 1130 { 1131 const struct __ip6_tnl_parm *p = &t->parms; 1132 struct net_device *dev = t->dev; 1133 1134 if (p->flags & IP6_TNL_F_CAP_XMIT) { 1135 int strict = (ipv6_addr_type(&p->raddr) & 1136 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)); 1137 1138 struct rt6_info *rt = rt6_lookup(t->net, 1139 &p->raddr, &p->laddr, 1140 p->link, NULL, strict); 1141 1142 if (!rt) 1143 return; 1144 1145 if (rt->dst.dev) { 1146 unsigned short dst_len = rt->dst.dev->hard_header_len + 1147 t_hlen; 1148 1149 if (t->dev->header_ops) 1150 dev->hard_header_len = dst_len; 1151 else 1152 dev->needed_headroom = dst_len; 1153 1154 if (set_mtu) { 1155 int mtu = rt->dst.dev->mtu - t_hlen; 1156 1157 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1158 mtu -= 8; 1159 if (dev->type == ARPHRD_ETHER) 1160 mtu -= ETH_HLEN; 1161 1162 if (mtu < IPV6_MIN_MTU) 1163 mtu = IPV6_MIN_MTU; 1164 WRITE_ONCE(dev->mtu, mtu); 1165 } 1166 } 1167 ip6_rt_put(rt); 1168 } 1169 } 1170 1171 static int ip6gre_calc_hlen(struct ip6_tnl *tunnel) 1172 { 1173 int t_hlen; 1174 1175 tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags); 1176 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen; 1177 1178 t_hlen = tunnel->hlen + sizeof(struct ipv6hdr); 1179 1180 if (tunnel->dev->header_ops) 1181 tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen; 1182 else 1183 tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen; 1184 1185 return t_hlen; 1186 } 1187 1188 static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) 1189 { 1190 ip6gre_tnl_link_config_common(t); 1191 ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t)); 1192 } 1193 1194 static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t, 1195 const struct __ip6_tnl_parm *p) 1196 { 1197 t->parms.laddr = p->laddr; 1198 t->parms.raddr = p->raddr; 1199 t->parms.flags = p->flags; 1200 
t->parms.hop_limit = p->hop_limit; 1201 t->parms.encap_limit = p->encap_limit; 1202 t->parms.flowinfo = p->flowinfo; 1203 t->parms.link = p->link; 1204 t->parms.proto = p->proto; 1205 t->parms.i_key = p->i_key; 1206 t->parms.o_key = p->o_key; 1207 t->parms.i_flags = p->i_flags; 1208 t->parms.o_flags = p->o_flags; 1209 t->parms.fwmark = p->fwmark; 1210 t->parms.erspan_ver = p->erspan_ver; 1211 t->parms.index = p->index; 1212 t->parms.dir = p->dir; 1213 t->parms.hwid = p->hwid; 1214 dst_cache_reset(&t->dst_cache); 1215 } 1216 1217 static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p, 1218 int set_mtu) 1219 { 1220 ip6gre_tnl_copy_tnl_parm(t, p); 1221 ip6gre_tnl_link_config(t, set_mtu); 1222 return 0; 1223 } 1224 1225 static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p, 1226 const struct ip6_tnl_parm2 *u) 1227 { 1228 p->laddr = u->laddr; 1229 p->raddr = u->raddr; 1230 p->flags = u->flags; 1231 p->hop_limit = u->hop_limit; 1232 p->encap_limit = u->encap_limit; 1233 p->flowinfo = u->flowinfo; 1234 p->link = u->link; 1235 p->i_key = u->i_key; 1236 p->o_key = u->o_key; 1237 p->i_flags = gre_flags_to_tnl_flags(u->i_flags); 1238 p->o_flags = gre_flags_to_tnl_flags(u->o_flags); 1239 memcpy(p->name, u->name, sizeof(u->name)); 1240 } 1241 1242 static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u, 1243 const struct __ip6_tnl_parm *p) 1244 { 1245 u->proto = IPPROTO_GRE; 1246 u->laddr = p->laddr; 1247 u->raddr = p->raddr; 1248 u->flags = p->flags; 1249 u->hop_limit = p->hop_limit; 1250 u->encap_limit = p->encap_limit; 1251 u->flowinfo = p->flowinfo; 1252 u->link = p->link; 1253 u->i_key = p->i_key; 1254 u->o_key = p->o_key; 1255 u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags); 1256 u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags); 1257 memcpy(u->name, p->name, sizeof(u->name)); 1258 } 1259 1260 static int ip6gre_tunnel_siocdevprivate(struct net_device *dev, 1261 struct ifreq *ifr, void __user *data, 1262 int cmd) 1263 { 1264 int err = 0; 1265 struct ip6_tnl_parm2 p; 1266 struct __ip6_tnl_parm p1; 1267 struct ip6_tnl *t = netdev_priv(dev); 1268 struct net *net = t->net; 1269 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 1270 1271 memset(&p1, 0, sizeof(p1)); 1272 1273 switch (cmd) { 1274 case SIOCGETTUNNEL: 1275 if (dev == ign->fb_tunnel_dev) { 1276 if (copy_from_user(&p, data, sizeof(p))) { 1277 err = -EFAULT; 1278 break; 1279 } 1280 ip6gre_tnl_parm_from_user(&p1, &p); 1281 t = ip6gre_tunnel_locate(net, &p1, 0); 1282 if (!t) 1283 t = netdev_priv(dev); 1284 } 1285 memset(&p, 0, sizeof(p)); 1286 ip6gre_tnl_parm_to_user(&p, &t->parms); 1287 if (copy_to_user(data, &p, sizeof(p))) 1288 err = -EFAULT; 1289 break; 1290 1291 case SIOCADDTUNNEL: 1292 case SIOCCHGTUNNEL: 1293 err = -EPERM; 1294 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 1295 goto done; 1296 1297 err = -EFAULT; 1298 if (copy_from_user(&p, data, sizeof(p))) 1299 goto done; 1300 1301 err = -EINVAL; 1302 if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)) 1303 goto done; 1304 1305 if (!(p.i_flags&GRE_KEY)) 1306 p.i_key = 0; 1307 if (!(p.o_flags&GRE_KEY)) 1308 p.o_key = 0; 1309 1310 ip6gre_tnl_parm_from_user(&p1, &p); 1311 t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL); 1312 1313 if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) { 1314 if (t) { 1315 if (t->dev != dev) { 1316 err = -EEXIST; 1317 break; 1318 } 1319 } else { 1320 t = netdev_priv(dev); 1321 1322 ip6gre_tunnel_unlink(ign, t); 1323 synchronize_net(); 1324 ip6gre_tnl_change(t, &p1, 1); 1325 ip6gre_tunnel_link(ign, 
t); 1326 netdev_state_change(dev); 1327 } 1328 } 1329 1330 if (t) { 1331 err = 0; 1332 1333 memset(&p, 0, sizeof(p)); 1334 ip6gre_tnl_parm_to_user(&p, &t->parms); 1335 if (copy_to_user(data, &p, sizeof(p))) 1336 err = -EFAULT; 1337 } else 1338 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT); 1339 break; 1340 1341 case SIOCDELTUNNEL: 1342 err = -EPERM; 1343 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 1344 goto done; 1345 1346 if (dev == ign->fb_tunnel_dev) { 1347 err = -EFAULT; 1348 if (copy_from_user(&p, data, sizeof(p))) 1349 goto done; 1350 err = -ENOENT; 1351 ip6gre_tnl_parm_from_user(&p1, &p); 1352 t = ip6gre_tunnel_locate(net, &p1, 0); 1353 if (!t) 1354 goto done; 1355 err = -EPERM; 1356 if (t == netdev_priv(ign->fb_tunnel_dev)) 1357 goto done; 1358 dev = t->dev; 1359 } 1360 unregister_netdevice(dev); 1361 err = 0; 1362 break; 1363 1364 default: 1365 err = -EINVAL; 1366 } 1367 1368 done: 1369 return err; 1370 } 1371 1372 static int ip6gre_header(struct sk_buff *skb, struct net_device *dev, 1373 unsigned short type, const void *daddr, 1374 const void *saddr, unsigned int len) 1375 { 1376 struct ip6_tnl *t = netdev_priv(dev); 1377 struct ipv6hdr *ipv6h; 1378 __be16 *p; 1379 1380 ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h)); 1381 ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb, 1382 t->fl.u.ip6.flowlabel, 1383 true, &t->fl.u.ip6)); 1384 ipv6h->hop_limit = t->parms.hop_limit; 1385 ipv6h->nexthdr = NEXTHDR_GRE; 1386 ipv6h->saddr = t->parms.laddr; 1387 ipv6h->daddr = t->parms.raddr; 1388 1389 p = (__be16 *)(ipv6h + 1); 1390 p[0] = t->parms.o_flags; 1391 p[1] = htons(type); 1392 1393 /* 1394 * Set the source hardware address. 1395 */ 1396 1397 if (saddr) 1398 memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr)); 1399 if (daddr) 1400 memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr)); 1401 if (!ipv6_addr_any(&ipv6h->daddr)) 1402 return t->hlen; 1403 1404 return -t->hlen; 1405 } 1406 1407 static const struct header_ops ip6gre_header_ops = { 1408 .create = ip6gre_header, 1409 }; 1410 1411 static const struct net_device_ops ip6gre_netdev_ops = { 1412 .ndo_init = ip6gre_tunnel_init, 1413 .ndo_uninit = ip6gre_tunnel_uninit, 1414 .ndo_start_xmit = ip6gre_tunnel_xmit, 1415 .ndo_siocdevprivate = ip6gre_tunnel_siocdevprivate, 1416 .ndo_change_mtu = ip6_tnl_change_mtu, 1417 .ndo_get_stats64 = dev_get_tstats64, 1418 .ndo_get_iflink = ip6_tnl_get_iflink, 1419 }; 1420 1421 static void ip6gre_dev_free(struct net_device *dev) 1422 { 1423 struct ip6_tnl *t = netdev_priv(dev); 1424 1425 gro_cells_destroy(&t->gro_cells); 1426 dst_cache_destroy(&t->dst_cache); 1427 free_percpu(dev->tstats); 1428 } 1429 1430 static void ip6gre_tunnel_setup(struct net_device *dev) 1431 { 1432 dev->netdev_ops = &ip6gre_netdev_ops; 1433 dev->needs_free_netdev = true; 1434 dev->priv_destructor = ip6gre_dev_free; 1435 1436 dev->type = ARPHRD_IP6GRE; 1437 1438 dev->flags |= IFF_NOARP; 1439 dev->addr_len = sizeof(struct in6_addr); 1440 netif_keep_dst(dev); 1441 /* This perm addr will be used as interface identifier by IPv6 */ 1442 dev->addr_assign_type = NET_ADDR_RANDOM; 1443 eth_random_addr(dev->perm_addr); 1444 } 1445 1446 #define GRE6_FEATURES (NETIF_F_SG | \ 1447 NETIF_F_FRAGLIST | \ 1448 NETIF_F_HIGHDMA | \ 1449 NETIF_F_HW_CSUM) 1450 1451 static void ip6gre_tnl_init_features(struct net_device *dev) 1452 { 1453 struct ip6_tnl *nt = netdev_priv(dev); 1454 __be16 flags; 1455 1456 dev->features |= GRE6_FEATURES | NETIF_F_LLTX; 1457 dev->hw_features |= GRE6_FEATURES; 1458 1459 flags = nt->parms.o_flags; 1460 1461 
/* TCP offload with GRE SEQ is not supported, nor can we support 2 1462 * levels of outer headers requiring an update. 1463 */ 1464 if (flags & TUNNEL_SEQ) 1465 return; 1466 if (flags & TUNNEL_CSUM && nt->encap.type != TUNNEL_ENCAP_NONE) 1467 return; 1468 1469 dev->features |= NETIF_F_GSO_SOFTWARE; 1470 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 1471 } 1472 1473 static int ip6gre_tunnel_init_common(struct net_device *dev) 1474 { 1475 struct ip6_tnl *tunnel; 1476 int ret; 1477 int t_hlen; 1478 1479 tunnel = netdev_priv(dev); 1480 1481 tunnel->dev = dev; 1482 tunnel->net = dev_net(dev); 1483 strcpy(tunnel->parms.name, dev->name); 1484 1485 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 1486 if (!dev->tstats) 1487 return -ENOMEM; 1488 1489 ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL); 1490 if (ret) 1491 goto cleanup_alloc_pcpu_stats; 1492 1493 ret = gro_cells_init(&tunnel->gro_cells, dev); 1494 if (ret) 1495 goto cleanup_dst_cache_init; 1496 1497 t_hlen = ip6gre_calc_hlen(tunnel); 1498 dev->mtu = ETH_DATA_LEN - t_hlen; 1499 if (dev->type == ARPHRD_ETHER) 1500 dev->mtu -= ETH_HLEN; 1501 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1502 dev->mtu -= 8; 1503 1504 if (tunnel->parms.collect_md) { 1505 netif_keep_dst(dev); 1506 } 1507 ip6gre_tnl_init_features(dev); 1508 1509 netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL); 1510 return 0; 1511 1512 cleanup_dst_cache_init: 1513 dst_cache_destroy(&tunnel->dst_cache); 1514 cleanup_alloc_pcpu_stats: 1515 free_percpu(dev->tstats); 1516 dev->tstats = NULL; 1517 return ret; 1518 } 1519 1520 static int ip6gre_tunnel_init(struct net_device *dev) 1521 { 1522 struct ip6_tnl *tunnel; 1523 int ret; 1524 1525 ret = ip6gre_tunnel_init_common(dev); 1526 if (ret) 1527 return ret; 1528 1529 tunnel = netdev_priv(dev); 1530 1531 if (tunnel->parms.collect_md) 1532 return 0; 1533 1534 __dev_addr_set(dev, &tunnel->parms.laddr, sizeof(struct in6_addr)); 1535 memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr)); 1536 1537 if (ipv6_addr_any(&tunnel->parms.raddr)) 1538 dev->header_ops = &ip6gre_header_ops; 1539 1540 return 0; 1541 } 1542 1543 static void ip6gre_fb_tunnel_init(struct net_device *dev) 1544 { 1545 struct ip6_tnl *tunnel = netdev_priv(dev); 1546 1547 tunnel->dev = dev; 1548 tunnel->net = dev_net(dev); 1549 strcpy(tunnel->parms.name, dev->name); 1550 1551 tunnel->hlen = sizeof(struct ipv6hdr) + 4; 1552 } 1553 1554 static struct inet6_protocol ip6gre_protocol __read_mostly = { 1555 .handler = gre_rcv, 1556 .err_handler = ip6gre_err, 1557 .flags = INET6_PROTO_FINAL, 1558 }; 1559 1560 static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head) 1561 { 1562 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 1563 struct net_device *dev, *aux; 1564 int prio; 1565 1566 for_each_netdev_safe(net, dev, aux) 1567 if (dev->rtnl_link_ops == &ip6gre_link_ops || 1568 dev->rtnl_link_ops == &ip6gre_tap_ops || 1569 dev->rtnl_link_ops == &ip6erspan_tap_ops) 1570 unregister_netdevice_queue(dev, head); 1571 1572 for (prio = 0; prio < 4; prio++) { 1573 int h; 1574 for (h = 0; h < IP6_GRE_HASH_SIZE; h++) { 1575 struct ip6_tnl *t; 1576 1577 t = rtnl_dereference(ign->tunnels[prio][h]); 1578 1579 while (t) { 1580 /* If dev is in the same netns, it has already 1581 * been added to the list by the previous loop. 
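				 * Tunnels whose device has been moved to a
				 * different netns still need to be queued for
				 * unregistration here.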
1582 */ 1583 if (!net_eq(dev_net(t->dev), net)) 1584 unregister_netdevice_queue(t->dev, 1585 head); 1586 t = rtnl_dereference(t->next); 1587 } 1588 } 1589 } 1590 } 1591 1592 static int __net_init ip6gre_init_net(struct net *net) 1593 { 1594 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 1595 struct net_device *ndev; 1596 int err; 1597 1598 if (!net_has_fallback_tunnels(net)) 1599 return 0; 1600 ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0", 1601 NET_NAME_UNKNOWN, ip6gre_tunnel_setup); 1602 if (!ndev) { 1603 err = -ENOMEM; 1604 goto err_alloc_dev; 1605 } 1606 ign->fb_tunnel_dev = ndev; 1607 dev_net_set(ign->fb_tunnel_dev, net); 1608 /* FB netdevice is special: we have one, and only one per netns. 1609 * Allowing to move it to another netns is clearly unsafe. 1610 */ 1611 ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL; 1612 1613 1614 ip6gre_fb_tunnel_init(ign->fb_tunnel_dev); 1615 ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops; 1616 1617 err = register_netdev(ign->fb_tunnel_dev); 1618 if (err) 1619 goto err_reg_dev; 1620 1621 rcu_assign_pointer(ign->tunnels_wc[0], 1622 netdev_priv(ign->fb_tunnel_dev)); 1623 return 0; 1624 1625 err_reg_dev: 1626 free_netdev(ndev); 1627 err_alloc_dev: 1628 return err; 1629 } 1630 1631 static void __net_exit ip6gre_exit_batch_net(struct list_head *net_list) 1632 { 1633 struct net *net; 1634 LIST_HEAD(list); 1635 1636 rtnl_lock(); 1637 list_for_each_entry(net, net_list, exit_list) 1638 ip6gre_destroy_tunnels(net, &list); 1639 unregister_netdevice_many(&list); 1640 rtnl_unlock(); 1641 } 1642 1643 static struct pernet_operations ip6gre_net_ops = { 1644 .init = ip6gre_init_net, 1645 .exit_batch = ip6gre_exit_batch_net, 1646 .id = &ip6gre_net_id, 1647 .size = sizeof(struct ip6gre_net), 1648 }; 1649 1650 static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[], 1651 struct netlink_ext_ack *extack) 1652 { 1653 __be16 flags; 1654 1655 if (!data) 1656 return 0; 1657 1658 flags = 0; 1659 if (data[IFLA_GRE_IFLAGS]) 1660 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]); 1661 if (data[IFLA_GRE_OFLAGS]) 1662 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]); 1663 if (flags & (GRE_VERSION|GRE_ROUTING)) 1664 return -EINVAL; 1665 1666 return 0; 1667 } 1668 1669 static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[], 1670 struct netlink_ext_ack *extack) 1671 { 1672 struct in6_addr daddr; 1673 1674 if (tb[IFLA_ADDRESS]) { 1675 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) 1676 return -EINVAL; 1677 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) 1678 return -EADDRNOTAVAIL; 1679 } 1680 1681 if (!data) 1682 goto out; 1683 1684 if (data[IFLA_GRE_REMOTE]) { 1685 daddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]); 1686 if (ipv6_addr_any(&daddr)) 1687 return -EINVAL; 1688 } 1689 1690 out: 1691 return ip6gre_tunnel_validate(tb, data, extack); 1692 } 1693 1694 static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[], 1695 struct netlink_ext_ack *extack) 1696 { 1697 __be16 flags = 0; 1698 int ret, ver = 0; 1699 1700 if (!data) 1701 return 0; 1702 1703 ret = ip6gre_tap_validate(tb, data, extack); 1704 if (ret) 1705 return ret; 1706 1707 /* ERSPAN should only have GRE sequence and key flag */ 1708 if (data[IFLA_GRE_OFLAGS]) 1709 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]); 1710 if (data[IFLA_GRE_IFLAGS]) 1711 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]); 1712 if (!data[IFLA_GRE_COLLECT_METADATA] && 1713 flags != (GRE_SEQ | GRE_KEY)) 1714 return -EINVAL; 1715 1716 /* ERSPAN Session ID only has 10-bit. 
Since we reuse 1717 * 32-bit key field as ID, check it's range. 1718 */ 1719 if (data[IFLA_GRE_IKEY] && 1720 (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK)) 1721 return -EINVAL; 1722 1723 if (data[IFLA_GRE_OKEY] && 1724 (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK)) 1725 return -EINVAL; 1726 1727 if (data[IFLA_GRE_ERSPAN_VER]) { 1728 ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); 1729 if (ver != 1 && ver != 2) 1730 return -EINVAL; 1731 } 1732 1733 if (ver == 1) { 1734 if (data[IFLA_GRE_ERSPAN_INDEX]) { 1735 u32 index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]); 1736 1737 if (index & ~INDEX_MASK) 1738 return -EINVAL; 1739 } 1740 } else if (ver == 2) { 1741 if (data[IFLA_GRE_ERSPAN_DIR]) { 1742 u16 dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]); 1743 1744 if (dir & ~(DIR_MASK >> DIR_OFFSET)) 1745 return -EINVAL; 1746 } 1747 1748 if (data[IFLA_GRE_ERSPAN_HWID]) { 1749 u16 hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]); 1750 1751 if (hwid & ~(HWID_MASK >> HWID_OFFSET)) 1752 return -EINVAL; 1753 } 1754 } 1755 1756 return 0; 1757 } 1758 1759 static void ip6erspan_set_version(struct nlattr *data[], 1760 struct __ip6_tnl_parm *parms) 1761 { 1762 if (!data) 1763 return; 1764 1765 parms->erspan_ver = 1; 1766 if (data[IFLA_GRE_ERSPAN_VER]) 1767 parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); 1768 1769 if (parms->erspan_ver == 1) { 1770 if (data[IFLA_GRE_ERSPAN_INDEX]) 1771 parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]); 1772 } else if (parms->erspan_ver == 2) { 1773 if (data[IFLA_GRE_ERSPAN_DIR]) 1774 parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]); 1775 if (data[IFLA_GRE_ERSPAN_HWID]) 1776 parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]); 1777 } 1778 } 1779 1780 static void ip6gre_netlink_parms(struct nlattr *data[], 1781 struct __ip6_tnl_parm *parms) 1782 { 1783 memset(parms, 0, sizeof(*parms)); 1784 1785 if (!data) 1786 return; 1787 1788 if (data[IFLA_GRE_LINK]) 1789 parms->link = nla_get_u32(data[IFLA_GRE_LINK]); 1790 1791 if (data[IFLA_GRE_IFLAGS]) 1792 parms->i_flags = gre_flags_to_tnl_flags( 1793 nla_get_be16(data[IFLA_GRE_IFLAGS])); 1794 1795 if (data[IFLA_GRE_OFLAGS]) 1796 parms->o_flags = gre_flags_to_tnl_flags( 1797 nla_get_be16(data[IFLA_GRE_OFLAGS])); 1798 1799 if (data[IFLA_GRE_IKEY]) 1800 parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]); 1801 1802 if (data[IFLA_GRE_OKEY]) 1803 parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]); 1804 1805 if (data[IFLA_GRE_LOCAL]) 1806 parms->laddr = nla_get_in6_addr(data[IFLA_GRE_LOCAL]); 1807 1808 if (data[IFLA_GRE_REMOTE]) 1809 parms->raddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]); 1810 1811 if (data[IFLA_GRE_TTL]) 1812 parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]); 1813 1814 if (data[IFLA_GRE_ENCAP_LIMIT]) 1815 parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]); 1816 1817 if (data[IFLA_GRE_FLOWINFO]) 1818 parms->flowinfo = nla_get_be32(data[IFLA_GRE_FLOWINFO]); 1819 1820 if (data[IFLA_GRE_FLAGS]) 1821 parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]); 1822 1823 if (data[IFLA_GRE_FWMARK]) 1824 parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]); 1825 1826 if (data[IFLA_GRE_COLLECT_METADATA]) 1827 parms->collect_md = true; 1828 } 1829 1830 static int ip6gre_tap_init(struct net_device *dev) 1831 { 1832 int ret; 1833 1834 ret = ip6gre_tunnel_init_common(dev); 1835 if (ret) 1836 return ret; 1837 1838 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1839 1840 return 0; 1841 } 1842 1843 static const struct net_device_ops ip6gre_tap_netdev_ops = { 1844 .ndo_init = ip6gre_tap_init, 1845 .ndo_uninit = 
ip6gre_tunnel_uninit, 1846 .ndo_start_xmit = ip6gre_tunnel_xmit, 1847 .ndo_set_mac_address = eth_mac_addr, 1848 .ndo_validate_addr = eth_validate_addr, 1849 .ndo_change_mtu = ip6_tnl_change_mtu, 1850 .ndo_get_stats64 = dev_get_tstats64, 1851 .ndo_get_iflink = ip6_tnl_get_iflink, 1852 }; 1853 1854 static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel) 1855 { 1856 int t_hlen; 1857 1858 tunnel->tun_hlen = 8; 1859 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen + 1860 erspan_hdr_len(tunnel->parms.erspan_ver); 1861 1862 t_hlen = tunnel->hlen + sizeof(struct ipv6hdr); 1863 tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen; 1864 return t_hlen; 1865 } 1866 1867 static int ip6erspan_tap_init(struct net_device *dev) 1868 { 1869 struct ip6_tnl *tunnel; 1870 int t_hlen; 1871 int ret; 1872 1873 tunnel = netdev_priv(dev); 1874 1875 tunnel->dev = dev; 1876 tunnel->net = dev_net(dev); 1877 strcpy(tunnel->parms.name, dev->name); 1878 1879 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 1880 if (!dev->tstats) 1881 return -ENOMEM; 1882 1883 ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL); 1884 if (ret) 1885 goto cleanup_alloc_pcpu_stats; 1886 1887 ret = gro_cells_init(&tunnel->gro_cells, dev); 1888 if (ret) 1889 goto cleanup_dst_cache_init; 1890 1891 t_hlen = ip6erspan_calc_hlen(tunnel); 1892 dev->mtu = ETH_DATA_LEN - t_hlen; 1893 if (dev->type == ARPHRD_ETHER) 1894 dev->mtu -= ETH_HLEN; 1895 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1896 dev->mtu -= 8; 1897 1898 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1899 ip6erspan_tnl_link_config(tunnel, 1); 1900 1901 netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL); 1902 return 0; 1903 1904 cleanup_dst_cache_init: 1905 dst_cache_destroy(&tunnel->dst_cache); 1906 cleanup_alloc_pcpu_stats: 1907 free_percpu(dev->tstats); 1908 dev->tstats = NULL; 1909 return ret; 1910 } 1911 1912 static const struct net_device_ops ip6erspan_netdev_ops = { 1913 .ndo_init = ip6erspan_tap_init, 1914 .ndo_uninit = ip6erspan_tunnel_uninit, 1915 .ndo_start_xmit = ip6erspan_tunnel_xmit, 1916 .ndo_set_mac_address = eth_mac_addr, 1917 .ndo_validate_addr = eth_validate_addr, 1918 .ndo_change_mtu = ip6_tnl_change_mtu, 1919 .ndo_get_stats64 = dev_get_tstats64, 1920 .ndo_get_iflink = ip6_tnl_get_iflink, 1921 }; 1922 1923 static void ip6gre_tap_setup(struct net_device *dev) 1924 { 1925 1926 ether_setup(dev); 1927 1928 dev->max_mtu = 0; 1929 dev->netdev_ops = &ip6gre_tap_netdev_ops; 1930 dev->needs_free_netdev = true; 1931 dev->priv_destructor = ip6gre_dev_free; 1932 1933 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1934 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1935 netif_keep_dst(dev); 1936 } 1937 1938 static bool ip6gre_netlink_encap_parms(struct nlattr *data[], 1939 struct ip_tunnel_encap *ipencap) 1940 { 1941 bool ret = false; 1942 1943 memset(ipencap, 0, sizeof(*ipencap)); 1944 1945 if (!data) 1946 return ret; 1947 1948 if (data[IFLA_GRE_ENCAP_TYPE]) { 1949 ret = true; 1950 ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]); 1951 } 1952 1953 if (data[IFLA_GRE_ENCAP_FLAGS]) { 1954 ret = true; 1955 ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]); 1956 } 1957 1958 if (data[IFLA_GRE_ENCAP_SPORT]) { 1959 ret = true; 1960 ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]); 1961 } 1962 1963 if (data[IFLA_GRE_ENCAP_DPORT]) { 1964 ret = true; 1965 ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]); 1966 } 1967 1968 return ret; 1969 } 1970 1971 static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev, 1972 struct nlattr 
*tb[], struct nlattr *data[], 1973 struct netlink_ext_ack *extack) 1974 { 1975 struct ip6_tnl *nt; 1976 struct ip_tunnel_encap ipencap; 1977 int err; 1978 1979 nt = netdev_priv(dev); 1980 1981 if (ip6gre_netlink_encap_parms(data, &ipencap)) { 1982 int err = ip6_tnl_encap_setup(nt, &ipencap); 1983 1984 if (err < 0) 1985 return err; 1986 } 1987 1988 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS]) 1989 eth_hw_addr_random(dev); 1990 1991 nt->dev = dev; 1992 nt->net = dev_net(dev); 1993 1994 err = register_netdevice(dev); 1995 if (err) 1996 goto out; 1997 1998 if (tb[IFLA_MTU]) 1999 ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU])); 2000 2001 out: 2002 return err; 2003 } 2004 2005 static int ip6gre_newlink(struct net *src_net, struct net_device *dev, 2006 struct nlattr *tb[], struct nlattr *data[], 2007 struct netlink_ext_ack *extack) 2008 { 2009 struct ip6_tnl *nt = netdev_priv(dev); 2010 struct net *net = dev_net(dev); 2011 struct ip6gre_net *ign; 2012 int err; 2013 2014 ip6gre_netlink_parms(data, &nt->parms); 2015 ign = net_generic(net, ip6gre_net_id); 2016 2017 if (nt->parms.collect_md) { 2018 if (rtnl_dereference(ign->collect_md_tun)) 2019 return -EEXIST; 2020 } else { 2021 if (ip6gre_tunnel_find(net, &nt->parms, dev->type)) 2022 return -EEXIST; 2023 } 2024 2025 err = ip6gre_newlink_common(src_net, dev, tb, data, extack); 2026 if (!err) { 2027 ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]); 2028 ip6gre_tunnel_link_md(ign, nt); 2029 ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt); 2030 } 2031 return err; 2032 } 2033 2034 static struct ip6_tnl * 2035 ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[], 2036 struct nlattr *data[], struct __ip6_tnl_parm *p_p, 2037 struct netlink_ext_ack *extack) 2038 { 2039 struct ip6_tnl *t, *nt = netdev_priv(dev); 2040 struct net *net = nt->net; 2041 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 2042 struct ip_tunnel_encap ipencap; 2043 2044 if (dev == ign->fb_tunnel_dev) 2045 return ERR_PTR(-EINVAL); 2046 2047 if (ip6gre_netlink_encap_parms(data, &ipencap)) { 2048 int err = ip6_tnl_encap_setup(nt, &ipencap); 2049 2050 if (err < 0) 2051 return ERR_PTR(err); 2052 } 2053 2054 ip6gre_netlink_parms(data, p_p); 2055 2056 t = ip6gre_tunnel_locate(net, p_p, 0); 2057 2058 if (t) { 2059 if (t->dev != dev) 2060 return ERR_PTR(-EEXIST); 2061 } else { 2062 t = nt; 2063 } 2064 2065 return t; 2066 } 2067 2068 static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[], 2069 struct nlattr *data[], 2070 struct netlink_ext_ack *extack) 2071 { 2072 struct ip6_tnl *t = netdev_priv(dev); 2073 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); 2074 struct __ip6_tnl_parm p; 2075 2076 t = ip6gre_changelink_common(dev, tb, data, &p, extack); 2077 if (IS_ERR(t)) 2078 return PTR_ERR(t); 2079 2080 ip6gre_tunnel_unlink_md(ign, t); 2081 ip6gre_tunnel_unlink(ign, t); 2082 ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]); 2083 ip6gre_tunnel_link_md(ign, t); 2084 ip6gre_tunnel_link(ign, t); 2085 return 0; 2086 } 2087 2088 static void ip6gre_dellink(struct net_device *dev, struct list_head *head) 2089 { 2090 struct net *net = dev_net(dev); 2091 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 2092 2093 if (dev != ign->fb_tunnel_dev) 2094 unregister_netdevice_queue(dev, head); 2095 } 2096 2097 static size_t ip6gre_get_size(const struct net_device *dev) 2098 { 2099 return 2100 /* IFLA_GRE_LINK */ 2101 nla_total_size(4) + 2102 /* IFLA_GRE_IFLAGS */ 2103 nla_total_size(2) + 2104 /* IFLA_GRE_OFLAGS */ 2105 nla_total_size(2) + 2106 /* 
static size_t ip6gre_get_size(const struct net_device *dev)
{
        return
                /* IFLA_GRE_LINK */
                nla_total_size(4) +
                /* IFLA_GRE_IFLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_OFLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_IKEY */
                nla_total_size(4) +
                /* IFLA_GRE_OKEY */
                nla_total_size(4) +
                /* IFLA_GRE_LOCAL */
                nla_total_size(sizeof(struct in6_addr)) +
                /* IFLA_GRE_REMOTE */
                nla_total_size(sizeof(struct in6_addr)) +
                /* IFLA_GRE_TTL */
                nla_total_size(1) +
                /* IFLA_GRE_ENCAP_LIMIT */
                nla_total_size(1) +
                /* IFLA_GRE_FLOWINFO */
                nla_total_size(4) +
                /* IFLA_GRE_FLAGS */
                nla_total_size(4) +
                /* IFLA_GRE_ENCAP_TYPE */
                nla_total_size(2) +
                /* IFLA_GRE_ENCAP_FLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_ENCAP_SPORT */
                nla_total_size(2) +
                /* IFLA_GRE_ENCAP_DPORT */
                nla_total_size(2) +
                /* IFLA_GRE_COLLECT_METADATA */
                nla_total_size(0) +
                /* IFLA_GRE_FWMARK */
                nla_total_size(4) +
                /* IFLA_GRE_ERSPAN_INDEX */
                nla_total_size(4) +
                0;
}

static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);
        struct __ip6_tnl_parm *p = &t->parms;
        __be16 o_flags = p->o_flags;

        if (p->erspan_ver == 1 || p->erspan_ver == 2) {
                if (!p->collect_md)
                        o_flags |= TUNNEL_KEY;

                if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
                        goto nla_put_failure;

                if (p->erspan_ver == 1) {
                        if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
                                goto nla_put_failure;
                } else {
                        if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
                                goto nla_put_failure;
                        if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
                                goto nla_put_failure;
                }
        }

        if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
            nla_put_be16(skb, IFLA_GRE_IFLAGS,
                         gre_tnl_flags_to_gre_flags(p->i_flags)) ||
            nla_put_be16(skb, IFLA_GRE_OFLAGS,
                         gre_tnl_flags_to_gre_flags(o_flags)) ||
            nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
            nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
            nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
            nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
            nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
            nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
            nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
            nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
            nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
                goto nla_put_failure;

        if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
                        t->encap.type) ||
            nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
                         t->encap.sport) ||
            nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
                         t->encap.dport) ||
            nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
                        t->encap.flags))
                goto nla_put_failure;

        if (p->collect_md) {
                if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
                        goto nla_put_failure;
        }

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
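/* Attribute policy shared by all three link types. IFLA_GRE_LOCAL and
 * IFLA_GRE_REMOTE are validated by length (a full IPv6 address); the
 * remaining attributes are validated by netlink type.
 */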
static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
        [IFLA_GRE_LINK]             = { .type = NLA_U32 },
        [IFLA_GRE_IFLAGS]           = { .type = NLA_U16 },
        [IFLA_GRE_OFLAGS]           = { .type = NLA_U16 },
        [IFLA_GRE_IKEY]             = { .type = NLA_U32 },
        [IFLA_GRE_OKEY]             = { .type = NLA_U32 },
        [IFLA_GRE_LOCAL]            = { .len = sizeof_field(struct ipv6hdr, saddr) },
        [IFLA_GRE_REMOTE]           = { .len = sizeof_field(struct ipv6hdr, daddr) },
        [IFLA_GRE_TTL]              = { .type = NLA_U8 },
        [IFLA_GRE_ENCAP_LIMIT]      = { .type = NLA_U8 },
        [IFLA_GRE_FLOWINFO]         = { .type = NLA_U32 },
        [IFLA_GRE_FLAGS]            = { .type = NLA_U32 },
        [IFLA_GRE_ENCAP_TYPE]       = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_FLAGS]      = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_SPORT]      = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_DPORT]      = { .type = NLA_U16 },
        [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
        [IFLA_GRE_FWMARK]           = { .type = NLA_U32 },
        [IFLA_GRE_ERSPAN_INDEX]     = { .type = NLA_U32 },
        [IFLA_GRE_ERSPAN_VER]       = { .type = NLA_U8 },
        [IFLA_GRE_ERSPAN_DIR]       = { .type = NLA_U8 },
        [IFLA_GRE_ERSPAN_HWID]      = { .type = NLA_U16 },
};

static void ip6erspan_tap_setup(struct net_device *dev)
{
        ether_setup(dev);

        dev->max_mtu = 0;
        dev->netdev_ops = &ip6erspan_netdev_ops;
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6gre_dev_free;

        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        netif_keep_dst(dev);
}

static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
                             struct nlattr *tb[], struct nlattr *data[],
                             struct netlink_ext_ack *extack)
{
        struct ip6_tnl *nt = netdev_priv(dev);
        struct net *net = dev_net(dev);
        struct ip6gre_net *ign;
        int err;

        ip6gre_netlink_parms(data, &nt->parms);
        ip6erspan_set_version(data, &nt->parms);
        ign = net_generic(net, ip6gre_net_id);

        if (nt->parms.collect_md) {
                if (rtnl_dereference(ign->collect_md_tun_erspan))
                        return -EEXIST;
        } else {
                if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
                        return -EEXIST;
        }

        err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
        if (!err) {
                ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
                ip6erspan_tunnel_link_md(ign, nt);
                ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
        }
        return err;
}

static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
        ip6gre_tnl_link_config_common(t);
        ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
}

static int ip6erspan_tnl_change(struct ip6_tnl *t,
                                const struct __ip6_tnl_parm *p, int set_mtu)
{
        ip6gre_tnl_copy_tnl_parm(t, p);
        ip6erspan_tnl_link_config(t, set_mtu);
        return 0;
}

static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
                                struct nlattr *data[],
                                struct netlink_ext_ack *extack)
{
        struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
        struct __ip6_tnl_parm p;
        struct ip6_tnl *t;

        t = ip6gre_changelink_common(dev, tb, data, &p, extack);
        if (IS_ERR(t))
                return PTR_ERR(t);

        ip6erspan_set_version(data, &p);
        ip6gre_tunnel_unlink_md(ign, t);
        ip6gre_tunnel_unlink(ign, t);
        ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
        ip6erspan_tunnel_link_md(ign, t);
        ip6gre_tunnel_link(ign, t);
        return 0;
}

static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
        .kind           = "ip6gre",
        .maxtype        = IFLA_GRE_MAX,
        .policy         = ip6gre_policy,
        .priv_size      = sizeof(struct ip6_tnl),
        .setup          = ip6gre_tunnel_setup,
        .validate       = ip6gre_tunnel_validate,
        .newlink        = ip6gre_newlink,
        .changelink     = ip6gre_changelink,
        .dellink        = ip6gre_dellink,
        .get_size       = ip6gre_get_size,
        .fill_info      = ip6gre_fill_info,
        .get_link_net   = ip6_tnl_get_link_net,
};
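/* The Ethernet-style variants below ("ip6gretap" and "ip6erspan") reuse the
 * same policy and dump helpers as "ip6gre" but install their own setup and
 * validate handlers; ip6erspan additionally has dedicated newlink/changelink
 * paths that track the ERSPAN version.
 */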
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
        .kind           = "ip6gretap",
        .maxtype        = IFLA_GRE_MAX,
        .policy         = ip6gre_policy,
        .priv_size      = sizeof(struct ip6_tnl),
        .setup          = ip6gre_tap_setup,
        .validate       = ip6gre_tap_validate,
        .newlink        = ip6gre_newlink,
        .changelink     = ip6gre_changelink,
        .get_size       = ip6gre_get_size,
        .fill_info      = ip6gre_fill_info,
        .get_link_net   = ip6_tnl_get_link_net,
};

static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
        .kind           = "ip6erspan",
        .maxtype        = IFLA_GRE_MAX,
        .policy         = ip6gre_policy,
        .priv_size      = sizeof(struct ip6_tnl),
        .setup          = ip6erspan_tap_setup,
        .validate       = ip6erspan_tap_validate,
        .newlink        = ip6erspan_newlink,
        .changelink     = ip6erspan_changelink,
        .get_size       = ip6gre_get_size,
        .fill_info      = ip6gre_fill_info,
        .get_link_net   = ip6_tnl_get_link_net,
};

/*
 *      And now the module code and kernel interface.
 */

static int __init ip6gre_init(void)
{
        int err;

        pr_info("GRE over IPv6 tunneling driver\n");

        err = register_pernet_device(&ip6gre_net_ops);
        if (err < 0)
                return err;

        err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
        if (err < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                goto add_proto_failed;
        }

        err = rtnl_link_register(&ip6gre_link_ops);
        if (err < 0)
                goto rtnl_link_failed;

        err = rtnl_link_register(&ip6gre_tap_ops);
        if (err < 0)
                goto tap_ops_failed;

        err = rtnl_link_register(&ip6erspan_tap_ops);
        if (err < 0)
                goto erspan_link_failed;

out:
        return err;

erspan_link_failed:
        rtnl_link_unregister(&ip6gre_tap_ops);
tap_ops_failed:
        rtnl_link_unregister(&ip6gre_link_ops);
rtnl_link_failed:
        inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
add_proto_failed:
        unregister_pernet_device(&ip6gre_net_ops);
        goto out;
}

static void __exit ip6gre_fini(void)
{
        rtnl_link_unregister(&ip6gre_tap_ops);
        rtnl_link_unregister(&ip6gre_link_ops);
        rtnl_link_unregister(&ip6erspan_tap_ops);
        inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
        unregister_pernet_device(&ip6gre_net_ops);
}

module_init(ip6gre_init);
module_exit(ip6gre_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
MODULE_ALIAS_RTNL_LINK("ip6gre");
MODULE_ALIAS_RTNL_LINK("ip6gretap");
MODULE_ALIAS_RTNL_LINK("ip6erspan");
MODULE_ALIAS_NETDEV("ip6gre0");
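/* Illustrative usage, not part of the driver: the MODULE_ALIAS_RTNL_LINK
 * entries above let the module autoload when one of the registered link
 * kinds is requested, e.g. via iproute2 (addresses and device names are
 * placeholders, and exact option syntax may vary by iproute2 version):
 *
 *   ip link add name gre1 type ip6gre local 2001:db8::1 remote 2001:db8::2
 *   ip link add name gretap1 type ip6gretap local 2001:db8::1 remote 2001:db8::2
 *   ip link add name erspan1 type ip6erspan seq key 10 \
 *           local 2001:db8::1 remote 2001:db8::2 erspan_ver 1 erspan 123
 */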