/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ipip.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause a complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local TTL)
   and silently drop the packet when it expires. It is a good
   solution, but it would require maintaining a new variable in EVERY
   skb, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter; since when we enter the first ndo_xmit(), cpu migration is
   forbidden. We force an exit if this counter reaches RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but they would really
   kill the network. The IP hop limit plays the role of "t->recursion" in
   this case, if we copy it from the packet being encapsulated to the
   upper header. It is a very good solution, but it introduces two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output would be
     even more informative. This idea turned out to be wrong: only Linux
     complies with RFC 1812 now (yes, guys, Linux is the only true router
     now :-)); all other routers (at least in my neighbourhood) return
     only 8 bytes of payload. That is the end of it.

   Hence, if we want OSPF to work, or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect inner
   encapsulation made by our node. This is difficult or even impossible,
   especially taking fragmentation into account. To be short, TTL is not
   a solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed the PMTU are pruned) and the tunnel MTU
   rapidly degrades to a value < 68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when the packets being encapsulated
   have DF set. But it is not our problem! Nobody could accuse us;
   we did all that we could. Even if it was your gated that injected
   the fatal route into the network, even if it was you who configured
   the fatal static route: you are innocent. :-)


   3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
   practically identical code. It would be good to glue them together,
   but it is not obvious how to make them modular. sit is an integral
   part of IPv6, while ipip and gre are naturally modular. We could
   extract the common parts (hash table, ioctl etc.) into a separate
   module (ip_tunnel.c).

   Alexey Kuznetsov.
 */

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void ipgre_tunnel_setup(struct net_device *dev);
static int ipgre_tunnel_bind_dev(struct net_device *dev);

/* Fallback tunnel: no source, no destination, no key, no options */

#define HASH_SIZE	16

static int ipgre_net_id __read_mostly;
struct ipgre_net {
	struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];

	struct net_device *fb_tunnel_dev;
};

/* Tunnel hash table */

/*
   4 hash tables:

   3: (remote,local)
   2: (remote,*)
   1: (*,local)
   0: (*,*)

   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if they do not match a configured keyless tunnel,
   will match the fallback tunnel.
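
   The bucket index is computed by the HASH() macro below: it XORs
   bits 4-7 into bits 0-3 of the raw 32-bit value (key or address) and
   masks the result to 4 bits, matching HASH_SIZE == 16. Buckets are
   selected by HASH(key), additionally XORed with HASH(remote) for the
   two tables keyed on the remote address; see __ipgre_bucket() and
   ipgre_tunnel_lookup().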
 */

#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)

#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]

static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
						   struct rtnl_link_stats64 *tot)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			tx_packets = tstats->tx_packets;
			rx_bytes = tstats->rx_bytes;
			tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
	}

	tot->multicast = dev->stats.multicast;
	tot->rx_crc_errors = dev->stats.rx_crc_errors;
	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;
	tot->rx_errors = dev->stats.rx_errors;

	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
	tot->tx_errors = dev->stats.tx_errors;

	return tot;
}

/* Does the key in the tunnel parameters match the packet? */
static bool ipgre_key_match(const struct ip_tunnel_parm *p,
			    __be16 flags, __be32 key)
{
	if (p->i_flags & GRE_KEY) {
		if (flags & GRE_KEY)
			return key == p->i_key;
		else
			return false;	/* key expected, none present */
	} else
		return !(flags & GRE_KEY);
}

/* Given src, dst and key, find the appropriate tunnel for input.

   An exact match on (link, device type) returns immediately; otherwise
   the best candidate is tracked with a two-bit score: bit 0 set means
   the link differs, bit 1 set means the device type differs, and the
   lowest score wins. */

static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
					     __be32 remote, __be32 local,
					     __be16 flags, __be32 key,
					     __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned int h0 = HASH(remote);
	unsigned int h1 = HASH(key);
	struct ip_tunnel *t, *cand = NULL;
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
		       ARPHRD_ETHER : ARPHRD_IPGRE;
	int score, cand_score = 4;

	for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ipgre_key_match(&t->parms, flags, key))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
		if (remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ipgre_key_match(&t->parms, flags, key))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
		if ((local != t->parms.iph.saddr &&
		     (local != t->parms.iph.daddr ||
		      !ipv4_is_multicast(local))) ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ipgre_key_match(&t->parms, flags, key))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	if (cand != NULL)
		return cand;

	dev = ign->fb_tunnel_dev;
	if (dev->flags & IFF_UP)
		return netdev_priv(dev);

	return NULL;
}

static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
					       struct ip_tunnel_parm *parms)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	unsigned int h = HASH(key);
	int prio = 0;

	if (local)
		prio |= 1;
	if (remote && !ipv4_is_multicast(remote)) {
		prio |= 2;
		h ^= HASH(remote);
	}

	return &ign->tunnels[prio][h];
}

static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
						    struct ip_tunnel *t)
{
	return __ipgre_bucket(ign, &t->parms);
}

static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp;
	struct ip_tunnel *iter;

	for (tp = ipgre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
					   struct ip_tunnel_parm *parms,
					   int type)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	int link = parms->link;
	struct ip_tunnel *t;
	struct ip_tunnel __rcu **tp;
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	for (tp = __ipgre_bucket(ign, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next)
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    key == t->parms.i_key &&
		    link == t->parms.link &&
		    type == t->dev->type)
			break;

	return t;
}

static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
					     struct ip_tunnel_parm *parms,
					     int create)
{
	struct ip_tunnel *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE);
	if (t || !create)
		return t;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		strcpy(name, "gre%d");

	dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ipgre_link_ops;

	dev->mtu = ipgre_tunnel_bind_dev(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}

static void ipgre_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	ipgre_tunnel_unlink(ign, netdev_priv(dev));
	dev_put(dev);
}


static void ipgre_err(struct sk_buff *skb, u32 info)
{

	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco "wise men" put the GRE key in the third word
	   of the GRE header. That makes it impossible to maintain even
	   soft state for keyed GRE tunnels with checksums enabled.
	   Tell them "thank you".

	   Well, I wonder: RFC 1812 was written by a Cisco employee,
	   so why the hell do these idiots break standards established
	   by themselves???
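
	   For reference, the GRE header parsed below (RFC 2784/2890) is
	   a 2-byte flag/version word and a 2-byte protocol word,
	   optionally followed by one 4-byte word for each of checksum (C),
	   key (K) and sequence number (S), in that order. So with C and K
	   both set the key is the third 32-bit word, which is why grehlen
	   is grown by 4 for the key and by 4 more when a checksum is
	   present.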
	 */

	const struct iphdr *iph = (const struct iphdr *)skb->data;
	__be16 *p = (__be16 *)(skb->data+(iph->ihl<<2));
	int grehlen = (iph->ihl<<2) + 4;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	__be16 flags;
	__be32 key = 0;

	flags = p[0];
	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
		if (flags&(GRE_VERSION|GRE_ROUTING))
			return;
		if (flags&GRE_KEY) {
			grehlen += 4;
			if (flags&GRE_CSUM)
				grehlen += 4;
		}
	}

	/* If only 8 bytes of payload were returned, keyed messages
	   will be dropped here. */
	if (skb_headlen(skb) < grehlen)
		return;

	if (flags & GRE_KEY)
		key = *(((__be32 *)p) + (grehlen / 4) - 1);

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   RFC 2003 contains "deep thoughts" about NET_UNREACH;
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;

	case ICMP_REDIRECT:
		break;
	}

	t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
				flags, key, p[1]);

	if (t == NULL)
		return;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 t->parms.link, 0, IPPROTO_GRE, 0);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
			      IPPROTO_GRE, 0);
		return;
	}
	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}

static inline u8
ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
{
	u8 inner = 0;
	if (skb->protocol == htons(ETH_P_IP))
		inner = old_iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
	return INET_ECN_encapsulate(tos, inner);
}

static int ipgre_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	u8     *h;
	__be16 flags;
	__sum16 csum = 0;
	__be32 key = 0;
	u32    seqno = 0;
	struct ip_tunnel *tunnel;
	int    offset = 4;
	__be16 gre_proto;
	int    err;

	if (!pskb_may_pull(skb, 16))
		goto drop;

	iph = ip_hdr(skb);
	h = skb->data;
	flags = *(__be16 *)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
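		   - If the checksum bit is set, the GRE checksum covers
		     the whole GRE packet, so a CHECKSUM_COMPLETE value
		     already computed by the hardware can simply be folded
		     and checked; otherwise the checksum is recomputed with
		     __skb_checksum_complete().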
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop;

		if (flags&GRE_CSUM) {
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32 *)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32 *)(h + offset));
			offset += 4;
		}
	}

	gre_proto = *(__be16 *)(h + 2);

	tunnel = ipgre_tunnel_lookup(skb->dev,
				     iph->saddr, iph->daddr, flags, key,
				     gre_proto);
	if (tunnel) {
		struct pcpu_tstats *tstats;

		secpath_reset(skb);

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, skip the extra 4 bytes in
		 *   the GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (rt_is_output_route(skb_rtable(skb)))
				goto drop;
			tunnel->dev->stats.multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			tunnel->dev->stats.rx_crc_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags&GRE_SEQ) {
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
				tunnel->dev->stats.rx_fifo_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		/* Warning: All skb pointers will be invalidated!
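		   pskb_may_pull() below may reallocate the skb header and
		   eth_type_trans() pulls the Ethernet header, which is why
		   iph is reloaded from the skb afterwards.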
		 */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				tunnel->dev->stats.rx_length_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}

			iph = ip_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		__skb_tunnel_rx(skb, tunnel->dev);

		skb_reset_network_header(skb);
		err = IP_ECN_decapsulate(iph, skb);
		if (unlikely(err)) {
			if (log_ecn_error)
				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
						     &iph->saddr, iph->tos);
			if (err > 1) {
				++tunnel->dev->stats.rx_frame_errors;
				++tunnel->dev->stats.rx_errors;
				goto drop;
			}
		}

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		u64_stats_update_begin(&tstats->syncp);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;
		u64_stats_update_end(&tstats->syncp);

		gro_cells_receive(&tunnel->gro_cells, skb);
		return 0;
	}
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	kfree_skb(skb);
	return 0;
}

static struct sk_buff *handle_offloads(struct sk_buff *skb)
{
	int err;

	if (skb_is_gso(skb)) {
		err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			goto error;
		skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
		return skb;
	}
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		skb->ip_summed = CHECKSUM_NONE;

	return skb;

error:
	kfree_skb(skb);
	return ERR_PTR(err);
}

static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *old_iph;
	const struct iphdr *tiph;
	struct flowi4 fl4;
	u8     tos;
	__be16 df;
	struct rtable *rt;		/* Route to the other host */
	struct net_device *tdev;	/* Device to other host */
	struct iphdr *iph;		/* Our new IP header */
	unsigned int max_headroom;	/* The extra header space needed */
	int    gre_hlen;
	__be32 dst;
	int    mtu;
	u8     ttl;
	int    err;
	int    pkt_len;

	skb = handle_offloads(skb);
	if (IS_ERR(skb)) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	if (!skb->encapsulation) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	old_iph = ip_hdr(skb);

	if (dev->type == ARPHRD_ETHER)
		IPCB(skb)->flags = 0;

	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
		gre_hlen = 0;
		if (skb->protocol == htons(ETH_P_IP))
			tiph = (const struct iphdr *)skb->data;
		else
			tiph = &tunnel->parms.iph;
	} else {
		gre_hlen = tunnel->hlen;
		tiph = &tunnel->parms.iph;
	}

	if ((dst = tiph->daddr) == 0) {
		/* NBMA tunnel */

		if (skb_dst(skb) == NULL) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			dst = rt_nexthop(rt, old_iph->daddr);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			const struct in6_addr *addr6;
			struct neighbour *neigh;
			bool do_tx_error_icmp;
			int addr_type;

			neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
			if (neigh == NULL)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				do_tx_error_icmp = true;
			else {
				do_tx_error_icmp = false;
				dst = addr6->s6_addr32[3];
			}
			neigh_release(neigh);
			if (do_tx_error_icmp)
				goto tx_error_icmp;
		}
#endif
		else
			goto tx_error;
	}

	ttl = tiph->ttl;
	tos = tiph->tos;
	if (tos & 0x1) {
		tos &= ~0x1;
		if (skb->protocol == htons(ETH_P_IP))
			tos = old_iph->tos;
		else if (skb->protocol == htons(ETH_P_IPV6))
			tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
	}

	rt = ip_route_output_gre(dev_net(dev), &fl4, dst, tiph->saddr,
				 tunnel->parms.o_key, RT_TOS(tos),
				 tunnel->parms.link);
	if (IS_ERR(rt)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}
	tdev = rt->dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	df = tiph->frag_off;
	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		df |= (old_iph->frag_off&htons(IP_DF));

		if (!skb_is_gso(skb) &&
		    (old_iph->frag_off&htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (!skb_is_gso(skb) &&
		    mtu >= IPV6_MIN_MTU &&
		    mtu < skb->len - tunnel->hlen + gre_hlen) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#endif

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (max_headroom > dev->needed_headroom)
			dev->needed_headroom = max_headroom;
		if (!new_skb) {
			ip_rt_put(rt);
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		old_iph = ip_hdr(skb);
		/* Warning: tiph might now point to freed memory */
	}

	skb_push(skb, gre_hlen);
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, sizeof(*iph));
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/*
	 *	Push down and install the IP and GRE headers.
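	 *	The outer IPv4 header is built by hand (ihl is fixed at 5
	 *	since we never emit outer options), followed by the 2-byte
	 *	GRE flag and protocol words and any options enabled in
	 *	o_flags, which are written back to front below.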
	 */

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = IPPROTO_GRE;
	iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb);
	iph->daddr = fl4.daddr;
	iph->saddr = fl4.saddr;
	iph->ttl = ttl;
	iph->id = 0;

	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			iph->ttl = old_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
#endif
		else
			iph->ttl = ip4_dst_hoplimit(&rt->dst);
	}

	((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
	((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
				   htons(ETH_P_TEB) : skb->protocol;

	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
		__be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);

		if (tunnel->parms.o_flags&GRE_SEQ) {
			++tunnel->o_seqno;
			*ptr = htonl(tunnel->o_seqno);
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_KEY) {
			*ptr = tunnel->parms.o_key;
			ptr--;
		}
		/* Skip GRE checksum if skb is getting offloaded. */
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE) &&
		    (tunnel->parms.o_flags&GRE_CSUM)) {
			int offset = skb_transport_offset(skb);

			if (skb_has_shared_frag(skb)) {
				err = __skb_linearize(skb);
				if (err)
					goto tx_error;
			}

			*ptr = 0;
			*(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
								 skb->len - offset,
								 0));
		}
	}

	nf_reset(skb);

	pkt_len = skb->len - skb_transport_offset(skb);
	err = ip_local_out(skb);
	if (likely(net_xmit_eval(err) == 0)) {
		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += pkt_len;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
	} else {
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	}
	return NETDEV_TX_OK;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
	dst_link_failure(skb);
#endif
tx_error:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int ipgre_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int addend = sizeof(struct iphdr) + 4;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	/* Guess the output device to choose a reasonable mtu and needed_headroom */

	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(dev_net(dev), &fl4,
					 iph->daddr, iph->saddr,
					 tunnel->parms.o_key,
					 RT_TOS(iph->tos),
					 tunnel->parms.link);
		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}

		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	/* Precalculate the GRE options length */
	if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (tunnel->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_KEY)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_SEQ)
			addend += 4;
	}
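	/* addend now covers the whole encapsulation overhead: the 20-byte
	 * outer IPv4 header, the 4-byte base GRE header, and 4 bytes for
	 * each enabled option (checksum, key, sequence number).
	 */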
	dev->needed_headroom = addend + hlen;
	mtu -= dev->hard_header_len + addend;

	if (mtu < 68)
		mtu = 68;

	tunnel->hlen = addend;
	/* TCP offload with GRE SEQ is not supported. */
	if (!(tunnel->parms.o_flags & GRE_SEQ)) {
		/* device supports encapsulated GSO offload */
		if (tdev && (tdev->hw_enc_features & NETIF_F_GRE_GSO)) {
			dev->features |= NETIF_F_TSO;
			dev->hw_features |= NETIF_F_TSO;
		} else {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}
	}

	return mtu;
}

static int
ipgre_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipgre_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				t = netdev_priv(dev);

				if (ipv4_is_multicast(p.iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p.iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}
				ipgre_tunnel_unlink(ign, t);
				synchronize_net();
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipgre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					dev->mtu = ipgre_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}

static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	if (new_mtu < 68 ||
	    new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.


   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could have made something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to the mbone,
   play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, the mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...

 */

static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
	__be16 *p = (__be16 *)(iph+1);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
	p[0] = t->parms.o_flags;
	p[1] = htons(type);

	/*
	 *	Set the source hardware address.
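	 *
	 *	For GRE the "hardware" addresses are the outer IPv4 tunnel
	 *	endpoints. Following the usual header_ops convention (cf.
	 *	eth_header()), a negative length is returned when the header
	 *	could not be completed because no destination is known yet.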
	 */

	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen;

	return -t->hlen;
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(dev_net(dev), &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (__in_dev_get_rtnl(dev) == NULL)
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;
		in_dev = inetdev_by_index(dev_net(dev), t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}

#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
	.ndo_get_stats64	= ipgre_get_stats64,
};

static void ipgre_dev_free(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	free_percpu(dev->tstats);
	free_netdev(dev);
}

#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->destructor		= ipgre_dev_free;

	dev->type		= ARPHRD_IPGRE;
	dev->needed_headroom	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 4;
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_NETNS_LOCAL;
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	struct iphdr *iph;
	int err;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	if (iph->daddr) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else
		dev->header_ops = &ipgre_header_ops;

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	return 0;
}

static void ipgre_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version = 4;
	iph->protocol = IPPROTO_GRE;
	iph->ihl = 5;
	tunnel->hlen = sizeof(struct iphdr) + 4;

	dev_hold(dev);
}


static const struct gre_protocol ipgre_protocol = {
	.handler	= ipgre_rcv,
	.err_handler	= ipgre_err,
};

static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
{
	int prio;

	for (prio = 0; prio < 4; prio++) {
		int h;
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip_tunnel *t;

			t = rtnl_dereference(ign->tunnels[prio][h]);

			while (t != NULL) {
				unregister_netdevice_queue(t->dev, head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}

static int __net_init ipgre_init_net(struct net *net)
{
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int err;

	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
					  ipgre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ign->fb_tunnel_dev, net);

	ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;

	if ((err = register_netdev(ign->fb_tunnel_dev)))
		goto err_reg_dev;

	rcu_assign_pointer(ign->tunnels_wc[0],
			   netdev_priv(ign->fb_tunnel_dev));
	return 0;

err_reg_dev:
	ipgre_dev_free(ign->fb_tunnel_dev);
err_alloc_dev:
	return err;
}

static void __net_exit ipgre_exit_net(struct net *net)
{
	struct ipgre_net *ign;
	LIST_HEAD(list);

	ign = net_generic(net, ipgre_net_id);
	rtnl_lock();
	ipgre_destroy_tunnels(ign, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ipgre_net),
};

static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data);
}

static void ipgre_netlink_parms(struct nlattr *data[],
				struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);
}

static int ipgre_tap_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	ipgre_tunnel_bind_dev(dev);

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static const struct net_device_ops ipgre_tap_netdev_ops = {
	.ndo_init		= ipgre_tap_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
	.ndo_get_stats64	= ipgre_get_stats64,
};

static void ipgre_tap_setup(struct net_device *dev)
{

	ether_setup(dev);

	dev->netdev_ops		= &ipgre_tap_netdev_ops;
	dev->destructor		= ipgre_dev_free;

	dev->iflink		= 0;
	dev->features		|= NETIF_F_NETNS_LOCAL;

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
}

static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int mtu;
	int err;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &nt->parms);

	if (ipgre_tunnel_find(net, &nt->parms, dev->type))
		return -EEXIST;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	mtu = ipgre_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	err = register_netdevice(dev);
	if (err)
		goto out;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);

out:
	return err;
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip_tunnel *t, *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	struct ip_tunnel_parm p;
	int mtu;

	if (dev == ign->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &p);

	t = ipgre_tunnel_locate(net, &p, 0);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p.iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p.iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}

		ipgre_tunnel_unlink(ign, t);
		t->parms.iph.saddr = p.iph.saddr;
		t->parms.iph.daddr = p.iph.daddr;
		t->parms.i_key = p.i_key;
		if (dev->type != ARPHRD_ETHER) {
			memcpy(dev->dev_addr, &p.iph.saddr, 4);
			memcpy(dev->broadcast, &p.iph.daddr, 4);
		}
		ipgre_tunnel_link(ign, t);
		netdev_state_change(dev);
	}

	t->parms.o_key = p.o_key;
	t->parms.iph.ttl = p.iph.ttl;
	t->parms.iph.tos = p.iph.tos;
	t->parms.iph.frag_off = p.iph.frag_off;

	if (t->parms.link != p.link) {
		t->parms.link = p.link;
		mtu = ipgre_tunnel_bind_dev(dev);
		if (!tb[IFLA_MTU])
			dev->mtu = mtu;
		netdev_state_change(dev);
	}

	return 0;
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};

/*
 *	And now the module code and kernel interface.
 */

static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

out:
	return err;

tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&ipgre_net_ops);
	goto out;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	unregister_pernet_device(&ipgre_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_NETDEV("gre0");