1 /* 2 * Linux NET3: GRE over IP protocol decoder. 3 * 4 * Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru) 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License 8 * as published by the Free Software Foundation; either version 9 * 2 of the License, or (at your option) any later version. 10 * 11 */ 12 13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 15 #include <linux/capability.h> 16 #include <linux/module.h> 17 #include <linux/types.h> 18 #include <linux/kernel.h> 19 #include <linux/slab.h> 20 #include <asm/uaccess.h> 21 #include <linux/skbuff.h> 22 #include <linux/netdevice.h> 23 #include <linux/in.h> 24 #include <linux/tcp.h> 25 #include <linux/udp.h> 26 #include <linux/if_arp.h> 27 #include <linux/if_vlan.h> 28 #include <linux/init.h> 29 #include <linux/in6.h> 30 #include <linux/inetdevice.h> 31 #include <linux/igmp.h> 32 #include <linux/netfilter_ipv4.h> 33 #include <linux/etherdevice.h> 34 #include <linux/if_ether.h> 35 36 #include <net/sock.h> 37 #include <net/ip.h> 38 #include <net/icmp.h> 39 #include <net/protocol.h> 40 #include <net/ip_tunnels.h> 41 #include <net/arp.h> 42 #include <net/checksum.h> 43 #include <net/dsfield.h> 44 #include <net/inet_ecn.h> 45 #include <net/xfrm.h> 46 #include <net/net_namespace.h> 47 #include <net/netns/generic.h> 48 #include <net/rtnetlink.h> 49 #include <net/gre.h> 50 #include <net/dst_metadata.h> 51 52 #if IS_ENABLED(CONFIG_IPV6) 53 #include <net/ipv6.h> 54 #include <net/ip6_fib.h> 55 #include <net/ip6_route.h> 56 #endif 57 58 /* 59 Problems & solutions 60 -------------------- 61 62 1. The most important issue is detecting local dead loops. 63 They would cause complete host lockup in transmit, which 64 would be "resolved" by stack overflow or, if queueing is enabled, 65 with infinite looping in net_bh. 66 67 We cannot track such dead loops during route installation, 68 it is infeasible task. The most general solutions would be 69 to keep skb->encapsulation counter (sort of local ttl), 70 and silently drop packet when it expires. It is a good 71 solution, but it supposes maintaining new variable in ALL 72 skb, even if no tunneling is used. 73 74 Current solution: xmit_recursion breaks dead loops. This is a percpu 75 counter, since when we enter the first ndo_xmit(), cpu migration is 76 forbidden. We force an exit if this counter reaches RECURSION_LIMIT 77 78 2. Networking dead loops would not kill routers, but would really 79 kill network. IP hop limit plays role of "t->recursion" in this case, 80 if we copy it from packet being encapsulated to upper header. 81 It is very good solution, but it introduces two problems: 82 83 - Routing protocols, using packets with ttl=1 (OSPF, RIP2), 84 do not work over tunnels. 85 - traceroute does not work. I planned to relay ICMP from tunnel, 86 so that this problem would be solved and traceroute output 87 would even more informative. This idea appeared to be wrong: 88 only Linux complies to rfc1812 now (yes, guys, Linux is the only 89 true router now :-)), all routers (at least, in neighbourhood of mine) 90 return only 8 bytes of payload. It is the end. 91 92 Hence, if we want that OSPF worked or traceroute said something reasonable, 93 we should search for another solution. 94 95 One of them is to parse packet trying to detect inner encapsulation 96 made by our node. It is difficult or even impossible, especially, 97 taking into account fragmentation. TO be short, ttl is not solution at all. 98 99 Current solution: The solution was UNEXPECTEDLY SIMPLE. 100 We force DF flag on tunnels with preconfigured hop limit, 101 that is ALL. :-) Well, it does not remove the problem completely, 102 but exponential growth of network traffic is changed to linear 103 (branches, that exceed pmtu are pruned) and tunnel mtu 104 rapidly degrades to value <68, where looping stops. 105 Yes, it is not good if there exists a router in the loop, 106 which does not force DF, even when encapsulating packets have DF set. 107 But it is not our problem! Nobody could accuse us, we made 108 all that we could make. Even if it is your gated who injected 109 fatal route to network, even if it were you who configured 110 fatal static route: you are innocent. :-) 111 112 Alexey Kuznetsov. 113 */ 114 115 static bool log_ecn_error = true; 116 module_param(log_ecn_error, bool, 0644); 117 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); 118 119 static struct rtnl_link_ops ipgre_link_ops __read_mostly; 120 static int ipgre_tunnel_init(struct net_device *dev); 121 122 static int ipgre_net_id __read_mostly; 123 static int gre_tap_net_id __read_mostly; 124 125 static int ip_gre_calc_hlen(__be16 o_flags) 126 { 127 int addend = 4; 128 129 if (o_flags & TUNNEL_CSUM) 130 addend += 4; 131 if (o_flags & TUNNEL_KEY) 132 addend += 4; 133 if (o_flags & TUNNEL_SEQ) 134 addend += 4; 135 return addend; 136 } 137 138 static __be16 gre_flags_to_tnl_flags(__be16 flags) 139 { 140 __be16 tflags = 0; 141 142 if (flags & GRE_CSUM) 143 tflags |= TUNNEL_CSUM; 144 if (flags & GRE_ROUTING) 145 tflags |= TUNNEL_ROUTING; 146 if (flags & GRE_KEY) 147 tflags |= TUNNEL_KEY; 148 if (flags & GRE_SEQ) 149 tflags |= TUNNEL_SEQ; 150 if (flags & GRE_STRICT) 151 tflags |= TUNNEL_STRICT; 152 if (flags & GRE_REC) 153 tflags |= TUNNEL_REC; 154 if (flags & GRE_VERSION) 155 tflags |= TUNNEL_VERSION; 156 157 return tflags; 158 } 159 160 static __be16 tnl_flags_to_gre_flags(__be16 tflags) 161 { 162 __be16 flags = 0; 163 164 if (tflags & TUNNEL_CSUM) 165 flags |= GRE_CSUM; 166 if (tflags & TUNNEL_ROUTING) 167 flags |= GRE_ROUTING; 168 if (tflags & TUNNEL_KEY) 169 flags |= GRE_KEY; 170 if (tflags & TUNNEL_SEQ) 171 flags |= GRE_SEQ; 172 if (tflags & TUNNEL_STRICT) 173 flags |= GRE_STRICT; 174 if (tflags & TUNNEL_REC) 175 flags |= GRE_REC; 176 if (tflags & TUNNEL_VERSION) 177 flags |= GRE_VERSION; 178 179 return flags; 180 } 181 182 static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, 183 bool *csum_err) 184 { 185 const struct gre_base_hdr *greh; 186 __be32 *options; 187 int hdr_len; 188 189 if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr)))) 190 return -EINVAL; 191 192 greh = (struct gre_base_hdr *)skb_transport_header(skb); 193 if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING))) 194 return -EINVAL; 195 196 tpi->flags = gre_flags_to_tnl_flags(greh->flags); 197 hdr_len = ip_gre_calc_hlen(tpi->flags); 198 199 if (!pskb_may_pull(skb, hdr_len)) 200 return -EINVAL; 201 202 greh = (struct gre_base_hdr *)skb_transport_header(skb); 203 tpi->proto = greh->protocol; 204 205 options = (__be32 *)(greh + 1); 206 if (greh->flags & GRE_CSUM) { 207 if (skb_checksum_simple_validate(skb)) { 208 *csum_err = true; 209 return -EINVAL; 210 } 211 212 skb_checksum_try_convert(skb, IPPROTO_GRE, 0, 213 null_compute_pseudo); 214 options++; 215 } 216 217 if (greh->flags & GRE_KEY) { 218 tpi->key = *options; 219 options++; 220 } else { 221 tpi->key = 0; 222 } 223 if (unlikely(greh->flags & GRE_SEQ)) { 224 tpi->seq = *options; 225 options++; 226 } else { 227 tpi->seq = 0; 228 } 229 /* WCCP version 1 and 2 protocol decoding. 230 * - Change protocol to IP 231 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header 232 */ 233 if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) { 234 tpi->proto = htons(ETH_P_IP); 235 if ((*(u8 *)options & 0xF0) != 0x40) { 236 hdr_len += 4; 237 if (!pskb_may_pull(skb, hdr_len)) 238 return -EINVAL; 239 } 240 } 241 return iptunnel_pull_header(skb, hdr_len, tpi->proto, false); 242 } 243 244 static void ipgre_err(struct sk_buff *skb, u32 info, 245 const struct tnl_ptk_info *tpi) 246 { 247 248 /* All the routers (except for Linux) return only 249 8 bytes of packet payload. It means, that precise relaying of 250 ICMP in the real Internet is absolutely infeasible. 251 252 Moreover, Cisco "wise men" put GRE key to the third word 253 in GRE header. It makes impossible maintaining even soft 254 state for keyed GRE tunnels with enabled checksum. Tell 255 them "thank you". 256 257 Well, I wonder, rfc1812 was written by Cisco employee, 258 what the hell these idiots break standards established 259 by themselves??? 260 */ 261 struct net *net = dev_net(skb->dev); 262 struct ip_tunnel_net *itn; 263 const struct iphdr *iph; 264 const int type = icmp_hdr(skb)->type; 265 const int code = icmp_hdr(skb)->code; 266 struct ip_tunnel *t; 267 268 switch (type) { 269 default: 270 case ICMP_PARAMETERPROB: 271 return; 272 273 case ICMP_DEST_UNREACH: 274 switch (code) { 275 case ICMP_SR_FAILED: 276 case ICMP_PORT_UNREACH: 277 /* Impossible event. */ 278 return; 279 default: 280 /* All others are translated to HOST_UNREACH. 281 rfc2003 contains "deep thoughts" about NET_UNREACH, 282 I believe they are just ether pollution. --ANK 283 */ 284 break; 285 } 286 break; 287 288 case ICMP_TIME_EXCEEDED: 289 if (code != ICMP_EXC_TTL) 290 return; 291 break; 292 293 case ICMP_REDIRECT: 294 break; 295 } 296 297 if (tpi->proto == htons(ETH_P_TEB)) 298 itn = net_generic(net, gre_tap_net_id); 299 else 300 itn = net_generic(net, ipgre_net_id); 301 302 iph = (const struct iphdr *)(icmp_hdr(skb) + 1); 303 t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags, 304 iph->daddr, iph->saddr, tpi->key); 305 306 if (!t) 307 return; 308 309 if (t->parms.iph.daddr == 0 || 310 ipv4_is_multicast(t->parms.iph.daddr)) 311 return; 312 313 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) 314 return; 315 316 if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO)) 317 t->err_count++; 318 else 319 t->err_count = 1; 320 t->err_time = jiffies; 321 } 322 323 static void gre_err(struct sk_buff *skb, u32 info) 324 { 325 /* All the routers (except for Linux) return only 326 * 8 bytes of packet payload. It means, that precise relaying of 327 * ICMP in the real Internet is absolutely infeasible. 328 * 329 * Moreover, Cisco "wise men" put GRE key to the third word 330 * in GRE header. It makes impossible maintaining even soft 331 * state for keyed 332 * GRE tunnels with enabled checksum. Tell them "thank you". 333 * 334 * Well, I wonder, rfc1812 was written by Cisco employee, 335 * what the hell these idiots break standards established 336 * by themselves??? 337 */ 338 339 const int type = icmp_hdr(skb)->type; 340 const int code = icmp_hdr(skb)->code; 341 struct tnl_ptk_info tpi; 342 bool csum_err = false; 343 344 if (parse_gre_header(skb, &tpi, &csum_err)) { 345 if (!csum_err) /* ignore csum errors. */ 346 return; 347 } 348 349 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { 350 ipv4_update_pmtu(skb, dev_net(skb->dev), info, 351 skb->dev->ifindex, 0, IPPROTO_GRE, 0); 352 return; 353 } 354 if (type == ICMP_REDIRECT) { 355 ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0, 356 IPPROTO_GRE, 0); 357 return; 358 } 359 360 ipgre_err(skb, info, &tpi); 361 } 362 363 static __be64 key_to_tunnel_id(__be32 key) 364 { 365 #ifdef __BIG_ENDIAN 366 return (__force __be64)((__force u32)key); 367 #else 368 return (__force __be64)((__force u64)key << 32); 369 #endif 370 } 371 372 /* Returns the least-significant 32 bits of a __be64. */ 373 static __be32 tunnel_id_to_key(__be64 x) 374 { 375 #ifdef __BIG_ENDIAN 376 return (__force __be32)x; 377 #else 378 return (__force __be32)((__force u64)x >> 32); 379 #endif 380 } 381 382 static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) 383 { 384 struct net *net = dev_net(skb->dev); 385 struct metadata_dst *tun_dst = NULL; 386 struct ip_tunnel_net *itn; 387 const struct iphdr *iph; 388 struct ip_tunnel *tunnel; 389 390 if (tpi->proto == htons(ETH_P_TEB)) 391 itn = net_generic(net, gre_tap_net_id); 392 else 393 itn = net_generic(net, ipgre_net_id); 394 395 iph = ip_hdr(skb); 396 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags, 397 iph->saddr, iph->daddr, tpi->key); 398 399 if (tunnel) { 400 skb_pop_mac_header(skb); 401 if (tunnel->collect_md) { 402 __be16 flags; 403 __be64 tun_id; 404 405 flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY); 406 tun_id = key_to_tunnel_id(tpi->key); 407 tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0); 408 if (!tun_dst) 409 return PACKET_REJECT; 410 } 411 412 ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error); 413 return PACKET_RCVD; 414 } 415 return PACKET_REJECT; 416 } 417 418 static int gre_rcv(struct sk_buff *skb) 419 { 420 struct tnl_ptk_info tpi; 421 bool csum_err = false; 422 423 #ifdef CONFIG_NET_IPGRE_BROADCAST 424 if (ipv4_is_multicast(ip_hdr(skb)->daddr)) { 425 /* Looped back packet, drop it! */ 426 if (rt_is_output_route(skb_rtable(skb))) 427 goto drop; 428 } 429 #endif 430 431 if (parse_gre_header(skb, &tpi, &csum_err) < 0) 432 goto drop; 433 434 if (ipgre_rcv(skb, &tpi) == PACKET_RCVD) 435 return 0; 436 437 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 438 drop: 439 kfree_skb(skb); 440 return 0; 441 } 442 443 static __sum16 gre_checksum(struct sk_buff *skb) 444 { 445 __wsum csum; 446 447 if (skb->ip_summed == CHECKSUM_PARTIAL) 448 csum = lco_csum(skb); 449 else 450 csum = skb_checksum(skb, 0, skb->len, 0); 451 return csum_fold(csum); 452 } 453 454 static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags, 455 __be16 proto, __be32 key, __be32 seq) 456 { 457 struct gre_base_hdr *greh; 458 459 skb_push(skb, hdr_len); 460 461 skb_reset_transport_header(skb); 462 greh = (struct gre_base_hdr *)skb->data; 463 greh->flags = tnl_flags_to_gre_flags(flags); 464 greh->protocol = proto; 465 466 if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) { 467 __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4); 468 469 if (flags & TUNNEL_SEQ) { 470 *ptr = seq; 471 ptr--; 472 } 473 if (flags & TUNNEL_KEY) { 474 *ptr = key; 475 ptr--; 476 } 477 if (flags & TUNNEL_CSUM && 478 !(skb_shinfo(skb)->gso_type & 479 (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) { 480 *ptr = 0; 481 *(__sum16 *)ptr = gre_checksum(skb); 482 } 483 } 484 } 485 486 static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, 487 const struct iphdr *tnl_params, 488 __be16 proto) 489 { 490 struct ip_tunnel *tunnel = netdev_priv(dev); 491 492 if (tunnel->parms.o_flags & TUNNEL_SEQ) 493 tunnel->o_seqno++; 494 495 /* Push GRE header. */ 496 build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags, 497 proto, tunnel->parms.o_key, htonl(tunnel->o_seqno)); 498 499 skb_set_inner_protocol(skb, proto); 500 ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol); 501 } 502 503 static struct sk_buff *gre_handle_offloads(struct sk_buff *skb, 504 bool csum) 505 { 506 return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE); 507 } 508 509 static struct rtable *gre_get_rt(struct sk_buff *skb, 510 struct net_device *dev, 511 struct flowi4 *fl, 512 const struct ip_tunnel_key *key) 513 { 514 struct net *net = dev_net(dev); 515 516 memset(fl, 0, sizeof(*fl)); 517 fl->daddr = key->u.ipv4.dst; 518 fl->saddr = key->u.ipv4.src; 519 fl->flowi4_tos = RT_TOS(key->tos); 520 fl->flowi4_mark = skb->mark; 521 fl->flowi4_proto = IPPROTO_GRE; 522 523 return ip_route_output_key(net, fl); 524 } 525 526 static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev) 527 { 528 struct ip_tunnel_info *tun_info; 529 const struct ip_tunnel_key *key; 530 struct rtable *rt = NULL; 531 struct flowi4 fl; 532 int min_headroom; 533 int tunnel_hlen; 534 __be16 df, flags; 535 bool use_cache; 536 int err; 537 538 tun_info = skb_tunnel_info(skb); 539 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) || 540 ip_tunnel_info_af(tun_info) != AF_INET)) 541 goto err_free_skb; 542 543 key = &tun_info->key; 544 use_cache = ip_tunnel_dst_cache_usable(skb, tun_info); 545 if (use_cache) 546 rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr); 547 if (!rt) { 548 rt = gre_get_rt(skb, dev, &fl, key); 549 if (IS_ERR(rt)) 550 goto err_free_skb; 551 if (use_cache) 552 dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst, 553 fl.saddr); 554 } 555 556 tunnel_hlen = ip_gre_calc_hlen(key->tun_flags); 557 558 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len 559 + tunnel_hlen + sizeof(struct iphdr); 560 if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) { 561 int head_delta = SKB_DATA_ALIGN(min_headroom - 562 skb_headroom(skb) + 563 16); 564 err = pskb_expand_head(skb, max_t(int, head_delta, 0), 565 0, GFP_ATOMIC); 566 if (unlikely(err)) 567 goto err_free_rt; 568 } 569 570 /* Push Tunnel header. */ 571 skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)); 572 if (IS_ERR(skb)) { 573 skb = NULL; 574 goto err_free_rt; 575 } 576 577 flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY); 578 build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB), 579 tunnel_id_to_key(tun_info->key.tun_id), 0); 580 581 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; 582 583 iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE, 584 key->tos, key->ttl, df, false); 585 return; 586 587 err_free_rt: 588 ip_rt_put(rt); 589 err_free_skb: 590 kfree_skb(skb); 591 dev->stats.tx_dropped++; 592 } 593 594 static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) 595 { 596 struct ip_tunnel_info *info = skb_tunnel_info(skb); 597 struct rtable *rt; 598 struct flowi4 fl4; 599 600 if (ip_tunnel_info_af(info) != AF_INET) 601 return -EINVAL; 602 603 rt = gre_get_rt(skb, dev, &fl4, &info->key); 604 if (IS_ERR(rt)) 605 return PTR_ERR(rt); 606 607 ip_rt_put(rt); 608 info->key.u.ipv4.src = fl4.saddr; 609 return 0; 610 } 611 612 static netdev_tx_t ipgre_xmit(struct sk_buff *skb, 613 struct net_device *dev) 614 { 615 struct ip_tunnel *tunnel = netdev_priv(dev); 616 const struct iphdr *tnl_params; 617 618 if (tunnel->collect_md) { 619 gre_fb_xmit(skb, dev); 620 return NETDEV_TX_OK; 621 } 622 623 if (dev->header_ops) { 624 /* Need space for new headers */ 625 if (skb_cow_head(skb, dev->needed_headroom - 626 (tunnel->hlen + sizeof(struct iphdr)))) 627 goto free_skb; 628 629 tnl_params = (const struct iphdr *)skb->data; 630 631 /* Pull skb since ip_tunnel_xmit() needs skb->data pointing 632 * to gre header. 633 */ 634 skb_pull(skb, tunnel->hlen + sizeof(struct iphdr)); 635 skb_reset_mac_header(skb); 636 } else { 637 if (skb_cow_head(skb, dev->needed_headroom)) 638 goto free_skb; 639 640 tnl_params = &tunnel->parms.iph; 641 } 642 643 skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM)); 644 if (IS_ERR(skb)) 645 goto out; 646 647 __gre_xmit(skb, dev, tnl_params, skb->protocol); 648 return NETDEV_TX_OK; 649 650 free_skb: 651 kfree_skb(skb); 652 out: 653 dev->stats.tx_dropped++; 654 return NETDEV_TX_OK; 655 } 656 657 static netdev_tx_t gre_tap_xmit(struct sk_buff *skb, 658 struct net_device *dev) 659 { 660 struct ip_tunnel *tunnel = netdev_priv(dev); 661 662 if (tunnel->collect_md) { 663 gre_fb_xmit(skb, dev); 664 return NETDEV_TX_OK; 665 } 666 667 skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM)); 668 if (IS_ERR(skb)) 669 goto out; 670 671 if (skb_cow_head(skb, dev->needed_headroom)) 672 goto free_skb; 673 674 __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB)); 675 return NETDEV_TX_OK; 676 677 free_skb: 678 kfree_skb(skb); 679 out: 680 dev->stats.tx_dropped++; 681 return NETDEV_TX_OK; 682 } 683 684 static int ipgre_tunnel_ioctl(struct net_device *dev, 685 struct ifreq *ifr, int cmd) 686 { 687 int err; 688 struct ip_tunnel_parm p; 689 690 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) 691 return -EFAULT; 692 if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) { 693 if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE || 694 p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) || 695 ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))) 696 return -EINVAL; 697 } 698 p.i_flags = gre_flags_to_tnl_flags(p.i_flags); 699 p.o_flags = gre_flags_to_tnl_flags(p.o_flags); 700 701 err = ip_tunnel_ioctl(dev, &p, cmd); 702 if (err) 703 return err; 704 705 p.i_flags = tnl_flags_to_gre_flags(p.i_flags); 706 p.o_flags = tnl_flags_to_gre_flags(p.o_flags); 707 708 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) 709 return -EFAULT; 710 return 0; 711 } 712 713 /* Nice toy. Unfortunately, useless in real life :-) 714 It allows to construct virtual multiprotocol broadcast "LAN" 715 over the Internet, provided multicast routing is tuned. 716 717 718 I have no idea was this bicycle invented before me, 719 so that I had to set ARPHRD_IPGRE to a random value. 720 I have an impression, that Cisco could make something similar, 721 but this feature is apparently missing in IOS<=11.2(8). 722 723 I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks 724 with broadcast 224.66.66.66. If you have access to mbone, play with me :-) 725 726 ping -t 255 224.66.66.66 727 728 If nobody answers, mbone does not work. 729 730 ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255 731 ip addr add 10.66.66.<somewhat>/24 dev Universe 732 ifconfig Universe up 733 ifconfig Universe add fe80::<Your_real_addr>/10 734 ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96 735 ftp 10.66.66.66 736 ... 737 ftp fec0:6666:6666::193.233.7.65 738 ... 739 */ 740 static int ipgre_header(struct sk_buff *skb, struct net_device *dev, 741 unsigned short type, 742 const void *daddr, const void *saddr, unsigned int len) 743 { 744 struct ip_tunnel *t = netdev_priv(dev); 745 struct iphdr *iph; 746 struct gre_base_hdr *greh; 747 748 iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph)); 749 greh = (struct gre_base_hdr *)(iph+1); 750 greh->flags = tnl_flags_to_gre_flags(t->parms.o_flags); 751 greh->protocol = htons(type); 752 753 memcpy(iph, &t->parms.iph, sizeof(struct iphdr)); 754 755 /* Set the source hardware address. */ 756 if (saddr) 757 memcpy(&iph->saddr, saddr, 4); 758 if (daddr) 759 memcpy(&iph->daddr, daddr, 4); 760 if (iph->daddr) 761 return t->hlen + sizeof(*iph); 762 763 return -(t->hlen + sizeof(*iph)); 764 } 765 766 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr) 767 { 768 const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb); 769 memcpy(haddr, &iph->saddr, 4); 770 return 4; 771 } 772 773 static const struct header_ops ipgre_header_ops = { 774 .create = ipgre_header, 775 .parse = ipgre_header_parse, 776 }; 777 778 #ifdef CONFIG_NET_IPGRE_BROADCAST 779 static int ipgre_open(struct net_device *dev) 780 { 781 struct ip_tunnel *t = netdev_priv(dev); 782 783 if (ipv4_is_multicast(t->parms.iph.daddr)) { 784 struct flowi4 fl4; 785 struct rtable *rt; 786 787 rt = ip_route_output_gre(t->net, &fl4, 788 t->parms.iph.daddr, 789 t->parms.iph.saddr, 790 t->parms.o_key, 791 RT_TOS(t->parms.iph.tos), 792 t->parms.link); 793 if (IS_ERR(rt)) 794 return -EADDRNOTAVAIL; 795 dev = rt->dst.dev; 796 ip_rt_put(rt); 797 if (!__in_dev_get_rtnl(dev)) 798 return -EADDRNOTAVAIL; 799 t->mlink = dev->ifindex; 800 ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr); 801 } 802 return 0; 803 } 804 805 static int ipgre_close(struct net_device *dev) 806 { 807 struct ip_tunnel *t = netdev_priv(dev); 808 809 if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) { 810 struct in_device *in_dev; 811 in_dev = inetdev_by_index(t->net, t->mlink); 812 if (in_dev) 813 ip_mc_dec_group(in_dev, t->parms.iph.daddr); 814 } 815 return 0; 816 } 817 #endif 818 819 static const struct net_device_ops ipgre_netdev_ops = { 820 .ndo_init = ipgre_tunnel_init, 821 .ndo_uninit = ip_tunnel_uninit, 822 #ifdef CONFIG_NET_IPGRE_BROADCAST 823 .ndo_open = ipgre_open, 824 .ndo_stop = ipgre_close, 825 #endif 826 .ndo_start_xmit = ipgre_xmit, 827 .ndo_do_ioctl = ipgre_tunnel_ioctl, 828 .ndo_change_mtu = ip_tunnel_change_mtu, 829 .ndo_get_stats64 = ip_tunnel_get_stats64, 830 .ndo_get_iflink = ip_tunnel_get_iflink, 831 }; 832 833 #define GRE_FEATURES (NETIF_F_SG | \ 834 NETIF_F_FRAGLIST | \ 835 NETIF_F_HIGHDMA | \ 836 NETIF_F_HW_CSUM) 837 838 static void ipgre_tunnel_setup(struct net_device *dev) 839 { 840 dev->netdev_ops = &ipgre_netdev_ops; 841 dev->type = ARPHRD_IPGRE; 842 ip_tunnel_setup(dev, ipgre_net_id); 843 } 844 845 static void __gre_tunnel_init(struct net_device *dev) 846 { 847 struct ip_tunnel *tunnel; 848 int t_hlen; 849 850 tunnel = netdev_priv(dev); 851 tunnel->tun_hlen = ip_gre_calc_hlen(tunnel->parms.o_flags); 852 tunnel->parms.iph.protocol = IPPROTO_GRE; 853 854 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen; 855 856 t_hlen = tunnel->hlen + sizeof(struct iphdr); 857 858 dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; 859 dev->mtu = ETH_DATA_LEN - t_hlen - 4; 860 861 dev->features |= GRE_FEATURES; 862 dev->hw_features |= GRE_FEATURES; 863 864 if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) { 865 /* TCP offload with GRE SEQ is not supported. */ 866 dev->features |= NETIF_F_GSO_SOFTWARE; 867 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 868 /* Can use a lockless transmit, unless we generate 869 * output sequences 870 */ 871 dev->features |= NETIF_F_LLTX; 872 } 873 } 874 875 static int ipgre_tunnel_init(struct net_device *dev) 876 { 877 struct ip_tunnel *tunnel = netdev_priv(dev); 878 struct iphdr *iph = &tunnel->parms.iph; 879 880 __gre_tunnel_init(dev); 881 882 memcpy(dev->dev_addr, &iph->saddr, 4); 883 memcpy(dev->broadcast, &iph->daddr, 4); 884 885 dev->flags = IFF_NOARP; 886 netif_keep_dst(dev); 887 dev->addr_len = 4; 888 889 if (iph->daddr) { 890 #ifdef CONFIG_NET_IPGRE_BROADCAST 891 if (ipv4_is_multicast(iph->daddr)) { 892 if (!iph->saddr) 893 return -EINVAL; 894 dev->flags = IFF_BROADCAST; 895 dev->header_ops = &ipgre_header_ops; 896 } 897 #endif 898 } else 899 dev->header_ops = &ipgre_header_ops; 900 901 return ip_tunnel_init(dev); 902 } 903 904 static const struct gre_protocol ipgre_protocol = { 905 .handler = gre_rcv, 906 .err_handler = gre_err, 907 }; 908 909 static int __net_init ipgre_init_net(struct net *net) 910 { 911 return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL); 912 } 913 914 static void __net_exit ipgre_exit_net(struct net *net) 915 { 916 struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id); 917 ip_tunnel_delete_net(itn, &ipgre_link_ops); 918 } 919 920 static struct pernet_operations ipgre_net_ops = { 921 .init = ipgre_init_net, 922 .exit = ipgre_exit_net, 923 .id = &ipgre_net_id, 924 .size = sizeof(struct ip_tunnel_net), 925 }; 926 927 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[]) 928 { 929 __be16 flags; 930 931 if (!data) 932 return 0; 933 934 flags = 0; 935 if (data[IFLA_GRE_IFLAGS]) 936 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]); 937 if (data[IFLA_GRE_OFLAGS]) 938 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]); 939 if (flags & (GRE_VERSION|GRE_ROUTING)) 940 return -EINVAL; 941 942 return 0; 943 } 944 945 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[]) 946 { 947 __be32 daddr; 948 949 if (tb[IFLA_ADDRESS]) { 950 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) 951 return -EINVAL; 952 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) 953 return -EADDRNOTAVAIL; 954 } 955 956 if (!data) 957 goto out; 958 959 if (data[IFLA_GRE_REMOTE]) { 960 memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4); 961 if (!daddr) 962 return -EINVAL; 963 } 964 965 out: 966 return ipgre_tunnel_validate(tb, data); 967 } 968 969 static void ipgre_netlink_parms(struct net_device *dev, 970 struct nlattr *data[], 971 struct nlattr *tb[], 972 struct ip_tunnel_parm *parms) 973 { 974 memset(parms, 0, sizeof(*parms)); 975 976 parms->iph.protocol = IPPROTO_GRE; 977 978 if (!data) 979 return; 980 981 if (data[IFLA_GRE_LINK]) 982 parms->link = nla_get_u32(data[IFLA_GRE_LINK]); 983 984 if (data[IFLA_GRE_IFLAGS]) 985 parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS])); 986 987 if (data[IFLA_GRE_OFLAGS]) 988 parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS])); 989 990 if (data[IFLA_GRE_IKEY]) 991 parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]); 992 993 if (data[IFLA_GRE_OKEY]) 994 parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]); 995 996 if (data[IFLA_GRE_LOCAL]) 997 parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]); 998 999 if (data[IFLA_GRE_REMOTE]) 1000 parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]); 1001 1002 if (data[IFLA_GRE_TTL]) 1003 parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]); 1004 1005 if (data[IFLA_GRE_TOS]) 1006 parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]); 1007 1008 if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) 1009 parms->iph.frag_off = htons(IP_DF); 1010 1011 if (data[IFLA_GRE_COLLECT_METADATA]) { 1012 struct ip_tunnel *t = netdev_priv(dev); 1013 1014 t->collect_md = true; 1015 } 1016 } 1017 1018 /* This function returns true when ENCAP attributes are present in the nl msg */ 1019 static bool ipgre_netlink_encap_parms(struct nlattr *data[], 1020 struct ip_tunnel_encap *ipencap) 1021 { 1022 bool ret = false; 1023 1024 memset(ipencap, 0, sizeof(*ipencap)); 1025 1026 if (!data) 1027 return ret; 1028 1029 if (data[IFLA_GRE_ENCAP_TYPE]) { 1030 ret = true; 1031 ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]); 1032 } 1033 1034 if (data[IFLA_GRE_ENCAP_FLAGS]) { 1035 ret = true; 1036 ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]); 1037 } 1038 1039 if (data[IFLA_GRE_ENCAP_SPORT]) { 1040 ret = true; 1041 ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]); 1042 } 1043 1044 if (data[IFLA_GRE_ENCAP_DPORT]) { 1045 ret = true; 1046 ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]); 1047 } 1048 1049 return ret; 1050 } 1051 1052 static int gre_tap_init(struct net_device *dev) 1053 { 1054 __gre_tunnel_init(dev); 1055 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1056 1057 return ip_tunnel_init(dev); 1058 } 1059 1060 static const struct net_device_ops gre_tap_netdev_ops = { 1061 .ndo_init = gre_tap_init, 1062 .ndo_uninit = ip_tunnel_uninit, 1063 .ndo_start_xmit = gre_tap_xmit, 1064 .ndo_set_mac_address = eth_mac_addr, 1065 .ndo_validate_addr = eth_validate_addr, 1066 .ndo_change_mtu = ip_tunnel_change_mtu, 1067 .ndo_get_stats64 = ip_tunnel_get_stats64, 1068 .ndo_get_iflink = ip_tunnel_get_iflink, 1069 .ndo_fill_metadata_dst = gre_fill_metadata_dst, 1070 }; 1071 1072 static void ipgre_tap_setup(struct net_device *dev) 1073 { 1074 ether_setup(dev); 1075 dev->netdev_ops = &gre_tap_netdev_ops; 1076 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1077 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1078 ip_tunnel_setup(dev, gre_tap_net_id); 1079 } 1080 1081 static int ipgre_newlink(struct net *src_net, struct net_device *dev, 1082 struct nlattr *tb[], struct nlattr *data[]) 1083 { 1084 struct ip_tunnel_parm p; 1085 struct ip_tunnel_encap ipencap; 1086 1087 if (ipgre_netlink_encap_parms(data, &ipencap)) { 1088 struct ip_tunnel *t = netdev_priv(dev); 1089 int err = ip_tunnel_encap_setup(t, &ipencap); 1090 1091 if (err < 0) 1092 return err; 1093 } 1094 1095 ipgre_netlink_parms(dev, data, tb, &p); 1096 return ip_tunnel_newlink(dev, tb, &p); 1097 } 1098 1099 static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[], 1100 struct nlattr *data[]) 1101 { 1102 struct ip_tunnel_parm p; 1103 struct ip_tunnel_encap ipencap; 1104 1105 if (ipgre_netlink_encap_parms(data, &ipencap)) { 1106 struct ip_tunnel *t = netdev_priv(dev); 1107 int err = ip_tunnel_encap_setup(t, &ipencap); 1108 1109 if (err < 0) 1110 return err; 1111 } 1112 1113 ipgre_netlink_parms(dev, data, tb, &p); 1114 return ip_tunnel_changelink(dev, tb, &p); 1115 } 1116 1117 static size_t ipgre_get_size(const struct net_device *dev) 1118 { 1119 return 1120 /* IFLA_GRE_LINK */ 1121 nla_total_size(4) + 1122 /* IFLA_GRE_IFLAGS */ 1123 nla_total_size(2) + 1124 /* IFLA_GRE_OFLAGS */ 1125 nla_total_size(2) + 1126 /* IFLA_GRE_IKEY */ 1127 nla_total_size(4) + 1128 /* IFLA_GRE_OKEY */ 1129 nla_total_size(4) + 1130 /* IFLA_GRE_LOCAL */ 1131 nla_total_size(4) + 1132 /* IFLA_GRE_REMOTE */ 1133 nla_total_size(4) + 1134 /* IFLA_GRE_TTL */ 1135 nla_total_size(1) + 1136 /* IFLA_GRE_TOS */ 1137 nla_total_size(1) + 1138 /* IFLA_GRE_PMTUDISC */ 1139 nla_total_size(1) + 1140 /* IFLA_GRE_ENCAP_TYPE */ 1141 nla_total_size(2) + 1142 /* IFLA_GRE_ENCAP_FLAGS */ 1143 nla_total_size(2) + 1144 /* IFLA_GRE_ENCAP_SPORT */ 1145 nla_total_size(2) + 1146 /* IFLA_GRE_ENCAP_DPORT */ 1147 nla_total_size(2) + 1148 /* IFLA_GRE_COLLECT_METADATA */ 1149 nla_total_size(0) + 1150 0; 1151 } 1152 1153 static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) 1154 { 1155 struct ip_tunnel *t = netdev_priv(dev); 1156 struct ip_tunnel_parm *p = &t->parms; 1157 1158 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || 1159 nla_put_be16(skb, IFLA_GRE_IFLAGS, tnl_flags_to_gre_flags(p->i_flags)) || 1160 nla_put_be16(skb, IFLA_GRE_OFLAGS, tnl_flags_to_gre_flags(p->o_flags)) || 1161 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || 1162 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || 1163 nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) || 1164 nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) || 1165 nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) || 1166 nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) || 1167 nla_put_u8(skb, IFLA_GRE_PMTUDISC, 1168 !!(p->iph.frag_off & htons(IP_DF)))) 1169 goto nla_put_failure; 1170 1171 if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE, 1172 t->encap.type) || 1173 nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT, 1174 t->encap.sport) || 1175 nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT, 1176 t->encap.dport) || 1177 nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS, 1178 t->encap.flags)) 1179 goto nla_put_failure; 1180 1181 if (t->collect_md) { 1182 if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA)) 1183 goto nla_put_failure; 1184 } 1185 1186 return 0; 1187 1188 nla_put_failure: 1189 return -EMSGSIZE; 1190 } 1191 1192 static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = { 1193 [IFLA_GRE_LINK] = { .type = NLA_U32 }, 1194 [IFLA_GRE_IFLAGS] = { .type = NLA_U16 }, 1195 [IFLA_GRE_OFLAGS] = { .type = NLA_U16 }, 1196 [IFLA_GRE_IKEY] = { .type = NLA_U32 }, 1197 [IFLA_GRE_OKEY] = { .type = NLA_U32 }, 1198 [IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) }, 1199 [IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, 1200 [IFLA_GRE_TTL] = { .type = NLA_U8 }, 1201 [IFLA_GRE_TOS] = { .type = NLA_U8 }, 1202 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 }, 1203 [IFLA_GRE_ENCAP_TYPE] = { .type = NLA_U16 }, 1204 [IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 }, 1205 [IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 }, 1206 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 }, 1207 [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG }, 1208 }; 1209 1210 static struct rtnl_link_ops ipgre_link_ops __read_mostly = { 1211 .kind = "gre", 1212 .maxtype = IFLA_GRE_MAX, 1213 .policy = ipgre_policy, 1214 .priv_size = sizeof(struct ip_tunnel), 1215 .setup = ipgre_tunnel_setup, 1216 .validate = ipgre_tunnel_validate, 1217 .newlink = ipgre_newlink, 1218 .changelink = ipgre_changelink, 1219 .dellink = ip_tunnel_dellink, 1220 .get_size = ipgre_get_size, 1221 .fill_info = ipgre_fill_info, 1222 .get_link_net = ip_tunnel_get_link_net, 1223 }; 1224 1225 static struct rtnl_link_ops ipgre_tap_ops __read_mostly = { 1226 .kind = "gretap", 1227 .maxtype = IFLA_GRE_MAX, 1228 .policy = ipgre_policy, 1229 .priv_size = sizeof(struct ip_tunnel), 1230 .setup = ipgre_tap_setup, 1231 .validate = ipgre_tap_validate, 1232 .newlink = ipgre_newlink, 1233 .changelink = ipgre_changelink, 1234 .dellink = ip_tunnel_dellink, 1235 .get_size = ipgre_get_size, 1236 .fill_info = ipgre_fill_info, 1237 .get_link_net = ip_tunnel_get_link_net, 1238 }; 1239 1240 struct net_device *gretap_fb_dev_create(struct net *net, const char *name, 1241 u8 name_assign_type) 1242 { 1243 struct nlattr *tb[IFLA_MAX + 1]; 1244 struct net_device *dev; 1245 struct ip_tunnel *t; 1246 int err; 1247 1248 memset(&tb, 0, sizeof(tb)); 1249 1250 dev = rtnl_create_link(net, name, name_assign_type, 1251 &ipgre_tap_ops, tb); 1252 if (IS_ERR(dev)) 1253 return dev; 1254 1255 /* Configure flow based GRE device. */ 1256 t = netdev_priv(dev); 1257 t->collect_md = true; 1258 1259 err = ipgre_newlink(net, dev, tb, NULL); 1260 if (err < 0) 1261 goto out; 1262 1263 /* openvswitch users expect packet sizes to be unrestricted, 1264 * so set the largest MTU we can. 1265 */ 1266 err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false); 1267 if (err) 1268 goto out; 1269 1270 return dev; 1271 out: 1272 free_netdev(dev); 1273 return ERR_PTR(err); 1274 } 1275 EXPORT_SYMBOL_GPL(gretap_fb_dev_create); 1276 1277 static int __net_init ipgre_tap_init_net(struct net *net) 1278 { 1279 return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0"); 1280 } 1281 1282 static void __net_exit ipgre_tap_exit_net(struct net *net) 1283 { 1284 struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id); 1285 ip_tunnel_delete_net(itn, &ipgre_tap_ops); 1286 } 1287 1288 static struct pernet_operations ipgre_tap_net_ops = { 1289 .init = ipgre_tap_init_net, 1290 .exit = ipgre_tap_exit_net, 1291 .id = &gre_tap_net_id, 1292 .size = sizeof(struct ip_tunnel_net), 1293 }; 1294 1295 static int __init ipgre_init(void) 1296 { 1297 int err; 1298 1299 pr_info("GRE over IPv4 tunneling driver\n"); 1300 1301 err = register_pernet_device(&ipgre_net_ops); 1302 if (err < 0) 1303 return err; 1304 1305 err = register_pernet_device(&ipgre_tap_net_ops); 1306 if (err < 0) 1307 goto pnet_tap_faied; 1308 1309 err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO); 1310 if (err < 0) { 1311 pr_info("%s: can't add protocol\n", __func__); 1312 goto add_proto_failed; 1313 } 1314 1315 err = rtnl_link_register(&ipgre_link_ops); 1316 if (err < 0) 1317 goto rtnl_link_failed; 1318 1319 err = rtnl_link_register(&ipgre_tap_ops); 1320 if (err < 0) 1321 goto tap_ops_failed; 1322 1323 return 0; 1324 1325 tap_ops_failed: 1326 rtnl_link_unregister(&ipgre_link_ops); 1327 rtnl_link_failed: 1328 gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO); 1329 add_proto_failed: 1330 unregister_pernet_device(&ipgre_tap_net_ops); 1331 pnet_tap_faied: 1332 unregister_pernet_device(&ipgre_net_ops); 1333 return err; 1334 } 1335 1336 static void __exit ipgre_fini(void) 1337 { 1338 rtnl_link_unregister(&ipgre_tap_ops); 1339 rtnl_link_unregister(&ipgre_link_ops); 1340 gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO); 1341 unregister_pernet_device(&ipgre_tap_net_ops); 1342 unregister_pernet_device(&ipgre_net_ops); 1343 } 1344 1345 module_init(ipgre_init); 1346 module_exit(ipgre_fini); 1347 MODULE_LICENSE("GPL"); 1348 MODULE_ALIAS_RTNL_LINK("gre"); 1349 MODULE_ALIAS_RTNL_LINK("gretap"); 1350 MODULE_ALIAS_NETDEV("gre0"); 1351 MODULE_ALIAS_NETDEV("gretap0"); 1352