xref: /openbmc/linux/net/ipv4/ip_gre.c (revision 3805e6a1)
1 /*
2  *	Linux NET3:	GRE over IP protocol decoder.
3  *
4  *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
5  *
6  *	This program is free software; you can redistribute it and/or
7  *	modify it under the terms of the GNU General Public License
8  *	as published by the Free Software Foundation; either version
9  *	2 of the License, or (at your option) any later version.
10  *
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/capability.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <asm/uaccess.h>
21 #include <linux/skbuff.h>
22 #include <linux/netdevice.h>
23 #include <linux/in.h>
24 #include <linux/tcp.h>
25 #include <linux/udp.h>
26 #include <linux/if_arp.h>
27 #include <linux/if_vlan.h>
28 #include <linux/init.h>
29 #include <linux/in6.h>
30 #include <linux/inetdevice.h>
31 #include <linux/igmp.h>
32 #include <linux/netfilter_ipv4.h>
33 #include <linux/etherdevice.h>
34 #include <linux/if_ether.h>
35 
36 #include <net/sock.h>
37 #include <net/ip.h>
38 #include <net/icmp.h>
39 #include <net/protocol.h>
40 #include <net/ip_tunnels.h>
41 #include <net/arp.h>
42 #include <net/checksum.h>
43 #include <net/dsfield.h>
44 #include <net/inet_ecn.h>
45 #include <net/xfrm.h>
46 #include <net/net_namespace.h>
47 #include <net/netns/generic.h>
48 #include <net/rtnetlink.h>
49 #include <net/gre.h>
50 #include <net/dst_metadata.h>
51 
52 #if IS_ENABLED(CONFIG_IPV6)
53 #include <net/ipv6.h>
54 #include <net/ip6_fib.h>
55 #include <net/ip6_route.h>
56 #endif
57 
58 /*
59    Problems & solutions
60    --------------------
61 
62    1. The most important issue is detecting local dead loops.
63    They would cause complete host lockup in transmit, which
64    would be "resolved" by stack overflow or, if queueing is enabled,
65    by infinite looping in net_bh.
66 
67    We cannot track such dead loops during route installation;
68    it is an infeasible task. The most general solution would be
69    to keep an skb->encapsulation counter (a sort of local ttl)
70    and silently drop the packet when it expires. It is a good
71    solution, but it supposes maintaining a new variable in ALL
72    skbs, even if no tunneling is used.
73 
74    Current solution: xmit_recursion breaks dead loops. This is a percpu
75    counter; once we have entered the first ndo_xmit(), cpu migration is
76    forbidden. We force an exit if this counter reaches RECURSION_LIMIT.
77 
78    2. Networking dead loops would not kill routers, but would really
79    kill the network. The IP hop limit plays the role of "t->recursion" here,
80    if we copy it from the packet being encapsulated to the upper header.
81    It is a very good solution, but it introduces two problems:
82 
83    - Routing protocols using packets with ttl=1 (OSPF, RIP2)
84      do not work over tunnels.
85    - traceroute does not work. I planned to relay ICMP from the tunnel,
86      so that this problem would be solved and traceroute output
87      would be even more informative. This idea appeared to be wrong:
88      only Linux complies with rfc1812 now (yes, guys, Linux is the only
89      true router now :-)); all routers (at least in my neighbourhood)
90      return only 8 bytes of payload. That is the end.
91 
92    Hence, if we want OSPF to work or traceroute to say something reasonable,
93    we should search for another solution.
94 
95    One of them is to parse the packet, trying to detect inner encapsulation
96    made by our node. It is difficult or even impossible, especially
97    taking fragmentation into account. To be short, ttl is not a solution at all.
98 
99    Current solution: the solution was UNEXPECTEDLY SIMPLE.
100    We force the DF flag on tunnels with a preconfigured hop limit;
101    that is ALL. :-) Well, it does not remove the problem completely,
102    but exponential growth of network traffic is changed to linear
103    (branches that exceed the pmtu are pruned) and the tunnel mtu
104    rapidly degrades to a value <68, where looping stops.
105    Yes, it is not good if there exists a router in the loop
106    which does not force DF, even when the encapsulating packets have DF set.
107    But it is not our problem! Nobody could accuse us; we did
108    all that we could. Even if it was your gated that injected
109    the fatal route into the network, even if it was you who configured
110    the fatal static route: you are innocent. :-)
111 
112    Alexey Kuznetsov.
113  */
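
/* Illustrative sketch, not part of the original file: the percpu
 * recursion counter described above. The real counter (xmit_recursion)
 * lives in the core dev transmit path, not here; the names below are
 * hypothetical and only demonstrate the pattern that breaks local
 * dead loops.
 */
#if 0	/* example only */
static DEFINE_PER_CPU(unsigned int, gre_example_recursion);
#define GRE_EXAMPLE_RECURSION_LIMIT	8

static bool gre_example_recursion_enter(void)
{
	/* CPU migration is forbidden inside ndo_start_xmit(), so an
	 * unsynchronized percpu counter is safe here.
	 */
	if (__this_cpu_read(gre_example_recursion) >= GRE_EXAMPLE_RECURSION_LIMIT)
		return false;	/* dead loop suspected; caller drops the skb */
	__this_cpu_inc(gre_example_recursion);
	return true;
}

static void gre_example_recursion_exit(void)
{
	__this_cpu_dec(gre_example_recursion);
}
#endif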
114 
115 static bool log_ecn_error = true;
116 module_param(log_ecn_error, bool, 0644);
117 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
118 
119 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
120 static int ipgre_tunnel_init(struct net_device *dev);
121 
122 static int ipgre_net_id __read_mostly;
123 static int gre_tap_net_id __read_mostly;
124 
125 static void ipgre_err(struct sk_buff *skb, u32 info,
126 		      const struct tnl_ptk_info *tpi)
127 {
128 
129 	/* All the routers (except for Linux) return only
130 	   8 bytes of packet payload. It means that precise relaying of
131 	   ICMP in the real Internet is absolutely infeasible.
132 
133 	   Moreover, Cisco "wise men" put the GRE key into the third word
134 	   of the GRE header. It makes it impossible to maintain even soft
135 	   state for keyed GRE tunnels with checksums enabled. Tell
136 	   them "thank you".
137 
138 	   Well, I wonder: rfc1812 was written by a Cisco employee,
139 	   so why the hell do these idiots break standards established
140 	   by themselves???
141 	   */
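
	/* For reference (RFC 2784/2890), the GRE header words the comment
	 * above complains about:
	 *
	 *	word 0:  |C| |K|S| Reserved0 |Ver|   Protocol Type   |
	 *	word 1:  Checksum (if C set)  | Reserved1 (if C set) |
	 *	word 2:  Key (if K set)
	 *	word 3:  Sequence Number (if S set)
	 *
	 * With checksums enabled the key lands in the third 32-bit word,
	 * i.e. beyond the 8 payload bytes most routers quote in ICMP
	 * errors, so the key of the offending packet cannot be recovered.
	 */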
142 	struct net *net = dev_net(skb->dev);
143 	struct ip_tunnel_net *itn;
144 	const struct iphdr *iph;
145 	const int type = icmp_hdr(skb)->type;
146 	const int code = icmp_hdr(skb)->code;
147 	struct ip_tunnel *t;
148 
149 	switch (type) {
150 	default:
151 	case ICMP_PARAMETERPROB:
152 		return;
153 
154 	case ICMP_DEST_UNREACH:
155 		switch (code) {
156 		case ICMP_SR_FAILED:
157 		case ICMP_PORT_UNREACH:
158 			/* Impossible event. */
159 			return;
160 		default:
161 			/* All others are translated to HOST_UNREACH.
162 			   rfc2003 contains "deep thoughts" about NET_UNREACH,
163 			   I believe they are just ether pollution. --ANK
164 			 */
165 			break;
166 		}
167 		break;
168 
169 	case ICMP_TIME_EXCEEDED:
170 		if (code != ICMP_EXC_TTL)
171 			return;
172 		break;
173 
174 	case ICMP_REDIRECT:
175 		break;
176 	}
177 
178 	if (tpi->proto == htons(ETH_P_TEB))
179 		itn = net_generic(net, gre_tap_net_id);
180 	else
181 		itn = net_generic(net, ipgre_net_id);
182 
183 	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
184 	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
185 			     iph->daddr, iph->saddr, tpi->key);
186 
187 	if (!t)
188 		return;
189 
190 	if (t->parms.iph.daddr == 0 ||
191 	    ipv4_is_multicast(t->parms.iph.daddr))
192 		return;
193 
194 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
195 		return;
196 
197 	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
198 		t->err_count++;
199 	else
200 		t->err_count = 1;
201 	t->err_time = jiffies;
202 }
203 
204 static void gre_err(struct sk_buff *skb, u32 info)
205 {
206 	/* All the routers (except for Linux) return only
207 	 * 8 bytes of packet payload. It means that precise relaying of
208 	 * ICMP in the real Internet is absolutely infeasible.
209 	 *
210 	 * Moreover, Cisco "wise men" put the GRE key into the third word
211 	 * of the GRE header. It makes it impossible to maintain even
212 	 * soft state for keyed GRE tunnels with checksums enabled.
213 	 * Tell them "thank you".
214 	 *
215 	 * Well, I wonder: rfc1812 was written by a Cisco employee,
216 	 * so why the hell do these idiots break standards established
217 	 * by themselves???
218 	 */
219 
220 	const int type = icmp_hdr(skb)->type;
221 	const int code = icmp_hdr(skb)->code;
222 	struct tnl_ptk_info tpi;
223 	bool csum_err = false;
224 
225 	if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP)) < 0) {
226 		if (!csum_err)		/* bail unless it was just a csum error */
227 			return;
228 	}
229 
230 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
231 		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
232 				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
233 		return;
234 	}
235 	if (type == ICMP_REDIRECT) {
236 		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
237 			      IPPROTO_GRE, 0);
238 		return;
239 	}
240 
241 	ipgre_err(skb, info, &tpi);
242 }
243 
244 static __be64 key_to_tunnel_id(__be32 key)
245 {
246 #ifdef __BIG_ENDIAN
247 	return (__force __be64)((__force u32)key);
248 #else
249 	return (__force __be64)((__force u64)key << 32);
250 #endif
251 }
252 
253 /* Returns the least-significant 32 bits of a __be64. */
254 static __be32 tunnel_id_to_key(__be64 x)
255 {
256 #ifdef __BIG_ENDIAN
257 	return (__force __be32)x;
258 #else
259 	return (__force __be32)((__force u64)x >> 32);
260 #endif
261 }
262 
263 static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
264 		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
265 {
266 	struct metadata_dst *tun_dst = NULL;
267 	const struct iphdr *iph;
268 	struct ip_tunnel *tunnel;
269 
270 	iph = ip_hdr(skb);
271 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
272 				  iph->saddr, iph->daddr, tpi->key);
273 
274 	if (tunnel) {
275 		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
276 					   raw_proto, false) < 0)
277 			goto drop;
278 
279 		if (tunnel->dev->type != ARPHRD_NONE)
280 			skb_pop_mac_header(skb);
281 		else
282 			skb_reset_mac_header(skb);
283 		if (tunnel->collect_md) {
284 			__be16 flags;
285 			__be64 tun_id;
286 
287 			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
288 			tun_id = key_to_tunnel_id(tpi->key);
289 			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
290 			if (!tun_dst)
291 				return PACKET_REJECT;
292 		}
293 
294 		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
295 		return PACKET_RCVD;
296 	}
297 	return PACKET_NEXT;
298 
299 drop:
300 	kfree_skb(skb);
301 	return PACKET_RCVD;
302 }
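
/* Return-value convention used above (see ip_tunnels.h): PACKET_RCVD
 * means the skb was consumed (or dropped), PACKET_REJECT asks the
 * caller to signal an error, and PACKET_NEXT lets ipgre_rcv() below
 * retry the lookup against the other tunnel table (gretap vs. gre).
 */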
303 
304 static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
305 		     int hdr_len)
306 {
307 	struct net *net = dev_net(skb->dev);
308 	struct ip_tunnel_net *itn;
309 	int res;
310 
311 	if (tpi->proto == htons(ETH_P_TEB))
312 		itn = net_generic(net, gre_tap_net_id);
313 	else
314 		itn = net_generic(net, ipgre_net_id);
315 
316 	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
317 	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
318 		/* ipgre tunnels in collect metadata mode should also
319 		 * receive ETH_P_TEB traffic.
320 		 */
321 		itn = net_generic(net, ipgre_net_id);
322 		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
323 	}
324 	return res;
325 }
326 
327 static int gre_rcv(struct sk_buff *skb)
328 {
329 	struct tnl_ptk_info tpi;
330 	bool csum_err = false;
331 	int hdr_len;
332 
333 #ifdef CONFIG_NET_IPGRE_BROADCAST
334 	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
335 		/* Looped back packet, drop it! */
336 		if (rt_is_output_route(skb_rtable(skb)))
337 			goto drop;
338 	}
339 #endif
340 
341 	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP));
342 	if (hdr_len < 0)
343 		goto drop;
344 
345 	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
346 		return 0;
347 
348 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
349 drop:
350 	kfree_skb(skb);
351 	return 0;
352 }
353 
354 static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
355 		       const struct iphdr *tnl_params,
356 		       __be16 proto)
357 {
358 	struct ip_tunnel *tunnel = netdev_priv(dev);
359 
360 	if (tunnel->parms.o_flags & TUNNEL_SEQ)
361 		tunnel->o_seqno++;
362 
363 	/* Push GRE header. */
364 	gre_build_header(skb, tunnel->tun_hlen,
365 			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
366 			 htonl(tunnel->o_seqno));
367 
368 	skb_set_inner_protocol(skb, proto);
369 	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
370 }
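
/* After __gre_xmit() the frame handed to ip_tunnel_xmit() looks like:
 *
 *	| outer IP header | GRE header (4..16 bytes) | inner packet |
 *
 * where gre_build_header() sized the GRE part from o_flags: 4 bytes of
 * base header plus 4 more for each of TUNNEL_CSUM, TUNNEL_KEY and
 * TUNNEL_SEQ.
 */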
371 
372 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
373 {
374 	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
375 }
376 
377 static struct rtable *gre_get_rt(struct sk_buff *skb,
378 				 struct net_device *dev,
379 				 struct flowi4 *fl,
380 				 const struct ip_tunnel_key *key)
381 {
382 	struct net *net = dev_net(dev);
383 
384 	memset(fl, 0, sizeof(*fl));
385 	fl->daddr = key->u.ipv4.dst;
386 	fl->saddr = key->u.ipv4.src;
387 	fl->flowi4_tos = RT_TOS(key->tos);
388 	fl->flowi4_mark = skb->mark;
389 	fl->flowi4_proto = IPPROTO_GRE;
390 
391 	return ip_route_output_key(net, fl);
392 }
393 
394 static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
395 			__be16 proto)
396 {
397 	struct ip_tunnel_info *tun_info;
398 	const struct ip_tunnel_key *key;
399 	struct rtable *rt = NULL;
400 	struct flowi4 fl;
401 	int min_headroom;
402 	int tunnel_hlen;
403 	__be16 df, flags;
404 	bool use_cache;
405 	int err;
406 
407 	tun_info = skb_tunnel_info(skb);
408 	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
409 		     ip_tunnel_info_af(tun_info) != AF_INET))
410 		goto err_free_skb;
411 
412 	key = &tun_info->key;
413 	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
414 	if (use_cache)
415 		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr);
416 	if (!rt) {
417 		rt = gre_get_rt(skb, dev, &fl, key);
418 		if (IS_ERR(rt))
419 			goto err_free_skb;
420 		if (use_cache)
421 			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
422 					  fl.saddr);
423 	}
424 
425 	tunnel_hlen = gre_calc_hlen(key->tun_flags);
426 
427 	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
428 			+ tunnel_hlen + sizeof(struct iphdr);
429 	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
430 		int head_delta = SKB_DATA_ALIGN(min_headroom -
431 						skb_headroom(skb) +
432 						16);
433 		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
434 				       0, GFP_ATOMIC);
435 		if (unlikely(err))
436 			goto err_free_rt;
437 	}
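
	/* Worked example, assuming a plain Ethernet underlay: with
	 * LL_RESERVED_SPACE() of 16, no extra dst header_len, an 8-byte
	 * keyed GRE header and a 20-byte outer IPv4 header, min_headroom
	 * is 16 + 0 + 8 + 20 = 44; the expansion above additionally pads
	 * by 16 and rounds via SKB_DATA_ALIGN().
	 */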
438 
439 	/* Push Tunnel header. */
440 	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
441 		goto err_free_rt;
442 
443 	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
444 	gre_build_header(skb, tunnel_hlen, flags, proto,
445 			 tunnel_id_to_key(tun_info->key.tun_id), 0);
446 
447 	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
448 
449 	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
450 		      key->tos, key->ttl, df, false);
451 	return;
452 
453 err_free_rt:
454 	ip_rt_put(rt);
455 err_free_skb:
456 	kfree_skb(skb);
457 	dev->stats.tx_dropped++;
458 }
459 
460 static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
461 {
462 	struct ip_tunnel_info *info = skb_tunnel_info(skb);
463 	struct rtable *rt;
464 	struct flowi4 fl4;
465 
466 	if (ip_tunnel_info_af(info) != AF_INET)
467 		return -EINVAL;
468 
469 	rt = gre_get_rt(skb, dev, &fl4, &info->key);
470 	if (IS_ERR(rt))
471 		return PTR_ERR(rt);
472 
473 	ip_rt_put(rt);
474 	info->key.u.ipv4.src = fl4.saddr;
475 	return 0;
476 }
477 
478 static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
479 			      struct net_device *dev)
480 {
481 	struct ip_tunnel *tunnel = netdev_priv(dev);
482 	const struct iphdr *tnl_params;
483 
484 	if (tunnel->collect_md) {
485 		gre_fb_xmit(skb, dev, skb->protocol);
486 		return NETDEV_TX_OK;
487 	}
488 
489 	if (dev->header_ops) {
490 		/* Need space for new headers */
491 		if (skb_cow_head(skb, dev->needed_headroom -
492 				      (tunnel->hlen + sizeof(struct iphdr))))
493 			goto free_skb;
494 
495 		tnl_params = (const struct iphdr *)skb->data;
496 
497 		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
498 		 * to gre header.
499 		 */
500 		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
501 		skb_reset_mac_header(skb);
502 	} else {
503 		if (skb_cow_head(skb, dev->needed_headroom))
504 			goto free_skb;
505 
506 		tnl_params = &tunnel->parms.iph;
507 	}
508 
509 	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
510 		goto free_skb;
511 
512 	__gre_xmit(skb, dev, tnl_params, skb->protocol);
513 	return NETDEV_TX_OK;
514 
515 free_skb:
516 	kfree_skb(skb);
517 	dev->stats.tx_dropped++;
518 	return NETDEV_TX_OK;
519 }
520 
521 static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
522 				struct net_device *dev)
523 {
524 	struct ip_tunnel *tunnel = netdev_priv(dev);
525 
526 	if (tunnel->collect_md) {
527 		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
528 		return NETDEV_TX_OK;
529 	}
530 
531 	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
532 		goto free_skb;
533 
534 	if (skb_cow_head(skb, dev->needed_headroom))
535 		goto free_skb;
536 
537 	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
538 	return NETDEV_TX_OK;
539 
540 free_skb:
541 	kfree_skb(skb);
542 	dev->stats.tx_dropped++;
543 	return NETDEV_TX_OK;
544 }
545 
546 static int ipgre_tunnel_ioctl(struct net_device *dev,
547 			      struct ifreq *ifr, int cmd)
548 {
549 	int err;
550 	struct ip_tunnel_parm p;
551 
552 	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
553 		return -EFAULT;
554 	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
555 		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
556 		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
557 		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
558 			return -EINVAL;
559 	}
560 	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
561 	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);
562 
563 	err = ip_tunnel_ioctl(dev, &p, cmd);
564 	if (err)
565 		return err;
566 
567 	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
568 	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);
569 
570 	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
571 		return -EFAULT;
572 	return 0;
573 }
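
/* Illustrative userspace counterpart of the ioctl above (a sketch, not
 * from this file): ip tunnel(8) passes a struct ip_tunnel_parm through
 * ifr_ifru.ifru_data, e.g.
 *
 *	struct ip_tunnel_parm p = {};
 *	struct ifreq ifr = {};
 *
 *	strncpy(ifr.ifr_name, "gre0", IFNAMSIZ);
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCGETTUNNEL, &ifr);   (fd being any AF_INET socket)
 *
 * SIOCADDTUNNEL and SIOCCHGTUNNEL take the same structure and are
 * subject to the sanity checks at the top of ipgre_tunnel_ioctl().
 */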
574 
575 /* Nice toy. Unfortunately, useless in real life :-)
576    It allows one to construct a virtual multiprotocol broadcast "LAN"
577    over the Internet, provided multicast routing is tuned.
578 
579 
580    I have no idea whether this bicycle was invented before me,
581    so I had to set ARPHRD_IPGRE to a random value.
582    I have the impression that Cisco could make something similar,
583    but this feature is apparently missing in IOS<=11.2(8).
584 
585    I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
586    with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
587 
588    ping -t 255 224.66.66.66
589 
590    If nobody answers, mbone does not work.
591 
592    ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
593    ip addr add 10.66.66.<somewhat>/24 dev Universe
594    ifconfig Universe up
595    ifconfig Universe add fe80::<Your_real_addr>/10
596    ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
597    ftp 10.66.66.66
598    ...
599    ftp fec0:6666:6666::193.233.7.65
600    ...
601  */
602 static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
603 			unsigned short type,
604 			const void *daddr, const void *saddr, unsigned int len)
605 {
606 	struct ip_tunnel *t = netdev_priv(dev);
607 	struct iphdr *iph;
608 	struct gre_base_hdr *greh;
609 
610 	iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph));
611 	greh = (struct gre_base_hdr *)(iph+1);
612 	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
613 	greh->protocol = htons(type);
614 
615 	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
616 
617 	/* Set the source hardware address. */
618 	if (saddr)
619 		memcpy(&iph->saddr, saddr, 4);
620 	if (daddr)
621 		memcpy(&iph->daddr, daddr, 4);
622 	if (iph->daddr)
623 		return t->hlen + sizeof(*iph);
624 
625 	return -(t->hlen + sizeof(*iph));
626 }
627 
628 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
629 {
630 	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
631 	memcpy(haddr, &iph->saddr, 4);
632 	return 4;
633 }
634 
635 static const struct header_ops ipgre_header_ops = {
636 	.create	= ipgre_header,
637 	.parse	= ipgre_header_parse,
638 };
639 
640 #ifdef CONFIG_NET_IPGRE_BROADCAST
641 static int ipgre_open(struct net_device *dev)
642 {
643 	struct ip_tunnel *t = netdev_priv(dev);
644 
645 	if (ipv4_is_multicast(t->parms.iph.daddr)) {
646 		struct flowi4 fl4;
647 		struct rtable *rt;
648 
649 		rt = ip_route_output_gre(t->net, &fl4,
650 					 t->parms.iph.daddr,
651 					 t->parms.iph.saddr,
652 					 t->parms.o_key,
653 					 RT_TOS(t->parms.iph.tos),
654 					 t->parms.link);
655 		if (IS_ERR(rt))
656 			return -EADDRNOTAVAIL;
657 		dev = rt->dst.dev;
658 		ip_rt_put(rt);
659 		if (!__in_dev_get_rtnl(dev))
660 			return -EADDRNOTAVAIL;
661 		t->mlink = dev->ifindex;
662 		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
663 	}
664 	return 0;
665 }
666 
667 static int ipgre_close(struct net_device *dev)
668 {
669 	struct ip_tunnel *t = netdev_priv(dev);
670 
671 	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
672 		struct in_device *in_dev;
673 		in_dev = inetdev_by_index(t->net, t->mlink);
674 		if (in_dev)
675 			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
676 	}
677 	return 0;
678 }
679 #endif
680 
681 static const struct net_device_ops ipgre_netdev_ops = {
682 	.ndo_init		= ipgre_tunnel_init,
683 	.ndo_uninit		= ip_tunnel_uninit,
684 #ifdef CONFIG_NET_IPGRE_BROADCAST
685 	.ndo_open		= ipgre_open,
686 	.ndo_stop		= ipgre_close,
687 #endif
688 	.ndo_start_xmit		= ipgre_xmit,
689 	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
690 	.ndo_change_mtu		= ip_tunnel_change_mtu,
691 	.ndo_get_stats64	= ip_tunnel_get_stats64,
692 	.ndo_get_iflink		= ip_tunnel_get_iflink,
693 };
694 
695 #define GRE_FEATURES (NETIF_F_SG |		\
696 		      NETIF_F_FRAGLIST |	\
697 		      NETIF_F_HIGHDMA |		\
698 		      NETIF_F_HW_CSUM)
699 
700 static void ipgre_tunnel_setup(struct net_device *dev)
701 {
702 	dev->netdev_ops		= &ipgre_netdev_ops;
703 	dev->type		= ARPHRD_IPGRE;
704 	ip_tunnel_setup(dev, ipgre_net_id);
705 }
706 
707 static void __gre_tunnel_init(struct net_device *dev)
708 {
709 	struct ip_tunnel *tunnel;
710 	int t_hlen;
711 
712 	tunnel = netdev_priv(dev);
713 	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
714 	tunnel->parms.iph.protocol = IPPROTO_GRE;
715 
716 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
717 
718 	t_hlen = tunnel->hlen + sizeof(struct iphdr);
719 
720 	dev->needed_headroom	= LL_MAX_HEADER + t_hlen + 4;
721 	dev->mtu		= ETH_DATA_LEN - t_hlen - 4;
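	/* e.g. for a flag-less tunnel without extra encapsulation:
	 * tun_hlen = 4, t_hlen = 4 + 20 = 24, so needed_headroom is
	 * LL_MAX_HEADER + 28 and mtu = 1500 - 24 - 4 = 1472.
	 */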
722 
723 	dev->features		|= GRE_FEATURES;
724 	dev->hw_features	|= GRE_FEATURES;
725 
726 	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
727 		/* TCP offload with GRE SEQ is not supported, nor
728 		 * can we support 2 levels of outer headers requiring
729 		 * an update.
730 		 */
731 		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
732 		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
733 			dev->features    |= NETIF_F_GSO_SOFTWARE;
734 			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
735 		}
736 
737 		/* Can use a lockless transmit, unless we generate
738 		 * output sequences
739 		 */
740 		dev->features |= NETIF_F_LLTX;
741 	}
742 }
743 
744 static int ipgre_tunnel_init(struct net_device *dev)
745 {
746 	struct ip_tunnel *tunnel = netdev_priv(dev);
747 	struct iphdr *iph = &tunnel->parms.iph;
748 
749 	__gre_tunnel_init(dev);
750 
751 	memcpy(dev->dev_addr, &iph->saddr, 4);
752 	memcpy(dev->broadcast, &iph->daddr, 4);
753 
754 	dev->flags		= IFF_NOARP;
755 	netif_keep_dst(dev);
756 	dev->addr_len		= 4;
757 
758 	if (iph->daddr && !tunnel->collect_md) {
759 #ifdef CONFIG_NET_IPGRE_BROADCAST
760 		if (ipv4_is_multicast(iph->daddr)) {
761 			if (!iph->saddr)
762 				return -EINVAL;
763 			dev->flags = IFF_BROADCAST;
764 			dev->header_ops = &ipgre_header_ops;
765 		}
766 #endif
767 	} else if (!tunnel->collect_md) {
768 		dev->header_ops = &ipgre_header_ops;
769 	}
770 
771 	return ip_tunnel_init(dev);
772 }
773 
774 static const struct gre_protocol ipgre_protocol = {
775 	.handler     = gre_rcv,
776 	.err_handler = gre_err,
777 };
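
/* The pair above is registered with gre_add_protocol(GREPROTO_CISCO)
 * in ipgre_init() below: the demux for IP protocol 47 then calls
 * gre_rcv() for inbound GRE and gre_err() for ICMP errors that quote
 * a GRE packet.
 */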
778 
779 static int __net_init ipgre_init_net(struct net *net)
780 {
781 	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
782 }
783 
784 static void __net_exit ipgre_exit_net(struct net *net)
785 {
786 	struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);
787 	ip_tunnel_delete_net(itn, &ipgre_link_ops);
788 }
789 
790 static struct pernet_operations ipgre_net_ops = {
791 	.init = ipgre_init_net,
792 	.exit = ipgre_exit_net,
793 	.id   = &ipgre_net_id,
794 	.size = sizeof(struct ip_tunnel_net),
795 };
796 
797 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
798 {
799 	__be16 flags;
800 
801 	if (!data)
802 		return 0;
803 
804 	flags = 0;
805 	if (data[IFLA_GRE_IFLAGS])
806 		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
807 	if (data[IFLA_GRE_OFLAGS])
808 		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
809 	if (flags & (GRE_VERSION|GRE_ROUTING))
810 		return -EINVAL;
811 
812 	if (data[IFLA_GRE_COLLECT_METADATA] &&
813 	    data[IFLA_GRE_ENCAP_TYPE] &&
814 	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
815 		return -EINVAL;
816 
817 	return 0;
818 }
819 
820 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
821 {
822 	__be32 daddr;
823 
824 	if (tb[IFLA_ADDRESS]) {
825 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
826 			return -EINVAL;
827 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
828 			return -EADDRNOTAVAIL;
829 	}
830 
831 	if (!data)
832 		goto out;
833 
834 	if (data[IFLA_GRE_REMOTE]) {
835 		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
836 		if (!daddr)
837 			return -EINVAL;
838 	}
839 
840 out:
841 	return ipgre_tunnel_validate(tb, data);
842 }
843 
844 static void ipgre_netlink_parms(struct net_device *dev,
845 				struct nlattr *data[],
846 				struct nlattr *tb[],
847 				struct ip_tunnel_parm *parms)
848 {
849 	memset(parms, 0, sizeof(*parms));
850 
851 	parms->iph.protocol = IPPROTO_GRE;
852 
853 	if (!data)
854 		return;
855 
856 	if (data[IFLA_GRE_LINK])
857 		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
858 
859 	if (data[IFLA_GRE_IFLAGS])
860 		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));
861 
862 	if (data[IFLA_GRE_OFLAGS])
863 		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));
864 
865 	if (data[IFLA_GRE_IKEY])
866 		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
867 
868 	if (data[IFLA_GRE_OKEY])
869 		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
870 
871 	if (data[IFLA_GRE_LOCAL])
872 		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
873 
874 	if (data[IFLA_GRE_REMOTE])
875 		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
876 
877 	if (data[IFLA_GRE_TTL])
878 		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
879 
880 	if (data[IFLA_GRE_TOS])
881 		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
882 
883 	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
884 		parms->iph.frag_off = htons(IP_DF);
885 
886 	if (data[IFLA_GRE_COLLECT_METADATA]) {
887 		struct ip_tunnel *t = netdev_priv(dev);
888 
889 		t->collect_md = true;
890 		if (dev->type == ARPHRD_IPGRE)
891 			dev->type = ARPHRD_NONE;
892 	}
893 }
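
/* For orientation, an illustrative iproute2 command (not from this
 * file) and the attributes it fills in:
 *
 *	ip link add gre1 type gre remote 198.51.100.1 local 198.51.100.2 ttl 64 key 10
 *
 * sets IFLA_GRE_REMOTE, IFLA_GRE_LOCAL and IFLA_GRE_TTL, and "key"
 * populates both IFLA_GRE_IKEY and IFLA_GRE_OKEY.
 */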
894 
895 /* This function returns true when ENCAP attributes are present in the netlink message */
896 static bool ipgre_netlink_encap_parms(struct nlattr *data[],
897 				      struct ip_tunnel_encap *ipencap)
898 {
899 	bool ret = false;
900 
901 	memset(ipencap, 0, sizeof(*ipencap));
902 
903 	if (!data)
904 		return ret;
905 
906 	if (data[IFLA_GRE_ENCAP_TYPE]) {
907 		ret = true;
908 		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
909 	}
910 
911 	if (data[IFLA_GRE_ENCAP_FLAGS]) {
912 		ret = true;
913 		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
914 	}
915 
916 	if (data[IFLA_GRE_ENCAP_SPORT]) {
917 		ret = true;
918 		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
919 	}
920 
921 	if (data[IFLA_GRE_ENCAP_DPORT]) {
922 		ret = true;
923 		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
924 	}
925 
926 	return ret;
927 }
928 
929 static int gre_tap_init(struct net_device *dev)
930 {
931 	__gre_tunnel_init(dev);
932 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
933 
934 	return ip_tunnel_init(dev);
935 }
936 
937 static const struct net_device_ops gre_tap_netdev_ops = {
938 	.ndo_init		= gre_tap_init,
939 	.ndo_uninit		= ip_tunnel_uninit,
940 	.ndo_start_xmit		= gre_tap_xmit,
941 	.ndo_set_mac_address	= eth_mac_addr,
942 	.ndo_validate_addr	= eth_validate_addr,
943 	.ndo_change_mtu		= ip_tunnel_change_mtu,
944 	.ndo_get_stats64	= ip_tunnel_get_stats64,
945 	.ndo_get_iflink		= ip_tunnel_get_iflink,
946 	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
947 };
948 
949 static void ipgre_tap_setup(struct net_device *dev)
950 {
951 	ether_setup(dev);
952 	dev->netdev_ops	= &gre_tap_netdev_ops;
953 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
954 	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
955 	ip_tunnel_setup(dev, gre_tap_net_id);
956 }
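
/* gretap devices carry Ethernet frames over GRE (ETH_P_TEB). An
 * illustrative setup, not from this file:
 *
 *	ip link add gretap1 type gretap remote 203.0.113.1 local 203.0.113.2
 *	ip link set gretap1 up
 *
 * or, for flow-based (collect_md) operation:
 *
 *	ip link add gretap1 type gretap external
 */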
957 
958 static int ipgre_newlink(struct net *src_net, struct net_device *dev,
959 			 struct nlattr *tb[], struct nlattr *data[])
960 {
961 	struct ip_tunnel_parm p;
962 	struct ip_tunnel_encap ipencap;
963 
964 	if (ipgre_netlink_encap_parms(data, &ipencap)) {
965 		struct ip_tunnel *t = netdev_priv(dev);
966 		int err = ip_tunnel_encap_setup(t, &ipencap);
967 
968 		if (err < 0)
969 			return err;
970 	}
971 
972 	ipgre_netlink_parms(dev, data, tb, &p);
973 	return ip_tunnel_newlink(dev, tb, &p);
974 }
975 
976 static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
977 			    struct nlattr *data[])
978 {
979 	struct ip_tunnel_parm p;
980 	struct ip_tunnel_encap ipencap;
981 
982 	if (ipgre_netlink_encap_parms(data, &ipencap)) {
983 		struct ip_tunnel *t = netdev_priv(dev);
984 		int err = ip_tunnel_encap_setup(t, &ipencap);
985 
986 		if (err < 0)
987 			return err;
988 	}
989 
990 	ipgre_netlink_parms(dev, data, tb, &p);
991 	return ip_tunnel_changelink(dev, tb, &p);
992 }
993 
994 static size_t ipgre_get_size(const struct net_device *dev)
995 {
996 	return
997 		/* IFLA_GRE_LINK */
998 		nla_total_size(4) +
999 		/* IFLA_GRE_IFLAGS */
1000 		nla_total_size(2) +
1001 		/* IFLA_GRE_OFLAGS */
1002 		nla_total_size(2) +
1003 		/* IFLA_GRE_IKEY */
1004 		nla_total_size(4) +
1005 		/* IFLA_GRE_OKEY */
1006 		nla_total_size(4) +
1007 		/* IFLA_GRE_LOCAL */
1008 		nla_total_size(4) +
1009 		/* IFLA_GRE_REMOTE */
1010 		nla_total_size(4) +
1011 		/* IFLA_GRE_TTL */
1012 		nla_total_size(1) +
1013 		/* IFLA_GRE_TOS */
1014 		nla_total_size(1) +
1015 		/* IFLA_GRE_PMTUDISC */
1016 		nla_total_size(1) +
1017 		/* IFLA_GRE_ENCAP_TYPE */
1018 		nla_total_size(2) +
1019 		/* IFLA_GRE_ENCAP_FLAGS */
1020 		nla_total_size(2) +
1021 		/* IFLA_GRE_ENCAP_SPORT */
1022 		nla_total_size(2) +
1023 		/* IFLA_GRE_ENCAP_DPORT */
1024 		nla_total_size(2) +
1025 		/* IFLA_GRE_COLLECT_METADATA */
1026 		nla_total_size(0) +
1027 		0;
1028 }
1029 
1030 static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1031 {
1032 	struct ip_tunnel *t = netdev_priv(dev);
1033 	struct ip_tunnel_parm *p = &t->parms;
1034 
1035 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1036 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
1037 			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
1038 	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
1039 			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
1040 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1041 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1042 	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1043 	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1044 	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1045 	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1046 	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1047 		       !!(p->iph.frag_off & htons(IP_DF))))
1048 		goto nla_put_failure;
1049 
1050 	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
1051 			t->encap.type) ||
1052 	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
1053 			 t->encap.sport) ||
1054 	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
1055 			 t->encap.dport) ||
1056 	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
1057 			t->encap.flags))
1058 		goto nla_put_failure;
1059 
1060 	if (t->collect_md) {
1061 		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
1062 			goto nla_put_failure;
1063 	}
1064 
1065 	return 0;
1066 
1067 nla_put_failure:
1068 	return -EMSGSIZE;
1069 }
1070 
1071 static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1072 	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
1073 	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
1074 	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
1075 	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
1076 	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
1077 	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
1078 	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
1079 	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
1080 	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
1081 	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
1082 	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
1083 	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
1084 	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
1085 	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
1086 	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
1087 };
1088 
1089 static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1090 	.kind		= "gre",
1091 	.maxtype	= IFLA_GRE_MAX,
1092 	.policy		= ipgre_policy,
1093 	.priv_size	= sizeof(struct ip_tunnel),
1094 	.setup		= ipgre_tunnel_setup,
1095 	.validate	= ipgre_tunnel_validate,
1096 	.newlink	= ipgre_newlink,
1097 	.changelink	= ipgre_changelink,
1098 	.dellink	= ip_tunnel_dellink,
1099 	.get_size	= ipgre_get_size,
1100 	.fill_info	= ipgre_fill_info,
1101 	.get_link_net	= ip_tunnel_get_link_net,
1102 };
1103 
1104 static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1105 	.kind		= "gretap",
1106 	.maxtype	= IFLA_GRE_MAX,
1107 	.policy		= ipgre_policy,
1108 	.priv_size	= sizeof(struct ip_tunnel),
1109 	.setup		= ipgre_tap_setup,
1110 	.validate	= ipgre_tap_validate,
1111 	.newlink	= ipgre_newlink,
1112 	.changelink	= ipgre_changelink,
1113 	.dellink	= ip_tunnel_dellink,
1114 	.get_size	= ipgre_get_size,
1115 	.fill_info	= ipgre_fill_info,
1116 	.get_link_net	= ip_tunnel_get_link_net,
1117 };
1118 
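/* Create a flow-based (collect_md) gretap device. Callers such as
 * openvswitch steer all tunnel parameters per packet through metadata
 * dst entries instead of per-netdevice configuration.
 */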
1119 struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1120 					u8 name_assign_type)
1121 {
1122 	struct nlattr *tb[IFLA_MAX + 1];
1123 	struct net_device *dev;
1124 	struct ip_tunnel *t;
1125 	int err;
1126 
1127 	memset(&tb, 0, sizeof(tb));
1128 
1129 	dev = rtnl_create_link(net, name, name_assign_type,
1130 			       &ipgre_tap_ops, tb);
1131 	if (IS_ERR(dev))
1132 		return dev;
1133 
1134 	/* Configure flow based GRE device. */
1135 	t = netdev_priv(dev);
1136 	t->collect_md = true;
1137 
1138 	err = ipgre_newlink(net, dev, tb, NULL);
1139 	if (err < 0)
1140 		goto out;
1141 
1142 	/* openvswitch users expect packet sizes to be unrestricted,
1143 	 * so set the largest MTU we can.
1144 	 */
1145 	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
1146 	if (err)
1147 		goto out;
1148 
1149 	return dev;
1150 out:
1151 	free_netdev(dev);
1152 	return ERR_PTR(err);
1153 }
1154 EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
1155 
1156 static int __net_init ipgre_tap_init_net(struct net *net)
1157 {
1158 	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
1159 }
1160 
1161 static void __net_exit ipgre_tap_exit_net(struct net *net)
1162 {
1163 	struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);
1164 	ip_tunnel_delete_net(itn, &ipgre_tap_ops);
1165 }
1166 
1167 static struct pernet_operations ipgre_tap_net_ops = {
1168 	.init = ipgre_tap_init_net,
1169 	.exit = ipgre_tap_exit_net,
1170 	.id   = &gre_tap_net_id,
1171 	.size = sizeof(struct ip_tunnel_net),
1172 };
1173 
1174 static int __init ipgre_init(void)
1175 {
1176 	int err;
1177 
1178 	pr_info("GRE over IPv4 tunneling driver\n");
1179 
1180 	err = register_pernet_device(&ipgre_net_ops);
1181 	if (err < 0)
1182 		return err;
1183 
1184 	err = register_pernet_device(&ipgre_tap_net_ops);
1185 	if (err < 0)
1186 		goto pnet_tap_failed;
1187 
1188 	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1189 	if (err < 0) {
1190 		pr_info("%s: can't add protocol\n", __func__);
1191 		goto add_proto_failed;
1192 	}
1193 
1194 	err = rtnl_link_register(&ipgre_link_ops);
1195 	if (err < 0)
1196 		goto rtnl_link_failed;
1197 
1198 	err = rtnl_link_register(&ipgre_tap_ops);
1199 	if (err < 0)
1200 		goto tap_ops_failed;
1201 
1202 	return 0;
1203 
1204 tap_ops_failed:
1205 	rtnl_link_unregister(&ipgre_link_ops);
1206 rtnl_link_failed:
1207 	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1208 add_proto_failed:
1209 	unregister_pernet_device(&ipgre_tap_net_ops);
1210 pnet_tap_failed:
1211 	unregister_pernet_device(&ipgre_net_ops);
1212 	return err;
1213 }
1214 
1215 static void __exit ipgre_fini(void)
1216 {
1217 	rtnl_link_unregister(&ipgre_tap_ops);
1218 	rtnl_link_unregister(&ipgre_link_ops);
1219 	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1220 	unregister_pernet_device(&ipgre_tap_net_ops);
1221 	unregister_pernet_device(&ipgre_net_ops);
1222 }
1223 
1224 module_init(ipgre_init);
1225 module_exit(ipgre_fini);
1226 MODULE_LICENSE("GPL");
1227 MODULE_ALIAS_RTNL_LINK("gre");
1228 MODULE_ALIAS_RTNL_LINK("gretap");
1229 MODULE_ALIAS_NETDEV("gre0");
1230 MODULE_ALIAS_NETDEV("gretap0");
1231