xref: /openbmc/linux/net/ipv4/ip_gre.c (revision ec33fbd5)
1 /*
2  *	Linux NET3:	GRE over IP protocol decoder.
3  *
4  *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
5  *
6  *	This program is free software; you can redistribute it and/or
7  *	modify it under the terms of the GNU General Public License
8  *	as published by the Free Software Foundation; either version
9  *	2 of the License, or (at your option) any later version.
10  *
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/capability.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <linux/uaccess.h>
21 #include <linux/skbuff.h>
22 #include <linux/netdevice.h>
23 #include <linux/in.h>
24 #include <linux/tcp.h>
25 #include <linux/udp.h>
26 #include <linux/if_arp.h>
27 #include <linux/if_vlan.h>
28 #include <linux/init.h>
29 #include <linux/in6.h>
30 #include <linux/inetdevice.h>
31 #include <linux/igmp.h>
32 #include <linux/netfilter_ipv4.h>
33 #include <linux/etherdevice.h>
34 #include <linux/if_ether.h>
35 
36 #include <net/sock.h>
37 #include <net/ip.h>
38 #include <net/icmp.h>
39 #include <net/protocol.h>
40 #include <net/ip_tunnels.h>
41 #include <net/arp.h>
42 #include <net/checksum.h>
43 #include <net/dsfield.h>
44 #include <net/inet_ecn.h>
45 #include <net/xfrm.h>
46 #include <net/net_namespace.h>
47 #include <net/netns/generic.h>
48 #include <net/rtnetlink.h>
49 #include <net/gre.h>
50 #include <net/dst_metadata.h>
51 
52 /*
53    Problems & solutions
54    --------------------
55 
56    1. The most important issue is detecting local dead loops.
57    They would cause complete host lockup in transmit, which
58    would be "resolved" by stack overflow or, if queueing is enabled,
59    with infinite looping in net_bh.
60 
61    We cannot track such dead loops during route installation,
62    it is infeasible task. The most general solutions would be
63    to keep skb->encapsulation counter (sort of local ttl),
64    and silently drop packet when it expires. It is a good
65    solution, but it supposes maintaining new variable in ALL
66    skb, even if no tunneling is used.
67 
68    Current solution: xmit_recursion breaks dead loops. This is a percpu
69    counter, since when we enter the first ndo_xmit(), cpu migration is
70    forbidden. We force an exit if this counter reaches RECURSION_LIMIT
71 
72    2. Networking dead loops would not kill routers, but would really
73    kill network. IP hop limit plays role of "t->recursion" in this case,
74    if we copy it from packet being encapsulated to upper header.
75    It is very good solution, but it introduces two problems:
76 
77    - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
78      do not work over tunnels.
79    - traceroute does not work. I planned to relay ICMP from tunnel,
80      so that this problem would be solved and traceroute output
81      would even more informative. This idea appeared to be wrong:
82      only Linux complies to rfc1812 now (yes, guys, Linux is the only
83      true router now :-)), all routers (at least, in neighbourhood of mine)
84      return only 8 bytes of payload. It is the end.
85 
86    Hence, if we want that OSPF worked or traceroute said something reasonable,
87    we should search for another solution.
88 
89    One of them is to parse packet trying to detect inner encapsulation
90    made by our node. It is difficult or even impossible, especially,
91    taking into account fragmentation. TO be short, ttl is not solution at all.
92 
93    Current solution: The solution was UNEXPECTEDLY SIMPLE.
94    We force DF flag on tunnels with preconfigured hop limit,
95    that is ALL. :-) Well, it does not remove the problem completely,
96    but exponential growth of network traffic is changed to linear
97    (branches, that exceed pmtu are pruned) and tunnel mtu
98    rapidly degrades to value <68, where looping stops.
99    Yes, it is not good if there exists a router in the loop,
100    which does not force DF, even when encapsulating packets have DF set.
101    But it is not our problem! Nobody could accuse us, we made
102    all that we could make. Even if it is your gated who injected
103    fatal route to network, even if it were you who configured
104    fatal static route: you are innocent. :-)
105 
106    Alexey Kuznetsov.
107  */
108 
109 static bool log_ecn_error = true;
110 module_param(log_ecn_error, bool, 0644);
111 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
112 
113 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
114 static int ipgre_tunnel_init(struct net_device *dev);
115 
116 static unsigned int ipgre_net_id __read_mostly;
117 static unsigned int gre_tap_net_id __read_mostly;
118 
/* Process an ICMP error that refers to a locally terminated GRE tunnel.
 *
 * @skb:  the ICMP error packet; icmp_hdr(skb) is valid and the payload
 *        quotes the (truncated) original outer IP + GRE headers.
 * @info: ICMP info word (PMTU/redirect are handled by the caller).
 * @tpi:  GRE header fields already parsed by gre_err().
 *
 * Looks up the tunnel from the quoted outer header and records the error
 * in t->err_count/err_time for rate-limited reporting on transmit.
 */
static void ipgre_err(struct sk_buff *skb, u32 info,
		      const struct tnl_ptk_info *tpi)
{

	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means, that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco "wise men" put GRE key to the third word
	   in GRE header. It makes impossible maintaining even soft
	   state for keyed GRE tunnels with enabled checksum. Tell
	   them "thank you".

	   Well, I wonder, rfc1812 was written by Cisco employee,
	   what the hell these idiots break standards established
	   by themselves???
	   */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	unsigned int data_len = 0;
	struct ip_tunnel *t;

	/* Only a subset of ICMP types/codes is worth acting upon. */
	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
		break;

	case ICMP_REDIRECT:
		break;
	}

	/* gretap and layer-3 gre devices live in separate per-netns tables. */
	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	/* The quoted outer IP header follows the ICMP header directly. */
	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

	if (!t)
		return;

#if IS_ENABLED(CONFIG_IPV6)
       if (tpi->proto == htons(ETH_P_IPV6) &&
           !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
				       type, data_len))
               return;
#endif

	/* No fixed peer (wildcard or multicast destination): nobody to
	 * attribute the error to.
	 */
	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	/* ttl == 0 presumably means "inherit TTL"; a TTL-exceeded error then
	 * likely reflects the inner packet's own TTL (e.g. traceroute), not
	 * a tunnel fault — TODO confirm against ip_tunnel core semantics.
	 */
	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	/* Accumulate errors inside the IPTUNNEL_ERR_TIMEO window, otherwise
	 * restart the count; the transmit path consumes these counters.
	 */
	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}
206 
/* ICMP error handler registered for IPPROTO_GRE (see ipgre_protocol).
 *
 * Parses the GRE header quoted inside the ICMP error, handles PMTU
 * updates and redirects directly, and hands everything else to
 * ipgre_err() for per-tunnel error accounting.
 */
static void gre_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means, that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put GRE key to the third word
	 * in GRE header. It makes impossible maintaining even soft
	 * state for keyed
	 * GRE tunnels with enabled checksum. Tell them "thank you".
	 *
	 * Well, I wonder, rfc1812 was written by Cisco employee,
	 * what the hell these idiots break standards established
	 * by themselves???
	 */

	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;
	bool csum_err = false;

	/* A failed parse aborts processing unless it failed purely because
	 * of a GRE checksum error, in which case we still fall through.
	 */
	if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
			     iph->ihl * 4) < 0) {
		if (!csum_err)		/* ignore csum errors. */
			return;
	}

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		/* Path MTU discovery: propagate the new MTU to the route. */
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
			      IPPROTO_GRE, 0);
		return;
	}

	ipgre_err(skb, info, &tpi);
}
248 
/* Try to deliver a received GRE packet to a tunnel in table @itn.
 *
 * @raw_proto: passed through to __iptunnel_pull_header(); controls how
 *             the inner protocol is derived (see that helper).
 *
 * Returns PACKET_RCVD when the skb was consumed (delivered or dropped),
 * PACKET_REJECT on metadata-dst allocation failure, or PACKET_NEXT when
 * no matching tunnel exists so the caller may try another table.
 */
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	/* On receive the remote peer is the outer source address. */
	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		/* ARPHRD_NONE devices (collect_md) keep the mac header at
		 * the current data pointer; others discard it.
		 */
		if (tunnel->dev->type != ARPHRD_NONE)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);
		if (tunnel->collect_md) {
			__be16 flags;
			__be64 tun_id;

			/* Expose csum/key state as tunnel metadata for
			 * flow-based consumers (e.g. OVS, tc).
			 */
			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}
289 
290 static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
291 		     int hdr_len)
292 {
293 	struct net *net = dev_net(skb->dev);
294 	struct ip_tunnel_net *itn;
295 	int res;
296 
297 	if (tpi->proto == htons(ETH_P_TEB))
298 		itn = net_generic(net, gre_tap_net_id);
299 	else
300 		itn = net_generic(net, ipgre_net_id);
301 
302 	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
303 	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
304 		/* ipgre tunnels in collect metadata mode should receive
305 		 * also ETH_P_TEB traffic.
306 		 */
307 		itn = net_generic(net, ipgre_net_id);
308 		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
309 	}
310 	return res;
311 }
312 
/* Receive handler registered for IPPROTO_GRE (see ipgre_protocol).
 *
 * Parses the GRE header, rejects locally looped-back multicast, and
 * delivers the packet via ipgre_rcv(). If no tunnel claims the packet,
 * answers with ICMP port-unreachable and drops it. Always returns 0.
 */
static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

	/* No tunnel matched: tell the sender, then free the skb. */
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}
339 
/* Common transmit tail for configured (non-collect_md) GRE devices:
 * advance the output sequence number if TUNNEL_SEQ is set, push the GRE
 * header built from the tunnel's configured output flags/key, and hand
 * the skb to the generic IP tunnel transmit path with @tnl_params as the
 * outer IP header template.
 */
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
			 htonl(tunnel->o_seqno));

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
356 
357 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
358 {
359 	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
360 }
361 
362 static struct rtable *gre_get_rt(struct sk_buff *skb,
363 				 struct net_device *dev,
364 				 struct flowi4 *fl,
365 				 const struct ip_tunnel_key *key)
366 {
367 	struct net *net = dev_net(dev);
368 
369 	memset(fl, 0, sizeof(*fl));
370 	fl->daddr = key->u.ipv4.dst;
371 	fl->saddr = key->u.ipv4.src;
372 	fl->flowi4_tos = RT_TOS(key->tos);
373 	fl->flowi4_mark = skb->mark;
374 	fl->flowi4_proto = IPPROTO_GRE;
375 
376 	return ip_route_output_key(net, fl);
377 }
378 
/* Flow-based ("collect metadata") transmit path: all tunnel parameters
 * come from the skb's attached tunnel metadata rather than from device
 * configuration. Routes the outer flow (with optional per-tunnel dst
 * cache), ensures headroom, pushes GRE + outer IP headers and sends.
 * On any failure the skb is freed and tx_dropped is incremented.
 */
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	struct flowi4 fl;
	int min_headroom;
	int tunnel_hlen;
	__be16 df, flags;
	bool use_cache;
	int err;

	/* Without valid IPv4 TX metadata there is nothing we can send. */
	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr);
	if (!rt) {
		rt = gre_get_rt(skb, dev, &fl, key);
		if (IS_ERR(rt))
				goto err_free_skb;
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl.saddr);
	}

	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	/* Make room for link layer + outer IP + GRE headers; expand the
	 * head if the skb is too shallow or its header is shared.
	 */
	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_rt;

	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id), 0);

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ?  htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}
444 
445 static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
446 {
447 	struct ip_tunnel_info *info = skb_tunnel_info(skb);
448 	struct rtable *rt;
449 	struct flowi4 fl4;
450 
451 	if (ip_tunnel_info_af(info) != AF_INET)
452 		return -EINVAL;
453 
454 	rt = gre_get_rt(skb, dev, &fl4, &info->key);
455 	if (IS_ERR(rt))
456 		return PTR_ERR(rt);
457 
458 	ip_rt_put(rt);
459 	info->key.u.ipv4.src = fl4.saddr;
460 	return 0;
461 }
462 
/* ndo_start_xmit for layer-3 GRE devices.
 *
 * collect_md devices take the flow-based path. Devices with header_ops
 * (broadcast/multicast mode) already carry the outer IP + GRE header
 * built by ipgre_header(); use it as the template and pull it so that
 * skb->data points at the GRE header. Otherwise use the configured
 * tunnel parameters. Always returns NETDEV_TX_OK; on error the skb is
 * freed and tx_dropped incremented.
 */
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to gre header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
505 
506 static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
507 				struct net_device *dev)
508 {
509 	struct ip_tunnel *tunnel = netdev_priv(dev);
510 
511 	if (tunnel->collect_md) {
512 		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
513 		return NETDEV_TX_OK;
514 	}
515 
516 	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
517 		goto free_skb;
518 
519 	if (skb_cow_head(skb, dev->needed_headroom))
520 		goto free_skb;
521 
522 	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
523 	return NETDEV_TX_OK;
524 
525 free_skb:
526 	kfree_skb(skb);
527 	dev->stats.tx_dropped++;
528 	return NETDEV_TX_OK;
529 }
530 
/* Legacy SIOC{ADD,CHG,DEL,GET}TUNNEL ioctl handler.
 *
 * Copies the user's ip_tunnel_parm, validates it for add/change
 * (IPv4, GRE protocol, no options, no odd frag bits, no unsupported
 * GRE version/routing flags), converts between on-the-wire GRE flag
 * encoding and the internal TUNNEL_* encoding around the core ioctl,
 * and copies the (possibly updated) parameters back to userspace.
 */
static int ipgre_tunnel_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	int err;
	struct ip_tunnel_parm p;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;
	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
			return -EINVAL;
	}
	/* Userspace speaks wire-format GRE flags; the core wants TUNNEL_*. */
	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	/* Translate back before reporting the result to userspace. */
	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;
	return 0;
}
559 
560 /* Nice toy. Unfortunately, useless in real life :-)
561    It allows to construct virtual multiprotocol broadcast "LAN"
562    over the Internet, provided multicast routing is tuned.
563 
564 
565    I have no idea was this bicycle invented before me,
566    so that I had to set ARPHRD_IPGRE to a random value.
567    I have an impression, that Cisco could make something similar,
568    but this feature is apparently missing in IOS<=11.2(8).
569 
570    I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
571    with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
572 
573    ping -t 255 224.66.66.66
574 
575    If nobody answers, mbone does not work.
576 
577    ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
578    ip addr add 10.66.66.<somewhat>/24 dev Universe
579    ifconfig Universe up
580    ifconfig Universe add fe80::<Your_real_addr>/10
581    ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
582    ftp 10.66.66.66
583    ...
584    ftp fec0:6666:6666::193.233.7.65
585    ...
586  */
/* header_ops->create for broadcast-capable GRE devices: prebuild the
 * outer IP header plus GRE base header in front of the payload. The
 * 4-byte "hardware addresses" are IPv4 addresses. Returns the header
 * length, negated when the destination is still unknown (the
 * conventional header_ops contract for incomplete headers).
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph+1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	/* Start from the tunnel's configured outer header template. */
	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}
612 
613 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
614 {
615 	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
616 	memcpy(haddr, &iph->saddr, 4);
617 	return 4;
618 }
619 
/* Link-layer header ops used by broadcast (multicast-destination) GRE
 * devices; see ipgre_tunnel_init().
 */
static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};
624 
625 #ifdef CONFIG_NET_IPGRE_BROADCAST
/* ndo_open for broadcast GRE devices: when the tunnel destination is a
 * multicast group, resolve the outgoing route and join the group on the
 * underlying device, remembering its ifindex in t->mlink so that
 * ipgre_close() can leave the group again.
 */
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		/* From here on, dev is the underlying output device. */
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}
651 
652 static int ipgre_close(struct net_device *dev)
653 {
654 	struct ip_tunnel *t = netdev_priv(dev);
655 
656 	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
657 		struct in_device *in_dev;
658 		in_dev = inetdev_by_index(t->net, t->mlink);
659 		if (in_dev)
660 			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
661 	}
662 	return 0;
663 }
664 #endif
665 
/* netdev ops for layer-3 GRE devices; open/stop exist only to manage
 * multicast group membership in broadcast mode.
 */
static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
};
679 
680 #define GRE_FEATURES (NETIF_F_SG |		\
681 		      NETIF_F_FRAGLIST |	\
682 		      NETIF_F_HIGHDMA |		\
683 		      NETIF_F_HW_CSUM)
684 
685 static void ipgre_tunnel_setup(struct net_device *dev)
686 {
687 	dev->netdev_ops		= &ipgre_netdev_ops;
688 	dev->type		= ARPHRD_IPGRE;
689 	ip_tunnel_setup(dev, ipgre_net_id);
690 }
691 
/* Common init for gre and gretap devices: compute header lengths from
 * the configured output flags, derive headroom/MTU, and enable offload
 * features where the configuration permits.
 */
static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	int t_hlen;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	/* Total tunnel header = GRE header + optional FOU/GUE encap. */
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct iphdr);

	dev->needed_headroom	= LL_MAX_HEADER + t_hlen + 4;
	dev->mtu		= ETH_DATA_LEN - t_hlen - 4;

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features    |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}
728 
/* ndo_init for layer-3 GRE devices: common GRE init plus address setup.
 * Fixed-peer devices get header_ops so outer headers can be prebuilt;
 * multicast-destination devices additionally require a local address
 * and become broadcast links. collect_md devices get neither.
 */
static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	/* The 4-byte device addresses are the tunnel endpoints. */
	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len		= 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
	}

	return ip_tunnel_init(dev);
}
758 
/* GRE protocol hooks registered with the shared GRE demux. */
static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};
763 
/* Per-netns init: create the layer-3 gre tunnel table for this namespace. */
static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}
768 
769 static void __net_exit ipgre_exit_net(struct net *net)
770 {
771 	struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);
772 	ip_tunnel_delete_net(itn, &ipgre_link_ops);
773 }
774 
/* pernet registration for the layer-3 gre tunnel tables. */
static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
781 
782 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
783 {
784 	__be16 flags;
785 
786 	if (!data)
787 		return 0;
788 
789 	flags = 0;
790 	if (data[IFLA_GRE_IFLAGS])
791 		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
792 	if (data[IFLA_GRE_OFLAGS])
793 		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
794 	if (flags & (GRE_VERSION|GRE_ROUTING))
795 		return -EINVAL;
796 
797 	if (data[IFLA_GRE_COLLECT_METADATA] &&
798 	    data[IFLA_GRE_ENCAP_TYPE] &&
799 	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
800 		return -EINVAL;
801 
802 	return 0;
803 }
804 
805 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
806 {
807 	__be32 daddr;
808 
809 	if (tb[IFLA_ADDRESS]) {
810 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
811 			return -EINVAL;
812 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
813 			return -EADDRNOTAVAIL;
814 	}
815 
816 	if (!data)
817 		goto out;
818 
819 	if (data[IFLA_GRE_REMOTE]) {
820 		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
821 		if (!daddr)
822 			return -EINVAL;
823 	}
824 
825 out:
826 	return ipgre_tunnel_validate(tb, data);
827 }
828 
/* Translate IFLA_GRE_* netlink attributes into tunnel parameters.
 *
 * Fills @parms (zeroed first) and @fwmark, and updates the collect_md /
 * ignore_df state on the device's tunnel private data.
 * Returns 0 on success or -EINVAL on conflicting attributes.
 */
static int ipgre_netlink_parms(struct net_device *dev,
				struct nlattr *data[],
				struct nlattr *tb[],
				struct ip_tunnel_parm *parms,
				__u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	/* PMTU discovery (DF set) is the default; it conflicts with
	 * ignore_df.
	 */
	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		/* Flow-based devices carry no prebuilt link-layer header. */
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		/* ignore_df cannot be combined with a set DF bit. */
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
		  && (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	return 0;
}
895 
896 /* This function returns true when ENCAP attributes are present in the nl msg */
897 static bool ipgre_netlink_encap_parms(struct nlattr *data[],
898 				      struct ip_tunnel_encap *ipencap)
899 {
900 	bool ret = false;
901 
902 	memset(ipencap, 0, sizeof(*ipencap));
903 
904 	if (!data)
905 		return ret;
906 
907 	if (data[IFLA_GRE_ENCAP_TYPE]) {
908 		ret = true;
909 		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
910 	}
911 
912 	if (data[IFLA_GRE_ENCAP_FLAGS]) {
913 		ret = true;
914 		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
915 	}
916 
917 	if (data[IFLA_GRE_ENCAP_SPORT]) {
918 		ret = true;
919 		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
920 	}
921 
922 	if (data[IFLA_GRE_ENCAP_DPORT]) {
923 		ret = true;
924 		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
925 	}
926 
927 	return ret;
928 }
929 
/* ndo_init for gretap devices: common GRE init, then allow MAC address
 * changes while the device is up before the core tunnel init.
 */
static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	return ip_tunnel_init(dev);
}
937 
/* netdev ops for Ethernet-over-GRE (gretap) devices. */
static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};
949 
/* rtnl_link_ops setup callback for gretap devices: Ethernet defaults,
 * then gretap-specific ops/flags and netns table registration.
 */
static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops	= &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}
958 
/* rtnl newlink callback: apply optional encapsulation attributes, parse
 * the GRE attributes and create the tunnel via the ip_tunnel core.
 */
static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = 0;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}
980 
/* rtnl changelink callback: same flow as ipgre_newlink() but updates an
 * existing tunnel, preserving its fwmark unless overridden.
 */
static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = t->fwmark;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_changelink(dev, tb, &p, fwmark);
}
1002 
/* Worst-case netlink attribute payload emitted by ipgre_fill_info();
 * keep this in sync with the attributes written there.
 */
static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_IGNORE_DF */
		nla_total_size(1) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		0;
}
1042 
/* rtnl fill_info callback: dump the tunnel configuration as IFLA_GRE_*
 * attributes (flags converted back to wire-format GRE encoding).
 * Returns 0 or -EMSGSIZE when the skb runs out of room.
 */
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	/* collect_md is a flag attribute: present when enabled. */
	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
1087 
/* Netlink attribute validation policy for gre/gretap links; enforced by
 * the rtnetlink core before newlink/changelink callbacks run.
 */
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	/* addresses are validated by length only (size of an IPv4 address) */
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
};
1107 
/* rtnetlink operations for plain layer-3 "gre" tunnel devices. */
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
1122 
/* rtnetlink operations for "gretap" devices; shares the policy and the
 * newlink/changelink/dump callbacks with plain gre, differing only in
 * setup and validate.
 */
static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
1137 
/* Create a flow-based (collect_md) gretap device in @net, for callers
 * such as openvswitch that use a single metadata-driven device instead
 * of per-peer tunnels.  Returns the registered netdev on success or an
 * ERR_PTR() on failure.
 */
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	/* No netlink attributes supplied: everything uses defaults. */
	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL);
	if (err < 0) {
		/* NOTE(review): assumes a failed newlink leaves the device
		 * unregistered, so a plain free_netdev() suffices — confirm
		 * against ip_tunnel_newlink()'s error paths.
		 */
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	/* The device is registered by now; tear it down properly. */
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
1182 
/* Per-netns init for gretap: set up the tunnel table and the fallback
 * "gretap0" device.
 */
static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}
1187 
1188 static void __net_exit ipgre_tap_exit_net(struct net *net)
1189 {
1190 	struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);
1191 	ip_tunnel_delete_net(itn, &ipgre_tap_ops);
1192 }
1193 
/* Per-network-namespace lifetime hooks and state for gretap devices. */
static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit = ipgre_tap_exit_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
1200 
1201 static int __init ipgre_init(void)
1202 {
1203 	int err;
1204 
1205 	pr_info("GRE over IPv4 tunneling driver\n");
1206 
1207 	err = register_pernet_device(&ipgre_net_ops);
1208 	if (err < 0)
1209 		return err;
1210 
1211 	err = register_pernet_device(&ipgre_tap_net_ops);
1212 	if (err < 0)
1213 		goto pnet_tap_faied;
1214 
1215 	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1216 	if (err < 0) {
1217 		pr_info("%s: can't add protocol\n", __func__);
1218 		goto add_proto_failed;
1219 	}
1220 
1221 	err = rtnl_link_register(&ipgre_link_ops);
1222 	if (err < 0)
1223 		goto rtnl_link_failed;
1224 
1225 	err = rtnl_link_register(&ipgre_tap_ops);
1226 	if (err < 0)
1227 		goto tap_ops_failed;
1228 
1229 	return 0;
1230 
1231 tap_ops_failed:
1232 	rtnl_link_unregister(&ipgre_link_ops);
1233 rtnl_link_failed:
1234 	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1235 add_proto_failed:
1236 	unregister_pernet_device(&ipgre_tap_net_ops);
1237 pnet_tap_faied:
1238 	unregister_pernet_device(&ipgre_net_ops);
1239 	return err;
1240 }
1241 
/* Module exit: unwind everything ipgre_init() registered, in reverse
 * order of registration.
 */
static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
}
1250 
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
/* Autoload on "ip link add ... type gre|gretap" ... */
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
/* ... and on requests for the fallback device names. */
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
1258