xref: /openbmc/linux/net/ipv4/ip_gre.c (revision 9b68f30b)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Linux NET3:	GRE over IP protocol decoder.
4  *
5  *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
6  */
7 
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 
10 #include <linux/capability.h>
11 #include <linux/module.h>
12 #include <linux/types.h>
13 #include <linux/kernel.h>
14 #include <linux/slab.h>
15 #include <linux/uaccess.h>
16 #include <linux/skbuff.h>
17 #include <linux/netdevice.h>
18 #include <linux/in.h>
19 #include <linux/tcp.h>
20 #include <linux/udp.h>
21 #include <linux/if_arp.h>
22 #include <linux/if_vlan.h>
23 #include <linux/init.h>
24 #include <linux/in6.h>
25 #include <linux/inetdevice.h>
26 #include <linux/igmp.h>
27 #include <linux/netfilter_ipv4.h>
28 #include <linux/etherdevice.h>
29 #include <linux/if_ether.h>
30 
31 #include <net/sock.h>
32 #include <net/ip.h>
33 #include <net/icmp.h>
34 #include <net/protocol.h>
35 #include <net/ip_tunnels.h>
36 #include <net/arp.h>
37 #include <net/checksum.h>
38 #include <net/dsfield.h>
39 #include <net/inet_ecn.h>
40 #include <net/xfrm.h>
41 #include <net/net_namespace.h>
42 #include <net/netns/generic.h>
43 #include <net/rtnetlink.h>
44 #include <net/gre.h>
45 #include <net/dst_metadata.h>
46 #include <net/erspan.h>
47 
48 /*
49    Problems & solutions
50    --------------------
51 
52    1. The most important issue is detecting local dead loops.
53    They would cause a complete host lockup in transmit, which
54    would be "resolved" by a stack overflow or, if queueing is enabled,
55    by infinite looping in net_bh.
56 
57    We cannot track such dead loops during route installation;
58    it is an infeasible task. The most general solution would be
59    to keep an skb->encapsulation counter (a sort of local ttl)
60    and silently drop the packet when it expires. It is a good
61    solution, but it requires maintaining a new variable in EVERY
62    skb, even if no tunneling is used.
63 
64    Current solution: xmit_recursion breaks dead loops. This is a percpu
65    counter, since when we enter the first ndo_xmit(), cpu migration is
66    forbidden. We force an exit if this counter reaches RECURSION_LIMIT.
67 
68    2. Networking dead loops would not kill routers, but they would really
69    kill the network. The IP hop limit plays the role of "t->recursion" in this case,
70    if we copy it from the packet being encapsulated to the outer header.
71    It is a very good solution, but it introduces two problems:
72 
73    - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
74      do not work over tunnels.
75    - traceroute does not work. I planned to relay ICMP from the tunnel,
76      so that this problem would be solved and the traceroute output
77      would be even more informative. This idea turned out to be wrong:
78      only Linux complies with rfc1812 now (yes, guys, Linux is the only
79      true router now :-)); all other routers (at least, in my neighbourhood)
80      return only 8 bytes of payload. That is the end of it.
81 
82    Hence, if we want OSPF to work or traceroute to say something reasonable,
83    we should search for another solution.
84 
85    One of them is to parse the packet, trying to detect inner encapsulation
86    made by our node. This is difficult or even impossible, especially
87    when taking fragmentation into account. In short, ttl is not a solution at all.
88 
89    Current solution: The solution was UNEXPECTEDLY SIMPLE.
90    We force the DF flag on tunnels with a preconfigured hop limit,
91    that is ALL. :-) Well, it does not remove the problem completely,
92    but the exponential growth of network traffic is changed to linear growth
93    (branches that exceed the pmtu are pruned) and the tunnel mtu
94    rapidly degrades to a value <68, where looping stops.
95    Yes, it is not good if there is a router in the loop
96    that does not force DF, even when the encapsulating packets have DF set.
97    But that is not our problem! Nobody can accuse us; we did
98    all that we could. Even if it is your gated that injected
99    the fatal route into the network, even if it was you who configured
100    the fatal static route: you are innocent. :-)
101 
102    Alexey Kuznetsov.
103  */
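/* A minimal sketch of the mechanism described above, assuming the DF forcing
 * itself lives in the generic IPv4 tunnel ioctl path (ip_tunnel_ctl() in
 * net/ipv4/ip_tunnel.c) rather than in this file:
 *
 *	if (p->iph.ttl)
 *		p->iph.frag_off |= htons(IP_DF);
 *
 * i.e. a preconfigured hop limit implies "don't fragment" on the outer
 * header, so a looping tunnel path rapidly degrades its MTU instead of
 * amplifying traffic exponentially.
 */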
104 
105 static bool log_ecn_error = true;
106 module_param(log_ecn_error, bool, 0644);
107 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
108 
109 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
110 static const struct header_ops ipgre_header_ops;
111 
112 static int ipgre_tunnel_init(struct net_device *dev);
113 static void erspan_build_header(struct sk_buff *skb,
114 				u32 id, u32 index,
115 				bool truncate, bool is_ipv4);
116 
117 static unsigned int ipgre_net_id __read_mostly;
118 static unsigned int gre_tap_net_id __read_mostly;
119 static unsigned int erspan_net_id __read_mostly;
120 
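/* ipgre_err() is the per-tunnel part of ICMP error handling: given the
 * already-parsed GRE header of the offending packet (tpi) and the ICMP
 * type/code, it finds the matching tunnel and, for errors worth acting on,
 * records them in t->err_count and t->err_time (which the generic transmit
 * path is expected to use for reporting link failures).
 */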
121 static int ipgre_err(struct sk_buff *skb, u32 info,
122 		     const struct tnl_ptk_info *tpi)
123 {
124 
125 	/* All the routers (except for Linux) return only
126 	   8 bytes of packet payload. This means that precise relaying of
127 	   ICMP in the real Internet is absolutely infeasible.
128 
129 	   Moreover, Cisco "wise men" put the GRE key in the third word
130 	   of the GRE header. This makes it impossible to maintain even soft
131 	   state for keyed GRE tunnels with checksum enabled. Tell
132 	   them "thank you".
133 
134 	   Well, I wonder: rfc1812 was written by a Cisco employee,
135 	   so why the hell do these idiots break standards established
136 	   by themselves???
137 	   */
138 	struct net *net = dev_net(skb->dev);
139 	struct ip_tunnel_net *itn;
140 	const struct iphdr *iph;
141 	const int type = icmp_hdr(skb)->type;
142 	const int code = icmp_hdr(skb)->code;
143 	unsigned int data_len = 0;
144 	struct ip_tunnel *t;
145 
146 	if (tpi->proto == htons(ETH_P_TEB))
147 		itn = net_generic(net, gre_tap_net_id);
148 	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
149 		 tpi->proto == htons(ETH_P_ERSPAN2))
150 		itn = net_generic(net, erspan_net_id);
151 	else
152 		itn = net_generic(net, ipgre_net_id);
153 
154 	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
155 	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
156 			     iph->daddr, iph->saddr, tpi->key);
157 
158 	if (!t)
159 		return -ENOENT;
160 
161 	switch (type) {
162 	default:
163 	case ICMP_PARAMETERPROB:
164 		return 0;
165 
166 	case ICMP_DEST_UNREACH:
167 		switch (code) {
168 		case ICMP_SR_FAILED:
169 		case ICMP_PORT_UNREACH:
170 			/* Impossible event. */
171 			return 0;
172 		default:
173 			/* All others are translated to HOST_UNREACH.
174 			   rfc2003 contains "deep thoughts" about NET_UNREACH;
175 			   I believe they are just ether pollution. --ANK
176 			 */
177 			break;
178 		}
179 		break;
180 
181 	case ICMP_TIME_EXCEEDED:
182 		if (code != ICMP_EXC_TTL)
183 			return 0;
184 		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
185 		break;
186 
187 	case ICMP_REDIRECT:
188 		break;
189 	}
190 
191 #if IS_ENABLED(CONFIG_IPV6)
192        if (tpi->proto == htons(ETH_P_IPV6) &&
193            !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
194 				       type, data_len))
195                return 0;
196 #endif
197 
198 	if (t->parms.iph.daddr == 0 ||
199 	    ipv4_is_multicast(t->parms.iph.daddr))
200 		return 0;
201 
202 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
203 		return 0;
204 
205 	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
206 		t->err_count++;
207 	else
208 		t->err_count = 1;
209 	t->err_time = jiffies;
210 
211 	return 0;
212 }
213 
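/* gre_err() is the error handler registered with gre_add_protocol(): it
 * re-parses the GRE header from the ICMP payload, handles PMTU updates and
 * redirects directly, and hands everything else to ipgre_err() above.
 */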
214 static void gre_err(struct sk_buff *skb, u32 info)
215 {
216 	/* All the routers (except for Linux) return only
217 	 * 8 bytes of packet payload. This means that precise relaying of
218 	 * ICMP in the real Internet is absolutely infeasible.
219 	 *
220 	 * Moreover, Cisco "wise men" put the GRE key in the third word
221 	 * of the GRE header. This makes it impossible to maintain even soft
222 	 * state for keyed
223 	 * GRE tunnels with checksum enabled. Tell them "thank you".
224 	 *
225 	 * Well, I wonder: rfc1812 was written by a Cisco employee,
226 	 * so why the hell do these idiots break standards established
227 	 * by themselves???
228 	 */
229 
230 	const struct iphdr *iph = (struct iphdr *)skb->data;
231 	const int type = icmp_hdr(skb)->type;
232 	const int code = icmp_hdr(skb)->code;
233 	struct tnl_ptk_info tpi;
234 
235 	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
236 			     iph->ihl * 4) < 0)
237 		return;
238 
239 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
240 		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
241 				 skb->dev->ifindex, IPPROTO_GRE);
242 		return;
243 	}
244 	if (type == ICMP_REDIRECT) {
245 		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
246 			      IPPROTO_GRE);
247 		return;
248 	}
249 
250 	ipgre_err(skb, info, &tpi);
251 }
252 
253 static bool is_erspan_type1(int gre_hdr_len)
254 {
255 	/* Both ERSPAN type I (version 0) and type II (version 1) use
256 	 * protocol 0x88BE, but type I has only a 4-byte GRE header,
257 	 * while type II has an 8-byte one.
258 	 */
259 	return gre_hdr_len == 4;
260 }
261 
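/* erspan_rcv() handles ERSPAN frames arriving over GRE: it looks up the
 * tunnel (keyless for type I, keyed for type II/III), strips the GRE and
 * ERSPAN headers and, for collect_md tunnels, builds a metadata dst carrying
 * the ERSPAN version and md2 fields before handing the inner Ethernet frame
 * to ip_tunnel_rcv().
 */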
262 static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
263 		      int gre_hdr_len)
264 {
265 	struct net *net = dev_net(skb->dev);
266 	struct metadata_dst *tun_dst = NULL;
267 	struct erspan_base_hdr *ershdr;
268 	struct ip_tunnel_net *itn;
269 	struct ip_tunnel *tunnel;
270 	const struct iphdr *iph;
271 	struct erspan_md2 *md2;
272 	int ver;
273 	int len;
274 
275 	itn = net_generic(net, erspan_net_id);
276 	iph = ip_hdr(skb);
277 	if (is_erspan_type1(gre_hdr_len)) {
278 		ver = 0;
279 		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
280 					  tpi->flags | TUNNEL_NO_KEY,
281 					  iph->saddr, iph->daddr, 0);
282 	} else {
283 		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
284 		ver = ershdr->ver;
285 		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
286 					  tpi->flags | TUNNEL_KEY,
287 					  iph->saddr, iph->daddr, tpi->key);
288 	}
289 
290 	if (tunnel) {
291 		if (is_erspan_type1(gre_hdr_len))
292 			len = gre_hdr_len;
293 		else
294 			len = gre_hdr_len + erspan_hdr_len(ver);
295 
296 		if (unlikely(!pskb_may_pull(skb, len)))
297 			return PACKET_REJECT;
298 
299 		if (__iptunnel_pull_header(skb,
300 					   len,
301 					   htons(ETH_P_TEB),
302 					   false, false) < 0)
303 			goto drop;
304 
305 		if (tunnel->collect_md) {
306 			struct erspan_metadata *pkt_md, *md;
307 			struct ip_tunnel_info *info;
308 			unsigned char *gh;
309 			__be64 tun_id;
310 			__be16 flags;
311 
312 			tpi->flags |= TUNNEL_KEY;
313 			flags = tpi->flags;
314 			tun_id = key32_to_tunnel_id(tpi->key);
315 
316 			tun_dst = ip_tun_rx_dst(skb, flags,
317 						tun_id, sizeof(*md));
318 			if (!tun_dst)
319 				return PACKET_REJECT;
320 
321 			/* The skb can be uncloned in __iptunnel_pull_header(), so
322 			 * the old pkt_md is no longer valid and we need to reset
323 			 * it.
324 			 */
325 			gh = skb_network_header(skb) +
326 			     skb_network_header_len(skb);
327 			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
328 							    sizeof(*ershdr));
329 			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
330 			md->version = ver;
331 			md2 = &md->u.md2;
332 			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
333 						       ERSPAN_V2_MDSIZE);
334 
335 			info = &tun_dst->u.tun_info;
336 			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
337 			info->options_len = sizeof(*md);
338 		}
339 
340 		skb_reset_mac_header(skb);
341 		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
342 		return PACKET_RCVD;
343 	}
344 	return PACKET_REJECT;
345 
346 drop:
347 	kfree_skb(skb);
348 	return PACKET_RCVD;
349 }
350 
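/* __ipgre_rcv() is the common receive helper: it matches the packet against
 * a tunnel in the given ip_tunnel_net, strips the GRE header, builds a
 * metadata dst for collect_md tunnels (or tunnels with no fixed remote),
 * and returns PACKET_RCVD, PACKET_REJECT, or PACKET_NEXT when no tunnel in
 * this table matched and the caller may try another one.
 */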
351 static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
352 		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
353 {
354 	struct metadata_dst *tun_dst = NULL;
355 	const struct iphdr *iph;
356 	struct ip_tunnel *tunnel;
357 
358 	iph = ip_hdr(skb);
359 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
360 				  iph->saddr, iph->daddr, tpi->key);
361 
362 	if (tunnel) {
363 		const struct iphdr *tnl_params;
364 
365 		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
366 					   raw_proto, false) < 0)
367 			goto drop;
368 
369 		/* Special case for ipgre_header_parse(), which expects the
370 		 * mac_header to point to the outer IP header.
371 		 */
372 		if (tunnel->dev->header_ops == &ipgre_header_ops)
373 			skb_pop_mac_header(skb);
374 		else
375 			skb_reset_mac_header(skb);
376 
377 		tnl_params = &tunnel->parms.iph;
378 		if (tunnel->collect_md || tnl_params->daddr == 0) {
379 			__be16 flags;
380 			__be64 tun_id;
381 
382 			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
383 			tun_id = key32_to_tunnel_id(tpi->key);
384 			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
385 			if (!tun_dst)
386 				return PACKET_REJECT;
387 		}
388 
389 		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
390 		return PACKET_RCVD;
391 	}
392 	return PACKET_NEXT;
393 
394 drop:
395 	kfree_skb(skb);
396 	return PACKET_RCVD;
397 }
398 
399 static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
400 		     int hdr_len)
401 {
402 	struct net *net = dev_net(skb->dev);
403 	struct ip_tunnel_net *itn;
404 	int res;
405 
406 	if (tpi->proto == htons(ETH_P_TEB))
407 		itn = net_generic(net, gre_tap_net_id);
408 	else
409 		itn = net_generic(net, ipgre_net_id);
410 
411 	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
412 	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
413 		/* ipgre tunnels in collect metadata mode should also
414 		 * receive ETH_P_TEB traffic.
415 		 */
416 		itn = net_generic(net, ipgre_net_id);
417 		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
418 	}
419 	return res;
420 }
421 
422 static int gre_rcv(struct sk_buff *skb)
423 {
424 	struct tnl_ptk_info tpi;
425 	bool csum_err = false;
426 	int hdr_len;
427 
428 #ifdef CONFIG_NET_IPGRE_BROADCAST
429 	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
430 		/* Looped back packet, drop it! */
431 		if (rt_is_output_route(skb_rtable(skb)))
432 			goto drop;
433 	}
434 #endif
435 
436 	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
437 	if (hdr_len < 0)
438 		goto drop;
439 
440 	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
441 		     tpi.proto == htons(ETH_P_ERSPAN2))) {
442 		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
443 			return 0;
444 		goto out;
445 	}
446 
447 	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
448 		return 0;
449 
450 out:
451 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
452 drop:
453 	kfree_skb(skb);
454 	return 0;
455 }
456 
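/* __gre_xmit() pushes the GRE header (flags, protocol, key and, when
 * TUNNEL_SEQ is set, an incrementing sequence number) and then hands the
 * packet to the generic ip_tunnel_xmit() for IPv4 encapsulation.
 */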
457 static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
458 		       const struct iphdr *tnl_params,
459 		       __be16 proto)
460 {
461 	struct ip_tunnel *tunnel = netdev_priv(dev);
462 	__be16 flags = tunnel->parms.o_flags;
463 
464 	/* Push GRE header. */
465 	gre_build_header(skb, tunnel->tun_hlen,
466 			 flags, proto, tunnel->parms.o_key,
467 			 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
468 
469 	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
470 }
471 
472 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
473 {
474 	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
475 }
476 
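/* gre_fb_xmit() is the flow-based (collect_md) transmit path: the outer
 * addresses, key and flags come from the per-packet tunnel metadata attached
 * to the skb rather than from the netdevice configuration.
 */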
477 static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
478 			__be16 proto)
479 {
480 	struct ip_tunnel *tunnel = netdev_priv(dev);
481 	struct ip_tunnel_info *tun_info;
482 	const struct ip_tunnel_key *key;
483 	int tunnel_hlen;
484 	__be16 flags;
485 
486 	tun_info = skb_tunnel_info(skb);
487 	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
488 		     ip_tunnel_info_af(tun_info) != AF_INET))
489 		goto err_free_skb;
490 
491 	key = &tun_info->key;
492 	tunnel_hlen = gre_calc_hlen(key->tun_flags);
493 
494 	if (skb_cow_head(skb, dev->needed_headroom))
495 		goto err_free_skb;
496 
497 	/* Push Tunnel header. */
498 	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
499 		goto err_free_skb;
500 
501 	flags = tun_info->key.tun_flags &
502 		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
503 	gre_build_header(skb, tunnel_hlen, flags, proto,
504 			 tunnel_id_to_key32(tun_info->key.tun_id),
505 			 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
506 
507 	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
508 
509 	return;
510 
511 err_free_skb:
512 	kfree_skb(skb);
513 	DEV_STATS_INC(dev, tx_dropped);
514 }
515 
516 static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
517 {
518 	struct ip_tunnel *tunnel = netdev_priv(dev);
519 	struct ip_tunnel_info *tun_info;
520 	const struct ip_tunnel_key *key;
521 	struct erspan_metadata *md;
522 	bool truncate = false;
523 	__be16 proto;
524 	int tunnel_hlen;
525 	int version;
526 	int nhoff;
527 
528 	tun_info = skb_tunnel_info(skb);
529 	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
530 		     ip_tunnel_info_af(tun_info) != AF_INET))
531 		goto err_free_skb;
532 
533 	key = &tun_info->key;
534 	if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
535 		goto err_free_skb;
536 	if (tun_info->options_len < sizeof(*md))
537 		goto err_free_skb;
538 	md = ip_tunnel_info_opts(tun_info);
539 
540 	/* ERSPAN has a fixed 8-byte GRE header */
541 	version = md->version;
542 	tunnel_hlen = 8 + erspan_hdr_len(version);
543 
544 	if (skb_cow_head(skb, dev->needed_headroom))
545 		goto err_free_skb;
546 
547 	if (gre_handle_offloads(skb, false))
548 		goto err_free_skb;
549 
550 	if (skb->len > dev->mtu + dev->hard_header_len) {
551 		pskb_trim(skb, dev->mtu + dev->hard_header_len);
552 		truncate = true;
553 	}
554 
555 	nhoff = skb_network_offset(skb);
556 	if (skb->protocol == htons(ETH_P_IP) &&
557 	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
558 		truncate = true;
559 
560 	if (skb->protocol == htons(ETH_P_IPV6)) {
561 		int thoff;
562 
563 		if (skb_transport_header_was_set(skb))
564 			thoff = skb_transport_offset(skb);
565 		else
566 			thoff = nhoff + sizeof(struct ipv6hdr);
567 		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
568 			truncate = true;
569 	}
570 
571 	if (version == 1) {
572 		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
573 				    ntohl(md->u.index), truncate, true);
574 		proto = htons(ETH_P_ERSPAN);
575 	} else if (version == 2) {
576 		erspan_build_header_v2(skb,
577 				       ntohl(tunnel_id_to_key32(key->tun_id)),
578 				       md->u.md2.dir,
579 				       get_hwid(&md->u.md2),
580 				       truncate, true);
581 		proto = htons(ETH_P_ERSPAN2);
582 	} else {
583 		goto err_free_skb;
584 	}
585 
586 	gre_build_header(skb, 8, TUNNEL_SEQ,
587 			 proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno)));
588 
589 	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
590 
591 	return;
592 
593 err_free_skb:
594 	kfree_skb(skb);
595 	DEV_STATS_INC(dev, tx_dropped);
596 }
597 
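/* gre_fill_metadata_dst() backs .ndo_fill_metadata_dst: it performs a route
 * lookup towards the tunnel destination so that the per-packet metadata
 * (notably the local source address) is complete before transmission.
 */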
598 static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
599 {
600 	struct ip_tunnel_info *info = skb_tunnel_info(skb);
601 	const struct ip_tunnel_key *key;
602 	struct rtable *rt;
603 	struct flowi4 fl4;
604 
605 	if (ip_tunnel_info_af(info) != AF_INET)
606 		return -EINVAL;
607 
608 	key = &info->key;
609 	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
610 			    tunnel_id_to_key32(key->tun_id),
611 			    key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
612 			    skb->mark, skb_get_hash(skb), key->flow_flags);
613 	rt = ip_route_output_key(dev_net(dev), &fl4);
614 	if (IS_ERR(rt))
615 		return PTR_ERR(rt);
616 
617 	ip_rt_put(rt);
618 	info->key.u.ipv4.src = fl4.saddr;
619 	return 0;
620 }
621 
622 static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
623 			      struct net_device *dev)
624 {
625 	struct ip_tunnel *tunnel = netdev_priv(dev);
626 	const struct iphdr *tnl_params;
627 
628 	if (!pskb_inet_may_pull(skb))
629 		goto free_skb;
630 
631 	if (tunnel->collect_md) {
632 		gre_fb_xmit(skb, dev, skb->protocol);
633 		return NETDEV_TX_OK;
634 	}
635 
636 	if (dev->header_ops) {
637 		if (skb_cow_head(skb, 0))
638 			goto free_skb;
639 
640 		tnl_params = (const struct iphdr *)skb->data;
641 
642 		/* Pull the skb since ip_tunnel_xmit() needs skb->data pointing
643 		 * to the GRE header.
644 		 */
645 		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
646 		skb_reset_mac_header(skb);
647 
648 		if (skb->ip_summed == CHECKSUM_PARTIAL &&
649 		    skb_checksum_start(skb) < skb->data)
650 			goto free_skb;
651 	} else {
652 		if (skb_cow_head(skb, dev->needed_headroom))
653 			goto free_skb;
654 
655 		tnl_params = &tunnel->parms.iph;
656 	}
657 
658 	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
659 		goto free_skb;
660 
661 	__gre_xmit(skb, dev, tnl_params, skb->protocol);
662 	return NETDEV_TX_OK;
663 
664 free_skb:
665 	kfree_skb(skb);
666 	DEV_STATS_INC(dev, tx_dropped);
667 	return NETDEV_TX_OK;
668 }
669 
670 static netdev_tx_t erspan_xmit(struct sk_buff *skb,
671 			       struct net_device *dev)
672 {
673 	struct ip_tunnel *tunnel = netdev_priv(dev);
674 	bool truncate = false;
675 	__be16 proto;
676 
677 	if (!pskb_inet_may_pull(skb))
678 		goto free_skb;
679 
680 	if (tunnel->collect_md) {
681 		erspan_fb_xmit(skb, dev);
682 		return NETDEV_TX_OK;
683 	}
684 
685 	if (gre_handle_offloads(skb, false))
686 		goto free_skb;
687 
688 	if (skb_cow_head(skb, dev->needed_headroom))
689 		goto free_skb;
690 
691 	if (skb->len > dev->mtu + dev->hard_header_len) {
692 		pskb_trim(skb, dev->mtu + dev->hard_header_len);
693 		truncate = true;
694 	}
695 
696 	/* Push ERSPAN header */
697 	if (tunnel->erspan_ver == 0) {
698 		proto = htons(ETH_P_ERSPAN);
699 		tunnel->parms.o_flags &= ~TUNNEL_SEQ;
700 	} else if (tunnel->erspan_ver == 1) {
701 		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
702 				    tunnel->index,
703 				    truncate, true);
704 		proto = htons(ETH_P_ERSPAN);
705 	} else if (tunnel->erspan_ver == 2) {
706 		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
707 				       tunnel->dir, tunnel->hwid,
708 				       truncate, true);
709 		proto = htons(ETH_P_ERSPAN2);
710 	} else {
711 		goto free_skb;
712 	}
713 
714 	tunnel->parms.o_flags &= ~TUNNEL_KEY;
715 	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
716 	return NETDEV_TX_OK;
717 
718 free_skb:
719 	kfree_skb(skb);
720 	DEV_STATS_INC(dev, tx_dropped);
721 	return NETDEV_TX_OK;
722 }
723 
724 static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
725 				struct net_device *dev)
726 {
727 	struct ip_tunnel *tunnel = netdev_priv(dev);
728 
729 	if (!pskb_inet_may_pull(skb))
730 		goto free_skb;
731 
732 	if (tunnel->collect_md) {
733 		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
734 		return NETDEV_TX_OK;
735 	}
736 
737 	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
738 		goto free_skb;
739 
740 	if (skb_cow_head(skb, dev->needed_headroom))
741 		goto free_skb;
742 
743 	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
744 	return NETDEV_TX_OK;
745 
746 free_skb:
747 	kfree_skb(skb);
748 	DEV_STATS_INC(dev, tx_dropped);
749 	return NETDEV_TX_OK;
750 }
751 
752 static void ipgre_link_update(struct net_device *dev, bool set_mtu)
753 {
754 	struct ip_tunnel *tunnel = netdev_priv(dev);
755 	__be16 flags;
756 	int len;
757 
758 	len = tunnel->tun_hlen;
759 	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
760 	len = tunnel->tun_hlen - len;
761 	tunnel->hlen = tunnel->hlen + len;
762 
763 	if (dev->header_ops)
764 		dev->hard_header_len += len;
765 	else
766 		dev->needed_headroom += len;
767 
768 	if (set_mtu)
769 		dev->mtu = max_t(int, dev->mtu - len, 68);
770 
771 	flags = tunnel->parms.o_flags;
772 
773 	if (flags & TUNNEL_SEQ ||
774 	    (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
775 		dev->features &= ~NETIF_F_GSO_SOFTWARE;
776 		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
777 	} else {
778 		dev->features |= NETIF_F_GSO_SOFTWARE;
779 		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
780 	}
781 }
782 
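/* ipgre_tunnel_ctl() implements the legacy SIOC{ADD,CHG,DEL,GET}TUNNEL
 * ioctls for GRE: it validates the user-supplied parameters, converts
 * between GRE wire flags and internal tunnel flags around the generic
 * ip_tunnel_ctl() call, and, for non-erspan devices, refreshes the link
 * parameters on SIOCCHGTUNNEL.
 */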
783 static int ipgre_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p,
784 			    int cmd)
785 {
786 	int err;
787 
788 	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
789 		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
790 		    p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
791 		    ((p->i_flags | p->o_flags) & (GRE_VERSION | GRE_ROUTING)))
792 			return -EINVAL;
793 	}
794 
795 	p->i_flags = gre_flags_to_tnl_flags(p->i_flags);
796 	p->o_flags = gre_flags_to_tnl_flags(p->o_flags);
797 
798 	err = ip_tunnel_ctl(dev, p, cmd);
799 	if (err)
800 		return err;
801 
802 	if (cmd == SIOCCHGTUNNEL) {
803 		struct ip_tunnel *t = netdev_priv(dev);
804 
805 		t->parms.i_flags = p->i_flags;
806 		t->parms.o_flags = p->o_flags;
807 
808 		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
809 			ipgre_link_update(dev, true);
810 	}
811 
812 	p->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
813 	p->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
814 	return 0;
815 }
816 
817 /* Nice toy. Unfortunately, useless in real life :-)
818    It allows one to construct a virtual multiprotocol broadcast "LAN"
819    over the Internet, provided multicast routing is tuned.
820 
821 
822    I have no idea whether this wheel was invented before me,
823    so I had to set ARPHRD_IPGRE to a random value.
824    I have the impression that Cisco could have made something similar,
825    but this feature is apparently missing in IOS<=11.2(8).
826 
827    I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
828    with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
829 
830    ping -t 255 224.66.66.66
831 
832    If nobody answers, mbone does not work.
833 
834    ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
835    ip addr add 10.66.66.<somewhat>/24 dev Universe
836    ifconfig Universe up
837    ifconfig Universe add fe80::<Your_real_addr>/10
838    ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
839    ftp 10.66.66.66
840    ...
841    ftp fec0:6666:6666::193.233.7.65
842    ...
843  */
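/* For reference, a roughly equivalent setup with current iproute2 (an
 * illustrative sketch; addresses as in the comment above) might be:
 *
 *	ip tunnel add Universe mode gre remote 224.66.66.66 \
 *		local <Your_real_addr> ttl 255
 *	ip addr add 10.66.66.<somewhat>/24 dev Universe
 *	ip addr add fec0:6666:6666::<Your_real_addr>/96 dev Universe
 *	ip link set Universe up
 */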
844 static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
845 			unsigned short type,
846 			const void *daddr, const void *saddr, unsigned int len)
847 {
848 	struct ip_tunnel *t = netdev_priv(dev);
849 	struct iphdr *iph;
850 	struct gre_base_hdr *greh;
851 
852 	iph = skb_push(skb, t->hlen + sizeof(*iph));
853 	greh = (struct gre_base_hdr *)(iph+1);
854 	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
855 	greh->protocol = htons(type);
856 
857 	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
858 
859 	/* Set the source hardware address. */
860 	if (saddr)
861 		memcpy(&iph->saddr, saddr, 4);
862 	if (daddr)
863 		memcpy(&iph->daddr, daddr, 4);
864 	if (iph->daddr)
865 		return t->hlen + sizeof(*iph);
866 
867 	return -(t->hlen + sizeof(*iph));
868 }
869 
870 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
871 {
872 	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
873 	memcpy(haddr, &iph->saddr, 4);
874 	return 4;
875 }
876 
877 static const struct header_ops ipgre_header_ops = {
878 	.create	= ipgre_header,
879 	.parse	= ipgre_header_parse,
880 };
881 
882 #ifdef CONFIG_NET_IPGRE_BROADCAST
883 static int ipgre_open(struct net_device *dev)
884 {
885 	struct ip_tunnel *t = netdev_priv(dev);
886 
887 	if (ipv4_is_multicast(t->parms.iph.daddr)) {
888 		struct flowi4 fl4;
889 		struct rtable *rt;
890 
891 		rt = ip_route_output_gre(t->net, &fl4,
892 					 t->parms.iph.daddr,
893 					 t->parms.iph.saddr,
894 					 t->parms.o_key,
895 					 RT_TOS(t->parms.iph.tos),
896 					 t->parms.link);
897 		if (IS_ERR(rt))
898 			return -EADDRNOTAVAIL;
899 		dev = rt->dst.dev;
900 		ip_rt_put(rt);
901 		if (!__in_dev_get_rtnl(dev))
902 			return -EADDRNOTAVAIL;
903 		t->mlink = dev->ifindex;
904 		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
905 	}
906 	return 0;
907 }
908 
909 static int ipgre_close(struct net_device *dev)
910 {
911 	struct ip_tunnel *t = netdev_priv(dev);
912 
913 	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
914 		struct in_device *in_dev;
915 		in_dev = inetdev_by_index(t->net, t->mlink);
916 		if (in_dev)
917 			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
918 	}
919 	return 0;
920 }
921 #endif
922 
923 static const struct net_device_ops ipgre_netdev_ops = {
924 	.ndo_init		= ipgre_tunnel_init,
925 	.ndo_uninit		= ip_tunnel_uninit,
926 #ifdef CONFIG_NET_IPGRE_BROADCAST
927 	.ndo_open		= ipgre_open,
928 	.ndo_stop		= ipgre_close,
929 #endif
930 	.ndo_start_xmit		= ipgre_xmit,
931 	.ndo_siocdevprivate	= ip_tunnel_siocdevprivate,
932 	.ndo_change_mtu		= ip_tunnel_change_mtu,
933 	.ndo_get_stats64	= dev_get_tstats64,
934 	.ndo_get_iflink		= ip_tunnel_get_iflink,
935 	.ndo_tunnel_ctl		= ipgre_tunnel_ctl,
936 };
937 
938 #define GRE_FEATURES (NETIF_F_SG |		\
939 		      NETIF_F_FRAGLIST |	\
940 		      NETIF_F_HIGHDMA |		\
941 		      NETIF_F_HW_CSUM)
942 
943 static void ipgre_tunnel_setup(struct net_device *dev)
944 {
945 	dev->netdev_ops		= &ipgre_netdev_ops;
946 	dev->type		= ARPHRD_IPGRE;
947 	ip_tunnel_setup(dev, ipgre_net_id);
948 }
949 
950 static void __gre_tunnel_init(struct net_device *dev)
951 {
952 	struct ip_tunnel *tunnel;
953 	__be16 flags;
954 
955 	tunnel = netdev_priv(dev);
956 	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
957 	tunnel->parms.iph.protocol = IPPROTO_GRE;
958 
959 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
960 	dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
961 
962 	dev->features		|= GRE_FEATURES | NETIF_F_LLTX;
963 	dev->hw_features	|= GRE_FEATURES;
964 
965 	flags = tunnel->parms.o_flags;
966 
967 	/* TCP offload with GRE SEQ is not supported, nor can we support 2
968 	 * levels of outer headers requiring an update.
969 	 */
970 	if (flags & TUNNEL_SEQ)
971 		return;
972 	if (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)
973 		return;
974 
975 	dev->features |= NETIF_F_GSO_SOFTWARE;
976 	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
977 }
978 
979 static int ipgre_tunnel_init(struct net_device *dev)
980 {
981 	struct ip_tunnel *tunnel = netdev_priv(dev);
982 	struct iphdr *iph = &tunnel->parms.iph;
983 
984 	__gre_tunnel_init(dev);
985 
986 	__dev_addr_set(dev, &iph->saddr, 4);
987 	memcpy(dev->broadcast, &iph->daddr, 4);
988 
989 	dev->flags		= IFF_NOARP;
990 	netif_keep_dst(dev);
991 	dev->addr_len		= 4;
992 
993 	if (iph->daddr && !tunnel->collect_md) {
994 #ifdef CONFIG_NET_IPGRE_BROADCAST
995 		if (ipv4_is_multicast(iph->daddr)) {
996 			if (!iph->saddr)
997 				return -EINVAL;
998 			dev->flags = IFF_BROADCAST;
999 			dev->header_ops = &ipgre_header_ops;
1000 			dev->hard_header_len = tunnel->hlen + sizeof(*iph);
1001 			dev->needed_headroom = 0;
1002 		}
1003 #endif
1004 	} else if (!tunnel->collect_md) {
1005 		dev->header_ops = &ipgre_header_ops;
1006 		dev->hard_header_len = tunnel->hlen + sizeof(*iph);
1007 		dev->needed_headroom = 0;
1008 	}
1009 
1010 	return ip_tunnel_init(dev);
1011 }
1012 
1013 static const struct gre_protocol ipgre_protocol = {
1014 	.handler     = gre_rcv,
1015 	.err_handler = gre_err,
1016 };
1017 
1018 static int __net_init ipgre_init_net(struct net *net)
1019 {
1020 	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
1021 }
1022 
1023 static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
1024 {
1025 	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
1026 }
1027 
1028 static struct pernet_operations ipgre_net_ops = {
1029 	.init = ipgre_init_net,
1030 	.exit_batch = ipgre_exit_batch_net,
1031 	.id   = &ipgre_net_id,
1032 	.size = sizeof(struct ip_tunnel_net),
1033 };
1034 
1035 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1036 				 struct netlink_ext_ack *extack)
1037 {
1038 	__be16 flags;
1039 
1040 	if (!data)
1041 		return 0;
1042 
1043 	flags = 0;
1044 	if (data[IFLA_GRE_IFLAGS])
1045 		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1046 	if (data[IFLA_GRE_OFLAGS])
1047 		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1048 	if (flags & (GRE_VERSION|GRE_ROUTING))
1049 		return -EINVAL;
1050 
1051 	if (data[IFLA_GRE_COLLECT_METADATA] &&
1052 	    data[IFLA_GRE_ENCAP_TYPE] &&
1053 	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
1054 		return -EINVAL;
1055 
1056 	return 0;
1057 }
1058 
1059 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1060 			      struct netlink_ext_ack *extack)
1061 {
1062 	__be32 daddr;
1063 
1064 	if (tb[IFLA_ADDRESS]) {
1065 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1066 			return -EINVAL;
1067 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1068 			return -EADDRNOTAVAIL;
1069 	}
1070 
1071 	if (!data)
1072 		goto out;
1073 
1074 	if (data[IFLA_GRE_REMOTE]) {
1075 		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1076 		if (!daddr)
1077 			return -EINVAL;
1078 	}
1079 
1080 out:
1081 	return ipgre_tunnel_validate(tb, data, extack);
1082 }
1083 
1084 static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
1085 			   struct netlink_ext_ack *extack)
1086 {
1087 	__be16 flags = 0;
1088 	int ret;
1089 
1090 	if (!data)
1091 		return 0;
1092 
1093 	ret = ipgre_tap_validate(tb, data, extack);
1094 	if (ret)
1095 		return ret;
1096 
1097 	if (data[IFLA_GRE_ERSPAN_VER] &&
1098 	    nla_get_u8(data[IFLA_GRE_ERSPAN_VER]) == 0)
1099 		return 0;
1100 
1101 	/* ERSPAN type II/III should only have the GRE sequence and key flags */
1102 	if (data[IFLA_GRE_OFLAGS])
1103 		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1104 	if (data[IFLA_GRE_IFLAGS])
1105 		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1106 	if (!data[IFLA_GRE_COLLECT_METADATA] &&
1107 	    flags != (GRE_SEQ | GRE_KEY))
1108 		return -EINVAL;
1109 
1110 	/* The ERSPAN session ID is only 10 bits wide. Since we reuse the
1111 	 * 32-bit key field as the ID, check its range.
1112 	 */
1113 	if (data[IFLA_GRE_IKEY] &&
1114 	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1115 		return -EINVAL;
1116 
1117 	if (data[IFLA_GRE_OKEY] &&
1118 	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1119 		return -EINVAL;
1120 
1121 	return 0;
1122 }
1123 
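/* ipgre_netlink_parms() translates IFLA_GRE_* netlink attributes into an
 * ip_tunnel_parm plus the per-tunnel fields collect_md, ignore_df and
 * fwmark. These are the attributes iproute2 fills in for a command such as
 * (an illustrative example):
 *
 *	ip link add gre1 type gre local 192.0.2.1 remote 192.0.2.2 \
 *		ttl 64 key 42
 */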
1124 static int ipgre_netlink_parms(struct net_device *dev,
1125 				struct nlattr *data[],
1126 				struct nlattr *tb[],
1127 				struct ip_tunnel_parm *parms,
1128 				__u32 *fwmark)
1129 {
1130 	struct ip_tunnel *t = netdev_priv(dev);
1131 
1132 	memset(parms, 0, sizeof(*parms));
1133 
1134 	parms->iph.protocol = IPPROTO_GRE;
1135 
1136 	if (!data)
1137 		return 0;
1138 
1139 	if (data[IFLA_GRE_LINK])
1140 		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1141 
1142 	if (data[IFLA_GRE_IFLAGS])
1143 		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));
1144 
1145 	if (data[IFLA_GRE_OFLAGS])
1146 		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));
1147 
1148 	if (data[IFLA_GRE_IKEY])
1149 		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1150 
1151 	if (data[IFLA_GRE_OKEY])
1152 		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1153 
1154 	if (data[IFLA_GRE_LOCAL])
1155 		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
1156 
1157 	if (data[IFLA_GRE_REMOTE])
1158 		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
1159 
1160 	if (data[IFLA_GRE_TTL])
1161 		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1162 
1163 	if (data[IFLA_GRE_TOS])
1164 		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1165 
1166 	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
1167 		if (t->ignore_df)
1168 			return -EINVAL;
1169 		parms->iph.frag_off = htons(IP_DF);
1170 	}
1171 
1172 	if (data[IFLA_GRE_COLLECT_METADATA]) {
1173 		t->collect_md = true;
1174 		if (dev->type == ARPHRD_IPGRE)
1175 			dev->type = ARPHRD_NONE;
1176 	}
1177 
1178 	if (data[IFLA_GRE_IGNORE_DF]) {
1179 		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
1180 		  && (parms->iph.frag_off & htons(IP_DF)))
1181 			return -EINVAL;
1182 		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
1183 	}
1184 
1185 	if (data[IFLA_GRE_FWMARK])
1186 		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
1187 
1188 	return 0;
1189 }
1190 
1191 static int erspan_netlink_parms(struct net_device *dev,
1192 				struct nlattr *data[],
1193 				struct nlattr *tb[],
1194 				struct ip_tunnel_parm *parms,
1195 				__u32 *fwmark)
1196 {
1197 	struct ip_tunnel *t = netdev_priv(dev);
1198 	int err;
1199 
1200 	err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
1201 	if (err)
1202 		return err;
1203 	if (!data)
1204 		return 0;
1205 
1206 	if (data[IFLA_GRE_ERSPAN_VER]) {
1207 		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1208 
1209 		if (t->erspan_ver > 2)
1210 			return -EINVAL;
1211 	}
1212 
1213 	if (t->erspan_ver == 1) {
1214 		if (data[IFLA_GRE_ERSPAN_INDEX]) {
1215 			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1216 			if (t->index & ~INDEX_MASK)
1217 				return -EINVAL;
1218 		}
1219 	} else if (t->erspan_ver == 2) {
1220 		if (data[IFLA_GRE_ERSPAN_DIR]) {
1221 			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1222 			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
1223 				return -EINVAL;
1224 		}
1225 		if (data[IFLA_GRE_ERSPAN_HWID]) {
1226 			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1227 			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
1228 				return -EINVAL;
1229 		}
1230 	}
1231 
1232 	return 0;
1233 }
1234 
1235 /* This function returns true when ENCAP attributes are present in the nl msg */
1236 static bool ipgre_netlink_encap_parms(struct nlattr *data[],
1237 				      struct ip_tunnel_encap *ipencap)
1238 {
1239 	bool ret = false;
1240 
1241 	memset(ipencap, 0, sizeof(*ipencap));
1242 
1243 	if (!data)
1244 		return ret;
1245 
1246 	if (data[IFLA_GRE_ENCAP_TYPE]) {
1247 		ret = true;
1248 		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1249 	}
1250 
1251 	if (data[IFLA_GRE_ENCAP_FLAGS]) {
1252 		ret = true;
1253 		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1254 	}
1255 
1256 	if (data[IFLA_GRE_ENCAP_SPORT]) {
1257 		ret = true;
1258 		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1259 	}
1260 
1261 	if (data[IFLA_GRE_ENCAP_DPORT]) {
1262 		ret = true;
1263 		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1264 	}
1265 
1266 	return ret;
1267 }
1268 
1269 static int gre_tap_init(struct net_device *dev)
1270 {
1271 	__gre_tunnel_init(dev);
1272 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1273 	netif_keep_dst(dev);
1274 
1275 	return ip_tunnel_init(dev);
1276 }
1277 
1278 static const struct net_device_ops gre_tap_netdev_ops = {
1279 	.ndo_init		= gre_tap_init,
1280 	.ndo_uninit		= ip_tunnel_uninit,
1281 	.ndo_start_xmit		= gre_tap_xmit,
1282 	.ndo_set_mac_address 	= eth_mac_addr,
1283 	.ndo_validate_addr	= eth_validate_addr,
1284 	.ndo_change_mtu		= ip_tunnel_change_mtu,
1285 	.ndo_get_stats64	= dev_get_tstats64,
1286 	.ndo_get_iflink		= ip_tunnel_get_iflink,
1287 	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1288 };
1289 
1290 static int erspan_tunnel_init(struct net_device *dev)
1291 {
1292 	struct ip_tunnel *tunnel = netdev_priv(dev);
1293 
1294 	if (tunnel->erspan_ver == 0)
1295 		tunnel->tun_hlen = 4; /* 4-byte GRE hdr. */
1296 	else
1297 		tunnel->tun_hlen = 8; /* 8-byte GRE hdr. */
1298 
1299 	tunnel->parms.iph.protocol = IPPROTO_GRE;
1300 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1301 		       erspan_hdr_len(tunnel->erspan_ver);
1302 
1303 	dev->features		|= GRE_FEATURES;
1304 	dev->hw_features	|= GRE_FEATURES;
1305 	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
1306 	netif_keep_dst(dev);
1307 
1308 	return ip_tunnel_init(dev);
1309 }
1310 
1311 static const struct net_device_ops erspan_netdev_ops = {
1312 	.ndo_init		= erspan_tunnel_init,
1313 	.ndo_uninit		= ip_tunnel_uninit,
1314 	.ndo_start_xmit		= erspan_xmit,
1315 	.ndo_set_mac_address	= eth_mac_addr,
1316 	.ndo_validate_addr	= eth_validate_addr,
1317 	.ndo_change_mtu		= ip_tunnel_change_mtu,
1318 	.ndo_get_stats64	= dev_get_tstats64,
1319 	.ndo_get_iflink		= ip_tunnel_get_iflink,
1320 	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1321 };
1322 
1323 static void ipgre_tap_setup(struct net_device *dev)
1324 {
1325 	ether_setup(dev);
1326 	dev->max_mtu = 0;
1327 	dev->netdev_ops	= &gre_tap_netdev_ops;
1328 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1329 	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
1330 	ip_tunnel_setup(dev, gre_tap_net_id);
1331 }
1332 
1333 static int
1334 ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
1335 {
1336 	struct ip_tunnel_encap ipencap;
1337 
1338 	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1339 		struct ip_tunnel *t = netdev_priv(dev);
1340 		int err = ip_tunnel_encap_setup(t, &ipencap);
1341 
1342 		if (err < 0)
1343 			return err;
1344 	}
1345 
1346 	return 0;
1347 }
1348 
1349 static int ipgre_newlink(struct net *src_net, struct net_device *dev,
1350 			 struct nlattr *tb[], struct nlattr *data[],
1351 			 struct netlink_ext_ack *extack)
1352 {
1353 	struct ip_tunnel_parm p;
1354 	__u32 fwmark = 0;
1355 	int err;
1356 
1357 	err = ipgre_newlink_encap_setup(dev, data);
1358 	if (err)
1359 		return err;
1360 
1361 	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1362 	if (err < 0)
1363 		return err;
1364 	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1365 }
1366 
1367 static int erspan_newlink(struct net *src_net, struct net_device *dev,
1368 			  struct nlattr *tb[], struct nlattr *data[],
1369 			  struct netlink_ext_ack *extack)
1370 {
1371 	struct ip_tunnel_parm p;
1372 	__u32 fwmark = 0;
1373 	int err;
1374 
1375 	err = ipgre_newlink_encap_setup(dev, data);
1376 	if (err)
1377 		return err;
1378 
1379 	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
1380 	if (err)
1381 		return err;
1382 	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1383 }
1384 
1385 static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1386 			    struct nlattr *data[],
1387 			    struct netlink_ext_ack *extack)
1388 {
1389 	struct ip_tunnel *t = netdev_priv(dev);
1390 	__u32 fwmark = t->fwmark;
1391 	struct ip_tunnel_parm p;
1392 	int err;
1393 
1394 	err = ipgre_newlink_encap_setup(dev, data);
1395 	if (err)
1396 		return err;
1397 
1398 	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1399 	if (err < 0)
1400 		return err;
1401 
1402 	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1403 	if (err < 0)
1404 		return err;
1405 
1406 	t->parms.i_flags = p.i_flags;
1407 	t->parms.o_flags = p.o_flags;
1408 
1409 	ipgre_link_update(dev, !tb[IFLA_MTU]);
1410 
1411 	return 0;
1412 }
1413 
1414 static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
1415 			     struct nlattr *data[],
1416 			     struct netlink_ext_ack *extack)
1417 {
1418 	struct ip_tunnel *t = netdev_priv(dev);
1419 	__u32 fwmark = t->fwmark;
1420 	struct ip_tunnel_parm p;
1421 	int err;
1422 
1423 	err = ipgre_newlink_encap_setup(dev, data);
1424 	if (err)
1425 		return err;
1426 
1427 	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
1428 	if (err < 0)
1429 		return err;
1430 
1431 	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1432 	if (err < 0)
1433 		return err;
1434 
1435 	t->parms.i_flags = p.i_flags;
1436 	t->parms.o_flags = p.o_flags;
1437 
1438 	return 0;
1439 }
1440 
1441 static size_t ipgre_get_size(const struct net_device *dev)
1442 {
1443 	return
1444 		/* IFLA_GRE_LINK */
1445 		nla_total_size(4) +
1446 		/* IFLA_GRE_IFLAGS */
1447 		nla_total_size(2) +
1448 		/* IFLA_GRE_OFLAGS */
1449 		nla_total_size(2) +
1450 		/* IFLA_GRE_IKEY */
1451 		nla_total_size(4) +
1452 		/* IFLA_GRE_OKEY */
1453 		nla_total_size(4) +
1454 		/* IFLA_GRE_LOCAL */
1455 		nla_total_size(4) +
1456 		/* IFLA_GRE_REMOTE */
1457 		nla_total_size(4) +
1458 		/* IFLA_GRE_TTL */
1459 		nla_total_size(1) +
1460 		/* IFLA_GRE_TOS */
1461 		nla_total_size(1) +
1462 		/* IFLA_GRE_PMTUDISC */
1463 		nla_total_size(1) +
1464 		/* IFLA_GRE_ENCAP_TYPE */
1465 		nla_total_size(2) +
1466 		/* IFLA_GRE_ENCAP_FLAGS */
1467 		nla_total_size(2) +
1468 		/* IFLA_GRE_ENCAP_SPORT */
1469 		nla_total_size(2) +
1470 		/* IFLA_GRE_ENCAP_DPORT */
1471 		nla_total_size(2) +
1472 		/* IFLA_GRE_COLLECT_METADATA */
1473 		nla_total_size(0) +
1474 		/* IFLA_GRE_IGNORE_DF */
1475 		nla_total_size(1) +
1476 		/* IFLA_GRE_FWMARK */
1477 		nla_total_size(4) +
1478 		/* IFLA_GRE_ERSPAN_INDEX */
1479 		nla_total_size(4) +
1480 		/* IFLA_GRE_ERSPAN_VER */
1481 		nla_total_size(1) +
1482 		/* IFLA_GRE_ERSPAN_DIR */
1483 		nla_total_size(1) +
1484 		/* IFLA_GRE_ERSPAN_HWID */
1485 		nla_total_size(2) +
1486 		0;
1487 }
1488 
1489 static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1490 {
1491 	struct ip_tunnel *t = netdev_priv(dev);
1492 	struct ip_tunnel_parm *p = &t->parms;
1493 	__be16 o_flags = p->o_flags;
1494 
1495 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1496 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
1497 			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
1498 	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
1499 			 gre_tnl_flags_to_gre_flags(o_flags)) ||
1500 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1501 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1502 	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1503 	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1504 	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1505 	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1506 	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1507 		       !!(p->iph.frag_off & htons(IP_DF))) ||
1508 	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
1509 		goto nla_put_failure;
1510 
1511 	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
1512 			t->encap.type) ||
1513 	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
1514 			 t->encap.sport) ||
1515 	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
1516 			 t->encap.dport) ||
1517 	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
1518 			t->encap.flags))
1519 		goto nla_put_failure;
1520 
1521 	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
1522 		goto nla_put_failure;
1523 
1524 	if (t->collect_md) {
1525 		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
1526 			goto nla_put_failure;
1527 	}
1528 
1529 	return 0;
1530 
1531 nla_put_failure:
1532 	return -EMSGSIZE;
1533 }
1534 
1535 static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1536 {
1537 	struct ip_tunnel *t = netdev_priv(dev);
1538 
1539 	if (t->erspan_ver <= 2) {
1540 		if (t->erspan_ver != 0 && !t->collect_md)
1541 			t->parms.o_flags |= TUNNEL_KEY;
1542 
1543 		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1544 			goto nla_put_failure;
1545 
1546 		if (t->erspan_ver == 1) {
1547 			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1548 				goto nla_put_failure;
1549 		} else if (t->erspan_ver == 2) {
1550 			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1551 				goto nla_put_failure;
1552 			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1553 				goto nla_put_failure;
1554 		}
1555 	}
1556 
1557 	return ipgre_fill_info(skb, dev);
1558 
1559 nla_put_failure:
1560 	return -EMSGSIZE;
1561 }
1562 
1563 static void erspan_setup(struct net_device *dev)
1564 {
1565 	struct ip_tunnel *t = netdev_priv(dev);
1566 
1567 	ether_setup(dev);
1568 	dev->max_mtu = 0;
1569 	dev->netdev_ops = &erspan_netdev_ops;
1570 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1571 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1572 	ip_tunnel_setup(dev, erspan_net_id);
1573 	t->erspan_ver = 1;
1574 }
1575 
1576 static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1577 	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
1578 	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
1579 	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
1580 	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
1581 	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
1582 	[IFLA_GRE_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
1583 	[IFLA_GRE_REMOTE]	= { .len = sizeof_field(struct iphdr, daddr) },
1584 	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
1585 	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
1586 	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
1587 	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
1588 	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
1589 	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
1590 	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
1591 	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
1592 	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
1593 	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
1594 	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
1595 	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
1596 	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
1597 	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
1598 };
1599 
1600 static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1601 	.kind		= "gre",
1602 	.maxtype	= IFLA_GRE_MAX,
1603 	.policy		= ipgre_policy,
1604 	.priv_size	= sizeof(struct ip_tunnel),
1605 	.setup		= ipgre_tunnel_setup,
1606 	.validate	= ipgre_tunnel_validate,
1607 	.newlink	= ipgre_newlink,
1608 	.changelink	= ipgre_changelink,
1609 	.dellink	= ip_tunnel_dellink,
1610 	.get_size	= ipgre_get_size,
1611 	.fill_info	= ipgre_fill_info,
1612 	.get_link_net	= ip_tunnel_get_link_net,
1613 };
1614 
1615 static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1616 	.kind		= "gretap",
1617 	.maxtype	= IFLA_GRE_MAX,
1618 	.policy		= ipgre_policy,
1619 	.priv_size	= sizeof(struct ip_tunnel),
1620 	.setup		= ipgre_tap_setup,
1621 	.validate	= ipgre_tap_validate,
1622 	.newlink	= ipgre_newlink,
1623 	.changelink	= ipgre_changelink,
1624 	.dellink	= ip_tunnel_dellink,
1625 	.get_size	= ipgre_get_size,
1626 	.fill_info	= ipgre_fill_info,
1627 	.get_link_net	= ip_tunnel_get_link_net,
1628 };
1629 
1630 static struct rtnl_link_ops erspan_link_ops __read_mostly = {
1631 	.kind		= "erspan",
1632 	.maxtype	= IFLA_GRE_MAX,
1633 	.policy		= ipgre_policy,
1634 	.priv_size	= sizeof(struct ip_tunnel),
1635 	.setup		= erspan_setup,
1636 	.validate	= erspan_validate,
1637 	.newlink	= erspan_newlink,
1638 	.changelink	= erspan_changelink,
1639 	.dellink	= ip_tunnel_dellink,
1640 	.get_size	= ipgre_get_size,
1641 	.fill_info	= erspan_fill_info,
1642 	.get_link_net	= ip_tunnel_get_link_net,
1643 };
1644 
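/* gretap_fb_dev_create() creates a flow-based (collect_md) gretap device for
 * in-kernel users; as noted below, openvswitch relies on this to obtain a
 * tunnel port whose MTU does not restrict packet sizes.
 */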
1645 struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1646 					u8 name_assign_type)
1647 {
1648 	struct nlattr *tb[IFLA_MAX + 1];
1649 	struct net_device *dev;
1650 	LIST_HEAD(list_kill);
1651 	struct ip_tunnel *t;
1652 	int err;
1653 
1654 	memset(&tb, 0, sizeof(tb));
1655 
1656 	dev = rtnl_create_link(net, name, name_assign_type,
1657 			       &ipgre_tap_ops, tb, NULL);
1658 	if (IS_ERR(dev))
1659 		return dev;
1660 
1661 	/* Configure flow based GRE device. */
1662 	t = netdev_priv(dev);
1663 	t->collect_md = true;
1664 
1665 	err = ipgre_newlink(net, dev, tb, NULL, NULL);
1666 	if (err < 0) {
1667 		free_netdev(dev);
1668 		return ERR_PTR(err);
1669 	}
1670 
1671 	/* openvswitch users expect packet sizes to be unrestricted,
1672 	 * so set the largest MTU we can.
1673 	 */
1674 	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
1675 	if (err)
1676 		goto out;
1677 
1678 	err = rtnl_configure_link(dev, NULL, 0, NULL);
1679 	if (err < 0)
1680 		goto out;
1681 
1682 	return dev;
1683 out:
1684 	ip_tunnel_dellink(dev, &list_kill);
1685 	unregister_netdevice_many(&list_kill);
1686 	return ERR_PTR(err);
1687 }
1688 EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
1689 
1690 static int __net_init ipgre_tap_init_net(struct net *net)
1691 {
1692 	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
1693 }
1694 
1695 static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
1696 {
1697 	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
1698 }
1699 
1700 static struct pernet_operations ipgre_tap_net_ops = {
1701 	.init = ipgre_tap_init_net,
1702 	.exit_batch = ipgre_tap_exit_batch_net,
1703 	.id   = &gre_tap_net_id,
1704 	.size = sizeof(struct ip_tunnel_net),
1705 };
1706 
1707 static int __net_init erspan_init_net(struct net *net)
1708 {
1709 	return ip_tunnel_init_net(net, erspan_net_id,
1710 				  &erspan_link_ops, "erspan0");
1711 }
1712 
1713 static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
1714 {
1715 	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
1716 }
1717 
1718 static struct pernet_operations erspan_net_ops = {
1719 	.init = erspan_init_net,
1720 	.exit_batch = erspan_exit_batch_net,
1721 	.id   = &erspan_net_id,
1722 	.size = sizeof(struct ip_tunnel_net),
1723 };
1724 
1725 static int __init ipgre_init(void)
1726 {
1727 	int err;
1728 
1729 	pr_info("GRE over IPv4 tunneling driver\n");
1730 
1731 	err = register_pernet_device(&ipgre_net_ops);
1732 	if (err < 0)
1733 		return err;
1734 
1735 	err = register_pernet_device(&ipgre_tap_net_ops);
1736 	if (err < 0)
1737 		goto pnet_tap_failed;
1738 
1739 	err = register_pernet_device(&erspan_net_ops);
1740 	if (err < 0)
1741 		goto pnet_erspan_failed;
1742 
1743 	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1744 	if (err < 0) {
1745 		pr_info("%s: can't add protocol\n", __func__);
1746 		goto add_proto_failed;
1747 	}
1748 
1749 	err = rtnl_link_register(&ipgre_link_ops);
1750 	if (err < 0)
1751 		goto rtnl_link_failed;
1752 
1753 	err = rtnl_link_register(&ipgre_tap_ops);
1754 	if (err < 0)
1755 		goto tap_ops_failed;
1756 
1757 	err = rtnl_link_register(&erspan_link_ops);
1758 	if (err < 0)
1759 		goto erspan_link_failed;
1760 
1761 	return 0;
1762 
1763 erspan_link_failed:
1764 	rtnl_link_unregister(&ipgre_tap_ops);
1765 tap_ops_failed:
1766 	rtnl_link_unregister(&ipgre_link_ops);
1767 rtnl_link_failed:
1768 	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1769 add_proto_failed:
1770 	unregister_pernet_device(&erspan_net_ops);
1771 pnet_erspan_failed:
1772 	unregister_pernet_device(&ipgre_tap_net_ops);
1773 pnet_tap_failed:
1774 	unregister_pernet_device(&ipgre_net_ops);
1775 	return err;
1776 }
1777 
1778 static void __exit ipgre_fini(void)
1779 {
1780 	rtnl_link_unregister(&ipgre_tap_ops);
1781 	rtnl_link_unregister(&ipgre_link_ops);
1782 	rtnl_link_unregister(&erspan_link_ops);
1783 	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1784 	unregister_pernet_device(&ipgre_tap_net_ops);
1785 	unregister_pernet_device(&ipgre_net_ops);
1786 	unregister_pernet_device(&erspan_net_ops);
1787 }
1788 
1789 module_init(ipgre_init);
1790 module_exit(ipgre_fini);
1791 MODULE_LICENSE("GPL");
1792 MODULE_ALIAS_RTNL_LINK("gre");
1793 MODULE_ALIAS_RTNL_LINK("gretap");
1794 MODULE_ALIAS_RTNL_LINK("erspan");
1795 MODULE_ALIAS_NETDEV("gre0");
1796 MODULE_ALIAS_NETDEV("gretap0");
1797 MODULE_ALIAS_NETDEV("erspan0");
1798