xref: /openbmc/linux/net/ipv4/ip_gre.c (revision 94eacb45)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Linux NET3:	GRE over IP protocol decoder.
4  *
5  *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
6  */
7 
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 
10 #include <linux/capability.h>
11 #include <linux/module.h>
12 #include <linux/types.h>
13 #include <linux/kernel.h>
14 #include <linux/slab.h>
15 #include <linux/uaccess.h>
16 #include <linux/skbuff.h>
17 #include <linux/netdevice.h>
18 #include <linux/in.h>
19 #include <linux/tcp.h>
20 #include <linux/udp.h>
21 #include <linux/if_arp.h>
22 #include <linux/if_vlan.h>
23 #include <linux/init.h>
24 #include <linux/in6.h>
25 #include <linux/inetdevice.h>
26 #include <linux/igmp.h>
27 #include <linux/netfilter_ipv4.h>
28 #include <linux/etherdevice.h>
29 #include <linux/if_ether.h>
30 
31 #include <net/sock.h>
32 #include <net/ip.h>
33 #include <net/icmp.h>
34 #include <net/protocol.h>
35 #include <net/ip_tunnels.h>
36 #include <net/arp.h>
37 #include <net/checksum.h>
38 #include <net/dsfield.h>
39 #include <net/inet_ecn.h>
40 #include <net/xfrm.h>
41 #include <net/net_namespace.h>
42 #include <net/netns/generic.h>
43 #include <net/rtnetlink.h>
44 #include <net/gre.h>
45 #include <net/dst_metadata.h>
46 #include <net/erspan.h>
47 
48 /*
49    Problems & solutions
50    --------------------
51 
52    1. The most important issue is detecting local dead loops.
53    They would cause complete host lockup in transmit, which
54    would be "resolved" by stack overflow or, if queueing is enabled,
55    by infinite looping in net_bh.
56 
57    We cannot track such dead loops during route installation;
58    it is an infeasible task. The most general solution would be
59    to keep an skb->encapsulation counter (a sort of local ttl),
60    and silently drop the packet when it expires. It is a good
61    solution, but it would require maintaining a new variable in ALL
62    skbs, even if no tunneling is used.
63 
64    Current solution: xmit_recursion breaks dead loops. This is a percpu
65    counter since, once we enter the first ndo_xmit(), cpu migration is
66    forbidden. We force an exit if this counter reaches RECURSION_LIMIT.
67 
68    2. Networking dead loops would not kill routers, but they would really
69    kill the network. The IP hop limit plays the role of "t->recursion" in
70    this case, if we copy it from the packet being encapsulated to the
71    upper header. It is a very good solution, but it introduces two problems:
72 
73    - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
74      do not work over tunnels.
75    - traceroute does not work. I planned to relay ICMP from the tunnel,
76      so that this problem would be solved and traceroute output would
77      be even more informative. This idea turned out to be wrong:
78      only Linux complies with rfc1812 now (yes, guys, Linux is the only
79      true router now :-)); all other routers (at least in my neighbourhood)
80      return only 8 bytes of payload. That is the end of it.
81 
82    Hence, if we want OSPF to work or traceroute to say something reasonable,
83    we must search for another solution.
84 
85    One of them is to parse the packet, trying to detect an inner
86    encapsulation made by our node. That is difficult or even impossible,
87    especially taking fragmentation into account. In short, ttl is no solution at all.
88 
89    Current solution: it turned out to be UNEXPECTEDLY SIMPLE.
90    We force the DF flag on tunnels with a preconfigured hop limit,
91    and that is ALL. :-) Well, it does not remove the problem completely,
92    but the exponential growth of network traffic is changed to linear
93    (branches that exceed the pmtu are pruned) and the tunnel mtu
94    rapidly degrades to a value <68, where looping stops.
95    Yes, it is not good if there exists a router in the loop
96    which does not force DF, even when the packets it encapsulates
97    have DF set. But it is not our problem! Nobody could accuse us;
98    we did all that we could. Even if it was your gated that injected
99    the fatal route into the network, even if it was you who configured
100    the fatal static route: you are innocent. :-)
101 
102    Alexey Kuznetsov.
103  */
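
/* To make the xmit_recursion idea above concrete, here is a minimal,
 * single-threaded userspace sketch (the names and the limit value are
 * illustrative only; the real kernel counter is per-CPU and lives in the
 * core transmit path, not in this file):
 *
 *	#define RECURSION_LIMIT 4
 *
 *	static int xmit_recursion;
 *
 *	static int dev_xmit(struct pkt *p)
 *	{
 *		int err;
 *
 *		if (xmit_recursion >= RECURSION_LIMIT)
 *			return -1;		// dead loop detected: drop
 *		xmit_recursion++;
 *		err = tunnel_xmit(p);		// may re-enter dev_xmit()
 *		xmit_recursion--;
 *		return err;
 *	}
 *
 * A chain of tunnels that loops back to itself keeps re-entering
 * dev_xmit() without unwinding, hits the limit after a few levels, and
 * the packet is dropped instead of overflowing the stack.
 */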
104 
105 static bool log_ecn_error = true;
106 module_param(log_ecn_error, bool, 0644);
107 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
108 
109 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
110 static const struct header_ops ipgre_header_ops;
111 
112 static int ipgre_tunnel_init(struct net_device *dev);
113 static void erspan_build_header(struct sk_buff *skb,
114 				u32 id, u32 index,
115 				bool truncate, bool is_ipv4);
116 
117 static unsigned int ipgre_net_id __read_mostly;
118 static unsigned int gre_tap_net_id __read_mostly;
119 static unsigned int erspan_net_id __read_mostly;
120 
121 static int ipgre_err(struct sk_buff *skb, u32 info,
122 		     const struct tnl_ptk_info *tpi)
123 {
124 
125 	/* All the routers (except for Linux) return only
126 	   8 bytes of packet payload. This means that precise relaying of
127 	   ICMP in the real Internet is absolutely infeasible.
128 
129 	   Moreover, Cisco "wise men" put the GRE key in the third word
130 	   of the GRE header. This makes it impossible to maintain even
131 	   soft state for keyed GRE tunnels with checksums enabled. Tell
132 	   them "thank you".
133 
134 	   Well, I wonder: rfc1812 was written by a Cisco employee, so
135 	   why the hell do these idiots break standards established
136 	   by themselves???
137 	   */
138 	struct net *net = dev_net(skb->dev);
139 	struct ip_tunnel_net *itn;
140 	const struct iphdr *iph;
141 	const int type = icmp_hdr(skb)->type;
142 	const int code = icmp_hdr(skb)->code;
143 	unsigned int data_len = 0;
144 	struct ip_tunnel *t;
145 
146 	if (tpi->proto == htons(ETH_P_TEB))
147 		itn = net_generic(net, gre_tap_net_id);
148 	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
149 		 tpi->proto == htons(ETH_P_ERSPAN2))
150 		itn = net_generic(net, erspan_net_id);
151 	else
152 		itn = net_generic(net, ipgre_net_id);
153 
154 	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
155 	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
156 			     iph->daddr, iph->saddr, tpi->key);
157 
158 	if (!t)
159 		return -ENOENT;
160 
161 	switch (type) {
162 	default:
163 	case ICMP_PARAMETERPROB:
164 		return 0;
165 
166 	case ICMP_DEST_UNREACH:
167 		switch (code) {
168 		case ICMP_SR_FAILED:
169 		case ICMP_PORT_UNREACH:
170 			/* Impossible event. */
171 			return 0;
172 		default:
173 			/* All others are translated to HOST_UNREACH.
174 			   rfc2003 contains "deep thoughts" about NET_UNREACH;
175 			   I believe they are just ether pollution. --ANK
176 			 */
177 			break;
178 		}
179 		break;
180 
181 	case ICMP_TIME_EXCEEDED:
182 		if (code != ICMP_EXC_TTL)
183 			return 0;
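		/* Per RFC 4884 4.1 the length field counts 32-bit words of
		 * original datagram, so e.g. a value of 17 means
		 * 17 * 4 = 68 bytes of payload are present.
		 */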
184 		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
185 		break;
186 
187 	case ICMP_REDIRECT:
188 		break;
189 	}
190 
191 #if IS_ENABLED(CONFIG_IPV6)
192 	if (tpi->proto == htons(ETH_P_IPV6) &&
193 	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
194 					type, data_len))
195 		return 0;
196 #endif
197 
198 	if (t->parms.iph.daddr == 0 ||
199 	    ipv4_is_multicast(t->parms.iph.daddr))
200 		return 0;
201 
202 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
203 		return 0;
204 
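	/* Remember the error: the transmit path (ip_tunnel_xmit()) can then
	 * signal a link failure for the next few packets sent while the
	 * error is still fresh, i.e. within IPTUNNEL_ERR_TIMEO.
	 */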
205 	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
206 		t->err_count++;
207 	else
208 		t->err_count = 1;
209 	t->err_time = jiffies;
210 
211 	return 0;
212 }
213 
214 static void gre_err(struct sk_buff *skb, u32 info)
215 {
216 	/* All the routers (except for Linux) return only
217 	 * 8 bytes of packet payload. This means that precise relaying of
218 	 * ICMP in the real Internet is absolutely infeasible.
219 	 *
220 	 * Moreover, Cisco "wise men" put the GRE key in the third word
221 	 * of the GRE header. This makes it impossible to maintain even
222 	 * soft state for keyed GRE tunnels with checksums enabled.
223 	 * Tell them "thank you".
224 	 *
225 	 * Well, I wonder: rfc1812 was written by a Cisco employee, so
226 	 * why the hell do these idiots break standards established
227 	 * by themselves???
228 	 */
229 
230 	const struct iphdr *iph = (struct iphdr *)skb->data;
231 	const int type = icmp_hdr(skb)->type;
232 	const int code = icmp_hdr(skb)->code;
233 	struct tnl_ptk_info tpi;
234 
235 	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
236 			     iph->ihl * 4) < 0)
237 		return;
238 
239 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
240 		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
241 				 skb->dev->ifindex, IPPROTO_GRE);
242 		return;
243 	}
244 	if (type == ICMP_REDIRECT) {
245 		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
246 			      IPPROTO_GRE);
247 		return;
248 	}
249 
250 	ipgre_err(skb, info, &tpi);
251 }
252 
253 static bool is_erspan_type1(int gre_hdr_len)
254 {
255 	/* Both ERSPAN type I (version 0) and type II (version 1) use
256 	 * protocol 0x88BE, but type I has only a 4-byte GRE header,
257 	 * while type II has an 8-byte one.
258 	 */
259 	return gre_hdr_len == 4;
260 }
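
/* For reference, the two GRE header sizes mentioned above follow from the
 * optional fields selected by the flag bits. A minimal sketch of the
 * length computation (mirroring what gre_calc_hlen() computes; shown here
 * only for illustration):
 *
 *	static int example_gre_hlen(__be16 tun_flags)
 *	{
 *		int hlen = 4;		// base header: flags + protocol
 *
 *		if (tun_flags & TUNNEL_CSUM)
 *			hlen += 4;	// checksum + reserved
 *		if (tun_flags & TUNNEL_KEY)
 *			hlen += 4;	// key
 *		if (tun_flags & TUNNEL_SEQ)
 *			hlen += 4;	// sequence number
 *		return hlen;
 *	}
 *
 * ERSPAN type I uses no optional fields (4 bytes), while type II adds the
 * sequence number field (8 bytes).
 */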
261 
262 static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
263 		      int gre_hdr_len)
264 {
265 	struct net *net = dev_net(skb->dev);
266 	struct metadata_dst *tun_dst = NULL;
267 	struct erspan_base_hdr *ershdr;
268 	struct ip_tunnel_net *itn;
269 	struct ip_tunnel *tunnel;
270 	const struct iphdr *iph;
271 	struct erspan_md2 *md2;
272 	int ver;
273 	int len;
274 
275 	itn = net_generic(net, erspan_net_id);
276 	iph = ip_hdr(skb);
277 	if (is_erspan_type1(gre_hdr_len)) {
278 		ver = 0;
279 		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
280 					  tpi->flags | TUNNEL_NO_KEY,
281 					  iph->saddr, iph->daddr, 0);
282 	} else {
283 		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
284 		ver = ershdr->ver;
285 		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
286 					  tpi->flags | TUNNEL_KEY,
287 					  iph->saddr, iph->daddr, tpi->key);
288 	}
289 
290 	if (tunnel) {
291 		if (is_erspan_type1(gre_hdr_len))
292 			len = gre_hdr_len;
293 		else
294 			len = gre_hdr_len + erspan_hdr_len(ver);
295 
296 		if (unlikely(!pskb_may_pull(skb, len)))
297 			return PACKET_REJECT;
298 
299 		if (__iptunnel_pull_header(skb,
300 					   len,
301 					   htons(ETH_P_TEB),
302 					   false, false) < 0)
303 			goto drop;
304 
305 		if (tunnel->collect_md) {
306 			struct erspan_metadata *pkt_md, *md;
307 			struct ip_tunnel_info *info;
308 			unsigned char *gh;
309 			__be64 tun_id;
310 			__be16 flags;
311 
312 			tpi->flags |= TUNNEL_KEY;
313 			flags = tpi->flags;
314 			tun_id = key32_to_tunnel_id(tpi->key);
315 
316 			tun_dst = ip_tun_rx_dst(skb, flags,
317 						tun_id, sizeof(*md));
318 			if (!tun_dst)
319 				return PACKET_REJECT;
320 
321 			/* skb can be uncloned in __iptunnel_pull_header, so
322 			 * old pkt_md is no longer valid and we need to reset
323 			 * it
324 			 */
325 			gh = skb_network_header(skb) +
326 			     skb_network_header_len(skb);
327 			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
328 							    sizeof(*ershdr));
329 			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
330 			md->version = ver;
331 			md2 = &md->u.md2;
332 			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
333 						       ERSPAN_V2_MDSIZE);
334 
335 			info = &tun_dst->u.tun_info;
336 			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
337 			info->options_len = sizeof(*md);
338 		}
339 
340 		skb_reset_mac_header(skb);
341 		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
342 		return PACKET_RCVD;
343 	}
344 	return PACKET_REJECT;
345 
346 drop:
347 	kfree_skb(skb);
348 	return PACKET_RCVD;
349 }
350 
351 static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
352 		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
353 {
354 	struct metadata_dst *tun_dst = NULL;
355 	const struct iphdr *iph;
356 	struct ip_tunnel *tunnel;
357 
358 	iph = ip_hdr(skb);
359 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
360 				  iph->saddr, iph->daddr, tpi->key);
361 
362 	if (tunnel) {
363 		const struct iphdr *tnl_params;
364 
365 		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
366 					   raw_proto, false) < 0)
367 			goto drop;
368 
369 		/* Special case for ipgre_header_parse(), which expects the
370 		 * mac_header to point to the outer IP header.
371 		 */
372 		if (tunnel->dev->header_ops == &ipgre_header_ops)
373 			skb_pop_mac_header(skb);
374 		else
375 			skb_reset_mac_header(skb);
376 
377 		tnl_params = &tunnel->parms.iph;
378 		if (tunnel->collect_md || tnl_params->daddr == 0) {
379 			__be16 flags;
380 			__be64 tun_id;
381 
382 			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
383 			tun_id = key32_to_tunnel_id(tpi->key);
384 			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
385 			if (!tun_dst)
386 				return PACKET_REJECT;
387 		}
388 
389 		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
390 		return PACKET_RCVD;
391 	}
392 	return PACKET_NEXT;
393 
394 drop:
395 	kfree_skb(skb);
396 	return PACKET_RCVD;
397 }
398 
399 static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
400 		     int hdr_len)
401 {
402 	struct net *net = dev_net(skb->dev);
403 	struct ip_tunnel_net *itn;
404 	int res;
405 
406 	if (tpi->proto == htons(ETH_P_TEB))
407 		itn = net_generic(net, gre_tap_net_id);
408 	else
409 		itn = net_generic(net, ipgre_net_id);
410 
411 	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
412 	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
413 		/* ipgre tunnels in collect-metadata mode should also
414 		 * receive ETH_P_TEB traffic.
415 		 */
416 		itn = net_generic(net, ipgre_net_id);
417 		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
418 	}
419 	return res;
420 }
421 
422 static int gre_rcv(struct sk_buff *skb)
423 {
424 	struct tnl_ptk_info tpi;
425 	bool csum_err = false;
426 	int hdr_len;
427 
428 #ifdef CONFIG_NET_IPGRE_BROADCAST
429 	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
430 		/* Looped back packet, drop it! */
431 		if (rt_is_output_route(skb_rtable(skb)))
432 			goto drop;
433 	}
434 #endif
435 
436 	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
437 	if (hdr_len < 0)
438 		goto drop;
439 
440 	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
441 		     tpi.proto == htons(ETH_P_ERSPAN2))) {
442 		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
443 			return 0;
444 		goto out;
445 	}
446 
447 	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
448 		return 0;
449 
450 out:
451 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
452 drop:
453 	kfree_skb(skb);
454 	return 0;
455 }
456 
457 static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
458 		       const struct iphdr *tnl_params,
459 		       __be16 proto)
460 {
461 	struct ip_tunnel *tunnel = netdev_priv(dev);
462 	__be16 flags = tunnel->parms.o_flags;
463 
464 	/* Push GRE header. */
465 	gre_build_header(skb, tunnel->tun_hlen,
466 			 flags, proto, tunnel->parms.o_key,
467 			 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
468 
469 	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
470 }
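
/* The header pushed by gre_build_header() above starts with the 4-byte
 * base (struct gre_base_hdr: flags + protocol), followed by the optional
 * fields in this fixed order whenever the corresponding flag bit is set
 * (layout per RFC 2890, sketched here for orientation):
 *
 *	|C| |K|S|    Reserved0    | Ver |       Protocol Type       |
 *	|     Checksum (optional)       |    Reserved1 (optional)   |
 *	|                        Key (optional)                     |
 *	|                 Sequence Number (optional)                |
 */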
471 
472 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
473 {
474 	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
475 }
476 
477 static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
478 			__be16 proto)
479 {
480 	struct ip_tunnel *tunnel = netdev_priv(dev);
481 	struct ip_tunnel_info *tun_info;
482 	const struct ip_tunnel_key *key;
483 	int tunnel_hlen;
484 	__be16 flags;
485 
486 	tun_info = skb_tunnel_info(skb);
487 	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
488 		     ip_tunnel_info_af(tun_info) != AF_INET))
489 		goto err_free_skb;
490 
491 	key = &tun_info->key;
492 	tunnel_hlen = gre_calc_hlen(key->tun_flags);
493 
494 	if (skb_cow_head(skb, dev->needed_headroom))
495 		goto err_free_skb;
496 
497 	/* Push Tunnel header. */
498 	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
499 		goto err_free_skb;
500 
501 	flags = tun_info->key.tun_flags &
502 		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
503 	gre_build_header(skb, tunnel_hlen, flags, proto,
504 			 tunnel_id_to_key32(tun_info->key.tun_id),
505 			 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
506 
507 	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
508 
509 	return;
510 
511 err_free_skb:
512 	kfree_skb(skb);
513 	DEV_STATS_INC(dev, tx_dropped);
514 }
515 
516 static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
517 {
518 	struct ip_tunnel *tunnel = netdev_priv(dev);
519 	struct ip_tunnel_info *tun_info;
520 	const struct ip_tunnel_key *key;
521 	struct erspan_metadata *md;
522 	bool truncate = false;
523 	__be16 proto;
524 	int tunnel_hlen;
525 	int version;
526 	int nhoff;
527 
528 	tun_info = skb_tunnel_info(skb);
529 	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
530 		     ip_tunnel_info_af(tun_info) != AF_INET))
531 		goto err_free_skb;
532 
533 	key = &tun_info->key;
534 	if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
535 		goto err_free_skb;
536 	if (tun_info->options_len < sizeof(*md))
537 		goto err_free_skb;
538 	md = ip_tunnel_info_opts(tun_info);
539 
540 	/* ERSPAN has a fixed 8-byte GRE header */
541 	version = md->version;
542 	tunnel_hlen = 8 + erspan_hdr_len(version);
543 
544 	if (skb_cow_head(skb, dev->needed_headroom))
545 		goto err_free_skb;
546 
547 	if (gre_handle_offloads(skb, false))
548 		goto err_free_skb;
549 
550 	if (skb->len > dev->mtu + dev->hard_header_len) {
551 		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
552 			goto err_free_skb;
553 		truncate = true;
554 	}
555 
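	/* Frames may already be shorter than their IP headers claim (e.g.
	 * pre-trimmed by the mirror source); such frames must carry the
	 * truncated bit in the ERSPAN header as well.
	 */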
556 	nhoff = skb_network_offset(skb);
557 	if (skb->protocol == htons(ETH_P_IP) &&
558 	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
559 		truncate = true;
560 
561 	if (skb->protocol == htons(ETH_P_IPV6)) {
562 		int thoff;
563 
564 		if (skb_transport_header_was_set(skb))
565 			thoff = skb_transport_offset(skb);
566 		else
567 			thoff = nhoff + sizeof(struct ipv6hdr);
568 		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
569 			truncate = true;
570 	}
571 
572 	if (version == 1) {
573 		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
574 				    ntohl(md->u.index), truncate, true);
575 		proto = htons(ETH_P_ERSPAN);
576 	} else if (version == 2) {
577 		erspan_build_header_v2(skb,
578 				       ntohl(tunnel_id_to_key32(key->tun_id)),
579 				       md->u.md2.dir,
580 				       get_hwid(&md->u.md2),
581 				       truncate, true);
582 		proto = htons(ETH_P_ERSPAN2);
583 	} else {
584 		goto err_free_skb;
585 	}
586 
587 	gre_build_header(skb, 8, TUNNEL_SEQ,
588 			 proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno)));
589 
590 	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
591 
592 	return;
593 
594 err_free_skb:
595 	kfree_skb(skb);
596 	DEV_STATS_INC(dev, tx_dropped);
597 }
598 
599 static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
600 {
601 	struct ip_tunnel_info *info = skb_tunnel_info(skb);
602 	const struct ip_tunnel_key *key;
603 	struct rtable *rt;
604 	struct flowi4 fl4;
605 
606 	if (ip_tunnel_info_af(info) != AF_INET)
607 		return -EINVAL;
608 
609 	key = &info->key;
610 	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
611 			    tunnel_id_to_key32(key->tun_id),
612 			    key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
613 			    skb->mark, skb_get_hash(skb), key->flow_flags);
614 	rt = ip_route_output_key(dev_net(dev), &fl4);
615 	if (IS_ERR(rt))
616 		return PTR_ERR(rt);
617 
618 	ip_rt_put(rt);
619 	info->key.u.ipv4.src = fl4.saddr;
620 	return 0;
621 }
622 
623 static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
624 			      struct net_device *dev)
625 {
626 	struct ip_tunnel *tunnel = netdev_priv(dev);
627 	const struct iphdr *tnl_params;
628 
629 	if (!pskb_inet_may_pull(skb))
630 		goto free_skb;
631 
632 	if (tunnel->collect_md) {
633 		gre_fb_xmit(skb, dev, skb->protocol);
634 		return NETDEV_TX_OK;
635 	}
636 
637 	if (dev->header_ops) {
638 		int pull_len = tunnel->hlen + sizeof(struct iphdr);
639 
640 		if (skb_cow_head(skb, 0))
641 			goto free_skb;
642 
643 		tnl_params = (const struct iphdr *)skb->data;
644 
645 		if (!pskb_network_may_pull(skb, pull_len))
646 			goto free_skb;
647 
648 		/* ip_tunnel_xmit() needs skb->data pointing to gre header. */
649 		skb_pull(skb, pull_len);
650 		skb_reset_mac_header(skb);
651 
652 		if (skb->ip_summed == CHECKSUM_PARTIAL &&
653 		    skb_checksum_start(skb) < skb->data)
654 			goto free_skb;
655 	} else {
656 		if (skb_cow_head(skb, dev->needed_headroom))
657 			goto free_skb;
658 
659 		tnl_params = &tunnel->parms.iph;
660 	}
661 
662 	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
663 		goto free_skb;
664 
665 	__gre_xmit(skb, dev, tnl_params, skb->protocol);
666 	return NETDEV_TX_OK;
667 
668 free_skb:
669 	kfree_skb(skb);
670 	DEV_STATS_INC(dev, tx_dropped);
671 	return NETDEV_TX_OK;
672 }
673 
674 static netdev_tx_t erspan_xmit(struct sk_buff *skb,
675 			       struct net_device *dev)
676 {
677 	struct ip_tunnel *tunnel = netdev_priv(dev);
678 	bool truncate = false;
679 	__be16 proto;
680 
681 	if (!pskb_inet_may_pull(skb))
682 		goto free_skb;
683 
684 	if (tunnel->collect_md) {
685 		erspan_fb_xmit(skb, dev);
686 		return NETDEV_TX_OK;
687 	}
688 
689 	if (gre_handle_offloads(skb, false))
690 		goto free_skb;
691 
692 	if (skb_cow_head(skb, dev->needed_headroom))
693 		goto free_skb;
694 
695 	if (skb->len > dev->mtu + dev->hard_header_len) {
696 		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
697 			goto free_skb;
698 		truncate = true;
699 	}
700 
701 	/* Push ERSPAN header */
702 	if (tunnel->erspan_ver == 0) {
703 		proto = htons(ETH_P_ERSPAN);
704 		tunnel->parms.o_flags &= ~TUNNEL_SEQ;
705 	} else if (tunnel->erspan_ver == 1) {
706 		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
707 				    tunnel->index,
708 				    truncate, true);
709 		proto = htons(ETH_P_ERSPAN);
710 	} else if (tunnel->erspan_ver == 2) {
711 		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
712 				       tunnel->dir, tunnel->hwid,
713 				       truncate, true);
714 		proto = htons(ETH_P_ERSPAN2);
715 	} else {
716 		goto free_skb;
717 	}
718 
719 	tunnel->parms.o_flags &= ~TUNNEL_KEY;
720 	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
721 	return NETDEV_TX_OK;
722 
723 free_skb:
724 	kfree_skb(skb);
725 	DEV_STATS_INC(dev, tx_dropped);
726 	return NETDEV_TX_OK;
727 }
728 
729 static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
730 				struct net_device *dev)
731 {
732 	struct ip_tunnel *tunnel = netdev_priv(dev);
733 
734 	if (!pskb_inet_may_pull(skb))
735 		goto free_skb;
736 
737 	if (tunnel->collect_md) {
738 		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
739 		return NETDEV_TX_OK;
740 	}
741 
742 	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
743 		goto free_skb;
744 
745 	if (skb_cow_head(skb, dev->needed_headroom))
746 		goto free_skb;
747 
748 	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
749 	return NETDEV_TX_OK;
750 
751 free_skb:
752 	kfree_skb(skb);
753 	DEV_STATS_INC(dev, tx_dropped);
754 	return NETDEV_TX_OK;
755 }
756 
757 static void ipgre_link_update(struct net_device *dev, bool set_mtu)
758 {
759 	struct ip_tunnel *tunnel = netdev_priv(dev);
760 	__be16 flags;
761 	int len;
762 
763 	len = tunnel->tun_hlen;
764 	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
765 	len = tunnel->tun_hlen - len;
766 	tunnel->hlen = tunnel->hlen + len;
767 
768 	if (dev->header_ops)
769 		dev->hard_header_len += len;
770 	else
771 		dev->needed_headroom += len;
772 
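	/* 68 is the minimum IPv4 MTU: RFC 791 requires every host to accept
	 * 68-byte datagrams, so never shrink below that.
	 */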
773 	if (set_mtu)
774 		dev->mtu = max_t(int, dev->mtu - len, 68);
775 
776 	flags = tunnel->parms.o_flags;
777 
778 	if (flags & TUNNEL_SEQ ||
779 	    (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
780 		dev->features &= ~NETIF_F_GSO_SOFTWARE;
781 		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
782 	} else {
783 		dev->features |= NETIF_F_GSO_SOFTWARE;
784 		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
785 	}
786 }
787 
788 static int ipgre_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p,
789 			    int cmd)
790 {
791 	int err;
792 
793 	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
794 		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
795 		    p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
796 		    ((p->i_flags | p->o_flags) & (GRE_VERSION | GRE_ROUTING)))
797 			return -EINVAL;
798 	}
799 
800 	p->i_flags = gre_flags_to_tnl_flags(p->i_flags);
801 	p->o_flags = gre_flags_to_tnl_flags(p->o_flags);
802 
803 	err = ip_tunnel_ctl(dev, p, cmd);
804 	if (err)
805 		return err;
806 
807 	if (cmd == SIOCCHGTUNNEL) {
808 		struct ip_tunnel *t = netdev_priv(dev);
809 
810 		t->parms.i_flags = p->i_flags;
811 		t->parms.o_flags = p->o_flags;
812 
813 		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
814 			ipgre_link_update(dev, true);
815 	}
816 
817 	p->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
818 	p->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
819 	return 0;
820 }
821 
822 /* Nice toy. Unfortunately, useless in real life :-)
823    It allows one to construct a virtual multiprotocol broadcast "LAN"
824    over the Internet, provided multicast routing is set up.
825 
826 
827    I have no idea whether this bicycle was invented before me,
828    so I had to set ARPHRD_IPGRE to a random value.
829    I have an impression that Cisco may have made something similar,
830    but this feature is apparently missing from IOS<=11.2(8).
831 
832    I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
833    with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
834 
835    ping -t 255 224.66.66.66
836 
837    If nobody answers, mbone does not work.
838 
839    ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
840    ip addr add 10.66.66.<somewhat>/24 dev Universe
841    ifconfig Universe up
842    ifconfig Universe add fe80::<Your_real_addr>/10
843    ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
844    ftp 10.66.66.66
845    ...
846    ftp fec0:6666:6666::193.233.7.65
847    ...
848  */
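
/* For reference, a modern iproute2 equivalent of the ifconfig commands in
 * the example above (an untested sketch; addresses are the same
 * illustrative placeholders):
 *
 *	ip tunnel add Universe mode gre remote 224.66.66.66 \
 *		local <Your_real_addr> ttl 255
 *	ip addr add 10.66.66.<somewhat>/24 dev Universe
 *	ip link set Universe up
 *	ip addr add fe80::<Your_real_addr>/10 dev Universe
 *	ip addr add fec0:6666:6666::<Your_real_addr>/96 dev Universe
 */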
849 static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
850 			unsigned short type,
851 			const void *daddr, const void *saddr, unsigned int len)
852 {
853 	struct ip_tunnel *t = netdev_priv(dev);
854 	struct iphdr *iph;
855 	struct gre_base_hdr *greh;
856 
857 	iph = skb_push(skb, t->hlen + sizeof(*iph));
858 	greh = (struct gre_base_hdr *)(iph+1);
859 	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
860 	greh->protocol = htons(type);
861 
862 	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
863 
864 	/* Set the source hardware address. */
865 	if (saddr)
866 		memcpy(&iph->saddr, saddr, 4);
867 	if (daddr)
868 		memcpy(&iph->daddr, daddr, 4);
869 	if (iph->daddr)
870 		return t->hlen + sizeof(*iph);
871 
872 	return -(t->hlen + sizeof(*iph));
873 }
874 
875 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
876 {
877 	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
878 	memcpy(haddr, &iph->saddr, 4);
879 	return 4;
880 }
881 
882 static const struct header_ops ipgre_header_ops = {
883 	.create	= ipgre_header,
884 	.parse	= ipgre_header_parse,
885 };
886 
887 #ifdef CONFIG_NET_IPGRE_BROADCAST
888 static int ipgre_open(struct net_device *dev)
889 {
890 	struct ip_tunnel *t = netdev_priv(dev);
891 
892 	if (ipv4_is_multicast(t->parms.iph.daddr)) {
893 		struct flowi4 fl4;
894 		struct rtable *rt;
895 
896 		rt = ip_route_output_gre(t->net, &fl4,
897 					 t->parms.iph.daddr,
898 					 t->parms.iph.saddr,
899 					 t->parms.o_key,
900 					 RT_TOS(t->parms.iph.tos),
901 					 t->parms.link);
902 		if (IS_ERR(rt))
903 			return -EADDRNOTAVAIL;
904 		dev = rt->dst.dev;
905 		ip_rt_put(rt);
906 		if (!__in_dev_get_rtnl(dev))
907 			return -EADDRNOTAVAIL;
908 		t->mlink = dev->ifindex;
909 		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
910 	}
911 	return 0;
912 }
913 
914 static int ipgre_close(struct net_device *dev)
915 {
916 	struct ip_tunnel *t = netdev_priv(dev);
917 
918 	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
919 		struct in_device *in_dev;
920 		in_dev = inetdev_by_index(t->net, t->mlink);
921 		if (in_dev)
922 			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
923 	}
924 	return 0;
925 }
926 #endif
927 
928 static const struct net_device_ops ipgre_netdev_ops = {
929 	.ndo_init		= ipgre_tunnel_init,
930 	.ndo_uninit		= ip_tunnel_uninit,
931 #ifdef CONFIG_NET_IPGRE_BROADCAST
932 	.ndo_open		= ipgre_open,
933 	.ndo_stop		= ipgre_close,
934 #endif
935 	.ndo_start_xmit		= ipgre_xmit,
936 	.ndo_siocdevprivate	= ip_tunnel_siocdevprivate,
937 	.ndo_change_mtu		= ip_tunnel_change_mtu,
938 	.ndo_get_stats64	= dev_get_tstats64,
939 	.ndo_get_iflink		= ip_tunnel_get_iflink,
940 	.ndo_tunnel_ctl		= ipgre_tunnel_ctl,
941 };
942 
943 #define GRE_FEATURES (NETIF_F_SG |		\
944 		      NETIF_F_FRAGLIST |	\
945 		      NETIF_F_HIGHDMA |		\
946 		      NETIF_F_HW_CSUM)
947 
948 static void ipgre_tunnel_setup(struct net_device *dev)
949 {
950 	dev->netdev_ops		= &ipgre_netdev_ops;
951 	dev->type		= ARPHRD_IPGRE;
952 	ip_tunnel_setup(dev, ipgre_net_id);
953 }
954 
955 static void __gre_tunnel_init(struct net_device *dev)
956 {
957 	struct ip_tunnel *tunnel;
958 	__be16 flags;
959 
960 	tunnel = netdev_priv(dev);
961 	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
962 	tunnel->parms.iph.protocol = IPPROTO_GRE;
963 
964 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
965 	dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
966 
967 	dev->features		|= GRE_FEATURES | NETIF_F_LLTX;
968 	dev->hw_features	|= GRE_FEATURES;
969 
970 	flags = tunnel->parms.o_flags;
971 
972 	/* TCP offload with GRE SEQ is not supported, nor can we support 2
973 	 * levels of outer headers requiring an update.
974 	 */
975 	if (flags & TUNNEL_SEQ)
976 		return;
977 	if (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)
978 		return;
979 
980 	dev->features |= NETIF_F_GSO_SOFTWARE;
981 	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
982 }
983 
984 static int ipgre_tunnel_init(struct net_device *dev)
985 {
986 	struct ip_tunnel *tunnel = netdev_priv(dev);
987 	struct iphdr *iph = &tunnel->parms.iph;
988 
989 	__gre_tunnel_init(dev);
990 
991 	__dev_addr_set(dev, &iph->saddr, 4);
992 	memcpy(dev->broadcast, &iph->daddr, 4);
993 
994 	dev->flags		= IFF_NOARP;
995 	netif_keep_dst(dev);
996 	dev->addr_len		= 4;
997 
998 	if (iph->daddr && !tunnel->collect_md) {
999 #ifdef CONFIG_NET_IPGRE_BROADCAST
1000 		if (ipv4_is_multicast(iph->daddr)) {
1001 			if (!iph->saddr)
1002 				return -EINVAL;
1003 			dev->flags = IFF_BROADCAST;
1004 			dev->header_ops = &ipgre_header_ops;
1005 			dev->hard_header_len = tunnel->hlen + sizeof(*iph);
1006 			dev->needed_headroom = 0;
1007 		}
1008 #endif
1009 	} else if (!tunnel->collect_md) {
1010 		dev->header_ops = &ipgre_header_ops;
1011 		dev->hard_header_len = tunnel->hlen + sizeof(*iph);
1012 		dev->needed_headroom = 0;
1013 	}
1014 
1015 	return ip_tunnel_init(dev);
1016 }
1017 
1018 static const struct gre_protocol ipgre_protocol = {
1019 	.handler     = gre_rcv,
1020 	.err_handler = gre_err,
1021 };
1022 
1023 static int __net_init ipgre_init_net(struct net *net)
1024 {
1025 	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
1026 }
1027 
1028 static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
1029 {
1030 	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
1031 }
1032 
1033 static struct pernet_operations ipgre_net_ops = {
1034 	.init = ipgre_init_net,
1035 	.exit_batch = ipgre_exit_batch_net,
1036 	.id   = &ipgre_net_id,
1037 	.size = sizeof(struct ip_tunnel_net),
1038 };
1039 
1040 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1041 				 struct netlink_ext_ack *extack)
1042 {
1043 	__be16 flags;
1044 
1045 	if (!data)
1046 		return 0;
1047 
1048 	flags = 0;
1049 	if (data[IFLA_GRE_IFLAGS])
1050 		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1051 	if (data[IFLA_GRE_OFLAGS])
1052 		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1053 	if (flags & (GRE_VERSION|GRE_ROUTING))
1054 		return -EINVAL;
1055 
1056 	if (data[IFLA_GRE_COLLECT_METADATA] &&
1057 	    data[IFLA_GRE_ENCAP_TYPE] &&
1058 	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
1059 		return -EINVAL;
1060 
1061 	return 0;
1062 }
1063 
1064 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1065 			      struct netlink_ext_ack *extack)
1066 {
1067 	__be32 daddr;
1068 
1069 	if (tb[IFLA_ADDRESS]) {
1070 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1071 			return -EINVAL;
1072 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1073 			return -EADDRNOTAVAIL;
1074 	}
1075 
1076 	if (!data)
1077 		goto out;
1078 
1079 	if (data[IFLA_GRE_REMOTE]) {
1080 		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1081 		if (!daddr)
1082 			return -EINVAL;
1083 	}
1084 
1085 out:
1086 	return ipgre_tunnel_validate(tb, data, extack);
1087 }
1088 
1089 static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
1090 			   struct netlink_ext_ack *extack)
1091 {
1092 	__be16 flags = 0;
1093 	int ret;
1094 
1095 	if (!data)
1096 		return 0;
1097 
1098 	ret = ipgre_tap_validate(tb, data, extack);
1099 	if (ret)
1100 		return ret;
1101 
1102 	if (data[IFLA_GRE_ERSPAN_VER] &&
1103 	    nla_get_u8(data[IFLA_GRE_ERSPAN_VER]) == 0)
1104 		return 0;
1105 
1106 	/* ERSPAN type II/III should only have the GRE sequence and key flags */
1107 	if (data[IFLA_GRE_OFLAGS])
1108 		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1109 	if (data[IFLA_GRE_IFLAGS])
1110 		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1111 	if (!data[IFLA_GRE_COLLECT_METADATA] &&
1112 	    flags != (GRE_SEQ | GRE_KEY))
1113 		return -EINVAL;
1114 
1115 	/* The ERSPAN session ID is only 10 bits wide. Since we reuse
1116 	 * the 32-bit key field as the ID, check its range.
1117 	 */
1118 	if (data[IFLA_GRE_IKEY] &&
1119 	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1120 		return -EINVAL;
1121 
1122 	if (data[IFLA_GRE_OKEY] &&
1123 	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1124 		return -EINVAL;
1125 
1126 	return 0;
1127 }
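
/* A worked example of the session-ID range check above, assuming the
 * usual 10-bit ID_MASK of 0x3ff: an okey of 0x3ff (1023) is accepted,
 * while 0x400 (1024) has bits outside the mask and yields -EINVAL.
 */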
1128 
1129 static int ipgre_netlink_parms(struct net_device *dev,
1130 				struct nlattr *data[],
1131 				struct nlattr *tb[],
1132 				struct ip_tunnel_parm *parms,
1133 				__u32 *fwmark)
1134 {
1135 	struct ip_tunnel *t = netdev_priv(dev);
1136 
1137 	memset(parms, 0, sizeof(*parms));
1138 
1139 	parms->iph.protocol = IPPROTO_GRE;
1140 
1141 	if (!data)
1142 		return 0;
1143 
1144 	if (data[IFLA_GRE_LINK])
1145 		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1146 
1147 	if (data[IFLA_GRE_IFLAGS])
1148 		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));
1149 
1150 	if (data[IFLA_GRE_OFLAGS])
1151 		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));
1152 
1153 	if (data[IFLA_GRE_IKEY])
1154 		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1155 
1156 	if (data[IFLA_GRE_OKEY])
1157 		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1158 
1159 	if (data[IFLA_GRE_LOCAL])
1160 		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
1161 
1162 	if (data[IFLA_GRE_REMOTE])
1163 		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
1164 
1165 	if (data[IFLA_GRE_TTL])
1166 		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1167 
1168 	if (data[IFLA_GRE_TOS])
1169 		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1170 
1171 	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
1172 		if (t->ignore_df)
1173 			return -EINVAL;
1174 		parms->iph.frag_off = htons(IP_DF);
1175 	}
1176 
1177 	if (data[IFLA_GRE_COLLECT_METADATA]) {
1178 		t->collect_md = true;
1179 		if (dev->type == ARPHRD_IPGRE)
1180 			dev->type = ARPHRD_NONE;
1181 	}
1182 
1183 	if (data[IFLA_GRE_IGNORE_DF]) {
1184 		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
1185 		  && (parms->iph.frag_off & htons(IP_DF)))
1186 			return -EINVAL;
1187 		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
1188 	}
1189 
1190 	if (data[IFLA_GRE_FWMARK])
1191 		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
1192 
1193 	return 0;
1194 }
1195 
1196 static int erspan_netlink_parms(struct net_device *dev,
1197 				struct nlattr *data[],
1198 				struct nlattr *tb[],
1199 				struct ip_tunnel_parm *parms,
1200 				__u32 *fwmark)
1201 {
1202 	struct ip_tunnel *t = netdev_priv(dev);
1203 	int err;
1204 
1205 	err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
1206 	if (err)
1207 		return err;
1208 	if (!data)
1209 		return 0;
1210 
1211 	if (data[IFLA_GRE_ERSPAN_VER]) {
1212 		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1213 
1214 		if (t->erspan_ver > 2)
1215 			return -EINVAL;
1216 	}
1217 
1218 	if (t->erspan_ver == 1) {
1219 		if (data[IFLA_GRE_ERSPAN_INDEX]) {
1220 			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1221 			if (t->index & ~INDEX_MASK)
1222 				return -EINVAL;
1223 		}
1224 	} else if (t->erspan_ver == 2) {
1225 		if (data[IFLA_GRE_ERSPAN_DIR]) {
1226 			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1227 			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
1228 				return -EINVAL;
1229 		}
1230 		if (data[IFLA_GRE_ERSPAN_HWID]) {
1231 			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1232 			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
1233 				return -EINVAL;
1234 		}
1235 	}
1236 
1237 	return 0;
1238 }
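
/* The ERSPAN metadata fields validated above are narrow bit-fields: the
 * v1 index is 20 bits wide (INDEX_MASK), while for v2 the direction is a
 * single bit and the hardware ID is 6 bits, hence the shifted-mask
 * checks.
 */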
1239 
1240 /* This function returns true when ENCAP attributes are present in the nl msg */
1241 static bool ipgre_netlink_encap_parms(struct nlattr *data[],
1242 				      struct ip_tunnel_encap *ipencap)
1243 {
1244 	bool ret = false;
1245 
1246 	memset(ipencap, 0, sizeof(*ipencap));
1247 
1248 	if (!data)
1249 		return ret;
1250 
1251 	if (data[IFLA_GRE_ENCAP_TYPE]) {
1252 		ret = true;
1253 		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1254 	}
1255 
1256 	if (data[IFLA_GRE_ENCAP_FLAGS]) {
1257 		ret = true;
1258 		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1259 	}
1260 
1261 	if (data[IFLA_GRE_ENCAP_SPORT]) {
1262 		ret = true;
1263 		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1264 	}
1265 
1266 	if (data[IFLA_GRE_ENCAP_DPORT]) {
1267 		ret = true;
1268 		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1269 	}
1270 
1271 	return ret;
1272 }
1273 
1274 static int gre_tap_init(struct net_device *dev)
1275 {
1276 	__gre_tunnel_init(dev);
1277 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1278 	netif_keep_dst(dev);
1279 
1280 	return ip_tunnel_init(dev);
1281 }
1282 
1283 static const struct net_device_ops gre_tap_netdev_ops = {
1284 	.ndo_init		= gre_tap_init,
1285 	.ndo_uninit		= ip_tunnel_uninit,
1286 	.ndo_start_xmit		= gre_tap_xmit,
1287 	.ndo_set_mac_address 	= eth_mac_addr,
1288 	.ndo_validate_addr	= eth_validate_addr,
1289 	.ndo_change_mtu		= ip_tunnel_change_mtu,
1290 	.ndo_get_stats64	= dev_get_tstats64,
1291 	.ndo_get_iflink		= ip_tunnel_get_iflink,
1292 	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1293 };
1294 
1295 static int erspan_tunnel_init(struct net_device *dev)
1296 {
1297 	struct ip_tunnel *tunnel = netdev_priv(dev);
1298 
1299 	if (tunnel->erspan_ver == 0)
1300 		tunnel->tun_hlen = 4; /* 4-byte GRE hdr. */
1301 	else
1302 		tunnel->tun_hlen = 8; /* 8-byte GRE hdr. */
1303 
1304 	tunnel->parms.iph.protocol = IPPROTO_GRE;
1305 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1306 		       erspan_hdr_len(tunnel->erspan_ver);
1307 
1308 	dev->features		|= GRE_FEATURES;
1309 	dev->hw_features	|= GRE_FEATURES;
1310 	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
1311 	netif_keep_dst(dev);
1312 
1313 	return ip_tunnel_init(dev);
1314 }
1315 
1316 static const struct net_device_ops erspan_netdev_ops = {
1317 	.ndo_init		= erspan_tunnel_init,
1318 	.ndo_uninit		= ip_tunnel_uninit,
1319 	.ndo_start_xmit		= erspan_xmit,
1320 	.ndo_set_mac_address	= eth_mac_addr,
1321 	.ndo_validate_addr	= eth_validate_addr,
1322 	.ndo_change_mtu		= ip_tunnel_change_mtu,
1323 	.ndo_get_stats64	= dev_get_tstats64,
1324 	.ndo_get_iflink		= ip_tunnel_get_iflink,
1325 	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1326 };
1327 
1328 static void ipgre_tap_setup(struct net_device *dev)
1329 {
1330 	ether_setup(dev);
1331 	dev->max_mtu = 0;
1332 	dev->netdev_ops	= &gre_tap_netdev_ops;
1333 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1334 	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
1335 	ip_tunnel_setup(dev, gre_tap_net_id);
1336 }
1337 
1338 static int
1339 ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
1340 {
1341 	struct ip_tunnel_encap ipencap;
1342 
1343 	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1344 		struct ip_tunnel *t = netdev_priv(dev);
1345 		int err = ip_tunnel_encap_setup(t, &ipencap);
1346 
1347 		if (err < 0)
1348 			return err;
1349 	}
1350 
1351 	return 0;
1352 }
1353 
1354 static int ipgre_newlink(struct net *src_net, struct net_device *dev,
1355 			 struct nlattr *tb[], struct nlattr *data[],
1356 			 struct netlink_ext_ack *extack)
1357 {
1358 	struct ip_tunnel_parm p;
1359 	__u32 fwmark = 0;
1360 	int err;
1361 
1362 	err = ipgre_newlink_encap_setup(dev, data);
1363 	if (err)
1364 		return err;
1365 
1366 	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1367 	if (err < 0)
1368 		return err;
1369 	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1370 }
1371 
1372 static int erspan_newlink(struct net *src_net, struct net_device *dev,
1373 			  struct nlattr *tb[], struct nlattr *data[],
1374 			  struct netlink_ext_ack *extack)
1375 {
1376 	struct ip_tunnel_parm p;
1377 	__u32 fwmark = 0;
1378 	int err;
1379 
1380 	err = ipgre_newlink_encap_setup(dev, data);
1381 	if (err)
1382 		return err;
1383 
1384 	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
1385 	if (err)
1386 		return err;
1387 	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1388 }
1389 
1390 static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1391 			    struct nlattr *data[],
1392 			    struct netlink_ext_ack *extack)
1393 {
1394 	struct ip_tunnel *t = netdev_priv(dev);
1395 	__u32 fwmark = t->fwmark;
1396 	struct ip_tunnel_parm p;
1397 	int err;
1398 
1399 	err = ipgre_newlink_encap_setup(dev, data);
1400 	if (err)
1401 		return err;
1402 
1403 	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1404 	if (err < 0)
1405 		return err;
1406 
1407 	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1408 	if (err < 0)
1409 		return err;
1410 
1411 	t->parms.i_flags = p.i_flags;
1412 	t->parms.o_flags = p.o_flags;
1413 
1414 	ipgre_link_update(dev, !tb[IFLA_MTU]);
1415 
1416 	return 0;
1417 }
1418 
1419 static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
1420 			     struct nlattr *data[],
1421 			     struct netlink_ext_ack *extack)
1422 {
1423 	struct ip_tunnel *t = netdev_priv(dev);
1424 	__u32 fwmark = t->fwmark;
1425 	struct ip_tunnel_parm p;
1426 	int err;
1427 
1428 	err = ipgre_newlink_encap_setup(dev, data);
1429 	if (err)
1430 		return err;
1431 
1432 	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
1433 	if (err < 0)
1434 		return err;
1435 
1436 	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1437 	if (err < 0)
1438 		return err;
1439 
1440 	t->parms.i_flags = p.i_flags;
1441 	t->parms.o_flags = p.o_flags;
1442 
1443 	return 0;
1444 }
1445 
1446 static size_t ipgre_get_size(const struct net_device *dev)
1447 {
1448 	return
1449 		/* IFLA_GRE_LINK */
1450 		nla_total_size(4) +
1451 		/* IFLA_GRE_IFLAGS */
1452 		nla_total_size(2) +
1453 		/* IFLA_GRE_OFLAGS */
1454 		nla_total_size(2) +
1455 		/* IFLA_GRE_IKEY */
1456 		nla_total_size(4) +
1457 		/* IFLA_GRE_OKEY */
1458 		nla_total_size(4) +
1459 		/* IFLA_GRE_LOCAL */
1460 		nla_total_size(4) +
1461 		/* IFLA_GRE_REMOTE */
1462 		nla_total_size(4) +
1463 		/* IFLA_GRE_TTL */
1464 		nla_total_size(1) +
1465 		/* IFLA_GRE_TOS */
1466 		nla_total_size(1) +
1467 		/* IFLA_GRE_PMTUDISC */
1468 		nla_total_size(1) +
1469 		/* IFLA_GRE_ENCAP_TYPE */
1470 		nla_total_size(2) +
1471 		/* IFLA_GRE_ENCAP_FLAGS */
1472 		nla_total_size(2) +
1473 		/* IFLA_GRE_ENCAP_SPORT */
1474 		nla_total_size(2) +
1475 		/* IFLA_GRE_ENCAP_DPORT */
1476 		nla_total_size(2) +
1477 		/* IFLA_GRE_COLLECT_METADATA */
1478 		nla_total_size(0) +
1479 		/* IFLA_GRE_IGNORE_DF */
1480 		nla_total_size(1) +
1481 		/* IFLA_GRE_FWMARK */
1482 		nla_total_size(4) +
1483 		/* IFLA_GRE_ERSPAN_INDEX */
1484 		nla_total_size(4) +
1485 		/* IFLA_GRE_ERSPAN_VER */
1486 		nla_total_size(1) +
1487 		/* IFLA_GRE_ERSPAN_DIR */
1488 		nla_total_size(1) +
1489 		/* IFLA_GRE_ERSPAN_HWID */
1490 		nla_total_size(2) +
1491 		0;
1492 }
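
/* Each nla_total_size(n) term above reserves room for one attribute: the
 * 4-byte netlink attribute header plus n payload bytes, rounded up to a
 * 4-byte boundary. For example, nla_total_size(1) = NLA_ALIGN(4 + 1) = 8
 * bytes, and the zero-size IFLA_GRE_COLLECT_METADATA flag still costs
 * its 4-byte header.
 */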
1493 
1494 static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1495 {
1496 	struct ip_tunnel *t = netdev_priv(dev);
1497 	struct ip_tunnel_parm *p = &t->parms;
1498 	__be16 o_flags = p->o_flags;
1499 
1500 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1501 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
1502 			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
1503 	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
1504 			 gre_tnl_flags_to_gre_flags(o_flags)) ||
1505 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1506 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1507 	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1508 	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1509 	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1510 	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1511 	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1512 		       !!(p->iph.frag_off & htons(IP_DF))) ||
1513 	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
1514 		goto nla_put_failure;
1515 
1516 	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
1517 			t->encap.type) ||
1518 	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
1519 			 t->encap.sport) ||
1520 	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
1521 			 t->encap.dport) ||
1522 	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
1523 			t->encap.flags))
1524 		goto nla_put_failure;
1525 
1526 	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
1527 		goto nla_put_failure;
1528 
1529 	if (t->collect_md) {
1530 		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
1531 			goto nla_put_failure;
1532 	}
1533 
1534 	return 0;
1535 
1536 nla_put_failure:
1537 	return -EMSGSIZE;
1538 }
1539 
1540 static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1541 {
1542 	struct ip_tunnel *t = netdev_priv(dev);
1543 
1544 	if (t->erspan_ver <= 2) {
1545 		if (t->erspan_ver != 0 && !t->collect_md)
1546 			t->parms.o_flags |= TUNNEL_KEY;
1547 
1548 		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1549 			goto nla_put_failure;
1550 
1551 		if (t->erspan_ver == 1) {
1552 			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1553 				goto nla_put_failure;
1554 		} else if (t->erspan_ver == 2) {
1555 			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1556 				goto nla_put_failure;
1557 			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1558 				goto nla_put_failure;
1559 		}
1560 	}
1561 
1562 	return ipgre_fill_info(skb, dev);
1563 
1564 nla_put_failure:
1565 	return -EMSGSIZE;
1566 }
1567 
1568 static void erspan_setup(struct net_device *dev)
1569 {
1570 	struct ip_tunnel *t = netdev_priv(dev);
1571 
1572 	ether_setup(dev);
1573 	dev->max_mtu = 0;
1574 	dev->netdev_ops = &erspan_netdev_ops;
1575 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1576 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1577 	ip_tunnel_setup(dev, erspan_net_id);
1578 	t->erspan_ver = 1;
1579 }
1580 
1581 static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1582 	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
1583 	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
1584 	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
1585 	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
1586 	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
1587 	[IFLA_GRE_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
1588 	[IFLA_GRE_REMOTE]	= { .len = sizeof_field(struct iphdr, daddr) },
1589 	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
1590 	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
1591 	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
1592 	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
1593 	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
1594 	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
1595 	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
1596 	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
1597 	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
1598 	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
1599 	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
1600 	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
1601 	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
1602 	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
1603 };
1604 
1605 static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1606 	.kind		= "gre",
1607 	.maxtype	= IFLA_GRE_MAX,
1608 	.policy		= ipgre_policy,
1609 	.priv_size	= sizeof(struct ip_tunnel),
1610 	.setup		= ipgre_tunnel_setup,
1611 	.validate	= ipgre_tunnel_validate,
1612 	.newlink	= ipgre_newlink,
1613 	.changelink	= ipgre_changelink,
1614 	.dellink	= ip_tunnel_dellink,
1615 	.get_size	= ipgre_get_size,
1616 	.fill_info	= ipgre_fill_info,
1617 	.get_link_net	= ip_tunnel_get_link_net,
1618 };
1619 
1620 static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1621 	.kind		= "gretap",
1622 	.maxtype	= IFLA_GRE_MAX,
1623 	.policy		= ipgre_policy,
1624 	.priv_size	= sizeof(struct ip_tunnel),
1625 	.setup		= ipgre_tap_setup,
1626 	.validate	= ipgre_tap_validate,
1627 	.newlink	= ipgre_newlink,
1628 	.changelink	= ipgre_changelink,
1629 	.dellink	= ip_tunnel_dellink,
1630 	.get_size	= ipgre_get_size,
1631 	.fill_info	= ipgre_fill_info,
1632 	.get_link_net	= ip_tunnel_get_link_net,
1633 };
1634 
1635 static struct rtnl_link_ops erspan_link_ops __read_mostly = {
1636 	.kind		= "erspan",
1637 	.maxtype	= IFLA_GRE_MAX,
1638 	.policy		= ipgre_policy,
1639 	.priv_size	= sizeof(struct ip_tunnel),
1640 	.setup		= erspan_setup,
1641 	.validate	= erspan_validate,
1642 	.newlink	= erspan_newlink,
1643 	.changelink	= erspan_changelink,
1644 	.dellink	= ip_tunnel_dellink,
1645 	.get_size	= ipgre_get_size,
1646 	.fill_info	= erspan_fill_info,
1647 	.get_link_net	= ip_tunnel_get_link_net,
1648 };
1649 
1650 struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1651 					u8 name_assign_type)
1652 {
1653 	struct nlattr *tb[IFLA_MAX + 1];
1654 	struct net_device *dev;
1655 	LIST_HEAD(list_kill);
1656 	struct ip_tunnel *t;
1657 	int err;
1658 
1659 	memset(&tb, 0, sizeof(tb));
1660 
1661 	dev = rtnl_create_link(net, name, name_assign_type,
1662 			       &ipgre_tap_ops, tb, NULL);
1663 	if (IS_ERR(dev))
1664 		return dev;
1665 
1666 	/* Configure flow based GRE device. */
1667 	t = netdev_priv(dev);
1668 	t->collect_md = true;
1669 
1670 	err = ipgre_newlink(net, dev, tb, NULL, NULL);
1671 	if (err < 0) {
1672 		free_netdev(dev);
1673 		return ERR_PTR(err);
1674 	}
1675 
1676 	/* openvswitch users expect packet sizes to be unrestricted,
1677 	 * so set the largest MTU we can.
1678 	 */
1679 	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
1680 	if (err)
1681 		goto out;
1682 
1683 	err = rtnl_configure_link(dev, NULL, 0, NULL);
1684 	if (err < 0)
1685 		goto out;
1686 
1687 	return dev;
1688 out:
1689 	ip_tunnel_dellink(dev, &list_kill);
1690 	unregister_netdevice_many(&list_kill);
1691 	return ERR_PTR(err);
1692 }
1693 EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
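
/* A minimal sketch of how an external caller (e.g. openvswitch) might use
 * the helper exported above; illustrative only, error handling elided:
 *
 *	struct net_device *dev;
 *
 *	rtnl_lock();
 *	dev = gretap_fb_dev_create(net, "gretap-fb", NET_NAME_USER);
 *	rtnl_unlock();
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 */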
1694 
1695 static int __net_init ipgre_tap_init_net(struct net *net)
1696 {
1697 	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
1698 }
1699 
1700 static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
1701 {
1702 	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
1703 }
1704 
1705 static struct pernet_operations ipgre_tap_net_ops = {
1706 	.init = ipgre_tap_init_net,
1707 	.exit_batch = ipgre_tap_exit_batch_net,
1708 	.id   = &gre_tap_net_id,
1709 	.size = sizeof(struct ip_tunnel_net),
1710 };
1711 
1712 static int __net_init erspan_init_net(struct net *net)
1713 {
1714 	return ip_tunnel_init_net(net, erspan_net_id,
1715 				  &erspan_link_ops, "erspan0");
1716 }
1717 
1718 static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
1719 {
1720 	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
1721 }
1722 
1723 static struct pernet_operations erspan_net_ops = {
1724 	.init = erspan_init_net,
1725 	.exit_batch = erspan_exit_batch_net,
1726 	.id   = &erspan_net_id,
1727 	.size = sizeof(struct ip_tunnel_net),
1728 };
1729 
1730 static int __init ipgre_init(void)
1731 {
1732 	int err;
1733 
1734 	pr_info("GRE over IPv4 tunneling driver\n");
1735 
1736 	err = register_pernet_device(&ipgre_net_ops);
1737 	if (err < 0)
1738 		return err;
1739 
1740 	err = register_pernet_device(&ipgre_tap_net_ops);
1741 	if (err < 0)
1742 		goto pnet_tap_failed;
1743 
1744 	err = register_pernet_device(&erspan_net_ops);
1745 	if (err < 0)
1746 		goto pnet_erspan_failed;
1747 
1748 	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1749 	if (err < 0) {
1750 		pr_info("%s: can't add protocol\n", __func__);
1751 		goto add_proto_failed;
1752 	}
1753 
1754 	err = rtnl_link_register(&ipgre_link_ops);
1755 	if (err < 0)
1756 		goto rtnl_link_failed;
1757 
1758 	err = rtnl_link_register(&ipgre_tap_ops);
1759 	if (err < 0)
1760 		goto tap_ops_failed;
1761 
1762 	err = rtnl_link_register(&erspan_link_ops);
1763 	if (err < 0)
1764 		goto erspan_link_failed;
1765 
1766 	return 0;
1767 
1768 erspan_link_failed:
1769 	rtnl_link_unregister(&ipgre_tap_ops);
1770 tap_ops_failed:
1771 	rtnl_link_unregister(&ipgre_link_ops);
1772 rtnl_link_failed:
1773 	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1774 add_proto_failed:
1775 	unregister_pernet_device(&erspan_net_ops);
1776 pnet_erspan_failed:
1777 	unregister_pernet_device(&ipgre_tap_net_ops);
1778 pnet_tap_failed:
1779 	unregister_pernet_device(&ipgre_net_ops);
1780 	return err;
1781 }
1782 
1783 static void __exit ipgre_fini(void)
1784 {
1785 	rtnl_link_unregister(&ipgre_tap_ops);
1786 	rtnl_link_unregister(&ipgre_link_ops);
1787 	rtnl_link_unregister(&erspan_link_ops);
1788 	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1789 	unregister_pernet_device(&ipgre_tap_net_ops);
1790 	unregister_pernet_device(&ipgre_net_ops);
1791 	unregister_pernet_device(&erspan_net_ops);
1792 }
1793 
1794 module_init(ipgre_init);
1795 module_exit(ipgre_fini);
1796 MODULE_LICENSE("GPL");
1797 MODULE_ALIAS_RTNL_LINK("gre");
1798 MODULE_ALIAS_RTNL_LINK("gretap");
1799 MODULE_ALIAS_RTNL_LINK("erspan");
1800 MODULE_ALIAS_NETDEV("gre0");
1801 MODULE_ALIAS_NETDEV("gretap0");
1802 MODULE_ALIAS_NETDEV("erspan0");
1803