// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>

/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause a complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it requires maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter, since when we enter the first ndo_xmit(), cpu migration is
   forbidden. We force an exit if this counter reaches RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but they would really
   kill the network. The IP hop limit plays the role of "t->recursion"
   in this case, if we copy it from the packet being encapsulated to the
   upper header. It is a very good solution, but it introduces two problems:

   - Routing protocols using packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and the traceroute output
     would be even more informative. This idea appeared to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all other routers (at least in my neighbourhood)
     return only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work or traceroute to say something reasonable,
   we should search for another solution.

   One of them is to parse the packet, trying to detect an inner
   encapsulation made by our node. It is difficult or even impossible,
   especially taking fragmentation into account. In short, ttl is not a
   solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but the exponential growth of network traffic is changed to linear
   (branches that exceed the pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value < 68, where the looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when the encapsulating packets have DF
   set. But that is not our problem! Nobody could accuse us; we did
   all that we could. Even if it was your gated that injected the
   fatal route into the network, even if it was you who configured the
   fatal static route: you are innocent. :-)

   Alexey Kuznetsov.
 */
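
/* For illustration only: a minimal sketch of how such a per-cpu recursion
 * guard is typically used around a nested transmit. The helper names
 * follow the dev_xmit_recursion() family in net/core/dev.c and are an
 * assumption here, not code quoted from this file:
 *
 *	if (dev_xmit_recursion()) {	// already RECURSION_LIMIT levels deep?
 *		net_crit_ratelimited("Dead loop on virtual device\n");
 *		goto drop;		// break the loop, drop the skb
 *	}
 *	dev_xmit_recursion_inc();	// per-cpu; preemption is disabled here
 *	rc = netdev_start_xmit(skb, dev, txq, more);
 *	dev_xmit_recursion_dec();
 */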

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static const struct header_ops ipgre_header_ops;

static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
				u32 id, u32 index,
				bool truncate, bool is_ipv4);

static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;

static int ipgre_err(struct sk_buff *skb, u32 info,
		     const struct tnl_ptk_info *tpi)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put the GRE key in the third word
	 * of the GRE header. It makes it impossible to maintain even soft
	 * state for keyed GRE tunnels with checksums enabled. Tell
	 * them "thank you".
	 *
	 * Well, I wonder: rfc1812 was written by a Cisco employee.
	 * Why the hell do these idiots break standards established
	 * by themselves?
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	unsigned int data_len = 0;
	struct ip_tunnel *t;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
		 tpi->proto == htons(ETH_P_ERSPAN2))
		itn = net_generic(net, erspan_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

	if (!t)
		return -ENOENT;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return 0;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return 0;
		default:
			/* All others are translated to HOST_UNREACH.
			 * rfc2003 contains "deep thoughts" about NET_UNREACH;
			 * I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return 0;
		/* RFC 4884 4.1: the second reserved byte carries the length
		 * of the original datagram, in 32-bit words.
		 */
		data_len = icmp_hdr(skb)->un.reserved[1] * 4;
		break;

	case ICMP_REDIRECT:
		break;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (tpi->proto == htons(ETH_P_IPV6) &&
	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
					type, data_len))
		return 0;
#endif

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return 0;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return 0;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

	return 0;
}

static void gre_err(struct sk_buff *skb, u32 info)
{
	/* See the comment at the top of ipgre_err(): all routers except
	 * Linux return only 8 bytes of ICMP payload, and Cisco devices
	 * put the GRE key in the third word of the GRE header. The same
	 * caveats apply here.
	 */

	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;

	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
			     iph->ihl * 4) < 0)
		return;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, IPPROTO_GRE);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
			      IPPROTO_GRE);
		return;
	}

	ipgre_err(skb, info, &tpi);
}

static bool is_erspan_type1(int gre_hdr_len)
{
	/* Both ERSPAN type I (version 0) and type II (version 1) use
	 * protocol 0x88BE, but type I has only a 4-byte GRE header,
	 * while type II has an 8-byte one.
	 */
	return gre_hdr_len == 4;
}
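
/* For reference, a sketch of the two on-the-wire layouts distinguished
 * above (based on the ERSPAN draft rather than on structures in this
 * file):
 *
 *   type I  (ver 0): outer IP | 4-byte GRE base (proto 0x88BE) | frame
 *   type II (ver 1): outer IP | 4-byte GRE base + 4-byte sequence number
 *                             | 8-byte ERSPAN v1 header | frame
 */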

static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct erspan_base_hdr *ershdr;
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	struct erspan_md2 *md2;
	int ver;
	int len;

	itn = net_generic(net, erspan_net_id);
	iph = ip_hdr(skb);
	if (is_erspan_type1(gre_hdr_len)) {
		ver = 0;
		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
					  tpi->flags | TUNNEL_NO_KEY,
					  iph->saddr, iph->daddr, 0);
	} else {
		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
		ver = ershdr->ver;
		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
					  tpi->flags | TUNNEL_KEY,
					  iph->saddr, iph->daddr, tpi->key);
	}

	if (tunnel) {
		if (is_erspan_type1(gre_hdr_len))
			len = gre_hdr_len;
		else
			len = gre_hdr_len + erspan_hdr_len(ver);

		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		if (__iptunnel_pull_header(skb,
					   len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			goto drop;

		if (tunnel->collect_md) {
			struct erspan_metadata *pkt_md, *md;
			struct ip_tunnel_info *info;
			unsigned char *gh;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ip_tun_rx_dst(skb, flags,
						tun_id, sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			/* skb can be uncloned in __iptunnel_pull_header, so
			 * the old pkt_md is no longer valid and we need to
			 * reset it
			 */
			gh = skb_network_header(skb) +
			     skb_network_header_len(skb);
			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
							    sizeof(*ershdr));
			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);

			info = &tun_dst->u.tun_info;
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);
		}

		skb_reset_mac_header(skb);
		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		const struct iphdr *tnl_params;

		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		/* Special case for ipgre_header_parse(), which expects the
		 * mac_header to point to the outer IP header.
		 */
		if (tunnel->dev->header_ops == &ipgre_header_ops)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);

		tnl_params = &tunnel->parms.iph;
		if (tunnel->collect_md || tnl_params->daddr == 0) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}
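
/* Return-value convention shared by the rcv helpers (the PACKET_* codes
 * come from net/ip_tunnels.h): PACKET_RCVD means the skb was consumed,
 * PACKET_REJECT makes gre_rcv() answer with an ICMP error, and
 * PACKET_NEXT tells ipgre_rcv() below to retry the lookup in the other
 * tunnel table (gretap vs. plain gre).
 */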

static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}

static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

out:
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	__be16 flags = tunnel->parms.o_flags;

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 flags, proto, tunnel->parms.o_key,
			 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
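
/* gre_fb_xmit() and erspan_fb_xmit() below serve flow-based (collect_md)
 * devices, where the outer addresses and keys come from per-skb tunnel
 * metadata instead of the netdevice. As a usage hint (iproute2 syntax,
 * not something this file enforces), such a device is created with the
 * "external" keyword:
 *
 *   ip link add gre1 type gretap external
 *
 * and is then typically driven by openvswitch or by tc/eBPF programs
 * that attach a tunnel key to each skb.
 */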

static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	int tunnel_hlen;
	__be16 flags;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	if (skb_cow_head(skb, dev->needed_headroom))
		goto err_free_skb;

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_skb;

	flags = tun_info->key.tun_flags &
		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id),
			 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);

	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

	return;

err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}

static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct erspan_metadata *md;
	bool truncate = false;
	__be16 proto;
	int tunnel_hlen;
	int version;
	int nhoff;
	int thoff;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
		goto err_free_skb;
	if (tun_info->options_len < sizeof(*md))
		goto err_free_skb;
	md = ip_tunnel_info_opts(tun_info);

	/* ERSPAN has a fixed 8-byte GRE header */
	version = md->version;
	tunnel_hlen = 8 + erspan_hdr_len(version);

	if (skb_cow_head(skb, dev->needed_headroom))
		goto err_free_skb;

	if (gre_handle_offloads(skb, false))
		goto err_free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	thoff = skb_transport_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
		truncate = true;

	if (version == 1) {
		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
				    ntohl(md->u.index), truncate, true);
		proto = htons(ETH_P_ERSPAN);
	} else if (version == 2) {
		erspan_build_header_v2(skb,
				       ntohl(tunnel_id_to_key32(key->tun_id)),
				       md->u.md2.dir,
				       get_hwid(&md->u.md2),
				       truncate, true);
		proto = htons(ETH_P_ERSPAN2);
	} else {
		goto err_free_skb;
	}

	gre_build_header(skb, 8, TUNNEL_SEQ,
			 proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno)));

	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

	return;

err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}

static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	const struct ip_tunnel_key *key;
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	key = &info->key;
	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
			    tunnel_id_to_key32(key->tun_id),
			    key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
			    skb->mark, skb_get_hash(skb));
	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}

static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		if (skb_cow_head(skb, 0))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to the gre header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);

		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_start(skb) < skb->data)
			goto free_skb;
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static netdev_tx_t erspan_xmit(struct sk_buff *skb,
			       struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	bool truncate = false;
	__be16 proto;

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		erspan_fb_xmit(skb, dev);
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, false))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	/* Push ERSPAN header */
	if (tunnel->erspan_ver == 0) {
		proto = htons(ETH_P_ERSPAN);
		tunnel->parms.o_flags &= ~TUNNEL_SEQ;
	} else if (tunnel->erspan_ver == 1) {
		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
				    tunnel->index,
				    truncate, true);
		proto = htons(ETH_P_ERSPAN);
	} else if (tunnel->erspan_ver == 2) {
		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
				       tunnel->dir, tunnel->hwid,
				       truncate, true);
		proto = htons(ETH_P_ERSPAN2);
	} else {
		goto free_skb;
	}

	tunnel->parms.o_flags &= ~TUNNEL_KEY;
	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	__be16 flags;
	int len;

	len = tunnel->tun_hlen;
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	len = tunnel->tun_hlen - len;
	tunnel->hlen = tunnel->hlen + len;

	if (dev->header_ops)
		dev->hard_header_len += len;
	else
		dev->needed_headroom += len;

	if (set_mtu)
		dev->mtu = max_t(int, dev->mtu - len, 68);

	flags = tunnel->parms.o_flags;

	if (flags & TUNNEL_SEQ ||
	    (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
		dev->features &= ~NETIF_F_GSO_SOFTWARE;
		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
	} else {
		dev->features |= NETIF_F_GSO_SOFTWARE;
		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	}
}

static int ipgre_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p,
			    int cmd)
{
	int err;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
		    p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
		    ((p->i_flags | p->o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	p->i_flags = gre_flags_to_tnl_flags(p->i_flags);
	p->o_flags = gre_flags_to_tnl_flags(p->o_flags);

	err = ip_tunnel_ctl(dev, p, cmd);
	if (err)
		return err;

	if (cmd == SIOCCHGTUNNEL) {
		struct ip_tunnel *t = netdev_priv(dev);

		t->parms.i_flags = p->i_flags;
		t->parms.o_flags = p->o_flags;

		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
			ipgre_link_update(dev, true);
	}

	p->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
	p->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
	return 0;
}

/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to the mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, the mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *)skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_siocdevprivate	= ip_tunnel_siocdevprivate,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_tunnel_ctl		= ipgre_tunnel_ctl,
};

#define GRE_FEATURES (NETIF_F_SG |		\
		      NETIF_F_FRAGLIST |	\
		      NETIF_F_HIGHDMA |		\
		      NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->type		= ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}

static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	__be16 flags;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
	dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);

	dev->features		|= GRE_FEATURES | NETIF_F_LLTX;
	dev->hw_features	|= GRE_FEATURES;

	flags = tunnel->parms.o_flags;

	/* TCP offload with GRE SEQ is not supported, nor can we support 2
	 * levels of outer headers requiring an update.
	 */
	if (flags & TUNNEL_SEQ)
		return;
	if (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)
		return;

	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	__dev_addr_set(dev, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len		= 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
			dev->hard_header_len = tunnel->hlen + sizeof(*iph);
			dev->needed_headroom = 0;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
		dev->hard_header_len = tunnel->hlen + sizeof(*iph);
		dev->needed_headroom = 0;
	}

	return ip_tunnel_init(dev);
}

static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit_batch = ipgre_exit_batch_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION | GRE_ROUTING))
		return -EINVAL;

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data, extack);
}

static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret;

	if (!data)
		return 0;

	ret = ipgre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	if (data[IFLA_GRE_ERSPAN_VER] &&
	    nla_get_u8(data[IFLA_GRE_ERSPAN_VER]) == 0)
		return 0;

	/* ERSPAN type II/III should only have the GRE sequence and key flags */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN session ID is only 10 bits. Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	return 0;
}
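
/* As a usage hint for the rules enforced above (iproute2 syntax, an
 * assumption about the userspace tool rather than anything this file
 * encodes): a classical ERSPAN v1 device carries both GRE_SEQ and
 * GRE_KEY, and its session ID (the key) must fit in 10 bits:
 *
 *   ip link add dev erspan1 type erspan local 10.0.0.1 remote 10.0.0.2 \
 *	seq key 100 erspan_ver 1 erspan 123
 */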

static int ipgre_netlink_parms(struct net_device *dev,
			       struct nlattr *data[],
			       struct nlattr *tb[],
			       struct ip_tunnel_parm *parms,
			       __u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF]) &&
		    (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	return 0;
}

static int erspan_netlink_parms(struct net_device *dev,
				struct nlattr *data[],
				struct nlattr *tb[],
				struct ip_tunnel_parm *parms,
				__u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);
	int err;

	err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
	if (err)
		return err;
	if (!data)
		return 0;

	if (data[IFLA_GRE_ERSPAN_VER]) {
		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

		if (t->erspan_ver > 2)
			return -EINVAL;
	}

	if (t->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
			if (t->index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (t->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}
		if (data[IFLA_GRE_ERSPAN_HWID]) {
			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}

/* This function returns true when ENCAP attributes are present in the
 * netlink message.
 */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static int erspan_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->erspan_ver == 0)
		tunnel->tun_hlen = 4; /* 4-byte GRE hdr. */
	else
		tunnel->tun_hlen = 8; /* 8-byte GRE hdr. */

	tunnel->parms.iph.protocol = IPPROTO_GRE;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->erspan_ver);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops erspan_netdev_ops = {
	.ndo_init		= erspan_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= erspan_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops	= &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}

static int
ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
{
	struct ip_tunnel_encap ipencap;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	return 0;
}

static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	__u32 fwmark = 0;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int erspan_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	__u32 fwmark = 0;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	t->parms.i_flags = p.i_flags;
	t->parms.o_flags = p.o_flags;

	ipgre_link_update(dev, !tb[IFLA_MTU]);

	return 0;
}

static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	t->parms.i_flags = p.i_flags;
	t->parms.o_flags = p.o_flags;

	return 0;
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_IGNORE_DF */
		nla_total_size(1) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_VER */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_DIR */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_HWID */
		nla_total_size(2) +
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;
	__be16 o_flags = p->o_flags;

	if (t->erspan_ver <= 2) {
		if (t->erspan_ver != 0 && !t->collect_md)
			o_flags |= TUNNEL_KEY;

		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
			goto nla_put_failure;

		if (t->erspan_ver == 1) {
			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
				goto nla_put_failure;
		} else if (t->erspan_ver == 2) {
			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
				goto nla_put_failure;
		}
	}

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void erspan_setup(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops = &erspan_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, erspan_net_id);
	t->erspan_ver = 1;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops erspan_link_ops __read_mostly = {
	.kind		= "erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= erspan_setup,
	.validate	= erspan_validate,
	.newlink	= erspan_newlink,
	.changelink	= erspan_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

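/* Create a flow-based (collect_md) gretap device programmatically.
 * The in-tree user of this exported helper is the openvswitch GRE
 * vport (net/openvswitch/vport-gre.c).
 */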
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb, NULL);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);

static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit_batch = ipgre_tap_exit_batch_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __net_init erspan_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, erspan_net_id,
				  &erspan_link_ops, "erspan0");
}

static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
{
	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
}

static struct pernet_operations erspan_net_ops = {
	.init = erspan_init_net,
	.exit_batch = erspan_exit_batch_net,
	.id   = &erspan_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = register_pernet_device(&erspan_net_ops);
	if (err < 0)
		goto pnet_erspan_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&erspan_link_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;

erspan_link_failed:
	rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	rtnl_link_unregister(&erspan_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
	unregister_pernet_device(&erspan_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");