xref: /openbmc/linux/net/ipv4/ip_gre.c (revision 110e6f26)
1 /*
2  *	Linux NET3:	GRE over IP protocol decoder.
3  *
4  *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
5  *
6  *	This program is free software; you can redistribute it and/or
7  *	modify it under the terms of the GNU General Public License
8  *	as published by the Free Software Foundation; either version
9  *	2 of the License, or (at your option) any later version.
10  *
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/capability.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <asm/uaccess.h>
21 #include <linux/skbuff.h>
22 #include <linux/netdevice.h>
23 #include <linux/in.h>
24 #include <linux/tcp.h>
25 #include <linux/udp.h>
26 #include <linux/if_arp.h>
27 #include <linux/if_vlan.h>
28 #include <linux/init.h>
29 #include <linux/in6.h>
30 #include <linux/inetdevice.h>
31 #include <linux/igmp.h>
32 #include <linux/netfilter_ipv4.h>
33 #include <linux/etherdevice.h>
34 #include <linux/if_ether.h>
35 
36 #include <net/sock.h>
37 #include <net/ip.h>
38 #include <net/icmp.h>
39 #include <net/protocol.h>
40 #include <net/ip_tunnels.h>
41 #include <net/arp.h>
42 #include <net/checksum.h>
43 #include <net/dsfield.h>
44 #include <net/inet_ecn.h>
45 #include <net/xfrm.h>
46 #include <net/net_namespace.h>
47 #include <net/netns/generic.h>
48 #include <net/rtnetlink.h>
49 #include <net/gre.h>
50 #include <net/dst_metadata.h>
51 
52 #if IS_ENABLED(CONFIG_IPV6)
53 #include <net/ipv6.h>
54 #include <net/ip6_fib.h>
55 #include <net/ip6_route.h>
56 #endif
57 
58 /*
59    Problems & solutions
60    --------------------
61 
62    1. The most important issue is detecting local dead loops.
63    They would cause a complete host lockup in transmit, which
64    would be "resolved" by stack overflow or, if queueing is enabled,
65    by infinite looping in net_bh.
66 
67    We cannot track such dead loops during route installation;
68    it is an infeasible task. The most general solution would be
69    to keep an skb->encapsulation counter (a sort of local ttl),
70    and silently drop the packet when it expires. It is a good
71    solution, but it would require maintaining a new variable in ALL
72    skbs, even when no tunneling is used.
73 
74    Current solution: xmit_recursion breaks dead loops. This is a percpu
75    counter, since when we enter the first ndo_start_xmit(), cpu migration is
76    forbidden. We force an exit if it reaches RECURSION_LIMIT (sketch below).
77 
78    2. Networking dead loops would not kill routers, but would really
79    kill the network. The IP hop limit plays the role of "t->recursion" in
80    this case, if we copy it from the packet being encapsulated to the
81    upper header. It is a very good solution, but it introduces two problems:
82 
83    - Routing protocols using packets with ttl=1 (OSPF, RIP2)
84      do not work over tunnels.
85    - traceroute does not work. I planned to relay ICMP from the tunnel,
86      so that this problem would be solved and traceroute output
87      would be even more informative. This idea appeared to be wrong:
88      only Linux complies with rfc1812 now (yes, guys, Linux is the only
89      true router now :-)); all routers (at least, in my neighbourhood)
90      return only 8 bytes of payload. It is the end.
91 
92    Hence, if we want OSPF to work or traceroute to say something reasonable,
93    we should search for another solution.
94 
95    One of them is to parse the packet, trying to detect inner encapsulation
96    made by our node. It is difficult or even impossible, especially
97    taking into account fragmentation. To be short, ttl is not a solution at all.
98 
99    Current solution: The solution was UNEXPECTEDLY SIMPLE.
100    We force the DF flag on tunnels with a preconfigured hop limit,
101    that is ALL. :-) Well, it does not remove the problem completely,
102    but exponential growth of network traffic is changed to linear
103    (branches that exceed the pmtu are pruned) and the tunnel mtu
104    rapidly degrades to a value <68, where looping stops.
105    Yes, it is not good if there exists a router in the loop
106    which does not force DF, even when encapsulating packets have DF set.
107    But it is not our problem! Nobody could accuse us, we did
108    all that we could do. Even if it was your gated that injected
109    the fatal route into the network, even if it was you who configured
110    the fatal static route: you are innocent. :-)
111 
112    Alexey Kuznetsov.
113  */
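/* Illustrative sketch only (not the actual net/core/dev.c implementation)
 * of the per-cpu recursion guard described in point 1 above: a per-cpu
 * depth counter is bumped around each nested transmit, and the packet is
 * dropped once the limit is reached.
 *
 *	static DEFINE_PER_CPU(unsigned int, xmit_depth);
 *
 *	if (__this_cpu_read(xmit_depth) > RECURSION_LIMIT)
 *		goto drop;
 *	__this_cpu_inc(xmit_depth);
 *	... hand the skb to the device's ndo_start_xmit() ...
 *	__this_cpu_dec(xmit_depth);
 */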
114 
115 static bool log_ecn_error = true;
116 module_param(log_ecn_error, bool, 0644);
117 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
118 
119 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
120 static int ipgre_tunnel_init(struct net_device *dev);
121 
122 static int ipgre_net_id __read_mostly;
123 static int gre_tap_net_id __read_mostly;
124 
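/* Length of the GRE header: 4 bytes of base header plus 4 bytes for each
 * optional field (checksum + reserved, key, sequence number) enabled in
 * the tunnel flags.
 */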
125 static int ip_gre_calc_hlen(__be16 o_flags)
126 {
127 	int addend = 4;
128 
129 	if (o_flags & TUNNEL_CSUM)
130 		addend += 4;
131 	if (o_flags & TUNNEL_KEY)
132 		addend += 4;
133 	if (o_flags & TUNNEL_SEQ)
134 		addend += 4;
135 	return addend;
136 }
137 
138 static __be16 gre_flags_to_tnl_flags(__be16 flags)
139 {
140 	__be16 tflags = 0;
141 
142 	if (flags & GRE_CSUM)
143 		tflags |= TUNNEL_CSUM;
144 	if (flags & GRE_ROUTING)
145 		tflags |= TUNNEL_ROUTING;
146 	if (flags & GRE_KEY)
147 		tflags |= TUNNEL_KEY;
148 	if (flags & GRE_SEQ)
149 		tflags |= TUNNEL_SEQ;
150 	if (flags & GRE_STRICT)
151 		tflags |= TUNNEL_STRICT;
152 	if (flags & GRE_REC)
153 		tflags |= TUNNEL_REC;
154 	if (flags & GRE_VERSION)
155 		tflags |= TUNNEL_VERSION;
156 
157 	return tflags;
158 }
159 
160 static __be16 tnl_flags_to_gre_flags(__be16 tflags)
161 {
162 	__be16 flags = 0;
163 
164 	if (tflags & TUNNEL_CSUM)
165 		flags |= GRE_CSUM;
166 	if (tflags & TUNNEL_ROUTING)
167 		flags |= GRE_ROUTING;
168 	if (tflags & TUNNEL_KEY)
169 		flags |= GRE_KEY;
170 	if (tflags & TUNNEL_SEQ)
171 		flags |= GRE_SEQ;
172 	if (tflags & TUNNEL_STRICT)
173 		flags |= GRE_STRICT;
174 	if (tflags & TUNNEL_REC)
175 		flags |= GRE_REC;
176 	if (tflags & TUNNEL_VERSION)
177 		flags |= GRE_VERSION;
178 
179 	return flags;
180 }
181 
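/* On-the-wire GRE header layout (RFC 2784/2890) as parsed below:
 *
 *	+---------------+---------------+
 *	| flags/version | protocol type |  4 bytes (struct gre_base_hdr)
 *	+---------------+---------------+
 *	|   checksum    |   reserved    |  4 bytes, present if GRE_CSUM
 *	+---------------+---------------+
 *	|              key              |  4 bytes, present if GRE_KEY
 *	+---------------+---------------+
 *	|        sequence number        |  4 bytes, present if GRE_SEQ
 *	+---------------+---------------+
 */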
182 static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
183 			    bool *csum_err)
184 {
185 	const struct gre_base_hdr *greh;
186 	__be32 *options;
187 	int hdr_len;
188 
189 	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
190 		return -EINVAL;
191 
192 	greh = (struct gre_base_hdr *)skb_transport_header(skb);
193 	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
194 		return -EINVAL;
195 
196 	tpi->flags = gre_flags_to_tnl_flags(greh->flags);
197 	hdr_len = ip_gre_calc_hlen(tpi->flags);
198 
199 	if (!pskb_may_pull(skb, hdr_len))
200 		return -EINVAL;
201 
202 	greh = (struct gre_base_hdr *)skb_transport_header(skb);
203 	tpi->proto = greh->protocol;
204 
205 	options = (__be32 *)(greh + 1);
206 	if (greh->flags & GRE_CSUM) {
207 		if (skb_checksum_simple_validate(skb)) {
208 			*csum_err = true;
209 			return -EINVAL;
210 		}
211 
212 		skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
213 					 null_compute_pseudo);
214 		options++;
215 	}
216 
217 	if (greh->flags & GRE_KEY) {
218 		tpi->key = *options;
219 		options++;
220 	} else {
221 		tpi->key = 0;
222 	}
223 	if (unlikely(greh->flags & GRE_SEQ)) {
224 		tpi->seq = *options;
225 		options++;
226 	} else {
227 		tpi->seq = 0;
228 	}
229 	/* WCCP version 1 and 2 protocol decoding.
230 	 * - Change the protocol to IP
231 	 * - When dealing with WCCPv2, skip the extra 4 bytes in the GRE header
232 	 */
233 	if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
234 		tpi->proto = htons(ETH_P_IP);
235 		if ((*(u8 *)options & 0xF0) != 0x40) {
236 			hdr_len += 4;
237 			if (!pskb_may_pull(skb, hdr_len))
238 				return -EINVAL;
239 		}
240 	}
241 	return iptunnel_pull_header(skb, hdr_len, tpi->proto, false);
242 }
243 
244 static void ipgre_err(struct sk_buff *skb, u32 info,
245 		      const struct tnl_ptk_info *tpi)
246 {
247 
248 	/* All the routers (except for Linux) return only
249 	   8 bytes of packet payload. It means that precise relaying of
250 	   ICMP in the real Internet is absolutely infeasible.
251 
252 	   Moreover, Cisco "wise men" put the GRE key in the third word
253 	   of the GRE header. That makes it impossible to maintain even
254 	   soft state for keyed GRE tunnels with checksums enabled. Tell
255 	   them "thank you".
256 
257 	   Well, I wonder: rfc1812 was written by a Cisco employee, so
258 	   why the hell do they break standards they established
259 	   themselves???
260 	   */
261 	struct net *net = dev_net(skb->dev);
262 	struct ip_tunnel_net *itn;
263 	const struct iphdr *iph;
264 	const int type = icmp_hdr(skb)->type;
265 	const int code = icmp_hdr(skb)->code;
266 	struct ip_tunnel *t;
267 
268 	switch (type) {
269 	default:
270 	case ICMP_PARAMETERPROB:
271 		return;
272 
273 	case ICMP_DEST_UNREACH:
274 		switch (code) {
275 		case ICMP_SR_FAILED:
276 		case ICMP_PORT_UNREACH:
277 			/* Impossible event. */
278 			return;
279 		default:
280 			/* All others are translated to HOST_UNREACH.
281 			   rfc2003 contains "deep thoughts" about NET_UNREACH,
282 			   I believe they are just ether pollution. --ANK
283 			 */
284 			break;
285 		}
286 		break;
287 
288 	case ICMP_TIME_EXCEEDED:
289 		if (code != ICMP_EXC_TTL)
290 			return;
291 		break;
292 
293 	case ICMP_REDIRECT:
294 		break;
295 	}
296 
297 	if (tpi->proto == htons(ETH_P_TEB))
298 		itn = net_generic(net, gre_tap_net_id);
299 	else
300 		itn = net_generic(net, ipgre_net_id);
301 
302 	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
303 	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
304 			     iph->daddr, iph->saddr, tpi->key);
305 
306 	if (!t)
307 		return;
308 
309 	if (t->parms.iph.daddr == 0 ||
310 	    ipv4_is_multicast(t->parms.iph.daddr))
311 		return;
312 
313 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
314 		return;
315 
316 	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
317 		t->err_count++;
318 	else
319 		t->err_count = 1;
320 	t->err_time = jiffies;
321 }
322 
323 static void gre_err(struct sk_buff *skb, u32 info)
324 {
325 	/* See the comment in ipgre_err() above about routers returning only
326 	 * 8 bytes of ICMP payload and Cisco's placement of the GRE key.
327 	 */
338 
339 	const int type = icmp_hdr(skb)->type;
340 	const int code = icmp_hdr(skb)->code;
341 	struct tnl_ptk_info tpi;
342 	bool csum_err = false;
343 
344 	if (parse_gre_header(skb, &tpi, &csum_err)) {
345 		if (!csum_err)		/* ignore csum errors. */
346 			return;
347 	}
348 
349 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
350 		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
351 				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
352 		return;
353 	}
354 	if (type == ICMP_REDIRECT) {
355 		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
356 			      IPPROTO_GRE, 0);
357 		return;
358 	}
359 
360 	ipgre_err(skb, info, &tpi);
361 }
362 
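/* Place the 32-bit GRE key in the least-significant 32 bits of the
 * big-endian 64-bit tunnel id, regardless of host endianness.
 */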
363 static __be64 key_to_tunnel_id(__be32 key)
364 {
365 #ifdef __BIG_ENDIAN
366 	return (__force __be64)((__force u32)key);
367 #else
368 	return (__force __be64)((__force u64)key << 32);
369 #endif
370 }
371 
372 /* Returns the least-significant 32 bits of a __be64. */
373 static __be32 tunnel_id_to_key(__be64 x)
374 {
375 #ifdef __BIG_ENDIAN
376 	return (__force __be32)x;
377 #else
378 	return (__force __be32)((__force u64)x >> 32);
379 #endif
380 }
381 
382 static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
383 {
384 	struct net *net = dev_net(skb->dev);
385 	struct metadata_dst *tun_dst = NULL;
386 	struct ip_tunnel_net *itn;
387 	const struct iphdr *iph;
388 	struct ip_tunnel *tunnel;
389 
390 	if (tpi->proto == htons(ETH_P_TEB))
391 		itn = net_generic(net, gre_tap_net_id);
392 	else
393 		itn = net_generic(net, ipgre_net_id);
394 
395 	iph = ip_hdr(skb);
396 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
397 				  iph->saddr, iph->daddr, tpi->key);
398 
399 	if (tunnel) {
400 		skb_pop_mac_header(skb);
401 		if (tunnel->collect_md) {
402 			__be16 flags;
403 			__be64 tun_id;
404 
405 			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
406 			tun_id = key_to_tunnel_id(tpi->key);
407 			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
408 			if (!tun_dst)
409 				return PACKET_REJECT;
410 		}
411 
412 		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
413 		return PACKET_RCVD;
414 	}
415 	return PACKET_REJECT;
416 }
417 
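/* Receive entry point, registered with the GRE demultiplexer as the
 * GREPROTO_CISCO handler in ipgre_init().  Packets that match no
 * configured tunnel are answered with ICMP port unreachable.
 */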
418 static int gre_rcv(struct sk_buff *skb)
419 {
420 	struct tnl_ptk_info tpi;
421 	bool csum_err = false;
422 
423 #ifdef CONFIG_NET_IPGRE_BROADCAST
424 	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
425 		/* Looped back packet, drop it! */
426 		if (rt_is_output_route(skb_rtable(skb)))
427 			goto drop;
428 	}
429 #endif
430 
431 	if (parse_gre_header(skb, &tpi, &csum_err) < 0)
432 		goto drop;
433 
434 	if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
435 		return 0;
436 
437 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
438 drop:
439 	kfree_skb(skb);
440 	return 0;
441 }
442 
443 static __sum16 gre_checksum(struct sk_buff *skb)
444 {
445 	__wsum csum;
446 
447 	if (skb->ip_summed == CHECKSUM_PARTIAL)
448 		csum = lco_csum(skb);
449 	else
450 		csum = skb_checksum(skb, 0, skb->len, 0);
451 	return csum_fold(csum);
452 }
453 
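/* Push a GRE header in front of skb->data.  Optional fields are written
 * back to front: ptr starts at the last 32-bit word of the header and
 * moves towards the base header as sequence, key and checksum are filled
 * in, so the checksum (when present) ends up immediately after the base
 * header.
 */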
454 static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags,
455 			 __be16 proto, __be32 key, __be32 seq)
456 {
457 	struct gre_base_hdr *greh;
458 
459 	skb_push(skb, hdr_len);
460 
461 	skb_reset_transport_header(skb);
462 	greh = (struct gre_base_hdr *)skb->data;
463 	greh->flags = tnl_flags_to_gre_flags(flags);
464 	greh->protocol = proto;
465 
466 	if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
467 		__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
468 
469 		if (flags & TUNNEL_SEQ) {
470 			*ptr = seq;
471 			ptr--;
472 		}
473 		if (flags & TUNNEL_KEY) {
474 			*ptr = key;
475 			ptr--;
476 		}
477 		if (flags & TUNNEL_CSUM &&
478 		    !(skb_shinfo(skb)->gso_type &
479 		      (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
480 			*ptr = 0;
481 			*(__sum16 *)ptr = gre_checksum(skb);
482 		}
483 	}
484 }
485 
486 static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
487 		       const struct iphdr *tnl_params,
488 		       __be16 proto)
489 {
490 	struct ip_tunnel *tunnel = netdev_priv(dev);
491 
492 	if (tunnel->parms.o_flags & TUNNEL_SEQ)
493 		tunnel->o_seqno++;
494 
495 	/* Push GRE header. */
496 	build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
497 		     proto, tunnel->parms.o_key, htonl(tunnel->o_seqno));
498 
499 	skb_set_inner_protocol(skb, proto);
500 	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
501 }
502 
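/* Prepare the skb for GRE segmentation offload, requesting the
 * checksum-capable GSO type when the tunnel also inserts a GRE checksum.
 */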
503 static struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
504 					   bool csum)
505 {
506 	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
507 }
508 
509 static struct rtable *gre_get_rt(struct sk_buff *skb,
510 				 struct net_device *dev,
511 				 struct flowi4 *fl,
512 				 const struct ip_tunnel_key *key)
513 {
514 	struct net *net = dev_net(dev);
515 
516 	memset(fl, 0, sizeof(*fl));
517 	fl->daddr = key->u.ipv4.dst;
518 	fl->saddr = key->u.ipv4.src;
519 	fl->flowi4_tos = RT_TOS(key->tos);
520 	fl->flowi4_mark = skb->mark;
521 	fl->flowi4_proto = IPPROTO_GRE;
522 
523 	return ip_route_output_key(net, fl);
524 }
525 
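/* Flow-based (collect_md) transmit path: the outer addresses, key, TOS,
 * TTL and DF bit all come from the per-packet tunnel metadata attached by
 * the caller (skb_tunnel_info()) rather than from the netdevice's own
 * tunnel parameters.
 */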
526 static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
527 {
528 	struct ip_tunnel_info *tun_info;
529 	const struct ip_tunnel_key *key;
530 	struct rtable *rt = NULL;
531 	struct flowi4 fl;
532 	int min_headroom;
533 	int tunnel_hlen;
534 	__be16 df, flags;
535 	bool use_cache;
536 	int err;
537 
538 	tun_info = skb_tunnel_info(skb);
539 	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
540 		     ip_tunnel_info_af(tun_info) != AF_INET))
541 		goto err_free_skb;
542 
543 	key = &tun_info->key;
544 	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
545 	if (use_cache)
546 		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr);
547 	if (!rt) {
548 		rt = gre_get_rt(skb, dev, &fl, key);
549 		if (IS_ERR(rt))
550 			goto err_free_skb;
551 		if (use_cache)
552 			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
553 					  fl.saddr);
554 	}
555 
556 	tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);
557 
558 	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
559 			+ tunnel_hlen + sizeof(struct iphdr);
560 	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
561 		int head_delta = SKB_DATA_ALIGN(min_headroom -
562 						skb_headroom(skb) +
563 						16);
564 		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
565 				       0, GFP_ATOMIC);
566 		if (unlikely(err))
567 			goto err_free_rt;
568 	}
569 
570 	/* Push Tunnel header. */
571 	skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
572 	if (IS_ERR(skb)) {
573 		skb = NULL;
574 		goto err_free_rt;
575 	}
576 
577 	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
578 	build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
579 		     tunnel_id_to_key(tun_info->key.tun_id), 0);
580 
581 	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
582 
583 	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
584 		      key->tos, key->ttl, df, false);
585 	return;
586 
587 err_free_rt:
588 	ip_rt_put(rt);
589 err_free_skb:
590 	kfree_skb(skb);
591 	dev->stats.tx_dropped++;
592 }
593 
594 static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
595 {
596 	struct ip_tunnel_info *info = skb_tunnel_info(skb);
597 	struct rtable *rt;
598 	struct flowi4 fl4;
599 
600 	if (ip_tunnel_info_af(info) != AF_INET)
601 		return -EINVAL;
602 
603 	rt = gre_get_rt(skb, dev, &fl4, &info->key);
604 	if (IS_ERR(rt))
605 		return PTR_ERR(rt);
606 
607 	ip_rt_put(rt);
608 	info->key.u.ipv4.src = fl4.saddr;
609 	return 0;
610 }
611 
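/* For broadcast-mode devices (dev->header_ops set) the outer IP and GRE
 * headers were already built by ipgre_header() at hard-header time, so the
 * tunnel parameters are taken from skb->data and those headers are pulled
 * off again before handing the packet to __gre_xmit().  Otherwise the
 * parameters come from the tunnel's own configuration.
 */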
612 static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
613 			      struct net_device *dev)
614 {
615 	struct ip_tunnel *tunnel = netdev_priv(dev);
616 	const struct iphdr *tnl_params;
617 
618 	if (tunnel->collect_md) {
619 		gre_fb_xmit(skb, dev);
620 		return NETDEV_TX_OK;
621 	}
622 
623 	if (dev->header_ops) {
624 		/* Need space for new headers */
625 		if (skb_cow_head(skb, dev->needed_headroom -
626 				      (tunnel->hlen + sizeof(struct iphdr))))
627 			goto free_skb;
628 
629 		tnl_params = (const struct iphdr *)skb->data;
630 
631 		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
632 		 * to gre header.
633 		 */
634 		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
635 		skb_reset_mac_header(skb);
636 	} else {
637 		if (skb_cow_head(skb, dev->needed_headroom))
638 			goto free_skb;
639 
640 		tnl_params = &tunnel->parms.iph;
641 	}
642 
643 	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
644 	if (IS_ERR(skb))
645 		goto out;
646 
647 	__gre_xmit(skb, dev, tnl_params, skb->protocol);
648 	return NETDEV_TX_OK;
649 
650 free_skb:
651 	kfree_skb(skb);
652 out:
653 	dev->stats.tx_dropped++;
654 	return NETDEV_TX_OK;
655 }
656 
657 static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
658 				struct net_device *dev)
659 {
660 	struct ip_tunnel *tunnel = netdev_priv(dev);
661 
662 	if (tunnel->collect_md) {
663 		gre_fb_xmit(skb, dev);
664 		return NETDEV_TX_OK;
665 	}
666 
667 	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
668 	if (IS_ERR(skb))
669 		goto out;
670 
671 	if (skb_cow_head(skb, dev->needed_headroom))
672 		goto free_skb;
673 
674 	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
675 	return NETDEV_TX_OK;
676 
677 free_skb:
678 	kfree_skb(skb);
679 out:
680 	dev->stats.tx_dropped++;
681 	return NETDEV_TX_OK;
682 }
683 
684 static int ipgre_tunnel_ioctl(struct net_device *dev,
685 			      struct ifreq *ifr, int cmd)
686 {
687 	int err;
688 	struct ip_tunnel_parm p;
689 
690 	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
691 		return -EFAULT;
692 	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
693 		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
694 		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
695 		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
696 			return -EINVAL;
697 	}
698 	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
699 	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);
700 
701 	err = ip_tunnel_ioctl(dev, &p, cmd);
702 	if (err)
703 		return err;
704 
705 	p.i_flags = tnl_flags_to_gre_flags(p.i_flags);
706 	p.o_flags = tnl_flags_to_gre_flags(p.o_flags);
707 
708 	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
709 		return -EFAULT;
710 	return 0;
711 }
712 
713 /* Nice toy. Unfortunately, useless in real life :-)
714    It allows one to construct a virtual multiprotocol broadcast "LAN"
715    over the Internet, provided multicast routing is tuned.
716 
717 
718    I have no idea whether this bicycle was invented before me,
719    so I had to set ARPHRD_IPGRE to a random value.
720    I have an impression that Cisco could make something similar,
721    but this feature is apparently missing in IOS<=11.2(8).
722 
723    I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
724    with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
725 
726    ping -t 255 224.66.66.66
727 
728    If nobody answers, mbone does not work.
729 
730    ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
731    ip addr add 10.66.66.<somewhat>/24 dev Universe
732    ifconfig Universe up
733    ifconfig Universe add fe80::<Your_real_addr>/10
734    ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
735    ftp 10.66.66.66
736    ...
737    ftp fec0:6666:6666::193.233.7.65
738    ...
739  */
740 static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
741 			unsigned short type,
742 			const void *daddr, const void *saddr, unsigned int len)
743 {
744 	struct ip_tunnel *t = netdev_priv(dev);
745 	struct iphdr *iph;
746 	struct gre_base_hdr *greh;
747 
748 	iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph));
749 	greh = (struct gre_base_hdr *)(iph+1);
750 	greh->flags = tnl_flags_to_gre_flags(t->parms.o_flags);
751 	greh->protocol = htons(type);
752 
753 	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
754 
755 	/* Set the source hardware address. */
756 	if (saddr)
757 		memcpy(&iph->saddr, saddr, 4);
758 	if (daddr)
759 		memcpy(&iph->daddr, daddr, 4);
760 	if (iph->daddr)
761 		return t->hlen + sizeof(*iph);
762 
763 	return -(t->hlen + sizeof(*iph));
764 }
765 
766 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
767 {
768 	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
769 	memcpy(haddr, &iph->saddr, 4);
770 	return 4;
771 }
772 
773 static const struct header_ops ipgre_header_ops = {
774 	.create	= ipgre_header,
775 	.parse	= ipgre_header_parse,
776 };
777 
778 #ifdef CONFIG_NET_IPGRE_BROADCAST
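/* For a multicast destination, resolve the multicast route and join the
 * group on the device it points at, so that GRE packets addressed to the
 * group are actually received.
 */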
779 static int ipgre_open(struct net_device *dev)
780 {
781 	struct ip_tunnel *t = netdev_priv(dev);
782 
783 	if (ipv4_is_multicast(t->parms.iph.daddr)) {
784 		struct flowi4 fl4;
785 		struct rtable *rt;
786 
787 		rt = ip_route_output_gre(t->net, &fl4,
788 					 t->parms.iph.daddr,
789 					 t->parms.iph.saddr,
790 					 t->parms.o_key,
791 					 RT_TOS(t->parms.iph.tos),
792 					 t->parms.link);
793 		if (IS_ERR(rt))
794 			return -EADDRNOTAVAIL;
795 		dev = rt->dst.dev;
796 		ip_rt_put(rt);
797 		if (!__in_dev_get_rtnl(dev))
798 			return -EADDRNOTAVAIL;
799 		t->mlink = dev->ifindex;
800 		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
801 	}
802 	return 0;
803 }
804 
805 static int ipgre_close(struct net_device *dev)
806 {
807 	struct ip_tunnel *t = netdev_priv(dev);
808 
809 	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
810 		struct in_device *in_dev;
811 		in_dev = inetdev_by_index(t->net, t->mlink);
812 		if (in_dev)
813 			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
814 	}
815 	return 0;
816 }
817 #endif
818 
819 static const struct net_device_ops ipgre_netdev_ops = {
820 	.ndo_init		= ipgre_tunnel_init,
821 	.ndo_uninit		= ip_tunnel_uninit,
822 #ifdef CONFIG_NET_IPGRE_BROADCAST
823 	.ndo_open		= ipgre_open,
824 	.ndo_stop		= ipgre_close,
825 #endif
826 	.ndo_start_xmit		= ipgre_xmit,
827 	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
828 	.ndo_change_mtu		= ip_tunnel_change_mtu,
829 	.ndo_get_stats64	= ip_tunnel_get_stats64,
830 	.ndo_get_iflink		= ip_tunnel_get_iflink,
831 };
832 
833 #define GRE_FEATURES (NETIF_F_SG |		\
834 		      NETIF_F_FRAGLIST |	\
835 		      NETIF_F_HIGHDMA |		\
836 		      NETIF_F_HW_CSUM)
837 
838 static void ipgre_tunnel_setup(struct net_device *dev)
839 {
840 	dev->netdev_ops		= &ipgre_netdev_ops;
841 	dev->type		= ARPHRD_IPGRE;
842 	ip_tunnel_setup(dev, ipgre_net_id);
843 }
844 
845 static void __gre_tunnel_init(struct net_device *dev)
846 {
847 	struct ip_tunnel *tunnel;
848 	int t_hlen;
849 
850 	tunnel = netdev_priv(dev);
851 	tunnel->tun_hlen = ip_gre_calc_hlen(tunnel->parms.o_flags);
852 	tunnel->parms.iph.protocol = IPPROTO_GRE;
853 
854 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
855 
856 	t_hlen = tunnel->hlen + sizeof(struct iphdr);
857 
858 	dev->needed_headroom	= LL_MAX_HEADER + t_hlen + 4;
859 	dev->mtu		= ETH_DATA_LEN - t_hlen - 4;
860 
861 	dev->features		|= GRE_FEATURES;
862 	dev->hw_features	|= GRE_FEATURES;
863 
864 	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
865 		/* TCP offload with GRE SEQ is not supported, nor
866 		 * can we support 2 levels of outer headers requiring
867 		 * an update.
868 		 */
869 		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
870 		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
871 			dev->features    |= NETIF_F_GSO_SOFTWARE;
872 			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
873 		}
874 
875 		/* Can use a lockless transmit, unless we generate
876 		 * output sequences.
877 		 */
878 		dev->features |= NETIF_F_LLTX;
879 	}
880 }
881 
882 static int ipgre_tunnel_init(struct net_device *dev)
883 {
884 	struct ip_tunnel *tunnel = netdev_priv(dev);
885 	struct iphdr *iph = &tunnel->parms.iph;
886 
887 	__gre_tunnel_init(dev);
888 
889 	memcpy(dev->dev_addr, &iph->saddr, 4);
890 	memcpy(dev->broadcast, &iph->daddr, 4);
891 
892 	dev->flags		= IFF_NOARP;
893 	netif_keep_dst(dev);
894 	dev->addr_len		= 4;
895 
896 	if (iph->daddr) {
897 #ifdef CONFIG_NET_IPGRE_BROADCAST
898 		if (ipv4_is_multicast(iph->daddr)) {
899 			if (!iph->saddr)
900 				return -EINVAL;
901 			dev->flags = IFF_BROADCAST;
902 			dev->header_ops = &ipgre_header_ops;
903 		}
904 #endif
905 	} else
906 		dev->header_ops = &ipgre_header_ops;
907 
908 	return ip_tunnel_init(dev);
909 }
910 
911 static const struct gre_protocol ipgre_protocol = {
912 	.handler     = gre_rcv,
913 	.err_handler = gre_err,
914 };
915 
916 static int __net_init ipgre_init_net(struct net *net)
917 {
918 	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
919 }
920 
921 static void __net_exit ipgre_exit_net(struct net *net)
922 {
923 	struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);
924 	ip_tunnel_delete_net(itn, &ipgre_link_ops);
925 }
926 
927 static struct pernet_operations ipgre_net_ops = {
928 	.init = ipgre_init_net,
929 	.exit = ipgre_exit_net,
930 	.id   = &ipgre_net_id,
931 	.size = sizeof(struct ip_tunnel_net),
932 };
933 
934 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
935 {
936 	__be16 flags;
937 
938 	if (!data)
939 		return 0;
940 
941 	flags = 0;
942 	if (data[IFLA_GRE_IFLAGS])
943 		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
944 	if (data[IFLA_GRE_OFLAGS])
945 		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
946 	if (flags & (GRE_VERSION|GRE_ROUTING))
947 		return -EINVAL;
948 
949 	return 0;
950 }
951 
952 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
953 {
954 	__be32 daddr;
955 
956 	if (tb[IFLA_ADDRESS]) {
957 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
958 			return -EINVAL;
959 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
960 			return -EADDRNOTAVAIL;
961 	}
962 
963 	if (!data)
964 		goto out;
965 
966 	if (data[IFLA_GRE_REMOTE]) {
967 		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
968 		if (!daddr)
969 			return -EINVAL;
970 	}
971 
972 out:
973 	return ipgre_tunnel_validate(tb, data);
974 }
975 
976 static void ipgre_netlink_parms(struct net_device *dev,
977 				struct nlattr *data[],
978 				struct nlattr *tb[],
979 				struct ip_tunnel_parm *parms)
980 {
981 	memset(parms, 0, sizeof(*parms));
982 
983 	parms->iph.protocol = IPPROTO_GRE;
984 
985 	if (!data)
986 		return;
987 
988 	if (data[IFLA_GRE_LINK])
989 		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
990 
991 	if (data[IFLA_GRE_IFLAGS])
992 		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));
993 
994 	if (data[IFLA_GRE_OFLAGS])
995 		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));
996 
997 	if (data[IFLA_GRE_IKEY])
998 		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
999 
1000 	if (data[IFLA_GRE_OKEY])
1001 		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1002 
1003 	if (data[IFLA_GRE_LOCAL])
1004 		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
1005 
1006 	if (data[IFLA_GRE_REMOTE])
1007 		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
1008 
1009 	if (data[IFLA_GRE_TTL])
1010 		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1011 
1012 	if (data[IFLA_GRE_TOS])
1013 		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1014 
1015 	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
1016 		parms->iph.frag_off = htons(IP_DF);
1017 
1018 	if (data[IFLA_GRE_COLLECT_METADATA]) {
1019 		struct ip_tunnel *t = netdev_priv(dev);
1020 
1021 		t->collect_md = true;
1022 	}
1023 }
1024 
1025 /* This function returns true when ENCAP attributes are present in the nl msg */
1026 static bool ipgre_netlink_encap_parms(struct nlattr *data[],
1027 				      struct ip_tunnel_encap *ipencap)
1028 {
1029 	bool ret = false;
1030 
1031 	memset(ipencap, 0, sizeof(*ipencap));
1032 
1033 	if (!data)
1034 		return ret;
1035 
1036 	if (data[IFLA_GRE_ENCAP_TYPE]) {
1037 		ret = true;
1038 		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1039 	}
1040 
1041 	if (data[IFLA_GRE_ENCAP_FLAGS]) {
1042 		ret = true;
1043 		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1044 	}
1045 
1046 	if (data[IFLA_GRE_ENCAP_SPORT]) {
1047 		ret = true;
1048 		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1049 	}
1050 
1051 	if (data[IFLA_GRE_ENCAP_DPORT]) {
1052 		ret = true;
1053 		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1054 	}
1055 
1056 	return ret;
1057 }
1058 
1059 static int gre_tap_init(struct net_device *dev)
1060 {
1061 	__gre_tunnel_init(dev);
1062 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1063 
1064 	return ip_tunnel_init(dev);
1065 }
1066 
1067 static const struct net_device_ops gre_tap_netdev_ops = {
1068 	.ndo_init		= gre_tap_init,
1069 	.ndo_uninit		= ip_tunnel_uninit,
1070 	.ndo_start_xmit		= gre_tap_xmit,
1071 	.ndo_set_mac_address 	= eth_mac_addr,
1072 	.ndo_validate_addr	= eth_validate_addr,
1073 	.ndo_change_mtu		= ip_tunnel_change_mtu,
1074 	.ndo_get_stats64	= ip_tunnel_get_stats64,
1075 	.ndo_get_iflink		= ip_tunnel_get_iflink,
1076 	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1077 };
1078 
1079 static void ipgre_tap_setup(struct net_device *dev)
1080 {
1081 	ether_setup(dev);
1082 	dev->netdev_ops	= &gre_tap_netdev_ops;
1083 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1084 	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
1085 	ip_tunnel_setup(dev, gre_tap_net_id);
1086 }
1087 
1088 static int ipgre_newlink(struct net *src_net, struct net_device *dev,
1089 			 struct nlattr *tb[], struct nlattr *data[])
1090 {
1091 	struct ip_tunnel_parm p;
1092 	struct ip_tunnel_encap ipencap;
1093 
1094 	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1095 		struct ip_tunnel *t = netdev_priv(dev);
1096 		int err = ip_tunnel_encap_setup(t, &ipencap);
1097 
1098 		if (err < 0)
1099 			return err;
1100 	}
1101 
1102 	ipgre_netlink_parms(dev, data, tb, &p);
1103 	return ip_tunnel_newlink(dev, tb, &p);
1104 }
1105 
1106 static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1107 			    struct nlattr *data[])
1108 {
1109 	struct ip_tunnel_parm p;
1110 	struct ip_tunnel_encap ipencap;
1111 
1112 	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1113 		struct ip_tunnel *t = netdev_priv(dev);
1114 		int err = ip_tunnel_encap_setup(t, &ipencap);
1115 
1116 		if (err < 0)
1117 			return err;
1118 	}
1119 
1120 	ipgre_netlink_parms(dev, data, tb, &p);
1121 	return ip_tunnel_changelink(dev, tb, &p);
1122 }
1123 
1124 static size_t ipgre_get_size(const struct net_device *dev)
1125 {
1126 	return
1127 		/* IFLA_GRE_LINK */
1128 		nla_total_size(4) +
1129 		/* IFLA_GRE_IFLAGS */
1130 		nla_total_size(2) +
1131 		/* IFLA_GRE_OFLAGS */
1132 		nla_total_size(2) +
1133 		/* IFLA_GRE_IKEY */
1134 		nla_total_size(4) +
1135 		/* IFLA_GRE_OKEY */
1136 		nla_total_size(4) +
1137 		/* IFLA_GRE_LOCAL */
1138 		nla_total_size(4) +
1139 		/* IFLA_GRE_REMOTE */
1140 		nla_total_size(4) +
1141 		/* IFLA_GRE_TTL */
1142 		nla_total_size(1) +
1143 		/* IFLA_GRE_TOS */
1144 		nla_total_size(1) +
1145 		/* IFLA_GRE_PMTUDISC */
1146 		nla_total_size(1) +
1147 		/* IFLA_GRE_ENCAP_TYPE */
1148 		nla_total_size(2) +
1149 		/* IFLA_GRE_ENCAP_FLAGS */
1150 		nla_total_size(2) +
1151 		/* IFLA_GRE_ENCAP_SPORT */
1152 		nla_total_size(2) +
1153 		/* IFLA_GRE_ENCAP_DPORT */
1154 		nla_total_size(2) +
1155 		/* IFLA_GRE_COLLECT_METADATA */
1156 		nla_total_size(0) +
1157 		0;
1158 }
1159 
1160 static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1161 {
1162 	struct ip_tunnel *t = netdev_priv(dev);
1163 	struct ip_tunnel_parm *p = &t->parms;
1164 
1165 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1166 	    nla_put_be16(skb, IFLA_GRE_IFLAGS, tnl_flags_to_gre_flags(p->i_flags)) ||
1167 	    nla_put_be16(skb, IFLA_GRE_OFLAGS, tnl_flags_to_gre_flags(p->o_flags)) ||
1168 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1169 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1170 	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1171 	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1172 	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1173 	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1174 	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1175 		       !!(p->iph.frag_off & htons(IP_DF))))
1176 		goto nla_put_failure;
1177 
1178 	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
1179 			t->encap.type) ||
1180 	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
1181 			 t->encap.sport) ||
1182 	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
1183 			 t->encap.dport) ||
1184 	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
1185 			t->encap.flags))
1186 		goto nla_put_failure;
1187 
1188 	if (t->collect_md) {
1189 		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
1190 			goto nla_put_failure;
1191 	}
1192 
1193 	return 0;
1194 
1195 nla_put_failure:
1196 	return -EMSGSIZE;
1197 }
1198 
1199 static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1200 	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
1201 	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
1202 	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
1203 	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
1204 	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
1205 	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
1206 	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
1207 	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
1208 	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
1209 	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
1210 	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
1211 	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
1212 	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
1213 	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
1214 	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
1215 };
1216 
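/* Two link types are registered: "gre" for plain layer 3 GRE devices
 * (ARPHRD_IPGRE) and "gretap" for Ethernet-over-GRE (ETH_P_TEB) devices
 * that behave like ordinary Ethernet interfaces.
 */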
1217 static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1218 	.kind		= "gre",
1219 	.maxtype	= IFLA_GRE_MAX,
1220 	.policy		= ipgre_policy,
1221 	.priv_size	= sizeof(struct ip_tunnel),
1222 	.setup		= ipgre_tunnel_setup,
1223 	.validate	= ipgre_tunnel_validate,
1224 	.newlink	= ipgre_newlink,
1225 	.changelink	= ipgre_changelink,
1226 	.dellink	= ip_tunnel_dellink,
1227 	.get_size	= ipgre_get_size,
1228 	.fill_info	= ipgre_fill_info,
1229 	.get_link_net	= ip_tunnel_get_link_net,
1230 };
1231 
1232 static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1233 	.kind		= "gretap",
1234 	.maxtype	= IFLA_GRE_MAX,
1235 	.policy		= ipgre_policy,
1236 	.priv_size	= sizeof(struct ip_tunnel),
1237 	.setup		= ipgre_tap_setup,
1238 	.validate	= ipgre_tap_validate,
1239 	.newlink	= ipgre_newlink,
1240 	.changelink	= ipgre_changelink,
1241 	.dellink	= ip_tunnel_dellink,
1242 	.get_size	= ipgre_get_size,
1243 	.fill_info	= ipgre_fill_info,
1244 	.get_link_net	= ip_tunnel_get_link_net,
1245 };
1246 
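/* Create a flow-based (collect_md) gretap device for an in-kernel caller
 * such as openvswitch, without going through the netlink interface.
 */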
1247 struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1248 					u8 name_assign_type)
1249 {
1250 	struct nlattr *tb[IFLA_MAX + 1];
1251 	struct net_device *dev;
1252 	struct ip_tunnel *t;
1253 	int err;
1254 
1255 	memset(&tb, 0, sizeof(tb));
1256 
1257 	dev = rtnl_create_link(net, name, name_assign_type,
1258 			       &ipgre_tap_ops, tb);
1259 	if (IS_ERR(dev))
1260 		return dev;
1261 
1262 	/* Configure flow based GRE device. */
1263 	t = netdev_priv(dev);
1264 	t->collect_md = true;
1265 
1266 	err = ipgre_newlink(net, dev, tb, NULL);
1267 	if (err < 0)
1268 		goto out;
1269 
1270 	/* openvswitch users expect packet sizes to be unrestricted,
1271 	 * so set the largest MTU we can.
1272 	 */
1273 	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
1274 	if (err)
1275 		goto out;
1276 
1277 	return dev;
1278 out:
1279 	free_netdev(dev);
1280 	return ERR_PTR(err);
1281 }
1282 EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
1283 
1284 static int __net_init ipgre_tap_init_net(struct net *net)
1285 {
1286 	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
1287 }
1288 
1289 static void __net_exit ipgre_tap_exit_net(struct net *net)
1290 {
1291 	struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);
1292 	ip_tunnel_delete_net(itn, &ipgre_tap_ops);
1293 }
1294 
1295 static struct pernet_operations ipgre_tap_net_ops = {
1296 	.init = ipgre_tap_init_net,
1297 	.exit = ipgre_tap_exit_net,
1298 	.id   = &gre_tap_net_id,
1299 	.size = sizeof(struct ip_tunnel_net),
1300 };
1301 
1302 static int __init ipgre_init(void)
1303 {
1304 	int err;
1305 
1306 	pr_info("GRE over IPv4 tunneling driver\n");
1307 
1308 	err = register_pernet_device(&ipgre_net_ops);
1309 	if (err < 0)
1310 		return err;
1311 
1312 	err = register_pernet_device(&ipgre_tap_net_ops);
1313 	if (err < 0)
1314 		goto pnet_tap_failed;
1315 
1316 	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1317 	if (err < 0) {
1318 		pr_info("%s: can't add protocol\n", __func__);
1319 		goto add_proto_failed;
1320 	}
1321 
1322 	err = rtnl_link_register(&ipgre_link_ops);
1323 	if (err < 0)
1324 		goto rtnl_link_failed;
1325 
1326 	err = rtnl_link_register(&ipgre_tap_ops);
1327 	if (err < 0)
1328 		goto tap_ops_failed;
1329 
1330 	return 0;
1331 
1332 tap_ops_failed:
1333 	rtnl_link_unregister(&ipgre_link_ops);
1334 rtnl_link_failed:
1335 	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1336 add_proto_failed:
1337 	unregister_pernet_device(&ipgre_tap_net_ops);
1338 pnet_tap_failed:
1339 	unregister_pernet_device(&ipgre_net_ops);
1340 	return err;
1341 }
1342 
1343 static void __exit ipgre_fini(void)
1344 {
1345 	rtnl_link_unregister(&ipgre_tap_ops);
1346 	rtnl_link_unregister(&ipgre_link_ops);
1347 	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1348 	unregister_pernet_device(&ipgre_tap_net_ops);
1349 	unregister_pernet_device(&ipgre_net_ops);
1350 }
1351 
1352 module_init(ipgre_init);
1353 module_exit(ipgre_fini);
1354 MODULE_LICENSE("GPL");
1355 MODULE_ALIAS_RTNL_LINK("gre");
1356 MODULE_ALIAS_RTNL_LINK("gretap");
1357 MODULE_ALIAS_NETDEV("gre0");
1358 MODULE_ALIAS_NETDEV("gretap0");
1359