1 /*
2  *	Linux NET3:	GRE over IP protocol decoder.
3  *
4  *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
5  *
6  *	This program is free software; you can redistribute it and/or
7  *	modify it under the terms of the GNU General Public License
8  *	as published by the Free Software Foundation; either version
9  *	2 of the License, or (at your option) any later version.
10  *
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/capability.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <linux/uaccess.h>
21 #include <linux/skbuff.h>
22 #include <linux/netdevice.h>
23 #include <linux/in.h>
24 #include <linux/tcp.h>
25 #include <linux/udp.h>
26 #include <linux/if_arp.h>
27 #include <linux/if_vlan.h>
28 #include <linux/init.h>
29 #include <linux/in6.h>
30 #include <linux/inetdevice.h>
31 #include <linux/igmp.h>
32 #include <linux/netfilter_ipv4.h>
33 #include <linux/etherdevice.h>
34 #include <linux/if_ether.h>
35 
36 #include <net/sock.h>
37 #include <net/ip.h>
38 #include <net/icmp.h>
39 #include <net/protocol.h>
40 #include <net/ip_tunnels.h>
41 #include <net/arp.h>
42 #include <net/checksum.h>
43 #include <net/dsfield.h>
44 #include <net/inet_ecn.h>
45 #include <net/xfrm.h>
46 #include <net/net_namespace.h>
47 #include <net/netns/generic.h>
48 #include <net/rtnetlink.h>
49 #include <net/gre.h>
50 #include <net/dst_metadata.h>
51 #include <net/erspan.h>
52 
53 /*
54    Problems & solutions
55    --------------------
56 
57    1. The most important issue is detecting local dead loops.
58    They would cause complete host lockup in transmit, which
59    would be "resolved" by stack overflow or, if queueing is enabled,
60    by infinite looping in net_bh.
61 
62    We cannot track such dead loops during route installation;
63    it is an infeasible task. The most general solution would be
64    to keep an skb->encapsulation counter (a sort of local TTL)
65    and silently drop the packet when it expires. It is a good
66    solution, but it would require maintaining a new variable in
67    ALL skbs, even if no tunneling is used.
68 
69    Current solution: xmit_recursion breaks dead loops. This is a percpu
70    counter, since when we enter the first ndo_xmit(), CPU migration is
71    forbidden. We force an exit if this counter reaches RECURSION_LIMIT.
72 
73    2. Networking dead loops would not kill routers, but they would really
74    kill the network. The IP hop limit plays the role of "t->recursion" in
75    this case, if we copy it from the packet being encapsulated to the
76    upper header. It is a very good solution, but it introduces two problems:
77 
78    - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
79      do not work over tunnels.
80    - traceroute does not work. I planned to relay ICMP from the tunnel,
81      so that this problem would be solved and traceroute output would
82      be even more informative. This idea turned out to be wrong:
83      only Linux complies with RFC 1812 now (yes, guys, Linux is the only
84      true router now :-)); all other routers (at least, in my
85      neighbourhood) return only 8 bytes of payload. That is the end of it.
86 
87    Hence, if we want OSPF to work or traceroute to say something
88    reasonable, we should search for another solution.
89 
90    One of them is to parse the packet, trying to detect an inner
91    encapsulation made by our node. That is difficult or even impossible,
92    especially taking fragmentation into account. In short, TTL is no
93    solution at all.
94 
95    Current solution: the solution was UNEXPECTEDLY SIMPLE.
96    We force the DF flag on tunnels with a preconfigured hop limit;
97    that is ALL. :-) Well, it does not remove the problem completely,
98    but exponential growth of network traffic changes to linear
99    (branches that exceed the PMTU are pruned) and the tunnel MTU
100    rapidly degrades to a value < 68, where looping stops.
101    Yes, it is not good if there is a router in the loop which does
102    not force DF, even when the encapsulated packets have DF set.
103    But that is not our problem! Nobody can accuse us; we did all
104    that we could. Even if it was your gated that injected the fatal
105    route into the network, even if it was you who configured the
106    fatal static route: you are innocent. :-)
107 
108    Alexey Kuznetsov.
109  */
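
/* A minimal sketch of the xmit_recursion guard described above, under the
 * assumption of a hypothetical percpu counter and limit; the real guard
 * lives in the core dev_queue_xmit() path, not in this file:
 *
 *	static DEFINE_PER_CPU(unsigned int, example_xmit_recursion);
 *	#define EXAMPLE_RECURSION_LIMIT 8
 *
 *	static bool example_xmit_recursion_ok(void)
 *	{
 *		// Preemption is off inside ndo_start_xmit(), so the
 *		// percpu counter is stable for the whole call.
 *		if (__this_cpu_read(example_xmit_recursion) >=
 *		    EXAMPLE_RECURSION_LIMIT)
 *			return false;	// dead loop suspected: drop
 *		__this_cpu_inc(example_xmit_recursion);
 *		return true;
 *	}
 */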
109 
110 static bool log_ecn_error = true;
111 module_param(log_ecn_error, bool, 0644);
112 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
113 
114 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
115 static int ipgre_tunnel_init(struct net_device *dev);
116 static void erspan_build_header(struct sk_buff *skb,
117 				u32 id, u32 index,
118 				bool truncate, bool is_ipv4);
119 
120 static unsigned int ipgre_net_id __read_mostly;
121 static unsigned int gre_tap_net_id __read_mostly;
122 static unsigned int erspan_net_id __read_mostly;
123 
124 static int ipgre_err(struct sk_buff *skb, u32 info,
125 		     const struct tnl_ptk_info *tpi)
126 {
127 
128 	/* All the routers (except for Linux) return only
129 	   8 bytes of packet payload. This means that precise relaying of
130 	   ICMP in the real Internet is absolutely infeasible.
131 
132 	   Moreover, Cisco "wise men" put the GRE key in the third word
133 	   of the GRE header. That makes it impossible to maintain even
134 	   soft state for keyed GRE tunnels with checksums enabled. Tell
135 	   them "thank you".
136 
137 	   Well, I wonder: RFC 1812 was written by a Cisco employee,
138 	   so why the hell do these idiots break standards established
139 	   by themselves???
140 	   */
141 	struct net *net = dev_net(skb->dev);
142 	struct ip_tunnel_net *itn;
143 	const struct iphdr *iph;
144 	const int type = icmp_hdr(skb)->type;
145 	const int code = icmp_hdr(skb)->code;
146 	unsigned int data_len = 0;
147 	struct ip_tunnel *t;
148 
149 	if (tpi->proto == htons(ETH_P_TEB))
150 		itn = net_generic(net, gre_tap_net_id);
151 	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
152 		 tpi->proto == htons(ETH_P_ERSPAN2))
153 		itn = net_generic(net, erspan_net_id);
154 	else
155 		itn = net_generic(net, ipgre_net_id);
156 
157 	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
158 	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
159 			     iph->daddr, iph->saddr, tpi->key);
160 
161 	if (!t)
162 		return -ENOENT;
163 
164 	switch (type) {
165 	default:
166 	case ICMP_PARAMETERPROB:
167 		return 0;
168 
169 	case ICMP_DEST_UNREACH:
170 		switch (code) {
171 		case ICMP_SR_FAILED:
172 		case ICMP_PORT_UNREACH:
173 			/* Impossible event. */
174 			return 0;
175 		default:
176 			/* All others are translated to HOST_UNREACH.
177 			   RFC 2003 contains "deep thoughts" about NET_UNREACH;
178 			   I believe they are just ether pollution. --ANK
179 			 */
180 			break;
181 		}
182 		break;
183 
184 	case ICMP_TIME_EXCEEDED:
185 		if (code != ICMP_EXC_TTL)
186 			return 0;
187 		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
188 		break;
189 
190 	case ICMP_REDIRECT:
191 		break;
192 	}
193 
194 #if IS_ENABLED(CONFIG_IPV6)
195 	if (tpi->proto == htons(ETH_P_IPV6) &&
196 	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
197 					type, data_len))
198 		return 0;
199 #endif
200 
201 	if (t->parms.iph.daddr == 0 ||
202 	    ipv4_is_multicast(t->parms.iph.daddr))
203 		return 0;
204 
205 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
206 		return 0;
207 
208 	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
209 		t->err_count++;
210 	else
211 		t->err_count = 1;
212 	t->err_time = jiffies;
213 
214 	return 0;
215 }
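
/* A note on the data_len computation in ipgre_err() above: per RFC 4884
 * section 4.1, the length field (the second byte of the ICMP header's
 * otherwise-unused word) counts 32-bit words of the embedded datagram,
 * hence the "* 4". For example, a field value of 17 describes 68 bytes
 * of embedded payload: enough for the outer IPv4 header, a GRE header
 * and the start of the inner packet.
 */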
216 
217 static void gre_err(struct sk_buff *skb, u32 info)
218 {
219 	/* All the routers (except for Linux) return only
220 	 * 8 bytes of packet payload. This means that precise relaying
221 	 * of ICMP in the real Internet is absolutely infeasible.
222 	 *
223 	 * Moreover, Cisco "wise men" put the GRE key in the third word
224 	 * of the GRE header. That makes it impossible to maintain even
225 	 * soft state for keyed GRE tunnels with checksums enabled.
226 	 * Tell them "thank you".
227 	 *
228 	 * Well, I wonder: RFC 1812 was written by a Cisco employee,
229 	 * so why the hell do these idiots break standards established
230 	 * by themselves???
231 	 */
232 
233 	const struct iphdr *iph = (struct iphdr *)skb->data;
234 	const int type = icmp_hdr(skb)->type;
235 	const int code = icmp_hdr(skb)->code;
236 	struct tnl_ptk_info tpi;
237 
238 	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
239 			     iph->ihl * 4) < 0)
240 		return;
241 
242 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
243 		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
244 				 skb->dev->ifindex, IPPROTO_GRE);
245 		return;
246 	}
247 	if (type == ICMP_REDIRECT) {
248 		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
249 			      IPPROTO_GRE);
250 		return;
251 	}
252 
253 	ipgre_err(skb, info, &tpi);
254 }
255 
256 static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
257 		      int gre_hdr_len)
258 {
259 	struct net *net = dev_net(skb->dev);
260 	struct metadata_dst *tun_dst = NULL;
261 	struct erspan_base_hdr *ershdr;
262 	struct erspan_metadata *pkt_md;
263 	struct ip_tunnel_net *itn;
264 	struct ip_tunnel *tunnel;
265 	const struct iphdr *iph;
266 	struct erspan_md2 *md2;
267 	int ver;
268 	int len;
269 
270 	itn = net_generic(net, erspan_net_id);
271 	len = gre_hdr_len + sizeof(*ershdr);
272 
273 	/* Check the base header length */
274 	if (unlikely(!pskb_may_pull(skb, len)))
275 		return PACKET_REJECT;
276 
277 	iph = ip_hdr(skb);
278 	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
279 	ver = ershdr->ver;
280 
281 	/* The original GRE header does not have a key field,
282 	 * so use the ERSPAN 10-bit session ID as the key.
283 	 */
284 	tpi->key = cpu_to_be32(get_session_id(ershdr));
285 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
286 				  tpi->flags | TUNNEL_KEY,
287 				  iph->saddr, iph->daddr, tpi->key);
288 
289 	if (tunnel) {
290 		len = gre_hdr_len + erspan_hdr_len(ver);
291 		if (unlikely(!pskb_may_pull(skb, len)))
292 			return PACKET_REJECT;
293 
294 		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
295 		pkt_md = (struct erspan_metadata *)(ershdr + 1);
296 
297 		if (__iptunnel_pull_header(skb,
298 					   len,
299 					   htons(ETH_P_TEB),
300 					   false, false) < 0)
301 			goto drop;
302 
303 		if (tunnel->collect_md) {
304 			struct ip_tunnel_info *info;
305 			struct erspan_metadata *md;
306 			__be64 tun_id;
307 			__be16 flags;
308 
309 			tpi->flags |= TUNNEL_KEY;
310 			flags = tpi->flags;
311 			tun_id = key32_to_tunnel_id(tpi->key);
312 
313 			tun_dst = ip_tun_rx_dst(skb, flags,
314 						tun_id, sizeof(*md));
315 			if (!tun_dst)
316 				return PACKET_REJECT;
317 
318 			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
319 			md->version = ver;
320 			md2 = &md->u.md2;
321 			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
322 						       ERSPAN_V2_MDSIZE);
323 
324 			info = &tun_dst->u.tun_info;
325 			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
326 			info->options_len = sizeof(*md);
327 		}
328 
329 		skb_reset_mac_header(skb);
330 		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
331 		return PACKET_RCVD;
332 	}
333 	return PACKET_REJECT;
334 
335 drop:
336 	kfree_skb(skb);
337 	return PACKET_RCVD;
338 }
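
/* For reference, the session-ID-to-key mapping used in erspan_rcv()
 * above, sketched under the assumption that get_session_id() (from
 * net/erspan.h) reassembles the 10-bit ID from the base header:
 *
 *	u16 sid = get_session_id(ershdr);	// 0..1023
 *	tpi->key = cpu_to_be32(sid);		// reused as the GRE key
 *
 * The same 10-bit limit is what erspan_validate() below enforces on
 * IFLA_GRE_IKEY/IFLA_GRE_OKEY via ID_MASK.
 */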
339 
340 static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
341 		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
342 {
343 	struct metadata_dst *tun_dst = NULL;
344 	const struct iphdr *iph;
345 	struct ip_tunnel *tunnel;
346 
347 	iph = ip_hdr(skb);
348 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
349 				  iph->saddr, iph->daddr, tpi->key);
350 
351 	if (tunnel) {
352 		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
353 					   raw_proto, false) < 0)
354 			goto drop;
355 
356 		if (tunnel->dev->type != ARPHRD_NONE)
357 			skb_pop_mac_header(skb);
358 		else
359 			skb_reset_mac_header(skb);
360 		if (tunnel->collect_md) {
361 			__be16 flags;
362 			__be64 tun_id;
363 
364 			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
365 			tun_id = key32_to_tunnel_id(tpi->key);
366 			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
367 			if (!tun_dst)
368 				return PACKET_REJECT;
369 		}
370 
371 		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
372 		return PACKET_RCVD;
373 	}
374 	return PACKET_NEXT;
375 
376 drop:
377 	kfree_skb(skb);
378 	return PACKET_RCVD;
379 }
380 
381 static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
382 		     int hdr_len)
383 {
384 	struct net *net = dev_net(skb->dev);
385 	struct ip_tunnel_net *itn;
386 	int res;
387 
388 	if (tpi->proto == htons(ETH_P_TEB))
389 		itn = net_generic(net, gre_tap_net_id);
390 	else
391 		itn = net_generic(net, ipgre_net_id);
392 
393 	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
394 	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
395 		/* ipgre tunnels in collect-metadata mode should also
396 		 * receive ETH_P_TEB traffic.
397 		 */
398 		itn = net_generic(net, ipgre_net_id);
399 		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
400 	}
401 	return res;
402 }
403 
404 static int gre_rcv(struct sk_buff *skb)
405 {
406 	struct tnl_ptk_info tpi;
407 	bool csum_err = false;
408 	int hdr_len;
409 
410 #ifdef CONFIG_NET_IPGRE_BROADCAST
411 	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
412 		/* Looped back packet, drop it! */
413 		if (rt_is_output_route(skb_rtable(skb)))
414 			goto drop;
415 	}
416 #endif
417 
418 	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
419 	if (hdr_len < 0)
420 		goto drop;
421 
422 	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
423 		     tpi.proto == htons(ETH_P_ERSPAN2))) {
424 		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
425 			return 0;
426 		goto out;
427 	}
428 
429 	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
430 		return 0;
431 
432 out:
433 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
434 drop:
435 	kfree_skb(skb);
436 	return 0;
437 }
438 
439 static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
440 		       const struct iphdr *tnl_params,
441 		       __be16 proto)
442 {
443 	struct ip_tunnel *tunnel = netdev_priv(dev);
444 
445 	if (tunnel->parms.o_flags & TUNNEL_SEQ)
446 		tunnel->o_seqno++;
447 
448 	/* Push GRE header. */
449 	gre_build_header(skb, tunnel->tun_hlen,
450 			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
451 			 htonl(tunnel->o_seqno));
452 
453 	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
454 }
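
/* For reference, the header layout that gre_build_header() pushes
 * (RFC 2784 with the RFC 2890 key/sequence extensions; which optional
 * words are present depends on tunnel->parms.o_flags):
 *
 *	 0                   1                   2                   3
 *	 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|C| |K|S|   Reserved0     | Ver |        Protocol Type          |
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|      Checksum (optional)      |     Reserved1 (optional)      |
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|                        Key (optional)                         |
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|                  Sequence Number (optional)                   |
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */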
455 
456 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
457 {
458 	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
459 }
460 
461 static struct rtable *gre_get_rt(struct sk_buff *skb,
462 				 struct net_device *dev,
463 				 struct flowi4 *fl,
464 				 const struct ip_tunnel_key *key)
465 {
466 	struct net *net = dev_net(dev);
467 
468 	memset(fl, 0, sizeof(*fl));
469 	fl->daddr = key->u.ipv4.dst;
470 	fl->saddr = key->u.ipv4.src;
471 	fl->flowi4_tos = RT_TOS(key->tos);
472 	fl->flowi4_mark = skb->mark;
473 	fl->flowi4_proto = IPPROTO_GRE;
474 
475 	return ip_route_output_key(net, fl);
476 }
477 
478 static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
479 				      struct net_device *dev,
480 				      struct flowi4 *fl,
481 				      int tunnel_hlen)
482 {
483 	struct ip_tunnel_info *tun_info;
484 	const struct ip_tunnel_key *key;
485 	struct rtable *rt = NULL;
486 	int min_headroom;
487 	bool use_cache;
488 	int err;
489 
490 	tun_info = skb_tunnel_info(skb);
491 	key = &tun_info->key;
492 	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
493 
494 	if (use_cache)
495 		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
496 	if (!rt) {
497 		rt = gre_get_rt(skb, dev, fl, key);
498 		if (IS_ERR(rt))
499 			goto err_free_skb;
500 		if (use_cache)
501 			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
502 					  fl->saddr);
503 	}
504 
505 	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
506 			+ tunnel_hlen + sizeof(struct iphdr);
507 	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
508 		int head_delta = SKB_DATA_ALIGN(min_headroom -
509 						skb_headroom(skb) +
510 						16);
511 		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
512 				       0, GFP_ATOMIC);
513 		if (unlikely(err))
514 			goto err_free_rt;
515 	}
516 	return rt;
517 
518 err_free_rt:
519 	ip_rt_put(rt);
520 err_free_skb:
521 	kfree_skb(skb);
522 	dev->stats.tx_dropped++;
523 	return NULL;
524 }
525 
526 static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
527 			__be16 proto)
528 {
529 	struct ip_tunnel *tunnel = netdev_priv(dev);
530 	struct ip_tunnel_info *tun_info;
531 	const struct ip_tunnel_key *key;
532 	struct rtable *rt = NULL;
533 	struct flowi4 fl;
534 	int tunnel_hlen;
535 	__be16 df, flags;
536 
537 	tun_info = skb_tunnel_info(skb);
538 	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
539 		     ip_tunnel_info_af(tun_info) != AF_INET))
540 		goto err_free_skb;
541 
542 	key = &tun_info->key;
543 	tunnel_hlen = gre_calc_hlen(key->tun_flags);
544 
545 	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
546 	if (!rt)
547 		return;
548 
549 	/* Push Tunnel header. */
550 	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
551 		goto err_free_rt;
552 
553 	flags = tun_info->key.tun_flags &
554 		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
555 	gre_build_header(skb, tunnel_hlen, flags, proto,
556 			 tunnel_id_to_key32(tun_info->key.tun_id),
557 			 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
558 
559 	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
560 
561 	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
562 		      key->tos, key->ttl, df, false);
563 	return;
564 
565 err_free_rt:
566 	ip_rt_put(rt);
567 err_free_skb:
568 	kfree_skb(skb);
569 	dev->stats.tx_dropped++;
570 }
571 
572 static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
573 			   __be16 proto)
574 {
575 	struct ip_tunnel *tunnel = netdev_priv(dev);
576 	struct ip_tunnel_info *tun_info;
577 	const struct ip_tunnel_key *key;
578 	struct erspan_metadata *md;
579 	struct rtable *rt = NULL;
580 	bool truncate = false;
581 	struct flowi4 fl;
582 	int tunnel_hlen;
583 	int version;
584 	__be16 df;
585 	int nhoff;
586 	int thoff;
587 
588 	tun_info = skb_tunnel_info(skb);
589 	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
590 		     ip_tunnel_info_af(tun_info) != AF_INET))
591 		goto err_free_skb;
592 
593 	key = &tun_info->key;
594 	if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
595 		goto err_free_rt;
596 	md = ip_tunnel_info_opts(tun_info);
597 	if (!md)
598 		goto err_free_rt;
599 
600 	/* ERSPAN has a fixed 8-byte GRE header */
601 	version = md->version;
602 	tunnel_hlen = 8 + erspan_hdr_len(version);
603 
604 	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
605 	if (!rt)
606 		return;
607 
608 	if (gre_handle_offloads(skb, false))
609 		goto err_free_rt;
610 
611 	if (skb->len > dev->mtu + dev->hard_header_len) {
612 		pskb_trim(skb, dev->mtu + dev->hard_header_len);
613 		truncate = true;
614 	}
615 
616 	nhoff = skb_network_header(skb) - skb_mac_header(skb);
617 	if (skb->protocol == htons(ETH_P_IP) &&
618 	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
619 		truncate = true;
620 
621 	thoff = skb_transport_header(skb) - skb_mac_header(skb);
622 	if (skb->protocol == htons(ETH_P_IPV6) &&
623 	    (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
624 		truncate = true;
625 
626 	if (version == 1) {
627 		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
628 				    ntohl(md->u.index), truncate, true);
629 	} else if (version == 2) {
630 		erspan_build_header_v2(skb,
631 				       ntohl(tunnel_id_to_key32(key->tun_id)),
632 				       md->u.md2.dir,
633 				       get_hwid(&md->u.md2),
634 				       truncate, true);
635 	} else {
636 		goto err_free_rt;
637 	}
638 
639 	gre_build_header(skb, 8, TUNNEL_SEQ,
640 			 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
641 
642 	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
643 
644 	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
645 		      key->tos, key->ttl, df, false);
646 	return;
647 
648 err_free_rt:
649 	ip_rt_put(rt);
650 err_free_skb:
651 	kfree_skb(skb);
652 	dev->stats.tx_dropped++;
653 }
654 
655 static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
656 {
657 	struct ip_tunnel_info *info = skb_tunnel_info(skb);
658 	struct rtable *rt;
659 	struct flowi4 fl4;
660 
661 	if (ip_tunnel_info_af(info) != AF_INET)
662 		return -EINVAL;
663 
664 	rt = gre_get_rt(skb, dev, &fl4, &info->key);
665 	if (IS_ERR(rt))
666 		return PTR_ERR(rt);
667 
668 	ip_rt_put(rt);
669 	info->key.u.ipv4.src = fl4.saddr;
670 	return 0;
671 }
672 
673 static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
674 			      struct net_device *dev)
675 {
676 	struct ip_tunnel *tunnel = netdev_priv(dev);
677 	const struct iphdr *tnl_params;
678 
679 	if (!pskb_inet_may_pull(skb))
680 		goto free_skb;
681 
682 	if (tunnel->collect_md) {
683 		gre_fb_xmit(skb, dev, skb->protocol);
684 		return NETDEV_TX_OK;
685 	}
686 
687 	if (dev->header_ops) {
688 		/* Need space for new headers */
689 		if (skb_cow_head(skb, dev->needed_headroom -
690 				      (tunnel->hlen + sizeof(struct iphdr))))
691 			goto free_skb;
692 
693 		tnl_params = (const struct iphdr *)skb->data;
694 
695 		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
696 		 * to the GRE header.
697 		 */
698 		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
699 		skb_reset_mac_header(skb);
700 	} else {
701 		if (skb_cow_head(skb, dev->needed_headroom))
702 			goto free_skb;
703 
704 		tnl_params = &tunnel->parms.iph;
705 	}
706 
707 	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
708 		goto free_skb;
709 
710 	__gre_xmit(skb, dev, tnl_params, skb->protocol);
711 	return NETDEV_TX_OK;
712 
713 free_skb:
714 	kfree_skb(skb);
715 	dev->stats.tx_dropped++;
716 	return NETDEV_TX_OK;
717 }
718 
719 static netdev_tx_t erspan_xmit(struct sk_buff *skb,
720 			       struct net_device *dev)
721 {
722 	struct ip_tunnel *tunnel = netdev_priv(dev);
723 	bool truncate = false;
724 
725 	if (!pskb_inet_may_pull(skb))
726 		goto free_skb;
727 
728 	if (tunnel->collect_md) {
729 		erspan_fb_xmit(skb, dev, skb->protocol);
730 		return NETDEV_TX_OK;
731 	}
732 
733 	if (gre_handle_offloads(skb, false))
734 		goto free_skb;
735 
736 	if (skb_cow_head(skb, dev->needed_headroom))
737 		goto free_skb;
738 
739 	if (skb->len > dev->mtu + dev->hard_header_len) {
740 		pskb_trim(skb, dev->mtu + dev->hard_header_len);
741 		truncate = true;
742 	}
743 
744 	/* Push ERSPAN header */
745 	if (tunnel->erspan_ver == 1)
746 		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
747 				    tunnel->index,
748 				    truncate, true);
749 	else if (tunnel->erspan_ver == 2)
750 		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
751 				       tunnel->dir, tunnel->hwid,
752 				       truncate, true);
753 	else
754 		goto free_skb;
755 
756 	tunnel->parms.o_flags &= ~TUNNEL_KEY;
757 	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
758 	return NETDEV_TX_OK;
759 
760 free_skb:
761 	kfree_skb(skb);
762 	dev->stats.tx_dropped++;
763 	return NETDEV_TX_OK;
764 }
765 
766 static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
767 				struct net_device *dev)
768 {
769 	struct ip_tunnel *tunnel = netdev_priv(dev);
770 
771 	if (!pskb_inet_may_pull(skb))
772 		goto free_skb;
773 
774 	if (tunnel->collect_md) {
775 		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
776 		return NETDEV_TX_OK;
777 	}
778 
779 	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
780 		goto free_skb;
781 
782 	if (skb_cow_head(skb, dev->needed_headroom))
783 		goto free_skb;
784 
785 	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
786 	return NETDEV_TX_OK;
787 
788 free_skb:
789 	kfree_skb(skb);
790 	dev->stats.tx_dropped++;
791 	return NETDEV_TX_OK;
792 }
793 
794 static void ipgre_link_update(struct net_device *dev, bool set_mtu)
795 {
796 	struct ip_tunnel *tunnel = netdev_priv(dev);
797 	int len;
798 
799 	len = tunnel->tun_hlen;
800 	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
801 	len = tunnel->tun_hlen - len;
802 	tunnel->hlen = tunnel->hlen + len;
803 
804 	dev->needed_headroom = dev->needed_headroom + len;
805 	if (set_mtu)
806 		dev->mtu = max_t(int, dev->mtu - len, 68);
807 
808 	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
809 		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
810 		    tunnel->encap.type == TUNNEL_ENCAP_NONE) {
811 			dev->features |= NETIF_F_GSO_SOFTWARE;
812 			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
813 		} else {
814 			dev->features &= ~NETIF_F_GSO_SOFTWARE;
815 			dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
816 		}
817 		dev->features |= NETIF_F_LLTX;
818 	} else {
819 		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
820 		dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
821 	}
822 }
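
/* A worked example of the arithmetic above: enabling TUNNEL_KEY grows
 * gre_calc_hlen() by 4 (the base GRE header is 4 bytes, plus 4 each for
 * TUNNEL_CSUM, TUNNEL_KEY and TUNNEL_SEQ), so len is +4: hlen and
 * needed_headroom grow by 4 and, when set_mtu is true, dev->mtu shrinks
 * by 4, floored at 68.
 */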
823 
824 static int ipgre_tunnel_ioctl(struct net_device *dev,
825 			      struct ifreq *ifr, int cmd)
826 {
827 	struct ip_tunnel_parm p;
828 	int err;
829 
830 	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
831 		return -EFAULT;
832 
833 	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
834 		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
835 		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
836 		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
837 			return -EINVAL;
838 	}
839 
840 	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
841 	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);
842 
843 	err = ip_tunnel_ioctl(dev, &p, cmd);
844 	if (err)
845 		return err;
846 
847 	if (cmd == SIOCCHGTUNNEL) {
848 		struct ip_tunnel *t = netdev_priv(dev);
849 
850 		t->parms.i_flags = p.i_flags;
851 		t->parms.o_flags = p.o_flags;
852 
853 		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
854 			ipgre_link_update(dev, true);
855 	}
856 
857 	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
858 	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);
859 
860 	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
861 		return -EFAULT;
862 
863 	return 0;
864 }
865 
866 /* Nice toy. Unfortunately, useless in real life :-)
867    It allows one to construct a virtual multiprotocol broadcast "LAN"
868    over the Internet, provided multicast routing is set up.
869 
870 
871    I have no idea whether this bicycle was invented before me,
872    so I had to set ARPHRD_IPGRE to a random value.
873    I have the impression that Cisco could have made something similar,
874    but this feature is apparently missing in IOS<=11.2(8).
875 
876    I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
877    with broadcast 224.66.66.66. If you have access to the mbone, play with me :-)
878 
879    ping -t 255 224.66.66.66
880 
881    If nobody answers, the mbone does not work.
882 
883    ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
884    ip addr add 10.66.66.<somewhat>/24 dev Universe
885    ifconfig Universe up
886    ifconfig Universe add fe80::<Your_real_addr>/10
887    ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
888    ftp 10.66.66.66
889    ...
890    ftp fec0:6666:6666::193.233.7.65
891    ...
892  */
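
/* For what it's worth, a modern iproute2 equivalent of the setup above
 * (same illustrative addresses; ifconfig is long deprecated):
 *
 *	ip tunnel add Universe mode gre remote 224.66.66.66 \
 *		local <Your_real_addr> ttl 255
 *	ip addr add 10.66.66.<somewhat>/24 dev Universe
 *	ip link set Universe up
 *	ip addr add fe80::<Your_real_addr>/10 dev Universe
 *	ip addr add fec0:6666:6666::<Your_real_addr>/96 dev Universe
 */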
893 static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
894 			unsigned short type,
895 			const void *daddr, const void *saddr, unsigned int len)
896 {
897 	struct ip_tunnel *t = netdev_priv(dev);
898 	struct iphdr *iph;
899 	struct gre_base_hdr *greh;
900 
901 	iph = skb_push(skb, t->hlen + sizeof(*iph));
902 	greh = (struct gre_base_hdr *)(iph+1);
903 	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
904 	greh->protocol = htons(type);
905 
906 	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
907 
908 	/* Set the source hardware address. */
909 	if (saddr)
910 		memcpy(&iph->saddr, saddr, 4);
911 	if (daddr)
912 		memcpy(&iph->daddr, daddr, 4);
913 	if (iph->daddr)
914 		return t->hlen + sizeof(*iph);
915 
916 	return -(t->hlen + sizeof(*iph));
917 }
918 
919 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
920 {
921 	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
922 	memcpy(haddr, &iph->saddr, 4);
923 	return 4;
924 }
925 
926 static const struct header_ops ipgre_header_ops = {
927 	.create	= ipgre_header,
928 	.parse	= ipgre_header_parse,
929 };
930 
931 #ifdef CONFIG_NET_IPGRE_BROADCAST
932 static int ipgre_open(struct net_device *dev)
933 {
934 	struct ip_tunnel *t = netdev_priv(dev);
935 
936 	if (ipv4_is_multicast(t->parms.iph.daddr)) {
937 		struct flowi4 fl4;
938 		struct rtable *rt;
939 
940 		rt = ip_route_output_gre(t->net, &fl4,
941 					 t->parms.iph.daddr,
942 					 t->parms.iph.saddr,
943 					 t->parms.o_key,
944 					 RT_TOS(t->parms.iph.tos),
945 					 t->parms.link);
946 		if (IS_ERR(rt))
947 			return -EADDRNOTAVAIL;
948 		dev = rt->dst.dev;
949 		ip_rt_put(rt);
950 		if (!__in_dev_get_rtnl(dev))
951 			return -EADDRNOTAVAIL;
952 		t->mlink = dev->ifindex;
953 		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
954 	}
955 	return 0;
956 }
957 
958 static int ipgre_close(struct net_device *dev)
959 {
960 	struct ip_tunnel *t = netdev_priv(dev);
961 
962 	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
963 		struct in_device *in_dev;
964 		in_dev = inetdev_by_index(t->net, t->mlink);
965 		if (in_dev)
966 			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
967 	}
968 	return 0;
969 }
970 #endif
971 
972 static const struct net_device_ops ipgre_netdev_ops = {
973 	.ndo_init		= ipgre_tunnel_init,
974 	.ndo_uninit		= ip_tunnel_uninit,
975 #ifdef CONFIG_NET_IPGRE_BROADCAST
976 	.ndo_open		= ipgre_open,
977 	.ndo_stop		= ipgre_close,
978 #endif
979 	.ndo_start_xmit		= ipgre_xmit,
980 	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
981 	.ndo_change_mtu		= ip_tunnel_change_mtu,
982 	.ndo_get_stats64	= ip_tunnel_get_stats64,
983 	.ndo_get_iflink		= ip_tunnel_get_iflink,
984 };
985 
986 #define GRE_FEATURES (NETIF_F_SG |		\
987 		      NETIF_F_FRAGLIST |	\
988 		      NETIF_F_HIGHDMA |		\
989 		      NETIF_F_HW_CSUM)
990 
991 static void ipgre_tunnel_setup(struct net_device *dev)
992 {
993 	dev->netdev_ops		= &ipgre_netdev_ops;
994 	dev->type		= ARPHRD_IPGRE;
995 	ip_tunnel_setup(dev, ipgre_net_id);
996 }
997 
998 static void __gre_tunnel_init(struct net_device *dev)
999 {
1000 	struct ip_tunnel *tunnel;
1001 
1002 	tunnel = netdev_priv(dev);
1003 	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
1004 	tunnel->parms.iph.protocol = IPPROTO_GRE;
1005 
1006 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
1007 
1008 	dev->features		|= GRE_FEATURES;
1009 	dev->hw_features	|= GRE_FEATURES;
1010 
1011 	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
1012 		/* TCP offload with GRE SEQ is not supported, nor
1013 		 * can we support 2 levels of outer headers requiring
1014 		 * an update.
1015 		 */
1016 		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
1017 		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
1018 			dev->features    |= NETIF_F_GSO_SOFTWARE;
1019 			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1020 		}
1021 
1022 		/* Can use a lockless transmit, unless we generate
1023 		 * output sequences.
1024 		 */
1025 		dev->features |= NETIF_F_LLTX;
1026 	}
1027 }
1028 
1029 static int ipgre_tunnel_init(struct net_device *dev)
1030 {
1031 	struct ip_tunnel *tunnel = netdev_priv(dev);
1032 	struct iphdr *iph = &tunnel->parms.iph;
1033 
1034 	__gre_tunnel_init(dev);
1035 
1036 	memcpy(dev->dev_addr, &iph->saddr, 4);
1037 	memcpy(dev->broadcast, &iph->daddr, 4);
1038 
1039 	dev->flags		= IFF_NOARP;
1040 	netif_keep_dst(dev);
1041 	dev->addr_len		= 4;
1042 
1043 	if (iph->daddr && !tunnel->collect_md) {
1044 #ifdef CONFIG_NET_IPGRE_BROADCAST
1045 		if (ipv4_is_multicast(iph->daddr)) {
1046 			if (!iph->saddr)
1047 				return -EINVAL;
1048 			dev->flags = IFF_BROADCAST;
1049 			dev->header_ops = &ipgre_header_ops;
1050 		}
1051 #endif
1052 	} else if (!tunnel->collect_md) {
1053 		dev->header_ops = &ipgre_header_ops;
1054 	}
1055 
1056 	return ip_tunnel_init(dev);
1057 }
1058 
1059 static const struct gre_protocol ipgre_protocol = {
1060 	.handler     = gre_rcv,
1061 	.err_handler = gre_err,
1062 };
1063 
1064 static int __net_init ipgre_init_net(struct net *net)
1065 {
1066 	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
1067 }
1068 
1069 static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
1070 {
1071 	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
1072 }
1073 
1074 static struct pernet_operations ipgre_net_ops = {
1075 	.init = ipgre_init_net,
1076 	.exit_batch = ipgre_exit_batch_net,
1077 	.id   = &ipgre_net_id,
1078 	.size = sizeof(struct ip_tunnel_net),
1079 };
1080 
1081 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1082 				 struct netlink_ext_ack *extack)
1083 {
1084 	__be16 flags;
1085 
1086 	if (!data)
1087 		return 0;
1088 
1089 	flags = 0;
1090 	if (data[IFLA_GRE_IFLAGS])
1091 		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1092 	if (data[IFLA_GRE_OFLAGS])
1093 		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1094 	if (flags & (GRE_VERSION|GRE_ROUTING))
1095 		return -EINVAL;
1096 
1097 	if (data[IFLA_GRE_COLLECT_METADATA] &&
1098 	    data[IFLA_GRE_ENCAP_TYPE] &&
1099 	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
1100 		return -EINVAL;
1101 
1102 	return 0;
1103 }
1104 
1105 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1106 			      struct netlink_ext_ack *extack)
1107 {
1108 	__be32 daddr;
1109 
1110 	if (tb[IFLA_ADDRESS]) {
1111 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1112 			return -EINVAL;
1113 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1114 			return -EADDRNOTAVAIL;
1115 	}
1116 
1117 	if (!data)
1118 		goto out;
1119 
1120 	if (data[IFLA_GRE_REMOTE]) {
1121 		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1122 		if (!daddr)
1123 			return -EINVAL;
1124 	}
1125 
1126 out:
1127 	return ipgre_tunnel_validate(tb, data, extack);
1128 }
1129 
1130 static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
1131 			   struct netlink_ext_ack *extack)
1132 {
1133 	__be16 flags = 0;
1134 	int ret;
1135 
1136 	if (!data)
1137 		return 0;
1138 
1139 	ret = ipgre_tap_validate(tb, data, extack);
1140 	if (ret)
1141 		return ret;
1142 
1143 	/* ERSPAN should only have the GRE sequence and key flags */
1144 	if (data[IFLA_GRE_OFLAGS])
1145 		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1146 	if (data[IFLA_GRE_IFLAGS])
1147 		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1148 	if (!data[IFLA_GRE_COLLECT_METADATA] &&
1149 	    flags != (GRE_SEQ | GRE_KEY))
1150 		return -EINVAL;
1151 
1152 	/* The ERSPAN session ID is only 10 bits. Since we reuse the
1153 	 * 32-bit key field as the ID, check its range.
1154 	 */
1155 	if (data[IFLA_GRE_IKEY] &&
1156 	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1157 		return -EINVAL;
1158 
1159 	if (data[IFLA_GRE_OKEY] &&
1160 	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1161 		return -EINVAL;
1162 
1163 	return 0;
1164 }
1165 
1166 static int ipgre_netlink_parms(struct net_device *dev,
1167 				struct nlattr *data[],
1168 				struct nlattr *tb[],
1169 				struct ip_tunnel_parm *parms,
1170 				__u32 *fwmark)
1171 {
1172 	struct ip_tunnel *t = netdev_priv(dev);
1173 
1174 	memset(parms, 0, sizeof(*parms));
1175 
1176 	parms->iph.protocol = IPPROTO_GRE;
1177 
1178 	if (!data)
1179 		return 0;
1180 
1181 	if (data[IFLA_GRE_LINK])
1182 		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1183 
1184 	if (data[IFLA_GRE_IFLAGS])
1185 		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));
1186 
1187 	if (data[IFLA_GRE_OFLAGS])
1188 		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));
1189 
1190 	if (data[IFLA_GRE_IKEY])
1191 		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1192 
1193 	if (data[IFLA_GRE_OKEY])
1194 		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1195 
1196 	if (data[IFLA_GRE_LOCAL])
1197 		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
1198 
1199 	if (data[IFLA_GRE_REMOTE])
1200 		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
1201 
1202 	if (data[IFLA_GRE_TTL])
1203 		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1204 
1205 	if (data[IFLA_GRE_TOS])
1206 		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1207 
1208 	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
1209 		if (t->ignore_df)
1210 			return -EINVAL;
1211 		parms->iph.frag_off = htons(IP_DF);
1212 	}
1213 
1214 	if (data[IFLA_GRE_COLLECT_METADATA]) {
1215 		t->collect_md = true;
1216 		if (dev->type == ARPHRD_IPGRE)
1217 			dev->type = ARPHRD_NONE;
1218 	}
1219 
1220 	if (data[IFLA_GRE_IGNORE_DF]) {
1221 		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
1222 		  && (parms->iph.frag_off & htons(IP_DF)))
1223 			return -EINVAL;
1224 		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
1225 	}
1226 
1227 	if (data[IFLA_GRE_FWMARK])
1228 		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
1229 
1230 	if (data[IFLA_GRE_ERSPAN_VER]) {
1231 		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1232 
1233 		if (t->erspan_ver != 1 && t->erspan_ver != 2)
1234 			return -EINVAL;
1235 	}
1236 
1237 	if (t->erspan_ver == 1) {
1238 		if (data[IFLA_GRE_ERSPAN_INDEX]) {
1239 			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1240 			if (t->index & ~INDEX_MASK)
1241 				return -EINVAL;
1242 		}
1243 	} else if (t->erspan_ver == 2) {
1244 		if (data[IFLA_GRE_ERSPAN_DIR]) {
1245 			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1246 			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
1247 				return -EINVAL;
1248 		}
1249 		if (data[IFLA_GRE_ERSPAN_HWID]) {
1250 			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1251 			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
1252 				return -EINVAL;
1253 		}
1254 	}
1255 
1256 	return 0;
1257 }
1258 
1259 /* This function returns true when ENCAP attributes are present in the nl msg */
1260 static bool ipgre_netlink_encap_parms(struct nlattr *data[],
1261 				      struct ip_tunnel_encap *ipencap)
1262 {
1263 	bool ret = false;
1264 
1265 	memset(ipencap, 0, sizeof(*ipencap));
1266 
1267 	if (!data)
1268 		return ret;
1269 
1270 	if (data[IFLA_GRE_ENCAP_TYPE]) {
1271 		ret = true;
1272 		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1273 	}
1274 
1275 	if (data[IFLA_GRE_ENCAP_FLAGS]) {
1276 		ret = true;
1277 		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1278 	}
1279 
1280 	if (data[IFLA_GRE_ENCAP_SPORT]) {
1281 		ret = true;
1282 		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1283 	}
1284 
1285 	if (data[IFLA_GRE_ENCAP_DPORT]) {
1286 		ret = true;
1287 		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1288 	}
1289 
1290 	return ret;
1291 }
1292 
1293 static int gre_tap_init(struct net_device *dev)
1294 {
1295 	__gre_tunnel_init(dev);
1296 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1297 	netif_keep_dst(dev);
1298 
1299 	return ip_tunnel_init(dev);
1300 }
1301 
1302 static const struct net_device_ops gre_tap_netdev_ops = {
1303 	.ndo_init		= gre_tap_init,
1304 	.ndo_uninit		= ip_tunnel_uninit,
1305 	.ndo_start_xmit		= gre_tap_xmit,
1306 	.ndo_set_mac_address	= eth_mac_addr,
1307 	.ndo_validate_addr	= eth_validate_addr,
1308 	.ndo_change_mtu		= ip_tunnel_change_mtu,
1309 	.ndo_get_stats64	= ip_tunnel_get_stats64,
1310 	.ndo_get_iflink		= ip_tunnel_get_iflink,
1311 	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1312 };
1313 
1314 static int erspan_tunnel_init(struct net_device *dev)
1315 {
1316 	struct ip_tunnel *tunnel = netdev_priv(dev);
1317 
1318 	tunnel->tun_hlen = 8;
1319 	tunnel->parms.iph.protocol = IPPROTO_GRE;
1320 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1321 		       erspan_hdr_len(tunnel->erspan_ver);
1322 
1323 	dev->features		|= GRE_FEATURES;
1324 	dev->hw_features	|= GRE_FEATURES;
1325 	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
1326 	netif_keep_dst(dev);
1327 
1328 	return ip_tunnel_init(dev);
1329 }
1330 
1331 static const struct net_device_ops erspan_netdev_ops = {
1332 	.ndo_init		= erspan_tunnel_init,
1333 	.ndo_uninit		= ip_tunnel_uninit,
1334 	.ndo_start_xmit		= erspan_xmit,
1335 	.ndo_set_mac_address	= eth_mac_addr,
1336 	.ndo_validate_addr	= eth_validate_addr,
1337 	.ndo_change_mtu		= ip_tunnel_change_mtu,
1338 	.ndo_get_stats64	= ip_tunnel_get_stats64,
1339 	.ndo_get_iflink		= ip_tunnel_get_iflink,
1340 	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1341 };
1342 
1343 static void ipgre_tap_setup(struct net_device *dev)
1344 {
1345 	ether_setup(dev);
1346 	dev->max_mtu = 0;
1347 	dev->netdev_ops	= &gre_tap_netdev_ops;
1348 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1349 	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
1350 	ip_tunnel_setup(dev, gre_tap_net_id);
1351 }
1352 
1353 static int ipgre_newlink(struct net *src_net, struct net_device *dev,
1354 			 struct nlattr *tb[], struct nlattr *data[],
1355 			 struct netlink_ext_ack *extack)
1356 {
1357 	struct ip_tunnel_parm p;
1358 	struct ip_tunnel_encap ipencap;
1359 	__u32 fwmark = 0;
1360 	int err;
1361 
1362 	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1363 		struct ip_tunnel *t = netdev_priv(dev);
1364 		err = ip_tunnel_encap_setup(t, &ipencap);
1365 
1366 		if (err < 0)
1367 			return err;
1368 	}
1369 
1370 	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1371 	if (err < 0)
1372 		return err;
1373 	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1374 }
1375 
1376 static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1377 			    struct nlattr *data[],
1378 			    struct netlink_ext_ack *extack)
1379 {
1380 	struct ip_tunnel *t = netdev_priv(dev);
1381 	struct ip_tunnel_encap ipencap;
1382 	__u32 fwmark = t->fwmark;
1383 	struct ip_tunnel_parm p;
1384 	int err;
1385 
1386 	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1387 		err = ip_tunnel_encap_setup(t, &ipencap);
1388 
1389 		if (err < 0)
1390 			return err;
1391 	}
1392 
1393 	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1394 	if (err < 0)
1395 		return err;
1396 
1397 	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1398 	if (err < 0)
1399 		return err;
1400 
1401 	t->parms.i_flags = p.i_flags;
1402 	t->parms.o_flags = p.o_flags;
1403 
1404 	if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
1405 		ipgre_link_update(dev, !tb[IFLA_MTU]);
1406 
1407 	return 0;
1408 }
1409 
1410 static size_t ipgre_get_size(const struct net_device *dev)
1411 {
1412 	return
1413 		/* IFLA_GRE_LINK */
1414 		nla_total_size(4) +
1415 		/* IFLA_GRE_IFLAGS */
1416 		nla_total_size(2) +
1417 		/* IFLA_GRE_OFLAGS */
1418 		nla_total_size(2) +
1419 		/* IFLA_GRE_IKEY */
1420 		nla_total_size(4) +
1421 		/* IFLA_GRE_OKEY */
1422 		nla_total_size(4) +
1423 		/* IFLA_GRE_LOCAL */
1424 		nla_total_size(4) +
1425 		/* IFLA_GRE_REMOTE */
1426 		nla_total_size(4) +
1427 		/* IFLA_GRE_TTL */
1428 		nla_total_size(1) +
1429 		/* IFLA_GRE_TOS */
1430 		nla_total_size(1) +
1431 		/* IFLA_GRE_PMTUDISC */
1432 		nla_total_size(1) +
1433 		/* IFLA_GRE_ENCAP_TYPE */
1434 		nla_total_size(2) +
1435 		/* IFLA_GRE_ENCAP_FLAGS */
1436 		nla_total_size(2) +
1437 		/* IFLA_GRE_ENCAP_SPORT */
1438 		nla_total_size(2) +
1439 		/* IFLA_GRE_ENCAP_DPORT */
1440 		nla_total_size(2) +
1441 		/* IFLA_GRE_COLLECT_METADATA */
1442 		nla_total_size(0) +
1443 		/* IFLA_GRE_IGNORE_DF */
1444 		nla_total_size(1) +
1445 		/* IFLA_GRE_FWMARK */
1446 		nla_total_size(4) +
1447 		/* IFLA_GRE_ERSPAN_INDEX */
1448 		nla_total_size(4) +
1449 		/* IFLA_GRE_ERSPAN_VER */
1450 		nla_total_size(1) +
1451 		/* IFLA_GRE_ERSPAN_DIR */
1452 		nla_total_size(1) +
1453 		/* IFLA_GRE_ERSPAN_HWID */
1454 		nla_total_size(2) +
1455 		0;
1456 }
1457 
1458 static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1459 {
1460 	struct ip_tunnel *t = netdev_priv(dev);
1461 	struct ip_tunnel_parm *p = &t->parms;
1462 
1463 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1464 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
1465 			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
1466 	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
1467 			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
1468 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1469 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1470 	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1471 	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1472 	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1473 	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1474 	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1475 		       !!(p->iph.frag_off & htons(IP_DF))) ||
1476 	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
1477 		goto nla_put_failure;
1478 
1479 	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
1480 			t->encap.type) ||
1481 	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
1482 			 t->encap.sport) ||
1483 	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
1484 			 t->encap.dport) ||
1485 	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
1486 			t->encap.flags))
1487 		goto nla_put_failure;
1488 
1489 	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
1490 		goto nla_put_failure;
1491 
1492 	if (t->collect_md) {
1493 		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
1494 			goto nla_put_failure;
1495 	}
1496 
1497 	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1498 		goto nla_put_failure;
1499 
1500 	if (t->erspan_ver == 1) {
1501 		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1502 			goto nla_put_failure;
1503 	} else if (t->erspan_ver == 2) {
1504 		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1505 			goto nla_put_failure;
1506 		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1507 			goto nla_put_failure;
1508 	}
1509 
1510 	return 0;
1511 
1512 nla_put_failure:
1513 	return -EMSGSIZE;
1514 }
1515 
1516 static void erspan_setup(struct net_device *dev)
1517 {
1518 	struct ip_tunnel *t = netdev_priv(dev);
1519 
1520 	ether_setup(dev);
1521 	dev->netdev_ops = &erspan_netdev_ops;
1522 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1523 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1524 	ip_tunnel_setup(dev, erspan_net_id);
1525 	t->erspan_ver = 1;
1526 }
1527 
1528 static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1529 	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
1530 	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
1531 	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
1532 	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
1533 	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
1534 	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
1535 	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
1536 	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
1537 	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
1538 	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
1539 	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
1540 	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
1541 	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
1542 	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
1543 	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
1544 	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
1545 	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
1546 	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
1547 	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
1548 	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
1549 	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
1550 };
1551 
1552 static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1553 	.kind		= "gre",
1554 	.maxtype	= IFLA_GRE_MAX,
1555 	.policy		= ipgre_policy,
1556 	.priv_size	= sizeof(struct ip_tunnel),
1557 	.setup		= ipgre_tunnel_setup,
1558 	.validate	= ipgre_tunnel_validate,
1559 	.newlink	= ipgre_newlink,
1560 	.changelink	= ipgre_changelink,
1561 	.dellink	= ip_tunnel_dellink,
1562 	.get_size	= ipgre_get_size,
1563 	.fill_info	= ipgre_fill_info,
1564 	.get_link_net	= ip_tunnel_get_link_net,
1565 };
1566 
1567 static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1568 	.kind		= "gretap",
1569 	.maxtype	= IFLA_GRE_MAX,
1570 	.policy		= ipgre_policy,
1571 	.priv_size	= sizeof(struct ip_tunnel),
1572 	.setup		= ipgre_tap_setup,
1573 	.validate	= ipgre_tap_validate,
1574 	.newlink	= ipgre_newlink,
1575 	.changelink	= ipgre_changelink,
1576 	.dellink	= ip_tunnel_dellink,
1577 	.get_size	= ipgre_get_size,
1578 	.fill_info	= ipgre_fill_info,
1579 	.get_link_net	= ip_tunnel_get_link_net,
1580 };
1581 
1582 static struct rtnl_link_ops erspan_link_ops __read_mostly = {
1583 	.kind		= "erspan",
1584 	.maxtype	= IFLA_GRE_MAX,
1585 	.policy		= ipgre_policy,
1586 	.priv_size	= sizeof(struct ip_tunnel),
1587 	.setup		= erspan_setup,
1588 	.validate	= erspan_validate,
1589 	.newlink	= ipgre_newlink,
1590 	.changelink	= ipgre_changelink,
1591 	.dellink	= ip_tunnel_dellink,
1592 	.get_size	= ipgre_get_size,
1593 	.fill_info	= ipgre_fill_info,
1594 	.get_link_net	= ip_tunnel_get_link_net,
1595 };
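
/* Illustrative iproute2 invocations exercising the three link types
 * defined above (RFC 5737 documentation addresses; key and session
 * values are examples only):
 *
 *	ip link add gre1 type gre local 192.0.2.1 remote 198.51.100.1 ttl 64
 *	ip link add gretap1 type gretap local 192.0.2.1 remote 198.51.100.1
 *	ip link add erspan1 type erspan local 192.0.2.1 remote 198.51.100.1 \
 *		seq key 100 erspan 123
 *
 * The erspan example uses version 1 (the default set in erspan_setup());
 * erspan_validate() requires the GRE seq+key flags and keys within the
 * 10-bit session ID range.
 */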
1596 
1597 struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1598 					u8 name_assign_type)
1599 {
1600 	struct nlattr *tb[IFLA_MAX + 1];
1601 	struct net_device *dev;
1602 	LIST_HEAD(list_kill);
1603 	struct ip_tunnel *t;
1604 	int err;
1605 
1606 	memset(&tb, 0, sizeof(tb));
1607 
1608 	dev = rtnl_create_link(net, name, name_assign_type,
1609 			       &ipgre_tap_ops, tb, NULL);
1610 	if (IS_ERR(dev))
1611 		return dev;
1612 
1613 	/* Configure flow-based GRE device. */
1614 	t = netdev_priv(dev);
1615 	t->collect_md = true;
1616 
1617 	err = ipgre_newlink(net, dev, tb, NULL, NULL);
1618 	if (err < 0) {
1619 		free_netdev(dev);
1620 		return ERR_PTR(err);
1621 	}
1622 
1623 	/* openvswitch users expect packet sizes to be unrestricted,
1624 	 * so set the largest MTU we can.
1625 	 */
1626 	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
1627 	if (err)
1628 		goto out;
1629 
1630 	err = rtnl_configure_link(dev, NULL);
1631 	if (err < 0)
1632 		goto out;
1633 
1634 	return dev;
1635 out:
1636 	ip_tunnel_dellink(dev, &list_kill);
1637 	unregister_netdevice_many(&list_kill);
1638 	return ERR_PTR(err);
1639 }
1640 EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
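
/* A hedged usage sketch for gretap_fb_dev_create(): how a caller such as
 * openvswitch might create a flow-based (collect_md) gretap device. RTNL
 * must be held across the call, since the helper uses rtnl_create_link()
 * and rtnl_configure_link(); "ovs-gretap0" is an illustrative name and
 * error handling is trimmed:
 *
 *	struct net_device *dev;
 *
 *	rtnl_lock();
 *	dev = gretap_fb_dev_create(net, "ovs-gretap0", NET_NAME_USER);
 *	rtnl_unlock();
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 */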
1641 
1642 static int __net_init ipgre_tap_init_net(struct net *net)
1643 {
1644 	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
1645 }
1646 
1647 static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
1648 {
1649 	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
1650 }
1651 
1652 static struct pernet_operations ipgre_tap_net_ops = {
1653 	.init = ipgre_tap_init_net,
1654 	.exit_batch = ipgre_tap_exit_batch_net,
1655 	.id   = &gre_tap_net_id,
1656 	.size = sizeof(struct ip_tunnel_net),
1657 };
1658 
1659 static int __net_init erspan_init_net(struct net *net)
1660 {
1661 	return ip_tunnel_init_net(net, erspan_net_id,
1662 				  &erspan_link_ops, "erspan0");
1663 }
1664 
1665 static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
1666 {
1667 	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
1668 }
1669 
1670 static struct pernet_operations erspan_net_ops = {
1671 	.init = erspan_init_net,
1672 	.exit_batch = erspan_exit_batch_net,
1673 	.id   = &erspan_net_id,
1674 	.size = sizeof(struct ip_tunnel_net),
1675 };
1676 
1677 static int __init ipgre_init(void)
1678 {
1679 	int err;
1680 
1681 	pr_info("GRE over IPv4 tunneling driver\n");
1682 
1683 	err = register_pernet_device(&ipgre_net_ops);
1684 	if (err < 0)
1685 		return err;
1686 
1687 	err = register_pernet_device(&ipgre_tap_net_ops);
1688 	if (err < 0)
1689 		goto pnet_tap_failed;
1690 
1691 	err = register_pernet_device(&erspan_net_ops);
1692 	if (err < 0)
1693 		goto pnet_erspan_failed;
1694 
1695 	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1696 	if (err < 0) {
1697 		pr_info("%s: can't add protocol\n", __func__);
1698 		goto add_proto_failed;
1699 	}
1700 
1701 	err = rtnl_link_register(&ipgre_link_ops);
1702 	if (err < 0)
1703 		goto rtnl_link_failed;
1704 
1705 	err = rtnl_link_register(&ipgre_tap_ops);
1706 	if (err < 0)
1707 		goto tap_ops_failed;
1708 
1709 	err = rtnl_link_register(&erspan_link_ops);
1710 	if (err < 0)
1711 		goto erspan_link_failed;
1712 
1713 	return 0;
1714 
1715 erspan_link_failed:
1716 	rtnl_link_unregister(&ipgre_tap_ops);
1717 tap_ops_failed:
1718 	rtnl_link_unregister(&ipgre_link_ops);
1719 rtnl_link_failed:
1720 	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1721 add_proto_failed:
1722 	unregister_pernet_device(&erspan_net_ops);
1723 pnet_erspan_failed:
1724 	unregister_pernet_device(&ipgre_tap_net_ops);
1725 pnet_tap_failed:
1726 	unregister_pernet_device(&ipgre_net_ops);
1727 	return err;
1728 }
1729 
1730 static void __exit ipgre_fini(void)
1731 {
1732 	rtnl_link_unregister(&ipgre_tap_ops);
1733 	rtnl_link_unregister(&ipgre_link_ops);
1734 	rtnl_link_unregister(&erspan_link_ops);
1735 	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1736 	unregister_pernet_device(&ipgre_tap_net_ops);
1737 	unregister_pernet_device(&ipgre_net_ops);
1738 	unregister_pernet_device(&erspan_net_ops);
1739 }
1740 
1741 module_init(ipgre_init);
1742 module_exit(ipgre_fini);
1743 MODULE_LICENSE("GPL");
1744 MODULE_ALIAS_RTNL_LINK("gre");
1745 MODULE_ALIAS_RTNL_LINK("gretap");
1746 MODULE_ALIAS_RTNL_LINK("erspan");
1747 MODULE_ALIAS_NETDEV("gre0");
1748 MODULE_ALIAS_NETDEV("gretap0");
1749 MODULE_ALIAS_NETDEV("erspan0");
1750