1 /*
2  *	Linux NET3:	GRE over IP protocol decoder.
3  *
4  *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
5  *
6  *	This program is free software; you can redistribute it and/or
7  *	modify it under the terms of the GNU General Public License
8  *	as published by the Free Software Foundation; either version
9  *	2 of the License, or (at your option) any later version.
10  *
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/capability.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <linux/uaccess.h>
21 #include <linux/skbuff.h>
22 #include <linux/netdevice.h>
23 #include <linux/in.h>
24 #include <linux/tcp.h>
25 #include <linux/udp.h>
26 #include <linux/if_arp.h>
27 #include <linux/if_vlan.h>
28 #include <linux/init.h>
29 #include <linux/in6.h>
30 #include <linux/inetdevice.h>
31 #include <linux/igmp.h>
32 #include <linux/netfilter_ipv4.h>
33 #include <linux/etherdevice.h>
34 #include <linux/if_ether.h>
35 
36 #include <net/sock.h>
37 #include <net/ip.h>
38 #include <net/icmp.h>
39 #include <net/protocol.h>
40 #include <net/ip_tunnels.h>
41 #include <net/arp.h>
42 #include <net/checksum.h>
43 #include <net/dsfield.h>
44 #include <net/inet_ecn.h>
45 #include <net/xfrm.h>
46 #include <net/net_namespace.h>
47 #include <net/netns/generic.h>
48 #include <net/rtnetlink.h>
49 #include <net/gre.h>
50 #include <net/dst_metadata.h>
51 #include <net/erspan.h>
52 
53 /*
54    Problems & solutions
55    --------------------
56 
57    1. The most important issue is detecting local dead loops.
58    They would cause complete host lockup in transmit, which
59    would be "resolved" by stack overflow or, if queueing is enabled,
60    by infinite looping in net_bh.
61 
62    We cannot track such dead loops during route installation;
63    it is an infeasible task. The most general solution would be
64    to keep an skb->encapsulation counter (a sort of local ttl)
65    and silently drop the packet when it expires. It is a good
66    solution, but it requires maintaining a new variable in ALL
67    skbs, even if no tunneling is used.
68 
69    Current solution: xmit_recursion breaks dead loops. This is a percpu
70    counter, which works because cpu migration is forbidden once we enter
71    the first ndo_start_xmit(). We force an exit if this counter reaches RECURSION_LIMIT.
72 
73    2. Networking dead loops would not kill routers, but would really
74    kill the network. The IP hop limit plays the role of "t->recursion" in this
75    case, if we copy it from the packet being encapsulated to the upper header.
76    It is a very good solution, but it introduces two problems:
77 
78    - Routing protocols using packets with ttl=1 (OSPF, RIP2)
79      do not work over tunnels.
80    - traceroute does not work. I planned to relay ICMP from the tunnel,
81      so that this problem would be solved and the traceroute output
82      would be even more informative. This idea appeared to be wrong:
83      only Linux complies with rfc1812 now (yes, guys, Linux is the only
84      true router now :-)); all other routers (at least in my neighbourhood)
85      return only 8 bytes of payload. It is the end.
86 
87    Hence, if we want OSPF to work or traceroute to say something reasonable,
88    we should search for another solution.
89 
90    One of them is to parse the packet, trying to detect an inner encapsulation
91    made by our node. It is difficult or even impossible, especially
92    taking into account fragmentation. To be short, ttl is no solution at all.
93 
94    Current solution: The solution was UNEXPECTEDLY SIMPLE.
95    We force the DF flag on tunnels with a preconfigured hop limit,
96    and that is ALL. :-) Well, it does not remove the problem completely,
97    but the exponential growth of network traffic is changed to linear
98    (branches that exceed the pmtu are pruned) and the tunnel mtu
99    rapidly degrades to a value <68, where looping stops.
100    Yes, it is not good if there exists a router in the loop
101    which does not force DF, even when the encapsulated packets have DF set.
102    But it is not our problem! Nobody could accuse us; we did
103    all that we could. Even if it was your gated that injected the
104    fatal route into the network, even if it was you who configured the
105    fatal static route: you are innocent. :-)
106 
107    Alexey Kuznetsov.
108  */
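/*
 * For illustration, a minimal sketch of the percpu recursion guard
 * described above. The real counter lives in the core dev layer, not
 * in this file, and every name below is made up for the sketch:
 *
 *	static DEFINE_PER_CPU(unsigned int, example_xmit_recursion);
 *	#define EXAMPLE_RECURSION_LIMIT	8
 *
 *	static int example_xmit(struct sk_buff *skb)
 *	{
 *		int ret;
 *
 *		if (__this_cpu_read(example_xmit_recursion) >
 *		    EXAMPLE_RECURSION_LIMIT) {
 *			kfree_skb(skb);	// dead loop detected: drop rather
 *			return -ELOOP;	// than overflow the stack
 *		}
 *		__this_cpu_inc(example_xmit_recursion);
 *		ret = example_really_transmit(skb); // may re-enter example_xmit()
 *		__this_cpu_dec(example_xmit_recursion);
 *		return ret;
 *	}
 *
 * No locking is needed: cpu migration is forbidden inside
 * ndo_start_xmit(), so the percpu counter is only ever touched by the
 * local cpu.
 */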
109 
110 static bool log_ecn_error = true;
111 module_param(log_ecn_error, bool, 0644);
112 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
113 
114 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
115 static int ipgre_tunnel_init(struct net_device *dev);
116 static void erspan_build_header(struct sk_buff *skb,
117 				u32 id, u32 index,
118 				bool truncate, bool is_ipv4);
119 
120 static unsigned int ipgre_net_id __read_mostly;
121 static unsigned int gre_tap_net_id __read_mostly;
122 static unsigned int erspan_net_id __read_mostly;
123 
124 static void ipgre_err(struct sk_buff *skb, u32 info,
125 		      const struct tnl_ptk_info *tpi)
126 {
127 
128 	/* All the routers (except for Linux) return only
129 	   8 bytes of packet payload. It means that precise relaying of
130 	   ICMP in the real Internet is absolutely infeasible.
131 
132 	   Moreover, Cisco "wise men" put the GRE key into the third word
133 	   of the GRE header. It makes it impossible to maintain even soft
134 	   state for keyed GRE tunnels with checksums enabled. Tell
135 	   them "thank you".
136 
137 	   Well, I wonder, rfc1812 was written by a Cisco employee;
138 	   why the hell do these idiots break standards established
139 	   by themselves???
140 	   */
141 	struct net *net = dev_net(skb->dev);
142 	struct ip_tunnel_net *itn;
143 	const struct iphdr *iph;
144 	const int type = icmp_hdr(skb)->type;
145 	const int code = icmp_hdr(skb)->code;
146 	unsigned int data_len = 0;
147 	struct ip_tunnel *t;
148 
149 	switch (type) {
150 	default:
151 	case ICMP_PARAMETERPROB:
152 		return;
153 
154 	case ICMP_DEST_UNREACH:
155 		switch (code) {
156 		case ICMP_SR_FAILED:
157 		case ICMP_PORT_UNREACH:
158 			/* Impossible event. */
159 			return;
160 		default:
161 			/* All others are translated to HOST_UNREACH.
162 			   rfc2003 contains "deep thoughts" about NET_UNREACH,
163 			   I believe they are just ether pollution. --ANK
164 			 */
165 			break;
166 		}
167 		break;
168 
169 	case ICMP_TIME_EXCEEDED:
170 		if (code != ICMP_EXC_TTL)
171 			return;
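		/* Per RFC 4884, the length of the original datagram is
		 * carried in units of 32-bit words, hence the
		 * multiplication by four below.
		 */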
172 		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
173 		break;
174 
175 	case ICMP_REDIRECT:
176 		break;
177 	}
178 
179 	if (tpi->proto == htons(ETH_P_TEB))
180 		itn = net_generic(net, gre_tap_net_id);
181 	else
182 		itn = net_generic(net, ipgre_net_id);
183 
184 	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
185 	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
186 			     iph->daddr, iph->saddr, tpi->key);
187 
188 	if (!t)
189 		return;
190 
191 #if IS_ENABLED(CONFIG_IPV6)
192 	if (tpi->proto == htons(ETH_P_IPV6) &&
193 	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
194 					type, data_len))
195 		return;
196 #endif
197 
198 	if (t->parms.iph.daddr == 0 ||
199 	    ipv4_is_multicast(t->parms.iph.daddr))
200 		return;
201 
202 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
203 		return;
204 
205 	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
206 		t->err_count++;
207 	else
208 		t->err_count = 1;
209 	t->err_time = jiffies;
210 }
211 
212 static void gre_err(struct sk_buff *skb, u32 info)
213 {
214 	/* All the routers (except for Linux) return only
215 	 * 8 bytes of packet payload. It means that precise relaying of
216 	 * ICMP in the real Internet is absolutely infeasible.
217 	 *
218 	 * Moreover, Cisco "wise men" put the GRE key into the third word
219 	 * of the GRE header. It makes it impossible to maintain even
220 	 * soft state for keyed GRE tunnels with checksums enabled.
221 	 * Tell them "thank you".
222 	 *
223 	 * Well, I wonder, rfc1812 was written by a Cisco employee;
224 	 * why the hell do these idiots break standards established
225 	 * by themselves???
226 	 */
227 
228 	const struct iphdr *iph = (struct iphdr *)skb->data;
229 	const int type = icmp_hdr(skb)->type;
230 	const int code = icmp_hdr(skb)->code;
231 	struct tnl_ptk_info tpi;
232 	bool csum_err = false;
233 
234 	if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
235 			     iph->ihl * 4) < 0) {
236 		if (!csum_err)		/* give up on parse errors other than bad csum */
237 			return;
238 	}
239 
240 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
241 		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
242 				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
243 		return;
244 	}
245 	if (type == ICMP_REDIRECT) {
246 		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
247 			      IPPROTO_GRE, 0);
248 		return;
249 	}
250 
251 	ipgre_err(skb, info, &tpi);
252 }
253 
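/* Receive path for ERSPAN-over-GRE: validate the base header, use the
 * 10-bit session ID as the tunnel key for the lookup and, for
 * collect-metadata tunnels, copy the ERSPAN metadata into the
 * per-packet tunnel info before handing off to ip_tunnel_rcv().
 */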
254 static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
255 		      int gre_hdr_len)
256 {
257 	struct net *net = dev_net(skb->dev);
258 	struct metadata_dst *tun_dst = NULL;
259 	struct erspan_base_hdr *ershdr;
260 	struct erspan_metadata *pkt_md;
261 	struct ip_tunnel_net *itn;
262 	struct ip_tunnel *tunnel;
263 	const struct iphdr *iph;
264 	struct erspan_md2 *md2;
265 	int ver;
266 	int len;
267 
268 	itn = net_generic(net, erspan_net_id);
269 	len = gre_hdr_len + sizeof(*ershdr);
270 
271 	/* Check the base header length */
272 	if (unlikely(!pskb_may_pull(skb, len)))
273 		return PACKET_REJECT;
274 
275 	iph = ip_hdr(skb);
276 	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
277 	ver = ershdr->ver;
278 
279 	/* The original GRE header does not have a key field,
280 	 * so use the ERSPAN 10-bit session ID as the key.
281 	 */
282 	tpi->key = cpu_to_be32(get_session_id(ershdr));
283 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
284 				  tpi->flags | TUNNEL_KEY,
285 				  iph->saddr, iph->daddr, tpi->key);
286 
287 	if (tunnel) {
288 		len = gre_hdr_len + erspan_hdr_len(ver);
289 		if (unlikely(!pskb_may_pull(skb, len)))
290 			return PACKET_REJECT;
291 
292 		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
293 		pkt_md = (struct erspan_metadata *)(ershdr + 1);
294 
295 		if (__iptunnel_pull_header(skb,
296 					   len,
297 					   htons(ETH_P_TEB),
298 					   false, false) < 0)
299 			goto drop;
300 
301 		if (tunnel->collect_md) {
302 			struct ip_tunnel_info *info;
303 			struct erspan_metadata *md;
304 			__be64 tun_id;
305 			__be16 flags;
306 
307 			tpi->flags |= TUNNEL_KEY;
308 			flags = tpi->flags;
309 			tun_id = key32_to_tunnel_id(tpi->key);
310 
311 			tun_dst = ip_tun_rx_dst(skb, flags,
312 						tun_id, sizeof(*md));
313 			if (!tun_dst)
314 				return PACKET_REJECT;
315 
316 			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
317 			md->version = ver;
318 			md2 = &md->u.md2;
319 			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
320 						       ERSPAN_V2_MDSIZE);
321 
322 			info = &tun_dst->u.tun_info;
323 			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
324 			info->options_len = sizeof(*md);
325 		}
326 
327 		skb_reset_mac_header(skb);
328 		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
329 		return PACKET_RCVD;
330 	}
331 drop:
332 	kfree_skb(skb);
333 	return PACKET_RCVD;
334 }
335 
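/* Common GRE receive: look the tunnel up by incoming interface,
 * addresses and key. Returns PACKET_RCVD when the packet was consumed
 * (or dropped), PACKET_REJECT on error, and PACKET_NEXT when no
 * matching tunnel exists so the caller may try another table.
 */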
336 static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
337 		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
338 {
339 	struct metadata_dst *tun_dst = NULL;
340 	const struct iphdr *iph;
341 	struct ip_tunnel *tunnel;
342 
343 	iph = ip_hdr(skb);
344 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
345 				  iph->saddr, iph->daddr, tpi->key);
346 
347 	if (tunnel) {
348 		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
349 					   raw_proto, false) < 0)
350 			goto drop;
351 
352 		if (tunnel->dev->type != ARPHRD_NONE)
353 			skb_pop_mac_header(skb);
354 		else
355 			skb_reset_mac_header(skb);
356 		if (tunnel->collect_md) {
357 			__be16 flags;
358 			__be64 tun_id;
359 
360 			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
361 			tun_id = key32_to_tunnel_id(tpi->key);
362 			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
363 			if (!tun_dst)
364 				return PACKET_REJECT;
365 		}
366 
367 		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
368 		return PACKET_RCVD;
369 	}
370 	return PACKET_NEXT;
371 
372 drop:
373 	kfree_skb(skb);
374 	return PACKET_RCVD;
375 }
376 
377 static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
378 		     int hdr_len)
379 {
380 	struct net *net = dev_net(skb->dev);
381 	struct ip_tunnel_net *itn;
382 	int res;
383 
384 	if (tpi->proto == htons(ETH_P_TEB))
385 		itn = net_generic(net, gre_tap_net_id);
386 	else
387 		itn = net_generic(net, ipgre_net_id);
388 
389 	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
390 	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
391 		/* ipgre tunnels in collect metadata mode should also
392 		 * receive ETH_P_TEB traffic.
393 		 */
394 		itn = net_generic(net, ipgre_net_id);
395 		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
396 	}
397 	return res;
398 }
399 
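/* Entry point for GREPROTO_CISCO packets: parse the GRE header, drop
 * looped-back multicast, dispatch ERSPAN traffic to erspan_rcv() and
 * everything else to ipgre_rcv(), and answer unclaimed packets with an
 * ICMP port unreachable.
 */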
400 static int gre_rcv(struct sk_buff *skb)
401 {
402 	struct tnl_ptk_info tpi;
403 	bool csum_err = false;
404 	int hdr_len;
405 
406 #ifdef CONFIG_NET_IPGRE_BROADCAST
407 	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
408 		/* Looped back packet, drop it! */
409 		if (rt_is_output_route(skb_rtable(skb)))
410 			goto drop;
411 	}
412 #endif
413 
414 	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
415 	if (hdr_len < 0)
416 		goto drop;
417 
418 	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
419 		     tpi.proto == htons(ETH_P_ERSPAN2))) {
420 		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
421 			return 0;
422 		goto out;
423 	}
424 
425 	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
426 		return 0;
427 
428 out:
429 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
430 drop:
431 	kfree_skb(skb);
432 	return 0;
433 }
434 
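/* Push the GRE header (bumping the output sequence number when
 * TUNNEL_SEQ is set) and hand the packet to the generic IP tunnel
 * transmit path.
 */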
435 static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
436 		       const struct iphdr *tnl_params,
437 		       __be16 proto)
438 {
439 	struct ip_tunnel *tunnel = netdev_priv(dev);
440 
441 	if (tunnel->parms.o_flags & TUNNEL_SEQ)
442 		tunnel->o_seqno++;
443 
444 	/* Push GRE header. */
445 	gre_build_header(skb, tunnel->tun_hlen,
446 			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
447 			 htonl(tunnel->o_seqno));
448 
449 	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
450 }
451 
452 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
453 {
454 	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
455 }
456 
457 static struct rtable *gre_get_rt(struct sk_buff *skb,
458 				 struct net_device *dev,
459 				 struct flowi4 *fl,
460 				 const struct ip_tunnel_key *key)
461 {
462 	struct net *net = dev_net(dev);
463 
464 	memset(fl, 0, sizeof(*fl));
465 	fl->daddr = key->u.ipv4.dst;
466 	fl->saddr = key->u.ipv4.src;
467 	fl->flowi4_tos = RT_TOS(key->tos);
468 	fl->flowi4_mark = skb->mark;
469 	fl->flowi4_proto = IPPROTO_GRE;
470 
471 	return ip_route_output_key(net, fl);
472 }
473 
474 static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
475 				      struct net_device *dev,
476 				      struct flowi4 *fl,
477 				      int tunnel_hlen)
478 {
479 	struct ip_tunnel_info *tun_info;
480 	const struct ip_tunnel_key *key;
481 	struct rtable *rt = NULL;
482 	int min_headroom;
483 	bool use_cache;
484 	int err;
485 
486 	tun_info = skb_tunnel_info(skb);
487 	key = &tun_info->key;
488 	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
489 
490 	if (use_cache)
491 		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
492 	if (!rt) {
493 		rt = gre_get_rt(skb, dev, fl, key);
494 		if (IS_ERR(rt))
495 			goto err_free_skb;
496 		if (use_cache)
497 			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
498 					  fl->saddr);
499 	}
500 
501 	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
502 			+ tunnel_hlen + sizeof(struct iphdr);
503 	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
504 		int head_delta = SKB_DATA_ALIGN(min_headroom -
505 						skb_headroom(skb) +
506 						16);
507 		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
508 				       0, GFP_ATOMIC);
509 		if (unlikely(err))
510 			goto err_free_rt;
511 	}
512 	return rt;
513 
514 err_free_rt:
515 	ip_rt_put(rt);
516 err_free_skb:
517 	kfree_skb(skb);
518 	dev->stats.tx_dropped++;
519 	return NULL;
520 }
521 
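/* Transmit for collect-metadata (flow-based) tunnels, e.g. devices
 * created with "ip link add gre1 type gretap external" (iproute2
 * spelling, assuming a reasonably recent version): the route, key and
 * flags come from the per-skb tunnel metadata rather than from the
 * device parameters.
 */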
522 static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
523 			__be16 proto)
524 {
525 	struct ip_tunnel *tunnel = netdev_priv(dev);
526 	struct ip_tunnel_info *tun_info;
527 	const struct ip_tunnel_key *key;
528 	struct rtable *rt = NULL;
529 	struct flowi4 fl;
530 	int tunnel_hlen;
531 	__be16 df, flags;
532 
533 	tun_info = skb_tunnel_info(skb);
534 	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
535 		     ip_tunnel_info_af(tun_info) != AF_INET))
536 		goto err_free_skb;
537 
538 	key = &tun_info->key;
539 	tunnel_hlen = gre_calc_hlen(key->tun_flags);
540 
541 	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
542 	if (!rt)
543 		return;
544 
545 	/* Push Tunnel header. */
546 	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
547 		goto err_free_rt;
548 
549 	flags = tun_info->key.tun_flags &
550 		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
551 	gre_build_header(skb, tunnel_hlen, flags, proto,
552 			 tunnel_id_to_key32(tun_info->key.tun_id),
553 			 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
554 
555 	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
556 
557 	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
558 		      key->tos, key->ttl, df, false);
559 	return;
560 
561 err_free_rt:
562 	ip_rt_put(rt);
563 err_free_skb:
564 	kfree_skb(skb);
565 	dev->stats.tx_dropped++;
566 }
567 
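/* Collect-metadata transmit for ERSPAN: like gre_fb_xmit(), but builds
 * the version-specific ERSPAN header (truncating oversized frames)
 * before the fixed 8-byte GRE header.
 */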
568 static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
569 			   __be16 proto)
570 {
571 	struct ip_tunnel *tunnel = netdev_priv(dev);
572 	struct ip_tunnel_info *tun_info;
573 	const struct ip_tunnel_key *key;
574 	struct erspan_metadata *md;
575 	struct rtable *rt = NULL;
576 	bool truncate = false;
577 	struct flowi4 fl;
578 	int tunnel_hlen;
579 	int version;
580 	__be16 df;
581 
582 	tun_info = skb_tunnel_info(skb);
583 	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
584 		     ip_tunnel_info_af(tun_info) != AF_INET))
585 		goto err_free_skb;
586 
587 	key = &tun_info->key;
588 	md = ip_tunnel_info_opts(tun_info);
589 	if (!md)
590 		goto err_free_rt;
591 
592 	/* ERSPAN has a fixed 8-byte GRE header */
593 	version = md->version;
594 	tunnel_hlen = 8 + erspan_hdr_len(version);
595 
596 	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
597 	if (!rt)
598 		return;
599 
600 	if (gre_handle_offloads(skb, false))
601 		goto err_free_rt;
602 
603 	if (skb->len > dev->mtu + dev->hard_header_len) {
604 		pskb_trim(skb, dev->mtu + dev->hard_header_len);
605 		truncate = true;
606 	}
607 
608 	if (version == 1) {
609 		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
610 				    ntohl(md->u.index), truncate, true);
611 	} else if (version == 2) {
612 		erspan_build_header_v2(skb,
613 				       ntohl(tunnel_id_to_key32(key->tun_id)),
614 				       md->u.md2.dir,
615 				       get_hwid(&md->u.md2),
616 				       truncate, true);
617 	} else {
618 		goto err_free_rt;
619 	}
620 
621 	gre_build_header(skb, 8, TUNNEL_SEQ,
622 			 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
623 
624 	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
625 
626 	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
627 		      key->tos, key->ttl, df, false);
628 	return;
629 
630 err_free_rt:
631 	ip_rt_put(rt);
632 err_free_skb:
633 	kfree_skb(skb);
634 	dev->stats.tx_dropped++;
635 }
636 
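/* ndo_fill_metadata_dst: resolve the route for a flow-based tunnel so
 * that callers (e.g. openvswitch) learn the local source address
 * before transmitting.
 */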
637 static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
638 {
639 	struct ip_tunnel_info *info = skb_tunnel_info(skb);
640 	struct rtable *rt;
641 	struct flowi4 fl4;
642 
643 	if (ip_tunnel_info_af(info) != AF_INET)
644 		return -EINVAL;
645 
646 	rt = gre_get_rt(skb, dev, &fl4, &info->key);
647 	if (IS_ERR(rt))
648 		return PTR_ERR(rt);
649 
650 	ip_rt_put(rt);
651 	info->key.u.ipv4.src = fl4.saddr;
652 	return 0;
653 }
654 
655 static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
656 			      struct net_device *dev)
657 {
658 	struct ip_tunnel *tunnel = netdev_priv(dev);
659 	const struct iphdr *tnl_params;
660 
661 	if (tunnel->collect_md) {
662 		gre_fb_xmit(skb, dev, skb->protocol);
663 		return NETDEV_TX_OK;
664 	}
665 
666 	if (dev->header_ops) {
667 		/* Need space for new headers */
668 		if (skb_cow_head(skb, dev->needed_headroom -
669 				      (tunnel->hlen + sizeof(struct iphdr))))
670 			goto free_skb;
671 
672 		tnl_params = (const struct iphdr *)skb->data;
673 
674 		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
675 		 * to gre header.
676 		 */
677 		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
678 		skb_reset_mac_header(skb);
679 	} else {
680 		if (skb_cow_head(skb, dev->needed_headroom))
681 			goto free_skb;
682 
683 		tnl_params = &tunnel->parms.iph;
684 	}
685 
686 	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
687 		goto free_skb;
688 
689 	__gre_xmit(skb, dev, tnl_params, skb->protocol);
690 	return NETDEV_TX_OK;
691 
692 free_skb:
693 	kfree_skb(skb);
694 	dev->stats.tx_dropped++;
695 	return NETDEV_TX_OK;
696 }
697 
698 static netdev_tx_t erspan_xmit(struct sk_buff *skb,
699 			       struct net_device *dev)
700 {
701 	struct ip_tunnel *tunnel = netdev_priv(dev);
702 	bool truncate = false;
703 
704 	if (tunnel->collect_md) {
705 		erspan_fb_xmit(skb, dev, skb->protocol);
706 		return NETDEV_TX_OK;
707 	}
708 
709 	if (gre_handle_offloads(skb, false))
710 		goto free_skb;
711 
712 	if (skb_cow_head(skb, dev->needed_headroom))
713 		goto free_skb;
714 
715 	if (skb->len > dev->mtu + dev->hard_header_len) {
716 		pskb_trim(skb, dev->mtu + dev->hard_header_len);
717 		truncate = true;
718 	}
719 
720 	/* Push ERSPAN header */
721 	if (tunnel->erspan_ver == 1)
722 		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
723 				    tunnel->index,
724 				    truncate, true);
725 	else
726 		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
727 				       tunnel->dir, tunnel->hwid,
728 				       truncate, true);
729 
730 	tunnel->parms.o_flags &= ~TUNNEL_KEY;
731 	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
732 	return NETDEV_TX_OK;
733 
734 free_skb:
735 	kfree_skb(skb);
736 	dev->stats.tx_dropped++;
737 	return NETDEV_TX_OK;
738 }
739 
740 static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
741 				struct net_device *dev)
742 {
743 	struct ip_tunnel *tunnel = netdev_priv(dev);
744 
745 	if (tunnel->collect_md) {
746 		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
747 		return NETDEV_TX_OK;
748 	}
749 
750 	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
751 		goto free_skb;
752 
753 	if (skb_cow_head(skb, dev->needed_headroom))
754 		goto free_skb;
755 
756 	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
757 	return NETDEV_TX_OK;
758 
759 free_skb:
760 	kfree_skb(skb);
761 	dev->stats.tx_dropped++;
762 	return NETDEV_TX_OK;
763 }
764 
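/* Recompute the header length, needed headroom and (optionally) the
 * MTU after the output flags have changed, and re-enable software GSO
 * and LLTX when the new flags allow it.
 */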
765 static void ipgre_link_update(struct net_device *dev, bool set_mtu)
766 {
767 	struct ip_tunnel *tunnel = netdev_priv(dev);
768 	int len;
769 
770 	len = tunnel->tun_hlen;
771 	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
772 	len = tunnel->tun_hlen - len;
773 	tunnel->hlen = tunnel->hlen + len;
774 
775 	dev->needed_headroom = dev->needed_headroom + len;
776 	if (set_mtu)
777 		dev->mtu = max_t(int, dev->mtu - len, 68);
778 
779 	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
780 		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
781 		    tunnel->encap.type == TUNNEL_ENCAP_NONE) {
782 			dev->features |= NETIF_F_GSO_SOFTWARE;
783 			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
784 		}
785 		dev->features |= NETIF_F_LLTX;
786 	}
787 }
788 
789 static int ipgre_tunnel_ioctl(struct net_device *dev,
790 			      struct ifreq *ifr, int cmd)
791 {
792 	struct ip_tunnel_parm p;
793 	int err;
794 
795 	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
796 		return -EFAULT;
797 
798 	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
799 		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
800 		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
801 		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
802 			return -EINVAL;
803 	}
804 
805 	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
806 	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);
807 
808 	err = ip_tunnel_ioctl(dev, &p, cmd);
809 	if (err)
810 		return err;
811 
812 	if (cmd == SIOCCHGTUNNEL) {
813 		struct ip_tunnel *t = netdev_priv(dev);
814 
815 		t->parms.i_flags = p.i_flags;
816 		t->parms.o_flags = p.o_flags;
817 
818 		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
819 			ipgre_link_update(dev, true);
820 	}
821 
822 	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
823 	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);
824 
825 	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
826 		return -EFAULT;
827 
828 	return 0;
829 }
830 
831 /* Nice toy. Unfortunately, useless in real life :-)
832    It makes it possible to construct a virtual multiprotocol broadcast "LAN"
833    over the Internet, provided multicast routing is tuned.
834 
835 
836    I have no idea whether this bicycle was invented before me,
837    so I had to set ARPHRD_IPGRE to a random value.
838    I have the impression that Cisco could make something similar,
839    but this feature is apparently missing in IOS<=11.2(8).
840 
841    I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
842    with broadcast 224.66.66.66. If you have access to the mbone, play with me :-)
843 
844    ping -t 255 224.66.66.66
845 
846    If nobody answers, the mbone does not work.
847 
848    ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
849    ip addr add 10.66.66.<somewhat>/24 dev Universe
850    ifconfig Universe up
851    ifconfig Universe add fe80::<Your_real_addr>/10
852    ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
853    ftp 10.66.66.66
854    ...
855    ftp fec0:6666:6666::193.233.7.65
856    ...
857  */
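/* header_ops->create for broadcast GRE: prepend the IP and GRE
 * headers. Returns the header length when the destination is known,
 * or its negative to signal a partial header, following the usual
 * header_ops convention.
 */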
858 static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
859 			unsigned short type,
860 			const void *daddr, const void *saddr, unsigned int len)
861 {
862 	struct ip_tunnel *t = netdev_priv(dev);
863 	struct iphdr *iph;
864 	struct gre_base_hdr *greh;
865 
866 	iph = skb_push(skb, t->hlen + sizeof(*iph));
867 	greh = (struct gre_base_hdr *)(iph+1);
868 	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
869 	greh->protocol = htons(type);
870 
871 	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
872 
873 	/* Set the source hardware address. */
874 	if (saddr)
875 		memcpy(&iph->saddr, saddr, 4);
876 	if (daddr)
877 		memcpy(&iph->daddr, daddr, 4);
878 	if (iph->daddr)
879 		return t->hlen + sizeof(*iph);
880 
881 	return -(t->hlen + sizeof(*iph));
882 }
883 
884 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
885 {
886 	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
887 	memcpy(haddr, &iph->saddr, 4);
888 	return 4;
889 }
890 
891 static const struct header_ops ipgre_header_ops = {
892 	.create	= ipgre_header,
893 	.parse	= ipgre_header_parse,
894 };
895 
896 #ifdef CONFIG_NET_IPGRE_BROADCAST
897 static int ipgre_open(struct net_device *dev)
898 {
899 	struct ip_tunnel *t = netdev_priv(dev);
900 
901 	if (ipv4_is_multicast(t->parms.iph.daddr)) {
902 		struct flowi4 fl4;
903 		struct rtable *rt;
904 
905 		rt = ip_route_output_gre(t->net, &fl4,
906 					 t->parms.iph.daddr,
907 					 t->parms.iph.saddr,
908 					 t->parms.o_key,
909 					 RT_TOS(t->parms.iph.tos),
910 					 t->parms.link);
911 		if (IS_ERR(rt))
912 			return -EADDRNOTAVAIL;
913 		dev = rt->dst.dev;
914 		ip_rt_put(rt);
915 		if (!__in_dev_get_rtnl(dev))
916 			return -EADDRNOTAVAIL;
917 		t->mlink = dev->ifindex;
918 		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
919 	}
920 	return 0;
921 }
922 
923 static int ipgre_close(struct net_device *dev)
924 {
925 	struct ip_tunnel *t = netdev_priv(dev);
926 
927 	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
928 		struct in_device *in_dev;
929 		in_dev = inetdev_by_index(t->net, t->mlink);
930 		if (in_dev)
931 			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
932 	}
933 	return 0;
934 }
935 #endif
936 
937 static const struct net_device_ops ipgre_netdev_ops = {
938 	.ndo_init		= ipgre_tunnel_init,
939 	.ndo_uninit		= ip_tunnel_uninit,
940 #ifdef CONFIG_NET_IPGRE_BROADCAST
941 	.ndo_open		= ipgre_open,
942 	.ndo_stop		= ipgre_close,
943 #endif
944 	.ndo_start_xmit		= ipgre_xmit,
945 	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
946 	.ndo_change_mtu		= ip_tunnel_change_mtu,
947 	.ndo_get_stats64	= ip_tunnel_get_stats64,
948 	.ndo_get_iflink		= ip_tunnel_get_iflink,
949 };
950 
951 #define GRE_FEATURES (NETIF_F_SG |		\
952 		      NETIF_F_FRAGLIST |	\
953 		      NETIF_F_HIGHDMA |		\
954 		      NETIF_F_HW_CSUM)
955 
956 static void ipgre_tunnel_setup(struct net_device *dev)
957 {
958 	dev->netdev_ops		= &ipgre_netdev_ops;
959 	dev->type		= ARPHRD_IPGRE;
960 	ip_tunnel_setup(dev, ipgre_net_id);
961 }
962 
963 static void __gre_tunnel_init(struct net_device *dev)
964 {
965 	struct ip_tunnel *tunnel;
966 	int t_hlen;
967 
968 	tunnel = netdev_priv(dev);
969 	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
970 	tunnel->parms.iph.protocol = IPPROTO_GRE;
971 
972 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
973 
974 	t_hlen = tunnel->hlen + sizeof(struct iphdr);
975 
976 	dev->features		|= GRE_FEATURES;
977 	dev->hw_features	|= GRE_FEATURES;
978 
979 	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
980 		/* TCP offload with GRE SEQ is not supported, nor
981 		 * can we support 2 levels of outer headers requiring
982 		 * an update.
983 		 */
984 		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
985 		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
986 			dev->features    |= NETIF_F_GSO_SOFTWARE;
987 			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
988 		}
989 
990 		/* Can use a lockless transmit, unless we generate
991 		 * output sequences
992 		 */
993 		dev->features |= NETIF_F_LLTX;
994 	}
995 }
996 
997 static int ipgre_tunnel_init(struct net_device *dev)
998 {
999 	struct ip_tunnel *tunnel = netdev_priv(dev);
1000 	struct iphdr *iph = &tunnel->parms.iph;
1001 
1002 	__gre_tunnel_init(dev);
1003 
1004 	memcpy(dev->dev_addr, &iph->saddr, 4);
1005 	memcpy(dev->broadcast, &iph->daddr, 4);
1006 
1007 	dev->flags		= IFF_NOARP;
1008 	netif_keep_dst(dev);
1009 	dev->addr_len		= 4;
1010 
1011 	if (iph->daddr && !tunnel->collect_md) {
1012 #ifdef CONFIG_NET_IPGRE_BROADCAST
1013 		if (ipv4_is_multicast(iph->daddr)) {
1014 			if (!iph->saddr)
1015 				return -EINVAL;
1016 			dev->flags = IFF_BROADCAST;
1017 			dev->header_ops = &ipgre_header_ops;
1018 		}
1019 #endif
1020 	} else if (!tunnel->collect_md) {
1021 		dev->header_ops = &ipgre_header_ops;
1022 	}
1023 
1024 	return ip_tunnel_init(dev);
1025 }
1026 
1027 static const struct gre_protocol ipgre_protocol = {
1028 	.handler     = gre_rcv,
1029 	.err_handler = gre_err,
1030 };
1031 
1032 static int __net_init ipgre_init_net(struct net *net)
1033 {
1034 	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
1035 }
1036 
1037 static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
1038 {
1039 	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
1040 }
1041 
1042 static struct pernet_operations ipgre_net_ops = {
1043 	.init = ipgre_init_net,
1044 	.exit_batch = ipgre_exit_batch_net,
1045 	.id   = &ipgre_net_id,
1046 	.size = sizeof(struct ip_tunnel_net),
1047 };
1048 
1049 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1050 				 struct netlink_ext_ack *extack)
1051 {
1052 	__be16 flags;
1053 
1054 	if (!data)
1055 		return 0;
1056 
1057 	flags = 0;
1058 	if (data[IFLA_GRE_IFLAGS])
1059 		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1060 	if (data[IFLA_GRE_OFLAGS])
1061 		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1062 	if (flags & (GRE_VERSION|GRE_ROUTING))
1063 		return -EINVAL;
1064 
1065 	if (data[IFLA_GRE_COLLECT_METADATA] &&
1066 	    data[IFLA_GRE_ENCAP_TYPE] &&
1067 	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
1068 		return -EINVAL;
1069 
1070 	return 0;
1071 }
1072 
1073 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1074 			      struct netlink_ext_ack *extack)
1075 {
1076 	__be32 daddr;
1077 
1078 	if (tb[IFLA_ADDRESS]) {
1079 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1080 			return -EINVAL;
1081 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1082 			return -EADDRNOTAVAIL;
1083 	}
1084 
1085 	if (!data)
1086 		goto out;
1087 
1088 	if (data[IFLA_GRE_REMOTE]) {
1089 		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1090 		if (!daddr)
1091 			return -EINVAL;
1092 	}
1093 
1094 out:
1095 	return ipgre_tunnel_validate(tb, data, extack);
1096 }
1097 
1098 static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
1099 			   struct netlink_ext_ack *extack)
1100 {
1101 	__be16 flags = 0;
1102 	int ret;
1103 
1104 	if (!data)
1105 		return 0;
1106 
1107 	ret = ipgre_tap_validate(tb, data, extack);
1108 	if (ret)
1109 		return ret;
1110 
1111 	/* ERSPAN should only have the GRE sequence and key flags */
1112 	if (data[IFLA_GRE_OFLAGS])
1113 		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1114 	if (data[IFLA_GRE_IFLAGS])
1115 		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1116 	if (!data[IFLA_GRE_COLLECT_METADATA] &&
1117 	    flags != (GRE_SEQ | GRE_KEY))
1118 		return -EINVAL;
1119 
1120 	/* The ERSPAN session ID is only 10 bits. Since we reuse the
1121 	 * 32-bit key field as the ID, check its range.
1122 	 */
1123 	if (data[IFLA_GRE_IKEY] &&
1124 	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1125 		return -EINVAL;
1126 
1127 	if (data[IFLA_GRE_OKEY] &&
1128 	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1129 		return -EINVAL;
1130 
1131 	return 0;
1132 }
1133 
1134 static int ipgre_netlink_parms(struct net_device *dev,
1135 				struct nlattr *data[],
1136 				struct nlattr *tb[],
1137 				struct ip_tunnel_parm *parms,
1138 				__u32 *fwmark)
1139 {
1140 	struct ip_tunnel *t = netdev_priv(dev);
1141 
1142 	memset(parms, 0, sizeof(*parms));
1143 
1144 	parms->iph.protocol = IPPROTO_GRE;
1145 
1146 	if (!data)
1147 		return 0;
1148 
1149 	if (data[IFLA_GRE_LINK])
1150 		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1151 
1152 	if (data[IFLA_GRE_IFLAGS])
1153 		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));
1154 
1155 	if (data[IFLA_GRE_OFLAGS])
1156 		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));
1157 
1158 	if (data[IFLA_GRE_IKEY])
1159 		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1160 
1161 	if (data[IFLA_GRE_OKEY])
1162 		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1163 
1164 	if (data[IFLA_GRE_LOCAL])
1165 		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
1166 
1167 	if (data[IFLA_GRE_REMOTE])
1168 		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
1169 
1170 	if (data[IFLA_GRE_TTL])
1171 		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1172 
1173 	if (data[IFLA_GRE_TOS])
1174 		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1175 
1176 	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
1177 		if (t->ignore_df)
1178 			return -EINVAL;
1179 		parms->iph.frag_off = htons(IP_DF);
1180 	}
1181 
1182 	if (data[IFLA_GRE_COLLECT_METADATA]) {
1183 		t->collect_md = true;
1184 		if (dev->type == ARPHRD_IPGRE)
1185 			dev->type = ARPHRD_NONE;
1186 	}
1187 
1188 	if (data[IFLA_GRE_IGNORE_DF]) {
1189 		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF]) &&
1190 		    (parms->iph.frag_off & htons(IP_DF)))
1191 			return -EINVAL;
1192 		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
1193 	}
1194 
1195 	if (data[IFLA_GRE_FWMARK])
1196 		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
1197 
1198 	if (data[IFLA_GRE_ERSPAN_VER]) {
1199 		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1200 
1201 		if (t->erspan_ver != 1 && t->erspan_ver != 2)
1202 			return -EINVAL;
1203 	}
1204 
1205 	if (t->erspan_ver == 1) {
1206 		if (data[IFLA_GRE_ERSPAN_INDEX]) {
1207 			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1208 			if (t->index & ~INDEX_MASK)
1209 				return -EINVAL;
1210 		}
1211 	} else if (t->erspan_ver == 2) {
1212 		if (data[IFLA_GRE_ERSPAN_DIR]) {
1213 			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1214 			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
1215 				return -EINVAL;
1216 		}
1217 		if (data[IFLA_GRE_ERSPAN_HWID]) {
1218 			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1219 			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
1220 				return -EINVAL;
1221 		}
1222 	}
1223 
1224 	return 0;
1225 }
1226 
1227 /* This function returns true when ENCAP attributes are present in the nl msg */
1228 static bool ipgre_netlink_encap_parms(struct nlattr *data[],
1229 				      struct ip_tunnel_encap *ipencap)
1230 {
1231 	bool ret = false;
1232 
1233 	memset(ipencap, 0, sizeof(*ipencap));
1234 
1235 	if (!data)
1236 		return ret;
1237 
1238 	if (data[IFLA_GRE_ENCAP_TYPE]) {
1239 		ret = true;
1240 		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1241 	}
1242 
1243 	if (data[IFLA_GRE_ENCAP_FLAGS]) {
1244 		ret = true;
1245 		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1246 	}
1247 
1248 	if (data[IFLA_GRE_ENCAP_SPORT]) {
1249 		ret = true;
1250 		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1251 	}
1252 
1253 	if (data[IFLA_GRE_ENCAP_DPORT]) {
1254 		ret = true;
1255 		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1256 	}
1257 
1258 	return ret;
1259 }
1260 
1261 static int gre_tap_init(struct net_device *dev)
1262 {
1263 	__gre_tunnel_init(dev);
1264 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1265 	netif_keep_dst(dev);
1266 
1267 	return ip_tunnel_init(dev);
1268 }
1269 
1270 static const struct net_device_ops gre_tap_netdev_ops = {
1271 	.ndo_init		= gre_tap_init,
1272 	.ndo_uninit		= ip_tunnel_uninit,
1273 	.ndo_start_xmit		= gre_tap_xmit,
1274 	.ndo_set_mac_address	= eth_mac_addr,
1275 	.ndo_validate_addr	= eth_validate_addr,
1276 	.ndo_change_mtu		= ip_tunnel_change_mtu,
1277 	.ndo_get_stats64	= ip_tunnel_get_stats64,
1278 	.ndo_get_iflink		= ip_tunnel_get_iflink,
1279 	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1280 };
1281 
1282 static int erspan_tunnel_init(struct net_device *dev)
1283 {
1284 	struct ip_tunnel *tunnel = netdev_priv(dev);
1285 	int t_hlen;
1286 
1287 	tunnel->tun_hlen = 8;
1288 	tunnel->parms.iph.protocol = IPPROTO_GRE;
1289 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1290 		       erspan_hdr_len(tunnel->erspan_ver);
1291 	t_hlen = tunnel->hlen + sizeof(struct iphdr);
1292 
1293 	dev->features		|= GRE_FEATURES;
1294 	dev->hw_features	|= GRE_FEATURES;
1295 	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
1296 	netif_keep_dst(dev);
1297 
1298 	return ip_tunnel_init(dev);
1299 }
1300 
1301 static const struct net_device_ops erspan_netdev_ops = {
1302 	.ndo_init		= erspan_tunnel_init,
1303 	.ndo_uninit		= ip_tunnel_uninit,
1304 	.ndo_start_xmit		= erspan_xmit,
1305 	.ndo_set_mac_address	= eth_mac_addr,
1306 	.ndo_validate_addr	= eth_validate_addr,
1307 	.ndo_change_mtu		= ip_tunnel_change_mtu,
1308 	.ndo_get_stats64	= ip_tunnel_get_stats64,
1309 	.ndo_get_iflink		= ip_tunnel_get_iflink,
1310 	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1311 };
1312 
1313 static void ipgre_tap_setup(struct net_device *dev)
1314 {
1315 	ether_setup(dev);
1316 	dev->max_mtu = 0;
1317 	dev->netdev_ops	= &gre_tap_netdev_ops;
1318 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1319 	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
1320 	ip_tunnel_setup(dev, gre_tap_net_id);
1321 }
1322 
1323 bool is_gretap_dev(const struct net_device *dev)
1324 {
1325 	return dev->netdev_ops == &gre_tap_netdev_ops;
1326 }
1327 EXPORT_SYMBOL_GPL(is_gretap_dev);
1328 
1329 static int ipgre_newlink(struct net *src_net, struct net_device *dev,
1330 			 struct nlattr *tb[], struct nlattr *data[],
1331 			 struct netlink_ext_ack *extack)
1332 {
1333 	struct ip_tunnel_parm p;
1334 	struct ip_tunnel_encap ipencap;
1335 	__u32 fwmark = 0;
1336 	int err;
1337 
1338 	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1339 		struct ip_tunnel *t = netdev_priv(dev);
1340 		err = ip_tunnel_encap_setup(t, &ipencap);
1341 
1342 		if (err < 0)
1343 			return err;
1344 	}
1345 
1346 	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1347 	if (err < 0)
1348 		return err;
1349 	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1350 }
1351 
1352 static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1353 			    struct nlattr *data[],
1354 			    struct netlink_ext_ack *extack)
1355 {
1356 	struct ip_tunnel *t = netdev_priv(dev);
1357 	struct ip_tunnel_encap ipencap;
1358 	__u32 fwmark = t->fwmark;
1359 	struct ip_tunnel_parm p;
1360 	int err;
1361 
1362 	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1363 		err = ip_tunnel_encap_setup(t, &ipencap);
1364 
1365 		if (err < 0)
1366 			return err;
1367 	}
1368 
1369 	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1370 	if (err < 0)
1371 		return err;
1372 
1373 	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1374 	if (err < 0)
1375 		return err;
1376 
1377 	t->parms.i_flags = p.i_flags;
1378 	t->parms.o_flags = p.o_flags;
1379 
1380 	if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
1381 		ipgre_link_update(dev, !tb[IFLA_MTU]);
1382 
1383 	return 0;
1384 }
1385 
1386 static size_t ipgre_get_size(const struct net_device *dev)
1387 {
1388 	return
1389 		/* IFLA_GRE_LINK */
1390 		nla_total_size(4) +
1391 		/* IFLA_GRE_IFLAGS */
1392 		nla_total_size(2) +
1393 		/* IFLA_GRE_OFLAGS */
1394 		nla_total_size(2) +
1395 		/* IFLA_GRE_IKEY */
1396 		nla_total_size(4) +
1397 		/* IFLA_GRE_OKEY */
1398 		nla_total_size(4) +
1399 		/* IFLA_GRE_LOCAL */
1400 		nla_total_size(4) +
1401 		/* IFLA_GRE_REMOTE */
1402 		nla_total_size(4) +
1403 		/* IFLA_GRE_TTL */
1404 		nla_total_size(1) +
1405 		/* IFLA_GRE_TOS */
1406 		nla_total_size(1) +
1407 		/* IFLA_GRE_PMTUDISC */
1408 		nla_total_size(1) +
1409 		/* IFLA_GRE_ENCAP_TYPE */
1410 		nla_total_size(2) +
1411 		/* IFLA_GRE_ENCAP_FLAGS */
1412 		nla_total_size(2) +
1413 		/* IFLA_GRE_ENCAP_SPORT */
1414 		nla_total_size(2) +
1415 		/* IFLA_GRE_ENCAP_DPORT */
1416 		nla_total_size(2) +
1417 		/* IFLA_GRE_COLLECT_METADATA */
1418 		nla_total_size(0) +
1419 		/* IFLA_GRE_IGNORE_DF */
1420 		nla_total_size(1) +
1421 		/* IFLA_GRE_FWMARK */
1422 		nla_total_size(4) +
1423 		/* IFLA_GRE_ERSPAN_INDEX */
1424 		nla_total_size(4) +
1425 		/* IFLA_GRE_ERSPAN_VER */
1426 		nla_total_size(1) +
1427 		/* IFLA_GRE_ERSPAN_DIR */
1428 		nla_total_size(1) +
1429 		/* IFLA_GRE_ERSPAN_HWID */
1430 		nla_total_size(2) +
1431 		0;
1432 }
1433 
1434 static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1435 {
1436 	struct ip_tunnel *t = netdev_priv(dev);
1437 	struct ip_tunnel_parm *p = &t->parms;
1438 
1439 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1440 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
1441 			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
1442 	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
1443 			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
1444 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1445 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1446 	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1447 	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1448 	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1449 	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1450 	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1451 		       !!(p->iph.frag_off & htons(IP_DF))) ||
1452 	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
1453 		goto nla_put_failure;
1454 
1455 	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
1456 			t->encap.type) ||
1457 	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
1458 			 t->encap.sport) ||
1459 	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
1460 			 t->encap.dport) ||
1461 	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
1462 			t->encap.flags))
1463 		goto nla_put_failure;
1464 
1465 	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
1466 		goto nla_put_failure;
1467 
1468 	if (t->collect_md) {
1469 		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
1470 			goto nla_put_failure;
1471 	}
1472 
1473 	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1474 		goto nla_put_failure;
1475 
1476 	if (t->erspan_ver == 1) {
1477 		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1478 			goto nla_put_failure;
1479 	} else if (t->erspan_ver == 2) {
1480 		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1481 			goto nla_put_failure;
1482 		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1483 			goto nla_put_failure;
1484 	}
1485 
1486 	return 0;
1487 
1488 nla_put_failure:
1489 	return -EMSGSIZE;
1490 }
1491 
1492 static void erspan_setup(struct net_device *dev)
1493 {
1494 	ether_setup(dev);
1495 	dev->netdev_ops = &erspan_netdev_ops;
1496 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1497 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1498 	ip_tunnel_setup(dev, erspan_net_id);
1499 }
1500 
1501 static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1502 	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
1503 	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
1504 	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
1505 	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
1506 	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
1507 	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
1508 	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
1509 	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
1510 	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
1511 	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
1512 	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
1513 	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
1514 	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
1515 	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
1516 	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
1517 	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
1518 	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
1519 	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
1520 	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
1521 	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
1522 	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
1523 };
1524 
1525 static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1526 	.kind		= "gre",
1527 	.maxtype	= IFLA_GRE_MAX,
1528 	.policy		= ipgre_policy,
1529 	.priv_size	= sizeof(struct ip_tunnel),
1530 	.setup		= ipgre_tunnel_setup,
1531 	.validate	= ipgre_tunnel_validate,
1532 	.newlink	= ipgre_newlink,
1533 	.changelink	= ipgre_changelink,
1534 	.dellink	= ip_tunnel_dellink,
1535 	.get_size	= ipgre_get_size,
1536 	.fill_info	= ipgre_fill_info,
1537 	.get_link_net	= ip_tunnel_get_link_net,
1538 };
1539 
1540 static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1541 	.kind		= "gretap",
1542 	.maxtype	= IFLA_GRE_MAX,
1543 	.policy		= ipgre_policy,
1544 	.priv_size	= sizeof(struct ip_tunnel),
1545 	.setup		= ipgre_tap_setup,
1546 	.validate	= ipgre_tap_validate,
1547 	.newlink	= ipgre_newlink,
1548 	.changelink	= ipgre_changelink,
1549 	.dellink	= ip_tunnel_dellink,
1550 	.get_size	= ipgre_get_size,
1551 	.fill_info	= ipgre_fill_info,
1552 	.get_link_net	= ip_tunnel_get_link_net,
1553 };
1554 
1555 static struct rtnl_link_ops erspan_link_ops __read_mostly = {
1556 	.kind		= "erspan",
1557 	.maxtype	= IFLA_GRE_MAX,
1558 	.policy		= ipgre_policy,
1559 	.priv_size	= sizeof(struct ip_tunnel),
1560 	.setup		= erspan_setup,
1561 	.validate	= erspan_validate,
1562 	.newlink	= ipgre_newlink,
1563 	.changelink	= ipgre_changelink,
1564 	.dellink	= ip_tunnel_dellink,
1565 	.get_size	= ipgre_get_size,
1566 	.fill_info	= ipgre_fill_info,
1567 	.get_link_net	= ip_tunnel_get_link_net,
1568 };
1569 
1570 struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1571 					u8 name_assign_type)
1572 {
1573 	struct nlattr *tb[IFLA_MAX + 1];
1574 	struct net_device *dev;
1575 	LIST_HEAD(list_kill);
1576 	struct ip_tunnel *t;
1577 	int err;
1578 
1579 	memset(&tb, 0, sizeof(tb));
1580 
1581 	dev = rtnl_create_link(net, name, name_assign_type,
1582 			       &ipgre_tap_ops, tb);
1583 	if (IS_ERR(dev))
1584 		return dev;
1585 
1586 	/* Configure flow based GRE device. */
1587 	t = netdev_priv(dev);
1588 	t->collect_md = true;
1589 
1590 	err = ipgre_newlink(net, dev, tb, NULL, NULL);
1591 	if (err < 0) {
1592 		free_netdev(dev);
1593 		return ERR_PTR(err);
1594 	}
1595 
1596 	/* openvswitch users expect packet sizes to be unrestricted,
1597 	 * so set the largest MTU we can.
1598 	 */
1599 	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
1600 	if (err)
1601 		goto out;
1602 
1603 	err = rtnl_configure_link(dev, NULL);
1604 	if (err < 0)
1605 		goto out;
1606 
1607 	return dev;
1608 out:
1609 	ip_tunnel_dellink(dev, &list_kill);
1610 	unregister_netdevice_many(&list_kill);
1611 	return ERR_PTR(err);
1612 }
1613 EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
1614 
1615 static int __net_init ipgre_tap_init_net(struct net *net)
1616 {
1617 	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
1618 }
1619 
1620 static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
1621 {
1622 	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
1623 }
1624 
1625 static struct pernet_operations ipgre_tap_net_ops = {
1626 	.init = ipgre_tap_init_net,
1627 	.exit_batch = ipgre_tap_exit_batch_net,
1628 	.id   = &gre_tap_net_id,
1629 	.size = sizeof(struct ip_tunnel_net),
1630 };
1631 
1632 static int __net_init erspan_init_net(struct net *net)
1633 {
1634 	return ip_tunnel_init_net(net, erspan_net_id,
1635 				  &erspan_link_ops, "erspan0");
1636 }
1637 
1638 static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
1639 {
1640 	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
1641 }
1642 
1643 static struct pernet_operations erspan_net_ops = {
1644 	.init = erspan_init_net,
1645 	.exit_batch = erspan_exit_batch_net,
1646 	.id   = &erspan_net_id,
1647 	.size = sizeof(struct ip_tunnel_net),
1648 };
1649 
1650 static int __init ipgre_init(void)
1651 {
1652 	int err;
1653 
1654 	pr_info("GRE over IPv4 tunneling driver\n");
1655 
1656 	err = register_pernet_device(&ipgre_net_ops);
1657 	if (err < 0)
1658 		return err;
1659 
1660 	err = register_pernet_device(&ipgre_tap_net_ops);
1661 	if (err < 0)
1662 		goto pnet_tap_failed;
1663 
1664 	err = register_pernet_device(&erspan_net_ops);
1665 	if (err < 0)
1666 		goto pnet_erspan_failed;
1667 
1668 	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1669 	if (err < 0) {
1670 		pr_info("%s: can't add protocol\n", __func__);
1671 		goto add_proto_failed;
1672 	}
1673 
1674 	err = rtnl_link_register(&ipgre_link_ops);
1675 	if (err < 0)
1676 		goto rtnl_link_failed;
1677 
1678 	err = rtnl_link_register(&ipgre_tap_ops);
1679 	if (err < 0)
1680 		goto tap_ops_failed;
1681 
1682 	err = rtnl_link_register(&erspan_link_ops);
1683 	if (err < 0)
1684 		goto erspan_link_failed;
1685 
1686 	return 0;
1687 
1688 erspan_link_failed:
1689 	rtnl_link_unregister(&ipgre_tap_ops);
1690 tap_ops_failed:
1691 	rtnl_link_unregister(&ipgre_link_ops);
1692 rtnl_link_failed:
1693 	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1694 add_proto_failed:
1695 	unregister_pernet_device(&erspan_net_ops);
1696 pnet_erspan_failed:
1697 	unregister_pernet_device(&ipgre_tap_net_ops);
1698 pnet_tap_failed:
1699 	unregister_pernet_device(&ipgre_net_ops);
1700 	return err;
1701 }
1702 
1703 static void __exit ipgre_fini(void)
1704 {
1705 	rtnl_link_unregister(&ipgre_tap_ops);
1706 	rtnl_link_unregister(&ipgre_link_ops);
1707 	rtnl_link_unregister(&erspan_link_ops);
1708 	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1709 	unregister_pernet_device(&ipgre_tap_net_ops);
1710 	unregister_pernet_device(&ipgre_net_ops);
1711 	unregister_pernet_device(&erspan_net_ops);
1712 }
1713 
1714 module_init(ipgre_init);
1715 module_exit(ipgre_fini);
1716 MODULE_LICENSE("GPL");
1717 MODULE_ALIAS_RTNL_LINK("gre");
1718 MODULE_ALIAS_RTNL_LINK("gretap");
1719 MODULE_ALIAS_RTNL_LINK("erspan");
1720 MODULE_ALIAS_NETDEV("gre0");
1721 MODULE_ALIAS_NETDEV("gretap0");
1722 MODULE_ALIAS_NETDEV("erspan0");
1723