xref: /openbmc/linux/net/ipv6/ip6_tunnel.c (revision 15e3ae36)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	IPv6 tunneling device
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
8  *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
9  *
10  *      Based on:
11  *      linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
12  *
13  *      RFC 2473
14  */
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 
18 #include <linux/module.h>
19 #include <linux/capability.h>
20 #include <linux/errno.h>
21 #include <linux/types.h>
22 #include <linux/sockios.h>
23 #include <linux/icmp.h>
24 #include <linux/if.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/net.h>
28 #include <linux/in6.h>
29 #include <linux/netdevice.h>
30 #include <linux/if_arp.h>
31 #include <linux/icmpv6.h>
32 #include <linux/init.h>
33 #include <linux/route.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/netfilter_ipv6.h>
36 #include <linux/slab.h>
37 #include <linux/hash.h>
38 #include <linux/etherdevice.h>
39 
40 #include <linux/uaccess.h>
41 #include <linux/atomic.h>
42 
43 #include <net/icmp.h>
44 #include <net/ip.h>
45 #include <net/ip_tunnels.h>
46 #include <net/ipv6.h>
47 #include <net/ip6_route.h>
48 #include <net/addrconf.h>
49 #include <net/ip6_tunnel.h>
50 #include <net/xfrm.h>
51 #include <net/dsfield.h>
52 #include <net/inet_ecn.h>
53 #include <net/net_namespace.h>
54 #include <net/netns/generic.h>
55 #include <net/dst_metadata.h>
56 
57 MODULE_AUTHOR("Ville Nuorvala");
58 MODULE_DESCRIPTION("IPv6 tunneling device");
59 MODULE_LICENSE("GPL");
60 MODULE_ALIAS_RTNL_LINK("ip6tnl");
61 MODULE_ALIAS_NETDEV("ip6tnl0");
62 
63 #define IP6_TUNNEL_HASH_SIZE_SHIFT  5
64 #define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)
65 
66 static bool log_ecn_error = true;
67 module_param(log_ecn_error, bool, 0644);
68 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
69 
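/* Fold the two tunnel end-point addresses into a hash bucket index in the
 * range [0, IP6_TUNNEL_HASH_SIZE).
 */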
70 static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
71 {
72 	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
73 
74 	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
75 }
76 
77 static int ip6_tnl_dev_init(struct net_device *dev);
78 static void ip6_tnl_dev_setup(struct net_device *dev);
79 static struct rtnl_link_ops ip6_link_ops __read_mostly;
80 
81 static unsigned int ip6_tnl_net_id __read_mostly;
82 struct ip6_tnl_net {
83 	/* the IPv6 tunnel fallback device */
84 	struct net_device *fb_tnl_dev;
85 	/* lists for storing tunnels in use */
86 	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
87 	struct ip6_tnl __rcu *tnls_wc[1];
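	/* indexed by the prio value from ip6_tnl_bucket(): 0 selects the
	 * wildcard list, 1 the hashed per-endpoint lists
	 */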
88 	struct ip6_tnl __rcu **tnls[2];
89 	struct ip6_tnl __rcu *collect_md_tun;
90 };
91 
92 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
93 {
94 	struct pcpu_sw_netstats tmp, sum = { 0 };
95 	int i;
96 
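	/* Sum the per-CPU counters into dev->stats; the u64_stats retry loop
	 * guards against torn reads of the 64-bit counters on 32-bit systems.
	 */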
97 	for_each_possible_cpu(i) {
98 		unsigned int start;
99 		const struct pcpu_sw_netstats *tstats =
100 						   per_cpu_ptr(dev->tstats, i);
101 
102 		do {
103 			start = u64_stats_fetch_begin_irq(&tstats->syncp);
104 			tmp.rx_packets = tstats->rx_packets;
105 			tmp.rx_bytes = tstats->rx_bytes;
106 			tmp.tx_packets = tstats->tx_packets;
107 			tmp.tx_bytes = tstats->tx_bytes;
108 		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
109 
110 		sum.rx_packets += tmp.rx_packets;
111 		sum.rx_bytes   += tmp.rx_bytes;
112 		sum.tx_packets += tmp.tx_packets;
113 		sum.tx_bytes   += tmp.tx_bytes;
114 	}
115 	dev->stats.rx_packets = sum.rx_packets;
116 	dev->stats.rx_bytes   = sum.rx_bytes;
117 	dev->stats.tx_packets = sum.tx_packets;
118 	dev->stats.tx_bytes   = sum.tx_bytes;
119 	return &dev->stats;
120 }
121 
122 /**
123  * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 *   @net: network namespace in which the tunnels live
124  *   @link: ifindex of underlying interface
125  *   @remote: the address of the tunnel exit-point
126  *   @local: the address of the tunnel entry-point
127  *
128  * Return:
129  *   tunnel matching given end-points if found,
130  *   else the collect-metadata tunnel or the fallback tunnel if its device is up,
131  *   else %NULL
132  **/
133 
134 #define for_each_ip6_tunnel_rcu(start) \
135 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
136 
137 static struct ip6_tnl *
138 ip6_tnl_lookup(struct net *net, int link,
139 	       const struct in6_addr *remote, const struct in6_addr *local)
140 {
141 	unsigned int hash = HASH(remote, local);
142 	struct ip6_tnl *t, *cand = NULL;
143 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
144 	struct in6_addr any;
145 
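	/* Pass 1: exact match on both end-points, preferring a tunnel bound
	 * to the incoming link.
	 */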
146 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
147 		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
148 		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
149 		    !(t->dev->flags & IFF_UP))
150 			continue;
151 
152 		if (link == t->parms.link)
153 			return t;
154 		else
155 			cand = t;
156 	}
157 
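	/* Pass 2: match on our local address only; tunnels with an
	 * unspecified remote address.
	 */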
158 	memset(&any, 0, sizeof(any));
159 	hash = HASH(&any, local);
160 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
161 		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
162 		    !ipv6_addr_any(&t->parms.raddr) ||
163 		    !(t->dev->flags & IFF_UP))
164 			continue;
165 
166 		if (link == t->parms.link)
167 			return t;
168 		else if (!cand)
169 			cand = t;
170 	}
171 
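	/* Pass 3: match on the remote address only; tunnels with an
	 * unspecified local address.
	 */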
172 	hash = HASH(remote, &any);
173 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
174 		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
175 		    !ipv6_addr_any(&t->parms.laddr) ||
176 		    !(t->dev->flags & IFF_UP))
177 			continue;
178 
179 		if (link == t->parms.link)
180 			return t;
181 		else if (!cand)
182 			cand = t;
183 	}
184 
185 	if (cand)
186 		return cand;
187 
188 	t = rcu_dereference(ip6n->collect_md_tun);
189 	if (t && (t->dev->flags & IFF_UP))
190 		return t;
191 
192 	t = rcu_dereference(ip6n->tnls_wc[0]);
193 	if (t && (t->dev->flags & IFF_UP))
194 		return t;
195 
196 	return NULL;
197 }
198 
199 /**
200  * ip6_tnl_bucket - get head of list matching given tunnel parameters
201  *   @p: parameters containing tunnel end-points
202  *
203  * Description:
204  *   ip6_tnl_bucket() returns the head of the list matching the
205  *   &struct in6_addr entries laddr and raddr in @p.
206  *
207  * Return: head of IPv6 tunnel list
208  **/
209 
210 static struct ip6_tnl __rcu **
211 ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
212 {
213 	const struct in6_addr *remote = &p->raddr;
214 	const struct in6_addr *local = &p->laddr;
215 	unsigned int h = 0;
216 	int prio = 0;
217 
218 	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
219 		prio = 1;
220 		h = HASH(remote, local);
221 	}
222 	return &ip6n->tnls[prio][h];
223 }
224 
225 /**
226  * ip6_tnl_link - add tunnel to hash table
227  *   @t: tunnel to be added
228  **/
229 
230 static void
231 ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
232 {
233 	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
234 
235 	if (t->parms.collect_md)
236 		rcu_assign_pointer(ip6n->collect_md_tun, t);
237 	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
238 	rcu_assign_pointer(*tp, t);
239 }
240 
241 /**
242  * ip6_tnl_unlink - remove tunnel from hash table
243  *   @t: tunnel to be removed
244  **/
245 
246 static void
247 ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
248 {
249 	struct ip6_tnl __rcu **tp;
250 	struct ip6_tnl *iter;
251 
252 	if (t->parms.collect_md)
253 		rcu_assign_pointer(ip6n->collect_md_tun, NULL);
254 
255 	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
256 	     (iter = rtnl_dereference(*tp)) != NULL;
257 	     tp = &iter->next) {
258 		if (t == iter) {
259 			rcu_assign_pointer(*tp, t->next);
260 			break;
261 		}
262 	}
263 }
264 
265 static void ip6_dev_free(struct net_device *dev)
266 {
267 	struct ip6_tnl *t = netdev_priv(dev);
268 
269 	gro_cells_destroy(&t->gro_cells);
270 	dst_cache_destroy(&t->dst_cache);
271 	free_percpu(dev->tstats);
272 }
273 
274 static int ip6_tnl_create2(struct net_device *dev)
275 {
276 	struct ip6_tnl *t = netdev_priv(dev);
277 	struct net *net = dev_net(dev);
278 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
279 	int err;
280 
283 	dev->rtnl_link_ops = &ip6_link_ops;
284 	err = register_netdevice(dev);
285 	if (err < 0)
286 		goto out;
287 
288 	strcpy(t->parms.name, dev->name);
289 
290 	dev_hold(dev);
291 	ip6_tnl_link(ip6n, t);
292 	return 0;
293 
294 out:
295 	return err;
296 }
297 
298 /**
299  * ip6_tnl_create - create a new tunnel
 *   @net: network namespace in which to create the tunnel
300  *   @p: tunnel parameters
302  *
303  * Description:
304  *   Create tunnel matching given parameters.
305  *
306  * Return:
307  *   created tunnel or error pointer
308  **/
309 
310 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
311 {
312 	struct net_device *dev;
313 	struct ip6_tnl *t;
314 	char name[IFNAMSIZ];
315 	int err = -E2BIG;
316 
317 	if (p->name[0]) {
318 		if (!dev_valid_name(p->name))
319 			goto failed;
320 		strlcpy(name, p->name, IFNAMSIZ);
321 	} else {
322 		sprintf(name, "ip6tnl%%d");
323 	}
324 	err = -ENOMEM;
325 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
326 			   ip6_tnl_dev_setup);
327 	if (!dev)
328 		goto failed;
329 
330 	dev_net_set(dev, net);
331 
332 	t = netdev_priv(dev);
333 	t->parms = *p;
334 	t->net = dev_net(dev);
335 	err = ip6_tnl_create2(dev);
336 	if (err < 0)
337 		goto failed_free;
338 
339 	return t;
340 
341 failed_free:
342 	free_netdev(dev);
343 failed:
344 	return ERR_PTR(err);
345 }
346 
347 /**
348  * ip6_tnl_locate - find or create tunnel matching given parameters
 *   @net: network namespace in which to look up or create the tunnel
349  *   @p: tunnel parameters
350  *   @create: non-zero to create a new tunnel if no match is found
351  *
352  * Description:
353  *   ip6_tnl_locate() first tries to locate an existing tunnel
354  *   based on @p. If this is unsuccessful, but @create is set, a new
355  *   tunnel device is created and registered for use.
356  *
357  * Return:
358  *   matching tunnel or error pointer
359  **/
360 
361 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
362 		struct __ip6_tnl_parm *p, int create)
363 {
364 	const struct in6_addr *remote = &p->raddr;
365 	const struct in6_addr *local = &p->laddr;
366 	struct ip6_tnl __rcu **tp;
367 	struct ip6_tnl *t;
368 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
369 
370 	for (tp = ip6_tnl_bucket(ip6n, p);
371 	     (t = rtnl_dereference(*tp)) != NULL;
372 	     tp = &t->next) {
373 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
374 		    ipv6_addr_equal(remote, &t->parms.raddr) &&
375 		    p->link == t->parms.link) {
376 			if (create)
377 				return ERR_PTR(-EEXIST);
378 
379 			return t;
380 		}
381 	}
382 	if (!create)
383 		return ERR_PTR(-ENODEV);
384 	return ip6_tnl_create(net, p);
385 }
386 
387 /**
388  * ip6_tnl_dev_uninit - tunnel device uninitializer
389  *   @dev: the device to be destroyed
390  *
391  * Description:
392  *   ip6_tnl_dev_uninit() removes tunnel from its list
393  **/
394 
395 static void
396 ip6_tnl_dev_uninit(struct net_device *dev)
397 {
398 	struct ip6_tnl *t = netdev_priv(dev);
399 	struct net *net = t->net;
400 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
401 
402 	if (dev == ip6n->fb_tnl_dev)
403 		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
404 	else
405 		ip6_tnl_unlink(ip6n, t);
406 	dst_cache_reset(&t->dst_cache);
407 	dev_put(dev);
408 }
409 
410 /**
411  * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
412  *   @skb: received socket buffer
 *   @raw: pointer to the start of the tunneled IPv6 header within @skb
413  *
414  * Return:
415  *   0 if no encapsulation limit option was found,
416  *   else the offset of the option relative to @raw
417  **/
418 
419 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
420 {
421 	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
422 	unsigned int nhoff = raw - skb->data;
423 	unsigned int off = nhoff + sizeof(*ipv6h);
424 	u8 next, nexthdr = ipv6h->nexthdr;
425 
426 	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
427 		struct ipv6_opt_hdr *hdr;
428 		u16 optlen;
429 
430 		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
431 			break;
432 
433 		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
434 		if (nexthdr == NEXTHDR_FRAGMENT) {
435 			struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
436 			if (frag_hdr->frag_off)
437 				break;
438 			optlen = 8;
439 		} else if (nexthdr == NEXTHDR_AUTH) {
440 			optlen = ipv6_authlen(hdr);
441 		} else {
442 			optlen = ipv6_optlen(hdr);
443 		}
444 		/* cache hdr->nexthdr, since pskb_may_pull() might
445 		 * invalidate hdr
446 		 */
447 		next = hdr->nexthdr;
448 		if (nexthdr == NEXTHDR_DEST) {
449 			u16 i = 2;
450 
451 			/* Remember: hdr is no longer valid at this point. */
452 			if (!pskb_may_pull(skb, off + optlen))
453 				break;
454 
455 			while (1) {
456 				struct ipv6_tlv_tnl_enc_lim *tel;
457 
458 				/* No more room for encapsulation limit */
459 				if (i + sizeof(*tel) > optlen)
460 					break;
461 
462 				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
463 				/* return index of option if found and valid */
464 				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
465 				    tel->length == 1)
466 					return i + off - nhoff;
467 				/* else jump to next option */
468 				if (tel->type)
469 					i += tel->length + 2;
470 				else
471 					i++;
472 			}
473 		}
474 		nexthdr = next;
475 		off += optlen;
476 	}
477 	return 0;
478 }
479 EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
480 
481 /**
482  * ip6_tnl_err - tunnel error handler
483  *
484  * Description:
485  *   ip6_tnl_err() should handle errors in the tunnel according
486  *   to the specifications in RFC 2473.
487  **/
488 
489 static int
490 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
491 	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
492 {
493 	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
494 	struct net *net = dev_net(skb->dev);
495 	u8 rel_type = ICMPV6_DEST_UNREACH;
496 	u8 rel_code = ICMPV6_ADDR_UNREACH;
497 	__u32 rel_info = 0;
498 	struct ip6_tnl *t;
499 	int err = -ENOENT;
500 	int rel_msg = 0;
501 	u8 tproto;
502 	__u16 len;
503 
504 	/* If the packet doesn't contain the original IPv6 header we are
505 	   in trouble since we might need the source address for further
506 	   processing of the error. */
507 
508 	rcu_read_lock();
509 	t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->daddr, &ipv6h->saddr);
510 	if (!t)
511 		goto out;
512 
513 	tproto = READ_ONCE(t->parms.proto);
514 	if (tproto != ipproto && tproto != 0)
515 		goto out;
516 
517 	err = 0;
518 
519 	switch (*type) {
520 	case ICMPV6_DEST_UNREACH:
521 		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
522 				    t->parms.name);
523 		rel_msg = 1;
524 		break;
525 	case ICMPV6_TIME_EXCEED:
526 		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
527 			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
528 					    t->parms.name);
529 			rel_msg = 1;
530 		}
531 		break;
532 	case ICMPV6_PARAMPROB: {
533 		struct ipv6_tlv_tnl_enc_lim *tel;
534 		__u32 teli;
535 
536 		teli = 0;
537 		if ((*code) == ICMPV6_HDR_FIELD)
538 			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
539 
540 		if (teli && teli == *info - 2) {
541 			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
542 			if (tel->encap_limit == 0) {
543 				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
544 						    t->parms.name);
545 				rel_msg = 1;
546 			}
547 		} else {
548 			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
549 					    t->parms.name);
550 		}
551 		break;
552 	}
553 	case ICMPV6_PKT_TOOBIG: {
554 		__u32 mtu;
555 
556 		ip6_update_pmtu(skb, net, htonl(*info), 0, 0,
557 				sock_net_uid(net, NULL));
558 		mtu = *info - offset;
559 		if (mtu < IPV6_MIN_MTU)
560 			mtu = IPV6_MIN_MTU;
561 		len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
562 		if (len > mtu) {
563 			rel_type = ICMPV6_PKT_TOOBIG;
564 			rel_code = 0;
565 			rel_info = mtu;
566 			rel_msg = 1;
567 		}
568 		break;
569 	}
570 	case NDISC_REDIRECT:
571 		ip6_redirect(skb, net, skb->dev->ifindex, 0,
572 			     sock_net_uid(net, NULL));
573 		break;
574 	}
575 
576 	*type = rel_type;
577 	*code = rel_code;
578 	*info = rel_info;
579 	*msg = rel_msg;
580 
581 out:
582 	rcu_read_unlock();
583 	return err;
584 }
585 
586 static int
587 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
588 	   u8 type, u8 code, int offset, __be32 info)
589 {
590 	__u32 rel_info = ntohl(info);
591 	const struct iphdr *eiph;
592 	struct sk_buff *skb2;
593 	int err, rel_msg = 0;
594 	u8 rel_type = type;
595 	u8 rel_code = code;
596 	struct rtable *rt;
597 	struct flowi4 fl4;
598 
599 	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
600 			  &rel_msg, &rel_info, offset);
601 	if (err < 0)
602 		return err;
603 
604 	if (rel_msg == 0)
605 		return 0;
606 
607 	switch (rel_type) {
608 	case ICMPV6_DEST_UNREACH:
609 		if (rel_code != ICMPV6_ADDR_UNREACH)
610 			return 0;
611 		rel_type = ICMP_DEST_UNREACH;
612 		rel_code = ICMP_HOST_UNREACH;
613 		break;
614 	case ICMPV6_PKT_TOOBIG:
615 		if (rel_code != 0)
616 			return 0;
617 		rel_type = ICMP_DEST_UNREACH;
618 		rel_code = ICMP_FRAG_NEEDED;
619 		break;
620 	default:
621 		return 0;
622 	}
623 
624 	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
625 		return 0;
626 
627 	skb2 = skb_clone(skb, GFP_ATOMIC);
628 	if (!skb2)
629 		return 0;
630 
631 	skb_dst_drop(skb2);
632 
633 	skb_pull(skb2, offset);
634 	skb_reset_network_header(skb2);
635 	eiph = ip_hdr(skb2);
636 
637 	/* Try to guess incoming interface */
638 	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr,
639 				   0, 0, 0, IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
640 	if (IS_ERR(rt))
641 		goto out;
642 
643 	skb2->dev = rt->dst.dev;
644 	ip_rt_put(rt);
645 
646 	/* route "incoming" packet */
647 	if (rt->rt_flags & RTCF_LOCAL) {
648 		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
649 					   eiph->daddr, eiph->saddr, 0, 0,
650 					   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
651 		if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
652 			if (!IS_ERR(rt))
653 				ip_rt_put(rt);
654 			goto out;
655 		}
656 		skb_dst_set(skb2, &rt->dst);
657 	} else {
658 		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
659 				   skb2->dev) ||
660 		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
661 			goto out;
662 	}
663 
664 	/* change mtu on this route */
665 	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
666 		if (rel_info > dst_mtu(skb_dst(skb2)))
667 			goto out;
668 
669 		skb_dst_update_pmtu_no_confirm(skb2, rel_info);
670 	}
671 
672 	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
673 
674 out:
675 	kfree_skb(skb2);
676 	return 0;
677 }
678 
679 static int
680 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
681 	   u8 type, u8 code, int offset, __be32 info)
682 {
683 	__u32 rel_info = ntohl(info);
684 	int err, rel_msg = 0;
685 	u8 rel_type = type;
686 	u8 rel_code = code;
687 
688 	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
689 			  &rel_msg, &rel_info, offset);
690 	if (err < 0)
691 		return err;
692 
693 	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
694 		struct rt6_info *rt;
695 		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
696 
697 		if (!skb2)
698 			return 0;
699 
700 		skb_dst_drop(skb2);
701 		skb_pull(skb2, offset);
702 		skb_reset_network_header(skb2);
703 
704 		/* Try to guess incoming interface */
705 		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
706 				NULL, 0, skb2, 0);
707 
708 		if (rt && rt->dst.dev)
709 			skb2->dev = rt->dst.dev;
710 
711 		icmpv6_send(skb2, rel_type, rel_code, rel_info);
712 
713 		ip6_rt_put(rt);
714 
715 		kfree_skb(skb2);
716 	}
717 
718 	return 0;
719 }
720 
721 static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
722 				       const struct ipv6hdr *ipv6h,
723 				       struct sk_buff *skb)
724 {
725 	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
726 
727 	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
728 		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
729 
730 	return IP6_ECN_decapsulate(ipv6h, skb);
731 }
732 
733 static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
734 				       const struct ipv6hdr *ipv6h,
735 				       struct sk_buff *skb)
736 {
737 	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
738 		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
739 
740 	return IP6_ECN_decapsulate(ipv6h, skb);
741 }
742 
743 __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
744 			     const struct in6_addr *laddr,
745 			     const struct in6_addr *raddr)
746 {
747 	struct __ip6_tnl_parm *p = &t->parms;
748 	int ltype = ipv6_addr_type(laddr);
749 	int rtype = ipv6_addr_type(raddr);
750 	__u32 flags = 0;
751 
752 	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
753 		flags = IP6_TNL_F_CAP_PER_PACKET;
754 	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
755 		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
756 		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
757 		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
758 		if (ltype&IPV6_ADDR_UNICAST)
759 			flags |= IP6_TNL_F_CAP_XMIT;
760 		if (rtype&IPV6_ADDR_UNICAST)
761 			flags |= IP6_TNL_F_CAP_RCV;
762 	}
763 	return flags;
764 }
765 EXPORT_SYMBOL(ip6_tnl_get_cap);
766 
767 /* called with rcu_read_lock() */
768 int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
769 				  const struct in6_addr *laddr,
770 				  const struct in6_addr *raddr)
771 {
772 	struct __ip6_tnl_parm *p = &t->parms;
773 	int ret = 0;
774 	struct net *net = t->net;
775 
776 	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
777 	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
778 	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
779 		struct net_device *ldev = NULL;
780 
781 		if (p->link)
782 			ldev = dev_get_by_index_rcu(net, p->link);
783 
784 		if ((ipv6_addr_is_multicast(laddr) ||
785 		     likely(ipv6_chk_addr_and_flags(net, laddr, ldev, false,
786 						    0, IFA_F_TENTATIVE))) &&
787 		    ((p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) ||
788 		     likely(!ipv6_chk_addr_and_flags(net, raddr, ldev, true,
789 						     0, IFA_F_TENTATIVE))))
790 			ret = 1;
791 	}
792 	return ret;
793 }
794 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
795 
796 static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
797 			 const struct tnl_ptk_info *tpi,
798 			 struct metadata_dst *tun_dst,
799 			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
800 						const struct ipv6hdr *ipv6h,
801 						struct sk_buff *skb),
802 			 bool log_ecn_err)
803 {
804 	struct pcpu_sw_netstats *tstats;
805 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
806 	int err;
807 
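	/* Drop the packet if the presence of the tunnel checksum flag in the
	 * packet disagrees with what this tunnel was configured to expect.
	 */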
808 	if ((!(tpi->flags & TUNNEL_CSUM) &&
809 	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
810 	    ((tpi->flags & TUNNEL_CSUM) &&
811 	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
812 		tunnel->dev->stats.rx_crc_errors++;
813 		tunnel->dev->stats.rx_errors++;
814 		goto drop;
815 	}
816 
817 	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
818 		if (!(tpi->flags & TUNNEL_SEQ) ||
819 		    (tunnel->i_seqno &&
820 		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
821 			tunnel->dev->stats.rx_fifo_errors++;
822 			tunnel->dev->stats.rx_errors++;
823 			goto drop;
824 		}
825 		tunnel->i_seqno = ntohl(tpi->seq) + 1;
826 	}
827 
828 	skb->protocol = tpi->proto;
829 
830 	/* Warning: All skb pointers will be invalidated! */
831 	if (tunnel->dev->type == ARPHRD_ETHER) {
832 		if (!pskb_may_pull(skb, ETH_HLEN)) {
833 			tunnel->dev->stats.rx_length_errors++;
834 			tunnel->dev->stats.rx_errors++;
835 			goto drop;
836 		}
837 
838 		ipv6h = ipv6_hdr(skb);
839 		skb->protocol = eth_type_trans(skb, tunnel->dev);
840 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
841 	} else {
842 		skb->dev = tunnel->dev;
843 	}
844 
845 	skb_reset_network_header(skb);
846 	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
847 
848 	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
849 
850 	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
851 	if (unlikely(err)) {
852 		if (log_ecn_err)
853 			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
854 					     &ipv6h->saddr,
855 					     ipv6_get_dsfield(ipv6h));
856 		if (err > 1) {
857 			++tunnel->dev->stats.rx_frame_errors;
858 			++tunnel->dev->stats.rx_errors;
859 			goto drop;
860 		}
861 	}
862 
863 	tstats = this_cpu_ptr(tunnel->dev->tstats);
864 	u64_stats_update_begin(&tstats->syncp);
865 	tstats->rx_packets++;
866 	tstats->rx_bytes += skb->len;
867 	u64_stats_update_end(&tstats->syncp);
868 
869 	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
870 
871 	if (tun_dst)
872 		skb_dst_set(skb, (struct dst_entry *)tun_dst);
873 
874 	gro_cells_receive(&tunnel->gro_cells, skb);
875 	return 0;
876 
877 drop:
878 	if (tun_dst)
879 		dst_release((struct dst_entry *)tun_dst);
880 	kfree_skb(skb);
881 	return 0;
882 }
883 
884 int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
885 		const struct tnl_ptk_info *tpi,
886 		struct metadata_dst *tun_dst,
887 		bool log_ecn_err)
888 {
889 	return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
890 			     log_ecn_err);
891 }
892 EXPORT_SYMBOL(ip6_tnl_rcv);
893 
894 static const struct tnl_ptk_info tpi_v6 = {
895 	/* no tunnel info required for ipxip6. */
896 	.proto = htons(ETH_P_IPV6),
897 };
898 
899 static const struct tnl_ptk_info tpi_v4 = {
900 	/* no tunnel info required for ipxip6. */
901 	.proto = htons(ETH_P_IP),
902 };
903 
904 static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
905 		      const struct tnl_ptk_info *tpi,
906 		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
907 						  const struct ipv6hdr *ipv6h,
908 						  struct sk_buff *skb))
909 {
910 	struct ip6_tnl *t;
911 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
912 	struct metadata_dst *tun_dst = NULL;
913 	int ret = -1;
914 
915 	rcu_read_lock();
916 	t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->saddr, &ipv6h->daddr);
917 
918 	if (t) {
919 		u8 tproto = READ_ONCE(t->parms.proto);
920 
921 		if (tproto != ipproto && tproto != 0)
922 			goto drop;
923 		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
924 			goto drop;
925 		ipv6h = ipv6_hdr(skb);
926 		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
927 			goto drop;
928 		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
929 			goto drop;
930 		if (t->parms.collect_md) {
931 			tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
932 			if (!tun_dst)
933 				goto drop;
934 		}
935 		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
936 				    log_ecn_error);
937 	}
938 
939 	rcu_read_unlock();
940 
941 	return ret;
942 
943 drop:
944 	rcu_read_unlock();
945 	kfree_skb(skb);
946 	return 0;
947 }
948 
949 static int ip4ip6_rcv(struct sk_buff *skb)
950 {
951 	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
952 			  ip4ip6_dscp_ecn_decapsulate);
953 }
954 
955 static int ip6ip6_rcv(struct sk_buff *skb)
956 {
957 	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
958 			  ip6ip6_dscp_ecn_decapsulate);
959 }
960 
961 struct ipv6_tel_txoption {
962 	struct ipv6_txoptions ops;
963 	__u8 dst_opt[8];
964 };
965 
966 static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
967 {
968 	memset(opt, 0, sizeof(struct ipv6_tel_txoption));
969 
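	/* Build an 8-byte Destination Options header: bytes 0-1 are the
	 * option header (next header is filled in on transmit, hdrlen 0
	 * means 8 bytes total), bytes 2-4 carry the RFC 2473 encapsulation
	 * limit TLV and bytes 5-7 a PadN option that pads the header out.
	 */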
970 	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
971 	opt->dst_opt[3] = 1;
972 	opt->dst_opt[4] = encap_limit;
973 	opt->dst_opt[5] = IPV6_TLV_PADN;
974 	opt->dst_opt[6] = 1;
975 
976 	opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt;
977 	opt->ops.opt_nflen = 8;
978 }
979 
980 /**
981  * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
982  *   @t: the outgoing tunnel device
983  *   @hdr: IPv6 header from the incoming packet
984  *
985  * Description:
986  *   Avoid trivial tunneling loop by checking that tunnel exit-point
987  *   doesn't match source of incoming packet.
988  *
989  * Return:
990  *   1 if conflict,
991  *   0 otherwise
992  **/
993 
994 static inline bool
995 ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
996 {
997 	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
998 }
999 
1000 int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
1001 		     const struct in6_addr *laddr,
1002 		     const struct in6_addr *raddr)
1003 {
1004 	struct __ip6_tnl_parm *p = &t->parms;
1005 	int ret = 0;
1006 	struct net *net = t->net;
1007 
1008 	if (t->parms.collect_md)
1009 		return 1;
1010 
1011 	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
1012 	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
1013 	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
1014 		struct net_device *ldev = NULL;
1015 
1016 		rcu_read_lock();
1017 		if (p->link)
1018 			ldev = dev_get_by_index_rcu(net, p->link);
1019 
1020 		if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false,
1021 						      0, IFA_F_TENTATIVE)))
1022 			pr_warn("%s xmit: Local address not yet configured!\n",
1023 				p->name);
1024 		else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
1025 			 !ipv6_addr_is_multicast(raddr) &&
1026 			 unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev,
1027 							  true, 0, IFA_F_TENTATIVE)))
1028 			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
1029 				p->name);
1030 		else
1031 			ret = 1;
1032 		rcu_read_unlock();
1033 	}
1034 	return ret;
1035 }
1036 EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
1037 
1038 /**
1039  * ip6_tnl_xmit - encapsulate packet and send
1040  *   @skb: the outgoing socket buffer
1041  *   @dev: the outgoing tunnel device
1042  *   @dsfield: dscp code for outer header
1043  *   @fl6: flow of tunneled packet
1044  *   @encap_limit: encapsulation limit
1045  *   @pmtu: Path MTU is stored if packet is too big
1046  *   @proto: next header value
1047  *
1048  * Description:
1049  *   Build new header and do some sanity checks on the packet before sending
1050  *   it.
1051  *
1052  * Return:
1053  *   0 on success,
1054  *   -1 on failure,
1055  *   %-EMSGSIZE if the message is too big; the MTU is returned via @pmtu in this case.
1056  **/
1057 
1058 int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1059 		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
1060 		 __u8 proto)
1061 {
1062 	struct ip6_tnl *t = netdev_priv(dev);
1063 	struct net *net = t->net;
1064 	struct net_device_stats *stats = &t->dev->stats;
1065 	struct ipv6hdr *ipv6h;
1066 	struct ipv6_tel_txoption opt;
1067 	struct dst_entry *dst = NULL, *ndst = NULL;
1068 	struct net_device *tdev;
1069 	int mtu;
1070 	unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
1071 	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
1072 	unsigned int max_headroom = psh_hlen;
1073 	bool use_cache = false;
1074 	u8 hop_limit;
1075 	int err = -1;
1076 
1077 	if (t->parms.collect_md) {
1078 		hop_limit = skb_tunnel_info(skb)->key.ttl;
1079 		goto route_lookup;
1080 	} else {
1081 		hop_limit = t->parms.hop_limit;
1082 	}
1083 
1084 	/* NBMA tunnel */
1085 	if (ipv6_addr_any(&t->parms.raddr)) {
1086 		if (skb->protocol == htons(ETH_P_IPV6)) {
1087 			struct in6_addr *addr6;
1088 			struct neighbour *neigh;
1089 			int addr_type;
1090 
1091 			if (!skb_dst(skb))
1092 				goto tx_err_link_failure;
1093 
1094 			neigh = dst_neigh_lookup(skb_dst(skb),
1095 						 &ipv6_hdr(skb)->daddr);
1096 			if (!neigh)
1097 				goto tx_err_link_failure;
1098 
1099 			addr6 = (struct in6_addr *)&neigh->primary_key;
1100 			addr_type = ipv6_addr_type(addr6);
1101 
1102 			if (addr_type == IPV6_ADDR_ANY)
1103 				addr6 = &ipv6_hdr(skb)->daddr;
1104 
1105 			memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1106 			neigh_release(neigh);
1107 		}
1108 	} else if (t->parms.proto != 0 && !(t->parms.flags &
1109 					    (IP6_TNL_F_USE_ORIG_TCLASS |
1110 					     IP6_TNL_F_USE_ORIG_FWMARK))) {
1111 		/* enable the cache only if neither the outer protocol nor the
1112 		 * routing decision depends on the current inner header value
1113 		 */
1114 		use_cache = true;
1115 	}
1116 
1117 	if (use_cache)
1118 		dst = dst_cache_get(&t->dst_cache);
1119 
1120 	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
1121 		goto tx_err_link_failure;
1122 
1123 	if (!dst) {
1124 route_lookup:
1125 		/* add dsfield to flowlabel for route lookup */
1126 		fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
1127 
1128 		dst = ip6_route_output(net, NULL, fl6);
1129 
1130 		if (dst->error)
1131 			goto tx_err_link_failure;
1132 		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
1133 		if (IS_ERR(dst)) {
1134 			err = PTR_ERR(dst);
1135 			dst = NULL;
1136 			goto tx_err_link_failure;
1137 		}
1138 		if (t->parms.collect_md && ipv6_addr_any(&fl6->saddr) &&
1139 		    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
1140 				       &fl6->daddr, 0, &fl6->saddr))
1141 			goto tx_err_link_failure;
1142 		ndst = dst;
1143 	}
1144 
1145 	tdev = dst->dev;
1146 
1147 	if (tdev == dev) {
1148 		stats->collisions++;
1149 		net_warn_ratelimited("%s: Local routing loop detected!\n",
1150 				     t->parms.name);
1151 		goto tx_err_dst_release;
1152 	}
1153 	mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
1154 	if (encap_limit >= 0) {
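		/* leave room for the 8-byte encapsulation limit
		 * destination options header added below
		 */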
1155 		max_headroom += 8;
1156 		mtu -= 8;
1157 	}
1158 	mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
1159 		       IPV6_MIN_MTU : IPV4_MIN_MTU);
1160 
1161 	skb_dst_update_pmtu_no_confirm(skb, mtu);
1162 	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
1163 		*pmtu = mtu;
1164 		err = -EMSGSIZE;
1165 		goto tx_err_dst_release;
1166 	}
1167 
1168 	if (t->err_count > 0) {
1169 		if (time_before(jiffies,
1170 				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
1171 			t->err_count--;
1172 
1173 			dst_link_failure(skb);
1174 		} else {
1175 			t->err_count = 0;
1176 		}
1177 	}
1178 
1179 	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
1180 
1181 	/*
1182 	 * Okay, now see if we can stuff it in the buffer as-is.
1183 	 */
1184 	max_headroom += LL_RESERVED_SPACE(tdev);
1185 
1186 	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
1187 	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
1188 		struct sk_buff *new_skb;
1189 
1190 		new_skb = skb_realloc_headroom(skb, max_headroom);
1191 		if (!new_skb)
1192 			goto tx_err_dst_release;
1193 
1194 		if (skb->sk)
1195 			skb_set_owner_w(new_skb, skb->sk);
1196 		consume_skb(skb);
1197 		skb = new_skb;
1198 	}
1199 
1200 	if (t->parms.collect_md) {
1201 		if (t->encap.type != TUNNEL_ENCAP_NONE)
1202 			goto tx_err_dst_release;
1203 	} else {
1204 		if (use_cache && ndst)
1205 			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
1206 	}
1207 	skb_dst_set(skb, dst);
1208 
1209 	if (hop_limit == 0) {
1210 		if (skb->protocol == htons(ETH_P_IP))
1211 			hop_limit = ip_hdr(skb)->ttl;
1212 		else if (skb->protocol == htons(ETH_P_IPV6))
1213 			hop_limit = ipv6_hdr(skb)->hop_limit;
1214 		else
1215 			hop_limit = ip6_dst_hoplimit(dst);
1216 	}
1217 
1218 	/* Calculate max headroom for all the headers and adjust
1219 	 * needed_headroom if necessary.
1220 	 */
1221 	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
1222 			+ dst->header_len + t->hlen;
1223 	if (max_headroom > dev->needed_headroom)
1224 		dev->needed_headroom = max_headroom;
1225 
1226 	err = ip6_tnl_encap(skb, t, &proto, fl6);
1227 	if (err)
1228 		return err;
1229 
1230 	if (encap_limit >= 0) {
1231 		init_tel_txopt(&opt, encap_limit);
1232 		ipv6_push_frag_opts(skb, &opt.ops, &proto);
1233 	}
1234 
1235 	skb_push(skb, sizeof(struct ipv6hdr));
1236 	skb_reset_network_header(skb);
1237 	ipv6h = ipv6_hdr(skb);
1238 	ip6_flow_hdr(ipv6h, dsfield,
1239 		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
1240 	ipv6h->hop_limit = hop_limit;
1241 	ipv6h->nexthdr = proto;
1242 	ipv6h->saddr = fl6->saddr;
1243 	ipv6h->daddr = fl6->daddr;
1244 	ip6tunnel_xmit(NULL, skb, dev);
1245 	return 0;
1246 tx_err_link_failure:
1247 	stats->tx_carrier_errors++;
1248 	dst_link_failure(skb);
1249 tx_err_dst_release:
1250 	dst_release(dst);
1251 	return err;
1252 }
1253 EXPORT_SYMBOL(ip6_tnl_xmit);
1254 
1255 static inline int
1256 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1257 {
1258 	struct ip6_tnl *t = netdev_priv(dev);
1259 	const struct iphdr  *iph;
1260 	int encap_limit = -1;
1261 	struct flowi6 fl6;
1262 	__u8 dsfield;
1263 	__u32 mtu;
1264 	u8 tproto;
1265 	int err;
1266 
1267 	iph = ip_hdr(skb);
1268 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1269 
1270 	tproto = READ_ONCE(t->parms.proto);
1271 	if (tproto != IPPROTO_IPIP && tproto != 0)
1272 		return -1;
1273 
1274 	if (t->parms.collect_md) {
1275 		struct ip_tunnel_info *tun_info;
1276 		const struct ip_tunnel_key *key;
1277 
1278 		tun_info = skb_tunnel_info(skb);
1279 		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1280 			     ip_tunnel_info_af(tun_info) != AF_INET6))
1281 			return -1;
1282 		key = &tun_info->key;
1283 		memset(&fl6, 0, sizeof(fl6));
1284 		fl6.flowi6_proto = IPPROTO_IPIP;
1285 		fl6.saddr = key->u.ipv6.src;
1286 		fl6.daddr = key->u.ipv6.dst;
1287 		fl6.flowlabel = key->label;
1288 		dsfield = key->tos;
1289 	} else {
1290 		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1291 			encap_limit = t->parms.encap_limit;
1292 
1293 		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1294 		fl6.flowi6_proto = IPPROTO_IPIP;
1295 
1296 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1297 			dsfield = ipv4_get_dsfield(iph);
1298 		else
1299 			dsfield = ip6_tclass(t->parms.flowinfo);
1300 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1301 			fl6.flowi6_mark = skb->mark;
1302 		else
1303 			fl6.flowi6_mark = t->parms.fwmark;
1304 	}
1305 
1306 	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1307 	dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
1308 
1309 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1310 		return -1;
1311 
1312 	skb_set_inner_ipproto(skb, IPPROTO_IPIP);
1313 
1314 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1315 			   IPPROTO_IPIP);
1316 	if (err != 0) {
1317 		/* XXX: send ICMP error even if DF is not set. */
1318 		if (err == -EMSGSIZE)
1319 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
1320 				  htonl(mtu));
1321 		return -1;
1322 	}
1323 
1324 	return 0;
1325 }
1326 
1327 static inline int
1328 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1329 {
1330 	struct ip6_tnl *t = netdev_priv(dev);
1331 	struct ipv6hdr *ipv6h;
1332 	int encap_limit = -1;
1333 	__u16 offset;
1334 	struct flowi6 fl6;
1335 	__u8 dsfield;
1336 	__u32 mtu;
1337 	u8 tproto;
1338 	int err;
1339 
1340 	ipv6h = ipv6_hdr(skb);
1341 	tproto = READ_ONCE(t->parms.proto);
1342 	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
1343 	    ip6_tnl_addr_conflict(t, ipv6h))
1344 		return -1;
1345 
1346 	if (t->parms.collect_md) {
1347 		struct ip_tunnel_info *tun_info;
1348 		const struct ip_tunnel_key *key;
1349 
1350 		tun_info = skb_tunnel_info(skb);
1351 		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1352 			     ip_tunnel_info_af(tun_info) != AF_INET6))
1353 			return -1;
1354 		key = &tun_info->key;
1355 		memset(&fl6, 0, sizeof(fl6));
1356 		fl6.flowi6_proto = IPPROTO_IPV6;
1357 		fl6.saddr = key->u.ipv6.src;
1358 		fl6.daddr = key->u.ipv6.dst;
1359 		fl6.flowlabel = key->label;
1360 		dsfield = key->tos;
1361 	} else {
1362 		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1363 		/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
1364 		ipv6h = ipv6_hdr(skb);
1365 		if (offset > 0) {
1366 			struct ipv6_tlv_tnl_enc_lim *tel;
1367 
1368 			tel = (void *)&skb_network_header(skb)[offset];
1369 			if (tel->encap_limit == 0) {
1370 				icmpv6_send(skb, ICMPV6_PARAMPROB,
1371 					    ICMPV6_HDR_FIELD, offset + 2);
1372 				return -1;
1373 			}
1374 			encap_limit = tel->encap_limit - 1;
1375 		} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
1376 			encap_limit = t->parms.encap_limit;
1377 		}
1378 
1379 		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1380 		fl6.flowi6_proto = IPPROTO_IPV6;
1381 
1382 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1383 			dsfield = ipv6_get_dsfield(ipv6h);
1384 		else
1385 			dsfield = ip6_tclass(t->parms.flowinfo);
1386 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
1387 			fl6.flowlabel |= ip6_flowlabel(ipv6h);
1388 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1389 			fl6.flowi6_mark = skb->mark;
1390 		else
1391 			fl6.flowi6_mark = t->parms.fwmark;
1392 	}
1393 
1394 	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1395 	dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
1396 
1397 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1398 		return -1;
1399 
1400 	skb_set_inner_ipproto(skb, IPPROTO_IPV6);
1401 
1402 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1403 			   IPPROTO_IPV6);
1404 	if (err != 0) {
1405 		if (err == -EMSGSIZE)
1406 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1407 		return -1;
1408 	}
1409 
1410 	return 0;
1411 }
1412 
1413 static netdev_tx_t
1414 ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
1415 {
1416 	struct ip6_tnl *t = netdev_priv(dev);
1417 	struct net_device_stats *stats = &t->dev->stats;
1418 	int ret;
1419 
1420 	if (!pskb_inet_may_pull(skb))
1421 		goto tx_err;
1422 
1423 	switch (skb->protocol) {
1424 	case htons(ETH_P_IP):
1425 		ret = ip4ip6_tnl_xmit(skb, dev);
1426 		break;
1427 	case htons(ETH_P_IPV6):
1428 		ret = ip6ip6_tnl_xmit(skb, dev);
1429 		break;
1430 	default:
1431 		goto tx_err;
1432 	}
1433 
1434 	if (ret < 0)
1435 		goto tx_err;
1436 
1437 	return NETDEV_TX_OK;
1438 
1439 tx_err:
1440 	stats->tx_errors++;
1441 	stats->tx_dropped++;
1442 	kfree_skb(skb);
1443 	return NETDEV_TX_OK;
1444 }
1445 
1446 static void ip6_tnl_link_config(struct ip6_tnl *t)
1447 {
1448 	struct net_device *dev = t->dev;
1449 	struct net_device *tdev = NULL;
1450 	struct __ip6_tnl_parm *p = &t->parms;
1451 	struct flowi6 *fl6 = &t->fl.u.ip6;
1452 	unsigned int mtu;
1453 	int t_hlen;
1454 
1455 	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1456 	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1457 
1458 	/* Set up flowi template */
1459 	fl6->saddr = p->laddr;
1460 	fl6->daddr = p->raddr;
1461 	fl6->flowi6_oif = p->link;
1462 	fl6->flowlabel = 0;
1463 
1464 	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1465 		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1466 	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1467 		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1468 
1469 	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1470 	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1471 
1472 	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
1473 		dev->flags |= IFF_POINTOPOINT;
1474 	else
1475 		dev->flags &= ~IFF_POINTOPOINT;
1476 
1477 	t->tun_hlen = 0;
1478 	t->hlen = t->encap_hlen + t->tun_hlen;
1479 	t_hlen = t->hlen + sizeof(struct ipv6hdr);
1480 
1481 	if (p->flags & IP6_TNL_F_CAP_XMIT) {
1482 		int strict = (ipv6_addr_type(&p->raddr) &
1483 			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1484 
1485 		struct rt6_info *rt = rt6_lookup(t->net,
1486 						 &p->raddr, &p->laddr,
1487 						 p->link, NULL, strict);
1488 		if (rt) {
1489 			tdev = rt->dst.dev;
1490 			ip6_rt_put(rt);
1491 		}
1492 
1493 		if (!tdev && p->link)
1494 			tdev = __dev_get_by_index(t->net, p->link);
1495 
1496 		if (tdev) {
1497 			dev->hard_header_len = tdev->hard_header_len + t_hlen;
1498 			mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU);
1499 
1500 			dev->mtu = mtu - t_hlen;
1501 			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1502 				dev->mtu -= 8;
1503 
1504 			if (dev->mtu < IPV6_MIN_MTU)
1505 				dev->mtu = IPV6_MIN_MTU;
1506 		}
1507 	}
1508 }
1509 
1510 /**
1511  * ip6_tnl_change - update the tunnel parameters
1512  *   @t: tunnel to be changed
1513  *   @p: tunnel configuration parameters
1514  *
1515  * Description:
1516  *   ip6_tnl_change() updates the tunnel parameters
1517  **/
1518 
1519 static int
1520 ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
1521 {
1522 	t->parms.laddr = p->laddr;
1523 	t->parms.raddr = p->raddr;
1524 	t->parms.flags = p->flags;
1525 	t->parms.hop_limit = p->hop_limit;
1526 	t->parms.encap_limit = p->encap_limit;
1527 	t->parms.flowinfo = p->flowinfo;
1528 	t->parms.link = p->link;
1529 	t->parms.proto = p->proto;
1530 	t->parms.fwmark = p->fwmark;
1531 	dst_cache_reset(&t->dst_cache);
1532 	ip6_tnl_link_config(t);
1533 	return 0;
1534 }
1535 
1536 static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1537 {
1538 	struct net *net = t->net;
1539 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1540 	int err;
1541 
1542 	ip6_tnl_unlink(ip6n, t);
1543 	synchronize_net();
1544 	err = ip6_tnl_change(t, p);
1545 	ip6_tnl_link(ip6n, t);
1546 	netdev_state_change(t->dev);
1547 	return err;
1548 }
1549 
1550 static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1551 {
1552 	/* for the default tnl0 device, only the protocol may be changed */
1553 	t->parms.proto = p->proto;
1554 	netdev_state_change(t->dev);
1555 	return 0;
1556 }
1557 
1558 static void
1559 ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
1560 {
1561 	p->laddr = u->laddr;
1562 	p->raddr = u->raddr;
1563 	p->flags = u->flags;
1564 	p->hop_limit = u->hop_limit;
1565 	p->encap_limit = u->encap_limit;
1566 	p->flowinfo = u->flowinfo;
1567 	p->link = u->link;
1568 	p->proto = u->proto;
1569 	memcpy(p->name, u->name, sizeof(u->name));
1570 }
1571 
1572 static void
1573 ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
1574 {
1575 	u->laddr = p->laddr;
1576 	u->raddr = p->raddr;
1577 	u->flags = p->flags;
1578 	u->hop_limit = p->hop_limit;
1579 	u->encap_limit = p->encap_limit;
1580 	u->flowinfo = p->flowinfo;
1581 	u->link = p->link;
1582 	u->proto = p->proto;
1583 	memcpy(u->name, p->name, sizeof(u->name));
1584 }
1585 
1586 /**
1587  * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
1588  *   @dev: virtual device associated with tunnel
1589  *   @ifr: parameters passed from userspace
1590  *   @cmd: command to be performed
1591  *
1592  * Description:
1593  *   ip6_tnl_ioctl() is used for managing IPv6 tunnels
1594  *   from userspace.
1595  *
1596  *   The possible commands are the following:
1597  *     %SIOCGETTUNNEL: get tunnel parameters for device
1598  *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
1599  *     %SIOCCHGTUNNEL: change tunnel parameters to those given
1600  *     %SIOCDELTUNNEL: delete tunnel
1601  *
1602  *   The fallback device "ip6tnl0", created during module
1603  *   initialization, can be used for creating other tunnel devices.
1604  *
1605  * Return:
1606  *   0 on success,
1607  *   %-EFAULT if unable to copy data to or from userspace,
1608  *   %-EPERM if the current process does not have the %CAP_NET_ADMIN capability,
1609  *   %-EINVAL if passed tunnel parameters are invalid,
1610  *   %-EEXIST if changing a tunnel's parameters would cause a conflict
1611  *   %-ENODEV if attempting to change or delete a nonexistent device
1612  **/
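/*
 * Illustrative userspace sketch (not part of this file): the tunnel
 * parameters travel in a struct ip6_tnl_parm pointed to by
 * ifr_ifru.ifru_data, roughly:
 *
 *	struct ip6_tnl_parm p = { .proto = IPPROTO_IPV6 };
 *	struct ifreq ifr = { .ifr_name = "ip6tnl0" };
 *
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);
 *
 * where fd is an AF_INET6 datagram socket.
 */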
1613 
1614 static int
1615 ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1616 {
1617 	int err = 0;
1618 	struct ip6_tnl_parm p;
1619 	struct __ip6_tnl_parm p1;
1620 	struct ip6_tnl *t = netdev_priv(dev);
1621 	struct net *net = t->net;
1622 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1623 
1624 	memset(&p1, 0, sizeof(p1));
1625 
1626 	switch (cmd) {
1627 	case SIOCGETTUNNEL:
1628 		if (dev == ip6n->fb_tnl_dev) {
1629 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1630 				err = -EFAULT;
1631 				break;
1632 			}
1633 			ip6_tnl_parm_from_user(&p1, &p);
1634 			t = ip6_tnl_locate(net, &p1, 0);
1635 			if (IS_ERR(t))
1636 				t = netdev_priv(dev);
1637 		} else {
1638 			memset(&p, 0, sizeof(p));
1639 		}
1640 		ip6_tnl_parm_to_user(&p, &t->parms);
1641 		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
1642 			err = -EFAULT;
1643 		}
1644 		break;
1645 	case SIOCADDTUNNEL:
1646 	case SIOCCHGTUNNEL:
1647 		err = -EPERM;
1648 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1649 			break;
1650 		err = -EFAULT;
1651 		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1652 			break;
1653 		err = -EINVAL;
1654 		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1655 		    p.proto != 0)
1656 			break;
1657 		ip6_tnl_parm_from_user(&p1, &p);
1658 		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
1659 		if (cmd == SIOCCHGTUNNEL) {
1660 			if (!IS_ERR(t)) {
1661 				if (t->dev != dev) {
1662 					err = -EEXIST;
1663 					break;
1664 				}
1665 			} else
1666 				t = netdev_priv(dev);
1667 			if (dev == ip6n->fb_tnl_dev)
1668 				err = ip6_tnl0_update(t, &p1);
1669 			else
1670 				err = ip6_tnl_update(t, &p1);
1671 		}
1672 		if (!IS_ERR(t)) {
1673 			err = 0;
1674 			ip6_tnl_parm_to_user(&p, &t->parms);
1675 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1676 				err = -EFAULT;
1677 
1678 		} else {
1679 			err = PTR_ERR(t);
1680 		}
1681 		break;
1682 	case SIOCDELTUNNEL:
1683 		err = -EPERM;
1684 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1685 			break;
1686 
1687 		if (dev == ip6n->fb_tnl_dev) {
1688 			err = -EFAULT;
1689 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1690 				break;
1691 			err = -ENOENT;
1692 			ip6_tnl_parm_from_user(&p1, &p);
1693 			t = ip6_tnl_locate(net, &p1, 0);
1694 			if (IS_ERR(t))
1695 				break;
1696 			err = -EPERM;
1697 			if (t->dev == ip6n->fb_tnl_dev)
1698 				break;
1699 			dev = t->dev;
1700 		}
1701 		err = 0;
1702 		unregister_netdevice(dev);
1703 		break;
1704 	default:
1705 		err = -EINVAL;
1706 	}
1707 	return err;
1708 }
1709 
1710 /**
1711  * ip6_tnl_change_mtu - change mtu manually for tunnel device
1712  *   @dev: virtual device associated with tunnel
1713  *   @new_mtu: the new mtu
1714  *
1715  * Return:
1716  *   0 on success,
1717  *   %-EINVAL if mtu is out of range
1718  **/
1719 
1720 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1721 {
1722 	struct ip6_tnl *tnl = netdev_priv(dev);
1723 
1724 	if (tnl->parms.proto == IPPROTO_IPV6) {
1725 		if (new_mtu < IPV6_MIN_MTU)
1726 			return -EINVAL;
1727 	} else {
1728 		if (new_mtu < ETH_MIN_MTU)
1729 			return -EINVAL;
1730 	}
1731 	if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
1732 		if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
1733 			return -EINVAL;
1734 	} else {
1735 		if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
1736 			return -EINVAL;
1737 	}
1738 	dev->mtu = new_mtu;
1739 	return 0;
1740 }
1741 EXPORT_SYMBOL(ip6_tnl_change_mtu);
1742 
1743 int ip6_tnl_get_iflink(const struct net_device *dev)
1744 {
1745 	struct ip6_tnl *t = netdev_priv(dev);
1746 
1747 	return t->parms.link;
1748 }
1749 EXPORT_SYMBOL(ip6_tnl_get_iflink);
1750 
1751 int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
1752 			  unsigned int num)
1753 {
1754 	if (num >= MAX_IPTUN_ENCAP_OPS)
1755 		return -ERANGE;
1756 
1757 	return !cmpxchg((const struct ip6_tnl_encap_ops **)
1758 			&ip6tun_encaps[num],
1759 			NULL, ops) ? 0 : -1;
1760 }
1761 EXPORT_SYMBOL(ip6_tnl_encap_add_ops);
1762 
1763 int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
1764 			  unsigned int num)
1765 {
1766 	int ret;
1767 
1768 	if (num >= MAX_IPTUN_ENCAP_OPS)
1769 		return -ERANGE;
1770 
1771 	ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
1772 		       &ip6tun_encaps[num],
1773 		       ops, NULL) == ops) ? 0 : -1;
1774 
1775 	synchronize_net();
1776 
1777 	return ret;
1778 }
1779 EXPORT_SYMBOL(ip6_tnl_encap_del_ops);
1780 
1781 int ip6_tnl_encap_setup(struct ip6_tnl *t,
1782 			struct ip_tunnel_encap *ipencap)
1783 {
1784 	int hlen;
1785 
1786 	memset(&t->encap, 0, sizeof(t->encap));
1787 
1788 	hlen = ip6_encap_hlen(ipencap);
1789 	if (hlen < 0)
1790 		return hlen;
1791 
1792 	t->encap.type = ipencap->type;
1793 	t->encap.sport = ipencap->sport;
1794 	t->encap.dport = ipencap->dport;
1795 	t->encap.flags = ipencap->flags;
1796 
1797 	t->encap_hlen = hlen;
1798 	t->hlen = t->encap_hlen + t->tun_hlen;
1799 
1800 	return 0;
1801 }
1802 EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
1803 
1804 static const struct net_device_ops ip6_tnl_netdev_ops = {
1805 	.ndo_init	= ip6_tnl_dev_init,
1806 	.ndo_uninit	= ip6_tnl_dev_uninit,
1807 	.ndo_start_xmit = ip6_tnl_start_xmit,
1808 	.ndo_do_ioctl	= ip6_tnl_ioctl,
1809 	.ndo_change_mtu = ip6_tnl_change_mtu,
1810 	.ndo_get_stats	= ip6_get_stats,
1811 	.ndo_get_iflink = ip6_tnl_get_iflink,
1812 };
1813 
1814 #define IPXIPX_FEATURES (NETIF_F_SG |		\
1815 			 NETIF_F_FRAGLIST |	\
1816 			 NETIF_F_HIGHDMA |	\
1817 			 NETIF_F_GSO_SOFTWARE |	\
1818 			 NETIF_F_HW_CSUM)
1819 
1820 /**
1821  * ip6_tnl_dev_setup - setup virtual tunnel device
1822  *   @dev: virtual device associated with tunnel
1823  *
1824  * Description:
1825  *   Initialize function pointers and device parameters
1826  **/
1827 
1828 static void ip6_tnl_dev_setup(struct net_device *dev)
1829 {
1830 	dev->netdev_ops = &ip6_tnl_netdev_ops;
1831 	dev->needs_free_netdev = true;
1832 	dev->priv_destructor = ip6_dev_free;
1833 
1834 	dev->type = ARPHRD_TUNNEL6;
1835 	dev->flags |= IFF_NOARP;
1836 	dev->addr_len = sizeof(struct in6_addr);
1837 	dev->features |= NETIF_F_LLTX;
1838 	netif_keep_dst(dev);
1839 
1840 	dev->features		|= IPXIPX_FEATURES;
1841 	dev->hw_features	|= IPXIPX_FEATURES;
1842 
1843 	/* This perm addr will be used as interface identifier by IPv6 */
1844 	dev->addr_assign_type = NET_ADDR_RANDOM;
1845 	eth_random_addr(dev->perm_addr);
1846 }
1847 
1848 
1849 /**
1850  * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
1851  *   @dev: virtual device associated with tunnel
1852  **/
1853 
1854 static inline int
1855 ip6_tnl_dev_init_gen(struct net_device *dev)
1856 {
1857 	struct ip6_tnl *t = netdev_priv(dev);
1858 	int ret;
1859 	int t_hlen;
1860 
1861 	t->dev = dev;
1862 	t->net = dev_net(dev);
1863 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1864 	if (!dev->tstats)
1865 		return -ENOMEM;
1866 
1867 	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
1868 	if (ret)
1869 		goto free_stats;
1870 
1871 	ret = gro_cells_init(&t->gro_cells, dev);
1872 	if (ret)
1873 		goto destroy_dst;
1874 
1875 	t->tun_hlen = 0;
1876 	t->hlen = t->encap_hlen + t->tun_hlen;
1877 	t_hlen = t->hlen + sizeof(struct ipv6hdr);
1878 
1879 	dev->type = ARPHRD_TUNNEL6;
1880 	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1881 	dev->mtu = ETH_DATA_LEN - t_hlen;
1882 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1883 		dev->mtu -= 8;
1884 	dev->min_mtu = ETH_MIN_MTU;
1885 	dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
1886 
1887 	return 0;
1888 
1889 destroy_dst:
1890 	dst_cache_destroy(&t->dst_cache);
1891 free_stats:
1892 	free_percpu(dev->tstats);
1893 	dev->tstats = NULL;
1894 
1895 	return ret;
1896 }
1897 
1898 /**
1899  * ip6_tnl_dev_init - initializer for all non-fallback tunnel devices
1900  *   @dev: virtual device associated with tunnel
1901  **/
1902 
1903 static int ip6_tnl_dev_init(struct net_device *dev)
1904 {
1905 	struct ip6_tnl *t = netdev_priv(dev);
1906 	int err = ip6_tnl_dev_init_gen(dev);
1907 
1908 	if (err)
1909 		return err;
1910 	ip6_tnl_link_config(t);
1911 	if (t->parms.collect_md)
1912 		netif_keep_dst(dev);
1913 	return 0;
1914 }
1915 
1916 /**
1917  * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
1918  *   @dev: fallback device
1919  *
1920  * Return: 0
1921  **/
1922 
1923 static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1924 {
1925 	struct ip6_tnl *t = netdev_priv(dev);
1926 	struct net *net = dev_net(dev);
1927 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1928 
1929 	t->parms.proto = IPPROTO_IPV6;
1930 	dev_hold(dev);
1931 
1932 	rcu_assign_pointer(ip6n->tnls_wc[0], t);
1933 	return 0;
1934 }
1935 
1936 static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[],
1937 			    struct netlink_ext_ack *extack)
1938 {
1939 	u8 proto;
1940 
1941 	if (!data || !data[IFLA_IPTUN_PROTO])
1942 		return 0;
1943 
1944 	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1945 	if (proto != IPPROTO_IPV6 &&
1946 	    proto != IPPROTO_IPIP &&
1947 	    proto != 0)
1948 		return -EINVAL;
1949 
1950 	return 0;
1951 }
1952 
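/* The IFLA_IPTUN_* attributes below correspond to the iproute2 rtnetlink
 * interface, e.g. (illustrative):
 *
 *	ip link add ip6tnl1 type ip6tnl mode ip6ip6 \
 *		local 2001:db8::1 remote 2001:db8::2 encaplimit 4
 */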
1953 static void ip6_tnl_netlink_parms(struct nlattr *data[],
1954 				  struct __ip6_tnl_parm *parms)
1955 {
1956 	memset(parms, 0, sizeof(*parms));
1957 
1958 	if (!data)
1959 		return;
1960 
1961 	if (data[IFLA_IPTUN_LINK])
1962 		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
1963 
1964 	if (data[IFLA_IPTUN_LOCAL])
1965 		parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);
1966 
1967 	if (data[IFLA_IPTUN_REMOTE])
1968 		parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);
1969 
1970 	if (data[IFLA_IPTUN_TTL])
1971 		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
1972 
1973 	if (data[IFLA_IPTUN_ENCAP_LIMIT])
1974 		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);
1975 
1976 	if (data[IFLA_IPTUN_FLOWINFO])
1977 		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);
1978 
1979 	if (data[IFLA_IPTUN_FLAGS])
1980 		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);
1981 
1982 	if (data[IFLA_IPTUN_PROTO])
1983 		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1984 
1985 	if (data[IFLA_IPTUN_COLLECT_METADATA])
1986 		parms->collect_md = true;
1987 
1988 	if (data[IFLA_IPTUN_FWMARK])
1989 		parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
1990 }
1991 
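/**
 * ip6_tnl_netlink_encap_parms - parse encapsulation netlink attributes
 *   @data: IPv6 tunnel specific attributes
 *   @ipencap: encapsulation parameters to fill in
 *
 * Return: true if at least one encapsulation attribute was supplied
 **/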
1992 static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
1993 					struct ip_tunnel_encap *ipencap)
1994 {
1995 	bool ret = false;
1996 
1997 	memset(ipencap, 0, sizeof(*ipencap));
1998 
1999 	if (!data)
2000 		return ret;
2001 
2002 	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
2003 		ret = true;
2004 		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
2005 	}
2006 
2007 	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
2008 		ret = true;
2009 		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
2010 	}
2011 
2012 	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
2013 		ret = true;
2014 		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
2015 	}
2016 
2017 	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
2018 		ret = true;
2019 		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
2020 	}
2021 
2022 	return ret;
2023 }
2024 
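/**
 * ip6_tnl_newlink - configure and register a tunnel created via rtnetlink
 *   @src_net: source network namespace of the request
 *   @dev: tunnel device being created
 *   @tb: generic link attributes
 *   @data: IPv6 tunnel specific attributes
 *   @extack: extended ack used for error reporting
 *
 * Fails with -EEXIST if an equivalent tunnel (or, for collect_md, any
 * metadata based tunnel) already exists in the namespace.  An illustrative
 * iproute2 invocation that ends up here:
 *
 *   ip link add ip6tnl1 type ip6tnl mode ip6ip6 \
 *       local 2001:db8::1 remote 2001:db8::2
 **/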
2025 static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
2026 			   struct nlattr *tb[], struct nlattr *data[],
2027 			   struct netlink_ext_ack *extack)
2028 {
2029 	struct net *net = dev_net(dev);
2030 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2031 	struct ip_tunnel_encap ipencap;
2032 	struct ip6_tnl *nt, *t;
2033 	int err;
2034 
2035 	nt = netdev_priv(dev);
2036 
2037 	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
2038 		err = ip6_tnl_encap_setup(nt, &ipencap);
2039 		if (err < 0)
2040 			return err;
2041 	}
2042 
2043 	ip6_tnl_netlink_parms(data, &nt->parms);
2044 
2045 	if (nt->parms.collect_md) {
2046 		if (rtnl_dereference(ip6n->collect_md_tun))
2047 			return -EEXIST;
2048 	} else {
2049 		t = ip6_tnl_locate(net, &nt->parms, 0);
2050 		if (!IS_ERR(t))
2051 			return -EEXIST;
2052 	}
2053 
2054 	err = ip6_tnl_create2(dev);
2055 	if (!err && tb[IFLA_MTU])
2056 		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
2057 
2058 	return err;
2059 }
2060 
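/**
 * ip6_tnl_changelink - update an existing tunnel from rtnetlink
 *   @dev: tunnel device being changed
 *   @tb: generic link attributes
 *   @data: IPv6 tunnel specific attributes
 *   @extack: extended ack used for error reporting
 *
 * The fallback device cannot be reconfigured, and requests carrying
 * IFLA_IPTUN_COLLECT_METADATA are rejected; -EEXIST is returned if the
 * new parameters clash with another tunnel.
 **/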
2061 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
2062 			      struct nlattr *data[],
2063 			      struct netlink_ext_ack *extack)
2064 {
2065 	struct ip6_tnl *t = netdev_priv(dev);
2066 	struct __ip6_tnl_parm p;
2067 	struct net *net = t->net;
2068 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2069 	struct ip_tunnel_encap ipencap;
2070 
2071 	if (dev == ip6n->fb_tnl_dev)
2072 		return -EINVAL;
2073 
2074 	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
2075 		int err = ip6_tnl_encap_setup(t, &ipencap);
2076 
2077 		if (err < 0)
2078 			return err;
2079 	}
2080 	ip6_tnl_netlink_parms(data, &p);
2081 	if (p.collect_md)
2082 		return -EINVAL;
2083 
2084 	t = ip6_tnl_locate(net, &p, 0);
2085 	if (!IS_ERR(t)) {
2086 		if (t->dev != dev)
2087 			return -EEXIST;
2088 	} else
2089 		t = netdev_priv(dev);
2090 
2091 	return ip6_tnl_update(t, &p);
2092 }
2093 
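/**
 * ip6_tnl_dellink - queue a tunnel device for unregistration
 *   @dev: tunnel device being deleted
 *   @head: list used for batched unregistration
 *
 * The per-netns fallback device is never removed here; it only goes away
 * when its network namespace is dismantled.
 **/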
2094 static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
2095 {
2096 	struct net *net = dev_net(dev);
2097 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2098 
2099 	if (dev != ip6n->fb_tnl_dev)
2100 		unregister_netdevice_queue(dev, head);
2101 }
2102 
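/* Worst-case size of the netlink attributes emitted by ip6_tnl_fill_info() */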
2103 static size_t ip6_tnl_get_size(const struct net_device *dev)
2104 {
2105 	return
2106 		/* IFLA_IPTUN_LINK */
2107 		nla_total_size(4) +
2108 		/* IFLA_IPTUN_LOCAL */
2109 		nla_total_size(sizeof(struct in6_addr)) +
2110 		/* IFLA_IPTUN_REMOTE */
2111 		nla_total_size(sizeof(struct in6_addr)) +
2112 		/* IFLA_IPTUN_TTL */
2113 		nla_total_size(1) +
2114 		/* IFLA_IPTUN_ENCAP_LIMIT */
2115 		nla_total_size(1) +
2116 		/* IFLA_IPTUN_FLOWINFO */
2117 		nla_total_size(4) +
2118 		/* IFLA_IPTUN_FLAGS */
2119 		nla_total_size(4) +
2120 		/* IFLA_IPTUN_PROTO */
2121 		nla_total_size(1) +
2122 		/* IFLA_IPTUN_ENCAP_TYPE */
2123 		nla_total_size(2) +
2124 		/* IFLA_IPTUN_ENCAP_FLAGS */
2125 		nla_total_size(2) +
2126 		/* IFLA_IPTUN_ENCAP_SPORT */
2127 		nla_total_size(2) +
2128 		/* IFLA_IPTUN_ENCAP_DPORT */
2129 		nla_total_size(2) +
2130 		/* IFLA_IPTUN_COLLECT_METADATA */
2131 		nla_total_size(0) +
2132 		/* IFLA_IPTUN_FWMARK */
2133 		nla_total_size(4) +
2134 		0;
2135 }
2136 
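/**
 * ip6_tnl_fill_info - dump tunnel parameters into a netlink message
 *   @skb: netlink message being built
 *   @dev: tunnel device being dumped
 *
 * Return: 0 on success, -EMSGSIZE if the message ran out of room
 **/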
2137 static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
2138 {
2139 	struct ip6_tnl *tunnel = netdev_priv(dev);
2140 	struct __ip6_tnl_parm *parm = &tunnel->parms;
2141 
2142 	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
2143 	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
2144 	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
2145 	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
2146 	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
2147 	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
2148 	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
2149 	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
2150 	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
2151 		goto nla_put_failure;
2152 
2153 	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
2154 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
2155 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
2156 	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
2157 		goto nla_put_failure;
2158 
2159 	if (parm->collect_md)
2160 		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
2161 			goto nla_put_failure;
2162 
2163 	return 0;
2164 
2165 nla_put_failure:
2166 	return -EMSGSIZE;
2167 }
2168 
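/**
 * ip6_tnl_get_link_net - return the network namespace a tunnel is bound to
 *   @dev: tunnel device
 **/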
2169 struct net *ip6_tnl_get_link_net(const struct net_device *dev)
2170 {
2171 	struct ip6_tnl *tunnel = netdev_priv(dev);
2172 
2173 	return tunnel->net;
2174 }
2175 EXPORT_SYMBOL(ip6_tnl_get_link_net);
2176 
2177 static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
2178 	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
2179 	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
2180 	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
2181 	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
2182 	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
2183 	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
2184 	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
2185 	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
2186 	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
2187 	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
2188 	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
2189 	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
2190 	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
2191 	[IFLA_IPTUN_FWMARK]		= { .type = NLA_U32 },
2192 };
2193 
2194 static struct rtnl_link_ops ip6_link_ops __read_mostly = {
2195 	.kind		= "ip6tnl",
2196 	.maxtype	= IFLA_IPTUN_MAX,
2197 	.policy		= ip6_tnl_policy,
2198 	.priv_size	= sizeof(struct ip6_tnl),
2199 	.setup		= ip6_tnl_dev_setup,
2200 	.validate	= ip6_tnl_validate,
2201 	.newlink	= ip6_tnl_newlink,
2202 	.changelink	= ip6_tnl_changelink,
2203 	.dellink	= ip6_tnl_dellink,
2204 	.get_size	= ip6_tnl_get_size,
2205 	.fill_info	= ip6_tnl_fill_info,
2206 	.get_link_net	= ip6_tnl_get_link_net,
2207 };
2208 
2209 static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
2210 	.handler	= ip4ip6_rcv,
2211 	.err_handler	= ip4ip6_err,
2212 	.priority	= 1,
2213 };
2214 
2215 static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
2216 	.handler	= ip6ip6_rcv,
2217 	.err_handler	= ip6ip6_err,
2218 	.priority	= 1,
2219 };
2220 
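/**
 * ip6_tnl_destroy_tunnels - destroy all tunnels of a network namespace
 *   @net: network namespace being dismantled
 *   @list: list used for batched unregistration
 *
 * Queues every ip6tnl device registered in @net, plus any hashed tunnel
 * whose device lives in another namespace, for unregistration.
 **/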
2221 static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head *list)
2222 {
2223 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2224 	struct net_device *dev, *aux;
2225 	int h;
2226 	struct ip6_tnl *t;
2227 
2228 	for_each_netdev_safe(net, dev, aux)
2229 		if (dev->rtnl_link_ops == &ip6_link_ops)
2230 			unregister_netdevice_queue(dev, list);
2231 
2232 	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
2233 		t = rtnl_dereference(ip6n->tnls_r_l[h]);
2234 		while (t) {
2235 			/* If dev is in the same netns, it has already
2236 			 * been added to the list by the previous loop.
2237 			 */
2238 			if (!net_eq(dev_net(t->dev), net))
2239 				unregister_netdevice_queue(t->dev, list);
2240 			t = rtnl_dereference(t->next);
2241 		}
2242 	}
2243 }
2244 
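/**
 * ip6_tnl_init_net - set up per network namespace tunnel state
 *   @net: network namespace being initialized
 *
 * Initializes the tunnel hash table pointers and, unless fallback
 * tunnels are disabled for @net, creates and registers the "ip6tnl0"
 * fallback device.
 **/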
2245 static int __net_init ip6_tnl_init_net(struct net *net)
2246 {
2247 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2248 	struct ip6_tnl *t = NULL;
2249 	int err;
2250 
2251 	ip6n->tnls[0] = ip6n->tnls_wc;
2252 	ip6n->tnls[1] = ip6n->tnls_r_l;
2253 
2254 	if (!net_has_fallback_tunnels(net))
2255 		return 0;
2256 	err = -ENOMEM;
2257 	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
2258 					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);
2259 
2260 	if (!ip6n->fb_tnl_dev)
2261 		goto err_alloc_dev;
2262 	dev_net_set(ip6n->fb_tnl_dev, net);
2263 	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
2264 	/* FB netdevice is special: we have one, and only one per netns.
2265 	 * Allowing it to be moved to another netns is clearly unsafe.
2266 	 */
2267 	ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
2268 
2269 	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
2270 	if (err < 0)
2271 		goto err_register;
2272 
2273 	err = register_netdev(ip6n->fb_tnl_dev);
2274 	if (err < 0)
2275 		goto err_register;
2276 
2277 	t = netdev_priv(ip6n->fb_tnl_dev);
2278 
2279 	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
2280 	return 0;
2281 
2282 err_register:
2283 	free_netdev(ip6n->fb_tnl_dev);
2284 err_alloc_dev:
2285 	return err;
2286 }
2287 
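/**
 * ip6_tnl_exit_batch_net - tear down tunnels for a batch of namespaces
 *   @net_list: list of network namespaces being dismantled
 *
 * Collects all tunnels of the dying namespaces under RTNL and
 * unregisters them in one batch.
 **/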
2288 static void __net_exit ip6_tnl_exit_batch_net(struct list_head *net_list)
2289 {
2290 	struct net *net;
2291 	LIST_HEAD(list);
2292 
2293 	rtnl_lock();
2294 	list_for_each_entry(net, net_list, exit_list)
2295 		ip6_tnl_destroy_tunnels(net, &list);
2296 	unregister_netdevice_many(&list);
2297 	rtnl_unlock();
2298 }
2299 
2300 static struct pernet_operations ip6_tnl_net_ops = {
2301 	.init = ip6_tnl_init_net,
2302 	.exit_batch = ip6_tnl_exit_batch_net,
2303 	.id   = &ip6_tnl_net_id,
2304 	.size = sizeof(struct ip6_tnl_net),
2305 };
2306 
2307 /**
2308  * ip6_tunnel_init - register protocol and reserve needed resources
2309  *
2310  * Return: 0 on success
2311  **/
2312 
2313 static int __init ip6_tunnel_init(void)
2314 {
2315 	int err;
2316 
2317 	if (!ipv6_mod_enabled())
2318 		return -EOPNOTSUPP;
2319 
2320 	err = register_pernet_device(&ip6_tnl_net_ops);
2321 	if (err < 0)
2322 		goto out_pernet;
2323 
2324 	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
2325 	if (err < 0) {
2326 		pr_err("%s: can't register ip4ip6\n", __func__);
2327 		goto out_ip4ip6;
2328 	}
2329 
2330 	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
2331 	if (err < 0) {
2332 		pr_err("%s: can't register ip6ip6\n", __func__);
2333 		goto out_ip6ip6;
2334 	}
2335 	err = rtnl_link_register(&ip6_link_ops);
2336 	if (err < 0)
2337 		goto rtnl_link_failed;
2338 
2339 	return 0;
2340 
2341 rtnl_link_failed:
2342 	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
2343 out_ip6ip6:
2344 	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
2345 out_ip4ip6:
2346 	unregister_pernet_device(&ip6_tnl_net_ops);
2347 out_pernet:
2348 	return err;
2349 }
2350 
2351 /**
2352  * ip6_tunnel_cleanup - free resources and unregister protocol
2353  **/
2354 
2355 static void __exit ip6_tunnel_cleanup(void)
2356 {
2357 	rtnl_link_unregister(&ip6_link_ops);
2358 	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
2359 		pr_info("%s: can't deregister ip4ip6\n", __func__);
2360 
2361 	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
2362 		pr_info("%s: can't deregister ip6ip6\n", __func__);
2363 
2364 	unregister_pernet_device(&ip6_tnl_net_ops);
2365 }
2366 
2367 module_init(ip6_tunnel_init);
2368 module_exit(ip6_tunnel_cleanup);
2369