xref: /openbmc/linux/net/ipv6/ip6_tunnel.c (revision 01cc2ec6)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	IPv6 tunneling device
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
8  *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
9  *
10  *      Based on:
11  *      linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
12  *
13  *      RFC 2473
14  */
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 
18 #include <linux/module.h>
19 #include <linux/capability.h>
20 #include <linux/errno.h>
21 #include <linux/types.h>
22 #include <linux/sockios.h>
23 #include <linux/icmp.h>
24 #include <linux/if.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/net.h>
28 #include <linux/in6.h>
29 #include <linux/netdevice.h>
30 #include <linux/if_arp.h>
31 #include <linux/icmpv6.h>
32 #include <linux/init.h>
33 #include <linux/route.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/netfilter_ipv6.h>
36 #include <linux/slab.h>
37 #include <linux/hash.h>
38 #include <linux/etherdevice.h>
39 
40 #include <linux/uaccess.h>
41 #include <linux/atomic.h>
42 
43 #include <net/icmp.h>
44 #include <net/ip.h>
45 #include <net/ip_tunnels.h>
46 #include <net/ipv6.h>
47 #include <net/ip6_route.h>
48 #include <net/addrconf.h>
49 #include <net/ip6_tunnel.h>
50 #include <net/xfrm.h>
51 #include <net/dsfield.h>
52 #include <net/inet_ecn.h>
53 #include <net/net_namespace.h>
54 #include <net/netns/generic.h>
55 #include <net/dst_metadata.h>
56 
57 MODULE_AUTHOR("Ville Nuorvala");
58 MODULE_DESCRIPTION("IPv6 tunneling device");
59 MODULE_LICENSE("GPL");
60 MODULE_ALIAS_RTNL_LINK("ip6tnl");
61 MODULE_ALIAS_NETDEV("ip6tnl0");
62 
63 #define IP6_TUNNEL_HASH_SIZE_SHIFT  5
64 #define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)
65 
66 static bool log_ecn_error = true;
67 module_param(log_ecn_error, bool, 0644);
68 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
69 
70 static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
71 {
72 	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
73 
74 	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
75 }
76 
77 static int ip6_tnl_dev_init(struct net_device *dev);
78 static void ip6_tnl_dev_setup(struct net_device *dev);
79 static struct rtnl_link_ops ip6_link_ops __read_mostly;
80 
81 static unsigned int ip6_tnl_net_id __read_mostly;
82 struct ip6_tnl_net {
83 	/* the IPv6 tunnel fallback device */
84 	struct net_device *fb_tnl_dev;
85 	/* lists for storing tunnels in use */
86 	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
87 	struct ip6_tnl __rcu *tnls_wc[1];
88 	struct ip6_tnl __rcu **tnls[2];
89 	struct ip6_tnl __rcu *collect_md_tun;
90 };
91 
92 static inline int ip6_tnl_mpls_supported(void)
93 {
94 	return IS_ENABLED(CONFIG_MPLS);
95 }
96 
97 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
98 {
99 	struct pcpu_sw_netstats tmp, sum = { 0 };
100 	int i;
101 
102 	for_each_possible_cpu(i) {
103 		unsigned int start;
104 		const struct pcpu_sw_netstats *tstats =
105 						   per_cpu_ptr(dev->tstats, i);
106 
107 		do {
108 			start = u64_stats_fetch_begin_irq(&tstats->syncp);
109 			tmp.rx_packets = tstats->rx_packets;
110 			tmp.rx_bytes = tstats->rx_bytes;
111 			tmp.tx_packets = tstats->tx_packets;
112 			tmp.tx_bytes =  tstats->tx_bytes;
113 		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
114 
115 		sum.rx_packets += tmp.rx_packets;
116 		sum.rx_bytes   += tmp.rx_bytes;
117 		sum.tx_packets += tmp.tx_packets;
118 		sum.tx_bytes   += tmp.tx_bytes;
119 	}
120 	dev->stats.rx_packets = sum.rx_packets;
121 	dev->stats.rx_bytes   = sum.rx_bytes;
122 	dev->stats.tx_packets = sum.tx_packets;
123 	dev->stats.tx_bytes   = sum.tx_bytes;
124 	return &dev->stats;
125 }
126 
127 #define for_each_ip6_tunnel_rcu(start) \
128 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
129 
130 /**
131  * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
132  *   @net: network namespace
133  *   @link: ifindex of underlying interface
134  *   @remote: the address of the tunnel exit-point
135  *   @local: the address of the tunnel entry-point
136  *
137  * Return:
138  *   tunnel matching given end-points if found,
139  *   else fallback tunnel if its device is up,
140  *   else %NULL
141  **/
142 
143 static struct ip6_tnl *
144 ip6_tnl_lookup(struct net *net, int link,
145 	       const struct in6_addr *remote, const struct in6_addr *local)
146 {
147 	unsigned int hash = HASH(remote, local);
148 	struct ip6_tnl *t, *cand = NULL;
149 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
150 	struct in6_addr any;
151 
152 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
153 		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
154 		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
155 		    !(t->dev->flags & IFF_UP))
156 			continue;
157 
158 		if (link == t->parms.link)
159 			return t;
160 		else
161 			cand = t;
162 	}
163 
164 	memset(&any, 0, sizeof(any));
165 	hash = HASH(&any, local);
166 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
167 		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
168 		    !ipv6_addr_any(&t->parms.raddr) ||
169 		    !(t->dev->flags & IFF_UP))
170 			continue;
171 
172 		if (link == t->parms.link)
173 			return t;
174 		else if (!cand)
175 			cand = t;
176 	}
177 
178 	hash = HASH(remote, &any);
179 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
180 		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
181 		    !ipv6_addr_any(&t->parms.laddr) ||
182 		    !(t->dev->flags & IFF_UP))
183 			continue;
184 
185 		if (link == t->parms.link)
186 			return t;
187 		else if (!cand)
188 			cand = t;
189 	}
190 
191 	if (cand)
192 		return cand;
193 
194 	t = rcu_dereference(ip6n->collect_md_tun);
195 	if (t && t->dev->flags & IFF_UP)
196 		return t;
197 
198 	t = rcu_dereference(ip6n->tnls_wc[0]);
199 	if (t && (t->dev->flags & IFF_UP))
200 		return t;
201 
202 	return NULL;
203 }
204 
205 /**
206  * ip6_tnl_bucket - get head of list matching given tunnel parameters
207  *   @p: parameters containing tunnel end-points
208  *
209  * Description:
210  *   ip6_tnl_bucket() returns the head of the list matching the
211  *   &struct in6_addr entries laddr and raddr in @p.
212  *
213  * Return: head of IPv6 tunnel list
214  **/
215 
216 static struct ip6_tnl __rcu **
217 ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
218 {
219 	const struct in6_addr *remote = &p->raddr;
220 	const struct in6_addr *local = &p->laddr;
221 	unsigned int h = 0;
222 	int prio = 0;
223 
224 	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
225 		prio = 1;
226 		h = HASH(remote, local);
227 	}
228 	return &ip6n->tnls[prio][h];
229 }
230 
231 /**
232  * ip6_tnl_link - add tunnel to hash table
233  *   @t: tunnel to be added
234  **/
235 
236 static void
237 ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
238 {
239 	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
240 
241 	if (t->parms.collect_md)
242 		rcu_assign_pointer(ip6n->collect_md_tun, t);
243 	rcu_assign_pointer(t->next , rtnl_dereference(*tp));
244 	rcu_assign_pointer(*tp, t);
245 }
246 
247 /**
248  * ip6_tnl_unlink - remove tunnel from hash table
249  *   @t: tunnel to be removed
250  **/
251 
252 static void
253 ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
254 {
255 	struct ip6_tnl __rcu **tp;
256 	struct ip6_tnl *iter;
257 
258 	if (t->parms.collect_md)
259 		rcu_assign_pointer(ip6n->collect_md_tun, NULL);
260 
261 	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
262 	     (iter = rtnl_dereference(*tp)) != NULL;
263 	     tp = &iter->next) {
264 		if (t == iter) {
265 			rcu_assign_pointer(*tp, t->next);
266 			break;
267 		}
268 	}
269 }
270 
271 static void ip6_dev_free(struct net_device *dev)
272 {
273 	struct ip6_tnl *t = netdev_priv(dev);
274 
275 	gro_cells_destroy(&t->gro_cells);
276 	dst_cache_destroy(&t->dst_cache);
277 	free_percpu(dev->tstats);
278 }
279 
280 static int ip6_tnl_create2(struct net_device *dev)
281 {
282 	struct ip6_tnl *t = netdev_priv(dev);
283 	struct net *net = dev_net(dev);
284 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
285 	int err;
286 
287 	t = netdev_priv(dev);
288 
289 	dev->rtnl_link_ops = &ip6_link_ops;
290 	err = register_netdevice(dev);
291 	if (err < 0)
292 		goto out;
293 
294 	strcpy(t->parms.name, dev->name);
295 
296 	dev_hold(dev);
297 	ip6_tnl_link(ip6n, t);
298 	return 0;
299 
300 out:
301 	return err;
302 }
303 
304 /**
305  * ip6_tnl_create - create a new tunnel
306  *   @net: network namespace
307  *   @p: tunnel parameters
308  *
309  * Description:
310  *   Create tunnel matching given parameters.
311  *
312  * Return:
313  *   created tunnel or error pointer
314  **/
315 
316 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
317 {
318 	struct net_device *dev;
319 	struct ip6_tnl *t;
320 	char name[IFNAMSIZ];
321 	int err = -E2BIG;
322 
323 	if (p->name[0]) {
324 		if (!dev_valid_name(p->name))
325 			goto failed;
326 		strlcpy(name, p->name, IFNAMSIZ);
327 	} else {
328 		sprintf(name, "ip6tnl%%d");
329 	}
330 	err = -ENOMEM;
331 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
332 			   ip6_tnl_dev_setup);
333 	if (!dev)
334 		goto failed;
335 
336 	dev_net_set(dev, net);
337 
338 	t = netdev_priv(dev);
339 	t->parms = *p;
340 	t->net = dev_net(dev);
341 	err = ip6_tnl_create2(dev);
342 	if (err < 0)
343 		goto failed_free;
344 
345 	return t;
346 
347 failed_free:
348 	free_netdev(dev);
349 failed:
350 	return ERR_PTR(err);
351 }
352 
353 /**
354  * ip6_tnl_locate - find or create tunnel matching given parameters
355  *   @net: network namespace
356  *   @p: tunnel parameters
357  *   @create: != 0 if allowed to create new tunnel if no match found
358  *
359  * Description:
360  *   ip6_tnl_locate() first tries to locate an existing tunnel
361  *   based on @parms. If this is unsuccessful, but @create is set a new
362  *   tunnel device is created and registered for use.
363  *
364  * Return:
365  *   matching tunnel or error pointer
366  **/
367 
368 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
369 		struct __ip6_tnl_parm *p, int create)
370 {
371 	const struct in6_addr *remote = &p->raddr;
372 	const struct in6_addr *local = &p->laddr;
373 	struct ip6_tnl __rcu **tp;
374 	struct ip6_tnl *t;
375 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
376 
377 	for (tp = ip6_tnl_bucket(ip6n, p);
378 	     (t = rtnl_dereference(*tp)) != NULL;
379 	     tp = &t->next) {
380 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
381 		    ipv6_addr_equal(remote, &t->parms.raddr) &&
382 		    p->link == t->parms.link) {
383 			if (create)
384 				return ERR_PTR(-EEXIST);
385 
386 			return t;
387 		}
388 	}
389 	if (!create)
390 		return ERR_PTR(-ENODEV);
391 	return ip6_tnl_create(net, p);
392 }
393 
394 /**
395  * ip6_tnl_dev_uninit - tunnel device uninitializer
396  *   @dev: the device to be destroyed
397  *
398  * Description:
399  *   ip6_tnl_dev_uninit() removes tunnel from its list
400  **/
401 
402 static void
403 ip6_tnl_dev_uninit(struct net_device *dev)
404 {
405 	struct ip6_tnl *t = netdev_priv(dev);
406 	struct net *net = t->net;
407 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
408 
409 	if (dev == ip6n->fb_tnl_dev)
410 		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
411 	else
412 		ip6_tnl_unlink(ip6n, t);
413 	dst_cache_reset(&t->dst_cache);
414 	dev_put(dev);
415 }
416 
417 /**
418  * parse_tvl_tnl_enc_lim - handle encapsulation limit option
419  *   @skb: received socket buffer
420  *
421  * Return:
422  *   0 if none was found,
423  *   else index to encapsulation limit
424  **/
425 
426 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
427 {
428 	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
429 	unsigned int nhoff = raw - skb->data;
430 	unsigned int off = nhoff + sizeof(*ipv6h);
431 	u8 next, nexthdr = ipv6h->nexthdr;
432 
433 	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
434 		struct ipv6_opt_hdr *hdr;
435 		u16 optlen;
436 
437 		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
438 			break;
439 
440 		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
441 		if (nexthdr == NEXTHDR_FRAGMENT) {
442 			struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
443 			if (frag_hdr->frag_off)
444 				break;
445 			optlen = 8;
446 		} else if (nexthdr == NEXTHDR_AUTH) {
447 			optlen = ipv6_authlen(hdr);
448 		} else {
449 			optlen = ipv6_optlen(hdr);
450 		}
451 		/* cache hdr->nexthdr, since pskb_may_pull() might
452 		 * invalidate hdr
453 		 */
454 		next = hdr->nexthdr;
455 		if (nexthdr == NEXTHDR_DEST) {
456 			u16 i = 2;
457 
458 			/* Remember : hdr is no longer valid at this point. */
459 			if (!pskb_may_pull(skb, off + optlen))
460 				break;
461 
462 			while (1) {
463 				struct ipv6_tlv_tnl_enc_lim *tel;
464 
465 				/* No more room for encapsulation limit */
466 				if (i + sizeof(*tel) > optlen)
467 					break;
468 
469 				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
470 				/* return index of option if found and valid */
471 				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
472 				    tel->length == 1)
473 					return i + off - nhoff;
474 				/* else jump to next option */
475 				if (tel->type)
476 					i += tel->length + 2;
477 				else
478 					i++;
479 			}
480 		}
481 		nexthdr = next;
482 		off += optlen;
483 	}
484 	return 0;
485 }
486 EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
487 
488 /**
489  * ip6_tnl_err - tunnel error handler
490  *
491  * Description:
492  *   ip6_tnl_err() should handle errors in the tunnel according
493  *   to the specifications in RFC 2473.
494  **/
495 
496 static int
497 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
498 	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
499 {
500 	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
501 	struct net *net = dev_net(skb->dev);
502 	u8 rel_type = ICMPV6_DEST_UNREACH;
503 	u8 rel_code = ICMPV6_ADDR_UNREACH;
504 	__u32 rel_info = 0;
505 	struct ip6_tnl *t;
506 	int err = -ENOENT;
507 	int rel_msg = 0;
508 	u8 tproto;
509 	__u16 len;
510 
511 	/* If the packet doesn't contain the original IPv6 header we are
512 	   in trouble since we might need the source address for further
513 	   processing of the error. */
514 
515 	rcu_read_lock();
516 	t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->daddr, &ipv6h->saddr);
517 	if (!t)
518 		goto out;
519 
520 	tproto = READ_ONCE(t->parms.proto);
521 	if (tproto != ipproto && tproto != 0)
522 		goto out;
523 
524 	err = 0;
525 
526 	switch (*type) {
527 	case ICMPV6_DEST_UNREACH:
528 		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
529 				    t->parms.name);
530 		rel_msg = 1;
531 		break;
532 	case ICMPV6_TIME_EXCEED:
533 		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
534 			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
535 					    t->parms.name);
536 			rel_msg = 1;
537 		}
538 		break;
539 	case ICMPV6_PARAMPROB: {
540 		struct ipv6_tlv_tnl_enc_lim *tel;
541 		__u32 teli;
542 
543 		teli = 0;
544 		if ((*code) == ICMPV6_HDR_FIELD)
545 			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
546 
547 		if (teli && teli == *info - 2) {
548 			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
549 			if (tel->encap_limit == 0) {
550 				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
551 						    t->parms.name);
552 				rel_msg = 1;
553 			}
554 		} else {
555 			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
556 					    t->parms.name);
557 		}
558 		break;
559 	}
560 	case ICMPV6_PKT_TOOBIG: {
561 		__u32 mtu;
562 
563 		ip6_update_pmtu(skb, net, htonl(*info), 0, 0,
564 				sock_net_uid(net, NULL));
565 		mtu = *info - offset;
566 		if (mtu < IPV6_MIN_MTU)
567 			mtu = IPV6_MIN_MTU;
568 		len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
569 		if (len > mtu) {
570 			rel_type = ICMPV6_PKT_TOOBIG;
571 			rel_code = 0;
572 			rel_info = mtu;
573 			rel_msg = 1;
574 		}
575 		break;
576 	}
577 	case NDISC_REDIRECT:
578 		ip6_redirect(skb, net, skb->dev->ifindex, 0,
579 			     sock_net_uid(net, NULL));
580 		break;
581 	}
582 
583 	*type = rel_type;
584 	*code = rel_code;
585 	*info = rel_info;
586 	*msg = rel_msg;
587 
588 out:
589 	rcu_read_unlock();
590 	return err;
591 }
592 
593 static int
594 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
595 	   u8 type, u8 code, int offset, __be32 info)
596 {
597 	__u32 rel_info = ntohl(info);
598 	const struct iphdr *eiph;
599 	struct sk_buff *skb2;
600 	int err, rel_msg = 0;
601 	u8 rel_type = type;
602 	u8 rel_code = code;
603 	struct rtable *rt;
604 	struct flowi4 fl4;
605 
606 	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
607 			  &rel_msg, &rel_info, offset);
608 	if (err < 0)
609 		return err;
610 
611 	if (rel_msg == 0)
612 		return 0;
613 
614 	switch (rel_type) {
615 	case ICMPV6_DEST_UNREACH:
616 		if (rel_code != ICMPV6_ADDR_UNREACH)
617 			return 0;
618 		rel_type = ICMP_DEST_UNREACH;
619 		rel_code = ICMP_HOST_UNREACH;
620 		break;
621 	case ICMPV6_PKT_TOOBIG:
622 		if (rel_code != 0)
623 			return 0;
624 		rel_type = ICMP_DEST_UNREACH;
625 		rel_code = ICMP_FRAG_NEEDED;
626 		break;
627 	default:
628 		return 0;
629 	}
630 
631 	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
632 		return 0;
633 
634 	skb2 = skb_clone(skb, GFP_ATOMIC);
635 	if (!skb2)
636 		return 0;
637 
638 	skb_dst_drop(skb2);
639 
640 	skb_pull(skb2, offset);
641 	skb_reset_network_header(skb2);
642 	eiph = ip_hdr(skb2);
643 
644 	/* Try to guess incoming interface */
645 	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr,
646 				   0, 0, 0, IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
647 	if (IS_ERR(rt))
648 		goto out;
649 
650 	skb2->dev = rt->dst.dev;
651 	ip_rt_put(rt);
652 
653 	/* route "incoming" packet */
654 	if (rt->rt_flags & RTCF_LOCAL) {
655 		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
656 					   eiph->daddr, eiph->saddr, 0, 0,
657 					   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
658 		if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
659 			if (!IS_ERR(rt))
660 				ip_rt_put(rt);
661 			goto out;
662 		}
663 		skb_dst_set(skb2, &rt->dst);
664 	} else {
665 		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
666 				   skb2->dev) ||
667 		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
668 			goto out;
669 	}
670 
671 	/* change mtu on this route */
672 	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
673 		if (rel_info > dst_mtu(skb_dst(skb2)))
674 			goto out;
675 
676 		skb_dst_update_pmtu_no_confirm(skb2, rel_info);
677 	}
678 
679 	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
680 
681 out:
682 	kfree_skb(skb2);
683 	return 0;
684 }
685 
686 static int
687 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
688 	   u8 type, u8 code, int offset, __be32 info)
689 {
690 	__u32 rel_info = ntohl(info);
691 	int err, rel_msg = 0;
692 	u8 rel_type = type;
693 	u8 rel_code = code;
694 
695 	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
696 			  &rel_msg, &rel_info, offset);
697 	if (err < 0)
698 		return err;
699 
700 	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
701 		struct rt6_info *rt;
702 		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
703 
704 		if (!skb2)
705 			return 0;
706 
707 		skb_dst_drop(skb2);
708 		skb_pull(skb2, offset);
709 		skb_reset_network_header(skb2);
710 
711 		/* Try to guess incoming interface */
712 		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
713 				NULL, 0, skb2, 0);
714 
715 		if (rt && rt->dst.dev)
716 			skb2->dev = rt->dst.dev;
717 
718 		icmpv6_send(skb2, rel_type, rel_code, rel_info);
719 
720 		ip6_rt_put(rt);
721 
722 		kfree_skb(skb2);
723 	}
724 
725 	return 0;
726 }
727 
728 static int
729 mplsip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
730 	    u8 type, u8 code, int offset, __be32 info)
731 {
732 	__u32 rel_info = ntohl(info);
733 	int err, rel_msg = 0;
734 	u8 rel_type = type;
735 	u8 rel_code = code;
736 
737 	err = ip6_tnl_err(skb, IPPROTO_MPLS, opt, &rel_type, &rel_code,
738 			  &rel_msg, &rel_info, offset);
739 	return err;
740 }
741 
742 static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
743 				       const struct ipv6hdr *ipv6h,
744 				       struct sk_buff *skb)
745 {
746 	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
747 
748 	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
749 		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
750 
751 	return IP6_ECN_decapsulate(ipv6h, skb);
752 }
753 
754 static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
755 				       const struct ipv6hdr *ipv6h,
756 				       struct sk_buff *skb)
757 {
758 	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
759 		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
760 
761 	return IP6_ECN_decapsulate(ipv6h, skb);
762 }
763 
764 static inline int mplsip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
765 					       const struct ipv6hdr *ipv6h,
766 					       struct sk_buff *skb)
767 {
768 	/* ECN is not supported in AF_MPLS */
769 	return 0;
770 }
771 
772 __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
773 			     const struct in6_addr *laddr,
774 			     const struct in6_addr *raddr)
775 {
776 	struct __ip6_tnl_parm *p = &t->parms;
777 	int ltype = ipv6_addr_type(laddr);
778 	int rtype = ipv6_addr_type(raddr);
779 	__u32 flags = 0;
780 
781 	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
782 		flags = IP6_TNL_F_CAP_PER_PACKET;
783 	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
784 		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
785 		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
786 		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
787 		if (ltype&IPV6_ADDR_UNICAST)
788 			flags |= IP6_TNL_F_CAP_XMIT;
789 		if (rtype&IPV6_ADDR_UNICAST)
790 			flags |= IP6_TNL_F_CAP_RCV;
791 	}
792 	return flags;
793 }
794 EXPORT_SYMBOL(ip6_tnl_get_cap);
795 
796 /* called with rcu_read_lock() */
797 int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
798 				  const struct in6_addr *laddr,
799 				  const struct in6_addr *raddr)
800 {
801 	struct __ip6_tnl_parm *p = &t->parms;
802 	int ret = 0;
803 	struct net *net = t->net;
804 
805 	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
806 	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
807 	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
808 		struct net_device *ldev = NULL;
809 
810 		if (p->link)
811 			ldev = dev_get_by_index_rcu(net, p->link);
812 
813 		if ((ipv6_addr_is_multicast(laddr) ||
814 		     likely(ipv6_chk_addr_and_flags(net, laddr, ldev, false,
815 						    0, IFA_F_TENTATIVE))) &&
816 		    ((p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) ||
817 		     likely(!ipv6_chk_addr_and_flags(net, raddr, ldev, true,
818 						     0, IFA_F_TENTATIVE))))
819 			ret = 1;
820 	}
821 	return ret;
822 }
823 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
824 
825 static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
826 			 const struct tnl_ptk_info *tpi,
827 			 struct metadata_dst *tun_dst,
828 			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
829 						const struct ipv6hdr *ipv6h,
830 						struct sk_buff *skb),
831 			 bool log_ecn_err)
832 {
833 	struct pcpu_sw_netstats *tstats;
834 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
835 	int err;
836 
837 	if ((!(tpi->flags & TUNNEL_CSUM) &&
838 	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
839 	    ((tpi->flags & TUNNEL_CSUM) &&
840 	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
841 		tunnel->dev->stats.rx_crc_errors++;
842 		tunnel->dev->stats.rx_errors++;
843 		goto drop;
844 	}
845 
846 	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
847 		if (!(tpi->flags & TUNNEL_SEQ) ||
848 		    (tunnel->i_seqno &&
849 		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
850 			tunnel->dev->stats.rx_fifo_errors++;
851 			tunnel->dev->stats.rx_errors++;
852 			goto drop;
853 		}
854 		tunnel->i_seqno = ntohl(tpi->seq) + 1;
855 	}
856 
857 	skb->protocol = tpi->proto;
858 
859 	/* Warning: All skb pointers will be invalidated! */
860 	if (tunnel->dev->type == ARPHRD_ETHER) {
861 		if (!pskb_may_pull(skb, ETH_HLEN)) {
862 			tunnel->dev->stats.rx_length_errors++;
863 			tunnel->dev->stats.rx_errors++;
864 			goto drop;
865 		}
866 
867 		ipv6h = ipv6_hdr(skb);
868 		skb->protocol = eth_type_trans(skb, tunnel->dev);
869 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
870 	} else {
871 		skb->dev = tunnel->dev;
872 	}
873 
874 	skb_reset_network_header(skb);
875 	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
876 
877 	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
878 
879 	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
880 	if (unlikely(err)) {
881 		if (log_ecn_err)
882 			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
883 					     &ipv6h->saddr,
884 					     ipv6_get_dsfield(ipv6h));
885 		if (err > 1) {
886 			++tunnel->dev->stats.rx_frame_errors;
887 			++tunnel->dev->stats.rx_errors;
888 			goto drop;
889 		}
890 	}
891 
892 	tstats = this_cpu_ptr(tunnel->dev->tstats);
893 	u64_stats_update_begin(&tstats->syncp);
894 	tstats->rx_packets++;
895 	tstats->rx_bytes += skb->len;
896 	u64_stats_update_end(&tstats->syncp);
897 
898 	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
899 
900 	if (tun_dst)
901 		skb_dst_set(skb, (struct dst_entry *)tun_dst);
902 
903 	gro_cells_receive(&tunnel->gro_cells, skb);
904 	return 0;
905 
906 drop:
907 	if (tun_dst)
908 		dst_release((struct dst_entry *)tun_dst);
909 	kfree_skb(skb);
910 	return 0;
911 }
912 
913 int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
914 		const struct tnl_ptk_info *tpi,
915 		struct metadata_dst *tun_dst,
916 		bool log_ecn_err)
917 {
918 	int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
919 				    const struct ipv6hdr *ipv6h,
920 				    struct sk_buff *skb);
921 
922 	dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate;
923 	if (tpi->proto == htons(ETH_P_IP))
924 		dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate;
925 
926 	return __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
927 			     log_ecn_err);
928 }
929 EXPORT_SYMBOL(ip6_tnl_rcv);
930 
931 static const struct tnl_ptk_info tpi_v6 = {
932 	/* no tunnel info required for ipxip6. */
933 	.proto = htons(ETH_P_IPV6),
934 };
935 
936 static const struct tnl_ptk_info tpi_v4 = {
937 	/* no tunnel info required for ipxip6. */
938 	.proto = htons(ETH_P_IP),
939 };
940 
941 static const struct tnl_ptk_info tpi_mpls = {
942 	/* no tunnel info required for mplsip6. */
943 	.proto = htons(ETH_P_MPLS_UC),
944 };
945 
946 static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
947 		      const struct tnl_ptk_info *tpi,
948 		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
949 						  const struct ipv6hdr *ipv6h,
950 						  struct sk_buff *skb))
951 {
952 	struct ip6_tnl *t;
953 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
954 	struct metadata_dst *tun_dst = NULL;
955 	int ret = -1;
956 
957 	rcu_read_lock();
958 	t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->saddr, &ipv6h->daddr);
959 
960 	if (t) {
961 		u8 tproto = READ_ONCE(t->parms.proto);
962 
963 		if (tproto != ipproto && tproto != 0)
964 			goto drop;
965 		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
966 			goto drop;
967 		ipv6h = ipv6_hdr(skb);
968 		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
969 			goto drop;
970 		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
971 			goto drop;
972 		if (t->parms.collect_md) {
973 			tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
974 			if (!tun_dst)
975 				goto drop;
976 		}
977 		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
978 				    log_ecn_error);
979 	}
980 
981 	rcu_read_unlock();
982 
983 	return ret;
984 
985 drop:
986 	rcu_read_unlock();
987 	kfree_skb(skb);
988 	return 0;
989 }
990 
991 static int ip4ip6_rcv(struct sk_buff *skb)
992 {
993 	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
994 			  ip4ip6_dscp_ecn_decapsulate);
995 }
996 
997 static int ip6ip6_rcv(struct sk_buff *skb)
998 {
999 	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
1000 			  ip6ip6_dscp_ecn_decapsulate);
1001 }
1002 
1003 static int mplsip6_rcv(struct sk_buff *skb)
1004 {
1005 	return ipxip6_rcv(skb, IPPROTO_MPLS, &tpi_mpls,
1006 			  mplsip6_dscp_ecn_decapsulate);
1007 }
1008 
1009 struct ipv6_tel_txoption {
1010 	struct ipv6_txoptions ops;
1011 	__u8 dst_opt[8];
1012 };
1013 
1014 static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
1015 {
1016 	memset(opt, 0, sizeof(struct ipv6_tel_txoption));
1017 
1018 	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
1019 	opt->dst_opt[3] = 1;
1020 	opt->dst_opt[4] = encap_limit;
1021 	opt->dst_opt[5] = IPV6_TLV_PADN;
1022 	opt->dst_opt[6] = 1;
1023 
1024 	opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt;
1025 	opt->ops.opt_nflen = 8;
1026 }
1027 
1028 /**
1029  * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
1030  *   @t: the outgoing tunnel device
1031  *   @hdr: IPv6 header from the incoming packet
1032  *
1033  * Description:
1034  *   Avoid trivial tunneling loop by checking that tunnel exit-point
1035  *   doesn't match source of incoming packet.
1036  *
1037  * Return:
1038  *   1 if conflict,
1039  *   0 else
1040  **/
1041 
1042 static inline bool
1043 ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
1044 {
1045 	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
1046 }
1047 
1048 int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
1049 		     const struct in6_addr *laddr,
1050 		     const struct in6_addr *raddr)
1051 {
1052 	struct __ip6_tnl_parm *p = &t->parms;
1053 	int ret = 0;
1054 	struct net *net = t->net;
1055 
1056 	if (t->parms.collect_md)
1057 		return 1;
1058 
1059 	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
1060 	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
1061 	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
1062 		struct net_device *ldev = NULL;
1063 
1064 		rcu_read_lock();
1065 		if (p->link)
1066 			ldev = dev_get_by_index_rcu(net, p->link);
1067 
1068 		if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false,
1069 						      0, IFA_F_TENTATIVE)))
1070 			pr_warn("%s xmit: Local address not yet configured!\n",
1071 				p->name);
1072 		else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
1073 			 !ipv6_addr_is_multicast(raddr) &&
1074 			 unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev,
1075 							  true, 0, IFA_F_TENTATIVE)))
1076 			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
1077 				p->name);
1078 		else
1079 			ret = 1;
1080 		rcu_read_unlock();
1081 	}
1082 	return ret;
1083 }
1084 EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
1085 
1086 /**
1087  * ip6_tnl_xmit - encapsulate packet and send
1088  *   @skb: the outgoing socket buffer
1089  *   @dev: the outgoing tunnel device
1090  *   @dsfield: dscp code for outer header
1091  *   @fl6: flow of tunneled packet
1092  *   @encap_limit: encapsulation limit
1093  *   @pmtu: Path MTU is stored if packet is too big
1094  *   @proto: next header value
1095  *
1096  * Description:
1097  *   Build new header and do some sanity checks on the packet before sending
1098  *   it.
1099  *
1100  * Return:
1101  *   0 on success
1102  *   -1 fail
1103  *   %-EMSGSIZE message too big. return mtu in this case.
1104  **/
1105 
1106 int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1107 		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
1108 		 __u8 proto)
1109 {
1110 	struct ip6_tnl *t = netdev_priv(dev);
1111 	struct net *net = t->net;
1112 	struct net_device_stats *stats = &t->dev->stats;
1113 	struct ipv6hdr *ipv6h;
1114 	struct ipv6_tel_txoption opt;
1115 	struct dst_entry *dst = NULL, *ndst = NULL;
1116 	struct net_device *tdev;
1117 	int mtu;
1118 	unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
1119 	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
1120 	unsigned int max_headroom = psh_hlen;
1121 	bool use_cache = false;
1122 	u8 hop_limit;
1123 	int err = -1;
1124 
1125 	if (t->parms.collect_md) {
1126 		hop_limit = skb_tunnel_info(skb)->key.ttl;
1127 		goto route_lookup;
1128 	} else {
1129 		hop_limit = t->parms.hop_limit;
1130 	}
1131 
1132 	/* NBMA tunnel */
1133 	if (ipv6_addr_any(&t->parms.raddr)) {
1134 		if (skb->protocol == htons(ETH_P_IPV6)) {
1135 			struct in6_addr *addr6;
1136 			struct neighbour *neigh;
1137 			int addr_type;
1138 
1139 			if (!skb_dst(skb))
1140 				goto tx_err_link_failure;
1141 
1142 			neigh = dst_neigh_lookup(skb_dst(skb),
1143 						 &ipv6_hdr(skb)->daddr);
1144 			if (!neigh)
1145 				goto tx_err_link_failure;
1146 
1147 			addr6 = (struct in6_addr *)&neigh->primary_key;
1148 			addr_type = ipv6_addr_type(addr6);
1149 
1150 			if (addr_type == IPV6_ADDR_ANY)
1151 				addr6 = &ipv6_hdr(skb)->daddr;
1152 
1153 			memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1154 			neigh_release(neigh);
1155 		}
1156 	} else if (t->parms.proto != 0 && !(t->parms.flags &
1157 					    (IP6_TNL_F_USE_ORIG_TCLASS |
1158 					     IP6_TNL_F_USE_ORIG_FWMARK))) {
1159 		/* enable the cache only if neither the outer protocol nor the
1160 		 * routing decision depends on the current inner header value
1161 		 */
1162 		use_cache = true;
1163 	}
1164 
1165 	if (use_cache)
1166 		dst = dst_cache_get(&t->dst_cache);
1167 
1168 	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
1169 		goto tx_err_link_failure;
1170 
1171 	if (!dst) {
1172 route_lookup:
1173 		/* add dsfield to flowlabel for route lookup */
1174 		fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
1175 
1176 		dst = ip6_route_output(net, NULL, fl6);
1177 
1178 		if (dst->error)
1179 			goto tx_err_link_failure;
1180 		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
1181 		if (IS_ERR(dst)) {
1182 			err = PTR_ERR(dst);
1183 			dst = NULL;
1184 			goto tx_err_link_failure;
1185 		}
1186 		if (t->parms.collect_md && ipv6_addr_any(&fl6->saddr) &&
1187 		    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
1188 				       &fl6->daddr, 0, &fl6->saddr))
1189 			goto tx_err_link_failure;
1190 		ndst = dst;
1191 	}
1192 
1193 	tdev = dst->dev;
1194 
1195 	if (tdev == dev) {
1196 		stats->collisions++;
1197 		net_warn_ratelimited("%s: Local routing loop detected!\n",
1198 				     t->parms.name);
1199 		goto tx_err_dst_release;
1200 	}
1201 	mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
1202 	if (encap_limit >= 0) {
1203 		max_headroom += 8;
1204 		mtu -= 8;
1205 	}
1206 	mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
1207 		       IPV6_MIN_MTU : IPV4_MIN_MTU);
1208 
1209 	skb_dst_update_pmtu_no_confirm(skb, mtu);
1210 	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
1211 		*pmtu = mtu;
1212 		err = -EMSGSIZE;
1213 		goto tx_err_dst_release;
1214 	}
1215 
1216 	if (t->err_count > 0) {
1217 		if (time_before(jiffies,
1218 				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
1219 			t->err_count--;
1220 
1221 			dst_link_failure(skb);
1222 		} else {
1223 			t->err_count = 0;
1224 		}
1225 	}
1226 
1227 	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
1228 
1229 	/*
1230 	 * Okay, now see if we can stuff it in the buffer as-is.
1231 	 */
1232 	max_headroom += LL_RESERVED_SPACE(tdev);
1233 
1234 	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
1235 	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
1236 		struct sk_buff *new_skb;
1237 
1238 		new_skb = skb_realloc_headroom(skb, max_headroom);
1239 		if (!new_skb)
1240 			goto tx_err_dst_release;
1241 
1242 		if (skb->sk)
1243 			skb_set_owner_w(new_skb, skb->sk);
1244 		consume_skb(skb);
1245 		skb = new_skb;
1246 	}
1247 
1248 	if (t->parms.collect_md) {
1249 		if (t->encap.type != TUNNEL_ENCAP_NONE)
1250 			goto tx_err_dst_release;
1251 	} else {
1252 		if (use_cache && ndst)
1253 			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
1254 	}
1255 	skb_dst_set(skb, dst);
1256 
1257 	if (hop_limit == 0) {
1258 		if (skb->protocol == htons(ETH_P_IP))
1259 			hop_limit = ip_hdr(skb)->ttl;
1260 		else if (skb->protocol == htons(ETH_P_IPV6))
1261 			hop_limit = ipv6_hdr(skb)->hop_limit;
1262 		else
1263 			hop_limit = ip6_dst_hoplimit(dst);
1264 	}
1265 
1266 	/* Calculate max headroom for all the headers and adjust
1267 	 * needed_headroom if necessary.
1268 	 */
1269 	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
1270 			+ dst->header_len + t->hlen;
1271 	if (max_headroom > dev->needed_headroom)
1272 		dev->needed_headroom = max_headroom;
1273 
1274 	err = ip6_tnl_encap(skb, t, &proto, fl6);
1275 	if (err)
1276 		return err;
1277 
1278 	if (encap_limit >= 0) {
1279 		init_tel_txopt(&opt, encap_limit);
1280 		ipv6_push_frag_opts(skb, &opt.ops, &proto);
1281 	}
1282 
1283 	skb_set_inner_ipproto(skb, proto);
1284 
1285 	skb_push(skb, sizeof(struct ipv6hdr));
1286 	skb_reset_network_header(skb);
1287 	ipv6h = ipv6_hdr(skb);
1288 	ip6_flow_hdr(ipv6h, dsfield,
1289 		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
1290 	ipv6h->hop_limit = hop_limit;
1291 	ipv6h->nexthdr = proto;
1292 	ipv6h->saddr = fl6->saddr;
1293 	ipv6h->daddr = fl6->daddr;
1294 	ip6tunnel_xmit(NULL, skb, dev);
1295 	return 0;
1296 tx_err_link_failure:
1297 	stats->tx_carrier_errors++;
1298 	dst_link_failure(skb);
1299 tx_err_dst_release:
1300 	dst_release(dst);
1301 	return err;
1302 }
1303 EXPORT_SYMBOL(ip6_tnl_xmit);
1304 
1305 static inline int
1306 ipxip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev,
1307 		u8 protocol)
1308 {
1309 	struct ip6_tnl *t = netdev_priv(dev);
1310 	struct ipv6hdr *ipv6h;
1311 	const struct iphdr  *iph;
1312 	int encap_limit = -1;
1313 	__u16 offset;
1314 	struct flowi6 fl6;
1315 	__u8 dsfield, orig_dsfield;
1316 	__u32 mtu;
1317 	u8 tproto;
1318 	int err;
1319 
1320 	tproto = READ_ONCE(t->parms.proto);
1321 	if (tproto != protocol && tproto != 0)
1322 		return -1;
1323 
1324 	if (t->parms.collect_md) {
1325 		struct ip_tunnel_info *tun_info;
1326 		const struct ip_tunnel_key *key;
1327 
1328 		tun_info = skb_tunnel_info(skb);
1329 		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1330 			     ip_tunnel_info_af(tun_info) != AF_INET6))
1331 			return -1;
1332 		key = &tun_info->key;
1333 		memset(&fl6, 0, sizeof(fl6));
1334 		fl6.flowi6_proto = protocol;
1335 		fl6.saddr = key->u.ipv6.src;
1336 		fl6.daddr = key->u.ipv6.dst;
1337 		fl6.flowlabel = key->label;
1338 		dsfield =  key->tos;
1339 		switch (protocol) {
1340 		case IPPROTO_IPIP:
1341 			iph = ip_hdr(skb);
1342 			orig_dsfield = ipv4_get_dsfield(iph);
1343 			break;
1344 		case IPPROTO_IPV6:
1345 			ipv6h = ipv6_hdr(skb);
1346 			orig_dsfield = ipv6_get_dsfield(ipv6h);
1347 			break;
1348 		default:
1349 			orig_dsfield = dsfield;
1350 			break;
1351 		}
1352 	} else {
1353 		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1354 			encap_limit = t->parms.encap_limit;
1355 		if (protocol == IPPROTO_IPV6) {
1356 			offset = ip6_tnl_parse_tlv_enc_lim(skb,
1357 						skb_network_header(skb));
1358 			/* ip6_tnl_parse_tlv_enc_lim() might have
1359 			 * reallocated skb->head
1360 			 */
1361 			if (offset > 0) {
1362 				struct ipv6_tlv_tnl_enc_lim *tel;
1363 
1364 				tel = (void *)&skb_network_header(skb)[offset];
1365 				if (tel->encap_limit == 0) {
1366 					icmpv6_send(skb, ICMPV6_PARAMPROB,
1367 						ICMPV6_HDR_FIELD, offset + 2);
1368 					return -1;
1369 				}
1370 				encap_limit = tel->encap_limit - 1;
1371 			}
1372 		}
1373 
1374 		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1375 		fl6.flowi6_proto = protocol;
1376 
1377 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1378 			fl6.flowi6_mark = skb->mark;
1379 		else
1380 			fl6.flowi6_mark = t->parms.fwmark;
1381 		switch (protocol) {
1382 		case IPPROTO_IPIP:
1383 			iph = ip_hdr(skb);
1384 			orig_dsfield = ipv4_get_dsfield(iph);
1385 			if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1386 				dsfield = orig_dsfield;
1387 			else
1388 				dsfield = ip6_tclass(t->parms.flowinfo);
1389 			break;
1390 		case IPPROTO_IPV6:
1391 			ipv6h = ipv6_hdr(skb);
1392 			orig_dsfield = ipv6_get_dsfield(ipv6h);
1393 			if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1394 				dsfield = orig_dsfield;
1395 			else
1396 				dsfield = ip6_tclass(t->parms.flowinfo);
1397 			if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
1398 				fl6.flowlabel |= ip6_flowlabel(ipv6h);
1399 			break;
1400 		default:
1401 			orig_dsfield = dsfield = ip6_tclass(t->parms.flowinfo);
1402 			break;
1403 		}
1404 	}
1405 
1406 	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1407 	dsfield = INET_ECN_encapsulate(dsfield, orig_dsfield);
1408 
1409 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1410 		return -1;
1411 
1412 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1413 			   protocol);
1414 	if (err != 0) {
1415 		/* XXX: send ICMP error even if DF is not set. */
1416 		if (err == -EMSGSIZE)
1417 			switch (protocol) {
1418 			case IPPROTO_IPIP:
1419 				icmp_send(skb, ICMP_DEST_UNREACH,
1420 					  ICMP_FRAG_NEEDED, htonl(mtu));
1421 				break;
1422 			case IPPROTO_IPV6:
1423 				icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1424 				break;
1425 			default:
1426 				break;
1427 			}
1428 		return -1;
1429 	}
1430 
1431 	return 0;
1432 }
1433 
1434 static netdev_tx_t
1435 ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
1436 {
1437 	struct ip6_tnl *t = netdev_priv(dev);
1438 	struct net_device_stats *stats = &t->dev->stats;
1439 	u8 ipproto;
1440 	int ret;
1441 
1442 	if (!pskb_inet_may_pull(skb))
1443 		goto tx_err;
1444 
1445 	switch (skb->protocol) {
1446 	case htons(ETH_P_IP):
1447 		ipproto = IPPROTO_IPIP;
1448 		break;
1449 	case htons(ETH_P_IPV6):
1450 		if (ip6_tnl_addr_conflict(t, ipv6_hdr(skb)))
1451 			goto tx_err;
1452 		ipproto = IPPROTO_IPV6;
1453 		break;
1454 	case htons(ETH_P_MPLS_UC):
1455 		ipproto = IPPROTO_MPLS;
1456 		break;
1457 	default:
1458 		goto tx_err;
1459 	}
1460 
1461 	ret = ipxip6_tnl_xmit(skb, dev, ipproto);
1462 	if (ret < 0)
1463 		goto tx_err;
1464 
1465 	return NETDEV_TX_OK;
1466 
1467 tx_err:
1468 	stats->tx_errors++;
1469 	stats->tx_dropped++;
1470 	kfree_skb(skb);
1471 	return NETDEV_TX_OK;
1472 }
1473 
1474 static void ip6_tnl_link_config(struct ip6_tnl *t)
1475 {
1476 	struct net_device *dev = t->dev;
1477 	struct net_device *tdev = NULL;
1478 	struct __ip6_tnl_parm *p = &t->parms;
1479 	struct flowi6 *fl6 = &t->fl.u.ip6;
1480 	unsigned int mtu;
1481 	int t_hlen;
1482 
1483 	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1484 	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1485 
1486 	/* Set up flowi template */
1487 	fl6->saddr = p->laddr;
1488 	fl6->daddr = p->raddr;
1489 	fl6->flowi6_oif = p->link;
1490 	fl6->flowlabel = 0;
1491 
1492 	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1493 		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1494 	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1495 		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1496 
1497 	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1498 	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1499 
1500 	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
1501 		dev->flags |= IFF_POINTOPOINT;
1502 	else
1503 		dev->flags &= ~IFF_POINTOPOINT;
1504 
1505 	t->tun_hlen = 0;
1506 	t->hlen = t->encap_hlen + t->tun_hlen;
1507 	t_hlen = t->hlen + sizeof(struct ipv6hdr);
1508 
1509 	if (p->flags & IP6_TNL_F_CAP_XMIT) {
1510 		int strict = (ipv6_addr_type(&p->raddr) &
1511 			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1512 
1513 		struct rt6_info *rt = rt6_lookup(t->net,
1514 						 &p->raddr, &p->laddr,
1515 						 p->link, NULL, strict);
1516 		if (rt) {
1517 			tdev = rt->dst.dev;
1518 			ip6_rt_put(rt);
1519 		}
1520 
1521 		if (!tdev && p->link)
1522 			tdev = __dev_get_by_index(t->net, p->link);
1523 
1524 		if (tdev) {
1525 			dev->hard_header_len = tdev->hard_header_len + t_hlen;
1526 			mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU);
1527 
1528 			dev->mtu = mtu - t_hlen;
1529 			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1530 				dev->mtu -= 8;
1531 
1532 			if (dev->mtu < IPV6_MIN_MTU)
1533 				dev->mtu = IPV6_MIN_MTU;
1534 		}
1535 	}
1536 }
1537 
1538 /**
1539  * ip6_tnl_change - update the tunnel parameters
1540  *   @t: tunnel to be changed
1541  *   @p: tunnel configuration parameters
1542  *
1543  * Description:
1544  *   ip6_tnl_change() updates the tunnel parameters
1545  **/
1546 
1547 static int
1548 ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
1549 {
1550 	t->parms.laddr = p->laddr;
1551 	t->parms.raddr = p->raddr;
1552 	t->parms.flags = p->flags;
1553 	t->parms.hop_limit = p->hop_limit;
1554 	t->parms.encap_limit = p->encap_limit;
1555 	t->parms.flowinfo = p->flowinfo;
1556 	t->parms.link = p->link;
1557 	t->parms.proto = p->proto;
1558 	t->parms.fwmark = p->fwmark;
1559 	dst_cache_reset(&t->dst_cache);
1560 	ip6_tnl_link_config(t);
1561 	return 0;
1562 }
1563 
1564 static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1565 {
1566 	struct net *net = t->net;
1567 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1568 	int err;
1569 
1570 	ip6_tnl_unlink(ip6n, t);
1571 	synchronize_net();
1572 	err = ip6_tnl_change(t, p);
1573 	ip6_tnl_link(ip6n, t);
1574 	netdev_state_change(t->dev);
1575 	return err;
1576 }
1577 
1578 static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1579 {
1580 	/* for default tnl0 device allow to change only the proto */
1581 	t->parms.proto = p->proto;
1582 	netdev_state_change(t->dev);
1583 	return 0;
1584 }
1585 
1586 static void
1587 ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
1588 {
1589 	p->laddr = u->laddr;
1590 	p->raddr = u->raddr;
1591 	p->flags = u->flags;
1592 	p->hop_limit = u->hop_limit;
1593 	p->encap_limit = u->encap_limit;
1594 	p->flowinfo = u->flowinfo;
1595 	p->link = u->link;
1596 	p->proto = u->proto;
1597 	memcpy(p->name, u->name, sizeof(u->name));
1598 }
1599 
1600 static void
1601 ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
1602 {
1603 	u->laddr = p->laddr;
1604 	u->raddr = p->raddr;
1605 	u->flags = p->flags;
1606 	u->hop_limit = p->hop_limit;
1607 	u->encap_limit = p->encap_limit;
1608 	u->flowinfo = p->flowinfo;
1609 	u->link = p->link;
1610 	u->proto = p->proto;
1611 	memcpy(u->name, p->name, sizeof(u->name));
1612 }
1613 
1614 /**
1615  * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
1616  *   @dev: virtual device associated with tunnel
1617  *   @ifr: parameters passed from userspace
1618  *   @cmd: command to be performed
1619  *
1620  * Description:
1621  *   ip6_tnl_ioctl() is used for managing IPv6 tunnels
1622  *   from userspace.
1623  *
1624  *   The possible commands are the following:
1625  *     %SIOCGETTUNNEL: get tunnel parameters for device
1626  *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
1627  *     %SIOCCHGTUNNEL: change tunnel parameters to those given
1628  *     %SIOCDELTUNNEL: delete tunnel
1629  *
1630  *   The fallback device "ip6tnl0", created during module
1631  *   initialization, can be used for creating other tunnel devices.
1632  *
1633  * Return:
1634  *   0 on success,
1635  *   %-EFAULT if unable to copy data to or from userspace,
1636  *   %-EPERM if current process hasn't %CAP_NET_ADMIN set
1637  *   %-EINVAL if passed tunnel parameters are invalid,
1638  *   %-EEXIST if changing a tunnel's parameters would cause a conflict
1639  *   %-ENODEV if attempting to change or delete a nonexisting device
1640  **/
1641 
1642 static int
1643 ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1644 {
1645 	int err = 0;
1646 	struct ip6_tnl_parm p;
1647 	struct __ip6_tnl_parm p1;
1648 	struct ip6_tnl *t = netdev_priv(dev);
1649 	struct net *net = t->net;
1650 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1651 
1652 	memset(&p1, 0, sizeof(p1));
1653 
1654 	switch (cmd) {
1655 	case SIOCGETTUNNEL:
1656 		if (dev == ip6n->fb_tnl_dev) {
1657 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1658 				err = -EFAULT;
1659 				break;
1660 			}
1661 			ip6_tnl_parm_from_user(&p1, &p);
1662 			t = ip6_tnl_locate(net, &p1, 0);
1663 			if (IS_ERR(t))
1664 				t = netdev_priv(dev);
1665 		} else {
1666 			memset(&p, 0, sizeof(p));
1667 		}
1668 		ip6_tnl_parm_to_user(&p, &t->parms);
1669 		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
1670 			err = -EFAULT;
1671 		}
1672 		break;
1673 	case SIOCADDTUNNEL:
1674 	case SIOCCHGTUNNEL:
1675 		err = -EPERM;
1676 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1677 			break;
1678 		err = -EFAULT;
1679 		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1680 			break;
1681 		err = -EINVAL;
1682 		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1683 		    p.proto != 0)
1684 			break;
1685 		ip6_tnl_parm_from_user(&p1, &p);
1686 		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
1687 		if (cmd == SIOCCHGTUNNEL) {
1688 			if (!IS_ERR(t)) {
1689 				if (t->dev != dev) {
1690 					err = -EEXIST;
1691 					break;
1692 				}
1693 			} else
1694 				t = netdev_priv(dev);
1695 			if (dev == ip6n->fb_tnl_dev)
1696 				err = ip6_tnl0_update(t, &p1);
1697 			else
1698 				err = ip6_tnl_update(t, &p1);
1699 		}
1700 		if (!IS_ERR(t)) {
1701 			err = 0;
1702 			ip6_tnl_parm_to_user(&p, &t->parms);
1703 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1704 				err = -EFAULT;
1705 
1706 		} else {
1707 			err = PTR_ERR(t);
1708 		}
1709 		break;
1710 	case SIOCDELTUNNEL:
1711 		err = -EPERM;
1712 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1713 			break;
1714 
1715 		if (dev == ip6n->fb_tnl_dev) {
1716 			err = -EFAULT;
1717 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1718 				break;
1719 			err = -ENOENT;
1720 			ip6_tnl_parm_from_user(&p1, &p);
1721 			t = ip6_tnl_locate(net, &p1, 0);
1722 			if (IS_ERR(t))
1723 				break;
1724 			err = -EPERM;
1725 			if (t->dev == ip6n->fb_tnl_dev)
1726 				break;
1727 			dev = t->dev;
1728 		}
1729 		err = 0;
1730 		unregister_netdevice(dev);
1731 		break;
1732 	default:
1733 		err = -EINVAL;
1734 	}
1735 	return err;
1736 }
1737 
1738 /**
1739  * ip6_tnl_change_mtu - change mtu manually for tunnel device
1740  *   @dev: virtual device associated with tunnel
1741  *   @new_mtu: the new mtu
1742  *
1743  * Return:
1744  *   0 on success,
1745  *   %-EINVAL if mtu too small
1746  **/
1747 
1748 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1749 {
1750 	struct ip6_tnl *tnl = netdev_priv(dev);
1751 
1752 	if (tnl->parms.proto == IPPROTO_IPV6) {
1753 		if (new_mtu < IPV6_MIN_MTU)
1754 			return -EINVAL;
1755 	} else {
1756 		if (new_mtu < ETH_MIN_MTU)
1757 			return -EINVAL;
1758 	}
1759 	if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
1760 		if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
1761 			return -EINVAL;
1762 	} else {
1763 		if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
1764 			return -EINVAL;
1765 	}
1766 	dev->mtu = new_mtu;
1767 	return 0;
1768 }
1769 EXPORT_SYMBOL(ip6_tnl_change_mtu);
1770 
1771 int ip6_tnl_get_iflink(const struct net_device *dev)
1772 {
1773 	struct ip6_tnl *t = netdev_priv(dev);
1774 
1775 	return t->parms.link;
1776 }
1777 EXPORT_SYMBOL(ip6_tnl_get_iflink);
1778 
1779 int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
1780 			  unsigned int num)
1781 {
1782 	if (num >= MAX_IPTUN_ENCAP_OPS)
1783 		return -ERANGE;
1784 
1785 	return !cmpxchg((const struct ip6_tnl_encap_ops **)
1786 			&ip6tun_encaps[num],
1787 			NULL, ops) ? 0 : -1;
1788 }
1789 EXPORT_SYMBOL(ip6_tnl_encap_add_ops);
1790 
1791 int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
1792 			  unsigned int num)
1793 {
1794 	int ret;
1795 
1796 	if (num >= MAX_IPTUN_ENCAP_OPS)
1797 		return -ERANGE;
1798 
1799 	ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
1800 		       &ip6tun_encaps[num],
1801 		       ops, NULL) == ops) ? 0 : -1;
1802 
1803 	synchronize_net();
1804 
1805 	return ret;
1806 }
1807 EXPORT_SYMBOL(ip6_tnl_encap_del_ops);
1808 
1809 int ip6_tnl_encap_setup(struct ip6_tnl *t,
1810 			struct ip_tunnel_encap *ipencap)
1811 {
1812 	int hlen;
1813 
1814 	memset(&t->encap, 0, sizeof(t->encap));
1815 
1816 	hlen = ip6_encap_hlen(ipencap);
1817 	if (hlen < 0)
1818 		return hlen;
1819 
1820 	t->encap.type = ipencap->type;
1821 	t->encap.sport = ipencap->sport;
1822 	t->encap.dport = ipencap->dport;
1823 	t->encap.flags = ipencap->flags;
1824 
1825 	t->encap_hlen = hlen;
1826 	t->hlen = t->encap_hlen + t->tun_hlen;
1827 
1828 	return 0;
1829 }
1830 EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
1831 
1832 static const struct net_device_ops ip6_tnl_netdev_ops = {
1833 	.ndo_init	= ip6_tnl_dev_init,
1834 	.ndo_uninit	= ip6_tnl_dev_uninit,
1835 	.ndo_start_xmit = ip6_tnl_start_xmit,
1836 	.ndo_do_ioctl	= ip6_tnl_ioctl,
1837 	.ndo_change_mtu = ip6_tnl_change_mtu,
1838 	.ndo_get_stats	= ip6_get_stats,
1839 	.ndo_get_iflink = ip6_tnl_get_iflink,
1840 };
1841 
1842 #define IPXIPX_FEATURES (NETIF_F_SG |		\
1843 			 NETIF_F_FRAGLIST |	\
1844 			 NETIF_F_HIGHDMA |	\
1845 			 NETIF_F_GSO_SOFTWARE |	\
1846 			 NETIF_F_HW_CSUM)
1847 
1848 /**
1849  * ip6_tnl_dev_setup - setup virtual tunnel device
1850  *   @dev: virtual device associated with tunnel
1851  *
1852  * Description:
1853  *   Initialize function pointers and device parameters
1854  **/
1855 
1856 static void ip6_tnl_dev_setup(struct net_device *dev)
1857 {
1858 	dev->netdev_ops = &ip6_tnl_netdev_ops;
1859 	dev->header_ops = &ip_tunnel_header_ops;
1860 	dev->needs_free_netdev = true;
1861 	dev->priv_destructor = ip6_dev_free;
1862 
1863 	dev->type = ARPHRD_TUNNEL6;
1864 	dev->flags |= IFF_NOARP;
1865 	dev->addr_len = sizeof(struct in6_addr);
1866 	dev->features |= NETIF_F_LLTX;
1867 	netif_keep_dst(dev);
1868 
1869 	dev->features		|= IPXIPX_FEATURES;
1870 	dev->hw_features	|= IPXIPX_FEATURES;
1871 
1872 	/* This perm addr will be used as interface identifier by IPv6 */
1873 	dev->addr_assign_type = NET_ADDR_RANDOM;
1874 	eth_random_addr(dev->perm_addr);
1875 }
1876 
1877 
1878 /**
1879  * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
1880  *   @dev: virtual device associated with tunnel
1881  **/
1882 
1883 static inline int
1884 ip6_tnl_dev_init_gen(struct net_device *dev)
1885 {
1886 	struct ip6_tnl *t = netdev_priv(dev);
1887 	int ret;
1888 	int t_hlen;
1889 
1890 	t->dev = dev;
1891 	t->net = dev_net(dev);
1892 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1893 	if (!dev->tstats)
1894 		return -ENOMEM;
1895 
1896 	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
1897 	if (ret)
1898 		goto free_stats;
1899 
1900 	ret = gro_cells_init(&t->gro_cells, dev);
1901 	if (ret)
1902 		goto destroy_dst;
1903 
1904 	t->tun_hlen = 0;
1905 	t->hlen = t->encap_hlen + t->tun_hlen;
1906 	t_hlen = t->hlen + sizeof(struct ipv6hdr);
1907 
1908 	dev->type = ARPHRD_TUNNEL6;
1909 	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1910 	dev->mtu = ETH_DATA_LEN - t_hlen;
1911 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1912 		dev->mtu -= 8;
1913 	dev->min_mtu = ETH_MIN_MTU;
1914 	dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
1915 
1916 	return 0;
1917 
1918 destroy_dst:
1919 	dst_cache_destroy(&t->dst_cache);
1920 free_stats:
1921 	free_percpu(dev->tstats);
1922 	dev->tstats = NULL;
1923 
1924 	return ret;
1925 }
1926 
1927 /**
1928  * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
1929  *   @dev: virtual device associated with tunnel
1930  **/
1931 
1932 static int ip6_tnl_dev_init(struct net_device *dev)
1933 {
1934 	struct ip6_tnl *t = netdev_priv(dev);
1935 	int err = ip6_tnl_dev_init_gen(dev);
1936 
1937 	if (err)
1938 		return err;
1939 	ip6_tnl_link_config(t);
1940 	if (t->parms.collect_md)
1941 		netif_keep_dst(dev);
1942 	return 0;
1943 }
1944 
1945 /**
1946  * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
1947  *   @dev: fallback device
1948  *
1949  * Return: 0
1950  **/
1951 
1952 static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1953 {
1954 	struct ip6_tnl *t = netdev_priv(dev);
1955 	struct net *net = dev_net(dev);
1956 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1957 
1958 	t->parms.proto = IPPROTO_IPV6;
1959 	dev_hold(dev);
1960 
1961 	rcu_assign_pointer(ip6n->tnls_wc[0], t);
1962 	return 0;
1963 }
1964 
1965 static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[],
1966 			    struct netlink_ext_ack *extack)
1967 {
1968 	u8 proto;
1969 
1970 	if (!data || !data[IFLA_IPTUN_PROTO])
1971 		return 0;
1972 
1973 	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1974 	if (proto != IPPROTO_IPV6 &&
1975 	    proto != IPPROTO_IPIP &&
1976 	    proto != 0)
1977 		return -EINVAL;
1978 
1979 	return 0;
1980 }
1981 
1982 static void ip6_tnl_netlink_parms(struct nlattr *data[],
1983 				  struct __ip6_tnl_parm *parms)
1984 {
1985 	memset(parms, 0, sizeof(*parms));
1986 
1987 	if (!data)
1988 		return;
1989 
1990 	if (data[IFLA_IPTUN_LINK])
1991 		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
1992 
1993 	if (data[IFLA_IPTUN_LOCAL])
1994 		parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);
1995 
1996 	if (data[IFLA_IPTUN_REMOTE])
1997 		parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);
1998 
1999 	if (data[IFLA_IPTUN_TTL])
2000 		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
2001 
2002 	if (data[IFLA_IPTUN_ENCAP_LIMIT])
2003 		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);
2004 
2005 	if (data[IFLA_IPTUN_FLOWINFO])
2006 		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);
2007 
2008 	if (data[IFLA_IPTUN_FLAGS])
2009 		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);
2010 
2011 	if (data[IFLA_IPTUN_PROTO])
2012 		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
2013 
2014 	if (data[IFLA_IPTUN_COLLECT_METADATA])
2015 		parms->collect_md = true;
2016 
2017 	if (data[IFLA_IPTUN_FWMARK])
2018 		parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
2019 }
2020 
2021 static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
2022 					struct ip_tunnel_encap *ipencap)
2023 {
2024 	bool ret = false;
2025 
2026 	memset(ipencap, 0, sizeof(*ipencap));
2027 
2028 	if (!data)
2029 		return ret;
2030 
2031 	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
2032 		ret = true;
2033 		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
2034 	}
2035 
2036 	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
2037 		ret = true;
2038 		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
2039 	}
2040 
2041 	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
2042 		ret = true;
2043 		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
2044 	}
2045 
2046 	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
2047 		ret = true;
2048 		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
2049 	}
2050 
2051 	return ret;
2052 }
2053 
2054 static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
2055 			   struct nlattr *tb[], struct nlattr *data[],
2056 			   struct netlink_ext_ack *extack)
2057 {
2058 	struct net *net = dev_net(dev);
2059 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2060 	struct ip_tunnel_encap ipencap;
2061 	struct ip6_tnl *nt, *t;
2062 	int err;
2063 
2064 	nt = netdev_priv(dev);
2065 
2066 	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
2067 		err = ip6_tnl_encap_setup(nt, &ipencap);
2068 		if (err < 0)
2069 			return err;
2070 	}
2071 
2072 	ip6_tnl_netlink_parms(data, &nt->parms);
2073 
2074 	if (nt->parms.collect_md) {
2075 		if (rtnl_dereference(ip6n->collect_md_tun))
2076 			return -EEXIST;
2077 	} else {
2078 		t = ip6_tnl_locate(net, &nt->parms, 0);
2079 		if (!IS_ERR(t))
2080 			return -EEXIST;
2081 	}
2082 
2083 	err = ip6_tnl_create2(dev);
2084 	if (!err && tb[IFLA_MTU])
2085 		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
2086 
2087 	return err;
2088 }
2089 
2090 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
2091 			      struct nlattr *data[],
2092 			      struct netlink_ext_ack *extack)
2093 {
2094 	struct ip6_tnl *t = netdev_priv(dev);
2095 	struct __ip6_tnl_parm p;
2096 	struct net *net = t->net;
2097 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2098 	struct ip_tunnel_encap ipencap;
2099 
2100 	if (dev == ip6n->fb_tnl_dev)
2101 		return -EINVAL;
2102 
2103 	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
2104 		int err = ip6_tnl_encap_setup(t, &ipencap);
2105 
2106 		if (err < 0)
2107 			return err;
2108 	}
2109 	ip6_tnl_netlink_parms(data, &p);
2110 	if (p.collect_md)
2111 		return -EINVAL;
2112 
2113 	t = ip6_tnl_locate(net, &p, 0);
2114 	if (!IS_ERR(t)) {
2115 		if (t->dev != dev)
2116 			return -EEXIST;
2117 	} else
2118 		t = netdev_priv(dev);
2119 
2120 	return ip6_tnl_update(t, &p);
2121 }
2122 
2123 static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
2124 {
2125 	struct net *net = dev_net(dev);
2126 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2127 
2128 	if (dev != ip6n->fb_tnl_dev)
2129 		unregister_netdevice_queue(dev, head);
2130 }
2131 
2132 static size_t ip6_tnl_get_size(const struct net_device *dev)
2133 {
2134 	return
2135 		/* IFLA_IPTUN_LINK */
2136 		nla_total_size(4) +
2137 		/* IFLA_IPTUN_LOCAL */
2138 		nla_total_size(sizeof(struct in6_addr)) +
2139 		/* IFLA_IPTUN_REMOTE */
2140 		nla_total_size(sizeof(struct in6_addr)) +
2141 		/* IFLA_IPTUN_TTL */
2142 		nla_total_size(1) +
2143 		/* IFLA_IPTUN_ENCAP_LIMIT */
2144 		nla_total_size(1) +
2145 		/* IFLA_IPTUN_FLOWINFO */
2146 		nla_total_size(4) +
2147 		/* IFLA_IPTUN_FLAGS */
2148 		nla_total_size(4) +
2149 		/* IFLA_IPTUN_PROTO */
2150 		nla_total_size(1) +
2151 		/* IFLA_IPTUN_ENCAP_TYPE */
2152 		nla_total_size(2) +
2153 		/* IFLA_IPTUN_ENCAP_FLAGS */
2154 		nla_total_size(2) +
2155 		/* IFLA_IPTUN_ENCAP_SPORT */
2156 		nla_total_size(2) +
2157 		/* IFLA_IPTUN_ENCAP_DPORT */
2158 		nla_total_size(2) +
2159 		/* IFLA_IPTUN_COLLECT_METADATA */
2160 		nla_total_size(0) +
2161 		/* IFLA_IPTUN_FWMARK */
2162 		nla_total_size(4) +
2163 		0;
2164 }
2165 
2166 static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
2167 {
2168 	struct ip6_tnl *tunnel = netdev_priv(dev);
2169 	struct __ip6_tnl_parm *parm = &tunnel->parms;
2170 
2171 	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
2172 	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
2173 	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
2174 	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
2175 	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
2176 	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
2177 	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
2178 	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
2179 	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
2180 		goto nla_put_failure;
2181 
2182 	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
2183 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
2184 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
2185 	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
2186 		goto nla_put_failure;
2187 
2188 	if (parm->collect_md)
2189 		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
2190 			goto nla_put_failure;
2191 
2192 	return 0;
2193 
2194 nla_put_failure:
2195 	return -EMSGSIZE;
2196 }
2197 
2198 struct net *ip6_tnl_get_link_net(const struct net_device *dev)
2199 {
2200 	struct ip6_tnl *tunnel = netdev_priv(dev);
2201 
2202 	return tunnel->net;
2203 }
2204 EXPORT_SYMBOL(ip6_tnl_get_link_net);
2205 
2206 static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
2207 	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
2208 	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
2209 	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
2210 	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
2211 	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
2212 	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
2213 	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
2214 	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
2215 	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
2216 	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
2217 	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
2218 	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
2219 	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
2220 	[IFLA_IPTUN_FWMARK]		= { .type = NLA_U32 },
2221 };
2222 
2223 static struct rtnl_link_ops ip6_link_ops __read_mostly = {
2224 	.kind		= "ip6tnl",
2225 	.maxtype	= IFLA_IPTUN_MAX,
2226 	.policy		= ip6_tnl_policy,
2227 	.priv_size	= sizeof(struct ip6_tnl),
2228 	.setup		= ip6_tnl_dev_setup,
2229 	.validate	= ip6_tnl_validate,
2230 	.newlink	= ip6_tnl_newlink,
2231 	.changelink	= ip6_tnl_changelink,
2232 	.dellink	= ip6_tnl_dellink,
2233 	.get_size	= ip6_tnl_get_size,
2234 	.fill_info	= ip6_tnl_fill_info,
2235 	.get_link_net	= ip6_tnl_get_link_net,
2236 };
2237 
2238 static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
2239 	.handler	= ip4ip6_rcv,
2240 	.err_handler	= ip4ip6_err,
2241 	.priority	=	1,
2242 };
2243 
2244 static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
2245 	.handler	= ip6ip6_rcv,
2246 	.err_handler	= ip6ip6_err,
2247 	.priority	=	1,
2248 };
2249 
2250 static struct xfrm6_tunnel mplsip6_handler __read_mostly = {
2251 	.handler	= mplsip6_rcv,
2252 	.err_handler	= mplsip6_err,
2253 	.priority	=	1,
2254 };
2255 
2256 static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head *list)
2257 {
2258 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2259 	struct net_device *dev, *aux;
2260 	int h;
2261 	struct ip6_tnl *t;
2262 
2263 	for_each_netdev_safe(net, dev, aux)
2264 		if (dev->rtnl_link_ops == &ip6_link_ops)
2265 			unregister_netdevice_queue(dev, list);
2266 
2267 	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
2268 		t = rtnl_dereference(ip6n->tnls_r_l[h]);
2269 		while (t) {
2270 			/* If dev is in the same netns, it has already
2271 			 * been added to the list by the previous loop.
2272 			 */
2273 			if (!net_eq(dev_net(t->dev), net))
2274 				unregister_netdevice_queue(t->dev, list);
2275 			t = rtnl_dereference(t->next);
2276 		}
2277 	}
2278 }
2279 
2280 static int __net_init ip6_tnl_init_net(struct net *net)
2281 {
2282 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2283 	struct ip6_tnl *t = NULL;
2284 	int err;
2285 
2286 	ip6n->tnls[0] = ip6n->tnls_wc;
2287 	ip6n->tnls[1] = ip6n->tnls_r_l;
2288 
2289 	if (!net_has_fallback_tunnels(net))
2290 		return 0;
2291 	err = -ENOMEM;
2292 	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
2293 					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);
2294 
2295 	if (!ip6n->fb_tnl_dev)
2296 		goto err_alloc_dev;
2297 	dev_net_set(ip6n->fb_tnl_dev, net);
2298 	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
2299 	/* FB netdevice is special: we have one, and only one per netns.
2300 	 * Allowing to move it to another netns is clearly unsafe.
2301 	 */
2302 	ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
2303 
2304 	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
2305 	if (err < 0)
2306 		goto err_register;
2307 
2308 	err = register_netdev(ip6n->fb_tnl_dev);
2309 	if (err < 0)
2310 		goto err_register;
2311 
2312 	t = netdev_priv(ip6n->fb_tnl_dev);
2313 
2314 	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
2315 	return 0;
2316 
2317 err_register:
2318 	free_netdev(ip6n->fb_tnl_dev);
2319 err_alloc_dev:
2320 	return err;
2321 }
2322 
2323 static void __net_exit ip6_tnl_exit_batch_net(struct list_head *net_list)
2324 {
2325 	struct net *net;
2326 	LIST_HEAD(list);
2327 
2328 	rtnl_lock();
2329 	list_for_each_entry(net, net_list, exit_list)
2330 		ip6_tnl_destroy_tunnels(net, &list);
2331 	unregister_netdevice_many(&list);
2332 	rtnl_unlock();
2333 }
2334 
2335 static struct pernet_operations ip6_tnl_net_ops = {
2336 	.init = ip6_tnl_init_net,
2337 	.exit_batch = ip6_tnl_exit_batch_net,
2338 	.id   = &ip6_tnl_net_id,
2339 	.size = sizeof(struct ip6_tnl_net),
2340 };
2341 
2342 /**
2343  * ip6_tunnel_init - register protocol and reserve needed resources
2344  *
2345  * Return: 0 on success
2346  **/
2347 
2348 static int __init ip6_tunnel_init(void)
2349 {
2350 	int  err;
2351 
2352 	if (!ipv6_mod_enabled())
2353 		return -EOPNOTSUPP;
2354 
2355 	err = register_pernet_device(&ip6_tnl_net_ops);
2356 	if (err < 0)
2357 		goto out_pernet;
2358 
2359 	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
2360 	if (err < 0) {
2361 		pr_err("%s: can't register ip4ip6\n", __func__);
2362 		goto out_ip4ip6;
2363 	}
2364 
2365 	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
2366 	if (err < 0) {
2367 		pr_err("%s: can't register ip6ip6\n", __func__);
2368 		goto out_ip6ip6;
2369 	}
2370 
2371 	if (ip6_tnl_mpls_supported()) {
2372 		err = xfrm6_tunnel_register(&mplsip6_handler, AF_MPLS);
2373 		if (err < 0) {
2374 			pr_err("%s: can't register mplsip6\n", __func__);
2375 			goto out_mplsip6;
2376 		}
2377 	}
2378 
2379 	err = rtnl_link_register(&ip6_link_ops);
2380 	if (err < 0)
2381 		goto rtnl_link_failed;
2382 
2383 	return 0;
2384 
2385 rtnl_link_failed:
2386 	if (ip6_tnl_mpls_supported())
2387 		xfrm6_tunnel_deregister(&mplsip6_handler, AF_MPLS);
2388 out_mplsip6:
2389 	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
2390 out_ip6ip6:
2391 	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
2392 out_ip4ip6:
2393 	unregister_pernet_device(&ip6_tnl_net_ops);
2394 out_pernet:
2395 	return err;
2396 }
2397 
2398 /**
2399  * ip6_tunnel_cleanup - free resources and unregister protocol
2400  **/
2401 
2402 static void __exit ip6_tunnel_cleanup(void)
2403 {
2404 	rtnl_link_unregister(&ip6_link_ops);
2405 	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
2406 		pr_info("%s: can't deregister ip4ip6\n", __func__);
2407 
2408 	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
2409 		pr_info("%s: can't deregister ip6ip6\n", __func__);
2410 
2411 	if (ip6_tnl_mpls_supported() &&
2412 	    xfrm6_tunnel_deregister(&mplsip6_handler, AF_MPLS))
2413 		pr_info("%s: can't deregister mplsip6\n", __func__);
2414 	unregister_pernet_device(&ip6_tnl_net_ops);
2415 }
2416 
2417 module_init(ip6_tunnel_init);
2418 module_exit(ip6_tunnel_cleanup);
2419