xref: /openbmc/linux/net/ipv6/ip6_tunnel.c (revision 612a462a)
1 /*
2  *	IPv6 tunneling device
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
7  *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
8  *
9  *      Based on:
10  *      linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
11  *
12  *      RFC 2473
13  *
14  *	This program is free software; you can redistribute it and/or
15  *      modify it under the terms of the GNU General Public License
16  *      as published by the Free Software Foundation; either version
17  *      2 of the License, or (at your option) any later version.
18  *
19  */
20 
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 
23 #include <linux/module.h>
24 #include <linux/capability.h>
25 #include <linux/errno.h>
26 #include <linux/types.h>
27 #include <linux/sockios.h>
28 #include <linux/icmp.h>
29 #include <linux/if.h>
30 #include <linux/in.h>
31 #include <linux/ip.h>
32 #include <linux/net.h>
33 #include <linux/in6.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/icmpv6.h>
37 #include <linux/init.h>
38 #include <linux/route.h>
39 #include <linux/rtnetlink.h>
40 #include <linux/netfilter_ipv6.h>
41 #include <linux/slab.h>
42 #include <linux/hash.h>
43 #include <linux/etherdevice.h>
44 
45 #include <linux/uaccess.h>
46 #include <linux/atomic.h>
47 
48 #include <net/icmp.h>
49 #include <net/ip.h>
50 #include <net/ip_tunnels.h>
51 #include <net/ipv6.h>
52 #include <net/ip6_route.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_tunnel.h>
55 #include <net/xfrm.h>
56 #include <net/dsfield.h>
57 #include <net/inet_ecn.h>
58 #include <net/net_namespace.h>
59 #include <net/netns/generic.h>
60 #include <net/dst_metadata.h>
61 
62 MODULE_AUTHOR("Ville Nuorvala");
63 MODULE_DESCRIPTION("IPv6 tunneling device");
64 MODULE_LICENSE("GPL");
65 MODULE_ALIAS_RTNL_LINK("ip6tnl");
66 MODULE_ALIAS_NETDEV("ip6tnl0");
67 
68 #define IP6_TUNNEL_HASH_SIZE_SHIFT  5
69 #define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)
70 
71 static bool log_ecn_error = true;
72 module_param(log_ecn_error, bool, 0644);
73 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
74 
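/* Tunnels are kept in small hash tables: HASH() folds the local and
 * remote addresses down to IP6_TUNNEL_HASH_SIZE_SHIFT bits, so a fully
 * specified tunnel lands in one of the 32 tnls_r_l[] buckets below.
 */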
75 static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
76 {
77 	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
78 
79 	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
80 }
81 
82 static int ip6_tnl_dev_init(struct net_device *dev);
83 static void ip6_tnl_dev_setup(struct net_device *dev);
84 static struct rtnl_link_ops ip6_link_ops __read_mostly;
85 
86 static unsigned int ip6_tnl_net_id __read_mostly;
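/* Per-namespace state.  tnls_r_l[] is hashed on (remote, local) for
 * fully specified tunnels, tnls_wc[0] holds the single fallback device,
 * and tnls[] is expected to map prio 0 to tnls_wc and prio 1 to
 * tnls_r_l, matching the prio value computed in ip6_tnl_bucket().
 */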
87 struct ip6_tnl_net {
88 	/* the IPv6 tunnel fallback device */
89 	struct net_device *fb_tnl_dev;
90 	/* lists for storing tunnels in use */
91 	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
92 	struct ip6_tnl __rcu *tnls_wc[1];
93 	struct ip6_tnl __rcu **tnls[2];
94 	struct ip6_tnl __rcu *collect_md_tun;
95 };
96 
97 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
98 {
99 	struct pcpu_sw_netstats tmp, sum = { 0 };
100 	int i;
101 
102 	for_each_possible_cpu(i) {
103 		unsigned int start;
104 		const struct pcpu_sw_netstats *tstats =
105 						   per_cpu_ptr(dev->tstats, i);
106 
107 		do {
108 			start = u64_stats_fetch_begin_irq(&tstats->syncp);
109 			tmp.rx_packets = tstats->rx_packets;
110 			tmp.rx_bytes = tstats->rx_bytes;
111 			tmp.tx_packets = tstats->tx_packets;
112 			tmp.tx_bytes = tstats->tx_bytes;
113 		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
114 
115 		sum.rx_packets += tmp.rx_packets;
116 		sum.rx_bytes   += tmp.rx_bytes;
117 		sum.tx_packets += tmp.tx_packets;
118 		sum.tx_bytes   += tmp.tx_bytes;
119 	}
120 	dev->stats.rx_packets = sum.rx_packets;
121 	dev->stats.rx_bytes   = sum.rx_bytes;
122 	dev->stats.tx_packets = sum.tx_packets;
123 	dev->stats.tx_bytes   = sum.tx_bytes;
124 	return &dev->stats;
125 }
126 
127 /**
128  * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
129  *   @remote: the address of the tunnel exit-point
130  *   @local: the address of the tunnel entry-point
131  *
132  * Return:
133  *   tunnel matching given end-points if found,
134  *   else fallback tunnel if its device is up,
135  *   else %NULL
136  **/
137 
138 #define for_each_ip6_tunnel_rcu(start) \
139 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
140 
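/* Lookup order: an exact (remote, local) match first, then a tunnel
 * bound only to the local address, then one bound only to the remote
 * address, then a collect_md (metadata) tunnel, and finally the
 * fallback device; each is considered only if its device is up.
 */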
141 static struct ip6_tnl *
142 ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
143 {
144 	unsigned int hash = HASH(remote, local);
145 	struct ip6_tnl *t;
146 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
147 	struct in6_addr any;
148 
149 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
150 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
151 		    ipv6_addr_equal(remote, &t->parms.raddr) &&
152 		    (t->dev->flags & IFF_UP))
153 			return t;
154 	}
155 
156 	memset(&any, 0, sizeof(any));
157 	hash = HASH(&any, local);
158 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
159 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
160 		    ipv6_addr_any(&t->parms.raddr) &&
161 		    (t->dev->flags & IFF_UP))
162 			return t;
163 	}
164 
165 	hash = HASH(remote, &any);
166 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
167 		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
168 		    ipv6_addr_any(&t->parms.laddr) &&
169 		    (t->dev->flags & IFF_UP))
170 			return t;
171 	}
172 
173 	t = rcu_dereference(ip6n->collect_md_tun);
174 	if (t && t->dev->flags & IFF_UP)
175 		return t;
176 
177 	t = rcu_dereference(ip6n->tnls_wc[0]);
178 	if (t && (t->dev->flags & IFF_UP))
179 		return t;
180 
181 	return NULL;
182 }
183 
184 /**
185  * ip6_tnl_bucket - get head of list matching given tunnel parameters
186  *   @p: parameters containing tunnel end-points
187  *
188  * Description:
189  *   ip6_tnl_bucket() returns the head of the list matching the
190  *   &struct in6_addr entries laddr and raddr in @p.
191  *
192  * Return: head of IPv6 tunnel list
193  **/
194 
195 static struct ip6_tnl __rcu **
196 ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
197 {
198 	const struct in6_addr *remote = &p->raddr;
199 	const struct in6_addr *local = &p->laddr;
200 	unsigned int h = 0;
201 	int prio = 0;
202 
203 	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
204 		prio = 1;
205 		h = HASH(remote, local);
206 	}
207 	return &ip6n->tnls[prio][h];
208 }
209 
210 /**
211  * ip6_tnl_link - add tunnel to hash table
212  *   @t: tunnel to be added
213  **/
214 
215 static void
216 ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
217 {
218 	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
219 
220 	if (t->parms.collect_md)
221 		rcu_assign_pointer(ip6n->collect_md_tun, t);
222 	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
223 	rcu_assign_pointer(*tp, t);
224 }
225 
226 /**
227  * ip6_tnl_unlink - remove tunnel from hash table
228  *   @t: tunnel to be removed
229  **/
230 
231 static void
232 ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
233 {
234 	struct ip6_tnl __rcu **tp;
235 	struct ip6_tnl *iter;
236 
237 	if (t->parms.collect_md)
238 		rcu_assign_pointer(ip6n->collect_md_tun, NULL);
239 
240 	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
241 	     (iter = rtnl_dereference(*tp)) != NULL;
242 	     tp = &iter->next) {
243 		if (t == iter) {
244 			rcu_assign_pointer(*tp, t->next);
245 			break;
246 		}
247 	}
248 }
249 
250 static void ip6_dev_free(struct net_device *dev)
251 {
252 	struct ip6_tnl *t = netdev_priv(dev);
253 
254 	gro_cells_destroy(&t->gro_cells);
255 	dst_cache_destroy(&t->dst_cache);
256 	free_percpu(dev->tstats);
257 }
258 
259 static int ip6_tnl_create2(struct net_device *dev)
260 {
261 	struct ip6_tnl *t = netdev_priv(dev);
262 	struct net *net = dev_net(dev);
263 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
264 	int err;
265 
268 	dev->rtnl_link_ops = &ip6_link_ops;
269 	err = register_netdevice(dev);
270 	if (err < 0)
271 		goto out;
272 
273 	strcpy(t->parms.name, dev->name);
274 
275 	dev_hold(dev);
276 	ip6_tnl_link(ip6n, t);
277 	return 0;
278 
279 out:
280 	return err;
281 }
282 
283 /**
284  * ip6_tnl_create - create a new tunnel
285  *   @net: network namespace in which to create the tunnel
286  *   @p: tunnel parameters
287  *
288  * Description:
289  *   Create tunnel matching given parameters.
290  *
291  * Return:
292  *   created tunnel or error pointer
293  **/
294 
295 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
296 {
297 	struct net_device *dev;
298 	struct ip6_tnl *t;
299 	char name[IFNAMSIZ];
300 	int err = -ENOMEM;
301 
302 	if (p->name[0])
303 		strlcpy(name, p->name, IFNAMSIZ);
304 	else
305 		sprintf(name, "ip6tnl%%d");
306 
307 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
308 			   ip6_tnl_dev_setup);
309 	if (!dev)
310 		goto failed;
311 
312 	dev_net_set(dev, net);
313 
314 	t = netdev_priv(dev);
315 	t->parms = *p;
316 	t->net = dev_net(dev);
317 	err = ip6_tnl_create2(dev);
318 	if (err < 0)
319 		goto failed_free;
320 
321 	return t;
322 
323 failed_free:
324 	free_netdev(dev);
325 failed:
326 	return ERR_PTR(err);
327 }
328 
329 /**
330  * ip6_tnl_locate - find or create tunnel matching given parameters
331  *   @p: tunnel parameters
332  *   @create: != 0 if allowed to create a new tunnel when no match is found
333  *
334  * Description:
335  *   ip6_tnl_locate() first tries to locate an existing tunnel
336  *   based on @p. If this is unsuccessful, but @create is set, a new
337  *   tunnel device is created and registered for use.
338  *
339  * Return:
340  *   matching tunnel or error pointer
341  **/
342 
343 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
344 		struct __ip6_tnl_parm *p, int create)
345 {
346 	const struct in6_addr *remote = &p->raddr;
347 	const struct in6_addr *local = &p->laddr;
348 	struct ip6_tnl __rcu **tp;
349 	struct ip6_tnl *t;
350 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
351 
352 	for (tp = ip6_tnl_bucket(ip6n, p);
353 	     (t = rtnl_dereference(*tp)) != NULL;
354 	     tp = &t->next) {
355 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
356 		    ipv6_addr_equal(remote, &t->parms.raddr)) {
357 			if (create)
358 				return ERR_PTR(-EEXIST);
359 
360 			return t;
361 		}
362 	}
363 	if (!create)
364 		return ERR_PTR(-ENODEV);
365 	return ip6_tnl_create(net, p);
366 }
367 
368 /**
369  * ip6_tnl_dev_uninit - tunnel device uninitializer
370  *   @dev: the device to be destroyed
371  *
372  * Description:
373  *   ip6_tnl_dev_uninit() removes tunnel from its list
374  **/
375 
376 static void
377 ip6_tnl_dev_uninit(struct net_device *dev)
378 {
379 	struct ip6_tnl *t = netdev_priv(dev);
380 	struct net *net = t->net;
381 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
382 
383 	if (dev == ip6n->fb_tnl_dev)
384 		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
385 	else
386 		ip6_tnl_unlink(ip6n, t);
387 	dst_cache_reset(&t->dst_cache);
388 	dev_put(dev);
389 }
390 
391 /**
392  * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
393  *   @skb: received socket buffer
 *   @raw: pointer to the IPv6 header to parse within @skb's data
394  *
395  * Return:
396  *   0 if none was found,
397  *   else index to encapsulation limit
398  **/
399 
400 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
401 {
402 	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
403 	unsigned int nhoff = raw - skb->data;
404 	unsigned int off = nhoff + sizeof(*ipv6h);
405 	u8 next, nexthdr = ipv6h->nexthdr;
406 
407 	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
408 		struct ipv6_opt_hdr *hdr;
409 		u16 optlen;
410 
411 		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
412 			break;
413 
414 		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
415 		if (nexthdr == NEXTHDR_FRAGMENT) {
416 			struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
417 			if (frag_hdr->frag_off)
418 				break;
419 			optlen = 8;
420 		} else if (nexthdr == NEXTHDR_AUTH) {
421 			optlen = (hdr->hdrlen + 2) << 2;
422 		} else {
423 			optlen = ipv6_optlen(hdr);
424 		}
425 		/* cache hdr->nexthdr, since pskb_may_pull() might
426 		 * invalidate hdr
427 		 */
428 		next = hdr->nexthdr;
429 		if (nexthdr == NEXTHDR_DEST) {
430 			u16 i = 2;
431 
432 			/* Remember: hdr is no longer valid at this point. */
433 			if (!pskb_may_pull(skb, off + optlen))
434 				break;
435 
436 			while (1) {
437 				struct ipv6_tlv_tnl_enc_lim *tel;
438 
439 				/* No more room for encapsulation limit */
440 				if (i + sizeof(*tel) > optlen)
441 					break;
442 
443 				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
444 				/* return index of option if found and valid */
445 				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
446 				    tel->length == 1)
447 					return i + off - nhoff;
448 				/* else jump to next option */
449 				if (tel->type)
450 					i += tel->length + 2;
451 				else
452 					i++;
453 			}
454 		}
455 		nexthdr = next;
456 		off += optlen;
457 	}
458 	return 0;
459 }
460 EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
461 
462 /**
463  * ip6_tnl_err - tunnel error handler
464  *
465  * Description:
466  *   ip6_tnl_err() should handle errors in the tunnel according
467  *   to the specifications in RFC 2473.
468  **/
469 
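/* @type, @code and @info are in/out: they carry the ICMPv6 error seen on
 * the outer packet in, and the error (if any) that should be relayed to
 * the original sender of the inner packet out; @msg is set when a relay
 * is warranted.
 */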
470 static int
471 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
472 	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
473 {
474 	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
475 	struct ip6_tnl *t;
476 	int rel_msg = 0;
477 	u8 rel_type = ICMPV6_DEST_UNREACH;
478 	u8 rel_code = ICMPV6_ADDR_UNREACH;
479 	u8 tproto;
480 	__u32 rel_info = 0;
481 	__u16 len;
482 	int err = -ENOENT;
483 
484 	/* If the packet doesn't contain the original IPv6 header we are
485 	 * in trouble since we might need the source address for further
486 	 * processing of the error. */
487 
488 	rcu_read_lock();
489 	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
490 	if (!t)
491 		goto out;
492 
493 	tproto = ACCESS_ONCE(t->parms.proto);
494 	if (tproto != ipproto && tproto != 0)
495 		goto out;
496 
497 	err = 0;
498 
499 	switch (*type) {
500 		__u32 teli;
501 		struct ipv6_tlv_tnl_enc_lim *tel;
502 		__u32 mtu;
503 	case ICMPV6_DEST_UNREACH:
504 		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
505 				    t->parms.name);
506 		rel_msg = 1;
507 		break;
508 	case ICMPV6_TIME_EXCEED:
509 		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
510 			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
511 					    t->parms.name);
512 			rel_msg = 1;
513 		}
514 		break;
515 	case ICMPV6_PARAMPROB:
516 		teli = 0;
517 		if ((*code) == ICMPV6_HDR_FIELD)
518 			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
519 
520 		if (teli && teli == *info - 2) {
521 			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
522 			if (tel->encap_limit == 0) {
523 				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
524 						    t->parms.name);
525 				rel_msg = 1;
526 			}
527 		} else {
528 			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
529 					    t->parms.name);
530 		}
531 		break;
532 	case ICMPV6_PKT_TOOBIG:
533 		mtu = *info - offset;
534 		if (mtu < IPV6_MIN_MTU)
535 			mtu = IPV6_MIN_MTU;
536 		t->dev->mtu = mtu;
537 
538 		len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
539 		if (len > mtu) {
540 			rel_type = ICMPV6_PKT_TOOBIG;
541 			rel_code = 0;
542 			rel_info = mtu;
543 			rel_msg = 1;
544 		}
545 		break;
546 	}
547 
548 	*type = rel_type;
549 	*code = rel_code;
550 	*info = rel_info;
551 	*msg = rel_msg;
552 
553 out:
554 	rcu_read_unlock();
555 	return err;
556 }
557 
558 static int
559 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
560 	   u8 type, u8 code, int offset, __be32 info)
561 {
562 	int rel_msg = 0;
563 	u8 rel_type = type;
564 	u8 rel_code = code;
565 	__u32 rel_info = ntohl(info);
566 	int err;
567 	struct sk_buff *skb2;
568 	const struct iphdr *eiph;
569 	struct rtable *rt;
570 	struct flowi4 fl4;
571 
572 	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
573 			  &rel_msg, &rel_info, offset);
574 	if (err < 0)
575 		return err;
576 
577 	if (rel_msg == 0)
578 		return 0;
579 
580 	switch (rel_type) {
581 	case ICMPV6_DEST_UNREACH:
582 		if (rel_code != ICMPV6_ADDR_UNREACH)
583 			return 0;
584 		rel_type = ICMP_DEST_UNREACH;
585 		rel_code = ICMP_HOST_UNREACH;
586 		break;
587 	case ICMPV6_PKT_TOOBIG:
588 		if (rel_code != 0)
589 			return 0;
590 		rel_type = ICMP_DEST_UNREACH;
591 		rel_code = ICMP_FRAG_NEEDED;
592 		break;
593 	case NDISC_REDIRECT:
594 		rel_type = ICMP_REDIRECT;
595 		rel_code = ICMP_REDIR_HOST;
		break;
596 	default:
597 		return 0;
598 	}
599 
600 	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
601 		return 0;
602 
603 	skb2 = skb_clone(skb, GFP_ATOMIC);
604 	if (!skb2)
605 		return 0;
606 
607 	skb_dst_drop(skb2);
608 
609 	skb_pull(skb2, offset);
610 	skb_reset_network_header(skb2);
611 	eiph = ip_hdr(skb2);
612 
613 	/* Try to guess incoming interface */
614 	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
615 				   eiph->saddr, 0,
616 				   0, 0,
617 				   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
618 	if (IS_ERR(rt))
619 		goto out;
620 
621 	skb2->dev = rt->dst.dev;
622 
623 	/* route "incoming" packet */
624 	if (rt->rt_flags & RTCF_LOCAL) {
625 		ip_rt_put(rt);
626 		rt = NULL;
627 		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
628 					   eiph->daddr, eiph->saddr,
629 					   0, 0,
630 					   IPPROTO_IPIP,
631 					   RT_TOS(eiph->tos), 0);
632 		if (IS_ERR(rt) ||
633 		    rt->dst.dev->type != ARPHRD_TUNNEL) {
634 			if (!IS_ERR(rt))
635 				ip_rt_put(rt);
636 			goto out;
637 		}
638 		skb_dst_set(skb2, &rt->dst);
639 	} else {
640 		ip_rt_put(rt);
641 		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
642 				   skb2->dev) ||
643 		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
644 			goto out;
645 	}
646 
647 	/* change mtu on this route */
648 	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
649 		if (rel_info > dst_mtu(skb_dst(skb2)))
650 			goto out;
651 
652 		skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info);
653 	}
654 	if (rel_type == ICMP_REDIRECT)
655 		skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);
656 
657 	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
658 
659 out:
660 	kfree_skb(skb2);
661 	return 0;
662 }
663 
664 static int
665 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
666 	   u8 type, u8 code, int offset, __be32 info)
667 {
668 	int rel_msg = 0;
669 	u8 rel_type = type;
670 	u8 rel_code = code;
671 	__u32 rel_info = ntohl(info);
672 	int err;
673 
674 	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
675 			  &rel_msg, &rel_info, offset);
676 	if (err < 0)
677 		return err;
678 
679 	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
680 		struct rt6_info *rt;
681 		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
682 
683 		if (!skb2)
684 			return 0;
685 
686 		skb_dst_drop(skb2);
687 		skb_pull(skb2, offset);
688 		skb_reset_network_header(skb2);
689 
690 		/* Try to guess incoming interface */
691 		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
692 				NULL, 0, 0);
693 
694 		if (rt && rt->dst.dev)
695 			skb2->dev = rt->dst.dev;
696 
697 		icmpv6_send(skb2, rel_type, rel_code, rel_info);
698 
699 		ip6_rt_put(rt);
700 
701 		kfree_skb(skb2);
702 	}
703 
704 	return 0;
705 }
706 
707 static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
708 				       const struct ipv6hdr *ipv6h,
709 				       struct sk_buff *skb)
710 {
711 	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
712 
713 	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
714 		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
715 
716 	return IP6_ECN_decapsulate(ipv6h, skb);
717 }
718 
719 static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
720 				       const struct ipv6hdr *ipv6h,
721 				       struct sk_buff *skb)
722 {
723 	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
724 		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
725 
726 	return IP6_ECN_decapsulate(ipv6h, skb);
727 }
728 
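/* Capability flags derived from the configured endpoints: CAP_XMIT when
 * the local address is a usable unicast source, CAP_RCV when the remote
 * address is unicast, and CAP_PER_PACKET when either endpoint is
 * unspecified, in which case the decision is deferred to each packet.
 */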
729 __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
730 			     const struct in6_addr *laddr,
731 			     const struct in6_addr *raddr)
732 {
733 	struct __ip6_tnl_parm *p = &t->parms;
734 	int ltype = ipv6_addr_type(laddr);
735 	int rtype = ipv6_addr_type(raddr);
736 	__u32 flags = 0;
737 
738 	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
739 		flags = IP6_TNL_F_CAP_PER_PACKET;
740 	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
741 		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
742 		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
743 		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
744 		if (ltype&IPV6_ADDR_UNICAST)
745 			flags |= IP6_TNL_F_CAP_XMIT;
746 		if (rtype&IPV6_ADDR_UNICAST)
747 			flags |= IP6_TNL_F_CAP_RCV;
748 	}
749 	return flags;
750 }
751 EXPORT_SYMBOL(ip6_tnl_get_cap);
752 
753 /* called with rcu_read_lock() */
754 int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
755 				  const struct in6_addr *laddr,
756 				  const struct in6_addr *raddr)
757 {
758 	struct __ip6_tnl_parm *p = &t->parms;
759 	int ret = 0;
760 	struct net *net = t->net;
761 
762 	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
763 	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
764 	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
765 		struct net_device *ldev = NULL;
766 
767 		if (p->link)
768 			ldev = dev_get_by_index_rcu(net, p->link);
769 
770 		if ((ipv6_addr_is_multicast(laddr) ||
771 		     likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
772 		    likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
773 			ret = 1;
774 	}
775 	return ret;
776 }
777 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
778 
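/* Common receive path: validate the checksum/sequence tunnel flags
 * against what the tunnel expects, hand the skb over to the tunnel
 * device, propagate ECN (and optionally DSCP) from the outer header to
 * the inner one, update the per-cpu stats and feed the packet to the
 * tunnel's GRO cell.
 */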
779 static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
780 			 const struct tnl_ptk_info *tpi,
781 			 struct metadata_dst *tun_dst,
782 			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
783 						const struct ipv6hdr *ipv6h,
784 						struct sk_buff *skb),
785 			 bool log_ecn_err)
786 {
787 	struct pcpu_sw_netstats *tstats;
788 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
789 	int err;
790 
791 	if ((!(tpi->flags & TUNNEL_CSUM) &&
792 	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
793 	    ((tpi->flags & TUNNEL_CSUM) &&
794 	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
795 		tunnel->dev->stats.rx_crc_errors++;
796 		tunnel->dev->stats.rx_errors++;
797 		goto drop;
798 	}
799 
800 	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
801 		if (!(tpi->flags & TUNNEL_SEQ) ||
802 		    (tunnel->i_seqno &&
803 		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
804 			tunnel->dev->stats.rx_fifo_errors++;
805 			tunnel->dev->stats.rx_errors++;
806 			goto drop;
807 		}
808 		tunnel->i_seqno = ntohl(tpi->seq) + 1;
809 	}
810 
811 	skb->protocol = tpi->proto;
812 
813 	/* Warning: All skb pointers will be invalidated! */
814 	if (tunnel->dev->type == ARPHRD_ETHER) {
815 		if (!pskb_may_pull(skb, ETH_HLEN)) {
816 			tunnel->dev->stats.rx_length_errors++;
817 			tunnel->dev->stats.rx_errors++;
818 			goto drop;
819 		}
820 
821 		ipv6h = ipv6_hdr(skb);
822 		skb->protocol = eth_type_trans(skb, tunnel->dev);
823 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
824 	} else {
825 		skb->dev = tunnel->dev;
826 	}
827 
828 	skb_reset_network_header(skb);
829 	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
830 
831 	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
832 
833 	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
834 	if (unlikely(err)) {
835 		if (log_ecn_err)
836 			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
837 					     &ipv6h->saddr,
838 					     ipv6_get_dsfield(ipv6h));
839 		if (err > 1) {
840 			++tunnel->dev->stats.rx_frame_errors;
841 			++tunnel->dev->stats.rx_errors;
842 			goto drop;
843 		}
844 	}
845 
846 	tstats = this_cpu_ptr(tunnel->dev->tstats);
847 	u64_stats_update_begin(&tstats->syncp);
848 	tstats->rx_packets++;
849 	tstats->rx_bytes += skb->len;
850 	u64_stats_update_end(&tstats->syncp);
851 
852 	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
853 
854 	if (tun_dst)
855 		skb_dst_set(skb, (struct dst_entry *)tun_dst);
856 
857 	gro_cells_receive(&tunnel->gro_cells, skb);
858 	return 0;
859 
860 drop:
861 	if (tun_dst)
862 		dst_release((struct dst_entry *)tun_dst);
863 	kfree_skb(skb);
864 	return 0;
865 }
866 
867 int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
868 		const struct tnl_ptk_info *tpi,
869 		struct metadata_dst *tun_dst,
870 		bool log_ecn_err)
871 {
872 	return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate,
873 			     log_ecn_err);
874 }
875 EXPORT_SYMBOL(ip6_tnl_rcv);
876 
877 static const struct tnl_ptk_info tpi_v6 = {
878 	/* no tunnel info required for ipxip6. */
879 	.proto = htons(ETH_P_IPV6),
880 };
881 
882 static const struct tnl_ptk_info tpi_v4 = {
883 	/* no tunnel info required for ipxip6. */
884 	.proto = htons(ETH_P_IP),
885 };
886 
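/* Shared receive handler for ip4ip6 and ip6ip6: the tunnel is looked up
 * with the packet's source address as our remote end and its destination
 * as our local end, then policy and control checks are applied before
 * __ip6_tnl_rcv() does the rest.
 */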
887 static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
888 		      const struct tnl_ptk_info *tpi,
889 		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
890 						  const struct ipv6hdr *ipv6h,
891 						  struct sk_buff *skb))
892 {
893 	struct ip6_tnl *t;
894 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
895 	struct metadata_dst *tun_dst = NULL;
896 	int ret = -1;
897 
898 	rcu_read_lock();
899 	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
900 
901 	if (t) {
902 		u8 tproto = ACCESS_ONCE(t->parms.proto);
903 
904 		if (tproto != ipproto && tproto != 0)
905 			goto drop;
906 		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
907 			goto drop;
908 		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
909 			goto drop;
910 		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
911 			goto drop;
912 		if (t->parms.collect_md) {
913 			tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
914 			if (!tun_dst)
915 				goto drop;
916 		}
917 		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
918 				    log_ecn_error);
919 	}
920 
921 	rcu_read_unlock();
922 
923 	return ret;
924 
925 drop:
926 	rcu_read_unlock();
927 	kfree_skb(skb);
928 	return 0;
929 }
930 
931 static int ip4ip6_rcv(struct sk_buff *skb)
932 {
933 	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
934 			  ip4ip6_dscp_ecn_decapsulate);
935 }
936 
937 static int ip6ip6_rcv(struct sk_buff *skb)
938 {
939 	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
940 			  ip6ip6_dscp_ecn_decapsulate);
941 }
942 
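/* Pre-built destination options header carrying the RFC 2473 tunnel
 * encapsulation limit: dst_opt[0..1] form the ipv6_opt_hdr (next header
 * filled in when the option is pushed, length 0 meaning 8 octets),
 * [2..4] hold the encapsulation limit TLV and [5..7] are PadN padding.
 */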
943 struct ipv6_tel_txoption {
944 	struct ipv6_txoptions ops;
945 	__u8 dst_opt[8];
946 };
947 
948 static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
949 {
950 	memset(opt, 0, sizeof(struct ipv6_tel_txoption));
951 
952 	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
953 	opt->dst_opt[3] = 1;
954 	opt->dst_opt[4] = encap_limit;
955 	opt->dst_opt[5] = IPV6_TLV_PADN;
956 	opt->dst_opt[6] = 1;
957 
958 	opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt;
959 	opt->ops.opt_nflen = 8;
960 }
961 
962 /**
963  * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
964  *   @t: the outgoing tunnel device
965  *   @hdr: IPv6 header from the incoming packet
966  *
967  * Description:
968  *   Avoid trivial tunneling loop by checking that tunnel exit-point
969  *   doesn't match source of incoming packet.
970  *
971  * Return:
972  *   1 if conflict,
973  *   0 else
974  **/
975 
976 static inline bool
977 ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
978 {
979 	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
980 }
981 
982 int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
983 		     const struct in6_addr *laddr,
984 		     const struct in6_addr *raddr)
985 {
986 	struct __ip6_tnl_parm *p = &t->parms;
987 	int ret = 0;
988 	struct net *net = t->net;
989 
990 	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
991 	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
992 	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
993 		struct net_device *ldev = NULL;
994 
995 		rcu_read_lock();
996 		if (p->link)
997 			ldev = dev_get_by_index_rcu(net, p->link);
998 
999 		if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
1000 			pr_warn("%s xmit: Local address not yet configured!\n",
1001 				p->name);
1002 		else if (!ipv6_addr_is_multicast(raddr) &&
1003 			 unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
1004 			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
1005 				p->name);
1006 		else
1007 			ret = 1;
1008 		rcu_read_unlock();
1009 	}
1010 	return ret;
1011 }
1012 EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
1013 
1014 /**
1015  * ip6_tnl_xmit - encapsulate packet and send
1016  *   @skb: the outgoing socket buffer
1017  *   @dev: the outgoing tunnel device
1018  *   @dsfield: dscp code for outer header
1019  *   @fl6: flow of tunneled packet
1020  *   @encap_limit: encapsulation limit
1021  *   @pmtu: Path MTU is stored if packet is too big
1022  *   @proto: next header value
1023  *
1024  * Description:
1025  *   Build new header and do some sanity checks on the packet before sending
1026  *   it.
1027  *
1028  * Return:
1029  *   0 on success
1030  *   -1 on failure
1031  *   %-EMSGSIZE if the message is too big; the path MTU is returned via @pmtu
1032  **/
1033 
1034 int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1035 		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
1036 		 __u8 proto)
1037 {
1038 	struct ip6_tnl *t = netdev_priv(dev);
1039 	struct net *net = t->net;
1040 	struct net_device_stats *stats = &t->dev->stats;
1041 	struct ipv6hdr *ipv6h;
1042 	struct ipv6_tel_txoption opt;
1043 	struct dst_entry *dst = NULL, *ndst = NULL;
1044 	struct net_device *tdev;
1045 	int mtu;
1046 	unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
1047 	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
1048 	unsigned int max_headroom = psh_hlen;
1049 	bool use_cache = false;
1050 	u8 hop_limit;
1051 	int err = -1;
1052 
1053 	if (t->parms.collect_md) {
1054 		hop_limit = skb_tunnel_info(skb)->key.ttl;
1055 		goto route_lookup;
1056 	} else {
1057 		hop_limit = t->parms.hop_limit;
1058 	}
1059 
1060 	/* NBMA tunnel */
1061 	if (ipv6_addr_any(&t->parms.raddr)) {
1062 		if (skb->protocol == htons(ETH_P_IPV6)) {
1063 			struct in6_addr *addr6;
1064 			struct neighbour *neigh;
1065 			int addr_type;
1066 
1067 			if (!skb_dst(skb))
1068 				goto tx_err_link_failure;
1069 
1070 			neigh = dst_neigh_lookup(skb_dst(skb),
1071 						 &ipv6_hdr(skb)->daddr);
1072 			if (!neigh)
1073 				goto tx_err_link_failure;
1074 
1075 			addr6 = (struct in6_addr *)&neigh->primary_key;
1076 			addr_type = ipv6_addr_type(addr6);
1077 
1078 			if (addr_type == IPV6_ADDR_ANY)
1079 				addr6 = &ipv6_hdr(skb)->daddr;
1080 
1081 			memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1082 			neigh_release(neigh);
1083 		}
1084 	} else if (!(t->parms.flags &
1085 		     (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
1086 		/* enable the cache only if the routing decision does
1087 		 * not depend on the current inner header value
1088 		 */
1089 		use_cache = true;
1090 	}
1091 
1092 	if (use_cache)
1093 		dst = dst_cache_get(&t->dst_cache);
1094 
1095 	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
1096 		goto tx_err_link_failure;
1097 
1098 	if (!dst) {
1099 route_lookup:
1100 		/* add dsfield to flowlabel for route lookup */
1101 		fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
1102 
1103 		dst = ip6_route_output(net, NULL, fl6);
1104 
1105 		if (dst->error)
1106 			goto tx_err_link_failure;
1107 		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
1108 		if (IS_ERR(dst)) {
1109 			err = PTR_ERR(dst);
1110 			dst = NULL;
1111 			goto tx_err_link_failure;
1112 		}
1113 		if (t->parms.collect_md &&
1114 		    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
1115 				       &fl6->daddr, 0, &fl6->saddr))
1116 			goto tx_err_link_failure;
1117 		ndst = dst;
1118 	}
1119 
1120 	tdev = dst->dev;
1121 
1122 	if (tdev == dev) {
1123 		stats->collisions++;
1124 		net_warn_ratelimited("%s: Local routing loop detected!\n",
1125 				     t->parms.name);
1126 		goto tx_err_dst_release;
1127 	}
1128 	mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
1129 	if (encap_limit >= 0) {
1130 		max_headroom += 8;
1131 		mtu -= 8;
1132 	}
1133 	if (mtu < IPV6_MIN_MTU)
1134 		mtu = IPV6_MIN_MTU;
1135 	if (skb_dst(skb) && !t->parms.collect_md)
1136 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
1137 	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
1138 		*pmtu = mtu;
1139 		err = -EMSGSIZE;
1140 		goto tx_err_dst_release;
1141 	}
1142 
1143 	if (t->err_count > 0) {
1144 		if (time_before(jiffies,
1145 				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
1146 			t->err_count--;
1147 
1148 			dst_link_failure(skb);
1149 		} else {
1150 			t->err_count = 0;
1151 		}
1152 	}
1153 
1154 	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
1155 
1156 	/*
1157 	 * Okay, now see if we can stuff it in the buffer as-is.
1158 	 */
1159 	max_headroom += LL_RESERVED_SPACE(tdev);
1160 
1161 	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
1162 	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
1163 		struct sk_buff *new_skb;
1164 
1165 		new_skb = skb_realloc_headroom(skb, max_headroom);
1166 		if (!new_skb)
1167 			goto tx_err_dst_release;
1168 
1169 		if (skb->sk)
1170 			skb_set_owner_w(new_skb, skb->sk);
1171 		consume_skb(skb);
1172 		skb = new_skb;
1173 	}
1174 
1175 	if (t->parms.collect_md) {
1176 		if (t->encap.type != TUNNEL_ENCAP_NONE)
1177 			goto tx_err_dst_release;
1178 	} else {
1179 		if (use_cache && ndst)
1180 			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
1181 	}
1182 	skb_dst_set(skb, dst);
1183 
1184 	if (encap_limit >= 0) {
1185 		init_tel_txopt(&opt, encap_limit);
1186 		ipv6_push_frag_opts(skb, &opt.ops, &proto);
1187 	}
1188 	hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);
1189 
1190 	/* Calculate max headroom for all the headers and adjust
1191 	 * needed_headroom if necessary.
1192 	 */
1193 	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
1194 			+ dst->header_len + t->hlen;
1195 	if (max_headroom > dev->needed_headroom)
1196 		dev->needed_headroom = max_headroom;
1197 
1198 	err = ip6_tnl_encap(skb, t, &proto, fl6);
1199 	if (err)
1200 		return err;
1201 
1202 	skb_push(skb, sizeof(struct ipv6hdr));
1203 	skb_reset_network_header(skb);
1204 	ipv6h = ipv6_hdr(skb);
1205 	ip6_flow_hdr(ipv6h, dsfield,
1206 		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
1207 	ipv6h->hop_limit = hop_limit;
1208 	ipv6h->nexthdr = proto;
1209 	ipv6h->saddr = fl6->saddr;
1210 	ipv6h->daddr = fl6->daddr;
1211 	ip6tunnel_xmit(NULL, skb, dev);
1212 	return 0;
1213 tx_err_link_failure:
1214 	stats->tx_carrier_errors++;
1215 	dst_link_failure(skb);
1216 tx_err_dst_release:
1217 	dst_release(dst);
1218 	return err;
1219 }
1220 EXPORT_SYMBOL(ip6_tnl_xmit);
1221 
1222 static inline int
1223 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1224 {
1225 	struct ip6_tnl *t = netdev_priv(dev);
1226 	const struct iphdr  *iph = ip_hdr(skb);
1227 	int encap_limit = -1;
1228 	struct flowi6 fl6;
1229 	__u8 dsfield;
1230 	__u32 mtu;
1231 	u8 tproto;
1232 	int err;
1233 
1234 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1235 
1236 	tproto = ACCESS_ONCE(t->parms.proto);
1237 	if (tproto != IPPROTO_IPIP && tproto != 0)
1238 		return -1;
1239 
1240 	if (t->parms.collect_md) {
1241 		struct ip_tunnel_info *tun_info;
1242 		const struct ip_tunnel_key *key;
1243 
1244 		tun_info = skb_tunnel_info(skb);
1245 		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1246 			     ip_tunnel_info_af(tun_info) != AF_INET6))
1247 			return -1;
1248 		key = &tun_info->key;
1249 		memset(&fl6, 0, sizeof(fl6));
1250 		fl6.flowi6_proto = IPPROTO_IPIP;
1251 		fl6.daddr = key->u.ipv6.dst;
1252 		fl6.flowlabel = key->label;
1253 		dsfield = key->tos;
1254 	} else {
1255 		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1256 			encap_limit = t->parms.encap_limit;
1257 
1258 		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1259 		fl6.flowi6_proto = IPPROTO_IPIP;
1260 
1261 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1262 			dsfield = ipv4_get_dsfield(iph);
1263 		else
1264 			dsfield = ip6_tclass(t->parms.flowinfo);
1265 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1266 			fl6.flowi6_mark = skb->mark;
1267 		else
1268 			fl6.flowi6_mark = t->parms.fwmark;
1269 	}
1270 
1271 	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1272 
1273 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1274 		return -1;
1275 
1276 	dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
1277 
1278 	skb_set_inner_ipproto(skb, IPPROTO_IPIP);
1279 
1280 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1281 			   IPPROTO_IPIP);
1282 	if (err != 0) {
1283 		/* XXX: send ICMP error even if DF is not set. */
1284 		if (err == -EMSGSIZE)
1285 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
1286 				  htonl(mtu));
1287 		return -1;
1288 	}
1289 
1290 	return 0;
1291 }
1292 
1293 static inline int
1294 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1295 {
1296 	struct ip6_tnl *t = netdev_priv(dev);
1297 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1298 	int encap_limit = -1;
1299 	__u16 offset;
1300 	struct flowi6 fl6;
1301 	__u8 dsfield;
1302 	__u32 mtu;
1303 	u8 tproto;
1304 	int err;
1305 
1306 	tproto = ACCESS_ONCE(t->parms.proto);
1307 	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
1308 	    ip6_tnl_addr_conflict(t, ipv6h))
1309 		return -1;
1310 
1311 	if (t->parms.collect_md) {
1312 		struct ip_tunnel_info *tun_info;
1313 		const struct ip_tunnel_key *key;
1314 
1315 		tun_info = skb_tunnel_info(skb);
1316 		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1317 			     ip_tunnel_info_af(tun_info) != AF_INET6))
1318 			return -1;
1319 		key = &tun_info->key;
1320 		memset(&fl6, 0, sizeof(fl6));
1321 		fl6.flowi6_proto = IPPROTO_IPV6;
1322 		fl6.daddr = key->u.ipv6.dst;
1323 		fl6.flowlabel = key->label;
1324 		dsfield = key->tos;
1325 	} else {
1326 		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1327 		/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
1328 		ipv6h = ipv6_hdr(skb);
1329 		if (offset > 0) {
1330 			struct ipv6_tlv_tnl_enc_lim *tel;
1331 
1332 			tel = (void *)&skb_network_header(skb)[offset];
1333 			if (tel->encap_limit == 0) {
1334 				icmpv6_send(skb, ICMPV6_PARAMPROB,
1335 					    ICMPV6_HDR_FIELD, offset + 2);
1336 				return -1;
1337 			}
1338 			encap_limit = tel->encap_limit - 1;
1339 		} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
1340 			encap_limit = t->parms.encap_limit;
1341 		}
1342 
1343 		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1344 		fl6.flowi6_proto = IPPROTO_IPV6;
1345 
1346 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1347 			dsfield = ipv6_get_dsfield(ipv6h);
1348 		else
1349 			dsfield = ip6_tclass(t->parms.flowinfo);
1350 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
1351 			fl6.flowlabel |= ip6_flowlabel(ipv6h);
1352 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1353 			fl6.flowi6_mark = skb->mark;
1354 		else
1355 			fl6.flowi6_mark = t->parms.fwmark;
1356 	}
1357 
1358 	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1359 
1360 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1361 		return -1;
1362 
1363 	dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
1364 
1365 	skb_set_inner_ipproto(skb, IPPROTO_IPV6);
1366 
1367 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1368 			   IPPROTO_IPV6);
1369 	if (err != 0) {
1370 		if (err == -EMSGSIZE)
1371 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1372 		return -1;
1373 	}
1374 
1375 	return 0;
1376 }
1377 
1378 static netdev_tx_t
1379 ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
1380 {
1381 	struct ip6_tnl *t = netdev_priv(dev);
1382 	struct net_device_stats *stats = &t->dev->stats;
1383 	int ret;
1384 
1385 	switch (skb->protocol) {
1386 	case htons(ETH_P_IP):
1387 		ret = ip4ip6_tnl_xmit(skb, dev);
1388 		break;
1389 	case htons(ETH_P_IPV6):
1390 		ret = ip6ip6_tnl_xmit(skb, dev);
1391 		break;
1392 	default:
1393 		goto tx_err;
1394 	}
1395 
1396 	if (ret < 0)
1397 		goto tx_err;
1398 
1399 	return NETDEV_TX_OK;
1400 
1401 tx_err:
1402 	stats->tx_errors++;
1403 	stats->tx_dropped++;
1404 	kfree_skb(skb);
1405 	return NETDEV_TX_OK;
1406 }
1407 
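/* Derive the device configuration from the tunnel parameters: copy the
 * endpoints into the device addresses, build the flowi6 template used
 * for routing, recompute the capability flags and, for a fixed remote,
 * size the device MTU from the route towards it.
 */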
1408 static void ip6_tnl_link_config(struct ip6_tnl *t)
1409 {
1410 	struct net_device *dev = t->dev;
1411 	struct __ip6_tnl_parm *p = &t->parms;
1412 	struct flowi6 *fl6 = &t->fl.u.ip6;
1413 	int t_hlen;
1414 
1415 	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1416 	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1417 
1418 	/* Set up flowi template */
1419 	fl6->saddr = p->laddr;
1420 	fl6->daddr = p->raddr;
1421 	fl6->flowi6_oif = p->link;
1422 	fl6->flowlabel = 0;
1423 
1424 	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1425 		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1426 	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1427 		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1428 
1429 	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1430 	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1431 
1432 	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
1433 		dev->flags |= IFF_POINTOPOINT;
1434 	else
1435 		dev->flags &= ~IFF_POINTOPOINT;
1436 
1437 	t->tun_hlen = 0;
1438 	t->hlen = t->encap_hlen + t->tun_hlen;
1439 	t_hlen = t->hlen + sizeof(struct ipv6hdr);
1440 
1441 	if (p->flags & IP6_TNL_F_CAP_XMIT) {
1442 		int strict = (ipv6_addr_type(&p->raddr) &
1443 			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1444 
1445 		struct rt6_info *rt = rt6_lookup(t->net,
1446 						 &p->raddr, &p->laddr,
1447 						 p->link, strict);
1448 
1449 		if (!rt)
1450 			return;
1451 
1452 		if (rt->dst.dev) {
1453 			dev->hard_header_len = rt->dst.dev->hard_header_len +
1454 				t_hlen;
1455 
1456 			dev->mtu = rt->dst.dev->mtu - t_hlen;
1457 			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1458 				dev->mtu -= 8;
1459 
1460 			if (dev->mtu < IPV6_MIN_MTU)
1461 				dev->mtu = IPV6_MIN_MTU;
1462 		}
1463 		ip6_rt_put(rt);
1464 	}
1465 }
1466 
1467 /**
1468  * ip6_tnl_change - update the tunnel parameters
1469  *   @t: tunnel to be changed
1470  *   @p: tunnel configuration parameters
1471  *
1472  * Description:
1473  *   ip6_tnl_change() updates the tunnel parameters
1474  **/
1475 
1476 static int
1477 ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
1478 {
1479 	t->parms.laddr = p->laddr;
1480 	t->parms.raddr = p->raddr;
1481 	t->parms.flags = p->flags;
1482 	t->parms.hop_limit = p->hop_limit;
1483 	t->parms.encap_limit = p->encap_limit;
1484 	t->parms.flowinfo = p->flowinfo;
1485 	t->parms.link = p->link;
1486 	t->parms.proto = p->proto;
1487 	t->parms.fwmark = p->fwmark;
1488 	dst_cache_reset(&t->dst_cache);
1489 	ip6_tnl_link_config(t);
1490 	return 0;
1491 }
1492 
1493 static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1494 {
1495 	struct net *net = t->net;
1496 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1497 	int err;
1498 
1499 	ip6_tnl_unlink(ip6n, t);
1500 	synchronize_net();
1501 	err = ip6_tnl_change(t, p);
1502 	ip6_tnl_link(ip6n, t);
1503 	netdev_state_change(t->dev);
1504 	return err;
1505 }
1506 
1507 static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1508 {
1509 	/* for the default tnl0 device, only the proto may be changed */
1510 	t->parms.proto = p->proto;
1511 	netdev_state_change(t->dev);
1512 	return 0;
1513 }
1514 
1515 static void
1516 ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
1517 {
1518 	p->laddr = u->laddr;
1519 	p->raddr = u->raddr;
1520 	p->flags = u->flags;
1521 	p->hop_limit = u->hop_limit;
1522 	p->encap_limit = u->encap_limit;
1523 	p->flowinfo = u->flowinfo;
1524 	p->link = u->link;
1525 	p->proto = u->proto;
1526 	memcpy(p->name, u->name, sizeof(u->name));
1527 }
1528 
1529 static void
1530 ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
1531 {
1532 	u->laddr = p->laddr;
1533 	u->raddr = p->raddr;
1534 	u->flags = p->flags;
1535 	u->hop_limit = p->hop_limit;
1536 	u->encap_limit = p->encap_limit;
1537 	u->flowinfo = p->flowinfo;
1538 	u->link = p->link;
1539 	u->proto = p->proto;
1540 	memcpy(u->name, p->name, sizeof(u->name));
1541 }
1542 
1543 /**
1544  * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
1545  *   @dev: virtual device associated with tunnel
1546  *   @ifr: parameters passed from userspace
1547  *   @cmd: command to be performed
1548  *
1549  * Description:
1550  *   ip6_tnl_ioctl() is used for managing IPv6 tunnels
1551  *   from userspace.
1552  *
1553  *   The possible commands are the following:
1554  *     %SIOCGETTUNNEL: get tunnel parameters for device
1555  *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
1556  *     %SIOCCHGTUNNEL: change tunnel parameters to those given
1557  *     %SIOCDELTUNNEL: delete tunnel
1558  *
1559  *   The fallback device "ip6tnl0", created during module
1560  *   initialization, can be used for creating other tunnel devices.
1561  *
1562  * Return:
1563  *   0 on success,
1564  *   %-EFAULT if unable to copy data to or from userspace,
1565  *   %-EPERM if current process hasn't %CAP_NET_ADMIN set,
1566  *   %-EINVAL if passed tunnel parameters are invalid,
1567  *   %-EEXIST if changing a tunnel's parameters would cause a conflict,
1568  *   %-ENODEV if attempting to change or delete a nonexistent device
1569  **/
1570 
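/* A minimal userspace sketch of adding a tunnel through this ioctl
 * (illustrative only; error handling and exact includes omitted):
 *
 *	struct ip6_tnl_parm p = { .proto = IPPROTO_IPV6 };
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	strcpy(p.name, "mytun");
 *	inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
 *	inet_pton(AF_INET6, "2001:db8::2", &p.raddr);
 *	strcpy(ifr.ifr_name, "ip6tnl0");
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);
 */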
1571 static int
1572 ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1573 {
1574 	int err = 0;
1575 	struct ip6_tnl_parm p;
1576 	struct __ip6_tnl_parm p1;
1577 	struct ip6_tnl *t = netdev_priv(dev);
1578 	struct net *net = t->net;
1579 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1580 
1581 	memset(&p1, 0, sizeof(p1));
1582 
1583 	switch (cmd) {
1584 	case SIOCGETTUNNEL:
1585 		if (dev == ip6n->fb_tnl_dev) {
1586 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1587 				err = -EFAULT;
1588 				break;
1589 			}
1590 			ip6_tnl_parm_from_user(&p1, &p);
1591 			t = ip6_tnl_locate(net, &p1, 0);
1592 			if (IS_ERR(t))
1593 				t = netdev_priv(dev);
1594 		} else {
1595 			memset(&p, 0, sizeof(p));
1596 		}
1597 		ip6_tnl_parm_to_user(&p, &t->parms);
1598 		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
1599 			err = -EFAULT;
1600 		}
1601 		break;
1602 	case SIOCADDTUNNEL:
1603 	case SIOCCHGTUNNEL:
1604 		err = -EPERM;
1605 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1606 			break;
1607 		err = -EFAULT;
1608 		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1609 			break;
1610 		err = -EINVAL;
1611 		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1612 		    p.proto != 0)
1613 			break;
1614 		ip6_tnl_parm_from_user(&p1, &p);
1615 		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
1616 		if (cmd == SIOCCHGTUNNEL) {
1617 			if (!IS_ERR(t)) {
1618 				if (t->dev != dev) {
1619 					err = -EEXIST;
1620 					break;
1621 				}
1622 			} else
1623 				t = netdev_priv(dev);
1624 			if (dev == ip6n->fb_tnl_dev)
1625 				err = ip6_tnl0_update(t, &p1);
1626 			else
1627 				err = ip6_tnl_update(t, &p1);
1628 		}
1629 		if (!IS_ERR(t)) {
1630 			err = 0;
1631 			ip6_tnl_parm_to_user(&p, &t->parms);
1632 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1633 				err = -EFAULT;
1634 
1635 		} else {
1636 			err = PTR_ERR(t);
1637 		}
1638 		break;
1639 	case SIOCDELTUNNEL:
1640 		err = -EPERM;
1641 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1642 			break;
1643 
1644 		if (dev == ip6n->fb_tnl_dev) {
1645 			err = -EFAULT;
1646 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1647 				break;
1648 			err = -ENOENT;
1649 			ip6_tnl_parm_from_user(&p1, &p);
1650 			t = ip6_tnl_locate(net, &p1, 0);
1651 			if (IS_ERR(t))
1652 				break;
1653 			err = -EPERM;
1654 			if (t->dev == ip6n->fb_tnl_dev)
1655 				break;
1656 			dev = t->dev;
1657 		}
1658 		err = 0;
1659 		unregister_netdevice(dev);
1660 		break;
1661 	default:
1662 		err = -EINVAL;
1663 	}
1664 	return err;
1665 }
1666 
1667 /**
1668  * ip6_tnl_change_mtu - change mtu manually for tunnel device
1669  *   @dev: virtual device associated with tunnel
1670  *   @new_mtu: the new mtu
1671  *
1672  * Return:
1673  *   0 on success,
1674  *   %-EINVAL if mtu too small
1675  **/
1676 
1677 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1678 {
1679 	struct ip6_tnl *tnl = netdev_priv(dev);
1680 
1681 	if (tnl->parms.proto == IPPROTO_IPIP) {
1682 		if (new_mtu < ETH_MIN_MTU)
1683 			return -EINVAL;
1684 	} else {
1685 		if (new_mtu < IPV6_MIN_MTU)
1686 			return -EINVAL;
1687 	}
1688 	if (new_mtu > 0xFFF8 - dev->hard_header_len)
1689 		return -EINVAL;
1690 	dev->mtu = new_mtu;
1691 	return 0;
1692 }
1693 EXPORT_SYMBOL(ip6_tnl_change_mtu);
1694 
1695 int ip6_tnl_get_iflink(const struct net_device *dev)
1696 {
1697 	struct ip6_tnl *t = netdev_priv(dev);
1698 
1699 	return t->parms.link;
1700 }
1701 EXPORT_SYMBOL(ip6_tnl_get_iflink);
1702 
1703 int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
1704 			  unsigned int num)
1705 {
1706 	if (num >= MAX_IPTUN_ENCAP_OPS)
1707 		return -ERANGE;
1708 
1709 	return !cmpxchg((const struct ip6_tnl_encap_ops **)
1710 			&ip6tun_encaps[num],
1711 			NULL, ops) ? 0 : -1;
1712 }
1713 EXPORT_SYMBOL(ip6_tnl_encap_add_ops);
1714 
1715 int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
1716 			  unsigned int num)
1717 {
1718 	int ret;
1719 
1720 	if (num >= MAX_IPTUN_ENCAP_OPS)
1721 		return -ERANGE;
1722 
1723 	ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
1724 		       &ip6tun_encaps[num],
1725 		       ops, NULL) == ops) ? 0 : -1;
1726 
1727 	synchronize_net();
1728 
1729 	return ret;
1730 }
1731 EXPORT_SYMBOL(ip6_tnl_encap_del_ops);
1732 
1733 int ip6_tnl_encap_setup(struct ip6_tnl *t,
1734 			struct ip_tunnel_encap *ipencap)
1735 {
1736 	int hlen;
1737 
1738 	memset(&t->encap, 0, sizeof(t->encap));
1739 
1740 	hlen = ip6_encap_hlen(ipencap);
1741 	if (hlen < 0)
1742 		return hlen;
1743 
1744 	t->encap.type = ipencap->type;
1745 	t->encap.sport = ipencap->sport;
1746 	t->encap.dport = ipencap->dport;
1747 	t->encap.flags = ipencap->flags;
1748 
1749 	t->encap_hlen = hlen;
1750 	t->hlen = t->encap_hlen + t->tun_hlen;
1751 
1752 	return 0;
1753 }
1754 EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
1755 
1756 static const struct net_device_ops ip6_tnl_netdev_ops = {
1757 	.ndo_init	= ip6_tnl_dev_init,
1758 	.ndo_uninit	= ip6_tnl_dev_uninit,
1759 	.ndo_start_xmit = ip6_tnl_start_xmit,
1760 	.ndo_do_ioctl	= ip6_tnl_ioctl,
1761 	.ndo_change_mtu = ip6_tnl_change_mtu,
1762 	.ndo_get_stats	= ip6_get_stats,
1763 	.ndo_get_iflink = ip6_tnl_get_iflink,
1764 };
1765 
1766 #define IPXIPX_FEATURES (NETIF_F_SG |		\
1767 			 NETIF_F_FRAGLIST |	\
1768 			 NETIF_F_HIGHDMA |	\
1769 			 NETIF_F_GSO_SOFTWARE |	\
1770 			 NETIF_F_HW_CSUM)
1771 
1772 /**
1773  * ip6_tnl_dev_setup - setup virtual tunnel device
1774  *   @dev: virtual device associated with tunnel
1775  *
1776  * Description:
1777  *   Initialize function pointers and device parameters
1778  **/
1779 
1780 static void ip6_tnl_dev_setup(struct net_device *dev)
1781 {
1782 	dev->netdev_ops = &ip6_tnl_netdev_ops;
1783 	dev->needs_free_netdev = true;
1784 	dev->priv_destructor = ip6_dev_free;
1785 
1786 	dev->type = ARPHRD_TUNNEL6;
1787 	dev->flags |= IFF_NOARP;
1788 	dev->addr_len = sizeof(struct in6_addr);
1789 	dev->features |= NETIF_F_LLTX;
1790 	netif_keep_dst(dev);
1791 
1792 	dev->features		|= IPXIPX_FEATURES;
1793 	dev->hw_features	|= IPXIPX_FEATURES;
1794 
1795 	/* This perm addr will be used as interface identifier by IPv6 */
1796 	dev->addr_assign_type = NET_ADDR_RANDOM;
1797 	eth_random_addr(dev->perm_addr);
1798 }
1799 
1800 
1801 /**
1802  * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
1803  *   @dev: virtual device associated with tunnel
1804  **/
1805 
1806 static inline int
1807 ip6_tnl_dev_init_gen(struct net_device *dev)
1808 {
1809 	struct ip6_tnl *t = netdev_priv(dev);
1810 	int ret;
1811 	int t_hlen;
1812 
1813 	t->dev = dev;
1814 	t->net = dev_net(dev);
1815 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1816 	if (!dev->tstats)
1817 		return -ENOMEM;
1818 
1819 	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
1820 	if (ret)
1821 		goto free_stats;
1822 
1823 	ret = gro_cells_init(&t->gro_cells, dev);
1824 	if (ret)
1825 		goto destroy_dst;
1826 
1827 	t->tun_hlen = 0;
1828 	t->hlen = t->encap_hlen + t->tun_hlen;
1829 	t_hlen = t->hlen + sizeof(struct ipv6hdr);
1830 
1831 	dev->type = ARPHRD_TUNNEL6;
1832 	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1833 	dev->mtu = ETH_DATA_LEN - t_hlen;
1834 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1835 		dev->mtu -= 8;
1836 	dev->min_mtu = ETH_MIN_MTU;
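	/* 0xFFF8 (65528) is the 64 KiB IP length limit rounded down to an
	 * 8-byte boundary; subtracting hard_header_len leaves room for the
	 * tunnel headers.
	 */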
1837 	dev->max_mtu = 0xFFF8 - dev->hard_header_len;
1838 
1839 	return 0;
1840 
1841 destroy_dst:
1842 	dst_cache_destroy(&t->dst_cache);
1843 free_stats:
1844 	free_percpu(dev->tstats);
1845 	dev->tstats = NULL;
1846 
1847 	return ret;
1848 }
1849 
1850 /**
1851  * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
1852  *   @dev: virtual device associated with tunnel
1853  **/
1854 
1855 static int ip6_tnl_dev_init(struct net_device *dev)
1856 {
1857 	struct ip6_tnl *t = netdev_priv(dev);
1858 	int err = ip6_tnl_dev_init_gen(dev);
1859 
1860 	if (err)
1861 		return err;
1862 	ip6_tnl_link_config(t);
1863 	if (t->parms.collect_md) {
1864 		dev->features |= NETIF_F_NETNS_LOCAL;
1865 		netif_keep_dst(dev);
1866 	}
1867 	return 0;
1868 }
1869 
1870 /**
1871  * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
1872  *   @dev: fallback device
1873  *
1874  * Return: 0
1875  **/
1876 
1877 static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1878 {
1879 	struct ip6_tnl *t = netdev_priv(dev);
1880 	struct net *net = dev_net(dev);
1881 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1882 
1883 	t->parms.proto = IPPROTO_IPV6;
1884 	dev_hold(dev);
1885 
1886 	rcu_assign_pointer(ip6n->tnls_wc[0], t);
1887 	return 0;
1888 }
1889 
1890 static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[],
1891 			    struct netlink_ext_ack *extack)
1892 {
1893 	u8 proto;
1894 
1895 	if (!data || !data[IFLA_IPTUN_PROTO])
1896 		return 0;
1897 
1898 	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1899 	if (proto != IPPROTO_IPV6 &&
1900 	    proto != IPPROTO_IPIP &&
1901 	    proto != 0)
1902 		return -EINVAL;
1903 
1904 	return 0;
1905 }
1906 
1907 static void ip6_tnl_netlink_parms(struct nlattr *data[],
1908 				  struct __ip6_tnl_parm *parms)
1909 {
1910 	memset(parms, 0, sizeof(*parms));
1911 
1912 	if (!data)
1913 		return;
1914 
1915 	if (data[IFLA_IPTUN_LINK])
1916 		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
1917 
1918 	if (data[IFLA_IPTUN_LOCAL])
1919 		parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);
1920 
1921 	if (data[IFLA_IPTUN_REMOTE])
1922 		parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);
1923 
1924 	if (data[IFLA_IPTUN_TTL])
1925 		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
1926 
1927 	if (data[IFLA_IPTUN_ENCAP_LIMIT])
1928 		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);
1929 
1930 	if (data[IFLA_IPTUN_FLOWINFO])
1931 		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);
1932 
1933 	if (data[IFLA_IPTUN_FLAGS])
1934 		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);
1935 
1936 	if (data[IFLA_IPTUN_PROTO])
1937 		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1938 
1939 	if (data[IFLA_IPTUN_COLLECT_METADATA])
1940 		parms->collect_md = true;
1941 
1942 	if (data[IFLA_IPTUN_FWMARK])
1943 		parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
1944 }
1945 
1946 static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
1947 					struct ip_tunnel_encap *ipencap)
1948 {
1949 	bool ret = false;
1950 
1951 	memset(ipencap, 0, sizeof(*ipencap));
1952 
1953 	if (!data)
1954 		return ret;
1955 
1956 	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
1957 		ret = true;
1958 		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
1959 	}
1960 
1961 	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
1962 		ret = true;
1963 		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
1964 	}
1965 
1966 	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
1967 		ret = true;
1968 		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
1969 	}
1970 
1971 	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
1972 		ret = true;
1973 		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
1974 	}
1975 
1976 	return ret;
1977 }
1978 
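/* newlink: reject duplicates before creating the device.  At most one
 * collect_md (external/metadata mode) tunnel is allowed per netns; for
 * regular tunnels the local/remote address pair must not match an
 * existing tunnel.
 */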
1979 static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
1980 			   struct nlattr *tb[], struct nlattr *data[],
1981 			   struct netlink_ext_ack *extack)
1982 {
1983 	struct net *net = dev_net(dev);
1984 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1985 	struct ip6_tnl *nt, *t;
1986 	struct ip_tunnel_encap ipencap;
1987 
1988 	nt = netdev_priv(dev);
1989 
1990 	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
1991 		int err = ip6_tnl_encap_setup(nt, &ipencap);
1992 
1993 		if (err < 0)
1994 			return err;
1995 	}
1996 
1997 	ip6_tnl_netlink_parms(data, &nt->parms);
1998 
1999 	if (nt->parms.collect_md) {
2000 		if (rtnl_dereference(ip6n->collect_md_tun))
2001 			return -EEXIST;
2002 	} else {
2003 		t = ip6_tnl_locate(net, &nt->parms, 0);
2004 		if (!IS_ERR(t))
2005 			return -EEXIST;
2006 	}
2007 
2008 	return ip6_tnl_create2(dev);
2009 }
2010 
2011 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
2012 			      struct nlattr *data[],
2013 			      struct netlink_ext_ack *extack)
2014 {
2015 	struct ip6_tnl *t = netdev_priv(dev);
2016 	struct __ip6_tnl_parm p;
2017 	struct net *net = t->net;
2018 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2019 	struct ip_tunnel_encap ipencap;
2020 
2021 	if (dev == ip6n->fb_tnl_dev)
2022 		return -EINVAL;
2023 
2024 	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
2025 		int err = ip6_tnl_encap_setup(t, &ipencap);
2026 
2027 		if (err < 0)
2028 			return err;
2029 	}
2030 	ip6_tnl_netlink_parms(data, &p);
2031 	if (p.collect_md)
2032 		return -EINVAL;
2033 
2034 	t = ip6_tnl_locate(net, &p, 0);
2035 	if (!IS_ERR(t)) {
2036 		if (t->dev != dev)
2037 			return -EEXIST;
2038 	} else
2039 		t = netdev_priv(dev);
2040 
2041 	return ip6_tnl_update(t, &p);
2042 }
2043 
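/* The per-netns fallback device "ip6tnl0" cannot be deleted through
 * netlink; it only goes away when its netns is torn down.
 */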
2044 static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
2045 {
2046 	struct net *net = dev_net(dev);
2047 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2048 
2049 	if (dev != ip6n->fb_tnl_dev)
2050 		unregister_netdevice_queue(dev, head);
2051 }
2052 
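/* Size of the netlink dump for one tunnel.  This must stay in sync with
 * ip6_tnl_fill_info() below: every attribute emitted there has to be
 * accounted for here, or nla_put() in the dump will fail with -EMSGSIZE.
 */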
2053 static size_t ip6_tnl_get_size(const struct net_device *dev)
2054 {
2055 	return
2056 		/* IFLA_IPTUN_LINK */
2057 		nla_total_size(4) +
2058 		/* IFLA_IPTUN_LOCAL */
2059 		nla_total_size(sizeof(struct in6_addr)) +
2060 		/* IFLA_IPTUN_REMOTE */
2061 		nla_total_size(sizeof(struct in6_addr)) +
2062 		/* IFLA_IPTUN_TTL */
2063 		nla_total_size(1) +
2064 		/* IFLA_IPTUN_ENCAP_LIMIT */
2065 		nla_total_size(1) +
2066 		/* IFLA_IPTUN_FLOWINFO */
2067 		nla_total_size(4) +
2068 		/* IFLA_IPTUN_FLAGS */
2069 		nla_total_size(4) +
2070 		/* IFLA_IPTUN_PROTO */
2071 		nla_total_size(1) +
2072 		/* IFLA_IPTUN_ENCAP_TYPE */
2073 		nla_total_size(2) +
2074 		/* IFLA_IPTUN_ENCAP_FLAGS */
2075 		nla_total_size(2) +
2076 		/* IFLA_IPTUN_ENCAP_SPORT */
2077 		nla_total_size(2) +
2078 		/* IFLA_IPTUN_ENCAP_DPORT */
2079 		nla_total_size(2) +
2080 		/* IFLA_IPTUN_COLLECT_METADATA */
2081 		nla_total_size(0) +
2082 		/* IFLA_IPTUN_FWMARK */
2083 		nla_total_size(4) +
2084 		0;
2085 }
2086 
2087 static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
2088 {
2089 	struct ip6_tnl *tunnel = netdev_priv(dev);
2090 	struct __ip6_tnl_parm *parm = &tunnel->parms;
2091 
2092 	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
2093 	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
2094 	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
2095 	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
2096 	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
2097 	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
2098 	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
2099 	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
2100 	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
2101 		goto nla_put_failure;
2102 
2103 	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
2104 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
2105 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
2106 	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
2107 		goto nla_put_failure;
2108 
2109 	if (parm->collect_md)
2110 		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
2111 			goto nla_put_failure;
2112 
2113 	return 0;
2114 
2115 nla_put_failure:
2116 	return -EMSGSIZE;
2117 }
2118 
2119 struct net *ip6_tnl_get_link_net(const struct net_device *dev)
2120 {
2121 	struct ip6_tnl *tunnel = netdev_priv(dev);
2122 
2123 	return tunnel->net;
2124 }
2125 EXPORT_SYMBOL(ip6_tnl_get_link_net);
2126 
2127 static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
2128 	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
2129 	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
2130 	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
2131 	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
2132 	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
2133 	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
2134 	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
2135 	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
2136 	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
2137 	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
2138 	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
2139 	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
2140 	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
2141 	[IFLA_IPTUN_FWMARK]		= { .type = NLA_U32 },
2142 };
2143 
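/* rtnl_link_ops for the "ip6tnl" link kind.  For illustration only
 * (iproute2 syntax, not part of this file), such a tunnel is typically
 * created with something like:
 *
 *	ip link add name tnl0 type ip6tnl mode any \
 *		local 2001:db8::1 remote 2001:db8::2
 */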
2144 static struct rtnl_link_ops ip6_link_ops __read_mostly = {
2145 	.kind		= "ip6tnl",
2146 	.maxtype	= IFLA_IPTUN_MAX,
2147 	.policy		= ip6_tnl_policy,
2148 	.priv_size	= sizeof(struct ip6_tnl),
2149 	.setup		= ip6_tnl_dev_setup,
2150 	.validate	= ip6_tnl_validate,
2151 	.newlink	= ip6_tnl_newlink,
2152 	.changelink	= ip6_tnl_changelink,
2153 	.dellink	= ip6_tnl_dellink,
2154 	.get_size	= ip6_tnl_get_size,
2155 	.fill_info	= ip6_tnl_fill_info,
2156 	.get_link_net	= ip6_tnl_get_link_net,
2157 };
2158 
2159 static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
2160 	.handler	= ip4ip6_rcv,
2161 	.err_handler	= ip4ip6_err,
2162 	.priority	= 1,
2163 };
2164 
2165 static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
2166 	.handler	= ip6ip6_rcv,
2167 	.err_handler	= ip6ip6_err,
2168 	.priority	= 1,
2169 };
2170 
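/* netns teardown: queue every ip6tnl device that belongs to this netns for
 * unregistration, plus any tunnel hashed here whose device has since been
 * moved to a different netns (its hash entry stays in the creating netns).
 */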
2171 static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
2172 {
2173 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2174 	struct net_device *dev, *aux;
2175 	int h;
2176 	struct ip6_tnl *t;
2177 	LIST_HEAD(list);
2178 
2179 	for_each_netdev_safe(net, dev, aux)
2180 		if (dev->rtnl_link_ops == &ip6_link_ops)
2181 			unregister_netdevice_queue(dev, &list);
2182 
2183 	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
2184 		t = rtnl_dereference(ip6n->tnls_r_l[h]);
2185 		while (t) {
2186 			/* If dev is in the same netns, it has already
2187 			 * been added to the list by the previous loop.
2188 			 */
2189 			if (!net_eq(dev_net(t->dev), net))
2190 				unregister_netdevice_queue(t->dev, &list);
2191 			t = rtnl_dereference(t->next);
2192 		}
2193 	}
2194 
2195 	unregister_netdevice_many(&list);
2196 }
2197 
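/* Per-netns setup: create the fallback device "ip6tnl0".  It receives
 * tunnelled packets that match no configured tunnel and, unlike ordinary
 * ip6tnl devices, is pinned to its netns (NETIF_F_NETNS_LOCAL).
 */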
2198 static int __net_init ip6_tnl_init_net(struct net *net)
2199 {
2200 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2201 	struct ip6_tnl *t = NULL;
2202 	int err;
2203 
2204 	ip6n->tnls[0] = ip6n->tnls_wc;
2205 	ip6n->tnls[1] = ip6n->tnls_r_l;
2206 
2207 	err = -ENOMEM;
2208 	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
2209 					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);
2210 
2211 	if (!ip6n->fb_tnl_dev)
2212 		goto err_alloc_dev;
2213 	dev_net_set(ip6n->fb_tnl_dev, net);
2214 	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
2215 	/* FB netdevice is special: we have one, and only one per netns.
2216 	 * Allowing it to be moved to another netns is clearly unsafe.
2217 	 */
2218 	ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
2219 
2220 	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
2221 	if (err < 0)
2222 		goto err_register;
2223 
2224 	err = register_netdev(ip6n->fb_tnl_dev);
2225 	if (err < 0)
2226 		goto err_register;
2227 
2228 	t = netdev_priv(ip6n->fb_tnl_dev);
2229 
2230 	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
2231 	return 0;
2232 
2233 err_register:
2234 	free_netdev(ip6n->fb_tnl_dev);
2235 err_alloc_dev:
2236 	return err;
2237 }
2238 
2239 static void __net_exit ip6_tnl_exit_net(struct net *net)
2240 {
2241 	rtnl_lock();
2242 	ip6_tnl_destroy_tunnels(net);
2243 	rtnl_unlock();
2244 }
2245 
2246 static struct pernet_operations ip6_tnl_net_ops = {
2247 	.init = ip6_tnl_init_net,
2248 	.exit = ip6_tnl_exit_net,
2249 	.id   = &ip6_tnl_net_id,
2250 	.size = sizeof(struct ip6_tnl_net),
2251 };
2252 
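/* Module init/exit.  Registration order matters: the per-netns state (and
 * its fallback device) is set up before the xfrm6 tunnel handlers, so that
 * tunnel lookup state already exists once packets can be delivered, and the
 * rtnl link ops come last; the error path unwinds in the reverse order.
 */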
2253 /**
2254  * ip6_tunnel_init - register protocol and reserve needed resources
2255  *
2256  * Return: 0 on success, a negative error code on failure
2257  **/
2258 
2259 static int __init ip6_tunnel_init(void)
2260 {
2261 	int  err;
2262 
2263 	if (!ipv6_mod_enabled())
2264 		return -EOPNOTSUPP;
2265 
2266 	err = register_pernet_device(&ip6_tnl_net_ops);
2267 	if (err < 0)
2268 		goto out_pernet;
2269 
2270 	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
2271 	if (err < 0) {
2272 		pr_err("%s: can't register ip4ip6\n", __func__);
2273 		goto out_ip4ip6;
2274 	}
2275 
2276 	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
2277 	if (err < 0) {
2278 		pr_err("%s: can't register ip6ip6\n", __func__);
2279 		goto out_ip6ip6;
2280 	}
2281 	err = rtnl_link_register(&ip6_link_ops);
2282 	if (err < 0)
2283 		goto rtnl_link_failed;
2284 
2285 	return 0;
2286 
2287 rtnl_link_failed:
2288 	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
2289 out_ip6ip6:
2290 	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
2291 out_ip4ip6:
2292 	unregister_pernet_device(&ip6_tnl_net_ops);
2293 out_pernet:
2294 	return err;
2295 }
2296 
2297 /**
2298  * ip6_tunnel_cleanup - free resources and unregister protocol
2299  **/
2300 
2301 static void __exit ip6_tunnel_cleanup(void)
2302 {
2303 	rtnl_link_unregister(&ip6_link_ops);
2304 	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
2305 		pr_info("%s: can't deregister ip4ip6\n", __func__);
2306 
2307 	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
2308 		pr_info("%s: can't deregister ip6ip6\n", __func__);
2309 
2310 	unregister_pernet_device(&ip6_tnl_net_ops);
2311 }
2312 
2313 module_init(ip6_tunnel_init);
2314 module_exit(ip6_tunnel_cleanup);
2315