xref: /openbmc/linux/net/ipv6/ip6_tunnel.c (revision 1c2dd16a)
1 /*
2  *	IPv6 tunneling device
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
7  *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
8  *
9  *      Based on:
10  *      linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
11  *
12  *      RFC 2473
13  *
14  *	This program is free software; you can redistribute it and/or
15  *      modify it under the terms of the GNU General Public License
16  *      as published by the Free Software Foundation; either version
17  *      2 of the License, or (at your option) any later version.
18  *
19  */
20 
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 
23 #include <linux/module.h>
24 #include <linux/capability.h>
25 #include <linux/errno.h>
26 #include <linux/types.h>
27 #include <linux/sockios.h>
28 #include <linux/icmp.h>
29 #include <linux/if.h>
30 #include <linux/in.h>
31 #include <linux/ip.h>
32 #include <linux/net.h>
33 #include <linux/in6.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/icmpv6.h>
37 #include <linux/init.h>
38 #include <linux/route.h>
39 #include <linux/rtnetlink.h>
40 #include <linux/netfilter_ipv6.h>
41 #include <linux/slab.h>
42 #include <linux/hash.h>
43 #include <linux/etherdevice.h>
44 
45 #include <linux/uaccess.h>
46 #include <linux/atomic.h>
47 
48 #include <net/icmp.h>
49 #include <net/ip.h>
50 #include <net/ip_tunnels.h>
51 #include <net/ipv6.h>
52 #include <net/ip6_route.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_tunnel.h>
55 #include <net/xfrm.h>
56 #include <net/dsfield.h>
57 #include <net/inet_ecn.h>
58 #include <net/net_namespace.h>
59 #include <net/netns/generic.h>
60 #include <net/dst_metadata.h>
61 
62 MODULE_AUTHOR("Ville Nuorvala");
63 MODULE_DESCRIPTION("IPv6 tunneling device");
64 MODULE_LICENSE("GPL");
65 MODULE_ALIAS_RTNL_LINK("ip6tnl");
66 MODULE_ALIAS_NETDEV("ip6tnl0");
67 
68 #define IP6_TUNNEL_HASH_SIZE_SHIFT  5
69 #define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)
70 
71 static bool log_ecn_error = true;
72 module_param(log_ecn_error, bool, 0644);
73 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
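/* Editorial note: with the 0644 mode above, the flag should also be
 * flippable at runtime, e.g. via
 * /sys/module/ip6_tunnel/parameters/log_ecn_error.
 */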
74 
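/* Fold the two tunnel endpoint addresses into an index into the
 * IP6_TUNNEL_HASH_SIZE-bucket hash table declared below.
 */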
75 static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
76 {
77 	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
78 
79 	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
80 }
81 
82 static int ip6_tnl_dev_init(struct net_device *dev);
83 static void ip6_tnl_dev_setup(struct net_device *dev);
84 static struct rtnl_link_ops ip6_link_ops __read_mostly;
85 
86 static unsigned int ip6_tnl_net_id __read_mostly;
87 struct ip6_tnl_net {
88 	/* the IPv6 tunnel fallback device */
89 	struct net_device *fb_tnl_dev;
90 	/* lists for storing tunnels in use */
91 	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
92 	struct ip6_tnl __rcu *tnls_wc[1];
93 	struct ip6_tnl __rcu **tnls[2];
94 	struct ip6_tnl __rcu *collect_md_tun;
95 };
96 
97 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
98 {
99 	struct pcpu_sw_netstats tmp, sum = { 0 };
100 	int i;
101 
102 	for_each_possible_cpu(i) {
103 		unsigned int start;
104 		const struct pcpu_sw_netstats *tstats =
105 						   per_cpu_ptr(dev->tstats, i);
106 
107 		do {
108 			start = u64_stats_fetch_begin_irq(&tstats->syncp);
109 			tmp.rx_packets = tstats->rx_packets;
110 			tmp.rx_bytes = tstats->rx_bytes;
111 			tmp.tx_packets = tstats->tx_packets;
112 			tmp.tx_bytes =  tstats->tx_bytes;
113 		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
114 
115 		sum.rx_packets += tmp.rx_packets;
116 		sum.rx_bytes   += tmp.rx_bytes;
117 		sum.tx_packets += tmp.tx_packets;
118 		sum.tx_bytes   += tmp.tx_bytes;
119 	}
120 	dev->stats.rx_packets = sum.rx_packets;
121 	dev->stats.rx_bytes   = sum.rx_bytes;
122 	dev->stats.tx_packets = sum.tx_packets;
123 	dev->stats.tx_bytes   = sum.tx_bytes;
124 	return &dev->stats;
125 }
126 
127 /**
128  * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
129  *   @remote: the address of the tunnel exit-point
130  *   @local: the address of the tunnel entry-point
131  *
132  * Return:
133  *   tunnel matching given end-points if found,
134  *   else fallback tunnel if its device is up,
135  *   else %NULL
136  **/
137 
138 #define for_each_ip6_tunnel_rcu(start) \
139 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
140 
141 static struct ip6_tnl *
142 ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
143 {
144 	unsigned int hash = HASH(remote, local);
145 	struct ip6_tnl *t;
146 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
147 	struct in6_addr any;
148 
149 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
150 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
151 		    ipv6_addr_equal(remote, &t->parms.raddr) &&
152 		    (t->dev->flags & IFF_UP))
153 			return t;
154 	}
155 
156 	memset(&any, 0, sizeof(any));
157 	hash = HASH(&any, local);
158 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
159 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
160 		    ipv6_addr_any(&t->parms.raddr) &&
161 		    (t->dev->flags & IFF_UP))
162 			return t;
163 	}
164 
165 	hash = HASH(remote, &any);
166 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
167 		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
168 		    ipv6_addr_any(&t->parms.laddr) &&
169 		    (t->dev->flags & IFF_UP))
170 			return t;
171 	}
172 
173 	t = rcu_dereference(ip6n->collect_md_tun);
174 	if (t)
175 		return t;
176 
177 	t = rcu_dereference(ip6n->tnls_wc[0]);
178 	if (t && (t->dev->flags & IFF_UP))
179 		return t;
180 
181 	return NULL;
182 }
183 
184 /**
185  * ip6_tnl_bucket - get head of list matching given tunnel parameters
186  *   @p: parameters containing tunnel end-points
187  *
188  * Description:
189  *   ip6_tnl_bucket() returns the head of the list matching the
190  *   &struct in6_addr entries laddr and raddr in @p.
191  *
192  * Return: head of IPv6 tunnel list
193  **/
194 
195 static struct ip6_tnl __rcu **
196 ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
197 {
198 	const struct in6_addr *remote = &p->raddr;
199 	const struct in6_addr *local = &p->laddr;
200 	unsigned int h = 0;
201 	int prio = 0;
202 
203 	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
204 		prio = 1;
205 		h = HASH(remote, local);
206 	}
207 	return &ip6n->tnls[prio][h];
208 }
209 
210 /**
211  * ip6_tnl_link - add tunnel to hash table
212  *   @t: tunnel to be added
213  **/
214 
215 static void
216 ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
217 {
218 	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
219 
220 	if (t->parms.collect_md)
221 		rcu_assign_pointer(ip6n->collect_md_tun, t);
222 	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
223 	rcu_assign_pointer(*tp, t);
224 }
225 
226 /**
227  * ip6_tnl_unlink - remove tunnel from hash table
228  *   @t: tunnel to be removed
229  **/
230 
231 static void
232 ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
233 {
234 	struct ip6_tnl __rcu **tp;
235 	struct ip6_tnl *iter;
236 
237 	if (t->parms.collect_md)
238 		rcu_assign_pointer(ip6n->collect_md_tun, NULL);
239 
240 	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
241 	     (iter = rtnl_dereference(*tp)) != NULL;
242 	     tp = &iter->next) {
243 		if (t == iter) {
244 			rcu_assign_pointer(*tp, t->next);
245 			break;
246 		}
247 	}
248 }
249 
250 static void ip6_dev_free(struct net_device *dev)
251 {
252 	struct ip6_tnl *t = netdev_priv(dev);
253 
254 	gro_cells_destroy(&t->gro_cells);
255 	dst_cache_destroy(&t->dst_cache);
256 	free_percpu(dev->tstats);
257 	free_netdev(dev);
258 }
259 
260 static int ip6_tnl_create2(struct net_device *dev)
261 {
262 	struct ip6_tnl *t = netdev_priv(dev);
263 	struct net *net = dev_net(dev);
264 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
265 	int err;
266 
269 	dev->rtnl_link_ops = &ip6_link_ops;
270 	err = register_netdevice(dev);
271 	if (err < 0)
272 		goto out;
273 
274 	strcpy(t->parms.name, dev->name);
275 
276 	dev_hold(dev);
277 	ip6_tnl_link(ip6n, t);
278 	return 0;
279 
280 out:
281 	return err;
282 }
283 
284 /**
285  * ip6_tnl_create - create a new tunnel
286  *   @net: network namespace the tunnel is created in
287  *   @p: tunnel parameters
288  *
289  * Description:
290  *   Create tunnel matching given parameters.
291  *
292  * Return:
293  *   created tunnel or error pointer
294  **/
295 
296 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
297 {
298 	struct net_device *dev;
299 	struct ip6_tnl *t;
300 	char name[IFNAMSIZ];
301 	int err = -ENOMEM;
302 
303 	if (p->name[0])
304 		strlcpy(name, p->name, IFNAMSIZ);
305 	else
306 		sprintf(name, "ip6tnl%%d");
307 
308 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
309 			   ip6_tnl_dev_setup);
310 	if (!dev)
311 		goto failed;
312 
313 	dev_net_set(dev, net);
314 
315 	t = netdev_priv(dev);
316 	t->parms = *p;
317 	t->net = dev_net(dev);
318 	err = ip6_tnl_create2(dev);
319 	if (err < 0)
320 		goto failed_free;
321 
322 	return t;
323 
324 failed_free:
325 	ip6_dev_free(dev);
326 failed:
327 	return ERR_PTR(err);
328 }
329 
330 /**
331  * ip6_tnl_locate - find or create tunnel matching given parameters
332  *   @p: tunnel parameters
333  *   @create: != 0 if allowed to create new tunnel if no match found
334  *
335  * Description:
336  *   ip6_tnl_locate() first tries to locate an existing tunnel
337  *   based on @parms. If this is unsuccessful, but @create is set a new
338  *   tunnel device is created and registered for use.
339  *
340  * Return:
341  *   matching tunnel or error pointer
342  **/
343 
344 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
345 		struct __ip6_tnl_parm *p, int create)
346 {
347 	const struct in6_addr *remote = &p->raddr;
348 	const struct in6_addr *local = &p->laddr;
349 	struct ip6_tnl __rcu **tp;
350 	struct ip6_tnl *t;
351 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
352 
353 	for (tp = ip6_tnl_bucket(ip6n, p);
354 	     (t = rtnl_dereference(*tp)) != NULL;
355 	     tp = &t->next) {
356 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
357 		    ipv6_addr_equal(remote, &t->parms.raddr)) {
358 			if (create)
359 				return ERR_PTR(-EEXIST);
360 
361 			return t;
362 		}
363 	}
364 	if (!create)
365 		return ERR_PTR(-ENODEV);
366 	return ip6_tnl_create(net, p);
367 }
368 
369 /**
370  * ip6_tnl_dev_uninit - tunnel device uninitializer
371  *   @dev: the device to be destroyed
372  *
373  * Description:
374  *   ip6_tnl_dev_uninit() removes tunnel from its list
375  **/
376 
377 static void
378 ip6_tnl_dev_uninit(struct net_device *dev)
379 {
380 	struct ip6_tnl *t = netdev_priv(dev);
381 	struct net *net = t->net;
382 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
383 
384 	if (dev == ip6n->fb_tnl_dev)
385 		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
386 	else
387 		ip6_tnl_unlink(ip6n, t);
388 	dst_cache_reset(&t->dst_cache);
389 	dev_put(dev);
390 }
391 
392 /**
393  * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
394  *   @skb: received socket buffer
395  *   @raw: pointer to the start of the tunneled IPv6 header
396  * Return:
397  *   0 if none was found,
398  *   else offset of the encapsulation limit option, relative to @raw
399  **/
400 
401 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
402 {
403 	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
404 	unsigned int nhoff = raw - skb->data;
405 	unsigned int off = nhoff + sizeof(*ipv6h);
406 	u8 next, nexthdr = ipv6h->nexthdr;
407 
408 	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
409 		struct ipv6_opt_hdr *hdr;
410 		u16 optlen;
411 
412 		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
413 			break;
414 
415 		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
416 		if (nexthdr == NEXTHDR_FRAGMENT) {
417 			struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
418 			if (frag_hdr->frag_off)
419 				break;
420 			optlen = 8;
421 		} else if (nexthdr == NEXTHDR_AUTH) {
422 			optlen = (hdr->hdrlen + 2) << 2;
423 		} else {
424 			optlen = ipv6_optlen(hdr);
425 		}
426 		/* cache hdr->nexthdr, since pskb_may_pull() might
427 		 * invalidate hdr
428 		 */
429 		next = hdr->nexthdr;
430 		if (nexthdr == NEXTHDR_DEST) {
431 			u16 i = 2;
432 
433 			/* Remember: hdr is no longer valid at this point. */
434 			if (!pskb_may_pull(skb, off + optlen))
435 				break;
436 
437 			while (1) {
438 				struct ipv6_tlv_tnl_enc_lim *tel;
439 
440 				/* No more room for encapsulation limit */
441 				if (i + sizeof(*tel) > optlen)
442 					break;
443 
444 				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
445 				/* return index of option if found and valid */
446 				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
447 				    tel->length == 1)
448 					return i + off - nhoff;
449 				/* else jump to next option */
450 				if (tel->type)
451 					i += tel->length + 2;
452 				else
453 					i++;
454 			}
455 		}
456 		nexthdr = next;
457 		off += optlen;
458 	}
459 	return 0;
460 }
461 EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
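/* Worked example (editorial note): if the tunneled IPv6 header is
 * immediately followed by a single 8-byte destination options header whose
 * encapsulation limit option sits at TLV offset 2, the loop above returns
 * i + off - nhoff = 2 + 40 = 42, i.e. the offset of the option relative to
 * @raw.  Callers such as ip6ip6_tnl_xmit() then read a
 * struct ipv6_tlv_tnl_enc_lim at exactly that offset.
 */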
462 
463 /**
464  * ip6_tnl_err - tunnel error handler
465  *
466  * Description:
467  *   ip6_tnl_err() should handle errors in the tunnel according
468  *   to the specifications in RFC 2473.
469  **/
470 
471 static int
472 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
473 	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
474 {
475 	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
476 	struct ip6_tnl *t;
477 	int rel_msg = 0;
478 	u8 rel_type = ICMPV6_DEST_UNREACH;
479 	u8 rel_code = ICMPV6_ADDR_UNREACH;
480 	u8 tproto;
481 	__u32 rel_info = 0;
482 	__u16 len;
483 	int err = -ENOENT;
484 
485 	/* If the packet doesn't contain the original IPv6 header we are
486 	   in trouble since we might need the source address for further
487 	   processing of the error. */
488 
489 	rcu_read_lock();
490 	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
491 	if (!t)
492 		goto out;
493 
494 	tproto = ACCESS_ONCE(t->parms.proto);
495 	if (tproto != ipproto && tproto != 0)
496 		goto out;
497 
498 	err = 0;
499 
500 	switch (*type) {
501 		__u32 teli;
502 		struct ipv6_tlv_tnl_enc_lim *tel;
503 		__u32 mtu;
504 	case ICMPV6_DEST_UNREACH:
505 		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
506 				    t->parms.name);
507 		rel_msg = 1;
508 		break;
509 	case ICMPV6_TIME_EXCEED:
510 		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
511 			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
512 					    t->parms.name);
513 			rel_msg = 1;
514 		}
515 		break;
516 	case ICMPV6_PARAMPROB:
517 		teli = 0;
518 		if ((*code) == ICMPV6_HDR_FIELD)
519 			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
520 
521 		if (teli && teli == *info - 2) {
522 			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
523 			if (tel->encap_limit == 0) {
524 				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
525 						    t->parms.name);
526 				rel_msg = 1;
527 			}
528 		} else {
529 			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
530 					    t->parms.name);
531 		}
532 		break;
533 	case ICMPV6_PKT_TOOBIG:
534 		mtu = *info - offset;
535 		if (mtu < IPV6_MIN_MTU)
536 			mtu = IPV6_MIN_MTU;
537 		t->dev->mtu = mtu;
538 
539 		len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
540 		if (len > mtu) {
541 			rel_type = ICMPV6_PKT_TOOBIG;
542 			rel_code = 0;
543 			rel_info = mtu;
544 			rel_msg = 1;
545 		}
546 		break;
547 	}
548 
549 	*type = rel_type;
550 	*code = rel_code;
551 	*info = rel_info;
552 	*msg = rel_msg;
553 
554 out:
555 	rcu_read_unlock();
556 	return err;
557 }
558 
559 static int
560 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
561 	   u8 type, u8 code, int offset, __be32 info)
562 {
563 	int rel_msg = 0;
564 	u8 rel_type = type;
565 	u8 rel_code = code;
566 	__u32 rel_info = ntohl(info);
567 	int err;
568 	struct sk_buff *skb2;
569 	const struct iphdr *eiph;
570 	struct rtable *rt;
571 	struct flowi4 fl4;
572 
573 	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
574 			  &rel_msg, &rel_info, offset);
575 	if (err < 0)
576 		return err;
577 
578 	if (rel_msg == 0)
579 		return 0;
580 
581 	switch (rel_type) {
582 	case ICMPV6_DEST_UNREACH:
583 		if (rel_code != ICMPV6_ADDR_UNREACH)
584 			return 0;
585 		rel_type = ICMP_DEST_UNREACH;
586 		rel_code = ICMP_HOST_UNREACH;
587 		break;
588 	case ICMPV6_PKT_TOOBIG:
589 		if (rel_code != 0)
590 			return 0;
591 		rel_type = ICMP_DEST_UNREACH;
592 		rel_code = ICMP_FRAG_NEEDED;
593 		break;
594 	case NDISC_REDIRECT:
595 		rel_type = ICMP_REDIRECT;
596 		rel_code = ICMP_REDIR_HOST;
597 	default:
598 		return 0;
599 	}
600 
601 	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
602 		return 0;
603 
604 	skb2 = skb_clone(skb, GFP_ATOMIC);
605 	if (!skb2)
606 		return 0;
607 
608 	skb_dst_drop(skb2);
609 
610 	skb_pull(skb2, offset);
611 	skb_reset_network_header(skb2);
612 	eiph = ip_hdr(skb2);
613 
614 	/* Try to guess incoming interface */
615 	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
616 				   eiph->saddr, 0,
617 				   0, 0,
618 				   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
619 	if (IS_ERR(rt))
620 		goto out;
621 
622 	skb2->dev = rt->dst.dev;
623 
624 	/* route "incoming" packet */
625 	if (rt->rt_flags & RTCF_LOCAL) {
626 		ip_rt_put(rt);
628 		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
629 					   eiph->daddr, eiph->saddr,
630 					   0, 0,
631 					   IPPROTO_IPIP,
632 					   RT_TOS(eiph->tos), 0);
633 		if (IS_ERR(rt) ||
634 		    rt->dst.dev->type != ARPHRD_TUNNEL) {
635 			if (!IS_ERR(rt))
636 				ip_rt_put(rt);
637 			goto out;
638 		}
639 		skb_dst_set(skb2, &rt->dst);
640 	} else {
641 		ip_rt_put(rt);
642 		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
643 				   skb2->dev) ||
644 		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
645 			goto out;
646 	}
647 
648 	/* change mtu on this route */
649 	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
650 		if (rel_info > dst_mtu(skb_dst(skb2)))
651 			goto out;
652 
653 		skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info);
654 	}
655 	if (rel_type == ICMP_REDIRECT)
656 		skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);
657 
658 	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
659 
660 out:
661 	kfree_skb(skb2);
662 	return 0;
663 }
664 
665 static int
666 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
667 	   u8 type, u8 code, int offset, __be32 info)
668 {
669 	int rel_msg = 0;
670 	u8 rel_type = type;
671 	u8 rel_code = code;
672 	__u32 rel_info = ntohl(info);
673 	int err;
674 
675 	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
676 			  &rel_msg, &rel_info, offset);
677 	if (err < 0)
678 		return err;
679 
680 	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
681 		struct rt6_info *rt;
682 		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
683 
684 		if (!skb2)
685 			return 0;
686 
687 		skb_dst_drop(skb2);
688 		skb_pull(skb2, offset);
689 		skb_reset_network_header(skb2);
690 
691 		/* Try to guess incoming interface */
692 		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
693 				NULL, 0, 0);
694 
695 		if (rt && rt->dst.dev)
696 			skb2->dev = rt->dst.dev;
697 
698 		icmpv6_send(skb2, rel_type, rel_code, rel_info);
699 
700 		ip6_rt_put(rt);
701 
702 		kfree_skb(skb2);
703 	}
704 
705 	return 0;
706 }
707 
708 static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
709 				       const struct ipv6hdr *ipv6h,
710 				       struct sk_buff *skb)
711 {
712 	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
713 
714 	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
715 		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
716 
717 	return IP6_ECN_decapsulate(ipv6h, skb);
718 }
719 
720 static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
721 				       const struct ipv6hdr *ipv6h,
722 				       struct sk_buff *skb)
723 {
724 	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
725 		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
726 
727 	return IP6_ECN_decapsulate(ipv6h, skb);
728 }
729 
730 __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
731 			     const struct in6_addr *laddr,
732 			     const struct in6_addr *raddr)
733 {
734 	struct __ip6_tnl_parm *p = &t->parms;
735 	int ltype = ipv6_addr_type(laddr);
736 	int rtype = ipv6_addr_type(raddr);
737 	__u32 flags = 0;
738 
739 	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
740 		flags = IP6_TNL_F_CAP_PER_PACKET;
741 	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
742 		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
743 		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
744 		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
745 		if (ltype&IPV6_ADDR_UNICAST)
746 			flags |= IP6_TNL_F_CAP_XMIT;
747 		if (rtype&IPV6_ADDR_UNICAST)
748 			flags |= IP6_TNL_F_CAP_RCV;
749 	}
750 	return flags;
751 }
752 EXPORT_SYMBOL(ip6_tnl_get_cap);
753 
754 /* called with rcu_read_lock() */
755 int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
756 				  const struct in6_addr *laddr,
757 				  const struct in6_addr *raddr)
758 {
759 	struct __ip6_tnl_parm *p = &t->parms;
760 	int ret = 0;
761 	struct net *net = t->net;
762 
763 	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
764 	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
765 	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
766 		struct net_device *ldev = NULL;
767 
768 		if (p->link)
769 			ldev = dev_get_by_index_rcu(net, p->link);
770 
771 		if ((ipv6_addr_is_multicast(laddr) ||
772 		     likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
773 		    likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
774 			ret = 1;
775 	}
776 	return ret;
777 }
778 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
779 
780 static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
781 			 const struct tnl_ptk_info *tpi,
782 			 struct metadata_dst *tun_dst,
783 			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
784 						const struct ipv6hdr *ipv6h,
785 						struct sk_buff *skb),
786 			 bool log_ecn_err)
787 {
788 	struct pcpu_sw_netstats *tstats;
789 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
790 	int err;
791 
792 	if ((!(tpi->flags & TUNNEL_CSUM) &&
793 	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
794 	    ((tpi->flags & TUNNEL_CSUM) &&
795 	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
796 		tunnel->dev->stats.rx_crc_errors++;
797 		tunnel->dev->stats.rx_errors++;
798 		goto drop;
799 	}
800 
801 	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
802 		if (!(tpi->flags & TUNNEL_SEQ) ||
803 		    (tunnel->i_seqno &&
804 		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
805 			tunnel->dev->stats.rx_fifo_errors++;
806 			tunnel->dev->stats.rx_errors++;
807 			goto drop;
808 		}
809 		tunnel->i_seqno = ntohl(tpi->seq) + 1;
810 	}
811 
812 	skb->protocol = tpi->proto;
813 
814 	/* Warning: All skb pointers will be invalidated! */
815 	if (tunnel->dev->type == ARPHRD_ETHER) {
816 		if (!pskb_may_pull(skb, ETH_HLEN)) {
817 			tunnel->dev->stats.rx_length_errors++;
818 			tunnel->dev->stats.rx_errors++;
819 			goto drop;
820 		}
821 
822 		ipv6h = ipv6_hdr(skb);
823 		skb->protocol = eth_type_trans(skb, tunnel->dev);
824 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
825 	} else {
826 		skb->dev = tunnel->dev;
827 	}
828 
829 	skb_reset_network_header(skb);
830 	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
831 
832 	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
833 
834 	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
835 	if (unlikely(err)) {
836 		if (log_ecn_err)
837 			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
838 					     &ipv6h->saddr,
839 					     ipv6_get_dsfield(ipv6h));
840 		if (err > 1) {
841 			++tunnel->dev->stats.rx_frame_errors;
842 			++tunnel->dev->stats.rx_errors;
843 			goto drop;
844 		}
845 	}
846 
847 	tstats = this_cpu_ptr(tunnel->dev->tstats);
848 	u64_stats_update_begin(&tstats->syncp);
849 	tstats->rx_packets++;
850 	tstats->rx_bytes += skb->len;
851 	u64_stats_update_end(&tstats->syncp);
852 
853 	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
854 
855 	if (tun_dst)
856 		skb_dst_set(skb, (struct dst_entry *)tun_dst);
857 
858 	gro_cells_receive(&tunnel->gro_cells, skb);
859 	return 0;
860 
861 drop:
862 	kfree_skb(skb);
863 	return 0;
864 }
865 
866 int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
867 		const struct tnl_ptk_info *tpi,
868 		struct metadata_dst *tun_dst,
869 		bool log_ecn_err)
870 {
871 	return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
872 			     log_ecn_err);
873 }
874 EXPORT_SYMBOL(ip6_tnl_rcv);
875 
876 static const struct tnl_ptk_info tpi_v6 = {
877 	/* no tunnel info required for ipxip6. */
878 	.proto = htons(ETH_P_IPV6),
879 };
880 
881 static const struct tnl_ptk_info tpi_v4 = {
882 	/* no tunnel info required for ipxip6. */
883 	.proto = htons(ETH_P_IP),
884 };
885 
886 static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
887 		      const struct tnl_ptk_info *tpi,
888 		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
889 						  const struct ipv6hdr *ipv6h,
890 						  struct sk_buff *skb))
891 {
892 	struct ip6_tnl *t;
893 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
894 	struct metadata_dst *tun_dst = NULL;
895 	int ret = -1;
896 
897 	rcu_read_lock();
898 	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
899 
900 	if (t) {
901 		u8 tproto = ACCESS_ONCE(t->parms.proto);
902 
903 		if (tproto != ipproto && tproto != 0)
904 			goto drop;
905 		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
906 			goto drop;
907 		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
908 			goto drop;
909 		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
910 			goto drop;
911 		if (t->parms.collect_md) {
912 			tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
913 			if (!tun_dst)
914 				goto drop;
915 		}
916 		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
917 				    log_ecn_error);
918 	}
919 
920 	rcu_read_unlock();
921 
922 	return ret;
923 
924 drop:
925 	rcu_read_unlock();
926 	kfree_skb(skb);
927 	return 0;
928 }
929 
930 static int ip4ip6_rcv(struct sk_buff *skb)
931 {
932 	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
933 			  ip4ip6_dscp_ecn_decapsulate);
934 }
935 
936 static int ip6ip6_rcv(struct sk_buff *skb)
937 {
938 	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
939 			  ip6ip6_dscp_ecn_decapsulate);
940 }
941 
942 struct ipv6_tel_txoption {
943 	struct ipv6_txoptions ops;
944 	__u8 dst_opt[8];
945 };
946 
947 static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
948 {
949 	memset(opt, 0, sizeof(struct ipv6_tel_txoption));
950 
951 	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
952 	opt->dst_opt[3] = 1;
953 	opt->dst_opt[4] = encap_limit;
954 	opt->dst_opt[5] = IPV6_TLV_PADN;
955 	opt->dst_opt[6] = 1;
956 
957 	opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt;
958 	opt->ops.opt_nflen = 8;
959 }
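/* Worked example (editorial note): after init_tel_txopt(&opt, 4), the
 * eight bytes above describe the following destination options header;
 * byte 0 is filled in later, when ipv6_push_frag_opts() links the header
 * into the packet:
 *
 *	dst_opt[0] = 0	next header, set when the header is pushed
 *	dst_opt[1] = 0	hdr ext len: (0 + 1) * 8 = 8 bytes total
 *	dst_opt[2] = 4	IPV6_TLV_TNL_ENCAP_LIMIT
 *	dst_opt[3] = 1	option data length
 *	dst_opt[4] = 4	the encapsulation limit itself
 *	dst_opt[5] = 1	IPV6_TLV_PADN
 *	dst_opt[6] = 1	one byte of padding follows
 *	dst_opt[7] = 0	the padding byte
 */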
960 
961 /**
962  * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
963  *   @t: the outgoing tunnel device
964  *   @hdr: IPv6 header from the incoming packet
965  *
966  * Description:
967  *   Avoid trivial tunneling loop by checking that tunnel exit-point
968  *   doesn't match source of incoming packet.
969  *
970  * Return:
971  *   true if conflict,
972  *   false otherwise
973  **/
974 
975 static inline bool
976 ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
977 {
978 	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
979 }
980 
981 int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
982 		     const struct in6_addr *laddr,
983 		     const struct in6_addr *raddr)
984 {
985 	struct __ip6_tnl_parm *p = &t->parms;
986 	int ret = 0;
987 	struct net *net = t->net;
988 
989 	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
990 	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
991 	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
992 		struct net_device *ldev = NULL;
993 
994 		rcu_read_lock();
995 		if (p->link)
996 			ldev = dev_get_by_index_rcu(net, p->link);
997 
998 		if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
999 			pr_warn("%s xmit: Local address not yet configured!\n",
1000 				p->name);
1001 		else if (!ipv6_addr_is_multicast(raddr) &&
1002 			 unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
1003 			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
1004 				p->name);
1005 		else
1006 			ret = 1;
1007 		rcu_read_unlock();
1008 	}
1009 	return ret;
1010 }
1011 EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
1012 
1013 /**
1014  * ip6_tnl_xmit - encapsulate packet and send
1015  *   @skb: the outgoing socket buffer
1016  *   @dev: the outgoing tunnel device
1017  *   @dsfield: dscp code for outer header
1018  *   @fl6: flow of tunneled packet
1019  *   @encap_limit: encapsulation limit
1020  *   @pmtu: Path MTU is stored if packet is too big
1021  *   @proto: next header value
1022  *
1023  * Description:
1024  *   Build new header and do some sanity checks on the packet before sending
1025  *   it.
1026  *
1027  * Return:
1028  *   0 on success,
1029  *   -1 on failure,
1030  *   %-EMSGSIZE if the message is too big; the MTU is stored in *pmtu
1031  **/
1032 
1033 int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1034 		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
1035 		 __u8 proto)
1036 {
1037 	struct ip6_tnl *t = netdev_priv(dev);
1038 	struct net *net = t->net;
1039 	struct net_device_stats *stats = &t->dev->stats;
1040 	struct ipv6hdr *ipv6h;
1041 	struct ipv6_tel_txoption opt;
1042 	struct dst_entry *dst = NULL, *ndst = NULL;
1043 	struct net_device *tdev;
1044 	int mtu;
1045 	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
1046 	unsigned int max_headroom = psh_hlen;
1047 	bool use_cache = false;
1048 	u8 hop_limit;
1049 	int err = -1;
1050 
1051 	if (t->parms.collect_md) {
1052 		hop_limit = skb_tunnel_info(skb)->key.ttl;
1053 		goto route_lookup;
1054 	} else {
1055 		hop_limit = t->parms.hop_limit;
1056 	}
1057 
1058 	/* NBMA tunnel */
1059 	if (ipv6_addr_any(&t->parms.raddr)) {
1060 		if (skb->protocol == htons(ETH_P_IPV6)) {
1061 			struct in6_addr *addr6;
1062 			struct neighbour *neigh;
1063 			int addr_type;
1064 
1065 			if (!skb_dst(skb))
1066 				goto tx_err_link_failure;
1067 
1068 			neigh = dst_neigh_lookup(skb_dst(skb),
1069 						 &ipv6_hdr(skb)->daddr);
1070 			if (!neigh)
1071 				goto tx_err_link_failure;
1072 
1073 			addr6 = (struct in6_addr *)&neigh->primary_key;
1074 			addr_type = ipv6_addr_type(addr6);
1075 
1076 			if (addr_type == IPV6_ADDR_ANY)
1077 				addr6 = &ipv6_hdr(skb)->daddr;
1078 
1079 			memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1080 			neigh_release(neigh);
1081 		}
1082 	} else if (!(t->parms.flags &
1083 		     (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
1084 		/* enable the cache only if the routing decision does
1085 		 * not depend on the current inner header value
1086 		 */
1087 		use_cache = true;
1088 	}
1089 
1090 	if (use_cache)
1091 		dst = dst_cache_get(&t->dst_cache);
1092 
1093 	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
1094 		goto tx_err_link_failure;
1095 
1096 	if (!dst) {
1097 route_lookup:
1098 		dst = ip6_route_output(net, NULL, fl6);
1099 
1100 		if (dst->error)
1101 			goto tx_err_link_failure;
1102 		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
1103 		if (IS_ERR(dst)) {
1104 			err = PTR_ERR(dst);
1105 			dst = NULL;
1106 			goto tx_err_link_failure;
1107 		}
1108 		if (t->parms.collect_md &&
1109 		    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
1110 				       &fl6->daddr, 0, &fl6->saddr))
1111 			goto tx_err_link_failure;
1112 		ndst = dst;
1113 	}
1114 
1115 	tdev = dst->dev;
1116 
1117 	if (tdev == dev) {
1118 		stats->collisions++;
1119 		net_warn_ratelimited("%s: Local routing loop detected!\n",
1120 				     t->parms.name);
1121 		goto tx_err_dst_release;
1122 	}
1123 	mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
1124 	if (encap_limit >= 0) {
1125 		max_headroom += 8;
1126 		mtu -= 8;
1127 	}
1128 	if (mtu < IPV6_MIN_MTU)
1129 		mtu = IPV6_MIN_MTU;
1130 	if (skb_dst(skb) && !t->parms.collect_md)
1131 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
1132 	if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) {
1133 		*pmtu = mtu;
1134 		err = -EMSGSIZE;
1135 		goto tx_err_dst_release;
1136 	}
1137 
1138 	if (t->err_count > 0) {
1139 		if (time_before(jiffies,
1140 				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
1141 			t->err_count--;
1142 
1143 			dst_link_failure(skb);
1144 		} else {
1145 			t->err_count = 0;
1146 		}
1147 	}
1148 
1149 	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
1150 
1151 	/*
1152 	 * Okay, now see if we can stuff it in the buffer as-is.
1153 	 */
1154 	max_headroom += LL_RESERVED_SPACE(tdev);
1155 
1156 	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
1157 	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
1158 		struct sk_buff *new_skb;
1159 
1160 		new_skb = skb_realloc_headroom(skb, max_headroom);
1161 		if (!new_skb)
1162 			goto tx_err_dst_release;
1163 
1164 		if (skb->sk)
1165 			skb_set_owner_w(new_skb, skb->sk);
1166 		consume_skb(skb);
1167 		skb = new_skb;
1168 	}
1169 
1170 	if (t->parms.collect_md) {
1171 		if (t->encap.type != TUNNEL_ENCAP_NONE)
1172 			goto tx_err_dst_release;
1173 	} else {
1174 		if (use_cache && ndst)
1175 			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
1176 	}
1177 	skb_dst_set(skb, dst);
1178 
1179 	if (encap_limit >= 0) {
1180 		init_tel_txopt(&opt, encap_limit);
1181 		ipv6_push_frag_opts(skb, &opt.ops, &proto);
1182 	}
1183 
1184 	/* Calculate max headroom for all the headers and adjust
1185 	 * needed_headroom if necessary.
1186 	 */
1187 	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
1188 			+ dst->header_len + t->hlen;
1189 	if (max_headroom > dev->needed_headroom)
1190 		dev->needed_headroom = max_headroom;
1191 
1192 	err = ip6_tnl_encap(skb, t, &proto, fl6);
1193 	if (err)
1194 		return err;
1195 
1196 	skb_push(skb, sizeof(struct ipv6hdr));
1197 	skb_reset_network_header(skb);
1198 	ipv6h = ipv6_hdr(skb);
1199 	ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
1200 		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
1201 	ipv6h->hop_limit = hop_limit;
1202 	ipv6h->nexthdr = proto;
1203 	ipv6h->saddr = fl6->saddr;
1204 	ipv6h->daddr = fl6->daddr;
1205 	ip6tunnel_xmit(NULL, skb, dev);
1206 	return 0;
1207 tx_err_link_failure:
1208 	stats->tx_carrier_errors++;
1209 	dst_link_failure(skb);
1210 tx_err_dst_release:
1211 	dst_release(dst);
1212 	return err;
1213 }
1214 EXPORT_SYMBOL(ip6_tnl_xmit);
1215 
1216 static inline int
1217 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1218 {
1219 	struct ip6_tnl *t = netdev_priv(dev);
1220 	const struct iphdr  *iph = ip_hdr(skb);
1221 	int encap_limit = -1;
1222 	struct flowi6 fl6;
1223 	__u8 dsfield;
1224 	__u32 mtu;
1225 	u8 tproto;
1226 	int err;
1227 
1228 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1229 
1230 	tproto = ACCESS_ONCE(t->parms.proto);
1231 	if (tproto != IPPROTO_IPIP && tproto != 0)
1232 		return -1;
1233 
1234 	dsfield = ipv4_get_dsfield(iph);
1235 
1236 	if (t->parms.collect_md) {
1237 		struct ip_tunnel_info *tun_info;
1238 		const struct ip_tunnel_key *key;
1239 
1240 		tun_info = skb_tunnel_info(skb);
1241 		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1242 			     ip_tunnel_info_af(tun_info) != AF_INET6))
1243 			return -1;
1244 		key = &tun_info->key;
1245 		memset(&fl6, 0, sizeof(fl6));
1246 		fl6.flowi6_proto = IPPROTO_IPIP;
1247 		fl6.daddr = key->u.ipv6.dst;
1248 		fl6.flowlabel = key->label;
1249 	} else {
1250 		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1251 			encap_limit = t->parms.encap_limit;
1252 
1253 		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1254 		fl6.flowi6_proto = IPPROTO_IPIP;
1255 
1256 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1257 			fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
1258 					 & IPV6_TCLASS_MASK;
1259 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1260 			fl6.flowi6_mark = skb->mark;
1261 		else
1262 			fl6.flowi6_mark = t->parms.fwmark;
1263 	}
1264 
1265 	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1266 
1267 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1268 		return -1;
1269 
1270 	skb_set_inner_ipproto(skb, IPPROTO_IPIP);
1271 
1272 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1273 			   IPPROTO_IPIP);
1274 	if (err != 0) {
1275 		/* XXX: send ICMP error even if DF is not set. */
1276 		if (err == -EMSGSIZE)
1277 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
1278 				  htonl(mtu));
1279 		return -1;
1280 	}
1281 
1282 	return 0;
1283 }
1284 
1285 static inline int
1286 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1287 {
1288 	struct ip6_tnl *t = netdev_priv(dev);
1289 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1290 	int encap_limit = -1;
1291 	__u16 offset;
1292 	struct flowi6 fl6;
1293 	__u8 dsfield;
1294 	__u32 mtu;
1295 	u8 tproto;
1296 	int err;
1297 
1298 	tproto = ACCESS_ONCE(t->parms.proto);
1299 	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
1300 	    ip6_tnl_addr_conflict(t, ipv6h))
1301 		return -1;
1302 
1303 	dsfield = ipv6_get_dsfield(ipv6h);
1304 
1305 	if (t->parms.collect_md) {
1306 		struct ip_tunnel_info *tun_info;
1307 		const struct ip_tunnel_key *key;
1308 
1309 		tun_info = skb_tunnel_info(skb);
1310 		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1311 			     ip_tunnel_info_af(tun_info) != AF_INET6))
1312 			return -1;
1313 		key = &tun_info->key;
1314 		memset(&fl6, 0, sizeof(fl6));
1315 		fl6.flowi6_proto = IPPROTO_IPV6;
1316 		fl6.daddr = key->u.ipv6.dst;
1317 		fl6.flowlabel = key->label;
1318 	} else {
1319 		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1320 		/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
1321 		ipv6h = ipv6_hdr(skb);
1322 		if (offset > 0) {
1323 			struct ipv6_tlv_tnl_enc_lim *tel;
1324 
1325 			tel = (void *)&skb_network_header(skb)[offset];
1326 			if (tel->encap_limit == 0) {
1327 				icmpv6_send(skb, ICMPV6_PARAMPROB,
1328 					    ICMPV6_HDR_FIELD, offset + 2);
1329 				return -1;
1330 			}
1331 			encap_limit = tel->encap_limit - 1;
1332 		} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
1333 			encap_limit = t->parms.encap_limit;
1334 		}
1335 
1336 		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1337 		fl6.flowi6_proto = IPPROTO_IPV6;
1338 
1339 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1340 			fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK);
1341 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
1342 			fl6.flowlabel |= ip6_flowlabel(ipv6h);
1343 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1344 			fl6.flowi6_mark = skb->mark;
1345 		else
1346 			fl6.flowi6_mark = t->parms.fwmark;
1347 	}
1348 
1349 	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1350 
1351 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1352 		return -1;
1353 
1354 	skb_set_inner_ipproto(skb, IPPROTO_IPV6);
1355 
1356 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1357 			   IPPROTO_IPV6);
1358 	if (err != 0) {
1359 		if (err == -EMSGSIZE)
1360 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1361 		return -1;
1362 	}
1363 
1364 	return 0;
1365 }
1366 
1367 static netdev_tx_t
1368 ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
1369 {
1370 	struct ip6_tnl *t = netdev_priv(dev);
1371 	struct net_device_stats *stats = &t->dev->stats;
1372 	int ret;
1373 
1374 	switch (skb->protocol) {
1375 	case htons(ETH_P_IP):
1376 		ret = ip4ip6_tnl_xmit(skb, dev);
1377 		break;
1378 	case htons(ETH_P_IPV6):
1379 		ret = ip6ip6_tnl_xmit(skb, dev);
1380 		break;
1381 	default:
1382 		goto tx_err;
1383 	}
1384 
1385 	if (ret < 0)
1386 		goto tx_err;
1387 
1388 	return NETDEV_TX_OK;
1389 
1390 tx_err:
1391 	stats->tx_errors++;
1392 	stats->tx_dropped++;
1393 	kfree_skb(skb);
1394 	return NETDEV_TX_OK;
1395 }
1396 
1397 static void ip6_tnl_link_config(struct ip6_tnl *t)
1398 {
1399 	struct net_device *dev = t->dev;
1400 	struct __ip6_tnl_parm *p = &t->parms;
1401 	struct flowi6 *fl6 = &t->fl.u.ip6;
1402 	int t_hlen;
1403 
1404 	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1405 	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1406 
1407 	/* Set up flowi template */
1408 	fl6->saddr = p->laddr;
1409 	fl6->daddr = p->raddr;
1410 	fl6->flowi6_oif = p->link;
1411 	fl6->flowlabel = 0;
1412 
1413 	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1414 		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1415 	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1416 		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1417 
1418 	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1419 	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1420 
1421 	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
1422 		dev->flags |= IFF_POINTOPOINT;
1423 	else
1424 		dev->flags &= ~IFF_POINTOPOINT;
1425 
1426 	t->tun_hlen = 0;
1427 	t->hlen = t->encap_hlen + t->tun_hlen;
1428 	t_hlen = t->hlen + sizeof(struct ipv6hdr);
1429 
1430 	if (p->flags & IP6_TNL_F_CAP_XMIT) {
1431 		int strict = (ipv6_addr_type(&p->raddr) &
1432 			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1433 
1434 		struct rt6_info *rt = rt6_lookup(t->net,
1435 						 &p->raddr, &p->laddr,
1436 						 p->link, strict);
1437 
1438 		if (!rt)
1439 			return;
1440 
1441 		if (rt->dst.dev) {
1442 			dev->hard_header_len = rt->dst.dev->hard_header_len +
1443 				t_hlen;
1444 
1445 			dev->mtu = rt->dst.dev->mtu - t_hlen;
1446 			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1447 				dev->mtu -= 8;
1448 
1449 			if (dev->mtu < IPV6_MIN_MTU)
1450 				dev->mtu = IPV6_MIN_MTU;
1451 		}
1452 		ip6_rt_put(rt);
1453 	}
1454 }
1455 
1456 /**
1457  * ip6_tnl_change - update the tunnel parameters
1458  *   @t: tunnel to be changed
1459  *   @p: tunnel configuration parameters
1460  *
1461  * Description:
1462  *   ip6_tnl_change() updates the tunnel parameters
1463  **/
1464 
1465 static int
1466 ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
1467 {
1468 	t->parms.laddr = p->laddr;
1469 	t->parms.raddr = p->raddr;
1470 	t->parms.flags = p->flags;
1471 	t->parms.hop_limit = p->hop_limit;
1472 	t->parms.encap_limit = p->encap_limit;
1473 	t->parms.flowinfo = p->flowinfo;
1474 	t->parms.link = p->link;
1475 	t->parms.proto = p->proto;
1476 	t->parms.fwmark = p->fwmark;
1477 	dst_cache_reset(&t->dst_cache);
1478 	ip6_tnl_link_config(t);
1479 	return 0;
1480 }
1481 
1482 static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1483 {
1484 	struct net *net = t->net;
1485 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1486 	int err;
1487 
1488 	ip6_tnl_unlink(ip6n, t);
1489 	synchronize_net();
1490 	err = ip6_tnl_change(t, p);
1491 	ip6_tnl_link(ip6n, t);
1492 	netdev_state_change(t->dev);
1493 	return err;
1494 }
1495 
1496 static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1497 {
1498 	/* for the default tnl0 device, allow changing only the proto */
1499 	t->parms.proto = p->proto;
1500 	netdev_state_change(t->dev);
1501 	return 0;
1502 }
1503 
1504 static void
1505 ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
1506 {
1507 	p->laddr = u->laddr;
1508 	p->raddr = u->raddr;
1509 	p->flags = u->flags;
1510 	p->hop_limit = u->hop_limit;
1511 	p->encap_limit = u->encap_limit;
1512 	p->flowinfo = u->flowinfo;
1513 	p->link = u->link;
1514 	p->proto = u->proto;
1515 	memcpy(p->name, u->name, sizeof(u->name));
1516 }
1517 
1518 static void
1519 ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
1520 {
1521 	u->laddr = p->laddr;
1522 	u->raddr = p->raddr;
1523 	u->flags = p->flags;
1524 	u->hop_limit = p->hop_limit;
1525 	u->encap_limit = p->encap_limit;
1526 	u->flowinfo = p->flowinfo;
1527 	u->link = p->link;
1528 	u->proto = p->proto;
1529 	memcpy(u->name, p->name, sizeof(u->name));
1530 }
1531 
1532 /**
1533  * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
1534  *   @dev: virtual device associated with tunnel
1535  *   @ifr: parameters passed from userspace
1536  *   @cmd: command to be performed
1537  *
1538  * Description:
1539  *   ip6_tnl_ioctl() is used for managing IPv6 tunnels
1540  *   from userspace.
1541  *
1542  *   The possible commands are the following:
1543  *     %SIOCGETTUNNEL: get tunnel parameters for device
1544  *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
1545  *     %SIOCCHGTUNNEL: change tunnel parameters to those given
1546  *     %SIOCDELTUNNEL: delete tunnel
1547  *
1548  *   The fallback device "ip6tnl0", created during module
1549  *   initialization, can be used for creating other tunnel devices.
1550  *
1551  * Return:
1552  *   0 on success,
1553  *   %-EFAULT if unable to copy data to or from userspace,
1554  *   %-EPERM if the current process lacks %CAP_NET_ADMIN,
1555  *   %-EINVAL if passed tunnel parameters are invalid,
1556  *   %-EEXIST if changing a tunnel's parameters would cause a conflict
1557  *   %-ENODEV if attempting to change or delete a nonexistent device
1558  **/
1559 
1560 static int
1561 ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1562 {
1563 	int err = 0;
1564 	struct ip6_tnl_parm p;
1565 	struct __ip6_tnl_parm p1;
1566 	struct ip6_tnl *t = netdev_priv(dev);
1567 	struct net *net = t->net;
1568 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1569 
1570 	memset(&p1, 0, sizeof(p1));
1571 
1572 	switch (cmd) {
1573 	case SIOCGETTUNNEL:
1574 		if (dev == ip6n->fb_tnl_dev) {
1575 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1576 				err = -EFAULT;
1577 				break;
1578 			}
1579 			ip6_tnl_parm_from_user(&p1, &p);
1580 			t = ip6_tnl_locate(net, &p1, 0);
1581 			if (IS_ERR(t))
1582 				t = netdev_priv(dev);
1583 		} else {
1584 			memset(&p, 0, sizeof(p));
1585 		}
1586 		ip6_tnl_parm_to_user(&p, &t->parms);
1587 		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
1588 			err = -EFAULT;
1589 		}
1590 		break;
1591 	case SIOCADDTUNNEL:
1592 	case SIOCCHGTUNNEL:
1593 		err = -EPERM;
1594 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1595 			break;
1596 		err = -EFAULT;
1597 		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1598 			break;
1599 		err = -EINVAL;
1600 		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1601 		    p.proto != 0)
1602 			break;
1603 		ip6_tnl_parm_from_user(&p1, &p);
1604 		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
1605 		if (cmd == SIOCCHGTUNNEL) {
1606 			if (!IS_ERR(t)) {
1607 				if (t->dev != dev) {
1608 					err = -EEXIST;
1609 					break;
1610 				}
1611 			} else
1612 				t = netdev_priv(dev);
1613 			if (dev == ip6n->fb_tnl_dev)
1614 				err = ip6_tnl0_update(t, &p1);
1615 			else
1616 				err = ip6_tnl_update(t, &p1);
1617 		}
1618 		if (!IS_ERR(t)) {
1619 			err = 0;
1620 			ip6_tnl_parm_to_user(&p, &t->parms);
1621 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1622 				err = -EFAULT;
1623 
1624 		} else {
1625 			err = PTR_ERR(t);
1626 		}
1627 		break;
1628 	case SIOCDELTUNNEL:
1629 		err = -EPERM;
1630 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1631 			break;
1632 
1633 		if (dev == ip6n->fb_tnl_dev) {
1634 			err = -EFAULT;
1635 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1636 				break;
1637 			err = -ENOENT;
1638 			ip6_tnl_parm_from_user(&p1, &p);
1639 			t = ip6_tnl_locate(net, &p1, 0);
1640 			if (IS_ERR(t))
1641 				break;
1642 			err = -EPERM;
1643 			if (t->dev == ip6n->fb_tnl_dev)
1644 				break;
1645 			dev = t->dev;
1646 		}
1647 		err = 0;
1648 		unregister_netdevice(dev);
1649 		break;
1650 	default:
1651 		err = -EINVAL;
1652 	}
1653 	return err;
1654 }
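/* Usage sketch (editorial example, not part of the original file): a
 * minimal userspace program that drives the SIOCADDTUNNEL branch above
 * through the fallback device.  The tunnel name and addresses are
 * placeholder values; the caller needs CAP_NET_ADMIN.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <net/if.h>
 *	#include <arpa/inet.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_tunnel.h>
 *	#include <linux/ip6_tunnel.h>
 *
 *	int main(void)
 *	{
 *		struct ip6_tnl_parm p;
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&p, 0, sizeof(p));
 *		strncpy(p.name, "mytun0", IFNAMSIZ - 1);
 *		p.proto = IPPROTO_IPV6;			// ip6ip6 mode
 *		p.hop_limit = 64;
 *		inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
 *		inet_pton(AF_INET6, "2001:db8::2", &p.raddr);
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "ip6tnl0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (void *)&p;
 *		if (ioctl(fd, SIOCADDTUNNEL, &ifr) < 0)
 *			perror("SIOCADDTUNNEL");
 *		close(fd);
 *		return 0;
 *	}
 */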
1655 
1656 /**
1657  * ip6_tnl_change_mtu - change mtu manually for tunnel device
1658  *   @dev: virtual device associated with tunnel
1659  *   @new_mtu: the new mtu
1660  *
1661  * Return:
1662  *   0 on success,
1663  *   %-EINVAL if mtu too small
1664  **/
1665 
1666 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1667 {
1668 	struct ip6_tnl *tnl = netdev_priv(dev);
1669 
1670 	if (tnl->parms.proto == IPPROTO_IPV6) {
1671 		if (new_mtu < IPV6_MIN_MTU)
1672 			return -EINVAL;
1673 	} else {
1674 		if (new_mtu < ETH_MIN_MTU)
1675 			return -EINVAL;
1676 	}
1677 	if (new_mtu > 0xFFF8 - dev->hard_header_len)
1678 		return -EINVAL;
1679 	dev->mtu = new_mtu;
1680 	return 0;
1681 }
1682 EXPORT_SYMBOL(ip6_tnl_change_mtu);
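/* Editorial note on the upper bound above: 0xFFF8 is the maximum IPv6
 * payload length (65535) rounded down to a multiple of 8.  With, say,
 * dev->hard_header_len == 176 (a hypothetical value), the largest MTU the
 * check accepts would be 0xFFF8 - 176 = 65352.
 */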
1683 
1684 int ip6_tnl_get_iflink(const struct net_device *dev)
1685 {
1686 	struct ip6_tnl *t = netdev_priv(dev);
1687 
1688 	return t->parms.link;
1689 }
1690 EXPORT_SYMBOL(ip6_tnl_get_iflink);
1691 
1692 int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
1693 			  unsigned int num)
1694 {
1695 	if (num >= MAX_IPTUN_ENCAP_OPS)
1696 		return -ERANGE;
1697 
1698 	return !cmpxchg((const struct ip6_tnl_encap_ops **)
1699 			&ip6tun_encaps[num],
1700 			NULL, ops) ? 0 : -1;
1701 }
1702 EXPORT_SYMBOL(ip6_tnl_encap_add_ops);
1703 
1704 int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
1705 			  unsigned int num)
1706 {
1707 	int ret;
1708 
1709 	if (num >= MAX_IPTUN_ENCAP_OPS)
1710 		return -ERANGE;
1711 
1712 	ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
1713 		       &ip6tun_encaps[num],
1714 		       ops, NULL) == ops) ? 0 : -1;
1715 
1716 	synchronize_net();
1717 
1718 	return ret;
1719 }
1720 EXPORT_SYMBOL(ip6_tnl_encap_del_ops);
1721 
1722 int ip6_tnl_encap_setup(struct ip6_tnl *t,
1723 			struct ip_tunnel_encap *ipencap)
1724 {
1725 	int hlen;
1726 
1727 	memset(&t->encap, 0, sizeof(t->encap));
1728 
1729 	hlen = ip6_encap_hlen(ipencap);
1730 	if (hlen < 0)
1731 		return hlen;
1732 
1733 	t->encap.type = ipencap->type;
1734 	t->encap.sport = ipencap->sport;
1735 	t->encap.dport = ipencap->dport;
1736 	t->encap.flags = ipencap->flags;
1737 
1738 	t->encap_hlen = hlen;
1739 	t->hlen = t->encap_hlen + t->tun_hlen;
1740 
1741 	return 0;
1742 }
1743 EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
1744 
1745 static const struct net_device_ops ip6_tnl_netdev_ops = {
1746 	.ndo_init	= ip6_tnl_dev_init,
1747 	.ndo_uninit	= ip6_tnl_dev_uninit,
1748 	.ndo_start_xmit = ip6_tnl_start_xmit,
1749 	.ndo_do_ioctl	= ip6_tnl_ioctl,
1750 	.ndo_change_mtu = ip6_tnl_change_mtu,
1751 	.ndo_get_stats	= ip6_get_stats,
1752 	.ndo_get_iflink = ip6_tnl_get_iflink,
1753 };
1754 
1755 #define IPXIPX_FEATURES (NETIF_F_SG |		\
1756 			 NETIF_F_FRAGLIST |	\
1757 			 NETIF_F_HIGHDMA |	\
1758 			 NETIF_F_GSO_SOFTWARE |	\
1759 			 NETIF_F_HW_CSUM)
1760 
1761 /**
1762  * ip6_tnl_dev_setup - setup virtual tunnel device
1763  *   @dev: virtual device associated with tunnel
1764  *
1765  * Description:
1766  *   Initialize function pointers and device parameters
1767  **/
1768 
1769 static void ip6_tnl_dev_setup(struct net_device *dev)
1770 {
1771 	dev->netdev_ops = &ip6_tnl_netdev_ops;
1772 	dev->destructor = ip6_dev_free;
1773 
1774 	dev->type = ARPHRD_TUNNEL6;
1775 	dev->flags |= IFF_NOARP;
1776 	dev->addr_len = sizeof(struct in6_addr);
1777 	dev->features |= NETIF_F_LLTX;
1778 	netif_keep_dst(dev);
1779 
1780 	dev->features		|= IPXIPX_FEATURES;
1781 	dev->hw_features	|= IPXIPX_FEATURES;
1782 
1783 	/* This perm addr will be used as interface identifier by IPv6 */
1784 	dev->addr_assign_type = NET_ADDR_RANDOM;
1785 	eth_random_addr(dev->perm_addr);
1786 }
1787 
1788 
1789 /**
1790  * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
1791  *   @dev: virtual device associated with tunnel
1792  **/
1793 
1794 static inline int
1795 ip6_tnl_dev_init_gen(struct net_device *dev)
1796 {
1797 	struct ip6_tnl *t = netdev_priv(dev);
1798 	int ret;
1799 	int t_hlen;
1800 
1801 	t->dev = dev;
1802 	t->net = dev_net(dev);
1803 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1804 	if (!dev->tstats)
1805 		return -ENOMEM;
1806 
1807 	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
1808 	if (ret)
1809 		goto free_stats;
1810 
1811 	ret = gro_cells_init(&t->gro_cells, dev);
1812 	if (ret)
1813 		goto destroy_dst;
1814 
1815 	t->tun_hlen = 0;
1816 	t->hlen = t->encap_hlen + t->tun_hlen;
1817 	t_hlen = t->hlen + sizeof(struct ipv6hdr);
1818 
1819 	dev->type = ARPHRD_TUNNEL6;
1820 	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1821 	dev->mtu = ETH_DATA_LEN - t_hlen;
1822 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1823 		dev->mtu -= 8;
1824 	dev->min_mtu = ETH_MIN_MTU;
1825 	dev->max_mtu = 0xFFF8 - dev->hard_header_len;
1826 
1827 	return 0;
1828 
1829 destroy_dst:
1830 	dst_cache_destroy(&t->dst_cache);
1831 free_stats:
1832 	free_percpu(dev->tstats);
1833 	dev->tstats = NULL;
1834 
1835 	return ret;
1836 }
1837 
1838 /**
1839  * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
1840  *   @dev: virtual device associated with tunnel
1841  **/
1842 
1843 static int ip6_tnl_dev_init(struct net_device *dev)
1844 {
1845 	struct ip6_tnl *t = netdev_priv(dev);
1846 	int err = ip6_tnl_dev_init_gen(dev);
1847 
1848 	if (err)
1849 		return err;
1850 	ip6_tnl_link_config(t);
1851 	if (t->parms.collect_md) {
1852 		dev->features |= NETIF_F_NETNS_LOCAL;
1853 		netif_keep_dst(dev);
1854 	}
1855 	return 0;
1856 }
1857 
1858 /**
1859  * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
1860  *   @dev: fallback device
1861  *
1862  * Return: 0
1863  **/
1864 
1865 static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1866 {
1867 	struct ip6_tnl *t = netdev_priv(dev);
1868 	struct net *net = dev_net(dev);
1869 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1870 
1871 	t->parms.proto = IPPROTO_IPV6;
1872 	dev_hold(dev);
1873 
1874 	rcu_assign_pointer(ip6n->tnls_wc[0], t);
1875 	return 0;
1876 }
1877 
1878 static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
1879 {
1880 	u8 proto;
1881 
1882 	if (!data || !data[IFLA_IPTUN_PROTO])
1883 		return 0;
1884 
1885 	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1886 	if (proto != IPPROTO_IPV6 &&
1887 	    proto != IPPROTO_IPIP &&
1888 	    proto != 0)
1889 		return -EINVAL;
1890 
1891 	return 0;
1892 }
1893 
1894 static void ip6_tnl_netlink_parms(struct nlattr *data[],
1895 				  struct __ip6_tnl_parm *parms)
1896 {
1897 	memset(parms, 0, sizeof(*parms));
1898 
1899 	if (!data)
1900 		return;
1901 
1902 	if (data[IFLA_IPTUN_LINK])
1903 		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
1904 
1905 	if (data[IFLA_IPTUN_LOCAL])
1906 		parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);
1907 
1908 	if (data[IFLA_IPTUN_REMOTE])
1909 		parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);
1910 
1911 	if (data[IFLA_IPTUN_TTL])
1912 		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
1913 
1914 	if (data[IFLA_IPTUN_ENCAP_LIMIT])
1915 		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);
1916 
1917 	if (data[IFLA_IPTUN_FLOWINFO])
1918 		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);
1919 
1920 	if (data[IFLA_IPTUN_FLAGS])
1921 		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);
1922 
1923 	if (data[IFLA_IPTUN_PROTO])
1924 		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1925 
1926 	if (data[IFLA_IPTUN_COLLECT_METADATA])
1927 		parms->collect_md = true;
1928 
1929 	if (data[IFLA_IPTUN_FWMARK])
1930 		parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
1931 }
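/* Editorial note: these attributes are what the iproute2 front end fills
 * in; a typical invocation exercising this parser (placeholder addresses)
 * would be:
 *
 *	ip link add name tun1 type ip6tnl mode ip6ip6 \
 *		local 2001:db8::1 remote 2001:db8::2 ttl 64 encaplimit 4
 */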
1932 
1933 static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
1934 					struct ip_tunnel_encap *ipencap)
1935 {
1936 	bool ret = false;
1937 
1938 	memset(ipencap, 0, sizeof(*ipencap));
1939 
1940 	if (!data)
1941 		return ret;
1942 
1943 	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
1944 		ret = true;
1945 		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
1946 	}
1947 
1948 	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
1949 		ret = true;
1950 		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
1951 	}
1952 
1953 	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
1954 		ret = true;
1955 		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
1956 	}
1957 
1958 	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
1959 		ret = true;
1960 		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
1961 	}
1962 
1963 	return ret;
1964 }
1965 
static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[])
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *nt, *t;
	struct ip_tunnel_encap ipencap;

	nt = netdev_priv(dev);

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return err;
	}

	ip6_tnl_netlink_parms(data, &nt->parms);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ip6n->collect_md_tun))
			return -EEXIST;
	} else {
		t = ip6_tnl_locate(net, &nt->parms, 0);
		if (!IS_ERR(t))
			return -EEXIST;
	}

	return ip6_tnl_create2(dev);
}

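/**
 * ip6_tnl_changelink - reconfigure an existing tunnel from netlink
 *   @dev: tunnel device to change
 *   @tb: generic link-level netlink attributes
 *   @data: tunnel-specific netlink attributes
 *
 * Description:
 *   The fallback device cannot be reconfigured, and collect_md cannot
 *   be enabled on an existing tunnel.
 *
 * Return: 0 on success, %-EINVAL or %-EEXIST on conflicting parameters
 **/
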
static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
			      struct nlattr *data[])
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm p;
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ip6n->fb_tnl_dev)
		return -EINVAL;

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}
	ip6_tnl_netlink_parms(data, &p);
	if (p.collect_md)
		return -EINVAL;

	t = ip6_tnl_locate(net, &p, 0);
	if (!IS_ERR(t)) {
		if (t->dev != dev)
			return -EEXIST;
	} else
		t = netdev_priv(dev);

	return ip6_tnl_update(t, &p);
}

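/**
 * ip6_tnl_dellink - queue a tunnel device for unregistration
 *   @dev: tunnel device to delete
 *   @head: list to queue the device on
 *
 * Description:
 *   The fallback device is never deleted this way; it only goes away
 *   when its namespace does.
 **/
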
static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev != ip6n->fb_tnl_dev)
		unregister_netdevice_queue(dev, head);
}

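/**
 * ip6_tnl_get_size - compute the netlink attribute payload size
 *   @dev: tunnel device (all tunnels dump the same attribute set)
 *
 * Return: an upper bound on the space needed by ip6_tnl_fill_info()
 **/
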
static size_t ip6_tnl_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_IPTUN_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_IPTUN_FLAGS */
		nla_total_size(4) +
		/* IFLA_IPTUN_PROTO */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_IPTUN_FWMARK */
		nla_total_size(4) +
		0;
}

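/**
 * ip6_tnl_fill_info - dump tunnel state as netlink attributes
 *   @skb: netlink message being filled in
 *   @dev: tunnel device to dump
 *
 * Return: 0 on success, %-EMSGSIZE if @skb runs out of room
 **/
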
static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	struct __ip6_tnl_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
		goto nla_put_failure;

	if (parm->collect_md)
		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
			goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

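/**
 * ip6_tnl_get_link_net - return the link namespace of a tunnel
 *   @dev: tunnel device
 *
 * Return: the namespace the tunnel was created in, i.e. the one its
 *   encapsulated packets are routed in, which may differ from the
 *   namespace the device itself currently lives in
 **/
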
struct net *ip6_tnl_get_link_net(const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	return tunnel->net;
}
EXPORT_SYMBOL(ip6_tnl_get_link_net);

static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_IPTUN_FWMARK]		= { .type = NLA_U32 },
};

static struct rtnl_link_ops ip6_link_ops __read_mostly = {
	.kind		= "ip6tnl",
	.maxtype	= IFLA_IPTUN_MAX,
	.policy		= ip6_tnl_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6_tnl_dev_setup,
	.validate	= ip6_tnl_validate,
	.newlink	= ip6_tnl_newlink,
	.changelink	= ip6_tnl_changelink,
	.dellink	= ip6_tnl_dellink,
	.get_size	= ip6_tnl_get_size,
	.fill_info	= ip6_tnl_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};

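/* The rtnl_link_ops above are driven from userspace via rtnetlink.
 * As an illustrative example (iproute2, not part of this file; exact
 * flags depend on the iproute2 version):
 *
 *   ip link add ip6tnl1 type ip6tnl mode ip6ip6 \
 *           local 2001:db8::1 remote 2001:db8::2
 *
 * ends up in ip6_tnl_newlink(), while "ip -d link show ip6tnl1"
 * dumps the attributes filled in by ip6_tnl_fill_info().
 */

/* Inbound handlers for the two supported payload protocols,
 * registered with the xfrm6 tunnel infrastructure in
 * ip6_tunnel_init() below.
 */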
static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
	.handler	= ip4ip6_rcv,
	.err_handler	= ip4ip6_err,
	.priority	= 1,
};

static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
	.handler	= ip6ip6_rcv,
	.err_handler	= ip6ip6_err,
	.priority	= 1,
};

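/**
 * ip6_tnl_destroy_tunnels - destroy all tunnels in a namespace
 *   @net: network namespace being shut down
 *
 * Description:
 *   Queues every tunnel device registered in @net, plus any tunnel
 *   hashed here whose device was moved to a different namespace, and
 *   unregisters them in a single batch.
 **/
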
static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct net_device *dev, *aux;
	int h;
	struct ip6_tnl *t;
	LIST_HEAD(list);

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6_link_ops)
			unregister_netdevice_queue(dev, &list);

	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
		t = rtnl_dereference(ip6n->tnls_r_l[h]);
		while (t) {
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, &list);
			t = rtnl_dereference(t->next);
		}
	}

	unregister_netdevice_many(&list);
}

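/**
 * ip6_tnl_init_net - initialize per-namespace tunnel state
 *   @net: network namespace being set up
 *
 * Description:
 *   Allocates and registers the fallback device ip6tnl0 for @net.
 *
 * Return: 0 on success, a negative error code on failure
 **/
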
static int __net_init ip6_tnl_init_net(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *t = NULL;
	int err;

	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;
	dev_net_set(ip6n->fb_tnl_dev, net);
	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;

	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	t = netdev_priv(ip6n->fb_tnl_dev);

	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
	return 0;

err_register:
	ip6_dev_free(ip6n->fb_tnl_dev);
err_alloc_dev:
	return err;
}

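/**
 * ip6_tnl_exit_net - destroy per-namespace tunnel state
 *   @net: network namespace being torn down
 **/
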
static void __net_exit ip6_tnl_exit_net(struct net *net)
{
	rtnl_lock();
	ip6_tnl_destroy_tunnels(net);
	rtnl_unlock();
}

static struct pernet_operations ip6_tnl_net_ops = {
	.init = ip6_tnl_init_net,
	.exit = ip6_tnl_exit_net,
	.id   = &ip6_tnl_net_id,
	.size = sizeof(struct ip6_tnl_net),
};

/**
 * ip6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success, a negative error code on failure
 **/

static int __init ip6_tunnel_init(void)
{
	int err;

	err = register_pernet_device(&ip6_tnl_net_ops);
	if (err < 0)
		goto out_pernet;

	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
	if (err < 0) {
		pr_err("%s: can't register ip4ip6\n", __func__);
		goto out_ip4ip6;
	}

	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
	if (err < 0) {
		pr_err("%s: can't register ip6ip6\n", __func__);
		goto out_ip6ip6;
	}
	err = rtnl_link_register(&ip6_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return 0;

rtnl_link_failed:
	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
out_ip6ip6:
	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
out_ip4ip6:
	unregister_pernet_device(&ip6_tnl_net_ops);
out_pernet:
	return err;
}

/**
 * ip6_tunnel_cleanup - free resources and unregister protocol
 **/

static void __exit ip6_tunnel_cleanup(void)
{
	rtnl_link_unregister(&ip6_link_ops);
	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
		pr_info("%s: can't deregister ip4ip6\n", __func__);

	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
		pr_info("%s: can't deregister ip6ip6\n", __func__);

	unregister_pernet_device(&ip6_tnl_net_ops);
}

module_init(ip6_tunnel_init);
module_exit(ip6_tunnel_cleanup);