// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPv6 tunneling device
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
 *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
 *
 *      Based on:
 *      linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
 *
 *      RFC 2473
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/etherdevice.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst_metadata.h>

MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ip6tnl");
MODULE_ALIAS_NETDEV("ip6tnl0");

#define IP6_TUNNEL_HASH_SIZE_SHIFT  5
#define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);

	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
}

static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
static struct rtnl_link_ops ip6_link_ops __read_mostly;

static unsigned int ip6_tnl_net_id __read_mostly;
struct ip6_tnl_net {
	/* the IPv6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
	struct ip6_tnl __rcu *collect_md_tun;
};
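
/* Note (based on the per-netns init code later in this file, outside
 * this excerpt): tnls[0] aliases tnls_wc, the single wildcard/fallback
 * slot, and tnls[1] aliases tnls_r_l, so lookups can index both tables
 * uniformly as tnls[prio][h].
 */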

static inline int ip6_tnl_mpls_supported(void)
{
	return IS_ENABLED(CONFIG_MPLS);
}

#define for_each_ip6_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
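
/* Note that for_each_ip6_tunnel_rcu() deliberately hard-codes the
 * iterator name: every caller must declare a local 'struct ip6_tnl *t'
 * before using it, as ip6_tnl_lookup() below does.
 */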

/**
 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 *   @net: network namespace
 *   @link: ifindex of underlying interface
 *   @remote: the address of the tunnel exit-point
 *   @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/

static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, int link,
	       const struct in6_addr *remote, const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t, *cand = NULL;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct in6_addr any;

	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (link == t->parms.link)
			return t;
		else
			cand = t;
	}

	memset(&any, 0, sizeof(any));
	hash = HASH(&any, local);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_any(&t->parms.raddr) ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (link == t->parms.link)
			return t;
		else if (!cand)
			cand = t;
	}

	hash = HASH(remote, &any);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
		    !ipv6_addr_any(&t->parms.laddr) ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (link == t->parms.link)
			return t;
		else if (!cand)
			cand = t;
	}

	if (cand)
		return cand;

	t = rcu_dereference(ip6n->collect_md_tun);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}
/**
 * ip6_tnl_bucket - get head of list matching given tunnel parameters
 *   @ip6n: the private data for ip6_tnl in the netns
 *   @p: parameters containing tunnel end-points
 *
 * Description:
 *   ip6_tnl_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/

static struct ip6_tnl __rcu **
ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote, local);
	}
	return &ip6n->tnls[prio][h];
}
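
/* Resulting layout, for reference: a tunnel with both end-points
 * wildcarded lands in tnls[0][0] (the single-entry tnls_wc table),
 * while a tunnel with at least one address set hashes into one of
 * the IP6_TUNNEL_HASH_SIZE buckets of tnls[1] (tnls_r_l).
 */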

/**
 * ip6_tnl_link - add tunnel to hash table
 *   @ip6n: the private data for ip6_tnl in the netns
 *   @t: tunnel to be added
 **/

static void
ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, t);
	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

/**
 * ip6_tnl_unlink - remove tunnel from hash table
 *   @ip6n: the private data for ip6_tnl in the netns
 *   @t: tunnel to be removed
 **/

static void
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, NULL);

	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static void ip6_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
	free_percpu(dev->tstats);
}

static int ip6_tnl_create2(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	dev->rtnl_link_ops = &ip6_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(t->parms.name, dev->name);

	ip6_tnl_link(ip6n, t);
	return 0;

out:
	return err;
}

/**
 * ip6_tnl_create - create a new tunnel
 *   @net: network namespace
 *   @p: tunnel parameters
 *
 * Description:
 *   Create tunnel matching given parameters.
 *
 * Return:
 *   created tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err = -E2BIG;

	if (p->name[0]) {
		if (!dev_valid_name(p->name))
			goto failed;
		strscpy(name, p->name, IFNAMSIZ);
	} else {
		sprintf(name, "ip6tnl%%d");
	}
	err = -ENOMEM;
	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6_tnl_dev_setup);
	if (!dev)
		goto failed;

	dev_net_set(dev, net);

	t = netdev_priv(dev);
	t->parms = *p;
	t->net = dev_net(dev);
	err = ip6_tnl_create2(dev);
	if (err < 0)
		goto failed_free;

	return t;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}

/**
 * ip6_tnl_locate - find or create tunnel matching given parameters
 *   @net: network namespace
 *   @p: tunnel parameters
 *   @create: nonzero if a new tunnel may be created when no match is found
 *
 * Description:
 *   ip6_tnl_locate() first tries to locate an existing tunnel
 *   based on @p. If this is unsuccessful, but @create is set, a new
 *   tunnel device is created and registered for use.
 *
 * Return:
 *   matching tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_locate(struct net *net,
		struct __ip6_tnl_parm *p, int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	for (tp = ip6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    p->link == t->parms.link) {
			if (create)
				return ERR_PTR(-EEXIST);

			return t;
		}
	}
	if (!create)
		return ERR_PTR(-ENODEV);
	return ip6_tnl_create(net, p);
}

/**
 * ip6_tnl_dev_uninit - tunnel device uninitializer
 *   @dev: the device to be destroyed
 *
 * Description:
 *   ip6_tnl_dev_uninit() removes tunnel from its list
 **/

static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		ip6_tnl_unlink(ip6n, t);
	dst_cache_reset(&t->dst_cache);
	netdev_put(dev, &t->dev_tracker);
}

/**
 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
 *   @skb: received socket buffer
 *   @raw: the ICMPv6 error message data
 *
 * Return:
 *   0 if no encapsulation limit option was found,
 *   else offset of the option relative to @raw
 **/

__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
	unsigned int nhoff = raw - skb->data;
	unsigned int off = nhoff + sizeof(*ipv6h);
	u8 next, nexthdr = ipv6h->nexthdr;

	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
		struct ipv6_opt_hdr *hdr;
		u16 optlen;

		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
			break;

		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag_hdr = (struct frag_hdr *)hdr;

			if (frag_hdr->frag_off)
				break;
			optlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			optlen = ipv6_authlen(hdr);
		} else {
			optlen = ipv6_optlen(hdr);
		}
		/* cache hdr->nexthdr, since pskb_may_pull() might
		 * invalidate hdr
		 */
		next = hdr->nexthdr;
		if (nexthdr == NEXTHDR_DEST) {
			u16 i = 2;

			/* Remember: hdr is no longer valid at this point. */
			if (!pskb_may_pull(skb, off + optlen))
				break;

			while (1) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				/* No more room for encapsulation limit */
				if (i + sizeof(*tel) > optlen)
					break;

				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
				/* return index of option if found and valid */
				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
				    tel->length == 1)
					return i + off - nhoff;
				/* else jump to next option */
				if (tel->type)
					i += tel->length + 2;
				else
					i++;
			}
		}
		nexthdr = next;
		off += optlen;
	}
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
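
/* Usage sketch (mirrors what ipxip6_tnl_xmit() does later in this
 * file): the returned offset must be re-applied through
 * skb_network_header() after the call, because pskb_may_pull() may
 * have reallocated skb->head:
 *
 *	__u16 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
 *
 *	if (offset > 0) {
 *		struct ipv6_tlv_tnl_enc_lim *tel;
 *
 *		tel = (void *)&skb_network_header(skb)[offset];
 *		// tel->encap_limit holds the remaining encapsulation budget
 *	}
 */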

/* ip6_tnl_err() should handle errors in the tunnel according to the
 * specifications in RFC 2473.
 */
static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	u8 rel_type = ICMPV6_DEST_UNREACH;
	u8 rel_code = ICMPV6_ADDR_UNREACH;
	__u32 rel_info = 0;
	struct ip6_tnl *t;
	int err = -ENOENT;
	int rel_msg = 0;
	u8 tproto;
	__u16 len;

	/* If the packet doesn't contain the original IPv6 header we are
	 * in trouble since we might need the source address for further
	 * processing of the error.
	 */

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->daddr, &ipv6h->saddr);
	if (!t)
		goto out;

	tproto = READ_ONCE(t->parms.proto);
	if (tproto != ipproto && tproto != 0)
		goto out;

	err = 0;

	switch (*type) {
	case ICMPV6_DEST_UNREACH:
		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
				    t->parms.name);
		rel_msg = 1;
		break;
	case ICMPV6_TIME_EXCEED:
		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					    t->parms.name);
			rel_msg = 1;
		}
		break;
	case ICMPV6_PARAMPROB: {
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 teli;

		teli = 0;
		if ((*code) == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == *info - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *)&skb->data[teli];
			if (tel->encap_limit == 0) {
				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						    t->parms.name);
				rel_msg = 1;
			}
		} else {
			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					    t->parms.name);
		}
		break;
	}
	case ICMPV6_PKT_TOOBIG: {
		__u32 mtu;

		ip6_update_pmtu(skb, net, htonl(*info), 0, 0,
				sock_net_uid(net, NULL));
		mtu = *info - offset;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
		if (len > mtu) {
			rel_type = ICMPV6_PKT_TOOBIG;
			rel_code = 0;
			rel_info = mtu;
			rel_msg = 1;
		}
		break;
	}
	case NDISC_REDIRECT:
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
		break;
	}

	*type = rel_type;
	*code = rel_code;
	*info = rel_info;
	*msg = rel_msg;

out:
	rcu_read_unlock();
	return err;
}

static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	__u32 rel_info = ntohl(info);
	const struct iphdr *eiph;
	struct sk_buff *skb2;
	int err, rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	struct rtable *rt;
	struct flowi4 fl4;

	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg == 0)
		return 0;

	switch (rel_type) {
	case ICMPV6_DEST_UNREACH:
		if (rel_code != ICMPV6_ADDR_UNREACH)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_HOST_UNREACH;
		break;
	case ICMPV6_PKT_TOOBIG:
		if (rel_code != 0)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_FRAG_NEEDED;
		break;
	default:
		return 0;
	}

	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
		return 0;

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return 0;

	skb_dst_drop(skb2);

	skb_pull(skb2, offset);
	skb_reset_network_header(skb2);
	eiph = ip_hdr(skb2);

	/* Try to guess incoming interface */
	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr,
				   0, 0, 0, IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
	if (IS_ERR(rt))
		goto out;

	skb2->dev = rt->dst.dev;
	ip_rt_put(rt);

	/* route "incoming" packet */
	if (rt->rt_flags & RTCF_LOCAL) {
		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
					   eiph->daddr, eiph->saddr, 0, 0,
					   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
		if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
			if (!IS_ERR(rt))
				ip_rt_put(rt);
			goto out;
		}
		skb_dst_set(skb2, &rt->dst);
	} else {
		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
				   skb2->dev) ||
		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
			goto out;
	}

	/* change mtu on this route */
	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
		if (rel_info > dst_mtu(skb_dst(skb2)))
			goto out;

		skb_dst_update_pmtu_no_confirm(skb2, rel_info);
	}

	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));

out:
	kfree_skb(skb2);
	return 0;
}

static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	__u32 rel_info = ntohl(info);
	int err, rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;

	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
		struct rt6_info *rt;
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		if (!skb2)
			return 0;

		skb_dst_drop(skb2);
		skb_pull(skb2, offset);
		skb_reset_network_header(skb2);

		/* Try to guess incoming interface */
		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
				NULL, 0, skb2, 0);

		if (rt && rt->dst.dev)
			skb2->dev = rt->dst.dev;

		icmpv6_send(skb2, rel_type, rel_code, rel_info);

		ip6_rt_put(rt);

		kfree_skb(skb2);
	}

	return 0;
}

static int
mplsip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	    u8 type, u8 code, int offset, __be32 info)
{
	__u32 rel_info = ntohl(info);
	int err, rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;

	err = ip6_tnl_err(skb, IPPROTO_MPLS, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	return err;
}

static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;

	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);

	return IP6_ECN_decapsulate(ipv6h, skb);
}

static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));

	return IP6_ECN_decapsulate(ipv6h, skb);
}

static inline int mplsip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
					       const struct ipv6hdr *ipv6h,
					       struct sk_buff *skb)
{
	/* ECN is not supported in AF_MPLS */
	return 0;
}

__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
		      const struct in6_addr *laddr,
		      const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ltype = ipv6_addr_type(laddr);
	int rtype = ipv6_addr_type(raddr);
	__u32 flags = 0;

	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
		flags = IP6_TNL_F_CAP_PER_PACKET;
	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
		if (ltype & IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_XMIT;
		if (rtype & IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_RCV;
	}
	return flags;
}
EXPORT_SYMBOL(ip6_tnl_get_cap);
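
/* Quick reference for the capability bits computed above (subject to
 * the loopback/link-local restrictions in the second branch):
 *
 *	laddr	raddr	result
 *	any/::	-	IP6_TNL_F_CAP_PER_PACKET (decided per packet)
 *	ucast	ucast	IP6_TNL_F_CAP_XMIT | IP6_TNL_F_CAP_RCV
 *	ucast	mcast	IP6_TNL_F_CAP_XMIT
 *	mcast	ucast	IP6_TNL_F_CAP_RCV
 */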

/* called with rcu_read_lock() */
int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
		    const struct in6_addr *laddr,
		    const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
		struct net_device *ldev = NULL;

		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if ((ipv6_addr_is_multicast(laddr) ||
		     likely(ipv6_chk_addr_and_flags(net, laddr, ldev, false,
						    0, IFA_F_TENTATIVE))) &&
		    ((p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) ||
		     likely(!ipv6_chk_addr_and_flags(net, raddr, ldev, true,
						     0, IFA_F_TENTATIVE))))
			ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);

static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
			 const struct tnl_ptk_info *tpi,
			 struct metadata_dst *tun_dst,
			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						     const struct ipv6hdr *ipv6h,
						     struct sk_buff *skb),
			 bool log_ecn_err)
{
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int err;

	if ((!(tpi->flags & TUNNEL_CSUM) &&
	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
	    ((tpi->flags & TUNNEL_CSUM) &&
	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
		DEV_STATS_INC(tunnel->dev, rx_crc_errors);
		DEV_STATS_INC(tunnel->dev, rx_errors);
		goto drop;
	}

	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
		if (!(tpi->flags & TUNNEL_SEQ) ||
		    (tunnel->i_seqno &&
		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			DEV_STATS_INC(tunnel->dev, rx_fifo_errors);
			DEV_STATS_INC(tunnel->dev, rx_errors);
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb->protocol = tpi->proto;

	/* Warning: All skb pointers will be invalidated! */
	if (tunnel->dev->type == ARPHRD_ETHER) {
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			DEV_STATS_INC(tunnel->dev, rx_length_errors);
			DEV_STATS_INC(tunnel->dev, rx_errors);
			goto drop;
		}

		ipv6h = ipv6_hdr(skb);
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
		skb_reset_mac_header(skb);
	}

	skb_reset_network_header(skb);
	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));

	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);

	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
	if (unlikely(err)) {
		if (log_ecn_err)
			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
					     &ipv6h->saddr,
					     ipv6_get_dsfield(ipv6h));
		if (err > 1) {
			DEV_STATS_INC(tunnel->dev, rx_frame_errors);
			DEV_STATS_INC(tunnel->dev, rx_errors);
			goto drop;
		}
	}

	dev_sw_netstats_rx_add(tunnel->dev, skb->len);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tun_dst)
		skb_dst_set(skb, (struct dst_entry *)tun_dst);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	if (tun_dst)
		dst_release((struct dst_entry *)tun_dst);
	kfree_skb(skb);
	return 0;
}

int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
		const struct tnl_ptk_info *tpi,
		struct metadata_dst *tun_dst,
		bool log_ecn_err)
{
	int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
				    const struct ipv6hdr *ipv6h,
				    struct sk_buff *skb);

	dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate;
	if (tpi->proto == htons(ETH_P_IP))
		dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate;

	return __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
			     log_ecn_err);
}
EXPORT_SYMBOL(ip6_tnl_rcv);

static const struct tnl_ptk_info tpi_v6 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IPV6),
};

static const struct tnl_ptk_info tpi_v4 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IP),
};

static const struct tnl_ptk_info tpi_mpls = {
	/* no tunnel info required for mplsip6. */
	.proto = htons(ETH_P_MPLS_UC),
};

static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
		      const struct tnl_ptk_info *tpi,
		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						  const struct ipv6hdr *ipv6h,
						  struct sk_buff *skb))
{
	struct ip6_tnl *t;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	int ret = -1;

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->saddr, &ipv6h->daddr);

	if (t) {
		u8 tproto = READ_ONCE(t->parms.proto);

		if (tproto != ipproto && tproto != 0)
			goto drop;
		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;
		ipv6h = ipv6_hdr(skb);
		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
			goto drop;
		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
			goto drop;
		if (t->parms.collect_md) {
			tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
			if (!tun_dst)
				goto drop;
		}
		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
				    log_ecn_error);
	}

	rcu_read_unlock();

	return ret;

drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}

static int ip4ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
			  ip4ip6_dscp_ecn_decapsulate);
}

static int ip6ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
			  ip6ip6_dscp_ecn_decapsulate);
}

static int mplsip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_MPLS, &tpi_mpls,
			  mplsip6_dscp_ecn_decapsulate);
}

struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];
};

static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
	opt->dst_opt[3] = 1;
	opt->dst_opt[4] = encap_limit;
	opt->dst_opt[5] = IPV6_TLV_PADN;
	opt->dst_opt[6] = 1;

	opt->ops.dst1opt = (struct ipv6_opt_hdr *)opt->dst_opt;
	opt->ops.opt_nflen = 8;
}
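
/* The 8 bytes built by init_tel_txopt() form a minimal destination
 * options header carrying the tunnel encapsulation limit (RFC 2473,
 * section 4.1.1):
 *
 *	dst_opt[0..1]	next header / hdr ext len (0 => 8 bytes),
 *			filled in when the header is pushed
 *	dst_opt[2]	IPV6_TLV_TNL_ENCAP_LIMIT (4)
 *	dst_opt[3]	option data length (1)
 *	dst_opt[4]	the encapsulation limit value
 *	dst_opt[5..7]	PadN option padding the header to 8 bytes
 */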

/**
 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   %true if conflict,
 *   %false otherwise
 **/

static inline bool
ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
		     const struct in6_addr *laddr,
		     const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if (t->parms.collect_md)
		return 1;

	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
		struct net_device *ldev = NULL;

		rcu_read_lock();
		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false,
						      0, IFA_F_TENTATIVE)))
			pr_warn_ratelimited("%s xmit: Local address not yet configured!\n",
					    p->name);
		else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
			 !ipv6_addr_is_multicast(raddr) &&
			 unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev,
							  true, 0, IFA_F_TENTATIVE)))
			pr_warn_ratelimited("%s xmit: Routing loop! Remote address found on this node!\n",
					    p->name);
		else
			ret = 1;
		rcu_read_unlock();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);

/**
 * ip6_tnl_xmit - encapsulate packet and send
 *   @skb: the outgoing socket buffer
 *   @dev: the outgoing tunnel device
 *   @dsfield: dscp code for outer header
 *   @fl6: flow of tunneled packet
 *   @encap_limit: encapsulation limit
 *   @pmtu: Path MTU is stored if packet is too big
 *   @proto: next header value
 *
 * Description:
 *   Build new header and do some sanity checks on the packet before sending
 *   it.
 *
 * Return:
 *   0 on success,
 *   -1 on failure,
 *   %-EMSGSIZE if the packet is too big (the MTU is returned in @pmtu)
 **/

int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
		 __u8 proto)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ipv6hdr *ipv6h;
	struct ipv6_tel_txoption opt;
	struct dst_entry *dst = NULL, *ndst = NULL;
	struct net_device *tdev;
	int mtu;
	unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
	unsigned int max_headroom = psh_hlen;
	__be16 payload_protocol;
	bool use_cache = false;
	u8 hop_limit;
	int err = -1;

	payload_protocol = skb_protocol(skb, true);

	if (t->parms.collect_md) {
		hop_limit = skb_tunnel_info(skb)->key.ttl;
		goto route_lookup;
	} else {
		hop_limit = t->parms.hop_limit;
	}

	/* NBMA tunnel */
	if (ipv6_addr_any(&t->parms.raddr)) {
		if (payload_protocol == htons(ETH_P_IPV6)) {
			struct in6_addr *addr6;
			struct neighbour *neigh;
			int addr_type;

			if (!skb_dst(skb))
				goto tx_err_link_failure;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (!neigh)
				goto tx_err_link_failure;

			addr6 = (struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY)
				addr6 = &ipv6_hdr(skb)->daddr;

			memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
			neigh_release(neigh);
		} else if (payload_protocol == htons(ETH_P_IP)) {
			const struct rtable *rt = skb_rtable(skb);

			if (!rt)
				goto tx_err_link_failure;

			if (rt->rt_gw_family == AF_INET6)
				memcpy(&fl6->daddr, &rt->rt_gw6, sizeof(fl6->daddr));
		}
	} else if (t->parms.proto != 0 && !(t->parms.flags &
					    (IP6_TNL_F_USE_ORIG_TCLASS |
					     IP6_TNL_F_USE_ORIG_FWMARK))) {
		/* enable the cache only if neither the outer protocol nor the
		 * routing decision depends on the current inner header value
		 */
		use_cache = true;
	}

	if (use_cache)
		dst = dst_cache_get(&t->dst_cache);

	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
		goto tx_err_link_failure;

	if (!dst) {
route_lookup:
		/* add dsfield to flowlabel for route lookup */
		fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);

		dst = ip6_route_output(net, NULL, fl6);

		if (dst->error)
			goto tx_err_link_failure;
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			dst = NULL;
			goto tx_err_link_failure;
		}
		if (t->parms.collect_md && ipv6_addr_any(&fl6->saddr) &&
		    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
				       &fl6->daddr, 0, &fl6->saddr))
			goto tx_err_link_failure;
		ndst = dst;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		DEV_STATS_INC(dev, collisions);
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     t->parms.name);
		goto tx_err_dst_release;
	}
	mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
	if (encap_limit >= 0) {
		max_headroom += 8;
		mtu -= 8;
	}
	mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
		       IPV6_MIN_MTU : IPV4_MIN_MTU);

	skb_dst_update_pmtu_no_confirm(skb, mtu);
	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
		*pmtu = mtu;
		err = -EMSGSIZE;
		goto tx_err_dst_release;
	}

	if (t->err_count > 0) {
		if (time_before(jiffies,
				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
			t->err_count--;

			dst_link_failure(skb);
		} else {
			t->err_count = 0;
		}
	}

	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb;

		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb)
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	if (t->parms.collect_md) {
		if (t->encap.type != TUNNEL_ENCAP_NONE)
			goto tx_err_dst_release;
	} else {
		if (use_cache && ndst)
			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
	}
	skb_dst_set(skb, dst);

	if (hop_limit == 0) {
		if (payload_protocol == htons(ETH_P_IP))
			hop_limit = ip_hdr(skb)->ttl;
		else if (payload_protocol == htons(ETH_P_IPV6))
			hop_limit = ipv6_hdr(skb)->hop_limit;
		else
			hop_limit = ip6_dst_hoplimit(dst);
	}

	/* Calculate max headroom for all the headers and adjust
	 * needed_headroom if necessary.
	 */
	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
			+ dst->header_len + t->hlen;
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	err = ip6_tnl_encap(skb, t, &proto, fl6);
	if (err)
		return err;

	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		ipv6_push_frag_opts(skb, &opt.ops, &proto);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	ipv6h = ipv6_hdr(skb);
	ip6_flow_hdr(ipv6h, dsfield,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
	ipv6h->hop_limit = hop_limit;
	ipv6h->nexthdr = proto;
	ipv6h->saddr = fl6->saddr;
	ipv6h->daddr = fl6->daddr;
	ip6tunnel_xmit(NULL, skb, dev);
	return 0;
tx_err_link_failure:
	DEV_STATS_INC(dev, tx_carrier_errors);
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}
EXPORT_SYMBOL(ip6_tnl_xmit);
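
/* Worked example of the MTU bookkeeping above, assuming a plain ip6ip6
 * tunnel (eth_hlen = 0, tun_hlen = 0, no extra encapsulation) over a
 * 1500 byte link: mtu = 1500 - 40 (outer IPv6 header) = 1460, minus 8
 * more when an encapsulation limit option is inserted, i.e. 1452 bytes
 * of inner packet before -EMSGSIZE is reported.
 */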

static inline int
ipxip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev,
		u8 protocol)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	int encap_limit = -1;
	__u16 offset;
	struct flowi6 fl6;
	__u8 dsfield, orig_dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	tproto = READ_ONCE(t->parms.proto);
	if (tproto != protocol && tproto != 0)
		return -1;

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = protocol;
		fl6.saddr = key->u.ipv6.src;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		dsfield = key->tos;
		switch (protocol) {
		case IPPROTO_IPIP:
			iph = ip_hdr(skb);
			orig_dsfield = ipv4_get_dsfield(iph);
			break;
		case IPPROTO_IPV6:
			ipv6h = ipv6_hdr(skb);
			orig_dsfield = ipv6_get_dsfield(ipv6h);
			break;
		default:
			orig_dsfield = dsfield;
			break;
		}
	} else {
		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
			encap_limit = t->parms.encap_limit;
		if (protocol == IPPROTO_IPV6) {
			offset = ip6_tnl_parse_tlv_enc_lim(skb,
						skb_network_header(skb));
			/* ip6_tnl_parse_tlv_enc_lim() might have
			 * reallocated skb->head
			 */
			if (offset > 0) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				tel = (void *)&skb_network_header(skb)[offset];
				if (tel->encap_limit == 0) {
					icmpv6_ndo_send(skb, ICMPV6_PARAMPROB,
							ICMPV6_HDR_FIELD, offset + 2);
					return -1;
				}
				encap_limit = tel->encap_limit - 1;
			}
		}

		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = protocol;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
		else
			fl6.flowi6_mark = t->parms.fwmark;
		switch (protocol) {
		case IPPROTO_IPIP:
			iph = ip_hdr(skb);
			orig_dsfield = ipv4_get_dsfield(iph);
			if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
				dsfield = orig_dsfield;
			else
				dsfield = ip6_tclass(t->parms.flowinfo);
			break;
		case IPPROTO_IPV6:
			ipv6h = ipv6_hdr(skb);
			orig_dsfield = ipv6_get_dsfield(ipv6h);
			if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
				dsfield = orig_dsfield;
			else
				dsfield = ip6_tclass(t->parms.flowinfo);
			if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
				fl6.flowlabel |= ip6_flowlabel(ipv6h);
			break;
		default:
			orig_dsfield = dsfield = ip6_tclass(t->parms.flowinfo);
			break;
		}
	}

	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
	dsfield = INET_ECN_encapsulate(dsfield, orig_dsfield);

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	skb_set_inner_ipproto(skb, protocol);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   protocol);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			switch (protocol) {
			case IPPROTO_IPIP:
				icmp_ndo_send(skb, ICMP_DEST_UNREACH,
					      ICMP_FRAG_NEEDED, htonl(mtu));
				break;
			case IPPROTO_IPV6:
				icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
				break;
			default:
				break;
			}
		return -1;
	}

	return 0;
}

static netdev_tx_t
ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	u8 ipproto;
	int ret;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ipproto = IPPROTO_IPIP;
		break;
	case htons(ETH_P_IPV6):
		if (ip6_tnl_addr_conflict(t, ipv6_hdr(skb)))
			goto tx_err;
		ipproto = IPPROTO_IPV6;
		break;
	case htons(ETH_P_MPLS_UC):
		ipproto = IPPROTO_MPLS;
		break;
	default:
		goto tx_err;
	}

	ret = ipxip6_tnl_xmit(skb, dev, ipproto);
	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	DEV_STATS_INC(dev, tx_errors);
	DEV_STATS_INC(dev, tx_dropped);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ip6_tnl_link_config(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct net_device *tdev = NULL;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	int t_hlen;
	int mtu;

	__dev_addr_set(dev, &p->laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;

	if (!(p->flags & IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT | IP6_TNL_F_CAP_RCV |
		      IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags & IP6_TNL_F_CAP_XMIT && p->flags & IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, NULL, strict);
		if (rt) {
			tdev = rt->dst.dev;
			ip6_rt_put(rt);
		}

		if (!tdev && p->link)
			tdev = __dev_get_by_index(t->net, p->link);

		if (tdev) {
			dev->hard_header_len = tdev->hard_header_len + t_hlen;
			mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU);

			mtu = mtu - t_hlen;
			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
				mtu -= 8;

			if (mtu < IPV6_MIN_MTU)
				mtu = IPV6_MIN_MTU;
			WRITE_ONCE(dev->mtu, mtu);
		}
	}
}

/**
 * ip6_tnl_change - update the tunnel parameters
 *   @t: tunnel to be changed
 *   @p: tunnel configuration parameters
 *
 * Description:
 *   ip6_tnl_change() updates the tunnel parameters
 **/

static void
ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	t->parms.fwmark = p->fwmark;
	dst_cache_reset(&t->dst_cache);
	ip6_tnl_link_config(t);
}

static void ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	ip6_tnl_unlink(ip6n, t);
	synchronize_net();
	ip6_tnl_change(t, p);
	ip6_tnl_link(ip6n, t);
	netdev_state_change(t->dev);
}

static void ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	/* for the default tnl0 device, only the proto may be changed */
	t->parms.proto = p->proto;
	netdev_state_change(t->dev);
}

static void
ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->proto = u->proto;
	memcpy(p->name, u->name, sizeof(u->name));
}

static void
ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
{
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->proto = p->proto;
	memcpy(u->name, p->name, sizeof(u->name));
}

/**
 * ip6_tnl_siocdevprivate - configure ipv6 tunnels from userspace
 *   @dev: virtual device associated with tunnel
 *   @ifr: unused
 *   @data: parameters passed from userspace
 *   @cmd: command to be performed
 *
 * Description:
 *   ip6_tnl_siocdevprivate() is used for managing IPv6 tunnels
 *   from userspace.
 *
 *   The possible commands are the following:
 *     %SIOCGETTUNNEL: get tunnel parameters for device
 *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
 *     %SIOCCHGTUNNEL: change tunnel parameters to those given
 *     %SIOCDELTUNNEL: delete tunnel
 *
 *   The fallback device "ip6tnl0", created during module
 *   initialization, can be used for creating other tunnel devices.
 *
 * Return:
 *   0 on success,
 *   %-EFAULT if unable to copy data to or from userspace,
 *   %-EPERM if current process lacks %CAP_NET_ADMIN,
 *   %-EINVAL if passed tunnel parameters are invalid,
 *   %-EEXIST if changing a tunnel's parameters would cause a conflict,
 *   %-ENODEV if attempting to change or delete a nonexistent device
 **/

static int
ip6_tnl_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
		       void __user *data, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	memset(&p1, 0, sizeof(p1));

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ip6n->fb_tnl_dev) {
			if (copy_from_user(&p, data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				t = netdev_priv(dev);
		} else {
			memset(&p, 0, sizeof(p));
		}
		ip6_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(data, &p, sizeof(p)))
			err = -EFAULT;
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = -EFAULT;
		if (copy_from_user(&p, data, sizeof(p)))
			break;
		err = -EINVAL;
		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
		    p.proto != 0)
			break;
		ip6_tnl_parm_from_user(&p1, &p);
		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
		if (cmd == SIOCCHGTUNNEL) {
			if (!IS_ERR(t)) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else
				t = netdev_priv(dev);
			if (dev == ip6n->fb_tnl_dev)
				ip6_tnl0_update(t, &p1);
			else
				ip6_tnl_update(t, &p1);
		}
		if (!IS_ERR(t)) {
			err = 0;
			ip6_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(data, &p, sizeof(p)))
				err = -EFAULT;
		} else {
			err = PTR_ERR(t);
		}
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;

		if (dev == ip6n->fb_tnl_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, data, sizeof(p)))
				break;
			err = -ENOENT;
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				break;
			err = -EPERM;
			if (t->dev == ip6n->fb_tnl_dev)
				break;
			dev = t->dev;
		}
		err = 0;
		unregister_netdevice(dev);
		break;
	default:
		err = -EINVAL;
	}
	return err;
}
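
/* Userspace reaches ip6_tnl_siocdevprivate() through the classic tunnel
 * ioctls; with iproute2 (assumed available) the SIOCADDTUNNEL path
 * corresponds to something like
 *
 *	ip -6 tunnel add ip6tnl1 mode ip6ip6 \
 *		local 2001:db8::1 remote 2001:db8::2 encaplimit 4
 *
 * whereas "ip link add ... type ip6tnl" uses the rtnetlink ops below.
 */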

/**
 * ip6_tnl_change_mtu - change mtu manually for tunnel device
 *   @dev: virtual device associated with tunnel
 *   @new_mtu: the new mtu
 *
 * Return:
 *   0 on success,
 *   %-EINVAL if @new_mtu is out of the valid range
 **/

int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip6_tnl *tnl = netdev_priv(dev);

	if (tnl->parms.proto == IPPROTO_IPV6) {
		if (new_mtu < IPV6_MIN_MTU)
			return -EINVAL;
	} else {
		if (new_mtu < ETH_MIN_MTU)
			return -EINVAL;
	}
	if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
		if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
			return -EINVAL;
	} else {
		if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
			return -EINVAL;
	}
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_change_mtu);

int ip6_tnl_get_iflink(const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	return t->parms.link;
}
EXPORT_SYMBOL(ip6_tnl_get_iflink);

int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	return !cmpxchg((const struct ip6_tnl_encap_ops **)
			&ip6tun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip6_tnl_encap_add_ops);

int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
		       &ip6tun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(ip6_tnl_encap_del_ops);

int ip6_tnl_encap_setup(struct ip6_tnl *t,
			struct ip_tunnel_encap *ipencap)
{
	int hlen;

	memset(&t->encap, 0, sizeof(t->encap));

	hlen = ip6_encap_hlen(ipencap);
	if (hlen < 0)
		return hlen;

	t->encap.type = ipencap->type;
	t->encap.sport = ipencap->sport;
	t->encap.dport = ipencap->dport;
	t->encap.flags = ipencap->flags;

	t->encap_hlen = hlen;
	t->hlen = t->encap_hlen + t->tun_hlen;

	return 0;
}
EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);

static const struct net_device_ops ip6_tnl_netdev_ops = {
	.ndo_init	= ip6_tnl_dev_init,
	.ndo_uninit	= ip6_tnl_dev_uninit,
	.ndo_start_xmit = ip6_tnl_start_xmit,
	.ndo_siocdevprivate = ip6_tnl_siocdevprivate,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_stats64 = dev_get_tstats64,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};

#define IPXIPX_FEATURES (NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_GSO_SOFTWARE |	\
			 NETIF_F_HW_CSUM)

/**
 * ip6_tnl_dev_setup - setup virtual tunnel device
 *   @dev: virtual device associated with tunnel
 *
 * Description:
 *   Initialize function pointers and device parameters
 **/

static void ip6_tnl_dev_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6_tnl_netdev_ops;
	dev->header_ops = &ip_tunnel_header_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6_dev_free;

	dev->type = ARPHRD_TUNNEL6;
	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	dev->features |= NETIF_F_LLTX;
	netif_keep_dst(dev);

	dev->features		|= IPXIPX_FEATURES;
	dev->hw_features	|= IPXIPX_FEATURES;

	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}

/**
 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int ret;
	int t_hlen;

	t->dev = dev;
	t->net = dev_net(dev);
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
	if (ret)
		goto free_stats;

	ret = gro_cells_init(&t->gro_cells, dev);
	if (ret)
		goto destroy_dst;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	dev->type = ARPHRD_TUNNEL6;
	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;

	netdev_hold(dev, &t->dev_tracker, GFP_KERNEL);
	return 0;

destroy_dst:
	dst_cache_destroy(&t->dst_cache);
free_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;

	return ret;
}

/**
 * ip6_tnl_dev_init - initializer for all non-fallback tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static int ip6_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int err = ip6_tnl_dev_init_gen(dev);

	if (err)
		return err;
	ip6_tnl_link_config(t);
	if (t->parms.collect_md)
		netif_keep_dst(dev);
	return 0;
}

/**
 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
 *   @dev: fallback device
 *
 * Return: 0
 **/

static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	t->parms.proto = IPPROTO_IPV6;

	rcu_assign_pointer(ip6n->tnls_wc[0], t);
	return 0;
}
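
/* The fallback device initialized above is the "ip6tnl0" catch-all:
 * ip6_tnl_lookup() returns it (when up) for tunneled packets that match
 * no configured tunnel, which is what makes it usable as a wildcard
 * decapsulator.
 */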
1933 
1934 static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[],
1935 			    struct netlink_ext_ack *extack)
1936 {
1937 	u8 proto;
1938 
1939 	if (!data || !data[IFLA_IPTUN_PROTO])
1940 		return 0;
1941 
1942 	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1943 	if (proto != IPPROTO_IPV6 &&
1944 	    proto != IPPROTO_IPIP &&
1945 	    proto != 0)
1946 		return -EINVAL;
1947 
1948 	return 0;
1949 }
1950 
1951 static void ip6_tnl_netlink_parms(struct nlattr *data[],
1952 				  struct __ip6_tnl_parm *parms)
1953 {
1954 	memset(parms, 0, sizeof(*parms));
1955 
1956 	if (!data)
1957 		return;
1958 
1959 	if (data[IFLA_IPTUN_LINK])
1960 		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
1961 
1962 	if (data[IFLA_IPTUN_LOCAL])
1963 		parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);
1964 
1965 	if (data[IFLA_IPTUN_REMOTE])
1966 		parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);
1967 
1968 	if (data[IFLA_IPTUN_TTL])
1969 		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
1970 
1971 	if (data[IFLA_IPTUN_ENCAP_LIMIT])
1972 		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);
1973 
1974 	if (data[IFLA_IPTUN_FLOWINFO])
1975 		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);
1976 
1977 	if (data[IFLA_IPTUN_FLAGS])
1978 		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);
1979 
1980 	if (data[IFLA_IPTUN_PROTO])
1981 		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1982 
1983 	if (data[IFLA_IPTUN_COLLECT_METADATA])
1984 		parms->collect_md = true;
1985 
1986 	if (data[IFLA_IPTUN_FWMARK])
1987 		parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
1988 }
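
/* For illustration only (iproute2 syntax, with made-up addresses), a command
 * such as
 *
 *	ip link add name ip6tn1 type ip6tnl mode any \
 *		local 2001:db8::1 remote 2001:db8::2 encaplimit 4
 *
 * reaches this parser as IFLA_IPTUN_PROTO == 0 plus IFLA_IPTUN_LOCAL,
 * IFLA_IPTUN_REMOTE and IFLA_IPTUN_ENCAP_LIMIT attributes.
 */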
1989 
1990 static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
1991 			   struct nlattr *tb[], struct nlattr *data[],
1992 			   struct netlink_ext_ack *extack)
1993 {
1994 	struct net *net = dev_net(dev);
1995 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1996 	struct ip_tunnel_encap ipencap;
1997 	struct ip6_tnl *nt, *t;
1998 	int err;
1999 
2000 	nt = netdev_priv(dev);
2001 
2002 	if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
2003 		err = ip6_tnl_encap_setup(nt, &ipencap);
2004 		if (err < 0)
2005 			return err;
2006 	}
2007 
2008 	ip6_tnl_netlink_parms(data, &nt->parms);
2009 
2010 	if (nt->parms.collect_md) {
2011 		if (rtnl_dereference(ip6n->collect_md_tun))
2012 			return -EEXIST;
2013 	} else {
2014 		t = ip6_tnl_locate(net, &nt->parms, 0);
2015 		if (!IS_ERR(t))
2016 			return -EEXIST;
2017 	}
2018 
2019 	err = ip6_tnl_create2(dev);
2020 	if (!err && tb[IFLA_MTU])
2021 		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
2022 
2023 	return err;
2024 }
2025 
2026 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
2027 			      struct nlattr *data[],
2028 			      struct netlink_ext_ack *extack)
2029 {
2030 	struct ip6_tnl *t = netdev_priv(dev);
2031 	struct __ip6_tnl_parm p;
2032 	struct net *net = t->net;
2033 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2034 	struct ip_tunnel_encap ipencap;
2035 
2036 	if (dev == ip6n->fb_tnl_dev)
2037 		return -EINVAL;
2038 
2039 	if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
2040 		int err = ip6_tnl_encap_setup(t, &ipencap);
2041 
2042 		if (err < 0)
2043 			return err;
2044 	}
2045 	ip6_tnl_netlink_parms(data, &p);
2046 	if (p.collect_md)
2047 		return -EINVAL;
2048 
2049 	t = ip6_tnl_locate(net, &p, 0);
2050 	if (!IS_ERR(t)) {
2051 		if (t->dev != dev)
2052 			return -EEXIST;
2053 	} else {
2054 		t = netdev_priv(dev);
	}
2055 
2056 	ip6_tnl_update(t, &p);
2057 	return 0;
2058 }
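
/* Reconfiguration is driven the same way from userspace; for illustration
 * (iproute2 syntax, made-up address):
 *
 *	ip link set dev ip6tn1 type ip6tnl remote 2001:db8::3
 *
 * Note that collect_md is rejected above: a metadata-based (external mode)
 * tunnel must be created as such and cannot be switched later.
 */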
2059 
2060 static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
2061 {
2062 	struct net *net = dev_net(dev);
2063 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2064 
2065 	if (dev != ip6n->fb_tnl_dev)
2066 		unregister_netdevice_queue(dev, head);
2067 }
2068 
2069 static size_t ip6_tnl_get_size(const struct net_device *dev)
2070 {
2071 	return
2072 		/* IFLA_IPTUN_LINK */
2073 		nla_total_size(4) +
2074 		/* IFLA_IPTUN_LOCAL */
2075 		nla_total_size(sizeof(struct in6_addr)) +
2076 		/* IFLA_IPTUN_REMOTE */
2077 		nla_total_size(sizeof(struct in6_addr)) +
2078 		/* IFLA_IPTUN_TTL */
2079 		nla_total_size(1) +
2080 		/* IFLA_IPTUN_ENCAP_LIMIT */
2081 		nla_total_size(1) +
2082 		/* IFLA_IPTUN_FLOWINFO */
2083 		nla_total_size(4) +
2084 		/* IFLA_IPTUN_FLAGS */
2085 		nla_total_size(4) +
2086 		/* IFLA_IPTUN_PROTO */
2087 		nla_total_size(1) +
2088 		/* IFLA_IPTUN_ENCAP_TYPE */
2089 		nla_total_size(2) +
2090 		/* IFLA_IPTUN_ENCAP_FLAGS */
2091 		nla_total_size(2) +
2092 		/* IFLA_IPTUN_ENCAP_SPORT */
2093 		nla_total_size(2) +
2094 		/* IFLA_IPTUN_ENCAP_DPORT */
2095 		nla_total_size(2) +
2096 		/* IFLA_IPTUN_COLLECT_METADATA */
2097 		nla_total_size(0) +
2098 		/* IFLA_IPTUN_FWMARK */
2099 		nla_total_size(4) +
2100 		0;
2101 }
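
/* Each nla_total_size(payload) above works out to
 * NLA_ALIGN(NLA_HDRLEN + payload), i.e. the 4-byte attribute header plus
 * the payload, rounded up to a 4-byte boundary.  For example,
 * nla_total_size(1) == 8, nla_total_size(4) == 8,
 * nla_total_size(sizeof(struct in6_addr)) == 20, and the
 * IFLA_IPTUN_COLLECT_METADATA flag costs nla_total_size(0) == 4.
 */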
2102 
2103 static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
2104 {
2105 	struct ip6_tnl *tunnel = netdev_priv(dev);
2106 	struct __ip6_tnl_parm *parm = &tunnel->parms;
2107 
2108 	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
2109 	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
2110 	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
2111 	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
2112 	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
2113 	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
2114 	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
2115 	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
2116 	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
2117 		goto nla_put_failure;
2118 
2119 	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
2120 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
2121 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
2122 	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
2123 		goto nla_put_failure;
2124 
2125 	if (parm->collect_md)
2126 		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
2127 			goto nla_put_failure;
2128 
2129 	return 0;
2130 
2131 nla_put_failure:
2132 	return -EMSGSIZE;
2133 }
2134 
2135 struct net *ip6_tnl_get_link_net(const struct net_device *dev)
2136 {
2137 	struct ip6_tnl *tunnel = netdev_priv(dev);
2138 
2139 	return tunnel->net;
2140 }
2141 EXPORT_SYMBOL(ip6_tnl_get_link_net);
2142 
2143 static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
2144 	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
2145 	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
2146 	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
2147 	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
2148 	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
2149 	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
2150 	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
2151 	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
2152 	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
2153 	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
2154 	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
2155 	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
2156 	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
2157 	[IFLA_IPTUN_FWMARK]		= { .type = NLA_U32 },
2158 };
2159 
2160 static struct rtnl_link_ops ip6_link_ops __read_mostly = {
2161 	.kind		= "ip6tnl",
2162 	.maxtype	= IFLA_IPTUN_MAX,
2163 	.policy		= ip6_tnl_policy,
2164 	.priv_size	= sizeof(struct ip6_tnl),
2165 	.setup		= ip6_tnl_dev_setup,
2166 	.validate	= ip6_tnl_validate,
2167 	.newlink	= ip6_tnl_newlink,
2168 	.changelink	= ip6_tnl_changelink,
2169 	.dellink	= ip6_tnl_dellink,
2170 	.get_size	= ip6_tnl_get_size,
2171 	.fill_info	= ip6_tnl_fill_info,
2172 	.get_link_net	= ip6_tnl_get_link_net,
2173 };
2174 
2175 static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
2176 	.handler	= ip4ip6_rcv,
2177 	.err_handler	= ip4ip6_err,
2178 	.priority	= 1,
2179 };
2180 
2181 static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
2182 	.handler	= ip6ip6_rcv,
2183 	.err_handler	= ip6ip6_err,
2184 	.priority	= 1,
2185 };
2186 
2187 static struct xfrm6_tunnel mplsip6_handler __read_mostly = {
2188 	.handler	= mplsip6_rcv,
2189 	.err_handler	= mplsip6_err,
2190 	.priority	= 1,
2191 };
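
/* All three handlers register at priority 1.  tunnel6 dispatches handlers
 * from highest to lowest priority, so (assuming the usual configuration)
 * higher-priority consumers such as xfrm6_tunnel may claim a packet first,
 * leaving ip6_tunnel as the lower-priority catch-all.
 */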
2192 
2193 static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head *list)
2194 {
2195 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2196 	struct net_device *dev, *aux;
2197 	int h;
2198 	struct ip6_tnl *t;
2199 
2200 	for_each_netdev_safe(net, dev, aux)
2201 		if (dev->rtnl_link_ops == &ip6_link_ops)
2202 			unregister_netdevice_queue(dev, list);
2203 
2204 	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
2205 		t = rtnl_dereference(ip6n->tnls_r_l[h]);
2206 		while (t) {
2207 			/* If dev is in the same netns, it has already
2208 			 * been added to the list by the previous loop.
2209 			 */
2210 			if (!net_eq(dev_net(t->dev), net))
2211 				unregister_netdevice_queue(t->dev, list);
2212 			t = rtnl_dereference(t->next);
2213 		}
2214 	}
2215 
2216 	t = rtnl_dereference(ip6n->tnls_wc[0]);
2217 	while (t) {
2218 		/* If dev is in the same netns, it has already
2219 		 * been added to the list by the previous loop.
2220 		 */
2221 		if (!net_eq(dev_net(t->dev), net))
2222 			unregister_netdevice_queue(t->dev, list);
2223 		t = rtnl_dereference(t->next);
2224 	}
2225 }
2226 
2227 static int __net_init ip6_tnl_init_net(struct net *net)
2228 {
2229 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2230 	struct ip6_tnl *t = NULL;
2231 	int err;
2232 
2233 	ip6n->tnls[0] = ip6n->tnls_wc;
2234 	ip6n->tnls[1] = ip6n->tnls_r_l;
2235 
2236 	if (!net_has_fallback_tunnels(net))
2237 		return 0;
2238 	err = -ENOMEM;
2239 	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
2240 					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);
2241 
2242 	if (!ip6n->fb_tnl_dev)
2243 		goto err_alloc_dev;
2244 	dev_net_set(ip6n->fb_tnl_dev, net);
2245 	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
2246 	/* The FB netdevice is special: there is one, and only one, per netns.
2247 	 * Allowing it to be moved to another netns is clearly unsafe.
2248 	 */
2249 	ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
2250 
2251 	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
2252 	if (err < 0)
2253 		goto err_register;
2254 
2255 	err = register_netdev(ip6n->fb_tnl_dev);
2256 	if (err < 0)
2257 		goto err_register;
2258 
2259 	t = netdev_priv(ip6n->fb_tnl_dev);
2260 
2261 	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
2262 	return 0;
2263 
2264 err_register:
2265 	free_netdev(ip6n->fb_tnl_dev);
2266 err_alloc_dev:
2267 	return err;
2268 }
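
/* Whether the per-netns fallback device is created at all is governed by
 * net_has_fallback_tunnels(), and thus by the
 * net.core.fb_tunnels_only_for_init_net sysctl: for example, setting that
 * sysctl to 1 suppresses ip6tnl0 everywhere except the initial namespace.
 */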
2269 
2270 static void __net_exit ip6_tnl_exit_batch_net(struct list_head *net_list)
2271 {
2272 	struct net *net;
2273 	LIST_HEAD(list);
2274 
2275 	rtnl_lock();
2276 	list_for_each_entry(net, net_list, exit_list)
2277 		ip6_tnl_destroy_tunnels(net, &list);
2278 	unregister_netdevice_many(&list);
2279 	rtnl_unlock();
2280 }
2281 
2282 static struct pernet_operations ip6_tnl_net_ops = {
2283 	.init = ip6_tnl_init_net,
2284 	.exit_batch = ip6_tnl_exit_batch_net,
2285 	.id   = &ip6_tnl_net_id,
2286 	.size = sizeof(struct ip6_tnl_net),
2287 };
2288 
2289 /**
2290  * ip6_tunnel_init - register protocol and reserve needed resources
2291  *
2292  * Return: 0 on success, a negative errno code on failure
2293  **/
2294 
2295 static int __init ip6_tunnel_init(void)
2296 {
2297 	int err;
2298 
2299 	if (!ipv6_mod_enabled())
2300 		return -EOPNOTSUPP;
2301 
2302 	err = register_pernet_device(&ip6_tnl_net_ops);
2303 	if (err < 0)
2304 		goto out_pernet;
2305 
2306 	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
2307 	if (err < 0) {
2308 		pr_err("%s: can't register ip4ip6\n", __func__);
2309 		goto out_ip4ip6;
2310 	}
2311 
2312 	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
2313 	if (err < 0) {
2314 		pr_err("%s: can't register ip6ip6\n", __func__);
2315 		goto out_ip6ip6;
2316 	}
2317 
2318 	if (ip6_tnl_mpls_supported()) {
2319 		err = xfrm6_tunnel_register(&mplsip6_handler, AF_MPLS);
2320 		if (err < 0) {
2321 			pr_err("%s: can't register mplsip6\n", __func__);
2322 			goto out_mplsip6;
2323 		}
2324 	}
2325 
2326 	err = rtnl_link_register(&ip6_link_ops);
2327 	if (err < 0)
2328 		goto rtnl_link_failed;
2329 
2330 	return 0;
2331 
2332 rtnl_link_failed:
2333 	if (ip6_tnl_mpls_supported())
2334 		xfrm6_tunnel_deregister(&mplsip6_handler, AF_MPLS);
2335 out_mplsip6:
2336 	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
2337 out_ip6ip6:
2338 	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
2339 out_ip4ip6:
2340 	unregister_pernet_device(&ip6_tnl_net_ops);
2341 out_pernet:
2342 	return err;
2343 }
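
/* Typical lifecycle, for illustration: "modprobe ip6_tunnel" runs
 * ip6_tunnel_init() and, in namespaces with fallback tunnels enabled,
 * creates the ip6tnl0 device.  Thanks to MODULE_ALIAS_NETDEV("ip6tnl0") and
 * MODULE_ALIAS_RTNL_LINK("ip6tnl"), the module can also be loaded on demand
 * when that device or link type is first requested.
 */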
2344 
2345 /**
2346  * ip6_tunnel_cleanup - free resources and unregister protocol
2347  **/
2348 
2349 static void __exit ip6_tunnel_cleanup(void)
2350 {
2351 	rtnl_link_unregister(&ip6_link_ops);
2352 	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
2353 		pr_info("%s: can't deregister ip4ip6\n", __func__);
2354 
2355 	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
2356 		pr_info("%s: can't deregister ip6ip6\n", __func__);
2357 
2358 	if (ip6_tnl_mpls_supported() &&
2359 	    xfrm6_tunnel_deregister(&mplsip6_handler, AF_MPLS))
2360 		pr_info("%s: can't deregister mplsip6\n", __func__);
2361 	unregister_pernet_device(&ip6_tnl_net_ops);
2362 }
2363 
2364 module_init(ip6_tunnel_init);
2365 module_exit(ip6_tunnel_cleanup);
2366