xref: /openbmc/linux/net/ipv6/sit.c (revision 803f6914)
1 /*
2  *	IPv6 over IPv4 tunnel device - Simple Internet Transition (SIT)
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
8  *
9  *	This program is free software; you can redistribute it and/or
10  *      modify it under the terms of the GNU General Public License
11  *      as published by the Free Software Foundation; either version
12  *      2 of the License, or (at your option) any later version.
13  *
14  *	Changes:
15  * Roger Venning <r.venning@telstra.com>:	6to4 support
16  * Nate Thompson <nate@thebog.net>:		6to4 support
17  * Fred Templin <fred.l.templin@boeing.com>:	isatap support
18  */
19 
20 #include <linux/module.h>
21 #include <linux/capability.h>
22 #include <linux/errno.h>
23 #include <linux/types.h>
24 #include <linux/socket.h>
25 #include <linux/sockios.h>
26 #include <linux/net.h>
27 #include <linux/in6.h>
28 #include <linux/netdevice.h>
29 #include <linux/if_arp.h>
30 #include <linux/icmp.h>
31 #include <linux/slab.h>
32 #include <asm/uaccess.h>
33 #include <linux/init.h>
34 #include <linux/netfilter_ipv4.h>
35 #include <linux/if_ether.h>
36 
37 #include <net/sock.h>
38 #include <net/snmp.h>
39 
40 #include <net/ipv6.h>
41 #include <net/protocol.h>
42 #include <net/transp_v6.h>
43 #include <net/ip6_fib.h>
44 #include <net/ip6_route.h>
45 #include <net/ndisc.h>
46 #include <net/addrconf.h>
47 #include <net/ip.h>
48 #include <net/udp.h>
49 #include <net/icmp.h>
50 #include <net/ipip.h>
51 #include <net/inet_ecn.h>
52 #include <net/xfrm.h>
53 #include <net/dsfield.h>
54 #include <net/net_namespace.h>
55 #include <net/netns/generic.h>
56 
57 /*
58    This version of net/ipv6/sit.c was cloned from net/ipv4/ip_gre.c.
59 
60    For comments, look at net/ipv4/ip_gre.c. --ANK
61  */
62 
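/*
 * Tunnels live in four small hash tables keyed by remote and/or local
 * endpoint (see struct sit_net below).  HASH() folds a 32-bit address
 * into a 4-bit bucket index by XOR-ing it with itself shifted right by
 * four bits and keeping the low nibble.
 */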
63 #define HASH_SIZE  16
64 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
65 
66 static int ipip6_tunnel_init(struct net_device *dev);
67 static void ipip6_tunnel_setup(struct net_device *dev);
68 static void ipip6_dev_free(struct net_device *dev);
69 
70 static int sit_net_id __read_mostly;
71 struct sit_net {
72 	struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
73 	struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
74 	struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
75 	struct ip_tunnel __rcu *tunnels_wc[1];
76 	struct ip_tunnel __rcu **tunnels[4];
77 
78 	struct net_device *fb_tunnel_dev;
79 };
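/*
 * tunnels[] is indexed by the 2-bit "prio" computed in __ipip6_bucket():
 * bit 1 is set when a remote endpoint is configured and bit 0 when a
 * local one is, so 0 = wildcard, 1 = local only, 2 = remote only,
 * 3 = remote + local.  sit_init_net() points the four slots at
 * tunnels_wc, tunnels_l, tunnels_r and tunnels_r_l respectively.
 */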
80 
81 /*
82  * Locking: hash tables are protected by RCU and RTNL
83  */
84 
85 #define for_each_ip_tunnel_rcu(start) \
86 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
87 
88 /* often-modified stats are per-cpu, others are shared (netdev->stats) */
89 struct pcpu_tstats {
90 	unsigned long	rx_packets;
91 	unsigned long	rx_bytes;
92 	unsigned long	tx_packets;
93 	unsigned long	tx_bytes;
94 } __attribute__((aligned(4*sizeof(unsigned long))));
95 
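/*
 * ndo_get_stats: fold the per-cpu tx/rx counters into the shared
 * netdev->stats so callers see a consistent snapshot.
 */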
96 static struct net_device_stats *ipip6_get_stats(struct net_device *dev)
97 {
98 	struct pcpu_tstats sum = { 0 };
99 	int i;
100 
101 	for_each_possible_cpu(i) {
102 		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
103 
104 		sum.rx_packets += tstats->rx_packets;
105 		sum.rx_bytes   += tstats->rx_bytes;
106 		sum.tx_packets += tstats->tx_packets;
107 		sum.tx_bytes   += tstats->tx_bytes;
108 	}
109 	dev->stats.rx_packets = sum.rx_packets;
110 	dev->stats.rx_bytes   = sum.rx_bytes;
111 	dev->stats.tx_packets = sum.tx_packets;
112 	dev->stats.tx_bytes   = sum.tx_bytes;
113 	return &dev->stats;
114 }
115 /*
116  * Must be invoked with rcu_read_lock() held.
117  */
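/*
 * Lookup is most-specific first: remote+local, remote only, local only,
 * and finally the wildcard fallback tunnel.
 */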
118 static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
119 		struct net_device *dev, __be32 remote, __be32 local)
120 {
121 	unsigned int h0 = HASH(remote);
122 	unsigned int h1 = HASH(local);
123 	struct ip_tunnel *t;
124 	struct sit_net *sitn = net_generic(net, sit_net_id);
125 
126 	for_each_ip_tunnel_rcu(sitn->tunnels_r_l[h0 ^ h1]) {
127 		if (local == t->parms.iph.saddr &&
128 		    remote == t->parms.iph.daddr &&
129 		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
130 		    (t->dev->flags & IFF_UP))
131 			return t;
132 	}
133 	for_each_ip_tunnel_rcu(sitn->tunnels_r[h0]) {
134 		if (remote == t->parms.iph.daddr &&
135 		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
136 		    (t->dev->flags & IFF_UP))
137 			return t;
138 	}
139 	for_each_ip_tunnel_rcu(sitn->tunnels_l[h1]) {
140 		if (local == t->parms.iph.saddr &&
141 		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
142 		    (t->dev->flags & IFF_UP))
143 			return t;
144 	}
145 	t = rcu_dereference(sitn->tunnels_wc[0]);
146 	if ((t != NULL) && (t->dev->flags & IFF_UP))
147 		return t;
148 	return NULL;
149 }
150 
151 static struct ip_tunnel __rcu **__ipip6_bucket(struct sit_net *sitn,
152 		struct ip_tunnel_parm *parms)
153 {
154 	__be32 remote = parms->iph.daddr;
155 	__be32 local = parms->iph.saddr;
156 	unsigned int h = 0;
157 	int prio = 0;
158 
159 	if (remote) {
160 		prio |= 2;
161 		h ^= HASH(remote);
162 	}
163 	if (local) {
164 		prio |= 1;
165 		h ^= HASH(local);
166 	}
167 	return &sitn->tunnels[prio][h];
168 }
169 
170 static inline struct ip_tunnel __rcu **ipip6_bucket(struct sit_net *sitn,
171 		struct ip_tunnel *t)
172 {
173 	return __ipip6_bucket(sitn, &t->parms);
174 }
175 
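/*
 * Hash chain insertion/removal: callers hold RTNL, readers walk the
 * chains under RCU (hence rtnl_dereference()/rcu_assign_pointer()).
 */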
176 static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
177 {
178 	struct ip_tunnel __rcu **tp;
179 	struct ip_tunnel *iter;
180 
181 	for (tp = ipip6_bucket(sitn, t);
182 	     (iter = rtnl_dereference(*tp)) != NULL;
183 	     tp = &iter->next) {
184 		if (t == iter) {
185 			rcu_assign_pointer(*tp, t->next);
186 			break;
187 		}
188 	}
189 }
190 
191 static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
192 {
193 	struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t);
194 
195 	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
196 	rcu_assign_pointer(*tp, t);
197 }
198 
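/*
 * With CONFIG_IPV6_SIT_6RD the fallback device defaults to the 6to4
 * 2002::/16 prefix with no relay prefix; other tunnels inherit the 6rd
 * parameters of the fallback device at creation time.
 */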
199 static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
200 {
201 #ifdef CONFIG_IPV6_SIT_6RD
202 	struct ip_tunnel *t = netdev_priv(dev);
203 
204 	if (t->dev == sitn->fb_tunnel_dev) {
205 		ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
206 		t->ip6rd.relay_prefix = 0;
207 		t->ip6rd.prefixlen = 16;
208 		t->ip6rd.relay_prefixlen = 0;
209 	} else {
210 		struct ip_tunnel *t0 = netdev_priv(sitn->fb_tunnel_dev);
211 		memcpy(&t->ip6rd, &t0->ip6rd, sizeof(t->ip6rd));
212 	}
213 #endif
214 }
215 
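/*
 * Find the tunnel matching parms.  When "create" is set, fail if such a
 * tunnel already exists; otherwise allocate, initialise and register a
 * new sitX device and link it into the hash tables.
 */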
216 static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
217 		struct ip_tunnel_parm *parms, int create)
218 {
219 	__be32 remote = parms->iph.daddr;
220 	__be32 local = parms->iph.saddr;
221 	struct ip_tunnel *t, *nt;
222 	struct ip_tunnel __rcu **tp;
223 	struct net_device *dev;
224 	char name[IFNAMSIZ];
225 	struct sit_net *sitn = net_generic(net, sit_net_id);
226 
227 	for (tp = __ipip6_bucket(sitn, parms);
228 	    (t = rtnl_dereference(*tp)) != NULL;
229 	     tp = &t->next) {
230 		if (local == t->parms.iph.saddr &&
231 		    remote == t->parms.iph.daddr &&
232 		    parms->link == t->parms.link) {
233 			if (create)
234 				return NULL;
235 			else
236 				return t;
237 		}
238 	}
239 	if (!create)
240 		goto failed;
241 
242 	if (parms->name[0])
243 		strlcpy(name, parms->name, IFNAMSIZ);
244 	else
245 		strcpy(name, "sit%d");
246 
247 	dev = alloc_netdev(sizeof(*t), name, ipip6_tunnel_setup);
248 	if (dev == NULL)
249 		return NULL;
250 
251 	dev_net_set(dev, net);
252 
253 	nt = netdev_priv(dev);
254 
255 	nt->parms = *parms;
256 	if (ipip6_tunnel_init(dev) < 0)
257 		goto failed_free;
258 	ipip6_tunnel_clone_6rd(dev, sitn);
259 
260 	if (parms->i_flags & SIT_ISATAP)
261 		dev->priv_flags |= IFF_ISATAP;
262 
263 	if (register_netdevice(dev) < 0)
264 		goto failed_free;
265 
266 	strcpy(nt->parms.name, dev->name);
267 
268 	dev_hold(dev);
269 
270 	ipip6_tunnel_link(sitn, nt);
271 	return nt;
272 
273 failed_free:
274 	ipip6_dev_free(dev);
275 failed:
276 	return NULL;
277 }
278 
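/*
 * ISATAP Potential Router List (PRL): updates run under RTNL, lookups
 * on the receive path walk the list under RCU.
 */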
279 #define for_each_prl_rcu(start)			\
280 	for (prl = rcu_dereference(start);	\
281 	     prl;				\
282 	     prl = rcu_dereference(prl->next))
283 
284 static struct ip_tunnel_prl_entry *
285 __ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr)
286 {
287 	struct ip_tunnel_prl_entry *prl;
288 
289 	for_each_prl_rcu(t->prl)
290 		if (prl->addr == addr)
291 			break;
292 	return prl;
293 
294 }
295 
296 static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
297 				struct ip_tunnel_prl __user *a)
298 {
299 	struct ip_tunnel_prl kprl, *kp;
300 	struct ip_tunnel_prl_entry *prl;
301 	unsigned int cmax, c = 0, ca, len;
302 	int ret = 0;
303 
304 	if (copy_from_user(&kprl, a, sizeof(kprl)))
305 		return -EFAULT;
306 	cmax = kprl.datalen / sizeof(kprl);
307 	if (cmax > 1 && kprl.addr != htonl(INADDR_ANY))
308 		cmax = 1;
309 
310 	/* For a single-entry GET or for privileged (CAP_NET_ADMIN) users,
311 	 * try a full-size GFP_KERNEL allocation up front.
312 	 */
313 	kp = (cmax <= 1 || capable(CAP_NET_ADMIN)) ?
314 		kcalloc(cmax, sizeof(*kp), GFP_KERNEL) :
315 		NULL;
316 
317 	rcu_read_lock();
318 
319 	ca = t->prl_count < cmax ? t->prl_count : cmax;
320 
321 	if (!kp) {
322 		/* We don't try hard to allocate much memory for
323 		 * non-root users.
324 		 * For root users, retry allocating enough memory for
325 		 * the answer.
326 		 */
327 		kp = kcalloc(ca, sizeof(*kp), GFP_ATOMIC);
328 		if (!kp) {
329 			ret = -ENOMEM;
330 			goto out;
331 		}
332 	}
333 
334 	c = 0;
335 	for_each_prl_rcu(t->prl) {
336 		if (c >= cmax)
337 			break;
338 		if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr)
339 			continue;
340 		kp[c].addr = prl->addr;
341 		kp[c].flags = prl->flags;
342 		c++;
343 		if (kprl.addr != htonl(INADDR_ANY))
344 			break;
345 	}
346 out:
347 	rcu_read_unlock();
348 
349 	len = sizeof(*kp) * c;
350 	ret = 0;
351 	if ((len && copy_to_user(a + 1, kp, len)) || put_user(len, &a->datalen))
352 		ret = -EFAULT;
353 
354 	kfree(kp);
355 
356 	return ret;
357 }
358 
359 static int
360 ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
361 {
362 	struct ip_tunnel_prl_entry *p;
363 	int err = 0;
364 
365 	if (a->addr == htonl(INADDR_ANY))
366 		return -EINVAL;
367 
368 	ASSERT_RTNL();
369 
370 	for (p = rtnl_dereference(t->prl); p; p = rtnl_dereference(p->next)) {
371 		if (p->addr == a->addr) {
372 			if (chg) {
373 				p->flags = a->flags;
374 				goto out;
375 			}
376 			err = -EEXIST;
377 			goto out;
378 		}
379 	}
380 
381 	if (chg) {
382 		err = -ENXIO;
383 		goto out;
384 	}
385 
386 	p = kzalloc(sizeof(struct ip_tunnel_prl_entry), GFP_KERNEL);
387 	if (!p) {
388 		err = -ENOBUFS;
389 		goto out;
390 	}
391 
392 	p->next = t->prl;
393 	p->addr = a->addr;
394 	p->flags = a->flags;
395 	t->prl_count++;
396 	rcu_assign_pointer(t->prl, p);
397 out:
398 	return err;
399 }
400 
401 static void prl_list_destroy_rcu(struct rcu_head *head)
402 {
403 	struct ip_tunnel_prl_entry *p, *n;
404 
405 	p = container_of(head, struct ip_tunnel_prl_entry, rcu_head);
406 	do {
407 		n = rcu_dereference_protected(p->next, 1);
408 		kfree(p);
409 		p = n;
410 	} while (p);
411 }
412 
413 static int
414 ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
415 {
416 	struct ip_tunnel_prl_entry *x;
417 	struct ip_tunnel_prl_entry __rcu **p;
418 	int err = 0;
419 
420 	ASSERT_RTNL();
421 
422 	if (a && a->addr != htonl(INADDR_ANY)) {
423 		for (p = &t->prl;
424 		     (x = rtnl_dereference(*p)) != NULL;
425 		     p = &x->next) {
426 			if (x->addr == a->addr) {
427 				*p = x->next;
428 				kfree_rcu(x, rcu_head);
429 				t->prl_count--;
430 				goto out;
431 			}
432 		}
433 		err = -ENXIO;
434 	} else {
435 		x = rtnl_dereference(t->prl);
436 		if (x) {
437 			t->prl_count = 0;
438 			call_rcu(&x->rcu_head, prl_list_destroy_rcu);
439 			t->prl = NULL;
440 		}
441 	}
442 out:
443 	return err;
444 }
445 
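/*
 * ISATAP ingress check: accept the packet if the outer IPv4 source is
 * on the potential router list (tagging the ndisc node type from the
 * PRL flags), or if the inner IPv6 source is an ISATAP address that
 * embeds the same IPv4 source and matches a prefix on this device.
 * Otherwise the caller drops the packet.
 */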
446 static int
447 isatap_chksrc(struct sk_buff *skb, const struct iphdr *iph, struct ip_tunnel *t)
448 {
449 	struct ip_tunnel_prl_entry *p;
450 	int ok = 1;
451 
452 	rcu_read_lock();
453 	p = __ipip6_tunnel_locate_prl(t, iph->saddr);
454 	if (p) {
455 		if (p->flags & PRL_DEFAULT)
456 			skb->ndisc_nodetype = NDISC_NODETYPE_DEFAULT;
457 		else
458 			skb->ndisc_nodetype = NDISC_NODETYPE_NODEFAULT;
459 	} else {
460 		const struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr;
461 
462 		if (ipv6_addr_is_isatap(addr6) &&
463 		    (addr6->s6_addr32[3] == iph->saddr) &&
464 		    ipv6_chk_prefix(addr6, t->dev))
465 			skb->ndisc_nodetype = NDISC_NODETYPE_HOST;
466 		else
467 			ok = 0;
468 	}
469 	rcu_read_unlock();
470 	return ok;
471 }
472 
473 static void ipip6_tunnel_uninit(struct net_device *dev)
474 {
475 	struct net *net = dev_net(dev);
476 	struct sit_net *sitn = net_generic(net, sit_net_id);
477 
478 	if (dev == sitn->fb_tunnel_dev) {
479 		RCU_INIT_POINTER(sitn->tunnels_wc[0], NULL);
480 	} else {
481 		ipip6_tunnel_unlink(sitn, netdev_priv(dev));
482 		ipip6_tunnel_del_prl(netdev_priv(dev), NULL);
483 	}
484 	dev_put(dev);
485 }
486 
487 
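/*
 * ICMP error handler for the outer IPv4 packet: find the tunnel the
 * error refers to and record it in err_count/err_time; the transmit
 * path uses that state to report link failures to the IPv6 layer.
 */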
488 static int ipip6_err(struct sk_buff *skb, u32 info)
489 {
490 
491 /* All the routers (except for Linux) return only
492    8 bytes of packet payload. This means that precise relaying of
493    ICMP in the real Internet is absolutely infeasible.
494  */
495 	const struct iphdr *iph = (const struct iphdr *)skb->data;
496 	const int type = icmp_hdr(skb)->type;
497 	const int code = icmp_hdr(skb)->code;
498 	struct ip_tunnel *t;
499 	int err;
500 
501 	switch (type) {
502 	default:
503 	case ICMP_PARAMETERPROB:
504 		return 0;
505 
506 	case ICMP_DEST_UNREACH:
507 		switch (code) {
508 		case ICMP_SR_FAILED:
509 		case ICMP_PORT_UNREACH:
510 			/* Impossible event. */
511 			return 0;
512 		case ICMP_FRAG_NEEDED:
513 			/* Soft state for pmtu is maintained by IP core. */
514 			return 0;
515 		default:
516 			/* All others are translated to HOST_UNREACH.
517 			   rfc2003 contains "deep thoughts" about NET_UNREACH,
518 			   I believe they are just ether pollution. --ANK
519 			 */
520 			break;
521 		}
522 		break;
523 	case ICMP_TIME_EXCEEDED:
524 		if (code != ICMP_EXC_TTL)
525 			return 0;
526 		break;
527 	}
528 
529 	err = -ENOENT;
530 
531 	rcu_read_lock();
532 	t = ipip6_tunnel_lookup(dev_net(skb->dev),
533 				skb->dev,
534 				iph->daddr,
535 				iph->saddr);
536 	if (t == NULL || t->parms.iph.daddr == 0)
537 		goto out;
538 
539 	err = 0;
540 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
541 		goto out;
542 
543 	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
544 		t->err_count++;
545 	else
546 		t->err_count = 1;
547 	t->err_time = jiffies;
548 out:
549 	rcu_read_unlock();
550 	return err;
551 }
552 
553 static inline void ipip6_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
554 {
555 	if (INET_ECN_is_ce(iph->tos))
556 		IP6_ECN_set_ce(ipv6_hdr(skb));
557 }
558 
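/*
 * Receive hook registered via xfrm4_tunnel_register() for IPv6-in-IPv4.
 * On a tunnel match the headers are re-pointed at the inner IPv6
 * packet, ECN is propagated and the packet is handed back to the stack
 * with netif_rx(); returning 1 lets other handlers (e.g. xfrm) look at
 * packets that match no tunnel.
 */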
559 static int ipip6_rcv(struct sk_buff *skb)
560 {
561 	const struct iphdr *iph;
562 	struct ip_tunnel *tunnel;
563 
564 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
565 		goto out;
566 
567 	iph = ip_hdr(skb);
568 
569 	rcu_read_lock();
570 	tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
571 				     iph->saddr, iph->daddr);
572 	if (tunnel != NULL) {
573 		struct pcpu_tstats *tstats;
574 
575 		secpath_reset(skb);
576 		skb->mac_header = skb->network_header;
577 		skb_reset_network_header(skb);
578 		IPCB(skb)->flags = 0;
579 		skb->protocol = htons(ETH_P_IPV6);
580 		skb->pkt_type = PACKET_HOST;
581 
582 		if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
583 		    !isatap_chksrc(skb, iph, tunnel)) {
584 			tunnel->dev->stats.rx_errors++;
585 			rcu_read_unlock();
586 			kfree_skb(skb);
587 			return 0;
588 		}
589 
590 		tstats = this_cpu_ptr(tunnel->dev->tstats);
591 		tstats->rx_packets++;
592 		tstats->rx_bytes += skb->len;
593 
594 		__skb_tunnel_rx(skb, tunnel->dev);
595 
596 		ipip6_ecn_decapsulate(iph, skb);
597 
598 		netif_rx(skb);
599 
600 		rcu_read_unlock();
601 		return 0;
602 	}
603 
604 	/* no tunnel matched; let upstream know, IPsec may handle it */
605 	rcu_read_unlock();
606 	return 1;
607 out:
608 	kfree_skb(skb);
609 	return 0;
610 }
611 
612 /*
613  * Returns the embedded IPv4 address if the IPv6 address
614  * comes from the 6rd / 6to4 (RFC 3056) address space.
615  */
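/*
 * For example, with the default 6rd/6to4 configuration (prefix
 * 2002::/16, no relay prefix) a destination of 2002:c000:0204::1
 * yields the IPv4 tunnel endpoint 192.0.2.4 (c0.00.02.04).
 */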
616 static inline
617 __be32 try_6rd(const struct in6_addr *v6dst, struct ip_tunnel *tunnel)
618 {
619 	__be32 dst = 0;
620 
621 #ifdef CONFIG_IPV6_SIT_6RD
622 	if (ipv6_prefix_equal(v6dst, &tunnel->ip6rd.prefix,
623 			      tunnel->ip6rd.prefixlen)) {
624 		unsigned int pbw0, pbi0;
625 		int pbi1;
626 		u32 d;
627 
628 		pbw0 = tunnel->ip6rd.prefixlen >> 5;
629 		pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
630 
631 		d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
632 		    tunnel->ip6rd.relay_prefixlen;
633 
634 		pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
635 		if (pbi1 > 0)
636 			d |= ntohl(v6dst->s6_addr32[pbw0 + 1]) >>
637 			     (32 - pbi1);
638 
639 		dst = tunnel->ip6rd.relay_prefix | htonl(d);
640 	}
641 #else
642 	if (v6dst->s6_addr16[0] == htons(0x2002)) {
643 		/* a 6to4 address is: 16-bit 2002::/16 prefix, 32-bit IPv4 address, 16-bit SLA ID, ... */
644 		memcpy(&dst, &v6dst->s6_addr16[1], 4);
645 	}
646 #endif
647 	return dst;
648 }
649 
650 /*
651  *	This function assumes it is being called from dev_queue_xmit()
652  *	and that skb is filled properly by that function.
653  */
654 
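/*
 * Resolving the IPv4 endpoint: ISATAP tunnels take it from the ISATAP
 * interface identifier of the next hop; otherwise the configured remote
 * endpoint is used, falling back to the address embedded in a 6rd/6to4
 * destination and finally to the low 32 bits of an IPv4-compatible
 * next hop.
 */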
655 static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
656 				     struct net_device *dev)
657 {
658 	struct ip_tunnel *tunnel = netdev_priv(dev);
659 	struct pcpu_tstats *tstats;
660 	const struct iphdr  *tiph = &tunnel->parms.iph;
661 	const struct ipv6hdr *iph6 = ipv6_hdr(skb);
662 	u8     tos = tunnel->parms.iph.tos;
663 	__be16 df = tiph->frag_off;
664 	struct rtable *rt;     			/* Route to the other host */
665 	struct net_device *tdev;		/* Device to other host */
666 	struct iphdr  *iph;			/* Our new IP header */
667 	unsigned int max_headroom;		/* The extra header space needed */
668 	__be32 dst = tiph->daddr;
669 	struct flowi4 fl4;
670 	int    mtu;
671 	const struct in6_addr *addr6;
672 	int addr_type;
673 
674 	if (skb->protocol != htons(ETH_P_IPV6))
675 		goto tx_error;
676 
677 	if (tos == 1)
678 		tos = ipv6_get_dsfield(iph6);
679 
680 	/* ISATAP (RFC4214) - must come before 6to4 */
681 	if (dev->priv_flags & IFF_ISATAP) {
682 		struct neighbour *neigh = NULL;
683 		bool do_tx_error = false;
684 
685 		if (skb_dst(skb))
686 			neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
687 
688 		if (neigh == NULL) {
689 			if (net_ratelimit())
690 				printk(KERN_DEBUG "sit: nexthop == NULL\n");
691 			goto tx_error;
692 		}
693 
694 		addr6 = (const struct in6_addr*)&neigh->primary_key;
695 		addr_type = ipv6_addr_type(addr6);
696 
697 		if ((addr_type & IPV6_ADDR_UNICAST) &&
698 		     ipv6_addr_is_isatap(addr6))
699 			dst = addr6->s6_addr32[3];
700 		else
701 			do_tx_error = true;
702 
703 		neigh_release(neigh);
704 		if (do_tx_error)
705 			goto tx_error;
706 	}
707 
708 	if (!dst)
709 		dst = try_6rd(&iph6->daddr, tunnel);
710 
711 	if (!dst) {
712 		struct neighbour *neigh = NULL;
713 		bool do_tx_error = false;
714 
715 		if (skb_dst(skb))
716 			neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
717 
718 		if (neigh == NULL) {
719 			if (net_ratelimit())
720 				printk(KERN_DEBUG "sit: nexthop == NULL\n");
721 			goto tx_error;
722 		}
723 
724 		addr6 = (const struct in6_addr*)&neigh->primary_key;
725 		addr_type = ipv6_addr_type(addr6);
726 
727 		if (addr_type == IPV6_ADDR_ANY) {
728 			addr6 = &ipv6_hdr(skb)->daddr;
729 			addr_type = ipv6_addr_type(addr6);
730 		}
731 
732 		if ((addr_type & IPV6_ADDR_COMPATv4) != 0)
733 			dst = addr6->s6_addr32[3];
734 		else
735 			do_tx_error = true;
736 
737 		neigh_release(neigh);
738 		if (do_tx_error)
739 			goto tx_error;
740 	}
741 
742 	rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
743 				   dst, tiph->saddr,
744 				   0, 0,
745 				   IPPROTO_IPV6, RT_TOS(tos),
746 				   tunnel->parms.link);
747 	if (IS_ERR(rt)) {
748 		dev->stats.tx_carrier_errors++;
749 		goto tx_error_icmp;
750 	}
751 	if (rt->rt_type != RTN_UNICAST) {
752 		ip_rt_put(rt);
753 		dev->stats.tx_carrier_errors++;
754 		goto tx_error_icmp;
755 	}
756 	tdev = rt->dst.dev;
757 
758 	if (tdev == dev) {
759 		ip_rt_put(rt);
760 		dev->stats.collisions++;
761 		goto tx_error;
762 	}
763 
764 	if (df) {
765 		mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
766 
767 		if (mtu < 68) {
768 			dev->stats.collisions++;
769 			ip_rt_put(rt);
770 			goto tx_error;
771 		}
772 
773 		if (mtu < IPV6_MIN_MTU) {
774 			mtu = IPV6_MIN_MTU;
775 			df = 0;
776 		}
777 
778 		if (tunnel->parms.iph.daddr && skb_dst(skb))
779 			skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
780 
781 		if (skb->len > mtu) {
782 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
783 			ip_rt_put(rt);
784 			goto tx_error;
785 		}
786 	}
787 
788 	if (tunnel->err_count > 0) {
789 		if (time_before(jiffies,
790 				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
791 			tunnel->err_count--;
792 			dst_link_failure(skb);
793 		} else
794 			tunnel->err_count = 0;
795 	}
796 
797 	/*
798 	 * Okay, now see if we can stuff it in the buffer as-is.
799 	 */
800 	max_headroom = LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr);
801 
802 	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
803 	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
804 		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
805 		if (!new_skb) {
806 			ip_rt_put(rt);
807 			dev->stats.tx_dropped++;
808 			dev_kfree_skb(skb);
809 			return NETDEV_TX_OK;
810 		}
811 		if (skb->sk)
812 			skb_set_owner_w(new_skb, skb->sk);
813 		dev_kfree_skb(skb);
814 		skb = new_skb;
815 		iph6 = ipv6_hdr(skb);
816 	}
817 
818 	skb->transport_header = skb->network_header;
819 	skb_push(skb, sizeof(struct iphdr));
820 	skb_reset_network_header(skb);
821 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
822 	IPCB(skb)->flags = 0;
823 	skb_dst_drop(skb);
824 	skb_dst_set(skb, &rt->dst);
825 
826 	/*
827 	 *	Push down and install the IPIP header.
828 	 */
829 
830 	iph 			=	ip_hdr(skb);
831 	iph->version		=	4;
832 	iph->ihl		=	sizeof(struct iphdr)>>2;
833 	iph->frag_off		=	df;
834 	iph->protocol		=	IPPROTO_IPV6;
835 	iph->tos		=	INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
836 	iph->daddr		=	fl4.daddr;
837 	iph->saddr		=	fl4.saddr;
838 
839 	if ((iph->ttl = tiph->ttl) == 0)
840 		iph->ttl	=	iph6->hop_limit;
841 
842 	nf_reset(skb);
843 	tstats = this_cpu_ptr(dev->tstats);
844 	__IPTUNNEL_XMIT(tstats, &dev->stats);
845 	return NETDEV_TX_OK;
846 
847 tx_error_icmp:
848 	dst_link_failure(skb);
849 tx_error:
850 	dev->stats.tx_errors++;
851 	dev_kfree_skb(skb);
852 	return NETDEV_TX_OK;
853 }
854 
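/*
 * Derive link parameters from the underlying device: route towards the
 * configured remote endpoint (or fall back to parms.link), mark the
 * device point-to-point when a remote is set, and size hard_header_len
 * and mtu for the extra IPv4 header, clamping the mtu to IPV6_MIN_MTU.
 */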
855 static void ipip6_tunnel_bind_dev(struct net_device *dev)
856 {
857 	struct net_device *tdev = NULL;
858 	struct ip_tunnel *tunnel;
859 	const struct iphdr *iph;
860 	struct flowi4 fl4;
861 
862 	tunnel = netdev_priv(dev);
863 	iph = &tunnel->parms.iph;
864 
865 	if (iph->daddr) {
866 		struct rtable *rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
867 							  iph->daddr, iph->saddr,
868 							  0, 0,
869 							  IPPROTO_IPV6,
870 							  RT_TOS(iph->tos),
871 							  tunnel->parms.link);
872 
873 		if (!IS_ERR(rt)) {
874 			tdev = rt->dst.dev;
875 			ip_rt_put(rt);
876 		}
877 		dev->flags |= IFF_POINTOPOINT;
878 	}
879 
880 	if (!tdev && tunnel->parms.link)
881 		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
882 
883 	if (tdev) {
884 		dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
885 		dev->mtu = tdev->mtu - sizeof(struct iphdr);
886 		if (dev->mtu < IPV6_MIN_MTU)
887 			dev->mtu = IPV6_MIN_MTU;
888 	}
889 	dev->iflink = tunnel->parms.link;
890 }
891 
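/*
 * ioctl interface used by userspace tunnel configuration tools (e.g.
 * iproute2): SIOC{GET,ADD,CHG,DEL}TUNNEL for tunnel parameters,
 * SIOC{GET,ADD,DEL,CHG}PRL for the ISATAP potential router list and,
 * with CONFIG_IPV6_SIT_6RD, SIOC{GET,ADD,CHG,DEL}6RD for 6rd setup.
 */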
892 static int
893 ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
894 {
895 	int err = 0;
896 	struct ip_tunnel_parm p;
897 	struct ip_tunnel_prl prl;
898 	struct ip_tunnel *t;
899 	struct net *net = dev_net(dev);
900 	struct sit_net *sitn = net_generic(net, sit_net_id);
901 #ifdef CONFIG_IPV6_SIT_6RD
902 	struct ip_tunnel_6rd ip6rd;
903 #endif
904 
905 	switch (cmd) {
906 	case SIOCGETTUNNEL:
907 #ifdef CONFIG_IPV6_SIT_6RD
908 	case SIOCGET6RD:
909 #endif
910 		t = NULL;
911 		if (dev == sitn->fb_tunnel_dev) {
912 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
913 				err = -EFAULT;
914 				break;
915 			}
916 			t = ipip6_tunnel_locate(net, &p, 0);
917 		}
918 		if (t == NULL)
919 			t = netdev_priv(dev);
920 
921 		err = -EFAULT;
922 		if (cmd == SIOCGETTUNNEL) {
923 			memcpy(&p, &t->parms, sizeof(p));
924 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p,
925 					 sizeof(p)))
926 				goto done;
927 #ifdef CONFIG_IPV6_SIT_6RD
928 		} else {
929 			ip6rd.prefix = t->ip6rd.prefix;
930 			ip6rd.relay_prefix = t->ip6rd.relay_prefix;
931 			ip6rd.prefixlen = t->ip6rd.prefixlen;
932 			ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen;
933 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &ip6rd,
934 					 sizeof(ip6rd)))
935 				goto done;
936 #endif
937 		}
938 		err = 0;
939 		break;
940 
941 	case SIOCADDTUNNEL:
942 	case SIOCCHGTUNNEL:
943 		err = -EPERM;
944 		if (!capable(CAP_NET_ADMIN))
945 			goto done;
946 
947 		err = -EFAULT;
948 		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
949 			goto done;
950 
951 		err = -EINVAL;
952 		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPV6 ||
953 		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
954 			goto done;
955 		if (p.iph.ttl)
956 			p.iph.frag_off |= htons(IP_DF);
957 
958 		t = ipip6_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
959 
960 		if (dev != sitn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
961 			if (t != NULL) {
962 				if (t->dev != dev) {
963 					err = -EEXIST;
964 					break;
965 				}
966 			} else {
967 				if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
968 				    (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
969 					err = -EINVAL;
970 					break;
971 				}
972 				t = netdev_priv(dev);
973 				ipip6_tunnel_unlink(sitn, t);
974 				synchronize_net();
975 				t->parms.iph.saddr = p.iph.saddr;
976 				t->parms.iph.daddr = p.iph.daddr;
977 				memcpy(dev->dev_addr, &p.iph.saddr, 4);
978 				memcpy(dev->broadcast, &p.iph.daddr, 4);
979 				ipip6_tunnel_link(sitn, t);
980 				netdev_state_change(dev);
981 			}
982 		}
983 
984 		if (t) {
985 			err = 0;
986 			if (cmd == SIOCCHGTUNNEL) {
987 				t->parms.iph.ttl = p.iph.ttl;
988 				t->parms.iph.tos = p.iph.tos;
989 				if (t->parms.link != p.link) {
990 					t->parms.link = p.link;
991 					ipip6_tunnel_bind_dev(dev);
992 					netdev_state_change(dev);
993 				}
994 			}
995 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
996 				err = -EFAULT;
997 		} else
998 			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
999 		break;
1000 
1001 	case SIOCDELTUNNEL:
1002 		err = -EPERM;
1003 		if (!capable(CAP_NET_ADMIN))
1004 			goto done;
1005 
1006 		if (dev == sitn->fb_tunnel_dev) {
1007 			err = -EFAULT;
1008 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1009 				goto done;
1010 			err = -ENOENT;
1011 			if ((t = ipip6_tunnel_locate(net, &p, 0)) == NULL)
1012 				goto done;
1013 			err = -EPERM;
1014 			if (t == netdev_priv(sitn->fb_tunnel_dev))
1015 				goto done;
1016 			dev = t->dev;
1017 		}
1018 		unregister_netdevice(dev);
1019 		err = 0;
1020 		break;
1021 
1022 	case SIOCGETPRL:
1023 		err = -EINVAL;
1024 		if (dev == sitn->fb_tunnel_dev)
1025 			goto done;
1026 		err = -ENOENT;
1027 		if (!(t = netdev_priv(dev)))
1028 			goto done;
1029 		err = ipip6_tunnel_get_prl(t, ifr->ifr_ifru.ifru_data);
1030 		break;
1031 
1032 	case SIOCADDPRL:
1033 	case SIOCDELPRL:
1034 	case SIOCCHGPRL:
1035 		err = -EPERM;
1036 		if (!capable(CAP_NET_ADMIN))
1037 			goto done;
1038 		err = -EINVAL;
1039 		if (dev == sitn->fb_tunnel_dev)
1040 			goto done;
1041 		err = -EFAULT;
1042 		if (copy_from_user(&prl, ifr->ifr_ifru.ifru_data, sizeof(prl)))
1043 			goto done;
1044 		err = -ENOENT;
1045 		if (!(t = netdev_priv(dev)))
1046 			goto done;
1047 
1048 		switch (cmd) {
1049 		case SIOCDELPRL:
1050 			err = ipip6_tunnel_del_prl(t, &prl);
1051 			break;
1052 		case SIOCADDPRL:
1053 		case SIOCCHGPRL:
1054 			err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL);
1055 			break;
1056 		}
1057 		netdev_state_change(dev);
1058 		break;
1059 
1060 #ifdef CONFIG_IPV6_SIT_6RD
1061 	case SIOCADD6RD:
1062 	case SIOCCHG6RD:
1063 	case SIOCDEL6RD:
1064 		err = -EPERM;
1065 		if (!capable(CAP_NET_ADMIN))
1066 			goto done;
1067 
1068 		err = -EFAULT;
1069 		if (copy_from_user(&ip6rd, ifr->ifr_ifru.ifru_data,
1070 				   sizeof(ip6rd)))
1071 			goto done;
1072 
1073 		t = netdev_priv(dev);
1074 
1075 		if (cmd != SIOCDEL6RD) {
1076 			struct in6_addr prefix;
1077 			__be32 relay_prefix;
1078 
1079 			err = -EINVAL;
1080 			if (ip6rd.relay_prefixlen > 32 ||
1081 			    ip6rd.prefixlen + (32 - ip6rd.relay_prefixlen) > 64)
1082 				goto done;
1083 
1084 			ipv6_addr_prefix(&prefix, &ip6rd.prefix,
1085 					 ip6rd.prefixlen);
1086 			if (!ipv6_addr_equal(&prefix, &ip6rd.prefix))
1087 				goto done;
1088 			if (ip6rd.relay_prefixlen)
1089 				relay_prefix = ip6rd.relay_prefix &
1090 					       htonl(0xffffffffUL <<
1091 						     (32 - ip6rd.relay_prefixlen));
1092 			else
1093 				relay_prefix = 0;
1094 			if (relay_prefix != ip6rd.relay_prefix)
1095 				goto done;
1096 
1097 			t->ip6rd.prefix = prefix;
1098 			t->ip6rd.relay_prefix = relay_prefix;
1099 			t->ip6rd.prefixlen = ip6rd.prefixlen;
1100 			t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen;
1101 		} else
1102 			ipip6_tunnel_clone_6rd(dev, sitn);
1103 
1104 		err = 0;
1105 		break;
1106 #endif
1107 
1108 	default:
1109 		err = -EINVAL;
1110 	}
1111 
1112 done:
1113 	return err;
1114 }
1115 
1116 static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1117 {
1118 	if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr))
1119 		return -EINVAL;
1120 	dev->mtu = new_mtu;
1121 	return 0;
1122 }
1123 
1124 static const struct net_device_ops ipip6_netdev_ops = {
1125 	.ndo_uninit	= ipip6_tunnel_uninit,
1126 	.ndo_start_xmit	= ipip6_tunnel_xmit,
1127 	.ndo_do_ioctl	= ipip6_tunnel_ioctl,
1128 	.ndo_change_mtu	= ipip6_tunnel_change_mtu,
1129 	.ndo_get_stats	= ipip6_get_stats,
1130 };
1131 
1132 static void ipip6_dev_free(struct net_device *dev)
1133 {
1134 	free_percpu(dev->tstats);
1135 	free_netdev(dev);
1136 }
1137 
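/*
 * Default device parameters for a SIT netdev: ARPHRD_SIT link type, no
 * ARP, and mtu/header room adjusted for the outer IPv4 header.
 */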
1138 static void ipip6_tunnel_setup(struct net_device *dev)
1139 {
1140 	dev->netdev_ops		= &ipip6_netdev_ops;
1141 	dev->destructor 	= ipip6_dev_free;
1142 
1143 	dev->type		= ARPHRD_SIT;
1144 	dev->hard_header_len 	= LL_MAX_HEADER + sizeof(struct iphdr);
1145 	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr);
1146 	dev->flags		= IFF_NOARP;
1147 	dev->priv_flags	       &= ~IFF_XMIT_DST_RELEASE;
1148 	dev->iflink		= 0;
1149 	dev->addr_len		= 4;
1150 	dev->features		|= NETIF_F_NETNS_LOCAL;
1151 	dev->features		|= NETIF_F_LLTX;
1152 }
1153 
1154 static int ipip6_tunnel_init(struct net_device *dev)
1155 {
1156 	struct ip_tunnel *tunnel = netdev_priv(dev);
1157 
1158 	tunnel->dev = dev;
1159 
1160 	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
1161 	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
1162 
1163 	ipip6_tunnel_bind_dev(dev);
1164 	dev->tstats = alloc_percpu(struct pcpu_tstats);
1165 	if (!dev->tstats)
1166 		return -ENOMEM;
1167 
1168 	return 0;
1169 }
1170 
1171 static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
1172 {
1173 	struct ip_tunnel *tunnel = netdev_priv(dev);
1174 	struct iphdr *iph = &tunnel->parms.iph;
1175 	struct net *net = dev_net(dev);
1176 	struct sit_net *sitn = net_generic(net, sit_net_id);
1177 
1178 	tunnel->dev = dev;
1179 	strcpy(tunnel->parms.name, dev->name);
1180 
1181 	iph->version		= 4;
1182 	iph->protocol		= IPPROTO_IPV6;
1183 	iph->ihl		= 5;
1184 	iph->ttl		= 64;
1185 
1186 	dev->tstats = alloc_percpu(struct pcpu_tstats);
1187 	if (!dev->tstats)
1188 		return -ENOMEM;
1189 	dev_hold(dev);
1190 	rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
1191 	return 0;
1192 }
1193 
1194 static struct xfrm_tunnel sit_handler __read_mostly = {
1195 	.handler	=	ipip6_rcv,
1196 	.err_handler	=	ipip6_err,
1197 	.priority	=	1,
1198 };
1199 
1200 static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
1201 {
1202 	int prio;
1203 
1204 	for (prio = 1; prio < 4; prio++) {
1205 		int h;
1206 		for (h = 0; h < HASH_SIZE; h++) {
1207 			struct ip_tunnel *t;
1208 
1209 			t = rtnl_dereference(sitn->tunnels[prio][h]);
1210 			while (t != NULL) {
1211 				unregister_netdevice_queue(t->dev, head);
1212 				t = rtnl_dereference(t->next);
1213 			}
1214 		}
1215 	}
1216 }
1217 
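/*
 * Per-namespace setup: wire up the four hash tables and register the
 * fallback "sit0" device that catches otherwise unmatched traffic.
 */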
1218 static int __net_init sit_init_net(struct net *net)
1219 {
1220 	struct sit_net *sitn = net_generic(net, sit_net_id);
1221 	struct ip_tunnel *t;
1222 	int err;
1223 
1224 	sitn->tunnels[0] = sitn->tunnels_wc;
1225 	sitn->tunnels[1] = sitn->tunnels_l;
1226 	sitn->tunnels[2] = sitn->tunnels_r;
1227 	sitn->tunnels[3] = sitn->tunnels_r_l;
1228 
1229 	sitn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "sit0",
1230 					   ipip6_tunnel_setup);
1231 	if (!sitn->fb_tunnel_dev) {
1232 		err = -ENOMEM;
1233 		goto err_alloc_dev;
1234 	}
1235 	dev_net_set(sitn->fb_tunnel_dev, net);
1236 
1237 	err = ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
1238 	if (err)
1239 		goto err_dev_free;
1240 
1241 	ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
1242 
1243 	if ((err = register_netdev(sitn->fb_tunnel_dev)))
1244 		goto err_reg_dev;
1245 
1246 	t = netdev_priv(sitn->fb_tunnel_dev);
1247 
1248 	strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
1249 	return 0;
1250 
1251 err_reg_dev:
1252 	dev_put(sitn->fb_tunnel_dev);
1253 err_dev_free:
1254 	ipip6_dev_free(sitn->fb_tunnel_dev);
1255 err_alloc_dev:
1256 	return err;
1257 }
1258 
1259 static void __net_exit sit_exit_net(struct net *net)
1260 {
1261 	struct sit_net *sitn = net_generic(net, sit_net_id);
1262 	LIST_HEAD(list);
1263 
1264 	rtnl_lock();
1265 	sit_destroy_tunnels(sitn, &list);
1266 	unregister_netdevice_queue(sitn->fb_tunnel_dev, &list);
1267 	unregister_netdevice_many(&list);
1268 	rtnl_unlock();
1269 }
1270 
1271 static struct pernet_operations sit_net_ops = {
1272 	.init = sit_init_net,
1273 	.exit = sit_exit_net,
1274 	.id   = &sit_net_id,
1275 	.size = sizeof(struct sit_net),
1276 };
1277 
1278 static void __exit sit_cleanup(void)
1279 {
1280 	xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
1281 
1282 	unregister_pernet_device(&sit_net_ops);
1283 	rcu_barrier(); /* Wait for completion of call_rcu()'s */
1284 }
1285 
1286 static int __init sit_init(void)
1287 {
1288 	int err;
1289 
1290 	printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n");
1291 
1292 	err = register_pernet_device(&sit_net_ops);
1293 	if (err < 0)
1294 		return err;
1295 	err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
1296 	if (err < 0) {
1297 		unregister_pernet_device(&sit_net_ops);
1298 		printk(KERN_INFO "sit init: Can't add protocol\n");
1299 	}
1300 	return err;
1301 }
1302 
1303 module_init(sit_init);
1304 module_exit(sit_cleanup);
1305 MODULE_LICENSE("GPL");
1306 MODULE_ALIAS_NETDEV("sit0");
1307