/*
 *	Linux NET3:	IP/IP protocol decoder.
 *
 *	Authors:
 *		Sam Lantinga (slouken@cs.ucdavis.edu)  02/01/95
 *
 *	Fixes:
 *		Alan Cox	:	Merged and made usable non-modular (it's so tiny it's silly as
 *					a module taking up 2 pages).
 *		Alan Cox	:	Fixed bug with 1.3.18 and IPIP not working (now needs to set skb->h.iph)
 *					to keep ip_forward happy.
 *		Alan Cox	:	More fixes for 1.3.21, and firewall fix. Maybe this will work soon 8).
 *		Kai Schulte	:	Fixed #defines for IP_FIREWALL->FIREWALL
 *		David Woodhouse	:	Perform some basic ICMP handling.
 *					IPIP Routing without decapsulation.
 *		Carlos Picoto	:	GRE over IP support
 *		Alexey Kuznetsov:	Reworked. Really, it is now a truncated version of ipv4/ip_gre.c.
 *					I do not want to merge them together.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

/* tunnel.c: an IP tunnel driver

	The purpose of this driver is to provide an IP tunnel through
	which you can tunnel network traffic transparently across subnets.

	This was written by looking at Nick Holloway's dummy driver.
	Thanks for the great code!

		-Sam Lantinga	(slouken@cs.ucdavis.edu)  02/01/95

	Minor tweaks:
		Cleaned up the code a little and added some pre-1.3.0 tweaks.
		dev->hard_header/hard_header_len changed to use no headers.
		Comments/bracketing tweaked.
		Made the tunnels use dev->name instead of "tunnel:" when reporting errors.
		Added the tx_dropped stat.

		-Alan Cox	(alan@lxorguk.ukuu.org.uk) 21 March 95

	Reworked:
		Changed to tunnel to the destination gateway in addition to the
			tunnel's pointopoint address.
		Almost completely rewritten.
		Note: there is currently no firewall or ICMP handling done.

		-Sam Lantinga	(slouken@cs.ucdavis.edu) 02/13/96

*/

/* Things I wish I had known when writing the tunnel driver:

	When the tunnel_xmit() function is called, the skb contains the
	packet to be sent (plus a great deal of extra info), and dev
	contains the tunnel device that _we_ are.

	When we are passed a packet, we are expected to fill in the
	source address with our source IP address.

	What is the proper way to allocate, copy and free a buffer?
	After you allocate it, it is a "0 length" chunk of memory
	starting at zero.  If you want to add headers to the buffer
	later, you'll have to call "skb_reserve(skb, amount)" with
	the amount of memory you want reserved.  Then, you call
	"skb_put(skb, amount)" with the amount of space you want in
	the buffer.  skb_put() returns a pointer to the top (#0) of
	that buffer.  skb->len is set to the amount of space you have
	"allocated" with skb_put().  You can then write up to skb->len
	bytes to that buffer.  If you need more, you can call skb_put()
	again with the additional amount of space you need.  You can
	find out how much more space you can allocate by calling
	"skb_tailroom(skb)".
	Now, to add header space, call "skb_push(skb, header_len)".
	This creates space at the beginning of the buffer and returns
	a pointer to this new space.  If later you need to strip a
	header from a buffer, call "skb_pull(skb, header_len)".
	skb_headroom() will return how much space is left at the top
	of the buffer (before the main data).  Remember, this headroom
	space must be reserved before the skb_put() function is called.
	*/
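
/* A minimal sketch of the sequence described above.  Purely illustrative,
 * not used by this driver; "payload" and "payload_len" are hypothetical:
 *
 *	struct sk_buff *skb;
 *	struct iphdr *iph;
 *
 *	skb = alloc_skb(LL_MAX_HEADER + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return NULL;
 *	skb_reserve(skb, LL_MAX_HEADER);
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 *	iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
 *
 * skb_tailroom()/skb_headroom() report how much room is left at the tail
 * and head, and skb_pull(skb, sizeof(*iph)) would strip the header again
 * on receive.
 */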

/*
   This version of net/ipv4/ipip.c is a clone of net/ipv4/ip_gre.c.

   For comments look at net/ipv4/ip_gre.c --ANK
 */


#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ip_tunnels.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
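
/* log_ecn_error can be set at load time or, since the parameter is created
 * with mode 0644, flipped at runtime through sysfs, e.g. (assuming the
 * code is built as the "ipip" module):
 *
 *	modprobe ipip log_ecn_error=0
 *	echo 0 > /sys/module/ipip/parameters/log_ecn_error
 */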

static int ipip_net_id __read_mostly;

static int ipip_tunnel_init(struct net_device *dev);
static struct rtnl_link_ops ipip_link_ops __read_mostly;

static int ipip_err(struct sk_buff *skb, u32 info)
{

/* All the routers (except for Linux) return only
   8 bytes of packet payload. This means that precise relaying of
   ICMP in the real Internet is absolutely infeasible.
 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_tunnel *t;
	int err;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;

	err = -ENOENT;
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
			     iph->daddr, iph->saddr, 0);
	if (!t)
		goto out;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 t->parms.link, 0, IPPROTO_IPIP, 0);
		err = 0;
		goto out;
	}

	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
			      IPPROTO_IPIP, 0);
		err = 0;
		goto out;
	}

	if (t->parms.iph.daddr == 0)
		goto out;

	err = 0;
	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

out:
	return err;
}

static const struct tnl_ptk_info tpi = {
	/* no tunnel info required for ipip. */
	.proto = htons(ETH_P_IP),
};

static int ipip_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
			iph->saddr, iph->daddr, 0);
	if (tunnel) {
		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;
		if (iptunnel_pull_header(skb, 0, tpi.proto))
			goto drop;
		return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, log_ecn_error);
	}

	return -1;

drop:
	kfree_skb(skb);
	return 0;
}

/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr  *tiph = &tunnel->parms.iph;

	if (unlikely(skb->protocol != htons(ETH_P_IP)))
		goto tx_error;

	skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
	if (IS_ERR(skb))
		goto out;

	skb_set_inner_ipproto(skb, IPPROTO_IPIP);

	ip_tunnel_xmit(skb, dev, tiph, tiph->protocol);
	return NETDEV_TX_OK;

tx_error:
	kfree_skb(skb);
out:
	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

static int
ipip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
			return -EINVAL;
	}

	p.i_key = p.o_key = 0;
	p.i_flags = p.o_flags = 0;
	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}
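
/* Userspace reaches this handler through the classic tunnel ioctls on an
 * AF_INET socket, which is roughly what "ip tunnel add" does.  A hedged
 * sketch (error handling omitted, local_addr/remote_addr hypothetical):
 *
 *	struct ip_tunnel_parm p = {};
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strcpy(p.name, "tunl1");
 *	p.iph.version = 4;
 *	p.iph.ihl = 5;
 *	p.iph.protocol = IPPROTO_IPIP;
 *	p.iph.saddr = local_addr;
 *	p.iph.daddr = remote_addr;
 *	strcpy(ifr.ifr_name, "tunl0");
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);
 */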

static const struct net_device_ops ipip_netdev_ops = {
	.ndo_init       = ipip_tunnel_init,
	.ndo_uninit     = ip_tunnel_uninit,
	.ndo_start_xmit	= ipip_tunnel_xmit,
	.ndo_do_ioctl	= ipip_tunnel_ioctl,
	.ndo_change_mtu = ip_tunnel_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip_tunnel_get_iflink,
};

#define IPIP_FEATURES (NETIF_F_SG |		\
		       NETIF_F_FRAGLIST |	\
		       NETIF_F_HIGHDMA |	\
		       NETIF_F_GSO_SOFTWARE |	\
		       NETIF_F_HW_CSUM)

static void ipip_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipip_netdev_ops;

	dev->type		= ARPHRD_TUNNEL;
	dev->flags		= IFF_NOARP;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_LLTX;
	netif_keep_dst(dev);

	dev->features		|= IPIP_FEATURES;
	dev->hw_features	|= IPIP_FEATURES;
	ip_tunnel_setup(dev, ipip_net_id);
}

static int ipip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	tunnel->tun_hlen = 0;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
	tunnel->parms.iph.protocol = IPPROTO_IPIP;
	return ip_tunnel_init(dev);
}

static void ipip_netlink_parms(struct nlattr *data[],
			       struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.version = 4;
	parms->iph.protocol = IPPROTO_IPIP;
	parms->iph.ihl = 5;

	if (!data)
		return;

	if (data[IFLA_IPTUN_LINK])
		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);

	if (data[IFLA_IPTUN_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_IPTUN_LOCAL]);

	if (data[IFLA_IPTUN_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_IPTUN_REMOTE]);

	if (data[IFLA_IPTUN_TTL]) {
		parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]);
		if (parms->iph.ttl)
			parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_IPTUN_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_IPTUN_TOS]);

	if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);
}
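
/* The same parameters are reachable over rtnetlink; with iproute2 this is
 * roughly (addresses illustrative):
 *
 *	ip link add name ipip0 type ipip \
 *		local 192.0.2.1 remote 198.51.100.1 ttl 64
 *
 * A nonzero TTL forces the DF bit on the outer header, as does an absent
 * or nonzero IFLA_IPTUN_PMTUDISC attribute.
 */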

/* This function returns true when ENCAP attributes are present in the
 * netlink message.
 */
static bool ipip_netlink_encap_parms(struct nlattr *data[],
				     struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
	}

	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
	}

	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
	}

	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
	}

	return ret;
}
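
/* The ENCAP attributes select an optional UDP encapsulation (e.g. FOU or
 * GUE) for the tunnel; with iproute2 this corresponds to something like
 * (addresses and port illustrative):
 *
 *	ip link add name ipip0 type ipip \
 *		local 192.0.2.1 remote 198.51.100.1 \
 *		encap fou encap-sport auto encap-dport 5555
 */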

static int ipip_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;

	if (ipip_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipip_netlink_parms(data, &p);
	return ip_tunnel_newlink(dev, tb, &p);
}

static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
			   struct nlattr *data[])
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;

	if (ipip_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipip_netlink_parms(data, &p);

	if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) ||
	    (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
		return -EINVAL;

	return ip_tunnel_changelink(dev, tb, &p);
}

static size_t ipip_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(4) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(4) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_TOS */
		nla_total_size(1) +
		/* IFLA_IPTUN_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		0;
}

static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) ||
	    nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
	    nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
		       !!(parm->iph.frag_off & htons(IP_DF))))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
			tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
			 tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
			 tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
			tunnel->encap.flags))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL]		= { .type = NLA_U32 },
	[IFLA_IPTUN_REMOTE]		= { .type = NLA_U32 },
	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
	[IFLA_IPTUN_TOS]		= { .type = NLA_U8 },
	[IFLA_IPTUN_PMTUDISC]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
};

static struct rtnl_link_ops ipip_link_ops __read_mostly = {
	.kind		= "ipip",
	.maxtype	= IFLA_IPTUN_MAX,
	.policy		= ipip_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipip_tunnel_setup,
	.newlink	= ipip_newlink,
	.changelink	= ipip_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipip_get_size,
	.fill_info	= ipip_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct xfrm_tunnel ipip_handler __read_mostly = {
	.handler	=	ipip_rcv,
	.err_handler	=	ipip_err,
	.priority	=	1,
};

static int __net_init ipip_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipip_net_id, &ipip_link_ops, "tunl0");
}

static void __net_exit ipip_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
	ip_tunnel_delete_net(itn, &ipip_link_ops);
}

static struct pernet_operations ipip_net_ops = {
	.init = ipip_init_net,
	.exit = ipip_exit_net,
	.id   = &ipip_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __init ipip_init(void)
{
	int err;

	pr_info("ipip: IPv4 over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipip_net_ops);
	if (err < 0)
		return err;
	err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
	if (err < 0) {
		pr_info("%s: can't register tunnel\n", __func__);
		goto xfrm_tunnel_failed;
	}
	err = rtnl_link_register(&ipip_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

out:
	return err;

rtnl_link_failed:
	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
xfrm_tunnel_failed:
	unregister_pernet_device(&ipip_net_ops);
	goto out;
}

static void __exit ipip_fini(void)
{
	rtnl_link_unregister(&ipip_link_ops);
	if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET))
		pr_info("%s: can't deregister tunnel\n", __func__);

	unregister_pernet_device(&ipip_net_ops);
}

module_init(ipip_init);
module_exit(ipip_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ipip");
MODULE_ALIAS_NETDEV("tunl0");