/*
 *	Linux NET3:	IP/IP protocol decoder.
 *
 *	Authors:
 *		Sam Lantinga (slouken@cs.ucdavis.edu)  02/01/95
 *
 *	Fixes:
 *		Alan Cox	:	Merged and made usable non-modular (it's so tiny it's silly as
 *					a module taking up 2 pages).
 *		Alan Cox	: 	Fixed bug with 1.3.18 and IPIP not working (now needs to set skb->h.iph)
 *					to keep ip_forward happy.
 *		Alan Cox	:	More fixes for 1.3.21, and firewall fix. Maybe this will work soon 8).
 *		Kai Schulte	:	Fixed #defines for IP_FIREWALL->FIREWALL
 *              David Woodhouse :       Perform some basic ICMP handling.
 *                                      IPIP Routing without decapsulation.
 *              Carlos Picoto   :       GRE over IP support
 *		Alexey Kuznetsov:	Reworked. Really, now it is a truncated version of ipv4/ip_gre.c.
 *					I do not want to merge them together.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

/* tunnel.c: an IP tunnel driver

	The purpose of this driver is to provide an IP tunnel through
	which you can tunnel network traffic transparently across subnets.

	This was written by looking at Nick Holloway's dummy driver
	Thanks for the great code!

		-Sam Lantinga	(slouken@cs.ucdavis.edu)  02/01/95

	Minor tweaks:
		Cleaned up the code a little and added some pre-1.3.0 tweaks.
		dev->hard_header/hard_header_len changed to use no headers.
		Comments/bracketing tweaked.
		Made the tunnels use dev->name rather than "tunnel:" when reporting errors.
		Added tx_dropped stat

		-Alan Cox	(alan@lxorguk.ukuu.org.uk) 21 March 95

	Reworked:
		Changed to tunnel to the destination gateway in addition to the
			tunnel's pointopoint address
		Almost completely rewritten
		Note:  There is currently no firewall or ICMP handling done.

		-Sam Lantinga	(slouken@cs.ucdavis.edu) 02/13/96

*/
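
/* Typical configuration, for reference (a sketch using iproute2; the
 * addresses below are documentation placeholders, nothing in this file
 * requires them):
 *
 *	ip tunnel add tunl1 mode ipip local 192.0.2.1 remote 198.51.100.1 ttl 64
 *	ip link set tunl1 up
 *	ip addr add 10.0.0.1/30 dev tunl1
 *
 * The module also registers a fallback device, "tunl0", which picks up IPIP
 * traffic that does not match any explicitly configured tunnel.
 */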

/* Things I wish I had known when writing the tunnel driver:

	When the tunnel_xmit() function is called, the skb contains the
	packet to be sent (plus a great deal of extra info), and dev
	contains the tunnel device that _we_ are.

	When we are passed a packet, we are expected to fill in the
	source address with our source IP address.

	What is the proper way to allocate, copy and free a buffer?
	After you allocate it, it is a "0 length" chunk of memory
	starting at zero.  If you want to add headers to the buffer
	later, you'll have to call "skb_reserve(skb, amount)" with
	the amount of memory you want reserved.  Then, you call
	"skb_put(skb, amount)" with the amount of space you want in
	the buffer.  skb_put() returns a pointer to the top (#0) of
	that buffer.  skb->len is set to the amount of space you have
	"allocated" with skb_put().  You can then write up to skb->len
	bytes to that buffer.  If you need more, you can call skb_put()
	again with the additional amount of space you need.  You can
	find out how much more space you can allocate by calling
	"skb_tailroom(skb)".
	Now, to add header space, call "skb_push(skb, header_len)".
	This creates space at the beginning of the buffer and returns
	a pointer to this new space.  If later you need to strip a
	header from a buffer, call "skb_pull(skb, header_len)".
	skb_headroom() will return how much space is left at the top
	of the buffer (before the main data).  Remember, this headroom
	space must be reserved before the skb_put() function is called.
	*/
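
/* A minimal sketch of the pattern described above (illustrative only; this
 * driver does not build skbs this way).  Reserve headroom first, fill the
 * payload with skb_put(), then prepend a header with skb_push():
 *
 *	skb = alloc_skb(header_len + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, header_len);
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 *	hdr = skb_push(skb, header_len);	(hdr points at the new header space)
 */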

/*
   This version of net/ipv4/ipip.c is cloned from net/ipv4/ip_gre.c

   For comments, look at net/ipv4/ip_gre.c --ANK
 */


#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ip_tunnels.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst_metadata.h>

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static unsigned int ipip_net_id __read_mostly;

static int ipip_tunnel_init(struct net_device *dev);
static struct rtnl_link_ops ipip_link_ops __read_mostly;

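/* Handle an ICMP error received for a packet this host sent through an IPIP
 * tunnel: the ICMP payload carries the outer header we transmitted, so look
 * up the tunnel by those addresses, update path-MTU or redirect state where
 * relevant, and record the error time/count consulted by the transmit path.
 */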
static int ipip_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	int err = 0;

	switch (type) {
	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
			/* Impossible event. */
			goto out;
		default:
			/* All others are translated to HOST_UNREACH.
			 * RFC 2003 contains "deep thoughts" about NET_UNREACH;
			 * I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			goto out;
		break;

	case ICMP_REDIRECT:
		break;

	default:
		goto out;
	}

	t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
			     iph->daddr, iph->saddr, 0);
	if (!t) {
		err = -ENOENT;
		goto out;
	}

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, net, info, t->parms.link, 0,
				 iph->protocol, 0);
		goto out;
	}

	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0);
		goto out;
	}

	if (t->parms.iph.daddr == 0) {
		err = -ENOENT;
		goto out;
	}

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

out:
	return err;
}

static const struct tnl_ptk_info ipip_tpi = {
	/* no tunnel info required for ipip. */
	.proto = htons(ETH_P_IP),
};

#if IS_ENABLED(CONFIG_MPLS)
static const struct tnl_ptk_info mplsip_tpi = {
	/* no tunnel info required for mplsip. */
	.proto = htons(ETH_P_MPLS_UC),
};
#endif

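/* Common receive path for IPIP and, when enabled, MPLS-in-IP: find the
 * tunnel matching the outer addresses, strip the outer IPv4 header and hand
 * the inner packet to ip_tunnel_rcv(), collecting tunnel metadata first when
 * the device is in collect_md mode.  Returns -1 if no tunnel matches, so the
 * packet falls through to the protocol's default handling.
 */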
static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
	struct metadata_dst *tun_dst = NULL;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
			iph->saddr, iph->daddr, 0);
	if (tunnel) {
		const struct tnl_ptk_info *tpi;

		if (tunnel->parms.iph.protocol != ipproto &&
		    tunnel->parms.iph.protocol != 0)
			goto drop;

		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;
#if IS_ENABLED(CONFIG_MPLS)
		if (ipproto == IPPROTO_MPLS)
			tpi = &mplsip_tpi;
		else
#endif
			tpi = &ipip_tpi;
		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
			goto drop;
		if (tunnel->collect_md) {
			tun_dst = ip_tun_rx_dst(skb, 0, 0, 0);
			if (!tun_dst)
				return 0;
		}
		return ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
	}

	return -1;

drop:
	kfree_skb(skb);
	return 0;
}

static int ipip_rcv(struct sk_buff *skb)
{
	return ipip_tunnel_rcv(skb, IPPROTO_IPIP);
}

#if IS_ENABLED(CONFIG_MPLS)
static int mplsip_rcv(struct sk_buff *skb)
{
	return ipip_tunnel_rcv(skb, IPPROTO_MPLS);
}
#endif

/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr  *tiph = &tunnel->parms.iph;
	u8 ipproto;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ipproto = IPPROTO_IPIP;
		break;
#if IS_ENABLED(CONFIG_MPLS)
	case htons(ETH_P_MPLS_UC):
		ipproto = IPPROTO_MPLS;
		break;
#endif
	default:
		goto tx_error;
	}

	if (tiph->protocol != ipproto && tiph->protocol != 0)
		goto tx_error;

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP4))
		goto tx_error;

	skb_set_inner_ipproto(skb, ipproto);

	if (tunnel->collect_md)
		ip_md_tunnel_xmit(skb, dev, ipproto);
	else
		ip_tunnel_xmit(skb, dev, tiph, ipproto);
	return NETDEV_TX_OK;

tx_error:
	kfree_skb(skb);

	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

static bool ipip_tunnel_ioctl_verify_protocol(u8 ipproto)
{
	switch (ipproto) {
	case 0:
	case IPPROTO_IPIP:
#if IS_ENABLED(CONFIG_MPLS)
	case IPPROTO_MPLS:
#endif
		return true;
	}

	return false;
}

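/* Legacy SIOC{ADD,CHG,DEL,GET}TUNNEL ioctl interface: sanity-check the
 * user-supplied parameters (IPv4, no IP options, only DF in frag_off, a
 * protocol this driver understands), clear key/flag fields that plain IPIP
 * does not use, and let the generic ip_tunnel_ioctl() do the work.
 */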
static int
ipip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 ||
		    !ipip_tunnel_ioctl_verify_protocol(p.iph.protocol) ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
			return -EINVAL;
	}

	p.i_key = p.o_key = 0;
	p.i_flags = p.o_flags = 0;
	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

static const struct net_device_ops ipip_netdev_ops = {
	.ndo_init       = ipip_tunnel_init,
	.ndo_uninit     = ip_tunnel_uninit,
	.ndo_start_xmit	= ipip_tunnel_xmit,
	.ndo_do_ioctl	= ipip_tunnel_ioctl,
	.ndo_change_mtu = ip_tunnel_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip_tunnel_get_iflink,
};

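/* netdev features an ipip device advertises; GSO and checksumming of the
 * encapsulated traffic can fall back to software when the underlying device
 * cannot offload them.
 */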
#define IPIP_FEATURES (NETIF_F_SG |		\
		       NETIF_F_FRAGLIST |	\
		       NETIF_F_HIGHDMA |	\
		       NETIF_F_GSO_SOFTWARE |	\
		       NETIF_F_HW_CSUM)

static void ipip_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipip_netdev_ops;

	dev->type		= ARPHRD_TUNNEL;
	dev->flags		= IFF_NOARP;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_LLTX;
	netif_keep_dst(dev);

	dev->features		|= IPIP_FEATURES;
	dev->hw_features	|= IPIP_FEATURES;
	ip_tunnel_setup(dev, ipip_net_id);
}

static int ipip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	tunnel->tun_hlen = 0;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
	return ip_tunnel_init(dev);
}

static int ipip_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u8 proto;

	if (!data || !data[IFLA_IPTUN_PROTO])
		return 0;

	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
	if (proto != IPPROTO_IPIP && proto != IPPROTO_MPLS && proto != 0)
		return -EINVAL;

	return 0;
}

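/* Translate IFLA_IPTUN_* netlink attributes into an ip_tunnel_parm, starting
 * from the defaults for a plain IPIP tunnel (IPv4, IPPROTO_IPIP, no options,
 * DF set unless path MTU discovery is explicitly disabled).
 */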
static void ipip_netlink_parms(struct nlattr *data[],
			       struct ip_tunnel_parm *parms, bool *collect_md,
			       __u32 *fwmark)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.version = 4;
	parms->iph.protocol = IPPROTO_IPIP;
	parms->iph.ihl = 5;
	*collect_md = false;

	if (!data)
		return;

	if (data[IFLA_IPTUN_LINK])
		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);

	if (data[IFLA_IPTUN_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_IPTUN_LOCAL]);

	if (data[IFLA_IPTUN_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_IPTUN_REMOTE]);

	if (data[IFLA_IPTUN_TTL]) {
		parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]);
		if (parms->iph.ttl)
			parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_IPTUN_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_IPTUN_TOS]);

	if (data[IFLA_IPTUN_PROTO])
		parms->iph.protocol = nla_get_u8(data[IFLA_IPTUN_PROTO]);

	if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);

	if (data[IFLA_IPTUN_COLLECT_METADATA])
		*collect_md = true;

	if (data[IFLA_IPTUN_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
}

/* Returns true when any IFLA_IPTUN_ENCAP_* attribute is present in the
 * netlink message.
 */
static bool ipip_netlink_encap_parms(struct nlattr *data[],
				     struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
	}

	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
	}

	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
	}

	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
	}

	return ret;
}

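/* rtnl_link_ops hooks: create or reconfigure a tunnel from netlink
 * attributes.  Any encapsulation parameters (e.g. FOU/GUE) are applied
 * first; collect_md can only be selected when the device is created, so
 * changelink rejects it.
 */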
static int ipip_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = 0;

	if (ipip_netlink_encap_parms(data, &ipencap)) {
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipip_netlink_parms(data, &p, &t->collect_md, &fwmark);
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
			   struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	bool collect_md;
	__u32 fwmark = t->fwmark;

	if (ipip_netlink_encap_parms(data, &ipencap)) {
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipip_netlink_parms(data, &p, &collect_md, &fwmark);
	if (collect_md)
		return -EINVAL;

	if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) ||
	    (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
		return -EINVAL;

	return ip_tunnel_changelink(dev, tb, &p, fwmark);
}

static size_t ipip_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(4) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(4) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_TOS */
		nla_total_size(1) +
		/* IFLA_IPTUN_PROTO */
		nla_total_size(1) +
		/* IFLA_IPTUN_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_IPTUN_FWMARK */
		nla_total_size(4) +
		0;
}

static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) ||
	    nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->iph.protocol) ||
	    nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
		       !!(parm->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, tunnel->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
			tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
			 tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
			 tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
			tunnel->encap.flags))
		goto nla_put_failure;

	if (tunnel->collect_md)
		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
			goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL]		= { .type = NLA_U32 },
	[IFLA_IPTUN_REMOTE]		= { .type = NLA_U32 },
	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
	[IFLA_IPTUN_TOS]		= { .type = NLA_U8 },
	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
	[IFLA_IPTUN_PMTUDISC]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_IPTUN_FWMARK]		= { .type = NLA_U32 },
};

static struct rtnl_link_ops ipip_link_ops __read_mostly = {
	.kind		= "ipip",
	.maxtype	= IFLA_IPTUN_MAX,
	.policy		= ipip_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipip_tunnel_setup,
	.validate	= ipip_tunnel_validate,
	.newlink	= ipip_newlink,
	.changelink	= ipip_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipip_get_size,
	.fill_info	= ipip_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

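/* Protocol hooks for decapsulation: ipip_rcv()/mplsip_rcv() pick up inbound
 * IPPROTO_IPIP and IPPROTO_MPLS packets and ipip_err() handles the matching
 * ICMP errors.  Registration goes through the xfrm4 tunnel framework, which
 * lets other users of the same protocol numbers (IPsec tunnel mode among
 * them) coexist with this driver.
 */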
static struct xfrm_tunnel ipip_handler __read_mostly = {
	.handler	=	ipip_rcv,
	.err_handler	=	ipip_err,
	.priority	=	1,
};

#if IS_ENABLED(CONFIG_MPLS)
static struct xfrm_tunnel mplsip_handler __read_mostly = {
	.handler	=	mplsip_rcv,
	.err_handler	=	ipip_err,
	.priority	=	1,
};
#endif

static int __net_init ipip_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipip_net_id, &ipip_link_ops, "tunl0");
}

static void __net_exit ipip_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, ipip_net_id, &ipip_link_ops);
}

static struct pernet_operations ipip_net_ops = {
	.init = ipip_init_net,
	.exit_batch = ipip_exit_batch_net,
	.id   = &ipip_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __init ipip_init(void)
{
	int err;

	pr_info("ipip: IPv4 and MPLS over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipip_net_ops);
	if (err < 0)
		return err;
	err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
	if (err < 0) {
		pr_info("%s: can't register tunnel\n", __func__);
		goto xfrm_tunnel_ipip_failed;
	}
#if IS_ENABLED(CONFIG_MPLS)
	err = xfrm4_tunnel_register(&mplsip_handler, AF_MPLS);
	if (err < 0) {
		pr_info("%s: can't register tunnel\n", __func__);
		goto xfrm_tunnel_mplsip_failed;
	}
#endif
	err = rtnl_link_register(&ipip_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

out:
	return err;

rtnl_link_failed:
#if IS_ENABLED(CONFIG_MPLS)
	xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS);
xfrm_tunnel_mplsip_failed:
#endif
	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
xfrm_tunnel_ipip_failed:
	unregister_pernet_device(&ipip_net_ops);
	goto out;
}

static void __exit ipip_fini(void)
{
	rtnl_link_unregister(&ipip_link_ops);
	if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET))
		pr_info("%s: can't deregister tunnel\n", __func__);
#if IS_ENABLED(CONFIG_MPLS)
	if (xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS))
		pr_info("%s: can't deregister tunnel\n", __func__);
#endif
	unregister_pernet_device(&ipip_net_ops);
}

module_init(ipip_init);
module_exit(ipip_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ipip");
MODULE_ALIAS_NETDEV("tunl0");