/* xref: /openbmc/linux/net/ipv4/ipip.c (revision 8e694cd2) */
/*
 *	Linux NET3:	IP/IP protocol decoder.
 *
 *	Authors:
 *		Sam Lantinga (slouken@cs.ucdavis.edu)  02/01/95
 *
 *	Fixes:
 *		Alan Cox	:	Merged and made usable non-modular (it's so tiny it's silly as
 *					a module taking up 2 pages).
 *		Alan Cox	: 	Fixed bug with 1.3.18 and IPIP not working (now needs to set skb->h.iph)
 *					to keep ip_forward happy.
 *		Alan Cox	:	More fixes for 1.3.21, and firewall fix. Maybe this will work soon 8).
 *		Kai Schulte	:	Fixed #defines for IP_FIREWALL->FIREWALL
 *              David Woodhouse :       Perform some basic ICMP handling.
 *                                      IPIP Routing without decapsulation.
 *              Carlos Picoto   :       GRE over IP support
 *		Alexey Kuznetsov:	Reworked. Really, now it is a truncated version of ipv4/ip_gre.c.
 *					I do not want to merge them together.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

/* tunnel.c: an IP tunnel driver

	The purpose of this driver is to provide an IP tunnel through
	which you can tunnel network traffic transparently across subnets.

	This was written by looking at Nick Holloway's dummy driver.
	Thanks for the great code!

		-Sam Lantinga	(slouken@cs.ucdavis.edu)  02/01/95

	Minor tweaks:
		Cleaned up the code a little and added some pre-1.3.0 tweaks.
		dev->hard_header/hard_header_len changed to use no headers.
		Comments/bracketing tweaked.
		Made the tunnels use dev->name rather than "tunnel:" when reporting errors.
		Added tx_dropped stat

		-Alan Cox	(alan@lxorguk.ukuu.org.uk) 21 March 95

	Reworked:
		Changed to tunnel to the destination gateway in addition to the
			tunnel's pointopoint address
		Almost completely rewritten
		Note:  There is currently no firewall or ICMP handling done.

		-Sam Lantinga	(slouken@cs.ucdavis.edu) 02/13/96

*/

/* Things I wish I had known when writing the tunnel driver:

	When the tunnel_xmit() function is called, the skb contains the
	packet to be sent (plus a great deal of extra info), and dev
	contains the tunnel device that _we_ are.

	When we are passed a packet, we are expected to fill in the
	source address with our source IP address.

	What is the proper way to allocate, copy and free a buffer?
	After you allocate it, it is a "0 length" chunk of memory
	starting at zero.  If you want to add headers to the buffer
	later, you'll have to call "skb_reserve(skb, amount)" with
	the amount of memory you want reserved.  Then, you call
	"skb_put(skb, amount)" with the amount of space you want in
	the buffer.  skb_put() returns a pointer to the top (#0) of
	that buffer.  skb->len is set to the amount of space you have
	"allocated" with skb_put().  You can then write up to skb->len
	bytes to that buffer.  If you need more, you can call skb_put()
	again with the additional amount of space you need.  You can
	find out how much more space you can allocate by calling
	"skb_tailroom(skb)".
	Now, to add header space, call "skb_push(skb, header_len)".
	This creates space at the beginning of the buffer and returns
	a pointer to this new space.  If later you need to strip a
	header from a buffer, call "skb_pull(skb, header_len)".
	skb_headroom() will return how much space is left at the top
	of the buffer (before the main data).  Remember, this headroom
	space must be reserved before the skb_put() function is called.
	*/
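
/*
 * A minimal, illustrative sketch of the pattern described above -- reserve
 * headroom first, skb_put() the payload, then skb_push() the outer header.
 * The helper below is hypothetical and is kept out of the build with #if 0;
 * it is not part of this driver.
 */
#if 0
static struct sk_buff *example_build_skb(unsigned int header_len,
					 unsigned int payload_len)
{
	struct sk_buff *skb;
	u8 *data;

	skb = alloc_skb(header_len + payload_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* Reserve headroom before any data has been added. */
	skb_reserve(skb, header_len);

	/* skb_put() extends the data area; skb->len grows by payload_len. */
	data = skb_put(skb, payload_len);
	memset(data, 0, payload_len);

	/* skb_push() claims part of the reserved headroom for a header. */
	skb_push(skb, header_len);

	return skb;
}
#endif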

/*
   This version of net/ipv4/ipip.c is cloned from net/ipv4/ip_gre.c

   For comments look at net/ipv4/ip_gre.c --ANK
 */


#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ip_tunnels.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static int ipip_net_id __read_mostly;

static int ipip_tunnel_init(struct net_device *dev);
static struct rtnl_link_ops ipip_link_ops __read_mostly;

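/*
 * ICMP error handler for ipip tunnels: look up the tunnel the offending
 * packet belongs to, update the path MTU on ICMP_FRAG_NEEDED, handle
 * ICMP_REDIRECT, and otherwise just note the error in the tunnel's
 * err_count/err_time.
 */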
static int ipip_err(struct sk_buff *skb, u32 info)
{

/* All the routers (except for Linux) return only
   8 bytes of packet payload. This means that precise relaying of
   ICMP in the real Internet is absolutely infeasible.
 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_tunnel *t;
	int err;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;

	err = -ENOENT;
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
			     iph->daddr, iph->saddr, 0);
	if (!t)
		goto out;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 t->parms.link, 0, iph->protocol, 0);
		err = 0;
		goto out;
	}

	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
			      iph->protocol, 0);
		err = 0;
		goto out;
	}

	if (t->parms.iph.daddr == 0)
		goto out;

	err = 0;
	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

out:
	return err;
}

static const struct tnl_ptk_info ipip_tpi = {
	/* no tunnel info required for ipip. */
	.proto = htons(ETH_P_IP),
};

#if IS_ENABLED(CONFIG_MPLS)
static const struct tnl_ptk_info mplsip_tpi = {
	/* no tunnel info required for mplsip. */
	.proto = htons(ETH_P_MPLS_UC),
};
#endif

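/*
 * Common receive path for IPv4-in-IPv4 and (when enabled) MPLS-in-IPv4:
 * find the tunnel matching the outer addresses, check the xfrm input
 * policy, strip the outer IP header and hand the packet to ip_tunnel_rcv().
 * A return of -1 tells the caller that no tunnel claimed the packet.
 */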
static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
			iph->saddr, iph->daddr, 0);
	if (tunnel) {
		const struct tnl_ptk_info *tpi;

		if (tunnel->parms.iph.protocol != ipproto &&
		    tunnel->parms.iph.protocol != 0)
			goto drop;

		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;
#if IS_ENABLED(CONFIG_MPLS)
		if (ipproto == IPPROTO_MPLS)
			tpi = &mplsip_tpi;
		else
#endif
			tpi = &ipip_tpi;
		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
			goto drop;
		return ip_tunnel_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
	}

	return -1;

drop:
	kfree_skb(skb);
	return 0;
}

static int ipip_rcv(struct sk_buff *skb)
{
	return ipip_tunnel_rcv(skb, IPPROTO_IPIP);
}

#if IS_ENABLED(CONFIG_MPLS)
static int mplsip_rcv(struct sk_buff *skb)
{
	return ipip_tunnel_rcv(skb, IPPROTO_MPLS);
}
#endif

/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr  *tiph = &tunnel->parms.iph;
	u8 ipproto;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ipproto = IPPROTO_IPIP;
		break;
#if IS_ENABLED(CONFIG_MPLS)
	case htons(ETH_P_MPLS_UC):
		ipproto = IPPROTO_MPLS;
		break;
#endif
	default:
		goto tx_error;
	}

	if (tiph->protocol != ipproto && tiph->protocol != 0)
		goto tx_error;

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP4))
		goto tx_error;

	skb_set_inner_ipproto(skb, ipproto);

	ip_tunnel_xmit(skb, dev, tiph, ipproto);
	return NETDEV_TX_OK;

tx_error:
	kfree_skb(skb);

	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

static bool ipip_tunnel_ioctl_verify_protocol(u8 ipproto)
{
	switch (ipproto) {
	case 0:
	case IPPROTO_IPIP:
#if IS_ENABLED(CONFIG_MPLS)
	case IPPROTO_MPLS:
#endif
		return true;
	}

	return false;
}

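/*
 * Legacy SIOC{ADD,CHG,DEL,GET}TUNNEL ioctl handler: sanity-check the
 * parameters for add/change requests, clear the key and flag fields that
 * ipip does not use, and let the generic ip_tunnel_ioctl() do the work.
 */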
static int
ipip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 ||
		    !ipip_tunnel_ioctl_verify_protocol(p.iph.protocol) ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
			return -EINVAL;
	}

	p.i_key = p.o_key = 0;
	p.i_flags = p.o_flags = 0;
	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

static const struct net_device_ops ipip_netdev_ops = {
	.ndo_init       = ipip_tunnel_init,
	.ndo_uninit     = ip_tunnel_uninit,
	.ndo_start_xmit	= ipip_tunnel_xmit,
	.ndo_do_ioctl	= ipip_tunnel_ioctl,
	.ndo_change_mtu = ip_tunnel_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip_tunnel_get_iflink,
};

#define IPIP_FEATURES (NETIF_F_SG |		\
		       NETIF_F_FRAGLIST |	\
		       NETIF_F_HIGHDMA |	\
		       NETIF_F_GSO_SOFTWARE |	\
		       NETIF_F_HW_CSUM)

static void ipip_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipip_netdev_ops;

	dev->type		= ARPHRD_TUNNEL;
	dev->flags		= IFF_NOARP;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_LLTX;
	netif_keep_dst(dev);

	dev->features		|= IPIP_FEATURES;
	dev->hw_features	|= IPIP_FEATURES;
	ip_tunnel_setup(dev, ipip_net_id);
}

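/*
 * Per-device init: expose the tunnel endpoints through dev_addr/broadcast
 * and fold any configured encapsulation header length into tunnel->hlen
 * before handing off to the generic ip_tunnel_init().
 */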
static int ipip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	tunnel->tun_hlen = 0;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
	return ip_tunnel_init(dev);
}

static int ipip_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	u8 proto;

	if (!data || !data[IFLA_IPTUN_PROTO])
		return 0;

	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
	if (proto != IPPROTO_IPIP && proto != IPPROTO_MPLS && proto != 0)
		return -EINVAL;

	return 0;
}

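/*
 * Translate IFLA_IPTUN_* netlink attributes into an ip_tunnel_parm,
 * defaulting to protocol IPPROTO_IPIP with ihl 5.  When attributes are
 * supplied, DF is set (path MTU discovery on) unless IFLA_IPTUN_PMTUDISC
 * explicitly turns it off.  Purely as an illustration, an iproute2 request
 * such as
 *	ip link add name tunl1 type ipip local 10.0.0.1 remote 10.0.0.2 ttl 64
 * arrives here as IFLA_IPTUN_LOCAL/REMOTE/TTL attributes (the name and
 * addresses above are examples only).
 */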
static void ipip_netlink_parms(struct nlattr *data[],
			       struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.version = 4;
	parms->iph.protocol = IPPROTO_IPIP;
	parms->iph.ihl = 5;

	if (!data)
		return;

	if (data[IFLA_IPTUN_LINK])
		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);

	if (data[IFLA_IPTUN_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_IPTUN_LOCAL]);

	if (data[IFLA_IPTUN_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_IPTUN_REMOTE]);

	if (data[IFLA_IPTUN_TTL]) {
		parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]);
		if (parms->iph.ttl)
			parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_IPTUN_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_IPTUN_TOS]);

	if (data[IFLA_IPTUN_PROTO])
		parms->iph.protocol = nla_get_u8(data[IFLA_IPTUN_PROTO]);

	if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);
}

/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipip_netlink_encap_parms(struct nlattr *data[],
				     struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
	}

	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
	}

	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
	}

	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
	}

	return ret;
}

static int ipip_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;

	if (ipip_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipip_netlink_parms(data, &p);
	return ip_tunnel_newlink(dev, tb, &p);
}

static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
			   struct nlattr *data[])
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;

	if (ipip_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipip_netlink_parms(data, &p);

	if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) ||
	    (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
		return -EINVAL;

	return ip_tunnel_changelink(dev, tb, &p);
}

static size_t ipip_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(4) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(4) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_TOS */
		nla_total_size(1) +
		/* IFLA_IPTUN_PROTO */
		nla_total_size(1) +
		/* IFLA_IPTUN_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		0;
}

static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) ||
	    nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->iph.protocol) ||
	    nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
		       !!(parm->iph.frag_off & htons(IP_DF))))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
			tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
			 tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
			 tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
			tunnel->encap.flags))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL]		= { .type = NLA_U32 },
	[IFLA_IPTUN_REMOTE]		= { .type = NLA_U32 },
	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
	[IFLA_IPTUN_TOS]		= { .type = NLA_U8 },
	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
	[IFLA_IPTUN_PMTUDISC]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
};

static struct rtnl_link_ops ipip_link_ops __read_mostly = {
	.kind		= "ipip",
	.maxtype	= IFLA_IPTUN_MAX,
	.policy		= ipip_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipip_tunnel_setup,
	.validate	= ipip_tunnel_validate,
	.newlink	= ipip_newlink,
	.changelink	= ipip_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipip_get_size,
	.fill_info	= ipip_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct xfrm_tunnel ipip_handler __read_mostly = {
	.handler	=	ipip_rcv,
	.err_handler	=	ipip_err,
	.priority	=	1,
};

#if IS_ENABLED(CONFIG_MPLS)
static struct xfrm_tunnel mplsip_handler __read_mostly = {
	.handler	=	mplsip_rcv,
	.err_handler	=	ipip_err,
	.priority	=	1,
};
#endif

static int __net_init ipip_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipip_net_id, &ipip_link_ops, "tunl0");
}

static void __net_exit ipip_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
	ip_tunnel_delete_net(itn, &ipip_link_ops);
}

static struct pernet_operations ipip_net_ops = {
	.init = ipip_init_net,
	.exit = ipip_exit_net,
	.id   = &ipip_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

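/*
 * Module init: register the per-net state first, then the xfrm tunnel
 * handlers for IPPROTO_IPIP (and IPPROTO_MPLS when MPLS is enabled), and
 * finally the rtnl link ops; failures unwind in reverse order.
 */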
static int __init ipip_init(void)
{
	int err;

	pr_info("ipip: IPv4 and MPLS over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipip_net_ops);
	if (err < 0)
		return err;
	err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
	if (err < 0) {
		pr_info("%s: can't register tunnel\n", __func__);
		goto xfrm_tunnel_ipip_failed;
	}
#if IS_ENABLED(CONFIG_MPLS)
	err = xfrm4_tunnel_register(&mplsip_handler, AF_MPLS);
	if (err < 0) {
		pr_info("%s: can't register tunnel\n", __func__);
		goto xfrm_tunnel_mplsip_failed;
	}
#endif
	err = rtnl_link_register(&ipip_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

out:
	return err;

rtnl_link_failed:
#if IS_ENABLED(CONFIG_MPLS)
	xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS);
xfrm_tunnel_mplsip_failed:

#endif
	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
xfrm_tunnel_ipip_failed:
	unregister_pernet_device(&ipip_net_ops);
	goto out;
}

static void __exit ipip_fini(void)
{
	rtnl_link_unregister(&ipip_link_ops);
	if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET))
		pr_info("%s: can't deregister tunnel\n", __func__);
#if IS_ENABLED(CONFIG_MPLS)
	if (xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS))
		pr_info("%s: can't deregister tunnel\n", __func__);
#endif
	unregister_pernet_device(&ipip_net_ops);
}

module_init(ipip_init);
module_exit(ipip_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ipip");
MODULE_ALIAS_NETDEV("tunl0");