xref: /openbmc/linux/net/ipv4/ipip.c (revision f3a8b664)
/*
 *	Linux NET3:	IP/IP protocol decoder.
 *
 *	Authors:
 *		Sam Lantinga (slouken@cs.ucdavis.edu)  02/01/95
 *
 *	Fixes:
 *		Alan Cox	:	Merged and made usable non-modular (it's so tiny it's silly as
 *					a module taking up 2 pages).
 *		Alan Cox	:	Fixed bug with 1.3.18 and IPIP not working (now needs to set skb->h.iph)
 *					to keep ip_forward happy.
 *		Alan Cox	:	More fixes for 1.3.21, and firewall fix. Maybe this will work soon 8).
 *		Kai Schulte	:	Fixed #defines for IP_FIREWALL->FIREWALL
 *		David Woodhouse	:	Perform some basic ICMP handling.
 *					IPIP Routing without decapsulation.
 *		Carlos Picoto	:	GRE over IP support
 *		Alexey Kuznetsov:	Reworked. Really, now it is a truncated version of ipv4/ip_gre.c.
 *					I do not want to merge them together.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

/* tunnel.c: an IP tunnel driver

	The purpose of this driver is to provide an IP tunnel through
	which you can tunnel network traffic transparently across subnets.

	This was written by looking at Nick Holloway's dummy driver.
	Thanks for the great code!

		-Sam Lantinga	(slouken@cs.ucdavis.edu)  02/01/95

	Minor tweaks:
		Cleaned up the code a little and added some pre-1.3.0 tweaks.
		dev->hard_header/hard_header_len changed to use no headers.
		Comments/bracketing tweaked.
		Made the tunnels use dev->name rather than "tunnel:" when reporting errors.
		Added tx_dropped stat

		-Alan Cox	(alan@lxorguk.ukuu.org.uk) 21 March 95

	Reworked:
		Changed to tunnel to the destination gateway in addition to the
			tunnel's pointopoint address
		Almost completely rewritten
		Note:  There is currently no firewall or ICMP handling done.

		-Sam Lantinga	(slouken@cs.ucdavis.edu) 02/13/96

*/

/* Things I wish I had known when writing the tunnel driver:

	When the tunnel_xmit() function is called, the skb contains the
	packet to be sent (plus a great deal of extra info), and dev
	contains the tunnel device that _we_ are.

	When we are passed a packet, we are expected to fill in the
	source address with our source IP address.

	What is the proper way to allocate, copy and free a buffer?
	After you allocate it, it is a "0 length" chunk of memory
	starting at zero.  If you want to add headers to the buffer
	later, you'll have to call "skb_reserve(skb, amount)" with
	the amount of memory you want reserved.  Then, you call
	"skb_put(skb, amount)" with the amount of space you want in
	the buffer.  skb_put() returns a pointer to the start of the
	newly added space.  skb->len is set to the amount of space
	you have "allocated" with skb_put().  You can then write up
	to skb->len bytes to that buffer.  If you need more, you can
	call skb_put() again with the additional amount of space you
	need.  You can find out how much more space you can allocate
	by calling "skb_tailroom(skb)".
	Now, to add header space, call "skb_push(skb, header_len)".
	This creates space at the beginning of the buffer and returns
	a pointer to this new space.  If later you need to strip a
	header from a buffer, call "skb_pull(skb, header_len)".
	skb_headroom() will return how much space is left at the top
	of the buffer (before the main data).  Remember, this headroom
	space must be reserved before the skb_put() function is called.
	*/
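
/* A minimal sketch of the sequence described above (illustrative only, not
   used by this driver); header_len, payload and payload_len are hypothetical:

	skb = alloc_skb(header_len + payload_len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, header_len);		reserve the headroom first
	data = skb_put(skb, payload_len);	tail grows; returns start of the new space
	memcpy(data, payload, payload_len);	fill the payload
	hdr = skb_push(skb, header_len);	consume part of the reserved headroom
	... fill in hdr, then hand the skb on, or kfree_skb() it on an error path ...
 */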

/*
   This version of net/ipv4/ipip.c is a clone of net/ipv4/ip_gre.c

   For comments look at net/ipv4/ip_gre.c --ANK
 */


#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ip_tunnels.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst_metadata.h>

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
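
/* log_ecn_error has 0644 permissions, so besides being settable at load time
 * (e.g. "modprobe ipip log_ecn_error=0" when built as a module), it can also
 * be flipped at run time via /sys/module/ipip/parameters/log_ecn_error.
 */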

static int ipip_net_id __read_mostly;

static int ipip_tunnel_init(struct net_device *dev);
static struct rtnl_link_ops ipip_link_ops __read_mostly;

static int ipip_err(struct sk_buff *skb, u32 info)
{

/* All the routers (except for Linux) return only
   8 bytes of packet payload. This means that precise relaying of
   ICMP in the real Internet is absolutely infeasible.
 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_tunnel *t;
	int err;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;

	err = -ENOENT;
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
			     iph->daddr, iph->saddr, 0);
	if (!t)
		goto out;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 t->parms.link, 0, iph->protocol, 0);
		err = 0;
		goto out;
	}

	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
			      iph->protocol, 0);
		err = 0;
		goto out;
	}

	if (t->parms.iph.daddr == 0)
		goto out;

	err = 0;
	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

out:
	return err;
}

static const struct tnl_ptk_info ipip_tpi = {
	/* no tunnel info required for ipip. */
	.proto = htons(ETH_P_IP),
};

#if IS_ENABLED(CONFIG_MPLS)
static const struct tnl_ptk_info mplsip_tpi = {
	/* no tunnel info required for mplsip. */
	.proto = htons(ETH_P_MPLS_UC),
};
#endif

static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
	struct metadata_dst *tun_dst = NULL;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
			iph->saddr, iph->daddr, 0);
	if (tunnel) {
		const struct tnl_ptk_info *tpi;

		if (tunnel->parms.iph.protocol != ipproto &&
		    tunnel->parms.iph.protocol != 0)
			goto drop;

		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;
#if IS_ENABLED(CONFIG_MPLS)
		if (ipproto == IPPROTO_MPLS)
			tpi = &mplsip_tpi;
		else
#endif
			tpi = &ipip_tpi;
		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
			goto drop;
		if (tunnel->collect_md) {
			tun_dst = ip_tun_rx_dst(skb, 0, 0, 0);
			if (!tun_dst)
				return 0;
		}
		return ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
	}

	return -1;

drop:
	kfree_skb(skb);
	return 0;
}

static int ipip_rcv(struct sk_buff *skb)
{
	return ipip_tunnel_rcv(skb, IPPROTO_IPIP);
}

#if IS_ENABLED(CONFIG_MPLS)
static int mplsip_rcv(struct sk_buff *skb)
{
	return ipip_tunnel_rcv(skb, IPPROTO_MPLS);
}
#endif

/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr  *tiph = &tunnel->parms.iph;
	u8 ipproto;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ipproto = IPPROTO_IPIP;
		break;
#if IS_ENABLED(CONFIG_MPLS)
	case htons(ETH_P_MPLS_UC):
		ipproto = IPPROTO_MPLS;
		break;
#endif
	default:
		goto tx_error;
	}

	if (tiph->protocol != ipproto && tiph->protocol != 0)
		goto tx_error;

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP4))
		goto tx_error;

	skb_set_inner_ipproto(skb, ipproto);

	if (tunnel->collect_md)
		ip_md_tunnel_xmit(skb, dev, ipproto);
	else
		ip_tunnel_xmit(skb, dev, tiph, ipproto);
	return NETDEV_TX_OK;

tx_error:
	kfree_skb(skb);

	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

static bool ipip_tunnel_ioctl_verify_protocol(u8 ipproto)
{
	switch (ipproto) {
	case 0:
	case IPPROTO_IPIP:
#if IS_ENABLED(CONFIG_MPLS)
	case IPPROTO_MPLS:
#endif
		return true;
	}

	return false;
}

static int
ipip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 ||
		    !ipip_tunnel_ioctl_verify_protocol(p.iph.protocol) ||
		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)))
			return -EINVAL;
	}

	p.i_key = p.o_key = 0;
	p.i_flags = p.o_flags = 0;
	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

static const struct net_device_ops ipip_netdev_ops = {
	.ndo_init	= ipip_tunnel_init,
	.ndo_uninit	= ip_tunnel_uninit,
	.ndo_start_xmit	= ipip_tunnel_xmit,
	.ndo_do_ioctl	= ipip_tunnel_ioctl,
	.ndo_change_mtu = ip_tunnel_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip_tunnel_get_iflink,
};

#define IPIP_FEATURES (NETIF_F_SG |		\
		       NETIF_F_FRAGLIST |	\
		       NETIF_F_HIGHDMA |	\
		       NETIF_F_GSO_SOFTWARE |	\
		       NETIF_F_HW_CSUM)

static void ipip_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipip_netdev_ops;

	dev->type		= ARPHRD_TUNNEL;
	dev->flags		= IFF_NOARP;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_LLTX;
	netif_keep_dst(dev);

	dev->features		|= IPIP_FEATURES;
	dev->hw_features	|= IPIP_FEATURES;
	ip_tunnel_setup(dev, ipip_net_id);
}

static int ipip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	tunnel->tun_hlen = 0;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
	return ip_tunnel_init(dev);
}

static int ipip_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	u8 proto;

	if (!data || !data[IFLA_IPTUN_PROTO])
		return 0;

	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
	if (proto != IPPROTO_IPIP && proto != IPPROTO_MPLS && proto != 0)
		return -EINVAL;

	return 0;
}

static void ipip_netlink_parms(struct nlattr *data[],
			       struct ip_tunnel_parm *parms, bool *collect_md)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.version = 4;
	parms->iph.protocol = IPPROTO_IPIP;
	parms->iph.ihl = 5;
	*collect_md = false;

	if (!data)
		return;

	if (data[IFLA_IPTUN_LINK])
		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);

	if (data[IFLA_IPTUN_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_IPTUN_LOCAL]);

	if (data[IFLA_IPTUN_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_IPTUN_REMOTE]);

	if (data[IFLA_IPTUN_TTL]) {
		parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]);
		if (parms->iph.ttl)
			parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_IPTUN_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_IPTUN_TOS]);

	if (data[IFLA_IPTUN_PROTO])
		parms->iph.protocol = nla_get_u8(data[IFLA_IPTUN_PROTO]);

	if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);

	if (data[IFLA_IPTUN_COLLECT_METADATA])
		*collect_md = true;
}

/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipip_netlink_encap_parms(struct nlattr *data[],
				     struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
	}

	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
	}

	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
	}

	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
	}

	return ret;
}

static int ipip_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;

	if (ipip_netlink_encap_parms(data, &ipencap)) {
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipip_netlink_parms(data, &p, &t->collect_md);
	return ip_tunnel_newlink(dev, tb, &p);
}

static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
			   struct nlattr *data[])
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	bool collect_md;

	if (ipip_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipip_netlink_parms(data, &p, &collect_md);
	if (collect_md)
		return -EINVAL;

	if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) ||
	    (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
		return -EINVAL;

	return ip_tunnel_changelink(dev, tb, &p);
}

static size_t ipip_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(4) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(4) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_TOS */
		nla_total_size(1) +
		/* IFLA_IPTUN_PROTO */
		nla_total_size(1) +
		/* IFLA_IPTUN_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_COLLECT_METADATA */
		nla_total_size(0) +
		0;
}

static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) ||
	    nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->iph.protocol) ||
	    nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
		       !!(parm->iph.frag_off & htons(IP_DF))))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
			tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
			 tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
			 tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
			tunnel->encap.flags))
		goto nla_put_failure;

	if (tunnel->collect_md)
		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
			goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL]		= { .type = NLA_U32 },
	[IFLA_IPTUN_REMOTE]		= { .type = NLA_U32 },
	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
	[IFLA_IPTUN_TOS]		= { .type = NLA_U8 },
	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
	[IFLA_IPTUN_PMTUDISC]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
};

static struct rtnl_link_ops ipip_link_ops __read_mostly = {
	.kind		= "ipip",
	.maxtype	= IFLA_IPTUN_MAX,
	.policy		= ipip_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipip_tunnel_setup,
	.validate	= ipip_tunnel_validate,
	.newlink	= ipip_newlink,
	.changelink	= ipip_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipip_get_size,
	.fill_info	= ipip_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
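
/* With the "ipip" rtnl_link kind registered above, tunnels are normally
 * created from user space with iproute2, for example (addresses are
 * placeholders only):
 *
 *	ip link add name ipiptun0 type ipip local 192.0.2.1 remote 192.0.2.2 ttl 64
 *	ip link set ipiptun0 up
 */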

static struct xfrm_tunnel ipip_handler __read_mostly = {
	.handler	=	ipip_rcv,
	.err_handler	=	ipip_err,
	.priority	=	1,
};

#if IS_ENABLED(CONFIG_MPLS)
static struct xfrm_tunnel mplsip_handler __read_mostly = {
	.handler	=	mplsip_rcv,
	.err_handler	=	ipip_err,
	.priority	=	1,
};
#endif

static int __net_init ipip_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipip_net_id, &ipip_link_ops, "tunl0");
}

static void __net_exit ipip_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);

	ip_tunnel_delete_net(itn, &ipip_link_ops);
}

static struct pernet_operations ipip_net_ops = {
	.init = ipip_init_net,
	.exit = ipip_exit_net,
	.id   = &ipip_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __init ipip_init(void)
{
	int err;

	pr_info("ipip: IPv4 and MPLS over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipip_net_ops);
	if (err < 0)
		return err;
	err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
	if (err < 0) {
		pr_info("%s: can't register tunnel\n", __func__);
		goto xfrm_tunnel_ipip_failed;
	}
#if IS_ENABLED(CONFIG_MPLS)
	err = xfrm4_tunnel_register(&mplsip_handler, AF_MPLS);
	if (err < 0) {
		pr_info("%s: can't register tunnel\n", __func__);
		goto xfrm_tunnel_mplsip_failed;
	}
#endif
	err = rtnl_link_register(&ipip_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

out:
	return err;

rtnl_link_failed:
#if IS_ENABLED(CONFIG_MPLS)
	/* must match the AF_MPLS registration above */
	xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS);
xfrm_tunnel_mplsip_failed:
#endif
	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
xfrm_tunnel_ipip_failed:
	unregister_pernet_device(&ipip_net_ops);
	goto out;
}

static void __exit ipip_fini(void)
{
	rtnl_link_unregister(&ipip_link_ops);
	if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET))
		pr_info("%s: can't deregister tunnel\n", __func__);
#if IS_ENABLED(CONFIG_MPLS)
	if (xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS))
		pr_info("%s: can't deregister tunnel\n", __func__);
#endif
	unregister_pernet_device(&ipip_net_ops);
}

module_init(ipip_init);
module_exit(ipip_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ipip");
MODULE_ALIAS_NETDEV("tunl0");